the-stack_0_13274 | import copy
import json
import os
import platform
from datetime import datetime
from pathlib import Path
from subprocess import (run, CalledProcessError)
TERMINAL = {
'Linux': 'gnome-terminal',
'Windows': 'start powershell -WorkingDirectory',
'Darwin': 'open -n /Applications/Utilities/Terminal.app'
}
def openInExplorer(_path_):
_os_ = platform.system()
try:
        if _os_ == 'Windows':
            run([os.path.join(os.getenv('WINDIR'), 'explorer.exe'),
                 '/select,', os.path.normpath(_path_)], check=True)
        elif _os_ == 'Darwin':
            run(['open', _path_], check=True)
        elif _os_ == 'Linux':
            run(['xdg-open', _path_], check=True)
return 'done'
except CalledProcessError:
return 'failed'
def projects():
    """reading the file inside a function means each call picks up the latest changes to the json"""
    try:
        with open('projects.json') as json_config:
            return json.load(json_config)
    except (FileNotFoundError, json.JSONDecodeError):
        return {'_init': 'false'}
def loggedIn():
try:
return projects()['logged_in'] != 'false'
except KeyError:
doInit()
return loggedIn()
def dirScan(dir_to_scan):
    """recursively scans a directory for corvid-package.json files and builds project entries"""
found_files = [str(f)
for f in Path(dir_to_scan).rglob("*corvid-package.json")]
corvid_files = []
for _file_ in found_files:
if not 'src/corvid-package.json' in _file_:
continue
project_loc = _file_.split('/src/corvid-package.json')[0]
try:
corvid_files.append({
'abs_dir': project_loc,
'slug': project_loc.split('/')[-1],
'last_updated': datetime.now(),
'due_date': 'none',
'favorited': 'false'
})
except IndexError:
print(_file_)
return corvid_files
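# Illustrative example for dirScan (hypothetical layout): a tree containing
#     /home/user/wix/my-site/src/corvid-package.json
# yields one entry with abs_dir '/home/user/wix/my-site' and slug 'my-site'.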
def subdirs(project_path):
    """collects the useful source files in a project directory so they can be opened in the editor of choice"""
    if not project_path.endswith('/'):
        project_path += '/'
    valid = {
        'pages': {'path': 'src/pages', 'type': 'js'},
        'backend': {'path': 'src/backend', 'type': 'any'},
        'public': {'path': 'src/public', 'type': 'any'},
        'lightboxes': {'path': 'src/lightboxes', 'type': 'js'}
    }
    def search(prop):
        _path_ = project_path + valid[prop]['path']
        # rglob already yields full paths; convert them to strings for the checks below.
        to_open = [str(f) for f in Path(_path_).rglob("*") if f.is_file()]
        def check(_):
            if _.endswith('tsconfig.json') or _.endswith('authorization-config.json'):
                return False
            if valid[prop]['type'] == 'any':
                return True
            return valid[prop]['type'] == _.split('.')[-1]
        return [_file_ for _file_ in to_open if check(_file_)]
    # Flatten the per-section results into a single list of file paths.
    found = [f for _key_ in valid for f in search(_key_)]
    return found or None
def qzWrite(new_json):
"""rewrites json file with requested changes"""
new_json['last_updated'] = datetime.now()
with open('projects.json', 'w', encoding='utf-8') as current_json:
json.dump(new_json, current_json,
ensure_ascii=False, indent=2, default=str, sort_keys=True)
return 'done'
def isInt(n_o):
"""parseInt polyfill"""
try:
return int(n_o)
except ValueError:
return False
def isInit():
"""checks json to ensure existing install"""
try:
return projects()['_init'] and projects()['_init'] != 'false'
except KeyError:
doInit()
return True
def getProjects():
"""current projects list"""
try:
return projects()['local_projects']
except KeyError:
doInit()
return getProjects()
def writeProjects(new_projects):
    if not isinstance(new_projects, list):
        raise TypeError('Not a valid projects format')
try:
re_write = clonedProjects()
current_projects = getProjects()
to_keep = [p for p in current_projects if p['abs_dir'] in new_projects]
for _p_ in new_projects:
            if not any(p['abs_dir'] == _p_ for p in to_keep):
to_keep.append({
'slug': _p_.split('/')[-1],
'abs_dir': _p_,
'last_updated': datetime.now(),
'favorited': 'false',
'due_date': 'none'
})
re_write['local_projects'] = to_keep
qzWrite(re_write)
return 'done'
    except Exception as exc:
        raise RuntimeError('Uncaught error writing projects') from exc
def getDirs():
"""current directories dictionary"""
try:
return projects()['_watched']
except KeyError:
doInit()
return getDirs()
def writeDirs(new_dirs):
    if not isinstance(new_dirs, list):
        raise TypeError("Not a valid dirs format")
    try:
        re_write = clonedProjects()
        for new_dir in new_dirs:
            if new_dir == mainDir():
                re_write['_watched']['main'] = new_dir
            else:
                new_key = new_dir.split('/')[-1]
                re_write['_watched'][new_key] = new_dir
        # persist the updated watch list
        qzWrite(re_write)
        return 'done'
    except Exception as exc:
        raise RuntimeError('Uncaught error writing dirs') from exc
def clonedProjects():
"""clones json file for overwriting current"""
return copy.deepcopy(projects())
def mainDir(new_dir=None):
"""sets or gets the main project directory
sets if argument is passed
gets if no argument"""
if not new_dir:
return getDirs()['main']
qz_clone = clonedProjects()
try:
curr_key = [c for c in {**getDirs()} if c == new_dir][0]
curr_path = getDirs()[curr_key]
repl_key = qz_clone['_watched']['main'].split('/')[-1]
qz_clone['_watched'][repl_key] = qz_clone['_watched']['main']
del qz_clone['_watched'][curr_key]
qz_clone['_watched']['main'] = curr_path
    except KeyError:
        raise RuntimeError('Replacement failed')
qzWrite(qz_clone)
return 'done'
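# Usage sketch for mainDir (hypothetical watched entries, assuming projects.json was initialised by doInit):
#     mainDir()               # get: returns the current main directory, e.g. '/home/user'
#     mainDir('client-site')  # set: promotes the watched entry keyed 'client-site' to main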
def doInit(refresh=False):
"""IMPORTANT: Changes made here must be made to rest of this script."""
usr_home = str(Path.home())
init_project = {
'_init': 'yes',
'_os': platform.system(),
'_watched': {'main': usr_home},
'last_updated': datetime.now(),
'local_projects': dirScan(usr_home),
}
if not refresh:
init_project['logged_in'] = 'false'
init_project['_created'] = datetime.now()
init_project['_config'] = {
'update_on_start': 'false',
'terminal': TERMINAL[platform.system()],
'text_editor': 'none',
'font': {'size': 'none', 'family': 'none'},
'highlight_color': 'none'
}
else:
init_project['logged_in'] = projects()['logged_in']
init_project['_created'] = projects()['_created']
init_project['_config'] = projects()['_config']
qzWrite(init_project)
return 'done'
def getByContext(context):
"""gets project by name or index and returns its full location path"""
if context == '0':
return getProjects()[0]['abs_dir']
if isInt(context):
return getProjects()[int(context)]['abs_dir']
if not '/' in context:
closest_match = ''
for _ix_, _item_ in enumerate(getProjects()):
if _item_['slug'] == context:
return _item_['abs_dir']
if _item_['slug'] in context:
closest_match = _item_['abs_dir']
if _ix_ == len(getProjects()) - 1 and closest_match != '':
return closest_match
else:
return [_path_['abs_dir'] for _path_ in getProjects() if context in _path_['abs_dir']][0]
return False
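# Usage sketch for getByContext (hypothetical entries): the context may be an index,
# a slug, or a path fragment, and resolves to the project's absolute directory.
#     getByContext('0')                        # first project in the list
#     getByContext('my-site')                  # lookup by slug
#     getByContext('/home/user/wix/my-site')   # lookup by path fragment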
def withConfig(config=None):
"""sets or gets the config object"""
if isInit():
qz_clone = clonedProjects()
if not config:
return projects()['_config']
if isinstance(config, str):
try:
return projects()['_config'][config]
            except KeyError:
                raise KeyError('main.py: Not a valid key in config')
if isinstance(config, dict):
for _key_ in {**config}:
qz_clone['_config'][_key_] = config[_key_]
qzWrite(qz_clone)
return qz_clone['_config']
        raise TypeError(f'main.py: {config} is not a valid parameter for the config method')
else:
doInit()
return withConfig(config)
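# Usage sketch for withConfig (assumes an initialised projects.json; values are illustrative):
#     withConfig()                          # get the whole config dict
#     withConfig('terminal')                # get a single key
#     withConfig({'text_editor': 'code'})   # merge the given keys and persist them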
def create(_dir_, _url_, do_debug=False):
"""
clones a new project
    if do_debug is True, the corvid-debug CLI is used for the clone
"""
filtered = [o for o in getProjects() if o['abs_dir'] == _dir_]
if len(filtered) > 0:
        raise FileExistsError('Project already exists!')
try:
        if os.path.isdir(_dir_):
            raise FileExistsError('A project in that directory already exists!')
Path(_dir_).mkdir(parents=True, exist_ok=True)
if do_debug:
do_exec = 'corvid-debug'
else:
do_exec = 'corvid'
args = ['npx', do_exec, 'clone', _url_]
npm_init = run(['npm', 'init', '-y'], cwd=_dir_)
npm_init.check_returncode()
if npm_init.stderr:
print(npm_init.stderr)
else:
print(npm_init.stdout)
npx_downloading = run(args, cwd=_dir_)
npx_downloading.check_returncode()
if npx_downloading.stderr:
print(npx_downloading.stderr)
raise f"""main.py: Error creating {_dir_}
{npx_downloading.stderr}"""
if npx_downloading.stdout:
print(npx_downloading.stdout)
return 'done'
return 'Invalid params'
    except CalledProcessError as err:
        raise RuntimeError(f"""main.py: failed to create {_dir_}
        {err}""") from err
def openByContext(_id_, do_debug=False, text_editor=False):
"""opens project by index or name"""
try:
curr_f = getByContext(_id_)
if text_editor:
            usr_editor = withConfig('text_editor')  # key name as defined in doInit's _config
            if not usr_editor or usr_editor == 'none':
                usr_editor = 'atom'
found_files = subdirs(curr_f)
if found_files:
project_files = [usr_editor, *found_files]
else:
project_files = [usr_editor]
text_editor = run(project_files)
text_editor.check_returncode()
debug_state = 'corvid'
if do_debug:
debug_state += '-debug'
local_editor = run(
['npx', debug_state, 'open-editor'], cwd=curr_f)
local_editor.check_returncode()
return 'opening'
    except CalledProcessError as err:
        raise RuntimeError(f"""main.py: Error opening {_id_}
        {err}""") from err
def openInTerminal(_id_):
"""opens project directory in terminal emulator"""
try:
target_dir = getByContext(_id_)
try:
usr_emulator = withConfig('terminal')
except KeyError:
usr_emulator = TERMINAL[platform.system()]
        opening_terminal = run(usr_emulator.split(), cwd=target_dir)
opening_terminal.check_returncode()
return True
    except CalledProcessError as err:
        raise RuntimeError(f"""main.py: Error opening {_id_}
        {err}""") from err
def appendProject(_id_):
"""writes an existing project to watch list --- does not clone"""
qz_clone = clonedProjects()
if getByContext(_id_.split('/')[-1]):
return print('Project already exists!')
try:
new_project = {
'abs_dir': _id_,
'slug': _id_.split('/')[-1],
'last_updated': datetime.now(),
'due_date': 'none',
'favorited': 'false'
}
qz_clone['local_projects'].append(new_project)
qzWrite(qz_clone)
return 'done!'
    except Exception as exc:
        raise RuntimeError(f'main.py: Error while appending {_id_}') from exc
def deleteProject(_id_):
"""deletes a watched project's entry in the [projects] array"""
qz_clone = clonedProjects()
to_delete = getByContext(_id_)
to_write = []
for _item_ in qz_clone['local_projects']:
if _item_['abs_dir'] != to_delete:
to_write.append(_item_)
qz_clone['local_projects'] = to_write
qzWrite(qz_clone)
return 'done'
def getSnapshots(_id_):
"""returns an array of snapshot dirnames for given project"""
curr_f = getByContext(_id_) + '/.corvid/snapshots'
if not os.path.isdir(curr_f):
        raise FileNotFoundError(f'main.py: {_id_} has no snapshots yet!')
return [f for f in Path(curr_f).glob("*") if os.path.isdir(f)]
def toggleFavorite(_id_):
"""ability to tag projects as starred"""
qz_clone = clonedProjects()
focused_project = [px for px in getProjects(
) if px['abs_dir'] == getByContext(_id_)][0]
focused_index = qz_clone['local_projects'].index(focused_project)
is_favorited = focused_project['favorited']
if is_favorited == 'true':
qz_clone['local_projects'][focused_index]['favorited'] = 'false'
else:
qz_clone['local_projects'][focused_index]['favorited'] = 'true'
qzWrite(qz_clone)
return 'done'
def setDeadline(_id_, date_set):
"""adds or sets a project deadline"""
qz_clone = clonedProjects()
focused_project = [px for px in getProjects(
) if px['abs_dir'] == getByContext(_id_)][0]
to_set = qz_clone['local_projects'].index(focused_project)
if isinstance(date_set, str):
qz_clone['local_projects'][to_set]['due_date'] = date_set
qzWrite(qz_clone)
return 'done'
    raise TypeError('main.py: Not a valid date object')
def loginHandler():
qz_clone = clonedProjects()
try:
login_attempt = run(["npx", "corvid", "login"], capture_output=True)
        if login_attempt.returncode == 0:
qz_clone['logged_in'] = 'true'
else:
qz_clone['logged_in'] = 'false'
except CalledProcessError:
qz_clone['logged_in'] = 'false'
finally:
qzWrite(qz_clone)
def logoutHandler():
try:
qz_clone = clonedProjects()
logout_attempt = run(["npx", "corvid", "logout"], capture_output=True)
        if logout_attempt.returncode == 0:
qz_clone['logged_in'] = 'false'
else:
qz_clone['logged_in'] = 'true'
qzWrite(qz_clone)
except CalledProcessError:
return "logout aborted"
the-stack_0_13275 | """
ART Attack Runner
Version: 1.0
Author: Olivier Lemelin
Script that was built in order to automate the execution of ART.
"""
import os
import os.path
import fnmatch
import platform
import re
import subprocess
import sys
import hashlib
import json
import argparse
import yaml
import unidecode
# pylint: disable=line-too-long, invalid-name
TECHNIQUE_DIRECTORY_PATTERN = 'T*'
ATOMICS_DIR_RELATIVE_PATH = os.path.join("..", "..", "..", "atomics")
HASH_DB_RELATIVE_PATH = "techniques_hash.db"
COMMAND_TIMEOUT = 20
##########################################
# Filesystem & Helpers
##########################################
def get_platform():
"""Gets the current platform."""
# We need to handle the platform a bit differently in certain cases.
# Otherwise, we simply return the value that's given here.
plat = platform.system().lower()
if plat == "darwin":
# 'macos' is the term that is being used within the .yaml files.
plat = "macos"
return plat
def get_self_path():
"""Gets the full path to this script's directory."""
return os.path.dirname(os.path.abspath(__file__))
def get_yaml_file_from_dir(path_to_dir):
"""Returns path of the first file that matches "*.yaml" in a directory."""
for entry in os.listdir(path_to_dir):
if fnmatch.fnmatch(entry, '*.yaml'):
# Found the file!
return os.path.join(path_to_dir, entry)
print("No YAML file describing the technique in {}!".format(path_to_dir))
return None
def load_technique(path_to_dir):
"""Loads the YAML content of a technique from its directory. (T*)"""
# Get path to YAML file.
file_entry = get_yaml_file_from_dir(path_to_dir)
# Load and parses its content.
with open(file_entry, 'r', encoding="utf-8") as f:
return yaml.load(unidecode.unidecode(f.read()), Loader=yaml.SafeLoader)
def load_techniques():
"""Loads multiple techniques from the 'atomics' directory."""
# Get path to atomics directory.
atomics_path = os.path.join(get_self_path(),
ATOMICS_DIR_RELATIVE_PATH)
normalized_atomics_path = os.path.normpath(atomics_path)
print("Loading techniques from {}...".format(normalized_atomics_path))
# Create a dict to accept the techniques that will be loaded.
techniques = {}
print("Loading Technique", end="")
# For each tech directory in the main directory.
for atomic_entry in os.listdir(normalized_atomics_path):
# Make sure that it matches the current pattern.
if fnmatch.fnmatch(atomic_entry, TECHNIQUE_DIRECTORY_PATTERN):
print(", {}".format(atomic_entry), end="")
# Get path to tech dir.
path_to_dir = os.path.join(normalized_atomics_path, atomic_entry)
# Load, parse and add to dict.
tech = load_technique(path_to_dir)
techniques[atomic_entry] = tech
# Add path to technique's directory.
techniques[atomic_entry]["path"] = path_to_dir
print(".")
return techniques
def check_dependencies(executor, cwd):
dependencies = "dependencies"
dependencies_executor = "dependency_executor_name"
prereq_command = "prereq_command"
get_prereq_command = "get_prereq_command"
input_arguments = "input_arguments"
    # If the executor doesn't have the dependencies or dependency-executor keys, it has no dependencies. Skip.
    if dependencies not in executor or dependencies_executor not in executor:
print(
"No '{}' or '{}' section found in the yaml file. Skipping dependencies check.".format(dependencies_executor,
dependencies))
return True
launcher = executor[dependencies_executor]
for dep in executor[dependencies]:
args = executor[input_arguments] if input_arguments in executor else {}
final_parameters = set_parameters(args, {})
command = build_command(launcher, dep[prereq_command], final_parameters, cwd)
p = subprocess.Popen(launcher, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=os.environ, cwd=cwd)
p.communicate(bytes(command, "utf-8") + b"\n", timeout=COMMAND_TIMEOUT)
# If the dependencies are not satisfied the command will exit with code 1, 0 otherwise.
if p.returncode != 0:
print("Dependencies not found. Fetching them...")
if get_prereq_command not in dep:
print("Missing {} commands in the yaml file. Can't fetch requirements".format(get_prereq_command))
return False
command = build_command(launcher, dep[get_prereq_command], final_parameters, cwd)
d = subprocess.Popen(launcher, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=os.environ, cwd=cwd)
out, err = d.communicate(bytes(command, "utf-8") + b"\n", timeout=COMMAND_TIMEOUT)
p.terminate()
return True
##########################################
# Executors
##########################################
def is_valid_executor(exe, self_platform):
"""Validates that the executor can be run on the current platform."""
if self_platform not in exe["supported_platforms"]:
return False
# The "manual" executors need to be run by hand, normally.
# This script should not be running them.
if exe["executor"]["name"] == "manual":
return False
return True
def get_valid_executors(tech):
"""From a loaded technique, get all executors appropriate for the current platform."""
return list(filter(lambda x: is_valid_executor(x, get_platform()), tech['atomic_tests']))
def get_executors(tech):
"""From a loaded technique, get all executors."""
return tech['atomic_tests']
def print_input_arguments(executor):
"""Prints out the input arguments of an executor in a human-readable manner."""
if "input_arguments" in executor:
for name, values in executor["input_arguments"].items():
print("{name}: {description} (default: {default})".format(name=name,
description=values["description"],
default=values["default"]))
def print_executor(executor):
"""Prints an executor in a human-readable manner."""
print("\n-----------------------------------------------------------")
print("Name: " + executor["name"].strip())
print("Description: " + executor["description"].strip())
print("Platforms: " + ", ".join(map(lambda x: x.strip(), executor["supported_platforms"])))
print("\nArguments:")
print_input_arguments(executor)
print("\nLauncher: " + executor["executor"]["name"])
print("Command: " + executor["executor"]["command"] + "\n")
def executor_get_input_arguments(input_arguments):
"""Gets the input arguments from the user, displaying a prompt and converting them."""
# Empty dict to hold on the parameters.
parameters = {}
for name, values in input_arguments.items():
# If answer, use that.
answer = input_string("Please provide a parameter for '{name}' (blank for default)".format(name=name))
# If no answer, use the default.
if not answer:
answer = values["default"]
# Cast parameter to string
parameters[name] = str(answer)
return parameters
def print_non_interactive_command_line(technique_name, executor_number, parameters, check_dep, run_cleanup):
"""Prints the comand line to use in order to launch the technique non-interactively."""
flag_dep = ""
flag_cleanup = ""
if check_dep:
flag_dep = "--dependencies"
if run_cleanup:
flag_cleanup = "--cleanup"
print("In order to run this non-interactively:")
print(" Python:")
print(" techniques = runner.AtomicRunner()")
print(
" techniques.execute(\"{name}\", position={pos}, parameters={params}, dependencies={dep}, cleanup={cleanup})".format(
name=technique_name, pos=executor_number, params=parameters, dep=check_dep, cleanup=run_cleanup))
print(" Shell Script:")
print(" python3 runner.py run {name} {pos} --args '{params}' {dep} {cleanup}\n".format(name=technique_name,
pos=executor_number,
params=json.dumps(
parameters),
dep=flag_dep,
cleanup=flag_cleanup))
def interactive_apply_executor(executor, path, technique_name, executor_number):
"""Interactively run a given executor."""
# Prints information about the executor.
print_executor(executor)
# Request if we still want to run this.
if not yes_or_no("Do you want to run this? "):
print("Cancelled.")
return
# Request if we want to check the dependencies before running the executor.
check_dep = yes_or_no("Do you want to check dependencies? ")
# Request if we want to cleanup after the executor completes.
run_cleanup = yes_or_no("Do you want to run the cleanup after the executor completes? ")
# If so, get the input parameters.
if "input_arguments" in executor:
parameters = executor_get_input_arguments(executor["input_arguments"])
else:
parameters = {}
if check_dep:
if not check_dependencies(executor, path):
print("Check dependencies failed. Cancelling...")
return
# Prints the Command line to enter for non-interactive execution.
print_non_interactive_command_line(technique_name, executor_number, parameters, check_dep, run_cleanup)
launcher = convert_launcher(executor["executor"]["name"])
command = executor["executor"]["command"]
built_command = build_command(launcher, command, parameters, path)
# begin execution with the above parameters.
execute_command(launcher, built_command, path)
if run_cleanup:
apply_cleanup(executor, path, parameters)
def get_default_parameters(args):
"""Build a default parameters dictionary from the content of the YAML file."""
return {name: values["default"] for name, values in args.items()}
def set_parameters(executor_input_arguments, given_arguments):
"""Sets the default parameters if no value was given."""
    # Default parameters as described in the executor.
default_parameters = get_default_parameters(executor_input_arguments)
# Merging default parameters with the given parameters, giving precedence
# to the given params.
final_parameters = {**default_parameters, **given_arguments}
# Cast parameters to string
for name, value in final_parameters.items():
final_parameters[name] = str(value)
return final_parameters
def apply_executor(executor, path, parameters):
"""Non-interactively run a given executor."""
args = executor["input_arguments"] if "input_arguments" in executor else {}
final_parameters = set_parameters(args, parameters)
launcher = convert_launcher(executor["executor"]["name"])
command = executor["executor"]["command"]
built_command = build_command(launcher, command, final_parameters, path)
# begin execution with the above parameters.
output = execute_command(launcher, built_command, path)
return output
def apply_cleanup(executor, path, parameters):
if "cleanup_command" not in executor["executor"] or executor["executor"]["cleanup_command"] == None:
print("No cleanup section found in the yaml file. Skipping...")
return
args = executor["input_arguments"] if "input_arguments" in executor else {}
final_parameters = set_parameters(args, parameters)
launcher = convert_launcher(executor["executor"]["name"])
command = executor["executor"]["cleanup_command"]
built_command = build_command(launcher, command, final_parameters, path)
# begin execution with the above parameters.
execute_command(launcher, built_command, path)
##########################################
# Text Input
##########################################
def yes_or_no(question):
"""Asks a yes or no question, and captures input. Blank input is interpreted as Y."""
reply = str(input(question + ' (Y/n): ')).capitalize().strip()
if reply == "": # pylint: disable=no-else-return
return True
elif reply[0] == 'Y':
return True
elif reply[0] == 'N':
return False
return yes_or_no("Please enter Y or N.")
def input_string(message):
"""Asks a question and captures the string output."""
return str(input(message + ': ')).strip()
def parse_number_input(user_input):
"""Converts a string of space-separated numbers to an array of numbers."""
lst_str = user_input.strip().split(' ')
return list(map(int, lst_str))
##########################################
# Commands
##########################################
class ManualExecutorException(Exception):
"""Custom Exception that we trigger triggered when we encounter manual executors."""
pass
def convert_launcher(launcher):
"""Takes the YAML launcher, and outputs an appropriate executable
to run the command."""
plat = get_platform()
# Regular command prompt.
if launcher == "command_prompt": # pylint: disable=no-else-return
if plat == "windows": # pylint: disable=no-else-return
# This is actually a 64bit CMD.EXE. Do not change this to a 32bits CMD.EXE
return "C:\\Windows\\System32\\cmd.exe"
elif plat == "linux":
# Good ol' Bourne Shell.
return "/bin/sh"
elif plat == "macos":
# I assume /bin/sh is available on OSX.
return "/bin/sh"
else:
# We hit a non-Linux, non-Windows OS. Use sh.
print("Warning: Unsupported platform {}! Using /bin/sh.".format(plat))
return "/bin/sh"
elif launcher == "powershell":
return "C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe"
elif launcher == "sh":
return "/bin/sh"
elif launcher == "bash":
return "/bin/bash"
elif launcher == "manual":
# We cannot process manual execution with this script. Raise an exception.
raise ManualExecutorException()
else:
# This launcher is not known. Returning it directly.
print("Warning: Launcher '{}' has no specific case! Invoking as is.".format(launcher))
return launcher
def build_command(launcher, command, parameters, path): # pylint: disable=unused-argument
"""Builds the command line that will eventually be run."""
# Using a closure! We use the replace to match found objects
# and replace them with the corresponding passed parameter.
def replacer(matchobj):
if matchobj.group(1) in parameters:
val = parameters[matchobj.group(1)]
        else:
            print("Warning: no match found while building the replacement string.")
            # Leave the placeholder untouched rather than returning None, which would crash re.sub.
            val = matchobj.group(0)
return val
# Fix string interpolation (from ruby to Python!) -- ${}
command = re.sub(r"\$\{(.+?)\}", replacer, command)
# Fix string interpolation (from ruby to Python!) -- #{}
command = re.sub(r"\#\{(.+?)\}", replacer, command)
# Replace instances of PathToAtomicsFolder
atomics = os.path.join(path, "..")
command = command.replace("$PathToAtomicsFolder", atomics)
command = command.replace("PathToAtomicsFolder", atomics)
return command
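# Illustration (hypothetical values): a YAML command such as
#     "ping -c #{count} #{host}"
# with parameters {'count': '4', 'host': '127.0.0.1'} is rewritten by the
# substitutions above to "ping -c 4 127.0.0.1" before it is handed to the launcher.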
def execute_subprocess(launcher, command, cwd):
p = subprocess.Popen(launcher, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=os.environ, cwd=cwd)
try:
outs, errs = p.communicate(bytes(command, "utf-8") + b"\n", timeout=COMMAND_TIMEOUT)
return outs, errs
except subprocess.TimeoutExpired as e:
# Display output if it exists.
if e.output:
print(e.output)
if e.stdout:
print(e.stdout)
if e.stderr:
print(e.stderr)
print("Command timed out!")
# Kill the process.
p.kill()
return "", ""
def print_process_output(outs, errs):
def clean_output(s):
# Remove Windows CLI garbage
s = re.sub(r"Microsoft\ Windows\ \[version .+\]\r?\nCopyright.*(\r?\n)+[A-Z]\:.+?\>", "", s)
return re.sub(r"(\r?\n)*[A-Z]\:.+?\>", "", s)
# Output the appropriate outputs if they exist.
if outs:
print("Output: {}".format(clean_output(outs.decode("utf-8", "ignore"))), flush=True)
else:
print("(No output)")
if errs:
print("Errors: {}".format(clean_output(errs.decode("utf-8", "ignore"))), flush=True)
def execute_command(launcher, command, cwd):
"""Executes a command with the given launcher."""
print("\n------------------------------------------------")
# If launcher is powershell we execute all commands under a single process
# powershell.exe -Command - (Tell powershell to read scripts from stdin)
if "powershell" in launcher:
outs, errs = execute_subprocess([launcher, '-Command', '-'], command, cwd)
print_process_output((command.encode() + b":\n" + outs), errs)
else:
cumulative_out = b""
cumulative_err = b""
for comm in command.split("\n"):
# We skip empty lines. This is due to the split just above.
if comm == "":
continue
            # We actually run the command itself.
outs, errs = execute_subprocess(launcher, comm, cwd)
print_process_output(outs, errs)
if outs is not None:
cumulative_out += b"> " + comm.encode() + b":\n" + outs
if errs is not None:
cumulative_err += errs
continue
outs = cumulative_out
errs = cumulative_err
return [outs, errs]
#########################################
# Hash database
#########################################
def load_hash_db():
"""Loads the hash database from a file, or create the empty file if it did not already exist."""
hash_db_path = os.path.join(get_self_path(), HASH_DB_RELATIVE_PATH)
try:
with open(hash_db_path, 'r') as f:
return json.load(f)
except json.JSONDecodeError:
print("Could not decode the JSON Hash DB! Please fix the syntax of the file.")
sys.exit(3)
except IOError:
print("File did not exist. Created a new empty Hash DB.")
empty_db = {}
write_hash_db(hash_db_path, empty_db)
return empty_db
def write_hash_db(hash_db_path, db):
"""Writes the hash DB dictionary to a file."""
with open(hash_db_path, 'w') as f:
json.dump(db, f, sort_keys=True, indent=4, separators=(',', ': '))
def check_hash_db(hash_db_path, executor_data, technique_name, executor_position):
"""Checks the hash DB for a hash, and verifies that it corresponds to the current executor data's
hash. Adds the hash to the current database if it does not already exist."""
hash_db = load_hash_db()
executor_position = str(executor_position)
# Tries to load the technique section.
if not technique_name in hash_db:
print("Technique section '{}' did not exist. Creating.".format(technique_name))
# Create section
hash_db[technique_name] = {}
new_hash = hashlib.sha256(json.dumps(executor_data).encode()).hexdigest()
# Tries to load the executor hash.
if not executor_position in hash_db[technique_name]:
print("Hash was not in DB. Adding.")
# Create the hash, since it does not exist. Return OK.
hash_db[technique_name][executor_position] = new_hash
# Write DB to file.
write_hash_db(hash_db_path, hash_db)
return True
old_hash = hash_db[technique_name][executor_position]
# If a previous hash already exists, compare both hashes.
return old_hash == new_hash
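# Illustration: the fingerprint stored per technique/position is
#     hashlib.sha256(json.dumps(executor_data).encode()).hexdigest()
# so any edit to an executor's YAML block changes the hash and blocks re-execution
# until it is cleared, e.g. `python runner.py clearhash T1033 0` (hypothetical arguments).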
def clear_hash(hash_db_path, technique_to_clear, position_to_clear=-1):
"""Clears a hash from the DB, then saves the DB to a file."""
hash_db = load_hash_db()
if position_to_clear == -1:
# We clear out the whole technique.
del hash_db[technique_to_clear]
else:
# We clear the position.
del hash_db[technique_to_clear][str(position_to_clear)]
print("Hash cleared.")
write_hash_db(hash_db_path, hash_db)
#########################################
# Atomic Runner and Main
#########################################
class AtomicRunner():
"""Class that allows the execution, interactive or not, of the various techniques that are part of ART."""
def __init__(self):
"""Constructor. Ensures that the techniques are loaded before we can run them."""
# Loads techniques.
self.techniques = load_techniques()
def repl(self):
"""Presents a REPL to the user so that they may interactively run certain techniques."""
print("Enter the name of the technique that you would like to execute (eg. T1033). Type 'exit' to quit.")
i = input("> ").strip()
while True:
if i == "exit":
break
else:
if i in self.techniques:
self.interactive_execute(i)
else:
print("Technique '{}' does not exist.".format(i))
i = input("> ").strip()
def execute(self, technique_name, position=0, parameters=None, dependencies=False, cleanup=False):
"""Runs a technique non-interactively."""
parameters = parameters or {}
if technique_name not in self.techniques:
print("No technique {} found. Skipping...".format(technique_name))
return [b'', b'No technique found']
# Gets the tech.
tech = self.techniques[technique_name]
# Gets Executors.
executors = get_valid_executors(tech)
        if position >= len(executors):
            print("The position '{}' couldn't be found.".format(position))
            print("The technique {} has {} available tests for the current platform. Skipping...".format(technique_name,
len(
executors)))
return [b'', b'Executor not found. Out of bounds?']
print("================================================")
if dependencies:
print("Checking dependencies {}/{}\n".format(technique_name, position))
if not check_dependencies(executors[position], tech["path"]):
return [b'', b'Dependencies not met!']
print("Executing {}/{}\n".format(technique_name, position))
try:
# Get executor at given position.
executor = executors[position]
except IndexError:
print("Out of bounds: this executor is not part of that technique's list!")
return [b'', b'Out of bounds: this executor is not part of that technique\'s list!']
# Make sure that it is compatible.
if not is_valid_executor(executor, get_platform()):
print("Warning: This executor is not compatible with the current platform!")
return [b'', b'Warning: This executor is not compatible with the current platform!']
# Check that hash matches previous executor hash or that this is a new hash.
if not check_hash_db(HASH_DB_RELATIVE_PATH, executor, technique_name, position):
print("Warning: new executor fingerprint does not match the old one! Skipping this execution.")
print(
"To re-enable this test, review this specific executor, test your payload, and clear out this executor's hash from the database.")
print("Run this: python runner.py clearhash {} {}.".format(technique_name, position))
return [b'', b'Warning: new executor fingerprint does not match the old one! Skipping this execution.']
# Launch execution.
try:
response = apply_executor(executor, tech["path"], parameters)
except ManualExecutorException:
print("Cannot launch a technique with a manual executor. Aborting.")
return [b'', b'Cannot launch a technique with a manual executor. Aborting.']
finally:
if cleanup:
print("Running cleanup commands.")
apply_cleanup(executor, tech["path"], parameters)
return response
def interactive_execute(self, technique_name):
"""Interactively execute a single technique."""
# Gets the tech.
tech = self.techniques[technique_name]
# Gets the compatible executors for this current platform.
executors = get_valid_executors(tech)
# If there are none.
if not executors:
print("No valid executors for this platform/technique combination!")
return
# Display technique info
print("\n===========================================================")
print("{} - {}".format(tech["display_name"], tech["attack_technique"]))
# Get number of executors.
nb_executors = len(executors)
if nb_executors > 1:
# Displays all executors with the index (for the number choice).
for idx, executor in enumerate(executors):
# Make it better!
print("{}. ".format(idx))
print_executor(executor)
# Display prompt, and get input as number list.
while True:
user_input = input("Please choose your executors: (space-separated list of numbers): ")
try:
numbers = parse_number_input(user_input)
for i in numbers:
# Interactively apply all chosen executors.
interactive_apply_executor(executors[i], tech["path"], tech["attack_technique"], i)
break
except Exception as e: # pylint: disable=broad-except
print("Could not parse the input. make sure this is a space-separated list of integers.")
print(e)
else:
# We only have one executor in this case.
interactive_apply_executor(executors[0], tech["path"], tech["attack_technique"], 0)
def interactive(args): # pylint: disable=unused-argument
"""Launch the runner in interactive mode."""
runner = AtomicRunner()
runner.repl()
def run(args):
"""Launch the runner in non-interactive mode."""
runner = AtomicRunner()
runner.execute(args.technique, args.position, json.loads(args.args), args.dependencies, args.cleanup, )
def clear(args):
"""Clears a stale hash from the Hash DB."""
clear_hash(HASH_DB_RELATIVE_PATH, args.technique, args.position)
def main():
"""Main function, called every time this script is launched rather than imported."""
parser = argparse.ArgumentParser(description="Allows the automation of tests in the Atomic Red Team repository.")
subparsers = parser.add_subparsers()
parser_int = subparsers.add_parser('interactive', help='Runs the techniques interactively.')
parser_int.set_defaults(func=interactive)
    parser_run = subparsers.add_parser('run', help="Runs a single technique / executor pair once.")
parser_run.add_argument('technique', type=str, help="Technique to run.")
parser_run.add_argument('position', type=int, help="Position of the executor in technique to run.")
parser_run.add_argument("--dependencies", action='store_true',
help="Check for dependencies, in any, and fetch them if necessary.")
parser_run.add_argument("--cleanup", action='store_true',
help="Run cleanup commands, if any, after executor completed.")
parser_run.add_argument('--args', type=str, default="{}",
help="JSON string representing a dictionary of arguments (eg. '{ \"arg1\": \"val1\", \"arg2\": \"val2\" }' )")
parser_run.set_defaults(func=run)
parser_clear = subparsers.add_parser('clearhash',
help="Clears a hash from the database, allowing the technique to be run once again.")
parser_clear.add_argument('technique', type=str, help="Technique to run.")
parser_clear.add_argument('--position', '-p', type=int, default=-1,
help="Position of the executor in technique to run.")
parser_clear.set_defaults(func=clear)
try:
args = parser.parse_args()
args.func(args)
except AttributeError:
parser.print_help()
if __name__ == "__main__":
main()
the-stack_0_13276 | from collections import defaultdict
class UnionFind:
def __init__(self, n):
self.size = n
self.parents = [-1] * n
def union(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y: return
if self.parents[x] > self.parents[y]: x, y = y, x
self.parents[x] += self.parents[y]
self.parents[y] = x
return x, y
def find(self, x):
if self.parents[x] < 0: return x
else:
self.parents[x] = self.find(self.parents[x])
return self.parents[x]
def group_size(self, x):
return -self.parents[self.find(x)]
def is_same_group(self, x, y):
return self.find(x) == self.find(y)
def members(self, x):
root_x = self.find(x)
return [i for i in range(self.size) if self.find(i) == root_x]
def roots(self):
return [i for i, x in enumerate(self.parents) if x < 0]
def group_count(self):
return len(self.roots())
def dict(self):
ret = defaultdict(list)
for x in range(self.size):
ret[self.find(x)].append(x)
return ret
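# Minimal usage sketch of the UnionFind class above (values are illustrative):
#     uf = UnionFind(5)
#     uf.union(0, 1); uf.union(1, 2)
#     uf.is_same_group(0, 2)   # -> True
#     uf.group_size(0)         # -> 3
#     uf.roots()               # -> [0, 3, 4]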
n, m, k = map(int, input().split())
friend = [tuple(map(int, input().split())) for _ in range(m)]
enemy = [tuple(map(int, input().split())) for _ in range(k)]
follows = [[] for _ in range(n)]
for a, b in friend:
follows[a - 1].append(b - 1)
follows[b - 1].append(a - 1)
blocks = [[] for _ in range(n)]
for a, b in enemy:
blocks[a - 1].append(b - 1)
blocks[b - 1].append(a - 1)
dsu = UnionFind(n)
ans = [0] * n
for a, b in friend:
dsu.union(a - 1, b - 1)
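# For each person i the answer is: (size of i's connected component) - 1 (itself)
# - (direct friends of i inside that component) - (blocked users of i inside it).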
for i in range(n):
ans_ = dsu.group_size(i) - 1
for j in follows[i]:
ans_ -= int(dsu.is_same_group(i, j))
for j in blocks[i]:
ans_ -= int(dsu.is_same_group(i, j))
ans[i] = ans_
print(*ans)
the-stack_0_13277 | """Add contents_hash
Revision ID: 515f518eff57
Revises: 218fd78e07e8
Create Date: 2017-07-25 15:21:18.613141
"""
# revision identifiers, used by Alembic.
revision = '515f518eff57'
down_revision = '218fd78e07e8'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('RepositoryApps', sa.Column('contents_hash', sa.Unicode(length=255), nullable=True))
op.add_column('RepositoryApps', sa.Column('last_processed_contents_hash', sa.Unicode(length=255), nullable=True))
op.add_column('RepositoryApps', sa.Column('last_processed_downloaded_hash', sa.Unicode(length=255), nullable=True))
op.create_index(u'ix_RepositoryApps_contents_hash', 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index(u'ix_RepositoryApps_last_processed_contents_hash', 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index(u'ix_RepositoryApps_last_processed_downloaded_hash', 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.drop_index(u'ix_RepositoryApps_last_processed_hash', table_name='RepositoryApps')
op.drop_column('RepositoryApps', u'last_processed_hash')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('RepositoryApps', sa.Column(u'last_processed_hash', mysql.VARCHAR(length=255), nullable=True))
op.create_index(u'ix_RepositoryApps_last_processed_hash', 'RepositoryApps', [u'last_processed_hash'], unique=False)
op.drop_index(u'ix_RepositoryApps_last_processed_downloaded_hash', table_name='RepositoryApps')
op.drop_index(u'ix_RepositoryApps_last_processed_contents_hash', table_name='RepositoryApps')
op.drop_index(u'ix_RepositoryApps_contents_hash', table_name='RepositoryApps')
op.drop_column('RepositoryApps', 'last_processed_downloaded_hash')
op.drop_column('RepositoryApps', 'last_processed_contents_hash')
op.drop_column('RepositoryApps', 'contents_hash')
### end Alembic commands ###
the-stack_0_13279 | # model settings
model = dict(
type='CenterNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_agnostic=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
avg_wh_weightv3=False,
hm_center_ratio=0.27,
center_ratio=0.01,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
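# NOTE: the data dicts below reference train_pipeline/test_pipeline, which are not
# defined elsewhere in this config. The following is an assumed, minimal
# mmdetection-style sketch; the transform types and img_scale are placeholders and
# should be adjusted to the intended experiment.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]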
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0003,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'eft53_htct027_whratio001_v1l_3lr_wd3e4_s123_nos_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
the-stack_0_13280 | #!/usr/bin/env python
from multiprocessing import Pool
import numpy as np
import os
import matplotlib.pyplot as plt
from functools import partial
import time
import copy
from scipy.stats import multivariate_normal
from scipy import stats
# from scipy.optimize import root
from scipy.optimize import bisect
from sklearn.gaussian_process.kernels import RBF, \
Matern
from pyapprox.density import tensor_product_pdf
from pyapprox.gaussian_process import CholeskySampler, AdaptiveGaussianProcess
from pyapprox.low_discrepancy_sequences import transformed_halton_sequence
from pyapprox.utilities import \
compute_f_divergence, pivoted_cholesky_decomposition, \
get_tensor_product_quadrature_rule
from pyapprox.probability_measure_sampling import rejection_sampling
from pyapprox.visualization import get_meshgrid_function_data
import matplotlib as mpl
mpl.rcParams['font.size'] = 16
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['text.usetex'] = True # use latex for all text handling
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['savefig.format'] = 'pdf' # gives best resolution plots
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
mpl.rcParams['legend.fontsize'] = 16
# print mpl.rcParams.keys()
mpl.rcParams['text.latex.preamble'] = \
r'\usepackage{siunitx}\usepackage{amsmath}\usepackage{amssymb}'
def rosenbrock_function(x):
assert x.shape[0] == 2
x = 4*x-2
vals = ((1.-x[0, :])**2+100*(x[1, :]-x[0, :]**2)**2)[:, np.newaxis]
# vals = ((1.-x[0,:])**2+1*(x[1,:]-x[0,:]**2)**2)[:,np.newaxis]
return vals
def add_noise(values, noise_level):
noise = np.random.normal(0, noise_level)
return values + noise
class HaltonSampler(object):
def __init__(self, nvars, variables):
self.nvars = nvars
self.variables = variables
if self.variables is not None:
assert self.variables.num_vars() == self.nvars
self.marginal_icdfs = [
v.ppf for v in self.variables.all_variables()]
else:
self.marginal_icdfs = None
self.ntraining_samples = 0
self.training_samples = None
def __call__(self, nsamples):
self.training_samples = transformed_halton_sequence(
self.marginal_icdfs, self.nvars, nsamples)
new_samples = self.training_samples[:, self.ntraining_samples:]
self.ntraining_samples = self.training_samples.shape[1]
return new_samples, 0
def convergence_study(kernel, function, sampler,
num_vars, generate_samples, num_new_samples,
update_kernel_scale_num_samples,
noise_level=0, return_samples=False,
norm=np.linalg.norm, callback=None, gp_kernel=None):
# dirty hack to include two GP kernel types (for IVAR)
if hasattr(kernel, "__len__"):
# in this case, kernel is an array and we assume to have received
# two kernels
sampler_kernel = kernel[1]
kernel = kernel[0]
else:
sampler_kernel = kernel
# Instantiate a Gaussian Process model
if gp_kernel is None:
gp_kernel = kernel
gp = AdaptiveGaussianProcess(
gp_kernel, n_restarts_optimizer=10, alpha=1e-12)
gp.setup(function, sampler)
if hasattr(sampler, "set_gaussian_process"):
sampler.set_gaussian_process(gp)
print('sampler kernel', kernel, 'gp kernel', gp_kernel)
# Mesh the input space for evaluations of the real function,
# the prediction and its MSE
validation_samples = generate_samples()
validation_values = function(validation_samples).squeeze()
num_samples = np.cumsum(num_new_samples)
num_steps = num_new_samples.shape[0]
errors = np.empty(num_steps, dtype=float)
nsamples = np.empty(num_steps, dtype=int)
sample_step = 0
optimizer_step = 0
while sample_step < num_steps:
if hasattr(gp, 'kernel_'):
# if using const * rbf + noise kernel
# kernel.theta = gp.kernel_.k1.k2.theta
# if using const * rbf
# kernel.theta = gp.kernel_.k2.theta
# if using rbf
kernel.theta = gp.kernel_.theta
# Fit to data using Maximum Likelihood Estimation of the parameters
# if True:
if ((optimizer_step >= update_kernel_scale_num_samples.shape[0]) or
(sampler.ntraining_samples <
update_kernel_scale_num_samples[optimizer_step])):
gp.optimizer = None
else:
gp.optimizer = "fmin_l_bfgs_b"
optimizer_step += 1
flag = gp.refine(np.sum(num_new_samples[:sample_step+1]))
# allow points to be added to gp more often than gp is evaluated for
# validation
if sampler.ntraining_samples >= num_samples[sample_step]:
pred_values = gp(validation_samples, return_cov=False).squeeze()
# Compute error
assert pred_values.shape == validation_values.shape
error = norm(pred_values-validation_values)/norm(validation_values)
if callback is not None:
callback(gp)
print(gp.kernel_)
print('N', sampler.ntraining_samples, 'Error', error)
errors[sample_step] = error
nsamples[sample_step] = sampler.ntraining_samples
sample_step += 1
if flag > 0:
errors, nsamples = errors[:sample_step], nsamples[:sample_step]
print('Terminating study. Points are becoming ill conditioned')
break
if return_samples:
return errors, nsamples, sampler.training_samples
return errors, nsamples
def unnormalized_posterior(gp, prior_pdf, samples, temper_param=1):
prior_vals = prior_pdf(samples).squeeze()
gp_vals = gp.predict(samples.T).squeeze()
unnormalized_posterior_vals = prior_vals*np.exp(-gp_vals)**temper_param
return unnormalized_posterior_vals
class BayesianInferenceCholeskySampler(CholeskySampler):
def __init__(self, prior_pdf, num_vars,
num_candidate_samples, variables,
max_num_samples=None, generate_random_samples=None,
temper=True, true_nll=None):
self.prior_pdf = prior_pdf
if not temper:
self.temper_param = 1
else:
self.temper_param = 0
self.true_nll = true_nll
self.gp = None
super().__init__(num_vars, num_candidate_samples, variables,
None, generate_random_samples)
def set_gaussian_process(self, gp):
self.gp = gp
def increment_temper_param(self, num_training_samples):
samples = np.random.uniform(0, 1, (self.nvars, 1000))
density_vals_prev = self.weight_function(samples)
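        # The next temper parameter is chosen so that the coefficient of variation
        # (std/mean) of the ratio between the newly tempered posterior and the
        # previous one equals 1, i.e. the largest tempering step that keeps the
        # two densities close.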
def objective(beta):
new_weight_function = partial(
unnormalized_posterior, self.gp, self.prior_pdf,
temper_param=beta)
density_vals = new_weight_function(samples)
II = np.where(density_vals_prev > 1e-15)[0]
JJ = np.where(density_vals_prev < 1e-15)[0]
assert len(np.where(density_vals[JJ] > 1e-15)[0]) == 0
ratio = np.zeros(samples.shape[1])
ratio[II] = density_vals[II]/density_vals_prev[II]
obj = ratio.std()/ratio.mean()
return obj
print('temper parameter', self.temper_param)
x0 = self.temper_param+1e-4
# result = root(lambda b: objective(b)-1, x0)
# x_opt = result.x
x_opt = bisect(lambda b: objective(b)-1, x0, 1)
self.temper_param = x_opt
def __call__(self, num_samples):
if self.gp is None:
raise ValueError("must call self.set_gaussian_process()")
if self.ntraining_samples > 0 and self.temper_param < 1:
self.increment_temper_param(self.training_samples)
assert self.temper_param <= 1
if self.ntraining_samples == 0:
weight_function = self.prior_pdf
else:
if self.true_nll is not None:
def weight_function(x): return self.prior_pdf(x)*np.exp(
-self.true_nll(x)[:, 0])**self.temper_param
else:
weight_function = partial(
unnormalized_posterior, self.gp, self.prior_pdf,
temper_param=self.temper_param)
self.set_weight_function(weight_function)
samples, flag = super().__call__(num_samples)
return samples, flag
def get_posterior_samples(num_vars, weight_function, nsamples):
x, w = get_tensor_product_quadrature_rule(
200, num_vars, np.polynomial.legendre.leggauss,
transform_samples=lambda x: (x+1)/2,
density_function=lambda x: 0.5*np.ones(x.shape[1]))
vals = weight_function(x)
C = 1/vals.dot(w)
def posterior_density(samples):
return weight_function(samples)*C
def proposal_density(samples):
return np.ones(samples.shape[1])
def generate_uniform_samples(nsamples):
return np.random.uniform(0, 1, (num_vars, nsamples))
def generate_proposal_samples(nsamples):
return np.random.uniform(0, 1, (num_vars, nsamples))
envelope_factor = C*vals.max()*1.1
rosenbrock_samples = rejection_sampling(
posterior_density, proposal_density,
generate_proposal_samples, envelope_factor,
num_vars, nsamples, verbose=True,
batch_size=None)
return rosenbrock_samples
def bayesian_inference_example():
init_scale = 0.1
num_vars = 2
num_candidate_samples = 10000
num_new_samples = np.asarray([20]+[5]*6+[25]*6+[50]*8)
nvalidation_samples = 10000
prior_pdf = partial(
tensor_product_pdf, univariate_pdfs=partial(stats.beta.pdf, a=1, b=1))
misfit_function = rosenbrock_function
def weight_function(samples):
prior_vals = prior_pdf(samples).squeeze()
misfit_vals = misfit_function(samples).squeeze()
vals = np.exp(-misfit_vals)*prior_vals
return vals
# Get validation samples from true posterior using rejection sampling
rosenbrock_samples = get_posterior_samples(
num_vars, weight_function, num_candidate_samples+nvalidation_samples)
def generate_random_samples(nsamples, idx=0):
assert idx+nsamples <= rosenbrock_samples.shape[1]
return rosenbrock_samples[:, idx:idx+nsamples]
generate_validation_samples = partial(
generate_random_samples, nvalidation_samples,
idx=num_candidate_samples)
# Must set variables if not using uniform prior on [0,1]^D
variables = None
def get_filename(method, fixed_scale):
filename = 'bayes-example-%s-d-%d-n-%d.npz' % (
method, num_vars, num_candidate_samples)
if not fixed_scale:
filename = filename[:-4]+'-opt.npz'
return filename
# defining kernel
length_scale = init_scale*np.ones(num_vars, dtype=float)
kernel = RBF(length_scale, (5e-2, 1))
# define quadrature rule to compute f divergence
div_type = 'hellinger'
quad_x, quad_w = get_tensor_product_quadrature_rule(
200, num_vars, np.polynomial.legendre.leggauss, transform_samples=None,
density_function=None)
quad_x = (quad_x+1)/2
quad_rule = quad_x, quad_w
fig, axs = plt.subplots(1, 3, figsize=(3*8, 6), sharey=False)
oracle_cholesky_sampler = CholeskySampler(
num_vars, num_candidate_samples, variables,
generate_random_samples=generate_random_samples)
oracle_cholesky_sampler.set_weight_function(weight_function)
oracle_cholesky_sampler.set_kernel(copy.deepcopy(kernel))
# to give prior an unfair but ultimately useless advantage
# use samples from poseterior as half of the candidates
prior_cholesky_sampler = CholeskySampler(
num_vars, num_candidate_samples, variables,
generate_random_samples=generate_random_samples)
prior_cholesky_sampler.set_weight_function(prior_pdf)
prior_cholesky_sampler.set_kernel(copy.deepcopy(kernel))
# this is the one Qian should use. The others are for comparision only
adaptive_cholesky_sampler = BayesianInferenceCholeskySampler(
prior_pdf, num_vars, num_candidate_samples, variables,
max_num_samples=num_new_samples.sum(),
generate_random_samples=None)
adaptive_cholesky_sampler.set_kernel(copy.deepcopy(kernel))
halton_sampler = HaltonSampler(num_vars, variables)
samplers = [oracle_cholesky_sampler, prior_cholesky_sampler,
adaptive_cholesky_sampler, halton_sampler][1:]
methods = ['Oracle-Weighted-Cholesky-b', 'Prior-Weighted-Cholesky-b',
'Learning-Weighted-Cholesky-b', 'Halton'][1:]
labels = [r'$\mathrm{Oracle\;Weighted\;Cholesky}$',
r'$\mathrm{Prior\;Weighted\;Cholesky}$',
r'$\mathrm{Adapted\;Weighted\;Cholesky}$',
r'$\mathrm{Halton}$'][1:]
fixed_scales = [True, False, False, False][1:]
for sampler, method, fixed_scale in zip(samplers, methods, fixed_scales):
filename = get_filename(method, fixed_scale)
print(filename)
if os.path.exists(filename):
continue
if fixed_scale:
update_kernel_scale_num_samples = np.empty(0)
else:
update_kernel_scale_num_samples = np.cumsum(num_new_samples)
divergences = []
cond_nums = []
temper_params = []
def callback(gp):
approx_density = partial(unnormalized_posterior, gp, prior_pdf)
exact_density = weight_function
error = compute_f_divergence(
approx_density, exact_density, quad_rule, div_type, True)
# print ('divergence',error)
divergences.append(error)
cond_nums.append(np.linalg.cond(gp.L_.dot(gp.L_.T)))
if hasattr(sampler, 'temper_param'):
temper_params.append(sampler.temper_param)
print(temper_params)
errors, nsamples, samples = convergence_study(
kernel, rosenbrock_function, sampler, num_vars,
generate_validation_samples, num_new_samples,
update_kernel_scale_num_samples, callback=callback,
return_samples=True)
np.savez(filename, nsamples=nsamples, errors=errors,
divergences=np.asarray(divergences),
cond_nums=np.asarray(cond_nums), samples=samples,
temper_params=np.asarray(temper_params))
styles = ['-', '--', '--', '--']
# styles = ['k-','r-.','b--','g:']
for method, label, ls, fixed_scale in zip(
methods, labels, styles, fixed_scales):
filename = get_filename(method, fixed_scale)
data = np.load(filename)
nsamples, errors = data['nsamples'], data['errors']
divergences, cond_nums = data['divergences'], data['cond_nums']
axs[0].loglog(nsamples, errors, ls=ls, label=label)
axs[1].loglog(nsamples, divergences, ls=ls, label=label)
axs[2].loglog(nsamples, cond_nums, ls=ls, label=label)
for ii in range(3):
axs[ii].set_xlabel(r'$m$')
axs[ii].set_xlim(10, 1000)
axs[0].set_ylabel(r'$\tilde{\epsilon}_{\omega,2}$', rotation=90)
ylim0 = axs[0].get_ylim()
ylim1 = axs[1].get_ylim()
ylim = [min(ylim0[0], ylim1[0]), max(ylim0[1], ylim1[1])]
axs[0].set_ylim(ylim)
axs[1].set_ylim(ylim)
axs[1].set_ylabel(r'$D_\mathrm{H}$', rotation=90)
axs[2].set_ylabel(r'$\kappa$', rotation=90)
figname = 'bayes_example_comparison_%d.pdf' % num_vars
axs[0].legend()
plt.savefig(figname)
method, fixed_scale = 'Learning-Weighted-Cholesky-b', False
filename = get_filename(method, fixed_scale)
print(filename)
adaptive_cholesky_samples = np.load(filename)['samples']
temper_params = np.load(filename)['temper_params']
nsamples = np.load(filename)['nsamples']
fig, axs = plt.subplots(1, 3, figsize=(3*8, 6))
cnt = 0
# plt.figure(figsize=(8,6))
# plt.semilogy(nsamples,temper_params)
axs[cnt].semilogy(np.arange(1, nsamples.shape[0]),
temper_params[1:], 'k-o')
axs[cnt].set_xlabel(r'$\mathrm{Iteration}$ $j$')
axs[cnt].set_ylabel(r'$\beta_j$')
cnt += 1
for ii in [6, -1]:
beta = temper_params[ii]
nn = nsamples[ii]
# should actually be using gp approximation of misfit for visualization
# here but true misfit is good enough for visualization
def weight_function(x): return prior_pdf(x).squeeze()*np.exp(
-misfit_function(x).squeeze())**beta
# plt.figure(figsize=(8,6))
plt_ranges = [0, 1, 0, 1]
X, Y, Z = get_meshgrid_function_data(weight_function, plt_ranges, 30)
pp = axs[cnt].contourf(X, Y, Z,
# levels=np.linspace(Z.min(),Z.max(),20),
levels=np.linspace(0, 1, 20),
cmap=mpl.cm.coolwarm)
axs[cnt].plot(
adaptive_cholesky_samples[0, :nn],
adaptive_cholesky_samples[1, :nn], 'ko')
axs[cnt].set_xlabel(r'$y_1$')
axs[cnt].set_ylabel(r'$y_2$')
cnt += 1
plt.colorbar(pp, ax=axs[cnt-1])
figname = 'bayes-example-temper-params.pdf'
plt.savefig(figname)
if __name__ == '__main__':
try:
import sklearn
    except ImportError:
msg = 'Install sklearn using pip install sklearn'
raise Exception(msg)
bayesian_inference_example()
the-stack_0_13283 | # Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.benchmark import datagen, algorithms
from cuml.benchmark.bench_helper_funcs import _training_data_to_numpy
from cuml.benchmark.runners import AccuracyComparisonRunner, \
SpeedupComparisonRunner, run_variations
from cuml.common.import_utils import has_umap
from cuml.common.import_utils import has_xgboost
import numpy as np
import cudf
import pytest
from numba import cuda
from sklearn import metrics
import pandas as pd
import time
@pytest.mark.parametrize('dataset', ['blobs', 'regression', 'classification'])
def test_data_generators(dataset):
data = datagen.gen_data(dataset, "numpy", n_samples=100, n_features=10)
assert isinstance(data[0], np.ndarray)
assert data[0].shape[0] == 100
@pytest.mark.parametrize('input_type',
['numpy', 'cudf', 'pandas', 'gpuarray', 'gpuarray-c'])
def test_data_generator_types(input_type):
X, *_ = datagen.gen_data('blobs', input_type, n_samples=100, n_features=10)
if input_type == 'numpy':
assert isinstance(X, np.ndarray)
elif input_type == 'cudf':
assert isinstance(X, cudf.DataFrame)
elif input_type == 'pandas':
assert isinstance(X, pd.DataFrame)
elif input_type == 'gpuarray':
assert cuda.is_cuda_array(X)
elif input_type == 'gpuarray-c':
assert cuda.is_cuda_array(X)
else:
assert False
def test_data_generator_split():
X_train, y_train, X_test, y_test = datagen.gen_data(
'blobs', 'numpy', n_samples=100, n_features=10, test_fraction=0.20
)
assert X_train.shape == (100, 10)
assert X_test.shape == (25, 10)
def test_run_variations():
algo = algorithms.algorithm_by_name("LogisticRegression")
res = run_variations(
[algo],
dataset_name="classification",
bench_rows=[100, 200],
bench_dims=[10, 20],
)
assert res.shape[0] == 4
assert (res.n_samples == 100).sum() == 2
assert (res.n_features == 20).sum() == 2
def test_speedup_runner():
class MockAlgo:
def __init__(self, t):
self.t = t
def fit(self, X, y):
time.sleep(self.t)
return
def predict(self, X):
nr = X.shape[0]
res = np.zeros(nr)
res[0:int(nr / 5.0)] = 1.0
return res
class FastMockAlgo(MockAlgo):
def __init__(self):
MockAlgo.__init__(self, 0.1)
class SlowMockAlgo(MockAlgo):
def __init__(self):
MockAlgo.__init__(self, 2)
pair = algorithms.AlgorithmPair(
SlowMockAlgo,
FastMockAlgo,
shared_args={},
name="Mock",
accuracy_function=metrics.accuracy_score,
)
runner = SpeedupComparisonRunner(
[20], [5], dataset_name='zeros'
)
results = runner.run(pair)[0]
expected_speedup = SlowMockAlgo().t / FastMockAlgo().t
assert results["speedup"] == pytest.approx(expected_speedup, 0.4)
def test_multi_reps():
class CountingAlgo:
tot_reps = 0
def fit(self, X, y):
CountingAlgo.tot_reps += 1
pair = algorithms.AlgorithmPair(
CountingAlgo,
CountingAlgo,
shared_args={},
name="Counting",
)
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='zeros', test_fraction=0.20, n_reps=4
)
runner.run(pair)
# Double the n_reps since it is used in cpu and cuml versions
assert CountingAlgo.tot_reps == 8
def test_accuracy_runner():
    # Set up a predictor that is wrong on 20% of the rows, so the expected accuracy is 0.80
class MockAlgo:
def fit(self, X, y):
return
def predict(self, X):
nr = X.shape[0]
res = np.zeros(nr)
res[0:int(nr / 5.0)] = 1.0
return res
pair = algorithms.AlgorithmPair(
MockAlgo,
MockAlgo,
shared_args={},
name="Mock",
accuracy_function=metrics.accuracy_score,
)
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='zeros', test_fraction=0.20
)
results = runner.run(pair)[0]
assert results["cuml_acc"] == pytest.approx(0.80)
# Only test a few algorithms (which collectively span several types)
# to reduce runtime burden
@pytest.mark.parametrize('algo_name', ['UMAP-Supervised',
'DBSCAN',
'LogisticRegression',
'ElasticNet',
'FIL'])
def test_real_algos_runner(algo_name):
pair = algorithms.algorithm_by_name(algo_name)
if (algo_name == 'UMAP-Supervised' and not has_umap()) or \
(algo_name == 'FIL' and not has_xgboost()):
pytest.xfail()
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='classification', test_fraction=0.20
)
results = runner.run(pair)[0]
print(results)
assert results["cuml_acc"] is not None
@pytest.mark.parametrize('input_type', ['numpy', 'cudf', 'pandas', 'gpuarray'])
def test_training_data_to_numpy(input_type):
X, y, *_ = datagen.gen_data(
'blobs', input_type, n_samples=100, n_features=10
)
X_np, y_np = _training_data_to_numpy(X, y)
assert isinstance(X_np, np.ndarray)
assert isinstance(y_np, np.ndarray)
|
the-stack_0_13284 | paramwise_cfg = dict(
norm_decay_mult=0.0,
bias_decay_mult=0.0,
custom_keys={
'.absolute_pos_embed': dict(decay_mult=0.0),
'.relative_position_bias_table': dict(decay_mult=0.0)
})
# For a batch size of 128 per GPU on 8 GPUs, the linearly scaled learning rate is
# lr = 5e-4 * 128 * 8 / 512 = 0.001
optimizer = dict(
type='AdamW',
lr=5e-4 * 128 * 8 / 512,
weight_decay=0.05,
eps=1e-8,
betas=(0.9, 0.999),
paramwise_cfg=paramwise_cfg)
optimizer_config = dict(grad_clip=dict(max_norm=5.0))
# learning policy
lr_config = dict(
policy='CosineAnnealing',
by_epoch=False,
min_lr_ratio=1e-2,
warmup='linear',
warmup_ratio=1e-3,
warmup_iters=20 * 1252,
warmup_by_epoch=False)
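# Note (assumption): 1252 appears to be the number of iterations per epoch for
# ImageNet-1k at this batch size (~1.28M images / (128 * 8) ≈ 1252), so
# warmup_iters = 20 * 1252 corresponds to roughly a 20-epoch linear warmup.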
runner = dict(type='EpochBasedRunner', max_epochs=300)
|
the-stack_0_13286 | from .store import FixityDocument as Doc
from .store import JSONSchemaCollection
from pprint import pprint
def get_schemas():
"""Get JSON schemas for FixityDocument
Returns:
JSONSchemaCollection: Object and document JSON schema that define the store
"""
schemas = JSONSchemaCollection(dict())
d1 = Doc()
d2 = Doc()
fname1 = d1.get_filename(document=True)
fname2 = d2.get_filename()
document_schema = d1.to_jsonschema(document=True)
object_schema = d2.to_jsonschema(document=False)
schemas[fname1] = document_schema
schemas[fname2] = object_schema
return schemas
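# A minimal usage sketch (illustrative only, not part of the original module):
# build the schema collection and pretty-print it keyed by the generated
# filenames; this also exercises the pprint import above.
if __name__ == '__main__':
    pprint(get_schemas())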
|
the-stack_0_13287 | """
Visualize the notes network of a Zettelkasten.
Each arrow represents a link from one zettel to another. The script assumes
that zettels have filenames of the form "YYYYMMDDHHMM This is a title" and that
links have the form [[YYYYMMDDHHMM]]
"""
import glob
import os.path
import re
from textwrap import fill
PAT_ZK_ID = re.compile(r"^(?P<id>\d+)\s(.*)")
PAT_LINK = re.compile(r"\[\[(\d+)\]\]")
def parse_zettels(filepaths):
""" Parse the ID and title from the filename and first line of the file.
Assumes that the filename has the format "This is title" and the first line
of the file is the ID
"""
documents = {}
for filepath in filepaths:
basename = os.path.basename(filepath)
filename, ext = os.path.splitext(basename)
# collect zkn_id
with open(filepath, encoding="utf-8") as f:
# read file
file_read = f.read()
            # search for the first run of 14 digits (the zettel ID)
            match = re.search(r'\d{14}', file_read)
            if match is None:
                continue  # skip files without a recognisable ID
            zkn_id = match.group(0)
# collect links
links = PAT_LINK.findall(file_read)
# now collect text
with open(filepath, encoding='utf-8') as f:
f.readline()
doctext = f.readlines()
# document = dict(id=r.group(1), title=r.group(2), links=links)
# document = dict(id = zkn_id, title = filename, links = links, text = doctext)
# documents.append(document)
documents[zkn_id] = dict(title = filename, links = links, text = doctext)
return documents
def create_graph(zettels, include_self_references=True, only_listed=True):
"""
    Create a graph of the zettels linking to each other.
Parameters
----------
zettels : list of dictionaries
include_self_references : bool, optional
Include links to the source document. Defaults to True.
only_listed : bool, optional
        Only include a node if it corresponds to one of the parsed zettels.
        Defaults to True.
Returns
-------
    elements : list of Cytoscape-compatible node and edge dictionaries
"""
# Collect IDs from source zettels and from zettels linked
zettel_ids = set()
link_ids = set()
for zettel in zettels:
zettel_ids.add(zettel["id"])
link_ids.update(zettel["links"])
if only_listed:
ids_to_include = zettel_ids
else:
ids_to_include = zettel_ids | link_ids
# for zettel in zettels:
# graph.add_node(zettel["id"], title=zettel["title"])
# for link in zettel["links"]:
# if link not in ids_to_include:
# continue
# if include_self_references or (zettel["id"] != link):
# # Truth table for the inclusion conditional
# # IS_SAME IS_DIFF (Is different ID)
# # INCLUDE T T
# # DON'T INCLUDE F T
# graph.add_edge(zettel["id"], link)
elements = []
for zettel in zettels:
# add node
elements.append({
'data': {'id': zettel['id'], 'label': zettel['title']}
})
# add link_ids
for link in zettel["links"]:
if link not in ids_to_include:
continue
if include_self_references or (zettel["id"] != link):
# Truth table for the inclusion conditional
# IS_SAME IS_DIFF (Is different ID)
# INCLUDE T T
# DON'T INCLUDE F T
elements.append({
'data': {'source': zettel['id'], 'target': link}
})
return elements
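# For reference, the element list produced for two zettels where the first
# links to the second would look roughly like this (IDs are made up):
#   [{'data': {'id': '20200102153000', 'label': 'First note'}},
#    {'data': {'source': '20200102153000', 'target': '20200102153100'}},
#    {'data': {'id': '20200102153100', 'label': 'Second note'}}]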
def list_zettels(notes_dir, pattern="*.md"):
"""
List zettels in a directory.
Parameters
----------
notes_dir : str
Path to the directory containing the zettels.
pattern : str (optional)
Pattern matching zettels. The default is '*.md'. If there are multiple
patterns, separate them with a |, such as in '*.md|*.txt'
"""
filepaths = []
for patt in pattern.split("|"):
filepaths.extend(glob.glob(os.path.join(notes_dir, patt)))
return sorted(filepaths)
def parse_args(args=None):
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument(
"--notes-dir", default=".", help="path to folder containin notes. [.]"
)
parser.add_argument(
"--output",
default="zettel-network",
help="name of output file. [zettel-network]",
)
parser.add_argument(
"--pattern",
action="append",
help=(
"pattern to match notes. You can repeat this argument to"
" match multiple file types. [*.md]"
),
)
parser.add_argument(
"--use-graphviz",
action="store_true",
default=False,
help="Use Graphviz instead of plotly to render the network.",
)
parser.add_argument(
"--no-self-ref",
action="store_false",
default=True,
dest="include_self_references",
help="Do not include self-references in a zettel.",
)
parser.add_argument(
"--only-listed",
action="store_true",
default=False,
help="Only include links to documents that are in the file list",
)
parser.add_argument("zettel_paths", nargs="*", help="zettel file paths.")
args = parser.parse_args(args=args)
# Use the list of files the user specify, otherwise, fall back to
# listing a directory.
if not args.zettel_paths:
if args.pattern is None:
args.pattern = ["*.md"]
patterns = "|".join(args.pattern)
args.zettel_paths = list_zettels(args.notes_dir, pattern=patterns)
return args
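# Example invocation (script name and paths are hypothetical):
#   python zettel_network.py --notes-dir ~/zettelkasten --pattern "*.md" --pattern "*.txt"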
def main(args=None):
args = parse_args(args)
    documents = parse_zettels(args.zettel_paths)
    # Fail in case we didn't find a zettel
    if not documents:
        raise FileNotFoundError("I'm sorry, I couldn't find any files.")
    # parse_zettels returns a mapping of ID -> zettel; create_graph expects a
    # list of dicts that each carry their own 'id' key.
    zettels = [dict(id=zkn_id, **doc) for zkn_id, doc in documents.items()]
    elements = create_graph(
        zettels,
        include_self_references=args.include_self_references,
        only_listed=args.only_listed,
    )
if __name__ == "__main__":
import sys
try:
sys.exit(main())
except FileNotFoundError as e:
# Failed either because it didn't find any files or because Graphviz
# wasn't installed
sys.exit(e)
|
the-stack_0_13289 | #!/usr/bin/python3
"""Test textlib module."""
#
# (C) Pywikibot team, 2011-2022
#
# Distributed under the terms of the MIT license.
#
import codecs
import functools
import os
import re
import unittest
from collections import OrderedDict
from contextlib import suppress
from unittest import mock
import pywikibot
from pywikibot import textlib
from pywikibot.backports import nullcontext
from pywikibot.exceptions import UnknownSiteError
from pywikibot.site._interwikimap import _IWEntry
from pywikibot.textlib import MultiTemplateMatchBuilder, extract_sections
from pywikibot.tools import has_module
from tests.aspects import (
DefaultDrySiteTestCase,
SiteAttributeTestCase,
TestCase,
require_modules,
)
files = {}
dirname = os.path.join(os.path.dirname(__file__), 'pages')
for f in ['enwiki_help_editing']:
with codecs.open(os.path.join(dirname, f + '.page'),
'r', 'utf-8') as content:
files[f] = content.read()
class TestSectionFunctions(TestCase):
"""Test wikitext section handling function."""
net = False
def setUp(self):
"""Setup tests."""
self.catresult1 = '[[Category:Cat1]]\n[[Category:Cat2]]\n'
super().setUp()
@staticmethod
def contains(fn, sn):
"""Invoke does_text_contain_section()."""
return textlib.does_text_contain_section(
files[fn], sn)
def assertContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] contains sn."""
self.assertEqual(self.contains(fn, sn), True, *args, **kwargs)
def assertNotContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] does not contain sn."""
self.assertEqual(self.contains(fn, sn), False, *args, **kwargs)
def testCurrentBehaviour(self):
"""Test that 'Editing' is found."""
self.assertContains('enwiki_help_editing', 'Editing')
def testSpacesInSection(self):
"""Test with spaces in section."""
self.assertContains('enwiki_help_editing', 'Minor_edits')
self.assertNotContains('enwiki_help_editing', '#Minor edits',
"Incorrect, '#Minor edits' does not work")
self.assertNotContains('enwiki_help_editing', 'Minor Edits',
'section hashes are case-sensitive')
self.assertNotContains('enwiki_help_editing', 'Minor_Edits',
'section hashes are case-sensitive')
@unittest.expectedFailure # TODO: T133276
def test_encoded_chars_in_section(self):
"""Test encoded chars in section."""
self.assertContains(
'enwiki_help_editing', 'Talk_.28discussion.29_pages',
'As used in the TOC')
def test_underline_characters_in_section(self):
"""Test with underline chars in section."""
self.assertContains('enwiki_help_editing', 'Talk_(discussion)_pages',
'Understood by mediawiki')
def test_spaces_outside_section(self):
"""Test with spaces around section."""
self.assertContains('enwiki_help_editing', 'Naming and_moving')
self.assertContains('enwiki_help_editing', ' Naming and_moving ')
self.assertContains('enwiki_help_editing', ' Naming and_moving_')
def test_link_in_section(self):
"""Test with link inside section."""
# section is ==[[Wiki markup]]==
self.assertContains('enwiki_help_editing', '[[Wiki markup]]',
'Link as section header')
self.assertContains('enwiki_help_editing', '[[:Wiki markup]]',
                            'section header link with leading colon')
self.assertNotContains('enwiki_help_editing', 'Wiki markup',
'section header must be a link')
# section is ===[[:Help]]ful tips===
self.assertContains('enwiki_help_editing', '[[Help]]ful tips',
'Containing link')
self.assertContains('enwiki_help_editing', '[[:Help]]ful tips',
                            'Containing link with leading colon')
self.assertNotContains('enwiki_help_editing', 'Helpful tips',
'section header must contain a link')
class TestFormatInterwiki(TestCase):
"""Test format functions."""
family = 'wikipedia'
code = 'en'
cached = True
def test_interwiki_format_Page(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Page(pywikibot.Link('de:German', self.site)),
'fr': pywikibot.Page(pywikibot.Link('fr:French', self.site))
}
self.assertEqual('[[de:German]]\n[[fr:French]]\n',
textlib.interwikiFormat(interwikis, self.site))
def test_interwiki_format_Link(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Link('de:German', self.site),
'fr': pywikibot.Link('fr:French', self.site),
}
self.assertEqual('[[de:German]]\n[[fr:French]]\n',
textlib.interwikiFormat(interwikis, self.site))
class TestFormatCategory(DefaultDrySiteTestCase):
"""Test category formatting."""
catresult = '[[Category:Cat1]]\n[[Category:Cat2]]\n'
def test_category_format_raw(self):
"""Test formatting categories as strings formatted as links."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['[[Category:Cat1]]',
'[[Category:Cat2]]'],
self.site))
def test_category_format_bare(self):
"""Test formatting categories as strings."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['Cat1', 'Cat2'], self.site))
def test_category_format_Category(self):
"""Test formatting categories as Category instances."""
data = [pywikibot.Category(self.site, 'Cat1'),
pywikibot.Category(self.site, 'Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
def test_category_format_Page(self):
"""Test formatting categories as Page instances."""
data = [pywikibot.Page(self.site, 'Category:Cat1'),
pywikibot.Page(self.site, 'Category:Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
class TestAddText(DefaultDrySiteTestCase):
"""Test add_text function."""
def test_add_text(self):
"""Test adding text."""
self.assertEqual(
textlib.add_text('foo\n[[Category:Foo]]', 'bar', site=self.site),
'foo\nbar\n\n[[Category:Foo]]'
)
class TestCategoryRearrangement(DefaultDrySiteTestCase):
"""
Ensure that sorting keys are not being lost.
Tests .getCategoryLinks() and .replaceCategoryLinks(),
with both a newline and an empty string as separators.
"""
old = '[[Category:Cat1]]\n[[Category:Cat2|]]\n' \
'[[Category:Cat1| ]]\n[[Category:Cat2|key]]'
def test_standard_links(self):
"""Test getting and replacing categories."""
cats = textlib.getCategoryLinks(self.old, site=self.site)
new = textlib.replaceCategoryLinks(self.old, cats, site=self.site)
self.assertEqual(self.old, new)
def test_indentation(self):
"""Test indentation from previous block."""
# Block of text
old = 'Some text\n\n' + self.old
cats = textlib.getCategoryLinks(old, site=self.site)
new = textlib.replaceCategoryLinks(old, cats, site=self.site)
self.assertEqual(old, new)
# DEFAULTSORT
old_ds = '{{DEFAULTSORT:key}}\n' + self.old
cats_ds = textlib.getCategoryLinks(old_ds, site=self.site)
new_ds = textlib.replaceCategoryLinks(old_ds, cats_ds, site=self.site)
self.assertEqual(old_ds, new_ds)
def test_in_place_replace(self):
"""Test in-place category change is reversible."""
dummy = pywikibot.Category(self.site, 'foo')
dummy.sortKey = 'bah'
cats = textlib.getCategoryLinks(self.old, site=self.site)
for count, cat in enumerate(textlib.getCategoryLinks(self.old,
site=self.site)):
with self.subTest(category=cat):
# Sanity checking
temp = textlib.replaceCategoryInPlace(self.old, cat, dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cat,
site=self.site)
self.assertEqual(self.old, new)
self.assertEqual(count, 3)
# Testing removing categories
temp = textlib.replaceCategoryInPlace(self.old, cats[0],
None, site=self.site)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertNotIn(cats[0], temp_cats)
# First and third categories are the same
self.assertEqual([cats[1], cats[3]], temp_cats)
# Testing adding categories
temp = textlib.replaceCategoryInPlace(
self.old, cats[0], cats[1], site=self.site,
add_only=True)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertEqual([cats[0], cats[1], cats[1],
cats[2], cats[1], cats[3]], temp_cats)
new_cats = textlib.getCategoryLinks(new, site=self.site)
self.assertEqual(cats, new_cats)
def test_in_place_retain_sort(self):
"""Test in-place category change does not alter the sortkey."""
# sort key should be retained when the new cat sortKey is None
dummy = pywikibot.Category(self.site, 'foo')
self.assertIsNone(dummy.sortKey)
cats = textlib.getCategoryLinks(self.old, site=self.site)
self.assertEqual(cats[3].sortKey, 'key')
orig_sortkey = cats[3].sortKey
temp = textlib.replaceCategoryInPlace(self.old, cats[3],
dummy, site=self.site)
self.assertNotEqual(self.old, temp)
new_dummy = textlib.getCategoryLinks(temp, site=self.site)[3]
self.assertIsNotNone(new_dummy.sortKey)
self.assertEqual(orig_sortkey, new_dummy.sortKey)
class TestTemplatesInCategory(TestCase):
"""Tests to verify that templates in category links are handled."""
family = 'wikipedia'
code = 'en'
cached = True
def test_templates(self):
"""Test normal templates inside category links."""
self.site = self.get_site()
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|Foo}}]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}bar]][[Category:Wiki{{P2||pedia}}]]',
self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar'),
pywikibot.page.Category(self.site, 'Wikipedia')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}and{{!}}bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='and|bar')])
for pattern in ('[[Category:{{P1|Foo}}|bar]]',
'[[Category:{{P1|{{P2|L33t|Foo}}}}|bar]]',
'[[Category:Foo{{!}}bar]]'):
with self.subTest(pattern=pattern):
self.assertEqual(textlib.getCategoryLinks(
pattern, self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo',
sort_key='bar')])
with mock.patch.object(pywikibot, 'warning', autospec=True) as warn:
textlib.getCategoryLinks('[[Category:nasty{{{!}}]]', self.site)
warn.assert_called_once_with(
'Invalid category title extracted: nasty{{{!}}')
class TestTemplateParams(TestCase):
"""Test to verify that template params extraction works."""
net = False
def _common_results(self, func):
"""Common cases."""
self.assertEqual(func('{{a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{a|b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b|c=d}}'),
[('a', OrderedDict((('1', 'b'), ('c', 'd'))))])
self.assertEqual(func('{{a|b=c|f=g|d=e|1=}}'),
[('a', OrderedDict((('b', 'c'), ('f', 'g'),
('d', 'e'), ('1', ''))))])
self.assertEqual(func('{{a|1=2|c=d}}'),
[('a', OrderedDict((('1', '2'), ('c', 'd'))))])
self.assertEqual(func('{{a|c=d|1=2}}'),
[('a', OrderedDict((('c', 'd'), ('1', '2'))))])
self.assertEqual(func('{{a|5=d|a=b}}'),
[('a', OrderedDict((('5', 'd'), ('a', 'b'))))])
self.assertEqual(func('{{a|=2}}'),
[('a', OrderedDict((('', '2'), )))])
self.assertEqual(func('{{a|}}'),
[('a', OrderedDict((('1', ''), )))])
self.assertEqual(func('{{a|=|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a||}}'),
[('a', OrderedDict((('1', ''), ('2', ''))))])
self.assertEqual(func('{{a|b={{{1}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'), )))])
self.assertEqual(func('{{a|b=<noinclude>{{{1}}}</noinclude>}}'),
[('a',
OrderedDict((('b',
'<noinclude>{{{1}}}</noinclude>'),
)))])
self.assertEqual(func('{{Template:a|b=c}}'),
[('Template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{template:a|b=c}}'),
[('template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{:a|b=c}}'),
[(':a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b={{{1}}}|c={{{2}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'),
('c', '{{{2}}}'))))])
self.assertEqual(func('{{a|b=c}}{{d|e=f}}'),
[('a', OrderedDict((('b', 'c'), ))),
('d', OrderedDict((('e', 'f'), )))])
# initial '{' and '}' should be ignored as outer wikitext
self.assertEqual(func('{{{a|b}}X}'),
[('a', OrderedDict((('1', 'b'), )))])
# sf.net bug 1575: unclosed template
self.assertEqual(func('{{a'), [])
self.assertEqual(func('{{a}}{{foo|'), [('a', OrderedDict())])
def _unstripped(self, func):
"""Common cases of unstripped results."""
self.assertEqual(func('{{a|b=<!--{{{1}}}-->}}'),
[('a', OrderedDict((('b', '<!--{{{1}}}-->'), )))])
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict(((' ', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict(((' b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b ', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', ' c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c '), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' bar '))))])
# The correct entry 'bar' is removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
# However whitespace prevents the correct item from being removed
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '), (' 2 ', ' bar '),
('2', ' baz '))))])
def _stripped(self, func):
"""Common cases of stripped results."""
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '), ('2', 'bar'))))])
# 'bar' is always removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
def _etp_regex_differs(self, func):
"""Common cases not handled the same by ETP_REGEX."""
# inner {} should be treated as part of the value
self.assertEqual(func('{{a|b={} }}'),
[('a', OrderedDict((('b', '{} '), )))])
def _order_differs(self, func):
"""Common cases where the order of templates differs."""
self.assertCountEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), ))),
('c', OrderedDict())])
self.assertCountEqual(func('{{a|{{c|d}}}}'),
[('c', OrderedDict((('1', 'd'), ))),
('a', OrderedDict([('1', '{{c|d}}')]))])
# inner '}' after {{b|c}} should be treated as wikitext
self.assertCountEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b|c}}}'),
('2', 'd')])),
('b', OrderedDict([('1', 'c')]))])
def _mwpfh_passes(self, func):
"""Common cases failing with wikitextparser but passes with mwpfh.
Probably the behaviour of regex or mwpfh is wrong.
"""
failing = has_module('wikitextparser')
patterns = [
'{{subst:a|b=c}}',
'{{safesubst:a|b=c}}',
'{{msgnw:a|b=c}}',
'{{subst::a|b=c}}'
]
context = self.assertRaises(AssertionError) \
if failing else nullcontext()
for template in patterns:
with self.subTest(template=template, failing=failing):
name = template.strip('{}').split('|')[0]
with context:
self.assertEqual(func(template),
[(name, OrderedDict((('b', 'c'), )))])
@require_modules('mwparserfromhell')
def test_extract_templates_params_mwpfh(self):
"""Test using mwparserfromhell."""
func = textlib.extract_templates_and_params
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self._etp_regex_differs(func)
self._mwpfh_passes(func)
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}'), ))),
('a', OrderedDict([('1', '{{c|{{d|}}}}')])),
('d', OrderedDict([('1', '')]))
])
@require_modules('mwparserfromhell')
def test_extract_templates_params_parser_stripped(self):
"""Test using mwparserfromhell with stripping."""
func = functools.partial(textlib.extract_templates_and_params,
strip=True)
self._common_results(func)
self._order_differs(func)
self._stripped(func)
@require_modules('wikitextparser')
def test_extract_templates_params_parser(self):
"""Test using wikitextparser."""
func = textlib.extract_templates_and_params
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self._etp_regex_differs(func)
self._mwpfh_passes(func)
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}'), ))),
('a', OrderedDict([('1', '{{c|{{d|}}}}')])),
('d', OrderedDict([('1', '')]))
])
@require_modules('mwparserfromhell')
def test_extract_templates_params(self):
"""Test that the normal entry point works."""
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=False)
self._common_results(func)
self._unstripped(func)
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=True)
self._common_results(func)
self._stripped(func)
def test_template_simple_regex(self):
"""Test using simple regex."""
func = textlib.extract_templates_and_params_regex_simple
self._common_results(func)
self._etp_regex_differs(func)
# The simple regex copies the whitespace of mwpfh, but does
# not have additional entries for nested templates.
self.assertEqual(func('{{a| b={{c}}}}'),
[('a', OrderedDict(((' b', '{{c}}'), )))])
self.assertEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), )))])
self.assertEqual(func('{{a|b= {{c}}}}'),
[('a', OrderedDict((('b', ' {{c}}'), )))])
self.assertEqual(func('{{a|b={{c}} }}'),
[('a', OrderedDict((('b', '{{c}} '), )))])
# These three are from _order_differs, and while the first works
self.assertEqual(func('{{a|{{c}} }}'),
[('a', OrderedDict((('1', '{{c}} '), )))])
# an inner '|' causes extract_template_and_params_regex_simple to
# split arguments incorrectly in the next two cases.
self.assertEqual(func('{{a|{{c|d}} }}'),
[('a', OrderedDict([('1', '{{c'),
('2', 'd}} ')]))])
self.assertEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b'),
('2', 'c}}}'),
('3', 'd')]))])
# Safe fallback to handle arbitrary template levels
# by merging top level templates together.
# i.e. 'b' is not recognised as a template, and 'foo' is also
# consumed as part of 'a'.
self.assertEqual(func('{{a|{{c|{{d|{{e|}}}} }} }} foo {{b}}'),
[(None, OrderedDict())])
def test_nested_template_regex_search(self):
"""Test NESTED_TEMPLATE_REGEX search."""
func = textlib.NESTED_TEMPLATE_REGEX.search
# Numerically named templates are rejected
self.assertIsNone(func('{{1}}'))
self.assertIsNone(func('{{#if:foo}}'))
self.assertIsNone(func('{{{1}}}'))
self.assertIsNone(func('{{{1|}}}'))
self.assertIsNone(func('{{{15|a}}}'))
self.assertIsNone(func('{{{1|{{{2|a}}} }}}'))
self.assertIsNone(func('{{{1|{{2|a}} }}}'))
def test_nested_template_regex_match(self):
"""Test NESTED_TEMPLATE_REGEX match."""
func = textlib.NESTED_TEMPLATE_REGEX.match
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{foo:bar}}'))
self.assertIsNone(func('{{1}}'))
self.assertIsNotNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|b={} }}'))
self.assertIsNotNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
# All templates are captured when template depth is greater than 2
patterns = '{{a|{{c|{{d|}} }} | foo = bar }} foo {{bar}} baz', \
'{{a|\n{{c|{{d|}} }}\n| foo = bar }} foo {{bar}} baz'
for pattern in patterns:
m = func(pattern)
self.assertIsNotNone(m)
self.assertIsNotNone(m.group(0))
self.assertIsNone(m.group('name'))
self.assertIsNone(m.group(1))
self.assertIsNone(m.group('params'))
self.assertIsNone(m.group(2))
self.assertIsNotNone(m.group('unhandled_depth'))
self.assertTrue(m.group(0).endswith('foo {{bar}}'))
class TestDisabledParts(DefaultDrySiteTestCase):
"""Test the removeDisabledParts function in textlib."""
def test_remove_disabled_parts(self):
"""Test removeDisabledParts function."""
tests = {
'comment': '<!-- No comment yet -->',
'link': '[[Target link]]',
'source': '<source>foo := bar</source>',
'template': '{{Infobox\n|foo = bar}}',
'unknown': '<Unknown>This is an unknown pattern</unKnown>',
}
for test, pattern in tests.items():
with self.subTest(test=test):
self.assertEqual(
textlib.removeDisabledParts(pattern, tags=[test]), '')
def test_remove_disabled_parts_include(self):
"""Test removeDisabledParts function with the include argument."""
text = 'text <nowiki>tag</nowiki> text'
self.assertEqual(
textlib.removeDisabledParts(text, include=['nowiki']), text)
def test_remove_disabled_parts_order(self):
"""Test the order of the replacements in removeDisabledParts."""
text = 'text <ref>This is a reference.</ref> text'
regex = re.compile('</?ref>')
self.assertEqual(
textlib.removeDisabledParts(text, tags=['ref', regex]),
'text text')
self.assertEqual(
textlib.removeDisabledParts(text, tags=[regex, 'ref']),
'text This is a reference. text')
class TestReplaceLinks(TestCase):
"""Test the replace_links function in textlib."""
sites = {
'wt': {
'family': 'wiktionary',
'code': 'en',
},
'wp': {
'family': 'wikipedia',
'code': 'en',
}
}
dry = True
text = ('Hello [[World]], [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
@classmethod
def setUpClass(cls):
"""Create a fake interwiki cache."""
super().setUpClass()
# make APISite.interwiki work and prevent it from doing requests
for site in cls.sites.values():
mapping = {}
for iw in cls.sites.values():
mapping[iw['family']] = _IWEntry(True, 'invalid')
mapping[iw['family']]._site = iw['site']
mapping['bug'] = _IWEntry(False, 'invalid')
mapping['bug']._site = UnknownSiteError('Not a wiki')
mapping['en'] = _IWEntry(True, 'invalid')
mapping['en']._site = site['site']
site['site']._interwikimap._map = mapping
site['site']._interwikimap._site = None # prevent it from loading
cls.wp_site = cls.get_site('wp')
def test_replacements_function(self):
"""Test a dynamic function as the replacements."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
return pywikibot.Link('Homeworld', link.site)
if link.title.lower() == 'you':
return False
return None
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[Homeworld]], [[how|are]] you? Are you a [[bug:1337]]?')
def test_replacements_once(self):
"""Test dynamic replacement."""
def callback(link, text, groups, rng):
if link.title.lower() == 'you':
self._count += 1
if link.section:
return pywikibot.Link(
'{0}#{1}'
.format(self._count, link.section), link.site)
return pywikibot.Link('{0}'.format(self._count), link.site)
return None
self._count = 0 # buffer number of found instances
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[World]], [[how|are]] [[1#section]]? Are [[2]] a '
'[[bug:1337]]?')
del self._count
def test_unlink_all(self):
"""Test unlinking."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
return False
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello World, are you? Are you a [[bug:1337]]?')
def test_unlink_some(self):
"""Test unlinking only some links."""
self.assertEqual(
textlib.replace_links(self.text, ('World', False), self.wp_site),
'Hello World, [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
self.assertEqual(
textlib.replace_links('[[User:Namespace|Label]]\n'
'[[User:Namespace#Section|Labelz]]\n'
'[[Nothing]]',
('User:Namespace', False),
self.wp_site),
'Label\nLabelz\n[[Nothing]]')
def test_replace_neighbour(self):
"""Test that it replaces two neighbouring links."""
self.assertEqual(
textlib.replace_links('[[A]][[A]][[C]]',
('A', 'B'),
self.wp_site),
'[[B|A]][[B|A]][[C]]')
def test_replacements_simplify(self):
"""Test a tuple as replacement removing the need for a piped link."""
self.assertEqual(
textlib.replace_links(self.text,
('how', 'are'),
self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_file(self):
"""Test that it respects the namespace."""
self.assertEqual(
textlib.replace_links(
'[[File:Meh.png|thumb|Description of [[fancy]]]] '
'[[Fancy]]...', ('File:Meh.png', 'File:Fancy.png'),
self.wp_site),
'[[File:Fancy.png|thumb|Description of [[fancy]]]] [[Fancy]]...')
def test_replace_strings(self):
"""Test if strings can be used."""
self.assertEqual(
textlib.replace_links(self.text, ('how', 'are'), self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_invalid_link_text(self):
"""Test that it doesn't pipe a link when it's an invalid link."""
self.assertEqual(
textlib.replace_links('[[Target|Foo:]]', ('Target', 'Foo'),
self.wp_site), '[[Foo|Foo:]]')
def test_replace_modes(self):
"""Test replacing with or without label and section."""
source_text = '[[Foo#bar|baz]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'), self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site, 'Bar')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar', self.wp_site)),
self.wp_site),
'[[Bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu'),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu',
self.wp_site)),
self.wp_site),
'[[Bar#snafu]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar|foo',
self.wp_site)),
self.wp_site),
'[[Bar|foo]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu|foo',
self.wp_site)),
self.wp_site),
'[[Bar#snafu|foo]]')
def test_replace_different_case(self):
"""Test that it uses piped links when the case is different."""
source_text = '[[Foo|Bar]] and [[Foo|bar]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wp')),
'[[Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wt')),
'[[bar|Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'),
self.get_site('wt')),
'[[Bar]] and [[Bar|bar]]')
@unittest.expectedFailure
def test_label_diff_namespace(self):
"""Test that it uses the old label when the new doesn't match."""
# These tests require to get the actual part which is before the title
# (interwiki and namespace prefixes) which could be then compared
# case insensitive.
self.assertEqual(
textlib.replace_links('[[Image:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|Image:Foobar]]')
self.assertEqual(
textlib.replace_links('[[en:File:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|en:File:Foobar]]')
def test_linktrails(self):
"""Test that the linktrails are used or applied."""
self.assertEqual(
textlib.replace_links('[[Foobar]]', ('Foobar', 'Foo'),
self.wp_site),
'[[Foo]]bar')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Talk:Tests'), self.wp_site),
'[[Talk:tests]]')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Project:Tests'),
self.wp_site),
'[[Project:Tests|Talk:tests]]')
def test_unicode_callback(self):
"""Test returning unicode in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a unicode instance not bytes
return 'homewörlder'
return None
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello homewörlder, [[how|are]] [[you#section|you]]? '
'Are [[you]] a [[bug:1337]]?')
def test_bytes_callback(self):
"""Test returning bytes in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a bytes instance not unicode
return b'homeworlder'
return None
with self.assertRaisesRegex(ValueError,
r'The result must be str and not bytes\.'):
textlib.replace_links(self.text, callback, self.wp_site)
def test_replace_interwiki_links(self):
"""Make sure interwiki links cannot be replaced."""
link = '[[fr:how]]'
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('de:how', 'de:are'), self.wp_site),
link)
class TestReplaceLinksNonDry(TestCase):
"""Test the replace_links function in textlib non-dry."""
family = 'wikipedia'
code = 'en'
cached = True
def test_replace_interlanguage_links(self):
"""Test replacing interlanguage links."""
link = '[[:fr:how]]'
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('how', ':de:are'),
self.site),
link)
self.assertEqual(
textlib.replace_links(link, (':de:how', ':de:are'),
self.site),
link)
class TestLocalDigits(TestCase):
"""Test to verify that local digits are correctly being handled."""
net = False
def test_to_local(self):
"""Test converting Latin digits to local digits."""
self.assertEqual(textlib.to_local_digits(299792458, 'en'), 299792458)
self.assertEqual(
textlib.to_local_digits(299792458, 'fa'), '۲۹۹۷۹۲۴۵۸')
self.assertEqual(
textlib.to_local_digits(
'299792458 flash', 'fa'), '۲۹۹۷۹۲۴۵۸ flash')
self.assertEqual(
textlib.to_local_digits(
'299792458', 'km'), '២៩៩៧៩២៤៥៨')
class TestReplaceExcept(DefaultDrySiteTestCase):
"""Test to verify the replacements with exceptions are done correctly."""
def test_no_replace(self):
"""Test replacing when the old text does not match."""
self.assertEqual(textlib.replaceExcept('12345678', 'x', 'y', [],
site=self.site),
'12345678')
def test_simple_replace(self):
"""Test replacing without regex."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxxB', 'x', 'y', [],
site=self.site),
'AyyB')
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
site=self.site),
'AyyyB')
def test_regex_replace(self):
"""Test replacing with a regex."""
self.assertEqual(textlib.replaceExcept('A123B', r'\d', r'x', [],
site=self.site),
'AxxxB')
self.assertEqual(textlib.replaceExcept('A123B', r'\d+', r'x', [],
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('A123B',
r'A(\d)2(\d)B', r'A\1x\2B', [],
site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('', r'(a?)', r'\1B', [], site=self.site),
'B')
self.assertEqual(
textlib.replaceExcept('abc', r'x*', r'-', [], site=self.site),
'-a-b-c-')
# This is different from re.sub() as re.sub() doesn't
# allow None groups
self.assertEqual(
textlib.replaceExcept('', r'(a)?', r'\1\1', [], site=self.site),
'')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(\d)2(\d)B', r'A\g<1>x\g<2>B',
[], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(?P<b>\d)B',
r'A\g<a>x\g<b>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\g<2>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\2B', [], site=self.site),
'A1x3B')
# test regex with lookbehind.
self.assertEqual(
textlib.replaceExcept('A behindB C', r'(?<=behind)\w',
r'Z', [], site=self.site),
'A behindZ C')
# test regex with lookbehind and groups.
self.assertEqual(
textlib.replaceExcept('A behindB C D', r'(?<=behind)\w( )',
r'\g<1>Z', [], site=self.site),
'A behind ZC D')
# test regex with lookahead.
self.assertEqual(
textlib.replaceExcept('A Bahead C', r'\w(?=ahead)',
r'Z', [], site=self.site),
'A Zahead C')
# test regex with lookahead and groups.
self.assertEqual(
textlib.replaceExcept('A Bahead C D', r'( )\w(?=ahead)',
r'Z\g<1>', [], site=self.site),
'AZ ahead C D')
def test_case_sensitive(self):
"""Test replacing with different case sensitivity."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=False,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=False,
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
def test_replace_with_marker(self):
"""Test replacing with a marker."""
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
marker='.',
site=self.site),
'Ayyy.B')
self.assertEqual(textlib.replaceExcept('AxyxB', '1', 'y', [],
marker='.',
site=self.site),
'AxyxB.')
def test_overlapping_replace(self):
"""Test replacing with and without overlap."""
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=False,
site=self.site),
'2121')
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=True,
site=self.site),
'2221')
self.assertEqual(textlib.replaceExcept('1\n= 1 =\n', '1', ' \n= 1 =\n',
['header'],
allowoverlap=True,
site=self.site),
' \n= 1 =\n\n= 1 =\n')
def test_replace_exception(self):
"""Test replacing not inside a specific regex."""
self.assertEqual(textlib.replaceExcept('123x123', '123', '000', [],
site=self.site),
'000x000')
self.assertEqual(textlib.replaceExcept('123x123', '123', '000',
[re.compile(r'\w123')],
site=self.site),
'000x123')
self.assertEqual(
textlib.replaceExcept(
'1\n= 1 =\n', '1', 'verylongreplacement', ['header'],
site=self.site),
'verylongreplacement\n= 1 =\n')
def test_replace_tags(self):
"""Test replacing not inside various tags."""
self.assertEqual(textlib.replaceExcept('A <!-- x --> B', 'x', 'y',
['comment'], site=self.site),
'A <!-- x --> B')
self.assertEqual(textlib.replaceExcept('\n==x==\n', 'x', 'y',
['header'], site=self.site),
'\n==x==\n')
self.assertEqual(textlib.replaceExcept('\n<!--'
'\ncomment-->==x==<!--comment'
'\n-->\n', 'x', 'y',
['header'], site=self.site),
'\n<!--\ncomment-->==x==<!--comment\n-->\n')
self.assertEqual(textlib.replaceExcept('<pre>x</pre>', 'x', 'y',
['pre'], site=self.site),
'<pre>x</pre>')
self.assertEqual(textlib.replaceExcept('<nowiki >x</nowiki >x',
'x', 'y', ['nowiki'],
site=self.site),
'<nowiki >x</nowiki >y') # T191559
self.assertEqual(textlib.replaceExcept('<source lang="xml">x</source>',
'x', 'y', ['source'],
site=self.site),
'<source lang="xml">x</source>')
self.assertEqual(
textlib.replaceExcept('<syntaxhighlight>x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight>x</syntaxhighlight>')
self.assertEqual(
textlib.replaceExcept(
'<syntaxhighlight lang="xml">x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight lang="xml">x</syntaxhighlight>')
self.assertEqual(
textlib.replaceExcept('<source>x</source>',
'x', 'y', ['syntaxhighlight'],
site=self.site),
'<source>x</source>')
self.assertEqual(textlib.replaceExcept('<includeonly>x</includeonly>',
'x', 'y', ['includeonly'],
site=self.site),
'<includeonly>x</includeonly>')
self.assertEqual(textlib.replaceExcept('<ref>x</ref>', 'x', 'y',
['ref'], site=self.site),
'<ref>x</ref>')
self.assertEqual(textlib.replaceExcept('<ref name="x">A</ref>',
'x', 'y',
['ref'], site=self.site),
'<ref name="x">A</ref>')
self.assertEqual(textlib.replaceExcept(' xA ', 'x', 'y',
['startspace'], site=self.site),
' xA ')
self.assertEqual(textlib.replaceExcept(':xA ', 'x', 'y',
['startcolon'], site=self.site),
':xA ')
self.assertEqual(textlib.replaceExcept('<table>x</table>', 'x', 'y',
['table'], site=self.site),
'<table>x</table>')
self.assertEqual(textlib.replaceExcept('x [http://www.sample.com x]',
'x', 'y', ['hyperlink'],
site=self.site),
'y [http://www.sample.com y]')
self.assertEqual(textlib.replaceExcept(
'x http://www.sample.com/x.html', 'x', 'y',
['hyperlink'], site=self.site), 'y http://www.sample.com/x.html')
self.assertEqual(textlib.replaceExcept('<gallery>x</gallery>',
'x', 'y', ['gallery'],
site=self.site),
'<gallery>x</gallery>')
self.assertEqual(textlib.replaceExcept('[[x]]', 'x', 'y', ['link'],
site=self.site),
'[[x]]')
self.assertEqual(textlib.replaceExcept('{{#property:p171}}', '1', '2',
['property'], site=self.site),
'{{#property:p171}}')
self.assertEqual(textlib.replaceExcept('{{#invoke:x}}', 'x', 'y',
['invoke'], site=self.site),
'{{#invoke:x}}')
self.assertEqual(
textlib.replaceExcept(
'<ref name=etwa /> not_in_ref <ref> in_ref </ref>',
'not_in_ref', 'text', ['ref'], site=self.site),
'<ref name=etwa /> text <ref> in_ref </ref>')
self.assertEqual(
textlib.replaceExcept(
'<ab> content </a>', 'content', 'text', ['a'], site=self.site),
'<ab> text </a>')
def test_replace_with_count(self):
"""Test replacing with count argument."""
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=5),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=2),
'y [[y]] x x')
self.assertEqual(textlib.replaceExcept(
'x [[x]] x x', 'x', 'y', ['link'], site=self.site, count=2),
'y [[x]] y x')
def test_replace_tag_category(self):
"""Test replacing not inside category links."""
for ns_name in self.site.namespaces[14]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['category'],
site=self.site),
'[[{}:x]]'.format(ns_name))
def test_replace_tag_file(self):
"""Test replacing not inside file links."""
for ns_name in self.site.namespaces[6]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['file'],
site=self.site),
'[[{}:x]]'.format(ns_name))
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo|bar x]] x',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo|bar x]] y')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]][[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]][[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[NonFile:x]]',
'x', 'y', ['file'], site=self.site),
'[[NonFile:y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:]]',
'File:', 'NonFile:', ['file'], site=self.site),
'[[File:]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|[[foo]].]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|[[foo]].]]')
# ensure only links inside file are captured
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
# Correctly handle single brackets in the text.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [bar].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [bar].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[bar] [[foo]] .x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[bar] [[foo]] .x]][[y]]')
def test_replace_tag_file_invalid(self):
"""Test replacing not inside file links with invalid titles."""
# Correctly handle [ and ] inside wikilinks inside file link
# even though these are an invalid title.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid ]].x]][[y]]')
@unittest.expectedFailure
def test_replace_tag_file_failure(self):
"""Test showing limits of the file link regex."""
# When the double brackets are unbalanced, the regex
# does not correctly detect the end of the file link.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [[invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
def test_replace_tags_interwiki(self):
"""Test replacing not inside interwiki links."""
if ('es' not in self.site.family.langs
or 'ey' in self.site.family.langs):
raise unittest.SkipTest("family {} doesn't have languages"
.format(self.site))
self.assertEqual(textlib.replaceExcept('[[es:s]]', 's', 't',
['interwiki'], site=self.site),
'[[es:s]]') # "es" is a valid interwiki code
self.assertEqual(textlib.replaceExcept('[[ex:x]]', 'x', 'y',
['interwiki'], site=self.site),
'[[ey:y]]') # "ex" is not a valid interwiki code
def test_replace_template(self):
"""Test replacing not inside templates."""
template_sample = (r'a {{templatename '
r' | accessdate={{Fecha|1993}} '
r' |atitle=The [[real title]] }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{a}}2{{a}} '
r' | 2={{a}}1{{a}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{{a}}}2{{{a}}} '
r' | 2={{{a}}}1{{{a}}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
# sf.net bug 1575: unclosed template
template_sample = template_sample[:-2]
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
def test_replace_source_reference(self):
"""Test replacing in text which contains back references."""
# Don't use a valid reference number in the original string,
# in case it tries to apply that as a reference.
self.assertEqual(textlib.replaceExcept(r'\42', r'^(.*)$', r'X\1X',
[], site=self.site),
r'X\42X')
self.assertEqual(textlib.replaceExcept(
r'\g<bar>', r'^(?P<foo>.*)$', r'X\g<foo>X', [], site=self.site),
r'X\g<bar>X')
class TestMultiTemplateMatchBuilder(DefaultDrySiteTestCase):
"""Test MultiTemplateMatchBuilder."""
@classmethod
def setUpClass(cls):
"""Cache namespace 10 (Template) case sensitivity."""
super().setUpClass()
cls._template_not_case_sensitive = (
cls.get_site().namespaces.TEMPLATE.case != 'case-sensitive')
def test_no_match(self):
"""Test text without any desired templates."""
string = 'The quick brown fox'
builder = MultiTemplateMatchBuilder(self.site)
self.assertIsNone(re.search(builder.pattern('quick'), string))
def test_match(self):
"""Test text with one match without parameters."""
string = 'The {{quick}} brown fox'
builder = MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_with_params(self):
"""Test text with one match with parameters."""
string = 'The {{quick|brown}} fox'
builder = MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_msg(self):
"""Test text with {{msg:..}}."""
string = 'The {{msg:quick}} brown fox'
builder = MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_template_prefix(self):
"""Test pages with {{template:..}}."""
string = 'The {{%s:%s}} brown fox'
template = 'template'
builder = MultiTemplateMatchBuilder(self.site)
if self._template_not_case_sensitive:
quick_list = ('quick', 'Quick')
else:
quick_list = ('quick', )
for t in (template.upper(), template.lower(), template.title()):
for q in quick_list:
self.assertIsNotNone(re.search(builder.pattern('quick'),
string % (t, q)))
self.assertEqual(bool(re.search(builder.pattern('Quick'),
string % (t, q))),
self._template_not_case_sensitive)
class TestGetLanguageLinks(SiteAttributeTestCase):
"""Test :py:obj:`textlib.getLanguageLinks` function."""
sites = {
'enwp': {
'family': 'wikipedia',
'code': 'en',
},
'dewp': {
'family': 'wikipedia',
'code': 'de',
},
'commons': {
'family': 'commons',
'code': 'commons',
},
}
example_text = ('[[en:Site]] [[de:Site|Piped]] [[commons:Site]] '
'[[baden:Site]] [[fr:{{PAGENAME}}]]')
@classmethod
def setUpClass(cls):
"""Define set of valid targets for the example text."""
super().setUpClass()
cls.sites_set = {cls.enwp, cls.dewp}
def test_getLanguageLinks(self, key):
"""Test if the function returns the correct titles and sites."""
with mock.patch('pywikibot.output') as m:
lang_links = textlib.getLanguageLinks(self.example_text,
self.site)
m.assert_called_once_with(
'[getLanguageLinks] Text contains invalid interwiki link '
'[[fr:{{PAGENAME}}]].')
self.assertEqual({page.title() for page in lang_links.values()},
{'Site'})
self.assertEqual(set(lang_links), self.sites_set - {self.site})
class TestExtractSections(DefaultDrySiteTestCase):
"""Test the extract_sections function."""
def _extract_sections_tests(self, result, header, sections, footer):
"""Test extract_sections function."""
self.assertIsInstance(result, tuple)
self.assertIsInstance(result.sections, list)
self.assertEqual(result, (header, sections, footer))
self.assertEqual(result.header, header)
self.assertEqual(result.sections, sections)
self.assertEqual(result.footer, footer)
if result.sections:
for section in sections:
self.assertIsInstance(section, tuple)
self.assertLength(section, 2)
def test_no_sections_no_footer(self):
"""Test for text having no sections or footer."""
text = 'text'
result = extract_sections(text, self.site)
self._extract_sections_tests(result, text, [], '')
def test_no_sections_with_footer(self):
"""Test for text having footer but no section."""
text = 'text\n\n[[Category:A]]'
result = extract_sections(text, self.site)
self._extract_sections_tests(result, 'text\n\n', [], '[[Category:A]]')
def test_with_section_no_footer(self):
"""Test for text having sections but no footer."""
text = ('text\n\n'
'==title==\n'
'content')
result = extract_sections(text, self.site)
self._extract_sections_tests(
result, 'text\n\n', [('==title==', '\ncontent')], '')
def test_with_section_with_footer(self):
"""Test for text having sections and footer."""
text = ('text\n\n'
'==title==\n'
'content\n'
'[[Category:A]]\n')
result = extract_sections(text, self.site)
self._extract_sections_tests(
result,
'text\n\n', [('==title==', '\ncontent\n')], '[[Category:A]]\n')
def test_with_h1_and_h2_sections(self):
"""Test for text having h1 and h2 sections."""
text = ('text\n\n'
'=first level=\n'
'foo\n'
'==title==\n'
'bar')
result = extract_sections(text, self.site)
self._extract_sections_tests(
result,
'text\n\n',
[('=first level=', '\nfoo\n'), ('==title==', '\nbar')],
'')
def test_with_h4_and_h2_sections(self):
"""Test for text having h4 and h2 sections."""
text = ('text\n\n'
'====title====\n'
'==title 2==\n'
'content')
result = extract_sections(text, self.site)
self._extract_sections_tests(
result,
'text\n\n',
[('====title====', '\n'), ('==title 2==', '\ncontent')],
'')
def test_long_comment(self):
r"""Test for text having a long expanse of white space.
This is to catch certain regex issues caused by patterns like
r'(\s+)*$' (as found in older versions of extract_section).
They may not halt.
c.f.
https://www.regular-expressions.info/catastrophic.html
"""
text = '<!-- -->'
result = extract_sections(text, self.site)
self._extract_sections_tests(result, text, [], '')
if __name__ == '__main__': # pragma: no cover
with suppress(SystemExit):
unittest.main()
|
the-stack_0_13293 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gromacs(CMakePackage):
"""GROMACS (GROningen MAchine for Chemical Simulations) is a molecular
dynamics package primarily designed for simulations of proteins, lipids
and nucleic acids. It was originally developed in the Biophysical
Chemistry department of University of Groningen, and is now maintained
by contributors in universities and research centers across the world.
GROMACS is one of the fastest and most popular software packages
available and can run on CPUs as well as GPUs. It is free, open source
released under the GNU General Public License. Starting from version 4.6,
GROMACS is released under the GNU Lesser General Public License.
"""
homepage = 'http://www.gromacs.org'
url = 'http://ftp.gromacs.org/gromacs/gromacs-5.1.2.tar.gz'
git = 'https://github.com/gromacs/gromacs.git'
maintainers = ['junghans', 'marvinbernhardt']
version('develop', branch='master')
version('2019.4', sha256='ba4366eedfc8a1dbf6bddcef190be8cd75de53691133f305a7f9c296e5ca1867')
version('2019.3', sha256='4211a598bf3b7aca2b14ad991448947da9032566f13239b1a05a2d4824357573')
version('2019.2', sha256='bcbf5cc071926bc67baa5be6fb04f0986a2b107e1573e15fadcb7d7fc4fb9f7e')
version('2019.1', sha256='b2c37ed2fcd0e64c4efcabdc8ee581143986527192e6e647a197c76d9c4583ec')
version('2019', sha256='c5b281a5f0b5b4eeb1f4c7d4dc72f96985b566561ca28acc9c7c16f6ee110d0b')
version('2018.8', sha256='3776923415df4bc78869d7f387c834141fdcda930b2e75be979dc59ecfa6ebec')
version('2018.5', sha256='32261df6f7ec4149fc0508f9af416953d056e281590359838c1ed6644ba097b8')
version('2018.4', sha256='6f2ee458c730994a8549d6b4f601ecfc9432731462f8bd4ffa35d330d9aaa891')
version('2018.3', sha256='4423a49224972969c52af7b1f151579cea6ab52148d8d7cbae28c183520aa291')
version('2018.2', sha256='4bdde8120c510b6543afb4b18f82551fddb11851f7edbd814aa24022c5d37857')
version('2018.1', sha256='4d3533340499323fece83b4a2d4251fa856376f2426c541e00b8e6b4c0d705cd')
version('2018', sha256='deb5d0b749a52a0c6083367b5f50a99e08003208d81954fb49e7009e1b1fd0e9')
version('2016.6', sha256='bac0117d2cad21f9b94fe5b854fb9ae7435b098a6da4e732ee745f18e52473d7')
version('2016.5', sha256='57db26c6d9af84710a1e0c47a1f5bf63a22641456448dcd2eeb556ebd14e0b7c')
version('2016.4', sha256='4be9d3bfda0bdf3b5c53041e0b8344f7d22b75128759d9bfa9442fe65c289264')
version('2016.3', sha256='7bf00e74a9d38b7cef9356141d20e4ba9387289cbbfd4d11be479ef932d77d27')
version('5.1.5', sha256='c25266abf07690ecad16ed3996899b1d489cbb1ef733a1befb3b5c75c91a703e')
version('5.1.4', sha256='0f3793d8f1f0be747cf9ebb0b588fb2b2b5dc5acc32c3046a7bee2d2c03437bc')
version('5.1.2', sha256='39d6f1d7ae8ba38cea6089da40676bfa4049a49903d21551abc030992a58f304')
version('4.5.5', sha256='e0605e4810b0d552a8761fef5540c545beeaf85893f4a6e21df9905a33f871ba')
variant('mpi', default=True, description='Activate MPI support')
variant('shared', default=True,
description='Enables the build of shared libraries')
variant(
'double', default=False,
description='Produces a double precision version of the executables')
variant('plumed', default=False, description='Enable PLUMED support')
variant('cuda', default=False, description='Enable CUDA support')
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel',
'Reference', 'RelWithAssert', 'Profile'))
variant('simd', default='auto',
description='The SIMD instruction set to use',
values=('auto', 'none', 'SSE2', 'SSE4.1', 'AVX_128_FMA', 'AVX_256',
'AVX2_128', 'AVX2_256', 'AVX_512', 'AVX_512_KNL',
'IBM_QPX', 'Sparc64_HPC_ACE', 'IBM_VMX', 'IBM_VSX',
'ARM_NEON', 'ARM_NEON_ASIMD'))
variant('rdtscp', default=True, description='Enable RDTSCP instruction usage')
variant('mdrun_only', default=False,
description='Enables the build of a cut-down version'
' of libgromacs and/or the mdrun program')
variant('openmp', default=True, description='Enables OpenMP at configure time')
variant('double_precision', default=False, description='Enables a double-precision configuration')
depends_on('mpi', when='+mpi')
depends_on('plumed+mpi', when='+plumed+mpi')
depends_on('plumed~mpi', when='+plumed~mpi')
depends_on('fftw')
depends_on('[email protected]:3.99.99', type='build')
depends_on('[email protected]:3.99.99', type='build', when='@2018:')
depends_on('cuda', when='+cuda')
    patch('gmxDetectCpu-cmake-3.14.patch', when='@2018:2019.3^cmake@3.14:')
    patch('gmxDetectSimd-cmake-3.14.patch', when='@:2017.99^cmake@3.14:')
def patch(self):
if '+plumed' in self.spec:
self.spec['plumed'].package.apply_patch(self)
def cmake_args(self):
options = []
if '+mpi' in self.spec:
options.append('-DGMX_MPI:BOOL=ON')
if '+double' in self.spec:
options.append('-DGMX_DOUBLE:BOOL=ON')
if '~shared' in self.spec:
options.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
if '+cuda' in self.spec:
options.append('-DGMX_GPU:BOOL=ON')
options.append('-DCUDA_TOOLKIT_ROOT_DIR:STRING=' +
self.spec['cuda'].prefix)
else:
options.append('-DGMX_GPU:BOOL=OFF')
simd_value = self.spec.variants['simd'].value
if simd_value == 'auto':
pass
elif simd_value == 'none':
options.append('-DGMX_SIMD:STRING=None')
else:
options.append('-DGMX_SIMD:STRING=' + simd_value)
if '-rdtscp' in self.spec:
options.append('-DGMX_USE_RDTSCP:BOOL=OFF')
else:
options.append('-DGMX_USE_RDTSCP:BOOL=ON')
if '+mdrun_only' in self.spec:
options.append('-DGMX_BUILD_MDRUN_ONLY:BOOL=ON')
else:
options.append('-DGMX_BUILD_MDRUN_ONLY:BOOL=OFF')
if '~openmp' in self.spec:
options.append('-DGMX_OPENMP:BOOL=OFF')
else:
options.append('-DGMX_OPENMP:BOOL=ON')
if '+double_precision' in self.spec:
options.append('-DGMX_RELAXED_DOUBLE_PRECISION:BOOL=ON')
else:
options.append('-DGMX_RELAXED_DOUBLE_PRECISION:BOOL=OFF')
return options
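# Illustrative usage sketch (spec values below are examples only, not recommendations):
#   spack install gromacs@2019.4 +mpi+cuda simd=AVX2_256
# which cmake_args() above would translate into -DGMX_MPI:BOOL=ON, -DGMX_GPU:BOOL=ON,
# -DGMX_SIMD:STRING=AVX2_256, and so on.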
|
the-stack_0_13294 | from typing import Optional, Union, Tuple
from torch_geometric.typing import OptTensor, Adj
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import Parameter as Param
from torch.nn import Parameter
from torch_scatter import scatter
from torch_sparse import SparseTensor, matmul, masked_select_nnz
from torch_geometric.nn.conv import MessagePassing
import math
def glorot(tensor):
if tensor is not None:
stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
tensor.data.uniform_(-stdv, stdv)
def zeros(tensor):
if tensor is not None:
tensor.data.fill_(0)
@torch.jit._overload
def masked_edge_index(edge_index, edge_mask):
# type: (Tensor, Tensor) -> Tensor
pass
@torch.jit._overload
def masked_edge_index(edge_index, edge_mask):
# type: (SparseTensor, Tensor) -> SparseTensor
pass
def masked_edge_index(edge_index, edge_mask):
if isinstance(edge_index, Tensor):
return edge_index[:, edge_mask]
else:
return masked_select_nnz(edge_index, edge_mask, layout='coo')
class RGCNConv(MessagePassing):
r"""The relational graph convolutional operator from the `"Modeling
Relational Data with Graph Convolutional Networks"
<https://arxiv.org/abs/1703.06103>`_ paper
.. math::
\mathbf{x}^{\prime}_i = \mathbf{\Theta}_{\textrm{root}} \cdot
\mathbf{x}_i + \sum_{r \in \mathcal{R}} \sum_{j \in \mathcal{N}_r(i)}
\frac{1}{|\mathcal{N}_r(i)|} \mathbf{\Theta}_r \cdot \mathbf{x}_j,
where :math:`\mathcal{R}` denotes the set of relations, *i.e.* edge types.
Edge type needs to be a one-dimensional :obj:`torch.long` tensor which
stores a relation identifier
:math:`\in \{ 0, \ldots, |\mathcal{R}| - 1\}` for each edge.
.. note::
This implementation is as memory-efficient as possible by iterating
over each individual relation type.
Therefore, it may result in low GPU utilization in case the graph has a
large number of relations.
As an alternative approach, :class:`FastRGCNConv` does not iterate over
each individual type, but may consume a large amount of memory to
compensate.
We advise to check out both implementations to see which one fits your
needs.
Args:
in_channels (int or tuple): Size of each input sample. A tuple
corresponds to the sizes of source and target dimensionalities.
In case no input features are given, this argument should
correspond to the number of nodes in your graph.
out_channels (int): Size of each output sample.
num_relations (int): Number of relations.
num_bases (int, optional): If set to not :obj:`None`, this layer will
use the basis-decomposition regularization scheme where
:obj:`num_bases` denotes the number of bases to use.
(default: :obj:`None`)
num_blocks (int, optional): If set to not :obj:`None`, this layer will
use the block-diagonal-decomposition regularization scheme where
:obj:`num_blocks` denotes the number of blocks to use.
(default: :obj:`None`)
aggr (string, optional): The aggregation scheme to use
(:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
(default: :obj:`"mean"`)
root_weight (bool, optional): If set to :obj:`False`, the layer will
not add transformed root node features to the output.
(default: :obj:`True`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self, in_channels: Union[int, Tuple[int, int]],
out_channels: int,
num_relations: int,
num_bases: Optional[int] = None,
num_blocks: Optional[int] = None,
aggr: str = 'mean',
root_weight: bool = True,
bias: bool = True, **kwargs): # yapf: disable
super(RGCNConv, self).__init__(aggr=aggr, node_dim=0, **kwargs)
if num_bases is not None and num_blocks is not None:
raise ValueError('Can not apply both basis-decomposition and '
'block-diagonal-decomposition at the same time.')
self.in_channels = in_channels
self.out_channels = out_channels
self.num_relations = num_relations
self.num_bases = num_bases
self.num_blocks = num_blocks
if isinstance(in_channels, int):
in_channels = (in_channels, in_channels)
self.in_channels_l = in_channels[0]
if num_bases is not None:
self.weight = Parameter(
torch.Tensor(num_bases, in_channels[0], out_channels))
self.comp = Parameter(torch.Tensor(num_relations, num_bases))
elif num_blocks is not None:
assert (in_channels[0] % num_blocks == 0
and out_channels % num_blocks == 0)
self.weight = Parameter(
torch.Tensor(num_relations, num_blocks,
in_channels[0] // num_blocks,
out_channels // num_blocks))
self.register_parameter('comp', None)
else:
self.weight = Parameter(
torch.Tensor(num_relations, in_channels[0], out_channels))
self.register_parameter('comp', None)
if root_weight:
self.root = Param(torch.Tensor(in_channels[1], out_channels))
else:
self.register_parameter('root', None)
if bias:
self.bias = Param(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
glorot(self.comp)
glorot(self.root)
zeros(self.bias)
def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]],
edge_index: Adj, edge_type: OptTensor = None):
r"""
Args:
x: The input node features. Can be either a :obj:`[num_nodes,
in_channels]` node feature matrix, or an optional
one-dimensional node index tensor (in which case input features
are treated as trainable node embeddings).
Furthermore, :obj:`x` can be of type :obj:`tuple` denoting
source and destination node features.
edge_type: The one-dimensional relation type/index for each edge in
:obj:`edge_index`.
Should be only :obj:`None` in case :obj:`edge_index` is of type
:class:`torch_sparse.tensor.SparseTensor`.
(default: :obj:`None`)
"""
# Convert input features to a pair of node features or node indices.
x_l: OptTensor = None
if isinstance(x, tuple):
x_l = x[0]
else:
x_l = x
if x_l is None:
x_l = torch.arange(self.in_channels_l, device=self.weight.device)
x_r: Tensor = x_l
if isinstance(x, tuple):
x_r = x[1]
size = (x_l.size(0), x_r.size(0))
if isinstance(edge_index, SparseTensor):
edge_type = edge_index.storage.value()
assert edge_type is not None
# propagate_type: (x: Tensor)
out = torch.zeros(x_r.size(0), self.out_channels, device=x_r.device)
weight = self.weight
if self.num_bases is not None: # Basis-decomposition =================
weight = (self.comp @ weight.view(self.num_bases, -1)).view(
self.num_relations, self.in_channels_l, self.out_channels)
if self.num_blocks is not None: # Block-diagonal-decomposition =====
if x_l.dtype == torch.long and self.num_blocks is not None:
raise ValueError('Block-diagonal decomposition not supported '
'for non-continuous input features.')
for i in range(self.num_relations):
tmp = masked_edge_index(edge_index, edge_type == i)
h = self.propagate(tmp, x=x_l, size=size)
h = h.view(-1, weight.size(1), weight.size(2))
h = torch.einsum('abc,bcd->abd', h, weight[i])
out += h.contiguous().view(-1, self.out_channels)
else: # No regularization/Basis-decomposition ========================
for i in range(self.num_relations):
tmp = masked_edge_index(edge_index, edge_type == i)
if x_l.dtype == torch.long:
out += self.propagate(tmp, x=weight[i, x_l], size=size)
else:
h = self.propagate(tmp, x=x_l, size=size)
out = out + (h @ weight[i])
root = self.root
if root is not None:
out += root[x_r] if x_r.dtype == torch.long else x_r @ root
if self.bias is not None:
out += self.bias
return out
def message(self, x_j: Tensor) -> Tensor:
return x_j
def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
adj_t = adj_t.set_value(None, layout=None)
return matmul(adj_t, x, reduce=self.aggr)
def __repr__(self):
return '{}({}, {}, num_relations={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels,
self.num_relations)
class FastRGCNConv(RGCNConv):
r"""See :class:`RGCNConv`."""
def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]],
edge_index: Adj, edge_type: OptTensor = None):
""""""
self.fuse = False
assert self.aggr in ['add', 'sum', 'mean']
# Convert input features to a pair of node features or node indices.
x_l: OptTensor = None
if isinstance(x, tuple):
x_l = x[0]
else:
x_l = x
if x_l is None:
x_l = torch.arange(self.in_channels_l, device=self.weight.device)
x_r: Tensor = x_l
if isinstance(x, tuple):
x_r = x[1]
size = (x_l.size(0), x_r.size(0))
# propagate_type: (x: Tensor, edge_type: OptTensor)
out = self.propagate(edge_index, x=x_l, edge_type=edge_type, size=size)
root = self.root
if root is not None:
out += root[x_r] if x_r.dtype == torch.long else x_r @ root
if self.bias is not None:
out += self.bias
return out
def message(self, x_j: Tensor, edge_type: Tensor, index: Tensor) -> Tensor:
weight = self.weight
if self.num_bases is not None: # Basis-decomposition =================
weight = (self.comp @ weight.view(self.num_bases, -1)).view(
self.num_relations, self.in_channels_l, self.out_channels)
if self.num_blocks is not None: # Block-diagonal-decomposition =======
if x_j.dtype == torch.long:
raise ValueError('Block-diagonal decomposition not supported '
'for non-continuous input features.')
weight = weight[edge_type].view(-1, weight.size(2), weight.size(3))
x_j = x_j.view(-1, 1, weight.size(1))
return torch.bmm(x_j, weight).view(-1, self.out_channels)
else: # No regularization/Basis-decomposition ========================
if x_j.dtype == torch.long:
weight_index = edge_type * weight.size(1) + index
return weight.view(-1, self.out_channels)[weight_index]
return torch.bmm(x_j.unsqueeze(-2), weight[edge_type]).squeeze(-2)
def aggregate(self, inputs: Tensor, edge_type: Tensor, index: Tensor,
dim_size: Optional[int] = None) -> Tensor:
# Compute normalization in separation for each `edge_type`.
if self.aggr == 'mean':
norm = F.one_hot(edge_type, self.num_relations).to(torch.float)
norm = scatter(norm, index, dim=0, dim_size=dim_size)[index]
norm = torch.gather(norm, 1, edge_type.view(-1, 1))
norm = 1. / norm.clamp_(1.)
inputs = norm * inputs
return scatter(inputs, index, dim=self.node_dim, dim_size=dim_size)
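# Minimal smoke-test sketch (not part of the upstream module); the graph below is a
# made-up example: 10 nodes with 16 features and 40 random edges over 3 relation types.
if __name__ == '__main__':
    x = torch.randn(10, 16)
    edge_index = torch.randint(0, 10, (2, 40))
    edge_type = torch.randint(0, 3, (40,))
    conv = RGCNConv(16, 32, num_relations=3, num_bases=2)
    out = conv(x, edge_index, edge_type)
    print(out.shape)  # expected: torch.Size([10, 32])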
|
the-stack_0_13295 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module containing Net information storage."""
#from typing import Tuple
import pandas as pd
from .. import logger
class QNet():
"""Use DataFrame to hold Net Information about the connected pins of a
design.
    There is one unique net_id for each connected pin.
"""
def __init__(self):
"""Hold the net information of all the USED pins within a design."""
self.column_names = ['net_id', 'component_id', 'pin_name']
self._net_info = pd.DataFrame(columns=self.column_names)
self._qnet_latest_assigned_id = 0
self.logger = logger # type: logging.Logger
def _get_new_net_id(self) -> int:
"""Provide uniqe new qnet_id.
Returns:
int: ID to use for storing a new net within _net_info.
"""
self._qnet_latest_assigned_id += 1
return self._qnet_latest_assigned_id
@property
def qnet_latest_assigned_id(self) -> int:
"""Return unique number for each net in table.
Returns:
            int: For users of the design class to know the latest id added to _net_info.
"""
return self._qnet_latest_assigned_id
@property
def net_info(self) -> pd.DataFrame:
"""Provide table of all nets within the design.
Returns:
pd.DataFrame: Table of the net of pins within design.
"""
return self._net_info
def add_pins_to_table(self, comp1_id: int, pin1_name: str, comp2_id: int,
pin2_name: str) -> int:
"""Add two entries into the _net_info table. If either component/pin is
already in net_info, the connection will NOT be added to the net_info.
Arguments:
            comp1_id (int): Id of component 1.
            pin1_name (str): Corresponding pin name for component 1.
            comp2_id (int): Id of component 2.
            pin2_name (str): Corresponding pin name for component 2.
Returns:
int: 0 if not added to list, otherwise the netid
"""
        net_id = 0  # Zero means false: the pins were not added to _net_info
if not isinstance(comp1_id, int):
self.logger.warning(
                f'Expected an int, but have {comp1_id}. The pins were not entered into the net_info table.'
)
return net_id
if not isinstance(comp2_id, int):
self.logger.warning(
                f'Expected an int, but have {comp2_id}. The pins were not entered into the net_info table.'
)
return net_id
if not isinstance(pin1_name, str):
self.logger.warning(
                f'Expected a string, but have {pin1_name}. The pins were not entered into the net_info table.'
)
return net_id
if not isinstance(pin2_name, str):
self.logger.warning(
                f'Expected a string, but have {pin2_name}. The pins were not entered into the net_info table.'
)
return net_id
        # Confirm the component-pin combination is NOT in _net_info before adding them.
for (netID, component_id,
pin_name) in self._net_info.itertuples(index=False):
if ((component_id == comp1_id) and (pin_name == pin1_name)):
self.logger.warning(
f'Component: {comp1_id} and pin: {pin1_name} are already in net_info with net_id {netID}'
)
return net_id
if ((component_id == comp2_id) and (pin_name == pin2_name)):
self.logger.warning(
f'Component: {comp2_id} and pin: {pin2_name} are already in net_info with net_id {netID}'
)
return net_id
net_id = self._get_new_net_id()
entry1 = [net_id, comp1_id, pin1_name]
entry2 = [net_id, comp2_id, pin2_name]
temp_df = pd.DataFrame([entry1, entry2], columns=self.column_names)
self._net_info = self._net_info.append(temp_df, ignore_index=True)
# print(self._net_info)
return net_id
def delete_net_id(self, net_id_to_remove: int):
"""Removes the two entries with net_id_to_remove. If id is in
_net_info, the entry will be removed.
Arguments:
net_id_to_remove (int): The id to remove.
"""
self._net_info.drop(
self._net_info.index[self._net_info['net_id'] == net_id_to_remove],
inplace=True)
return
def delete_all_pins_for_component(self, component_id_to_remove: int) -> set:
"""Delete all the pins for a given component id.
Args:
component_id_to_remove (int): Component ID to remove
Returns:
set: All deleted ids
"""
all_net_id_deleted = set()
for (netID, component_id,
pin_name) in self._net_info.itertuples(index=False):
if (component_id == component_id_to_remove):
all_net_id_deleted.add(netID)
self.delete_net_id(netID)
return all_net_id_deleted
def get_components_and_pins_for_netid(
self, net_id_search: int) -> pd.core.frame.DataFrame:
"""Search with a net_id to get component id and pin name.
Arguments:
net_id_search (int): Unique net id which connects two pins within a design.
Returns:
pandas.DataFrame: Two rows of the net_info which have the same net_id_search.
"""
df_subset_based_on_net_id = self._net_info[(
self._net_info['net_id'] == net_id_search)]
return df_subset_based_on_net_id
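# Illustrative usage sketch (component ids and pin names below are made up):
#   qnet = QNet()
#   net_id = qnet.add_pins_to_table(1, 'pin_a', 2, 'pin_b')   # returns a new unique net id
#   qnet.get_components_and_pins_for_netid(net_id)            # two rows sharing that net id
#   qnet.delete_all_pins_for_component(1)                     # drops every net touching component 1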
|
the-stack_0_13296 | from discord.ext import commands
import discord, io
class Core(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.update = {
"allowUpdate": True,
"url": "https://raw.github.com/Akumatic/Akuma-Matata/master/extensions/core.py",
"private": False
}
def detectSetGame(self):
return f" | {self.bot.cfg['game']}" if self.bot.cfg["game"] != "" else ""
#Listener
@commands.Cog.listener()
async def on_ready(self):
print("Bot is running!")
game = f"{self.bot.cfg['prefix']}help{self.detectSetGame()}"
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(name=game))
@commands.Cog.listener()
async def on_guild_join(self, guild):
self.bot.serverCfg[str(guild.id)] = {}
self.bot.writeJSON("server.json", self.bot.serverCfg)
@commands.Cog.listener()
async def on_guild_remove(self, guild):
del self.bot.serverCfg[str(guild.id)]
self.bot.writeJSON("server.json", self.bot.serverCfg)
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
e = discord.Embed(color=discord.Color.red(), title="Error")
if isinstance(error, commands.CommandNotFound):
#e.add_field(name="Command Not Found", value="The command you tried to use does not exist.")
return #await ctx.author.send(embed=e)
if isinstance(error, commands.NotOwner):
e.add_field(name="Not The Owner", value="Only the owner of this bot can use this command.")
return await ctx.send(embed=e)
if isinstance(error, commands.NoPrivateMessage):
e.add_field(name="No Direct Message", value="This command is only usable in a server.")
return await ctx.send(embed=e)
if isinstance(error, commands.MissingPermissions):
e.add_field(name="Missing Permissions", value="You don't have the permissions to use this command.")
return await ctx.send(embed=e)
e.add_field(name="Source", value=ctx.message.channel, inline=False)
e.add_field(name="Trigger", value=ctx.message.content, inline=False)
e.add_field(name="Error", value=f"{type(error).__name__} ({error})", inline=False)
await ctx.send(embed=e)
#Commands
@commands.command()
@commands.is_owner()
async def stop(self, ctx):
ext = self.bot.extensions
while len(ext) > 0:
self.bot.unload_extension(list(ext.keys())[0])
await self.bot.close()
@commands.command()
@commands.is_owner()
async def setGame(self, ctx, *, msg : str = None):
self.bot.cfg["game"] = "" if msg == None else msg
game = f"{self.bot.cfg['prefix']}help{self.detectSetGame()}"
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(name=game))
@commands.command(hidden=True)
@commands.is_owner()
async def load(self, ctx, ext : str = None, json : bool = False):
"""Loads a new python file from \"extension\" folder.
First argument is the name of python file without .py extension.
(Optional) If second argument is True, it will be autoloaded"""
e = discord.Embed(title="Loading Extension")
if ext == None:
e.color = discord.Color.red()
e.add_field(name="No extension specified", value="Please specify the name of the extension.")
return await ctx.send(embed=e)
try:
self.bot.load_extension("extensions." + ext)
e.color = discord.Color.green()
e.add_field(name="Extension loaded", value=f"`{ext}` successfully loaded.", inline=False)
if json and ext not in self.bot.cfg["extensions"]:
self.bot.cfg["extensions"].append(ext)
self.bot.writeJSON("settings.json", self.bot.cfg)
e.add_field(name="Autoload", value=f"`{ext}` was added to autostart extensions.", inline=False)
except Exception as ex:
e.color = discord.Color.red()
e.add_field(name=f"Failed to load extension `{ext}`", value=f"{type(ex).__name__} ({ex})")
await ctx.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def unload(self, ctx, ext : str = None, json : bool = False):
"""Unloads an extension.
First argument is the name of the extension.
(Optional) If second argument is True, it will be removed from autoload"""
e = discord.Embed(title="Unloading Extension")
if ext == None:
e.color = discord.Color.red()
e.add_field(name="No extension specified", value="Please specify the name of the extension.")
return await ctx.send(embed=e)
if ("extensions." + ext) in self.bot.extensions:
self.bot.unload_extension("extensions." + ext)
e.color = discord.Color.green()
e.add_field(name="Extension unloaded", value=f"`{ext}` successfully unloaded.", inline=False)
if json and ext in self.bot.cfg["extensions"]:
self.bot.cfg["extensions"].remove(ext)
self.bot.writeJSON("settings.json", self.bot.cfg)
e.add_field(name="Autoload", value=f"`{ext}` was removed from autostart extensions.", inline=False)
else:
e.color = discord.Color.red()
e.add_field(name=f"Failed to unload `{ext}`", value=f"`{ext}` not loaded")
await ctx.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def reload(self, ctx, ext : str = None):
"""Reloads an extension"""
e = discord.Embed(title="Reloading Extension: Unloading")
if ext == None:
e.color = discord.Color.red()
e.add_field(name="No extension specified", value="Please specify the name of the extension.")
return await ctx.send(embed=e)
if ("extensions." + ext) in self.bot.extensions:
self.bot.unload_extension("extensions." + ext)
e.color = discord.Color.green()
e.add_field(name="Extension unloaded", value=f"`{ext}` successfully unloaded.", inline=False)
await ctx.send(embed=e)
e = discord.Embed(title="Reloading Extension: Loading")
try:
self.bot.load_extension("extensions." + ext)
e.color = discord.Color.green()
e.add_field(name="Extension loaded", value=f"`{ext}` successfully loaded.", inline=False)
except Exception as ex:
e.color = discord.Color.red()
e.add_field(name=f"Failed to load extension `{ext}`", value=f"{type(ex).__name__} ({ex})")
else:
e.color = discord.Color.red()
e.add_field(name=f"Failed to unload `{ext}`", value=f"`{ext}` not loaded")
await ctx.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def printExt(self, ctx):
"""Prints out every loaded extension"""
string = []
temp = None
for ext in self.bot.extensions:
temp = ext.split(".")
string.append(temp[-1] if len(temp) > 1 else temp[0])
e = discord.Embed(color=discord.Color.blue())
e.add_field(name="Loaded extensions", value=', '.join(string))
await ctx.send(embed=e)
def setup(bot):
bot.add_cog(Core(bot)) |
the-stack_0_13297 | # encoding: utf-8
from opendatatools.common import RestAgent
from opendatatools.common import date_convert, remove_non_numerical
from bs4 import BeautifulSoup
import datetime
import json
import pandas as pd
import io
from opendatatools.futures.futures_agent import _concat_df
import zipfile
def time_map(x):
if x == '':
return ''
else:
return datetime.datetime.strptime(x, '%Y%m%d').strftime('%Y-%m-%d')
def plan_map(x):
if '派' not in x:
return 0
else:
return '%.3f' % (float(x.split('派')[-1].split('元')[0])/10)
class SHExAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
headers = {
"Accept": '*/*',
'Referer': 'http://www.sse.com.cn/market/sseindex/indexlist/',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
}
self.add_headers(headers)
def get_index_list(self):
url = 'http://query.sse.com.cn/commonSoaQuery.do'
data = {
'sqlId': 'DB_SZZSLB_ZSLB',
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'pageHelp' in rsp:
data = rsp['pageHelp']['data']
return pd.DataFrame(data)
else:
return None
def get_index_component(self, index):
url = 'http://query.sse.com.cn/commonSoaQuery.do'
data = {
'sqlId': 'DB_SZZSLB_CFGLB',
'indexCode' : index,
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'pageHelp' in rsp:
data = rsp['pageHelp']['data']
return pd.DataFrame(data)
else:
return None
def get_dividend(self, code):
url = 'http://query.sse.com.cn/commonQuery.do'
data = {
'sqlId' : 'COMMON_SSE_GP_SJTJ_FHSG_AGFH_L_NEW',
'security_code_a' : code,
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'result' in rsp:
data = rsp['result']
return pd.DataFrame(data)
else:
return None
def get_rzrq_info(self, date):
date2 = date_convert(date, '%Y-%m-%d', '%Y%m%d')
url = 'http://www.sse.com.cn/market/dealingdata/overview/margin/a/rzrqjygk%s.xls' % (date2)
response = self.do_request(url, None, method='GET', type='binary')
if response is not None:
excel = pd.ExcelFile(io.BytesIO(response))
df_total = excel.parse('汇总信息').dropna()
df_detail = excel.parse('明细信息').dropna()
df_total['date'] = date
df_detail['date'] = date
return df_total, df_detail
else:
return None, None
def get_pledge_info(self, date):
date2 = date_convert(date, '%Y-%m-%d', '%Y%m%d')
url = 'http://query.sse.com.cn/exportExcel/exportStockPledgeExcle.do?tradeDate=%s' % (date2)
response = self.do_request(url, None, method='GET', type='binary')
if response is not None:
excel = pd.ExcelFile(io.BytesIO(response))
df_total = excel.parse('交易金额汇总').dropna()
df_detail = excel.parse('交易数量明细').dropna()
df_total['date'] = date
df_detail['date'] = date
return df_total, df_detail
else:
return None, None
class SZExAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def get_index_list(self):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE' : 'xls',
'CATALOGID' : '1812',
}
response = self.do_request(url, data, method='GET', type='binary')
df = pd.read_excel(io.BytesIO(response))
return df
def get_index_component(self, index):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1747',
'ZSDM' : index
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def get_rzrq_info(self, date):
df_total = self._get_rzrq_total(date)
df_detail = self._get_rzrq_detail(date)
if df_total is not None:
df_total['date'] = date
if df_detail is not None:
df_detail['date'] = date
return df_total, df_detail
def _get_rzrq_total(self, date):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_xxpl',
'TABKEY' : 'tab1',
"txtDate": date,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def _get_rzrq_detail(self, date):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_xxpl',
'TABKEY': 'tab2',
"txtDate" : date,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def get_pledge_info(self, date):
df_total = self._get_pledge_info_total(date)
df_detail = self._get_pledge_info_detail(date)
if df_total is not None:
df_total['date'] = date
if df_detail is not None:
df_detail['date'] = date
df_detail['证券代码'] = df_detail['证券代码'].apply(lambda x: str(x).zfill(6))
return df_total, df_detail
def _get_pledge_info_total(self, date):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_gpzyhgxx',
'TABKEY': 'tab1',
"txtDate" : date,
'ENCODE' : 1,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def _get_pledge_info_detail(self, date):
#url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
url = 'http://www.szse.cn/api/report/ShowReport'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_gpzyhgxx',
'TABKEY': 'tab2',
"txtDate" : date,
'ENCODE' : 1,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
class CSIAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def get_index_list(self):
url = 'http://www.csindex.com.cn/zh-CN/indices/index'
page = 1
result_data = []
while True:
data = {
"data_type" : "json",
"page" : page,
}
response = self.do_request(url, data, method='GET')
rsp = json.loads(response)
page = page + 1
print("fetching data at page %d" % (page) )
if "list" in rsp:
result_data.extend(rsp['list'])
if len(rsp['list']) == 0:
break
else:
return None
return pd.DataFrame(result_data)
def get_index_component(self, index):
url = 'http://www.csindex.com.cn/uploads/file/autofile/cons/%scons.xls' % (index)
response = self.do_request(url, None, method='GET', type='binary')
if response is not None:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
class XueqiuAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
# 600000.SH -> SH600000
def convert_to_xq_symbol(self, symbol):
temp = symbol.split(".")
return temp[1] + temp[0]
def convert_to_xq_symbols(self, symbols):
result = ''
for symbol in symbols.split(','):
result = result + self.convert_to_xq_symbol(symbol) + ','
return result
# SH600000 -> 600000.SH
def convert_from_xq_symbol(self, symbol):
market = symbol[0:2]
code = symbol[2:]
return code + '.' + market
def prepare_cookies(self, url):
response = self.do_request(url, None)
if response is not None:
cookies = self.get_cookies()
return cookies
else:
return None
def get_quote(self, symbols):
url = 'https://stock.xueqiu.com/v5/stock/realtime/quotec.json'
data = {
'symbol' : self.convert_to_xq_symbols(symbols)
}
# {"data":[{"symbol":"SH000001","current":3073.8321,"percent":-1.15,"chg":-35.67,"timestamp":1528427643770,"volume":6670380300,"amount":8.03515860132E10,"market_capital":1.393367880255658E13,"float_market_capital":1.254120000811718E13,"turnover_rate":0.64,"amplitude":0.91,"high":3100.6848,"low":3072.5418,"avg_price":3073.832,"trade_volume":5190400,"side":0,"is_trade":true,"level":1,"trade_session":null,"trade_type":null}],"error_code":0,"error_description":null}
response = self.do_request(url, data, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
result = []
for rsp in jsonobj['data']:
result.append( {
'time' : datetime.datetime.fromtimestamp(rsp['timestamp']/1000),
'symbol' : self.convert_from_xq_symbol(rsp['symbol']),
'high' : rsp['high'],
'low' : rsp['low'],
'last' : rsp['current'],
'change' : rsp['chg'],
'percent': rsp['percent'],
'volume' : rsp['volume'],
'amount' : rsp['amount'],
'turnover_rate' : rsp['turnover_rate'],
'market_capital' : rsp['market_capital'],
'float_market_capital' : rsp['float_market_capital'],
'is_trading' : rsp['is_trade'],
} )
return pd.DataFrame(result), ''
else:
return None, jsonobj['error_description']
else:
return None, '请求数据失败'
def get_kline(self, symbol, timestamp, period, count):
url = 'https://stock.xueqiu.com/v5/stock/chart/kline.json'
data = {
'symbol' : self.convert_to_xq_symbol(symbol),
'begin' : timestamp,
'period' : period,
'type' : 'before',
'count' : count,
'indicator' : 'kline',
}
cookies = self.prepare_cookies('https://xueqiu.com/hq')
response = self.do_request(url, data, cookies=cookies, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
result = []
if len(jsonobj['data']) <= 0:
return None, jsonobj['error_description']
for rsp in jsonobj['data']['item']:
result.append( {
'symbol' : symbol,
'time' : datetime.datetime.fromtimestamp(rsp[0]/1000),
'volume' : rsp[1],
'open' : rsp[2],
'high' : rsp[3],
'low' : rsp[4],
'last' : rsp[5],
'change' : rsp[6],
'percent': rsp[7],
'turnover_rate' : rsp[8],
} )
return pd.DataFrame(result), ''
else:
return None, jsonobj['error_description']
else:
return None, '请求数据失败'
def get_kline_multisymbol(self, symbols, timestamp, period, count):
cookies = self.prepare_cookies('https://xueqiu.com/hq')
url = 'https://stock.xueqiu.com/v5/stock/chart/kline.json'
result = []
for symbol in symbols:
data = {
'symbol' : self.convert_to_xq_symbol(symbol),
'begin' : timestamp,
'period' : period,
'type' : 'before',
'count' : count,
'indicator' : 'kline',
}
response = self.do_request(url, data, cookies=cookies, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
for rsp in jsonobj['data']['item']:
result.append( {
'symbol' : symbol,
'time' : datetime.datetime.fromtimestamp(rsp[0]/1000),
'volume' : rsp[1],
'open' : rsp[2],
'high' : rsp[3],
'low' : rsp[4],
'last' : rsp[5],
'change' : rsp[6],
'percent': rsp[7],
'turnover_rate': rsp[8],
} )
return pd.DataFrame(result), ''
def get_kline_multitimestamp(self, symbol, timestamps, period, count):
cookies = self.prepare_cookies('https://xueqiu.com/hq')
url = 'https://stock.xueqiu.com/v5/stock/chart/kline.json'
result = []
for timestamp in timestamps:
data = {
'symbol' : self.convert_to_xq_symbol(symbol),
'begin' : timestamp,
'period' : period,
'type' : 'before',
'count' : count,
'indicator' : 'kline',
}
response = self.do_request(url, data, cookies=cookies, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
for rsp in jsonobj['data']['item']:
result.append( {
'symbol' : symbol,
'time' : datetime.datetime.fromtimestamp(rsp[0]/1000),
'volume' : rsp[1],
'open' : rsp[2],
'high' : rsp[3],
'low' : rsp[4],
'last' : rsp[5],
'change' : rsp[6],
'percent': rsp[7],
'turnover_rate': rsp[8],
} )
return pd.DataFrame(result), ''
class SinaAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
@staticmethod
def clear_text(text):
return text.replace('\n', '').strip()
def get_adj_factor(self, symbol):
now = datetime.datetime.now()
year = now.year
month = now.month
        if month < 4:
quarter = 1
elif month < 7:
quarter = 2
elif month < 10:
quarter = 3
else:
quarter = 4
temp = symbol.split(".")
url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vMS_FuQuanMarketHistory/stockid/%s.phtml' % temp[0]
curr_year = year
curr_quarter = quarter
result_list = []
no_data_cnt = 0
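        # Walk backwards one quarter at a time; give up after three quarters return no rows.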
while True:
print('getting data for year = %d, quarter = %d' % (curr_year, curr_quarter))
param = {
'year' : curr_year,
'jidu' : curr_quarter,
}
response = self.do_request(url, param, method='GET', encoding='gb18030')
soup = BeautifulSoup(response, "html5lib")
divs = soup.find_all('div')
data = []
for div in divs:
if div.has_attr('class') and 'tagmain' in div['class']:
tables = div.find_all('table')
for table in tables:
if table.has_attr('id') and table['id'] == 'FundHoldSharesTable':
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 8:
date = SinaAgent.clear_text(cols[0].text)
adjust_factor = SinaAgent.clear_text(cols[7].text)
if date == '日期':
continue
data.append({
"date": date,
"adjust_factor": adjust_factor,
})
result_list.extend(data)
if len(data) == 0:
no_data_cnt = no_data_cnt + 1
if no_data_cnt >= 3:
break
# prepare for next round
if curr_quarter == 1:
curr_year = curr_year - 1
curr_quarter = 4
else:
curr_quarter = curr_quarter - 1
return pd.DataFrame(result_list), ""
# 600000.SH -> SH600000
def convert_to_sina_symbol(self, symbol):
temp = symbol.split(".")
return temp[1].lower() + temp[0]
def get_trade_detail(self, symbol, trade_date):
url = 'http://market.finance.sina.com.cn/downxls.php?date=%s&symbol=%s' % (trade_date, self.convert_to_sina_symbol(symbol))
response = self.do_request(url, None, method='GET', type='text', encoding='gb18030')
if response is not None:
rsp = io.StringIO(response)
line = rsp.readline() # skip first line
line = rsp.readline()
result = []
while line is not None and len(line) > 10:
items = line.split('\t')
if len(items) == 6:
result.append({
'time' : SinaAgent.clear_text(items[0]),
'price' : SinaAgent.clear_text(items[1]),
'change' : SinaAgent.clear_text(items[2]),
'volume' : SinaAgent.clear_text(items[3]),
'turnover': SinaAgent.clear_text(items[4]),
'bs' : SinaAgent.clear_text(items[5]),
})
line = rsp.readline()
df = pd.DataFrame(result)
df['date'] = trade_date
df['symbol'] = symbol
return df, ''
return None, '获取数据失败'
class CNInfoAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
@staticmethod
def clear_text(text):
return text.replace('\n', '').strip()
def _parse_report_file(self, file):
lines = file.readlines()
data_list = []
for i in range(len(lines)):
items = lines[i].decode('gbk').split()
if items[0][:2] == '机构':
head = items[0].split(sep=',')
else:
items = lines[i].decode('gbk')[1:]
data = items.split(sep=',')
data[0] = data[0][1:-1]
data[-1] = remove_non_numerical(data[-1])
data_list.append(data)
df = pd.DataFrame(data_list)
df.columns = head
return df
def get_report_data(self, market, symbol, type):
url = 'http://www.cninfo.com.cn/cninfo-new/data/download'
data = {
'market' : market,
'type' : type,
'code' : symbol,
'orgid' : 'gs%s%s' % (market, symbol),
'minYear' : '1990',
'maxYear' : '2018',
}
response = self.do_request(url, param=data, method='POST', type='binary')
'''if response is None:
return None, '没有获取到数据'
else:
'''
try:
zip_ref = zipfile.ZipFile(io.BytesIO(response))
df_list = []
for finfo in zip_ref.infolist():
file = zip_ref.open(finfo, 'r')
df = self._parse_report_file(file)
df_list.append(df)
df_result = _concat_df(df_list)
df_result.reset_index(inplace=True, drop=True)
return df_result, ''
except:
return None, '获取数据失败'
def get_shareholder_structure(self, market, symbol):
if symbol.startswith('002'):
board = 'sme'
elif symbol.startswith('3'):
board = 'cn'
else:
board = 'mb'
url = 'http://www.cninfo.com.cn/information/lastest/%s%s%s.html' % (market, board, symbol)
response = self.do_request(url, encoding='gb18030')
if response is None:
return None, '获取数据失败'
soup = BeautifulSoup(response, "html5lib")
divs = soup.find_all('div')
data = []
for div in divs:
if div.has_attr('class') and 'clear' in div['class']:
tables = div.find_all('table')
for table in tables:
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 2:
indicator = CNInfoAgent.clear_text(cols[0].text).replace(':', '')
value = CNInfoAgent.clear_text(cols[1].text)
data.append({
"indicator": indicator,
"value" : value,
})
break
return pd.DataFrame(data), ""
def get_dividend(self, symbol):
symbol = symbol[:6]
url = "http://www.cninfo.com.cn/information/dividend/szmb%s.html"
response = self.do_request(url % symbol, method='GET', encoding='gbk')
if response is None:
return pd.DataFrame([])
soup = BeautifulSoup(response, 'html5lib')
# get name_cn
tds = soup.find_all('td')
for td in tds:
if td.has_attr('style') and 'padding-right:10px' in td['style']:
name_cn = td.text.split(':')[-1]
#get dividend_data
divs = soup.find_all('div')
for div in divs:
if div.has_attr('class') and 'clear' in div['class']:
trs = div.find_all('tr')
if trs == []:
continue
data_list = []
for tr in trs[1:]:
data = [symbol, name_cn]
tds = tr.find_all('td')
for td in tds:
text = td.text.replace(' ', '').replace('\n', '').replace('\xa0', '')
data.append(text)
data_list.append(data)
df_res = pd.DataFrame(data_list, columns=['股票代码', '公司名称', '分红年度', '分红方案', '股权登记日',
'除权日', '红股上市日'])
df_res['股权登记日'] = df_res['股权登记日'].map(time_map)
df_res['除权日'] = df_res['除权日'].map(time_map)
df_res['分红方案'] = df_res['分红方案'].map(plan_map)
df_res['税后股利'] = df_res['分红方案'].map(lambda x: 0.8 * float(x))
df_res['公司代码'] = df_res['股票代码']
df = df_res[['公司代码', '股权登记日', '分红方案', '税后股利', '除权日', '公司名称', '股票代码']]
df.columns = ['COMPANY_CODE', 'DIVIDEND_DATE', 'DIVIDEND_PER_SHARE1_A',
'DIVIDEND_PER_SHARE2_A', 'EX_DIVIDEND_DATE_A','SECURITY_ABBR_A', 'SECURITY_CODE_A']
return df
class EastMoneyAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def _parse_hist_money_flow(self, response):
jsonobj = json.loads(response)
result = []
for data in jsonobj['data']:
items = data.split(',')
result.append({
'Time': items[0],
'ZLJLRJE': items[1],
'ZLJLRZB': items[2],
'CDDJLRJE': items[3],
'CDDJLRZB': items[4],
'DDLRJE': items[5],
'DDLRZB': items[6],
'ZDLRJE': items[7],
'ZDLRZB': items[8],
'XDLRJE': items[9],
'XDLRZB': items[10],
})
return pd.DataFrame(result)
def _get_hist_money_flow(self, url):
response = self.do_request(url)
if response is None:
return None, '获取数据失败'
df = self._parse_hist_money_flow(response)
return df, ''
def get_hist_money_flow(self, symbol):
url = 'http://ff.eastmoney.com//EM_CapitalFlowInterface/api/js?type=hff&rtntype=2&js={"data":(x)}&check=TMLBMSPROCR&acces_token=1942f5da9b46b069953c873404aad4b5&id=%s' % symbol
return self._get_hist_money_flow(url)
def get_hist_money_flow_market(self):
url = 'http://data.eastmoney.com/zjlx/dpzjlx.html'
response = self.do_request(url)
if response is None:
return None, '获取数据失败'
# get data from html
idx = response.find('var DefaultJson=')
idx1 = response.find('[', idx)
idx2 = response.find(']', idx)
json_rsp = '{ "data": ' + response[idx1:idx2+1] + '}'
df = self._parse_hist_money_flow(json_rsp)
return df, ''
def _get_realtime_money_flow(self, url):
response = self.do_request(url)
if response is None:
return None, '获取数据失败'
jsonobj = json.loads(response)
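        # 'xa' carries the comma-separated time axis; each 'ya' entry is split into the five money-flow series values.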
result = {}
result['Time'] = jsonobj['xa'].split(',')
result['ZLJLRJE'] = list()
result['CDDJLRJE'] = list()
result['DDJLRJE'] = list()
result['ZDJLRJE'] = list()
result['XDJLRJE'] = list()
for data in jsonobj['ya']:
items = data.split(',')
result['ZLJLRJE'].append(items[0])
result['CDDJLRJE'].append(items[1])
result['DDJLRJE'].append(items[2])
result['ZDJLRJE'].append(items[3])
result['XDJLRJE'].append(items[4])
df = pd.DataFrame().from_dict(result, orient='index').T
df.dropna(inplace=True)
return df, ''
def get_realtime_money_flow(self, symbol):
url = 'http://ff.eastmoney.com/EM_CapitalFlowInterface/api/js?id=%s&type=ff&check=MLBMS&js={(x)}&rtntype=3&acces_token=1942f5da9b46b069953c873404aad4b5' % symbol
return self._get_realtime_money_flow(url)
def get_realtime_money_flow_market(self):
url = 'http://ff.eastmoney.com/EM_CapitalFlowInterface/api/js?id=ls&type=ff&check=MLBMS&js={(x)}&rtntype=3&acces_token=1942f5da9b46b069953c873404aad4b5'
return self._get_realtime_money_flow(url)
    #==============================================================================
    # Fetch the realtime money flow for all stocks in one call, sorted by main-force net inflow
    ## Indicator definitions
    # Extra-large order: a single trade of at least 500,000 shares or 1,000,000 CNY;
    # Large order: at least 100,000 shares or 200,000 CNY, but below 500,000 shares and 1,000,000 CNY;
    # Medium order: at least 20,000 shares or 40,000 CNY, but below 100,000 shares and 200,000 CNY;
    # Small order: below 20,000 shares and 40,000 CNY;
    # Inflow: buy turnover;
    # Outflow: sell turnover;
    # Main-force inflow: buy turnover of extra-large plus large orders;
    # Main-force outflow: sell turnover of extra-large plus large orders;
    # Net amount: inflow - outflow;
    # Net ratio: (inflow - outflow) / total turnover;
    # Unit: 100 million CNY
    #==============================================================================
def toDataFrame(self,ll):
dataframe = []
for l in ll:
l = l.replace('-','0')
temp = l.split(",")[1:]
temp[2:-2] = map(eval, temp[2:-2])
dataframe.append(temp)
dataframe = pd.DataFrame(dataframe)
dataframe.columns = [u'代码',u'名称',u'最新价',u'今日涨跌幅',u'今日主力净流入净额',u'今日主力净流入净占比',u'今日超大单净流入净额',u'今日超大单净流入净占比',u'今日大单净流入净额',u'今日大单净流入净占比',u'今日中单净流入净额',u'今日中单净流入净占比',u'今日小单净流入净额',u'今日小单净流入净占比',u'time',u'未知']
return dataframe
def _get_realtime_allstock_flow(self, url):
response = self.do_request(url)
if response is None:
return None, '获取数据失败'
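        # The payload is a JS object literal; binding pages/date/data to strings first lets eval() treat its bare keys as dict keys.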
pages = 'pages'
date = 'date'
data = 'data'
data = eval(response[13:])
flashflow = data['data']
df = self.toDataFrame(flashflow)
df.index = df.ix[:,0]
df.dropna(inplace=True)
return df, ''
def get_allstock_flow(self):
url = 'http://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=ct&st=(BalFlowMain)&sr=-1&p=1&ps=3700&js=var%20ucjEIgIa={pages:(pc),date:%222014-10-22%22,data:[(x)]}&token=1942f5da9b46b069953c873404aad4b5&cmd=C._AB&sty=DCFFITA&rt=50984894'
return self._get_realtime_allstock_flow(url)
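# Illustrative usage sketch (codes and dates are examples; live network access is required):
#   sh = SHExAgent()
#   rzrq_total, rzrq_detail = sh.get_rzrq_info('2018-06-08')   # SSE margin trading summary/detail
#   xq = XueqiuAgent()
#   quotes, msg = xq.get_quote('600000.SH,000001.SZ')          # realtime quotes via Xueqiu
#   em = EastMoneyAgent()
#   flow_df, msg = em.get_hist_money_flow_market()             # market-wide historical money flow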
|
the-stack_0_13298 | from tqdm.auto import tqdm
import numpy as np
import glob
import os
from torchvision import models
from torchvision import transforms
import torch
import torch.nn as nn
from PIL import Image
import gc
import argparse
import h5py
import json
from augs import (
GaussianBlur,
Cutout,
CutoutColor,
CenterCrop,
Rotate,
Flip,
Grayscale,
Original,
)
parser = argparse.ArgumentParser(description="dump videos as features")
parser.add_argument(
"--videos_path",
default="",
type=str,
required=True,
help="path to npy stored videos",
)
parser.add_argument(
"--save_path", default="", type=str, required=True, help="path to features",
)
args = parser.parse_args()
device = "cuda" if torch.cuda.is_available() else "cpu"
batch_size = 32
preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
resnet = models.resnet50(pretrained=True)
modules = list(resnet.children())[:-1]
resnet = nn.Sequential(*modules)
resnet.to(device)
resnet.eval()
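# With the final fc layer removed, each forward pass yields a pooled 2048-dim feature per frame.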
files = glob.glob(os.path.join(args.videos_path, "*.npy"))
errors = []
Augmentations = [
Original(),
GaussianBlur(),
Cutout(),
CutoutColor(),
CenterCrop(),
Rotate(),
Flip(),
Grayscale(),
]
dataset = h5py.File("datasets/eccv16_dataset_tvsum_google_pool5.h5", "r")
all_picks = dict(
zip(
list(dataset.keys()),
[dataset[key]["picks"][...] for key in list(dataset.keys())],
)
)
f = open("id_to_key_map_tvsum.json")
id_key_map = json.load(f)
f.close()
for i, file in enumerate(files):
prefix = file.split("/")[-1].split(".")[0]
save_path = os.path.join(args.save_path, prefix)
picks = all_picks[id_key_map[prefix]]
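    # "picks" holds the frame indices pre-selected in the TVSum h5 file; only those frames get featurized.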
if not os.path.exists(save_path):
os.mkdir(save_path)
try:
video = np.load(file)
video = video[picks, :, :, :]
except:
errors.append(file)
continue
for aug in tqdm(Augmentations, desc=f"Augmenting video {i+1}/{len(files)}"):
aug_name = aug.__class__.__name__.lower()
curr_save_path = os.path.join(save_path, f"{prefix}_{aug_name}.pt")
if os.path.exists(curr_save_path):
continue
video_aug = aug(video)
features = []
inputs = []
for image in tqdm(video_aug, desc=aug_name):
image = Image.fromarray(image.astype(np.uint8))
image = preprocess(image)
image = image.unsqueeze(0).to(device)
inputs.append(image)
if len(inputs) % batch_size == 0:
inputs = torch.cat(inputs, 0)
with torch.no_grad():
feat = resnet(inputs)
features.append(feat.squeeze().cpu())
inputs = []
if len(inputs) > 0:
inputs = torch.cat(inputs, 0)
with torch.no_grad():
feat = resnet(inputs)
features.append(feat.squeeze(-1).squeeze(-1).cpu())
features = torch.cat(features, 0)
features = features.view(-1, 2048)
torch.save(features.cpu(), curr_save_path)
del features
gc.collect()
print("Errors")
print(errors)
|
the-stack_0_13299 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from collections import namedtuple
from corehq.apps.userreports.models import AsyncIndicator, get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
DOMAIN = 'icds-cas'
DATA_SOURCES = (
'static-icds-cas-static-child_cases_monthly_tableau_v2',
'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2',
)
FakeChange = namedtuple('FakeChange', ['id', 'document'])
CASE_DOC_TYPE = 'CommCareCase'
STATE_IDS = [
'f98e91aa003accb7b849a0f18ebd7039',
'f9b47ea2ee2d8a02acddeeb491d3e175',
'a2fcb186e9be8464e167bb1c56ce8fd9',
'f1cd643f0df908421abd915298ba57bc',
'd982a6fb4cca0824fbde59db18d3800f',
'9cd4fd88d9f047088a377b7e7d144830',
'ea4d587fa93a2ed8300853d51db661ef',
]
class Command(BaseCommand):
help = ""
def handle(self, *args, **options):
fake_change_doc = {'doc_type': CASE_DOC_TYPE, 'domain': DOMAIN}
for data_source_id in DATA_SOURCES:
print("processing data source %s" % data_source_id)
data_source, is_static = get_datasource_config(data_source_id, DOMAIN)
assert is_static
adapter = get_indicator_adapter(data_source)
table = adapter.get_table()
for case_id in self._get_case_ids_to_process(adapter, table, data_source_id):
change = FakeChange(case_id, fake_change_doc)
AsyncIndicator.update_from_kafka_change(change, [data_source_id])
def _add_filters(self, query, table, data_source_id):
if data_source_id == 'static-icds-cas-static-child_cases_monthly_tableau_v2':
return query.filter(
table.columns.valid_all_registered_in_month == 1,
table.columns.valid_in_month == 0,
)
elif data_source_id == 'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2':
return query.filter(
table.columns.pregnant_all == 1,
table.columns.pregnant == 0,
)
def _get_case_ids_to_process(self, adapter, table, data_source_id):
for state_id in STATE_IDS:
print("processing state %s" % state_id)
query = adapter.session_helper.Session.query(table.columns.doc_id).distinct(table.columns.doc_id)
case_ids = query.filter(
table.columns.state_id == state_id,
)
            case_ids = self._add_filters(case_ids, table, data_source_id).all()
num_case_ids = len(case_ids)
print("processing %d cases" % (num_case_ids))
for i, case_id in enumerate(case_ids):
yield case_id
if i % 1000 == 0:
print("processed %d / %d docs" % (i, num_case_ids))
|
the-stack_0_13300 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Name: PyAnime4K utils
Author: TianZerL
Editor: TianZerL
"""
from pyanime4k import ffmpeg_handler
import contextlib
import os
def migrate_audio_streams(
upscaled_video: str, original_video: str, output_path: str
) -> None:
""" migrate audio streams
Args:
upscaled_video (str): path of upscaled video.
original_video (str): path of original video.
output_path (str): path to output result.
Raises:
FileExistsError: when output path exists and isn't a directory
"""
ffmpeg_handler.migrate_audio_streams(
upscaled_video=upscaled_video,
original_video=original_video,
output_path=output_path,
)
with contextlib.suppress(FileNotFoundError):
os.remove(upscaled_video)
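# Example usage (illustrative; the file paths below are placeholders):
#   migrate_audio_streams("video_upscaled.mp4", "video.mp4", "output/")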
|
the-stack_0_13305 | from os.path import (
realpath,
join,
)
from typing import List
from hummingbot.core.utils.symbol_fetcher import SymbolFetcher
# Global variables
required_exchanges: List[str] = []
symbol_fetcher = SymbolFetcher.get_instance()
# Global static values
KEYFILE_PREFIX = "key_file_"
KEYFILE_POSTFIX = ".json"
GLOBAL_CONFIG_PATH = "conf/conf_global.yml"
TOKEN_ADDRESSES_FILE_PATH = realpath(join(__file__, "../../wallet/ethereum/erc20_tokens.json"))
DEFAULT_KEY_FILE_PATH = "conf/"
DEFAULT_LOG_FILE_PATH = "logs/"
DEFAULT_ETHEREUM_RPC_URL = "https://mainnet.coinalpha.com/hummingbot-test-node"
TEMPLATE_PATH = realpath(join(__file__, "../../templates/"))
CONF_FILE_PATH = "conf/"
CONF_PREFIX = "conf_"
CONF_POSTFIX = "_strategy"
EXCHANGES = {
"bamboo_relay",
"binance",
"coinbase_pro",
"ddex",
"idex",
"radar_relay",
}
DEXES = {
"bamboo_relay",
"ddex",
"idex",
"radar_relay",
}
STRATEGIES = {
"cross_exchange_market_making",
"arbitrage",
"discovery",
"pure_market_making",
}
EXAMPLE_PAIRS = {
"binance": "ZRXETH",
"ddex": "ZRX-WETH",
"idex": "ETH_ZRX",
"radar_relay": "ZRX-WETH",
"bamboo_relay": "ZRX-WETH",
"coinbase_pro": "ETH-USDC",
}
MAXIMUM_OUTPUT_PANE_LINE_COUNT = 1000
MAXIMUM_LOG_PANE_LINE_COUNT = 1000
# Liquidity Bounties:
LIQUIDITY_BOUNTY_CONFIG_PATH = "conf/conf_liquidity_bounty.yml"
MIN_ETH_STAKED_REQUIREMENT = 0.05
|
the-stack_0_13306 | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import tensorflow as tf
from dltk.core.modules.base import AbstractModule
class TransposedConvolution(AbstractModule):
"""Tranposed convolution module
This build a 2D or 3D transposed convolution based on the dimensionality of the input
"""
def __init__(self, out_filters, strides=(1, 1, 1), filter_shape=None, use_bias=False, name='conv_transposed'):
"""Constructs a transposed convolution
The kernel shape is defined as 2 * stride for stride > 1
Parameters
----------
out_filters : int
number of output filters
strides : tuple or list, optional
strides used for the transposed convolution
use_bias : bool
flag to toggle whether a bias is added to the output
name : string
name of the module
"""
self.in_shape = None
self.in_filters = None
self.out_filters = out_filters
self.out_shape = None
self.strides = strides
self.use_bias = use_bias
self.filter_shape = filter_shape
        self.full_strides = [1,] + list(self.strides) + [1,]
self._rank = len(list(self.strides))
assert 1 < self._rank < 4, 'Transposed convolutions are only supported in 2D and 3D'
super(TransposedConvolution, self).__init__(name=name)
def _get_kernel(self):
"""Builds the kernel for the transposed convolution
Returns
-------
tf.Variable
kernel for the transposed convolution
"""
kernel_shape = tuple(self.up_spatial_shape + [self.out_filters, self.in_filters])
k = tf.get_variable("k", shape=kernel_shape, initializer=tf.uniform_unit_scaling_initializer(),
collections=self.WEIGHT_COLLECTIONS)
return k
def _build(self, inp):
"""Applies a transposed convolution to the input tensor
Parameters
----------
inp : tf.Tensor
input tensor
Returns
-------
tf.Tensor
output of transposed convolution
"""
assert (len(inp.get_shape().as_list()) - 2) == self._rank, \
'The input has {} dimensions but this is a {}D convolution'.format(
len(inp.get_shape().as_list()), self._rank)
self.in_shape = tuple(inp.get_shape().as_list())
if self.in_filters is None:
self.in_filters = self.in_shape[-1]
assert self.in_filters == self.in_shape[-1], 'Convolution was built for different number of channels'
inp_shape = tf.shape(inp)
if self.filter_shape is None:
self.up_spatial_shape = [2 * s if s > 1 else 1 for s in self.strides]
else:
self.up_spatial_shape = self.filter_shape
self.out_shape = [inp_shape[i] * self.full_strides[i] for i in range(len(self.in_shape) - 1)] + [self.out_filters,]
self._k = self._get_kernel()
self.variables.append(self._k)
conv_op = tf.nn.conv3d_transpose
if self._rank == 2:
conv_op = tf.nn.conv2d_transpose
outp = conv_op(inp, self._k, output_shape=self.out_shape, strides=self.full_strides, padding='SAME',
name='conv_tranposed')
if self.use_bias:
self._b = tf.get_variable("b", shape=(self.out_filters,), initializer=tf.constant_initializer())
self.variables.append(self._b)
outp += self._b
outp.set_shape([self.in_shape[i] * self.full_strides[i] if isinstance(self.in_shape[i], int) else None
for i in range(len(self.in_shape) - 1)] + [self.out_filters,])
return outp |
the-stack_0_13308 | """
Name : c14_13_average_price_call.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : [email protected]
[email protected]
"""
import scipy as sp
s0=40. # today stock price
x=40. # exercise price
T=0.5 # maturity in years
r=0.05 # risk-free rate
sigma=0.2 # volatility (annualized)
sp.random.seed(123) # fix a seed here
n_simulation=100 # number of simulations
n_steps=100. # number of steps
#
dt=T/n_steps
call=sp.zeros([n_simulation], dtype=float)
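# Each path simulates geometric Brownian motion,
#   S_{t+dt} = S_t * exp((r - 0.5*sigma**2)*dt + sigma*sqrt(dt)*e),  e ~ N(0, 1),
# and the arithmetic-average-price (Asian) call pays max(mean(S) - x, 0),
# discounted back at the risk-free rate r.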
for j in range(0, n_simulation):
sT=s0
total=0
for i in range(0,int(n_steps)):
e=sp.random.normal()
sT*=sp.exp((r-0.5*sigma*sigma)*dt+sigma*e*sp.sqrt(dt))
total+=sT
price_average=total/n_steps
call[j]=max(price_average-x,0)
#
call_price=sp.mean(call)*sp.exp(-r*T)
print('call price based on average price = ', round(call_price,3))
|
the-stack_0_13309 | """MIT License
Copyright (c) 2021 Jacopo Schiavon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
import jax.numpy as jnp
from jax.ops import index_update, index
from typing import NamedTuple, Union
from .linesearch import wolfe_linesearch, LineSearchParameter
class OptimizerParams(NamedTuple):
"""
Parameters for the optimizer.
Arguments:
- maxtime (float, default 100)
maximum run time
- maxiter (int, default 100)
maximum number of iterations
- mingradnorm (float, default 1e-8)
minimum gradient norm
- minstepsize (float, default 1e-16)
minimum length of the stepsize
- maxcostevals (int, default 5000)
maximum number of cost evaluations
- verbosity (int, default 0)
Level of information logged by the solver while it operates,
0 is silent, 1 basic info on status, 2 info per iteration,
3 info per linesearch iteration
- logverbosity (bool, default False)
Wether to produce a log of the optimization
"""
maxtime: Union[float, jnp.ndarray] = 100
maxiter: Union[int, jnp.ndarray] = 500
mingradnorm: Union[float, jnp.ndarray] = 1e-6
minstepsize: Union[float, jnp.ndarray] = 1e-16
maxcostevals: Union[int, jnp.ndarray] = 5000
memory: Union[int, jnp.ndarray] = 4
verbosity: Union[int, jnp.ndarray] = 0
logverbosity: Union[bool, jnp.ndarray] = False
class OptimizerResult(NamedTuple):
"""
Object holding optimization results.
Components:
- name:
name of the optimizer
- success:
True if optimization succeeded.
- status:
integer solver specific return code. 0 means nominal.
- message:
solver specific message that explains status.
- x:
final solution.
- fun:
final function value.
- gr:
final gradient array.
- grnorm:
norm of the gradient.
- nfev:
integer number of function evaluations.
- ngev:
integer number of gradient evaluations.
- nit:
integer number of iterations of the optimization algorithm.
- stepsize:
length of the final stepsize
- time:
time used by the optimization
"""
name: str
success: Union[bool, jnp.ndarray]
status: Union[int, jnp.ndarray]
message: str
x: jnp.ndarray
fun: jnp.ndarray
gr: jnp.ndarray
grnorm: jnp.ndarray
nfev: Union[int, jnp.ndarray]
ngev: Union[int, jnp.ndarray]
nit: Union[int, jnp.ndarray]
stepsize: jnp.ndarray
time: jnp.ndarray
def __str__(self):
"""String representation."""
try:
sz = self.x.size
except AttributeError:
sz = sum(x.size for x in self.x)
return (
"{}.\n---\nSuccess: {} with status {} in {:.3f} s.\n"
"[{}]\n"
" -Iterations {} (cost evaluation: {}, gradient evaluation: {}, "
"time/it: {})\n"
" \t Function value {:.3f}, gradient norm {}, stepsize {},\n"
" \t value of X:\n{}"
).format(
self.name,
self.success, self.status, self.time, self.message,
self.nit, self.nfev, self.ngev, self.time / self.nit,
self.fun, self.grnorm, self.stepsize,
self.x if sz < 50 else '\t... Too big to show...'
)
def pprint(self):
"""Print a concise summary of the result."""
message = "Optimization {}completed (status {}).".format("" if self.success else "not ", self.status)
details = "{} iterations in {:.3f} s".format(self.nit, self.time)
print(message + "\t" + details)
class OptimizerLog(NamedTuple):
"""
Object holding optimization log.
Components:
- name:
name of the optimizer
- fun:
sequence of function value.
- x:
sequence of data points.
- grnorm:
sequence of gradient norm.
- beta:
sequence of computed beta.
- fev:
sequence of function evaluations.
- gev:
sequence of gradient evaluations.
- it:
iterations.
- stepsize:
sequence of length of stepsize.
- time
sequence of times.
"""
name: str = ''
fun: jnp.ndarray = jnp.array([])
x: list = []
grnorm: jnp.ndarray = jnp.array([])
fev: jnp.ndarray = jnp.array([], dtype=int)
gev: jnp.ndarray = jnp.array([], dtype=int)
it: jnp.ndarray = jnp.array([], dtype=int)
stepsize: jnp.ndarray = jnp.array([])
time: jnp.ndarray = jnp.array([])
class RL_BFGS():
"""L-BFGS optimizer."""
Algo = 'Riemannian Limited memory BFGS'
def __init__(self, manifold, **pars):
"""
Riemannian Limited memory BFGS.
Mandatory arguments:
- manifold
A manifold object that defines the operations on the manifold
Optional parameters:
- maxtime (float, default 100)
maximum run time
- maxiter (int, default 100)
maximum number of iterations
- mingradnorm (float, default 1e-8)
minimum gradient norm
- minstepsize (float, default 1e-16)
minimum length of the stepsize
- maxcostevals (int, default 5000)
maximum number of cost evaluations
- verbosity (int, default 0)
Level of information logged by the solver while it operates,
0 is silent, 1 basic info on status, 2 info per iteration
- logverbosity (bool, default False)
Wether to produce a log of the optimization
Optional linesearch parameters:
- ls_maxiter (int, default 10)
maximum number of iterations
- ls_minstepsize (float, default 1e-16)
minimum length of the stepsize
- ls_optimism (float, default 1.2)
optimism of the new step
- ls_initial_step (float, default 1)
initial stepsize before linesearch
- ls_suff_decr (float, default 1e-4)
sufficient decrease parameter
- ls_contraction (float, default 0.5)
contraction factor (must be 0 < c < 1)
- ls_verbosity (int, default 0)
Level of information to be displayed:
< 3 is silent, 3+ basic info
"""
self.man = manifold
self.__name__ = ("{} on {}".format(self.Algo, str(self.man).lower()))
self._parms = OptimizerParams(
**{k: pars[k] for k in pars if k in OptimizerParams._fields}
)
self._ls_pars = LineSearchParameter(
**{k: pars[k] for k in pars if k in LineSearchParameter._fields}
)
if pars.get('ls_verbosity', None) is None:
self._ls_pars = self._ls_pars._replace(
ls_verbosity=max(0, self._parms.verbosity - 3)
)
def __str__(self):
"""Representat the optimizer as a string."""
return self.__name__
def _check_stopping_criterion(self, time0, iters=-1, grnorm=float('inf'), stepsize=float('inf'), costevals=-1):
status = - 1
if grnorm <= self._parms.mingradnorm:
status = 0
elif stepsize <= self._parms.minstepsize:
status = 1
elif iters >= self._parms.maxiter:
status = 2
elif time.time() >= time0 + self._parms.maxtime:
status = 3
elif costevals >= self._parms.maxcostevals:
status = 4
return status
def _compute_descent_direction(self, l, x, gr, gamma):
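        # Standard L-BFGS two-loop recursion: walk backwards through the stored
        # (s, y, rho) pairs accumulating the alphas, apply the scaled initial
        # inverse Hessian H0 = gamma * I, then walk forwards with the betas and
        # return the descent direction -r.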
q = gr
m = self._parms.memory
H0 = gamma * jnp.identity(gr.shape[0])
alpha = jnp.zeros(shape=(l,))
if self._parms.verbosity >= 3:
print('\tm = {}; l = {}'.format(m, l))
for i in jnp.arange(m - l + 1, 0, -1):
alpha = index_update(alpha, i-1, self.rhok[i-1] * self.man.inner(x, self.sk[i-1], q))
q = q - alpha[i-1] * self.yk[i-1]
r = jnp.matmul(H0, q)
for i in jnp.arange(0, l):
beta = self.rhok[i] * self.man.inner(x, self.yk[i], r)
r = r + (alpha[i] - beta) * self.sk[i]
return -r
def solve(self, objective, gradient, x=None, key=None):
"""
Perform optimization using gradient descent with linesearch.
This method first computes the gradient (derivative) of obj
w.r.t. arg, and then optimizes by moving in the direction of
steepest descent (which is the opposite direction to the gradient).
Arguments:
- objective : callable
The cost function to be optimized
- gradient : callable
The gradient of the cost function
- x : array (None)
Optional parameter. Starting point on the manifold. If none
then a starting point will be randomly generated.
- key: array (None)
Optional parameter, required if x is not provided to randomly
initiate the algorithm
Returns:
- OptimizerResult object
"""
msg = ("status meaning: 0=converged, 1=stepsize too small, "
"2=max iters reached, 3=max time reached, "
"4=max cost evaluations, "
"-1=undefined"
)
if self._parms.verbosity >= 1:
print('Starting {}'.format(self.__name__))
self._costev = 0
self._gradev = 0
def cost(x):
self._costev += 1
return objective(x)
def grad(x):
self._gradev += 1
return self.man.egrad2rgrad(x, gradient(x))
def ls(c_a_g, x, d, f0, df0, g0):
return wolfe_linesearch(c_a_g, x, d, f0, df0, g0, self._ls_pars)
if x is None:
try:
x = self.man.rand(key)
except TypeError:
raise ValueError("Either provide an initial point for"
" the algorithm or a valid random key"
" to perform random initialization")
k = 0
l = 0
gamma = 1.
stepsize = 1.
memorized_shape = (self._parms.memory,) + x.shape
self.sk = jnp.zeros(shape=(memorized_shape))
self.yk = jnp.zeros(shape=(memorized_shape))
self.rhok = jnp.zeros(shape=(self._parms.memory))
f0 = cost(x)
gr = grad(x)
grnorm = self.man.norm(x, gr)
d = - gr
df0 = self.man.inner(x, d, gr)
t_start = time.time()
if self._parms.logverbosity:
logs = OptimizerLog(
name="log of {}".format(self.__name__),
fun=jnp.array([f0]),
x=[x],
grnorm=jnp.array([grnorm]),
fev=jnp.array([self._costev], dtype=int),
gev=jnp.array([self._gradev], dtype=int),
it=jnp.array([k], dtype=int),
stepsize=jnp.array([1.]),
time=jnp.array([time.time() - t_start])
)
while True:
if self._parms.verbosity >= 2:
print('iter: {}\n\tfun value: {:.2f}'.format(k, f0))
print('\tgrad norm: {:.2f}'.format(grnorm))
print('\tdirectional derivative: {:.2f}'.format(df0))
status = self._check_stopping_criterion(
t_start,
k,
grnorm,
stepsize,
self._costev
)
if status >= 0:
break
def cost_and_grad(t):
xnew = self.man.retraction(x, t * d)
fn = cost(xnew)
gn = grad(xnew)
dn = self.man.inner(xnew, - gn, gn)
# dn = -jnp.sqrt(jnp.abs(dn)) if dn < 0 else jnp.sqrt(dn)
return fn, gn, dn
ls_results = ls(cost_and_grad, x, d, f0, df0, gr)
alpha = ls_results.a_k
stepsize = jnp.abs(alpha * df0)
newx = self.man.retraction(x, alpha * d)
newf = ls_results.f_k
newgr = ls_results.g_k
            newgrnorm = self.man.norm(newx, newgr)
sk = self.man.vector_transport(x, alpha * d, alpha * d)
yk = newgr - self.man.vector_transport(x, alpha * d, gr)
a = self.man.inner(newx, yk, sk)
b = self.man.norm(newx, sk) ** 2
if ((a / b) >= (grnorm * 1e-4)):
c = self.man.norm(newx, yk) ** 2
rhok = 1 / a
gamma = a / c
if l == self._parms.memory:
self.sk = self.sk[1:]
self.yk = self.yk[1:]
self.rhok = self.rhok[1:]
else:
l += 1
self.sk = index_update(self.sk, index[l, :, :], sk)
self.yk = index_update(self.yk, index[l, :, :], yk)
self.rhok = index_update(self.rhok, l, rhok)
for i in range(l):
self.sk = index_update(self.sk, index[i, :, :], self.man.vector_transport(x, alpha*d, self.sk[i]))
self.yk = index_update(self.yk, index[i, :, :], self.man.vector_transport(x, alpha*d, self.yk[i]))
if self._parms.verbosity >= 2:
print('\talpha: {}'.format(alpha))
print('\tgamma: {}'.format(gamma))
print('\ta / b: {}'.format(a / b))
x = newx
f0 = newf
gr = newgr
grnorm = newgrnorm
k += 1
if l > 0:
d = self._compute_descent_direction(l, x, gr, gamma)
else:
d = - gr
df0 = self.man.inner(x, d, gr)
if self._parms.logverbosity:
logs = logs._replace(
fun=jnp.append(logs.fun, f0),
x=logs.x + [x],
grnorm=jnp.append(logs.grnorm, grnorm),
fev=jnp.append(logs.fev, self._costev),
gev=jnp.append(logs.gev, self._gradev),
it=jnp.append(logs.it, k),
stepsize=jnp.append(logs.stepsize, stepsize),
time=jnp.append(logs.time, time.time() - t_start)
)
result = OptimizerResult(
name=self.__name__,
success=True if status == 0 else False,
status=status,
message=msg,
x=x,
fun=f0,
gr=gr,
grnorm=grnorm,
nfev=self._costev,
ngev=self._gradev,
nit=k,
stepsize=stepsize,
time=(time.time() - t_start)
)
if self._parms.verbosity >= 1:
result.pprint()
if self._parms.logverbosity:
return result, logs
return result
|
the-stack_0_13310 | import cv2
from flask import Flask
from scipy.spatial import distance
from extract_car import extract_car
from extract_parking import extract_parking
from extract_rectangle import extract_rectangle
app = Flask(__name__)
# Start empty so the '/' route works before '/initialize' has populated it.
available_parking = []
def find_parking(show_output):
cap = cv2.VideoCapture("http://10.200.9.248:8080/video/mjpeg")
accumulator_free = []
accumulator_occupied = []
global available_parking
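    # Each accumulator entry is [position, score, confirmed, kind]: the score
    # decays by 1 every frame and gains 2 on re-detection (capped at 5), so a
    # spot must be seen repeatedly before it is drawn and counted.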
while(True):
ret, frame = cap.read()
height, width = frame.shape[:2]
frame = frame[0:height, 0:2*(width//3)]
frame_copy = frame.copy()
res = extract_parking(frame)
res, positions_free = extract_rectangle(frame, res)
res, positions_occupied = extract_car(frame, res)
for acc_free in accumulator_free:
acc_free[1] -= 1
for pos_free in positions_free:
pos_found = False
for acc_free in accumulator_free:
dist = distance.euclidean(pos_free, acc_free[0])
if dist < 10:
acc_free[1] += 2
pos_found = True
break
if not pos_found:
accumulator_free.append([pos_free, 1, False, 'f'])
i = 0
while i < len(accumulator_free):
if accumulator_free[i][1] >= 5:
accumulator_free[i][1] = 5
accumulator_free[i][2] = True
elif accumulator_free[i][1] == 0:
accumulator_free.pop(i)
continue
i += 1
total_spots = 0
for acc_free in accumulator_free:
if acc_free[2]:
cv2.circle(frame_copy, acc_free[0], 30, (0, 200, 0), -1)
total_spots += 1
#######
for acc_free in accumulator_occupied:
acc_free[1] -= 1
for pos_free in positions_occupied:
pos_found = False
for acc_free in accumulator_occupied:
dist = distance.euclidean(pos_free, acc_free[0])
if dist < 10:
acc_free[1] += 2
pos_found = True
break
if not pos_found:
accumulator_occupied.append([pos_free, 1, False, 'o'])
i = 0
while i < len(accumulator_occupied):
if accumulator_occupied[i][1] >= 5:
accumulator_occupied[i][1] = 5
accumulator_occupied[i][2] = True
elif accumulator_occupied[i][1] == 0:
accumulator_occupied.pop(i)
continue
i += 1
for acc_free in accumulator_occupied:
if acc_free[2]:
cv2.circle(frame_copy, acc_free[0], 30, (0, 0, 200), -1)
total_spots += 1
if show_output:
cv2.imshow('frame', frame_copy)
if total_spots == 3:
merged_list = accumulator_free + accumulator_occupied
spots = sorted(merged_list, key=lambda acc: acc[0][1])
spots = sorted(spots, key=lambda acc: acc[0][0])
available_parking = []
for s in range(len(spots)):
if spots[s][-1] == 'f':
available_parking.append(s)
# print(available_parking)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
if show_output:
cv2.destroyAllWindows()
@app.route('/')
def main():
"""Say hello"""
global available_parking
print(available_parking)
return "Hello World: %s" % str(available_parking)
@app.route('/initialize')
def initialize():
global available_parking
find_parking(False)
if __name__ == '__main__':
app.run(threaded=True)
|
the-stack_0_13312 | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "93183a41b962ce21ea168357172aaf00cdca5bd9"
LLVM_SHA256 = "9f212bca2050e2cffa15aa72aa07d89e108b400d15ca541327a829e3d4108fb9"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
patch_file = "//third_party/llvm:disable_parallelism_in_verifier.patch",
)
|
the-stack_0_13313 | from six import BytesIO, StringIO, text_type, string_types
from django.http import HttpResponse
from django.contrib.contenttypes.models import ContentType
try:
from django.db.models.fields.related_descriptors import ManyToManyDescriptor
except ImportError:
# Django 1.8 compat hack.
from django.db.models.fields.related import (
ReverseManyRelatedObjectsDescriptor as ManyToManyDescriptor
)
from django.db.models import Avg, Count, Sum, Max, Min
from openpyxl.workbook import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from openpyxl.utils import get_column_letter
from openpyxl.styles import Font
import csv
import re
from collections import namedtuple
from decimal import Decimal
from numbers import Number
from functools import reduce
import datetime
from .utils import (
get_relation_fields_from_model,
get_properties_from_model,
get_direct_fields_from_model,
get_model_from_path_string,
get_custom_fields_from_model,
)
DisplayField = namedtuple(
"DisplayField",
"path path_verbose field field_verbose aggregate total group choices field_type",
)
def generate_filename(title, ends_with):
title = title.split('.')[0]
    title = title.replace(' ', '_')
title += ('_' + datetime.datetime.now().strftime("%m%d_%H%M"))
if not title.endswith(ends_with):
title += ends_with
return title
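# Typical use of DataExportMixin (illustrative sketch): mix it into a view or
# exporter class, then for example
#   rows, message = self.report_to_list(queryset, ['first_name', 'groups__name'], user=request.user)
#   return self.list_to_xlsx_response(rows, title='people')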
class DataExportMixin(object):
def build_sheet(self, data, ws, sheet_name='report', header=None, widths=None):
first_row = 1
column_base = 1
ws.title = re.sub(r'\W+', '', sheet_name)[:30]
if header:
for i, header_cell in enumerate(header):
cell = ws.cell(row=first_row, column=i + column_base)
cell.value = header_cell
cell.font = Font(bold=True)
if widths:
ws.column_dimensions[get_column_letter(i + 1)].width = widths[i]
for row in data:
for i in range(len(row)):
item = row[i]
# If item is a regular string
if isinstance(item, str):
# Change it to a unicode string
try:
row[i] = text_type(item)
except UnicodeDecodeError:
row[i] = text_type(item.decode('utf-8', 'ignore'))
elif type(item) is dict:
row[i] = text_type(item)
try:
ws.append(row)
except ValueError as e:
                ws.append([str(e)])
            except Exception:
ws.append(['Unknown Error'])
def build_xlsx_response(self, wb, title="report"):
""" Take a workbook and return a xlsx file response """
title = generate_filename(title, '.xlsx')
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
response = HttpResponse(
myfile.getvalue(),
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response
def build_csv_response(self, wb, title="report"):
""" Take a workbook and return a csv file response """
title = generate_filename(title, '.csv')
myfile = StringIO()
sh = wb.active
c = csv.writer(myfile)
for r in sh.rows:
c.writerow([cell.value for cell in r])
response = HttpResponse(
myfile.getvalue(),
content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response
def list_to_workbook(self, data, title='report', header=None, widths=None):
""" Create just a openpxl workbook from a list of data """
wb = Workbook()
title = re.sub(r'\W+', '', title)[:30]
if isinstance(data, dict):
i = 0
for sheet_name, sheet_data in data.items():
if i > 0:
wb.create_sheet()
ws = wb.worksheets[i]
self.build_sheet(
sheet_data, ws, sheet_name=sheet_name, header=header)
i += 1
else:
ws = wb.worksheets[0]
self.build_sheet(data, ws, header=header, widths=widths)
return wb
def list_to_xlsx_file(self, data, title='report', header=None, widths=None):
""" Make 2D list into a xlsx response for download
data can be a 2d array or a dict of 2d arrays
like {'sheet_1': [['A1', 'B1']]}
returns a StringIO file
"""
wb = self.list_to_workbook(data, title, header, widths)
if not title.endswith('.xlsx'):
title += '.xlsx'
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
return myfile
def list_to_csv_file(self, data, title='report', header=None, widths=None):
""" Make a list into a csv response for download.
"""
wb = self.list_to_workbook(data, title, header, widths)
if not title.endswith('.csv'):
title += '.csv'
myfile = StringIO()
sh = wb.active
c = csv.writer(myfile)
for r in sh.rows:
c.writerow([cell.value for cell in r])
return myfile
def list_to_xlsx_response(self, data, title='report', header=None,
widths=None):
""" Make 2D list into a xlsx response for download
data can be a 2d array or a dict of 2d arrays
like {'sheet_1': [['A1', 'B1']]}
"""
wb = self.list_to_workbook(data, title, header, widths)
return self.build_xlsx_response(wb, title=title)
def list_to_csv_response(self, data, title='report', header=None,
widths=None):
""" Make 2D list into a csv response for download data.
"""
wb = self.list_to_workbook(data, title, header, widths)
return self.build_csv_response(wb, title=title)
def add_aggregates(self, queryset, display_fields):
agg_funcs = {
'Avg': Avg, 'Min': Min, 'Max': Max, 'Count': Count, 'Sum': Sum
}
for display_field in display_fields:
if display_field.aggregate:
func = agg_funcs[display_field.aggregate]
full_name = display_field.path + display_field.field
queryset = queryset.annotate(func(full_name))
return queryset
def report_to_list(self, queryset, display_fields, user=None, property_filters=[], preview=False):
""" Create list from a report with all data filtering.
queryset: initial queryset to generate results
display_fields: list of field references or DisplayField models
user: requesting user. If left as None - there will be no permission check
property_filters: ???
preview: return only first 50 rows
Returns list, message in case of issues.
"""
model_class = queryset.model
def can_change_or_view(model):
""" Return True iff `user` has either change or view permission
for `model`. """
if user is None:
return True
model_name = model._meta.model_name
app_label = model._meta.app_label
can_change = user.has_perm(app_label + '.change_' + model_name)
can_view = user.has_perm(app_label + '.view_' + model_name)
return can_change or can_view
if not can_change_or_view(model_class):
return [], 'Permission Denied'
if isinstance(display_fields, list):
# Convert list of strings to DisplayField objects.
new_display_fields = []
for display_field in display_fields:
field_list = display_field.split('__')
field = field_list[-1]
path = '__'.join(field_list[:-1])
if path:
path += '__' # Legacy format to append a __ here.
new_model = get_model_from_path_string(model_class, path)
try:
model_field = new_model._meta.get_field_by_name(field)[0]
                except Exception:
                    try:
                        model_field = new_model._meta.get_field(field)
                    except Exception:
                        model_field = None
                choices = model_field.choices if model_field else None
new_display_fields.append(DisplayField(
path, '', field, '', '', None, None, choices, ''
))
display_fields = new_display_fields
# Build group-by field list.
group = [df.path + df.field for df in display_fields if df.group]
# To support group-by with multiple fields, we turn all the other
# fields into aggregations. The default aggregation is `Max`.
if group:
for field in display_fields:
if (not field.group) and (not field.aggregate):
field.aggregate = 'Max'
message = ""
objects = self.add_aggregates(queryset, display_fields)
# Display Values
display_field_paths = []
property_list = {}
custom_list = {}
display_totals = {}
for i, display_field in enumerate(display_fields):
model = get_model_from_path_string(model_class, display_field.path)
if display_field.field_type == "Invalid":
continue
if not model or can_change_or_view(model):
display_field_key = display_field.path + display_field.field
if display_field.field_type == "Property":
property_list[i] = display_field_key
elif display_field.field_type == "Custom Field":
custom_list[i] = display_field_key
elif display_field.aggregate == "Avg":
display_field_key += '__avg'
elif display_field.aggregate == "Max":
display_field_key += '__max'
elif display_field.aggregate == "Min":
display_field_key += '__min'
elif display_field.aggregate == "Count":
display_field_key += '__count'
elif display_field.aggregate == "Sum":
display_field_key += '__sum'
if display_field.field_type not in ('Property', 'Custom Field'):
display_field_paths.append(display_field_key)
if display_field.total:
display_totals[display_field_key] = Decimal(0)
else:
message += 'Error: Permission denied on access to {0}.'.format(
display_field.name
)
def increment_total(display_field_key, val):
""" Increment display total by `val` if given `display_field_key` in
`display_totals`.
"""
if display_field_key in display_totals:
if isinstance(val, bool):
# True: 1, False: 0
display_totals[display_field_key] += Decimal(val)
elif isinstance(val, Number):
display_totals[display_field_key] += Decimal(str(val))
elif val:
display_totals[display_field_key] += Decimal(1)
# Select pk for primary and m2m relations in order to retrieve objects
# for adding properties to report rows. Group-by queries do not support
# Property nor Custom Field filters.
if not group:
display_field_paths.insert(0, 'pk')
m2m_relations = []
for position, property_path in property_list.items():
property_root = property_path.split('__')[0]
root_class = model_class
try:
property_root_class = getattr(root_class, property_root)
except AttributeError: # django-hstore schema compatibility
continue
if type(property_root_class) == ManyToManyDescriptor:
display_field_paths.insert(1, '%s__pk' % property_root)
m2m_relations.append(property_root)
if group:
values = objects.values(*group)
values = self.add_aggregates(values, display_fields)
filtered_report_rows = [
[row[field] for field in display_field_paths]
for row in values
]
for row in filtered_report_rows:
for pos, field in enumerate(display_field_paths):
increment_total(field, row[pos])
else:
filtered_report_rows = []
values_and_properties_list = []
values_list = objects.values_list(*display_field_paths)
for row in values_list:
row = list(row)
values_and_properties_list.append(row[1:])
obj = None # we will get this only if needed for more complex processing
# related_objects
remove_row = False
# filter properties (remove rows with excluded properties)
for property_filter in property_filters:
if not obj:
obj = model_class.objects.get(pk=row.pop(0))
root_relation = property_filter.path.split('__')[0]
if root_relation in m2m_relations:
pk = row[0]
if pk is not None:
# a related object exists
m2m_obj = getattr(obj, root_relation).get(pk=pk)
val = reduce(getattr, [property_filter.field], m2m_obj)
else:
val = None
else:
if property_filter.field_type == 'Custom Field':
for relation in property_filter.path.split('__'):
if hasattr(obj, root_relation):
obj = getattr(obj, root_relation)
val = obj.get_custom_value(property_filter.field)
else:
val = reduce(getattr, (property_filter.path + property_filter.field).split('__'), obj)
if property_filter.filter_property(val):
remove_row = True
values_and_properties_list.pop()
break
if not remove_row:
for i, field in enumerate(display_field_paths[1:]):
increment_total(field, row[i + 1])
for position, display_property in property_list.items():
if not obj:
obj = model_class.objects.get(pk=row.pop(0))
relations = display_property.split('__')
root_relation = relations[0]
if root_relation in m2m_relations:
pk = row.pop(0)
if pk is not None:
# a related object exists
m2m_obj = getattr(obj, root_relation).get(pk=pk)
val = reduce(getattr, relations[1:], m2m_obj)
else:
val = None
else:
# Could error if a related field doesn't exist
try:
val = reduce(getattr, relations, obj)
except AttributeError:
val = None
values_and_properties_list[-1].insert(position, val)
increment_total(display_property, val)
for position, display_custom in custom_list.items():
if not obj:
obj = model_class.objects.get(pk=row.pop(0))
val = obj.get_custom_value(display_custom)
values_and_properties_list[-1].insert(position, val)
increment_total(display_custom, val)
filtered_report_rows.append(values_and_properties_list[-1])
if preview and len(filtered_report_rows) == 50:
break
# Sort results if requested.
if hasattr(display_fields, 'filter'):
defaults = {
None: text_type,
datetime.date: lambda: datetime.date(datetime.MINYEAR, 1, 1),
datetime.datetime: lambda: datetime.datetime(datetime.MINYEAR, 1, 1),
}
# Order sort fields in reverse order so that ascending, descending
# sort orders work together (based on Python's stable sort). See
# http://stackoverflow.com/questions/6666748/ for details.
sort_fields = display_fields.filter(sort__gt=0).order_by('-sort')
sort_values = sort_fields.values_list('position', 'sort_reverse')
for pos, reverse in sort_values:
column = (row[pos] for row in filtered_report_rows)
type_col = (type(val) for val in column if val is not None)
field_type = next(type_col, None)
default = defaults.get(field_type, field_type)()
filtered_report_rows = sorted(
filtered_report_rows,
key=lambda row: self.sort_helper(row[pos], default),
reverse=reverse,
)
values_and_properties_list = filtered_report_rows
# Build mapping from display field position to choices list.
choice_lists = {}
for df in display_fields:
if df.choices and hasattr(df, 'choices_dict'):
df_choices = df.choices_dict
# Insert blank and None as valid choices.
df_choices[''] = ''
df_choices[None] = ''
choice_lists[df.position] = df_choices
# Build mapping from display field position to format.
display_formats = {}
for df in display_fields:
if hasattr(df, 'display_format') and df.display_format:
display_formats[df.position] = df.display_format
def formatter(value, style):
# Convert value to Decimal to apply numeric formats.
try:
value = Decimal(value)
except Exception:
pass
try:
return style.string.format(value)
except ValueError:
return value
# Iterate rows and convert values by choice lists and field formats.
final_list = []
for row in values_and_properties_list:
row = list(row)
for position, choice_list in choice_lists.items():
try:
row[position] = text_type(choice_list[row[position]])
except Exception:
row[position] = text_type(row[position])
for pos, style in display_formats.items():
row[pos] = formatter(row[pos], style)
final_list.append(row)
values_and_properties_list = final_list
if display_totals:
display_totals_row = []
fields_and_properties = list(display_field_paths[0 if group else 1:])
for position, value in property_list.items():
fields_and_properties.insert(position, value)
for field in fields_and_properties:
display_totals_row.append(display_totals.get(field, ''))
# Add formatting to display totals.
for pos, style in display_formats.items():
display_totals_row[pos] = formatter(display_totals_row[pos], style)
values_and_properties_list.append(
['TOTALS'] + (len(fields_and_properties) - 1) * ['']
)
values_and_properties_list.append(display_totals_row)
return values_and_properties_list, message
def sort_helper(self, value, default):
if value is None:
value = default
if isinstance(value, string_types):
value = value.lower()
return value
class GetFieldsMixin(object):
def get_fields(self, model_class, field_name='', path='', path_verbose=''):
""" Get fields and meta data from a model
:param model_class: A django model class
:param field_name: The field name to get sub fields from
:param path: path of our field in format
field_name__second_field_name__ect__
:param path_verbose: Human readable version of above
:returns: Returns fields and meta data about such fields
fields: Django model fields
custom_fields: fields from django-custom-field if installed
properties: Any properties the model has
path: Our new path
path_verbose: Our new human readable path
:rtype: dict
"""
fields = get_direct_fields_from_model(model_class)
properties = get_properties_from_model(model_class)
custom_fields = get_custom_fields_from_model(model_class)
app_label = model_class._meta.app_label
model = model_class
if field_name != '':
field = model_class._meta.get_field(field_name)
direct = field.concrete
if path_verbose:
path_verbose += "::"
# TODO: need actual model name to generate choice list (not pluralized field name)
# - maybe store this as a separate value?
if field.many_to_many and hasattr(field, 'm2m_reverse_field_name'):
path_verbose += field.m2m_reverse_field_name()
else:
path_verbose += field.name
path += field_name
path += '__'
if direct:
new_model = field.related_model
path_verbose = new_model.__name__.lower()
else: # Indirect related field
new_model = field.related_model
path_verbose = new_model.__name__.lower()
fields = get_direct_fields_from_model(new_model)
custom_fields = get_custom_fields_from_model(new_model)
properties = get_properties_from_model(new_model)
app_label = new_model._meta.app_label
model = new_model
return {
'fields': fields,
'custom_fields': custom_fields,
'properties': properties,
'path': path,
'path_verbose': path_verbose,
'app_label': app_label,
'model': model,
}
def get_related_fields(self, model_class, field_name, path="", path_verbose=""):
""" Get fields for a given model """
if field_name:
field = model_class._meta.get_field(field_name)
direct = field.concrete
if direct:
try:
related_field = field.remote_field
except AttributeError:
# Needed for Django < 1.9
related_field = field.related
try:
new_model = related_field.parent_model()
except AttributeError:
new_model = related_field.model
else:
# Indirect related field
new_model = field.related_model
if path_verbose:
path_verbose += "::"
path_verbose += field.name
path += field_name
path += '__'
else:
new_model = model_class
new_fields = get_relation_fields_from_model(new_model)
model_ct = ContentType.objects.get_for_model(new_model)
return (new_fields, model_ct, path)
|
the-stack_0_13315 | import torch
import torch.nn as nn
from .bap import BAP
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck_bk(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck_bk, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, use_bap=False):
super(Bottleneck, self).__init__()
## add by zengh
self.use_bap = use_bap
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
if self.use_bap:
self.bap = BAP()
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x ## feature map
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
feature_map = out
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if self.use_bap:
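            # Bilinear Attention Pooling branch: the first 32 channels of the
            # post-conv2 activation serve as attention maps over the conv1
            # feature map, and the pooled features are returned early instead
            # of completing the residual block.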
attention = out[:,:32,:,:]
raw_features,pooling_features = self.bap(feature_map,attention)
return attention,raw_features,pooling_features
out = self.conv3(out)
out = self.bn3(out)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None,use_bap = False):
super(ResNet, self).__init__()
self.use_bap = use_bap
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2],use_bap=use_bap)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes)
self.fc_new = nn.Linear(512*32,num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False, use_bap = False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
# if use_bap:
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer,use_bap=use_bap))
if use_bap:
return nn.Sequential(*layers)
for _ in range(2, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.use_bap:
attention,raw_features,x = x
# print(attention.shape,raw_features.shape,x.shape)
if not self.use_bap:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc_new(x)
if self.use_bap:
return attention,raw_features,x
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
pretrained_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model_dict = model.state_dict()
state_dict = {k:v for k,v in pretrained_dict.items() if k in model_dict.keys()}
# model.load_state_dict(state_dict)
model_dict.update(state_dict)
model.load_state_dict(model_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
if __name__ == '__main__':
net = resnet50(use_bap=True,pretrained=True)
input = torch.Tensor(4,3,224,224)
out = net(input)
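    # With use_bap=True the forward pass returns (attention, raw_features, logits).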
# print(net) |
the-stack_0_13323 | import pickle
import torch
from model import RNN
def read_metadata(metadata_path):
with open(metadata_path, 'rb') as f:
metadata = pickle.load(f)
input_stoi = metadata['input_stoi']
label_itos = metadata['label_itos']
return input_stoi, label_itos
def load_model(model_path, input_stoi):
model = RNN(
len(set(input_stoi.values())), 100, 256, 1,
2, True, 0.5, input_stoi['<pad>']
)
model.load_state_dict(torch.load(model_path))
model = model.eval()
return model
def predict_sentiment(sentence, model_path, metadata_path):
print ('Fetching Meta-Data')
input_stoi, label_itos = read_metadata(metadata_path)
print('Meta Data Loaded')
model = load_model(model_path, input_stoi)
print('Tokenization')
tokenized = [tok for tok in sentence.split()]
indexed = [input_stoi[t] for t in tokenized]
tensor = torch.LongTensor(indexed)
tensor = tensor.unsqueeze(1)
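    # tensor is now [seq_len, 1]: a single-example batch in batch-second layout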
length_tensor = torch.LongTensor([len(indexed)])
print('Parsing through Model')
prediction = torch.sigmoid(model(tensor, length_tensor))
print('prediction-',prediction)
return label_itos[round(prediction.item())] |
the-stack_0_13326 | import json
import os
from botocore.exceptions import ClientError
from typing import Dict, Any, List
from pprint import pprint
from datetime import datetime, timedelta
import uuid
from collections import namedtuple
from .create_processes_metric_image_util import generate_processes_metrics_image
AlarmStateChangeData = namedtuple('AlarmStateChangeData', [
'period', 'queryDate', 'recentDatapoints', 'startDate', 'statistic', 'threshold', 'version','evaluatedDatapoints'])
INSTANCE_ID = slice(0, 19)
def create_metric_images_urls(alarm_details, metric_names, aws_services, instance_type):
'''
This function generates metric images.
'''
metric_images_urls: Dict[str, str] = {}
try:
alarm_name: str = alarm_details['AlarmName']
instance_id: str = alarm_name[INSTANCE_ID]
metric_alarms_new_state_details: Dict[str, Any] = get_alarms_new_state_data(
alarm_details, aws_services)
for name in metric_names:
image_url = generate_processes_metrics_image(instance_type, instance_id, name, metric_alarms_new_state_details['CPUUtilization'], aws_services) \
if 'procstat' in name else generate_metric_image(instance_id, name, metric_alarms_new_state_details[name], aws_services)
print(f'{name} metric image url of instance {instance_id}.')
print(f'{image_url}')
if image_url is not None:
metric_images_urls[name] = image_url
except (Exception, ClientError) as err:
print(err)
print(
f'Failed to generate {metric_names} metric images of instance {instance_id} because of above err.')
raise err
else:
return metric_images_urls
def get_alarms_new_state_data(alarm_details: Dict[str, Any], aws_services: Dict[str, Any]) -> Dict[str, Any]:
print('Get alarms history.')
cloudwatch_resource = aws_services['cloudwatch_resource']
child_alarms_details: List[Dict[str, Any]
] = alarm_details['TriggeringChildren']
alarm_names: List[str] = []
today = datetime.utcnow()
year, month, day = today.year, today.month, today.day
alarms_new_state: Dict[str, Any] = {}
try:
for alarm in child_alarms_details:
_, _, _, _, _, _, alarm_name = alarm['Arn'].split(':')
alarm_names.append(alarm_name)
print(alarm_names)
for alarm_name in alarm_names:
alarm = cloudwatch_resource.Alarm(alarm_name)
history: Dict[str, Any] = alarm.describe_history(AlarmTypes=[
'MetricAlarm',
],
HistoryItemType='StateUpdate',
#StartDate=datetime(year, month, day),
#EndDate=datetime.utcnow(),
MaxRecords=1,#Get the record of transition from OK to ALARM.
ScanBy='TimestampDescending')
for item in history['AlarmHistoryItems']:
print(item['AlarmName'])
history_data: Dict[str, Any] = json.loads(item['HistoryData'])
print(history_data)
new_state_data: Dict[str, Any] = history_data['newState'][
'stateReasonData'] if history_data['newState']['stateValue'] == 'ALARM' else None
if new_state_data is not None:
alarms_new_state['CPUUtilization' if 'CPUUtilization' in alarm_name else 'CPUCreditBalance'] = {'stateReason': history_data['newState']['stateReason'],
'stateReasonData': AlarmStateChangeData(**new_state_data)}
except Exception as err:
print(err)
print(
f'Failed to retrieve new state data of {alarm_names} from history.')
pprint(alarms_new_state)
return alarms_new_state
def generate_metric_image(instance_id: str, metric_name: str, alarm_new_state: Dict[str, Any], aws_services: Dict[str, Any]) -> str:
try:
aws_region: str = os.environ.get('AWS_REGION')
cloudwatch_client = aws_services['cloudwatch_client']
s3_bucket: str = os.environ.get('S3_BUCKET_TO_STORE_GENERATED_IMAGES')
        horizontal_annotation: List[Dict[str, Any]] = []
horizontal_annotation.append({
"color": "#ff6961",
"label": '{}'.format(alarm_new_state['stateReason']),
# "fill": "above",
"value": float('{}'.format(alarm_new_state['stateReasonData'].threshold))
})
for datapoint in alarm_new_state['stateReasonData'].recentDatapoints:
horizontal_annotation.append({
"color": "#ff6961",
"label": datapoint,
# "fill": "above",
"value": float(datapoint)
})
        metric_request: Dict[str, Any] = {
"metrics": [
["AWS/EC2",
f'{metric_name}',
"InstanceId", f'{instance_id}',
{
"stat": '{}'.format(alarm_new_state['stateReasonData'].statistic),
"period": int('{}'.format(alarm_new_state['stateReasonData'].period))
}]
],
"height": 1024,
"width": 1024,
# "timezone": "+1100",
"start": "-PT3H",
"end": "+PT1H",
"liveData": True,
"annotations": {
"horizontal": horizontal_annotation,
"vertical": [
{
"color": "#9467bd",
"label": "start",
# "value":"2018-08-28T15:25:26Z",
# "value": (datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")),
"value": datetime.strptime('{}'.format(alarm_new_state['stateReasonData'].startDate), "%Y-%m-%dT%H:%M:%S.%f+0000").strftime("%Y-%m-%dT%H:%M:%SZ"),
# "fill": "after"
},
{
"color": "#9467bd",
"value": datetime.strptime('{}'.format(alarm_new_state['stateReasonData'].queryDate), "%Y-%m-%dT%H:%M:%S.%f+0000").strftime("%Y-%m-%dT%H:%M:%SZ"),
"label": "end"
}
]
}
}
print(f'{metric_request}')
response = cloudwatch_client.get_metric_widget_image(
MetricWidget=json.dumps(metric_request)
# OutputFormat='string'
)
image_name: str = f'{uuid.uuid4().hex}.jpeg'
upload_image_to_s3(
image_name, response["MetricWidgetImage"], aws_services)
except Exception as err:
print(err)
print('Failed because of above error.')
else:
return f'https://{s3_bucket}.s3-{aws_region}.amazonaws.com/{image_name}'
def upload_image_to_s3(image_name: str, image: bytearray, aws_services: Dict[str, Any]):
try:
s3_resource = aws_services['s3_resource']
s3_bucket: str = os.environ.get('S3_BUCKET_TO_STORE_GENERATED_IMAGES')
bucket = s3_resource.Bucket(f'{s3_bucket}')
bucket.put_object(Key=image_name,
ACL='public-read',
Body=image,
ContentType='image/jpeg'
)
except Exception as err:
print(err)
print('Failed because of above error')
|
the-stack_0_13329 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from PooledEffect import PooledEffect
from EffectController import EffectController
import os
class HealSparks(PooledEffect, EffectController):
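    # Pooled, sprite-based spark particle effect; judging by the class name it is used as a heal visual.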
cardScale = 64.0
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/particleSpark')
self.setDepthWrite(0)
self.setLightOff()
self.setFogOff()
self.setColorScaleOff()
self.effectColor = Vec4(1, 1, 1, 1)
self.f = ParticleEffect.ParticleEffect('HealSparks')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('SphereVolumeEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(64)
self.p0.setBirthRate(0.05)
self.p0.setLitterSize(4)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(0.5)
self.p0.factory.setLifespanSpread(0.25)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
self.p0.renderer.setUserAlpha(1.0)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1, 1, 1, 1))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(0)
self.p0.renderer.setInitialXScale(0.001 * self.cardScale)
self.p0.renderer.setFinalXScale(0.004 * self.cardScale)
self.p0.renderer.setInitialYScale(0.001 * self.cardScale)
self.p0.renderer.setFinalYScale(0.005 * self.cardScale)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(0.0)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.p0.emitter.setRadius(1.0)
def createTrack(self, delay=0.0):
self.p0.renderer.setInitialXScale(0.001 * self.cardScale)
self.p0.renderer.setFinalXScale(0.004 * self.cardScale)
self.p0.renderer.setInitialYScale(0.001 * self.cardScale)
self.p0.renderer.setFinalYScale(0.005 * self.cardScale)
self.startEffect = Sequence(Wait(delay), Func(self.p0.clearToInitial), Func(self.p0.softStart), Func(self.f.start, self, self))
self.endEffect = Sequence(Func(self.p0.softStop), Wait(2.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(3.0), self.endEffect)
def setEffectColor(self, color):
self.effectColor = color
self.p0.renderer.getColorInterpolationManager().clearToInitial()
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, self.effectColor * 2.0, self.effectColor, 1)
def play(self, delay=0.0):
self.createTrack(delay)
self.track.start()
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
self.adjustIval = None
return |
the-stack_0_13331 | #!/usr/bin/python
"""
Script to upload images to wikipedia.
The following parameters are supported:
-keep Keep the filename as is
-filename: Target filename without the namespace prefix
-prefix: Add specified prefix to every filename.
-noverify Do not ask for verification of the upload description if one
is given
-abortonwarn: Abort upload on the specified warning type. If no warning type
is specified, aborts on any warning.
-ignorewarn: Ignores specified upload warnings. If no warning type is
specified, ignores all warnings. Use with caution
-chunked: Upload the file in chunks (more overhead, but restartable). If
no value is specified the chunk size is 1 MiB. The value must
be a number which can be preceded by a suffix. The units are:
No suffix: Bytes
'k': Kilobytes (1000 B)
'M': Megabytes (1000000 B)
'Ki': Kibibytes (1024 B)
'Mi': Mebibytes (1024x1024 B)
The suffixes are case insensitive.
-always Don't ask the user anything. This will imply -keep and
-noverify and require that either -abortonwarn or -ignorewarn
is defined for all. It will also require a valid file name and
description. It'll only overwrite files if -ignorewarn includes
the 'exists' warning.
-recursive When the filename is a directory it also uploads the files from
the subdirectories.
-summary: Pick a custom edit summary for the bot.
-descfile: Specify a filename where the description is stored
It is possible to combine -abortonwarn and -ignorewarn so that if the specific
warning is given it won't apply the general one but more specific one. So if it
should ignore specific warnings and abort on the rest it's possible by defining
no warning for -abortonwarn and the specific warnings for -ignorewarn. The
order does not matter. If both are unspecific or a warning is specified by
both, it'll prefer aborting.
If any other arguments are given, the first is either URL, filename or
directory to upload, and the rest is a proposed description to go with the
upload. If none of these are given, the user is asked for the directory, file
or URL to upload. The bot will then upload the image to the wiki.
The script will ask for the location of an image(s), if not given as a
parameter, and for a description.
"""
#
# (C) Pywikibot team, 2003-2020
#
# Distributed under the terms of the MIT license.
#
import codecs
import math
import os
import re
import pywikibot
from pywikibot.bot import suggest_help
from pywikibot.specialbots import UploadRobot
CHUNK_SIZE_REGEX = re.compile(
r'-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$', re.I)
def get_chunk_size(match) -> int:
"""Get chunk size."""
if not match:
pywikibot.error('Chunk size parameter is not valid.')
chunk_size = 0
elif match.group(1): # number was in there
base = float(match.group(1))
if match.group(2): # suffix too
suffix = match.group(2).lower()
if suffix == 'k':
suffix = 1000
elif suffix == 'm':
suffix = 1000000
elif suffix == 'ki':
suffix = 1 << 10
elif suffix == 'mi':
suffix = 1 << 20
else:
suffix = 1
chunk_size = math.trunc(base * suffix)
else:
chunk_size = 1 << 20 # default to 1 MiB
return chunk_size
def main(*args) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
url = ''
description = []
summary = None
keep_filename = False
always = False
use_filename = None
filename_prefix = None
verify_description = True
aborts = set()
ignorewarn = set()
chunk_size = 0
recursive = False
description_file = None
# process all global bot args
# returns a list of non-global args, i.e. args for upload.py
local_args = pywikibot.handle_args(args)
for option in local_args:
arg, _, value = option.partition(':')
if arg == '-always':
keep_filename = True
always = True
verify_description = False
elif arg == '-recursive':
recursive = True
elif arg == '-keep':
keep_filename = True
elif arg == '-filename':
use_filename = value
elif arg == '-prefix':
filename_prefix = value
elif arg == '-summary':
summary = value
elif arg == '-noverify':
verify_description = False
elif arg == '-abortonwarn':
if value and aborts is not True:
aborts.add(value)
else:
aborts = True
elif arg == '-ignorewarn':
if value and ignorewarn is not True:
ignorewarn.add(value)
else:
ignorewarn = True
elif arg == '-chunked':
match = CHUNK_SIZE_REGEX.match(option)
chunk_size = get_chunk_size(match)
elif arg == '-descfile':
description_file = value
elif not url:
url = option
else:
description.append(option)
description = ' '.join(description)
if description_file:
if description:
pywikibot.error('Both a description and a -descfile were '
'provided. Please specify only one of those.')
return
with codecs.open(description_file,
encoding=pywikibot.config.textfile_encoding) as f:
description = f.read().replace('\r\n', '\n')
while not ('://' in url or os.path.exists(url)):
if not url:
error = 'No input filename given.'
else:
error = 'Invalid input filename given.'
if not always:
error += ' Try again.'
if always:
url = None
break
pywikibot.output(error)
url = pywikibot.input('URL, file or directory where files are now:')
if always and (aborts is not True and ignorewarn is not True
or not description or url is None):
additional = ''
missing = []
if url is None:
missing += ['filename']
additional = error + ' '
if description is None:
missing += ['description']
if aborts is not True and ignorewarn is not True:
additional += ('Either -ignorewarn or -abortonwarn must be '
'defined for all codes. ')
additional += 'Unable to run in -always mode'
suggest_help(missing_parameters=missing, additional_text=additional)
return
if os.path.isdir(url):
file_list = []
for directory_info in os.walk(url):
if not recursive:
# Do not visit any subdirectories
directory_info[1][:] = []
for dir_file in directory_info[2]:
file_list.append(os.path.join(directory_info[0], dir_file))
url = file_list
else:
url = [url]
bot = UploadRobot(url, description=description, use_filename=use_filename,
keep_filename=keep_filename,
verify_description=verify_description, aborts=aborts,
ignore_warning=ignorewarn, chunk_size=chunk_size,
always=always, summary=summary,
filename_prefix=filename_prefix)
bot.run()
if __name__ == '__main__':
main()
|
the-stack_0_13334 | # -*- coding: utf-8 -*-
"""
Module containing utilities to create/manipulate grids.
"""
import logging
import math
from typing import List, Tuple, Union
import geopandas as gpd
import pyproj
import shapely.ops as sh_ops
import shapely.geometry as sh_geom
#-------------------------------------------------------------
# First define/init some general variables/constants
#-------------------------------------------------------------
# Get a logger...
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
#-------------------------------------------------------------
# Grid tile helpers
#-------------------------------------------------------------
def create_grid(
total_bounds: Tuple[float, float, float, float],
nb_columns: int,
nb_rows: int,
crs: Union[pyproj.CRS, str, None]) -> gpd.GeoDataFrame:
xmin, ymin, xmax, ymax = total_bounds
width = (xmax-xmin)/nb_columns
height = (ymax-ymin)/nb_rows
return create_grid3(total_bounds=total_bounds, width=width, height=height, crs=crs)
def create_grid3(
total_bounds: Tuple[float, float, float, float],
width: float,
height: float,
crs: Union[pyproj.CRS, str, None]) -> gpd.GeoDataFrame:
"""
Args:
        total_bounds (Tuple[float, float, float, float]): bounds (xmin, ymin, xmax, ymax) the grid should cover
        width (float): width of each grid cell, in CRS units
        height (float): height of each grid cell, in CRS units
        crs (Union[pyproj.CRS, str, None]): the CRS to assign to the resulting grid
    Returns:
        gpd.GeoDataFrame: geodataframe with one polygon per grid cell
"""
xmin, ymin, xmax, ymax = total_bounds
rows = int(math.ceil((ymax-ymin) / height))
cols = int(math.ceil((xmax-xmin) / width))
polygons = []
cell_left = xmin
cell_right = xmin + width
for _ in range(cols):
if cell_left > xmax:
break
cell_top = ymin + height
cell_bottom = ymin
for _ in range(rows):
if cell_bottom > ymax:
break
polygons.append(sh_ops.Polygon([(cell_left, cell_top), (cell_right, cell_top), (cell_right, cell_bottom), (cell_left, cell_bottom)]))
cell_top += height
cell_bottom += height
cell_left += width
cell_right += width
return gpd.GeoDataFrame({'geometry': polygons}, crs=crs)
def create_grid2(
total_bounds: Tuple[float, float, float, float],
nb_squarish_tiles: int,
crs: Union[pyproj.CRS, str, None],
nb_squarish_tiles_max: int = None) -> gpd.GeoDataFrame:
"""
    Creates a grid, approximating the requested number of cells as closely as
    possible with grid cells that are as close to square as possible.
Args:
total_bounds (Tuple[float, float, float, float]): bounds of the grid to be created
        nb_squarish_tiles (int): the approximate number of tiles wanted
        crs (CRS): the projection to create the grid in
        nb_squarish_tiles_max (int, optional): the maximum number of tiles allowed
Returns:
gpd.GeoDataFrame: geodataframe with the grid
"""
# Check input
if nb_squarish_tiles_max is not None and nb_squarish_tiles_max < 1:
raise Exception("The maximum nb of tiles should be larger than 1")
# If more cells asked, calculate optimal number
xmin, ymin, xmax, ymax = total_bounds
total_width = xmax-xmin
total_height = ymax-ymin
columns_vs_rows = total_width/total_height
nb_rows = max(round(math.sqrt(nb_squarish_tiles/columns_vs_rows)), 1)
# Evade having too many cells (if few cells are asked)
if nb_rows > nb_squarish_tiles:
nb_rows = nb_squarish_tiles
nb_columns = max(round(nb_squarish_tiles/nb_rows), 1)
# If a maximum number of tiles is specified, check it
if nb_squarish_tiles_max is not None:
while((nb_rows * nb_columns) > nb_squarish_tiles_max):
# If the number of cells became larger than the max number of cells,
# increase the number of cells in the direction of the longest side
# of the resulting cells
if(nb_columns > 1
and (nb_rows == 1
or total_width/nb_columns > total_height/nb_rows)):
# Cell width is larger than cell height
nb_columns -= 1
else:
nb_rows -= 1
# Now we know everything to create the grid
return create_grid(
total_bounds=total_bounds,
nb_columns=nb_columns,
nb_rows=nb_rows,
crs=crs)
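# Example (illustrative; the bounds and EPSG code below are made up):
#   grid_gdf = create_grid2(
#       total_bounds=(0.0, 0.0, 1000.0, 600.0),
#       nb_squarish_tiles=10,
#       crs="epsg:31370")
#   # grid_gdf is a GeoDataFrame with roughly 10 near-square polygon tiles covering the bounds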
def split_tiles(
input_tiles: gpd.GeoDataFrame,
nb_tiles_wanted: int) -> gpd.GeoDataFrame:
nb_tiles = len(input_tiles)
if nb_tiles >= nb_tiles_wanted:
return input_tiles
nb_tiles_ratio_target = nb_tiles_wanted / nb_tiles
# Loop over all tiles in the grid
result_tiles = []
for tile in input_tiles.itertuples():
        # For this tile, keep splitting as long as curr_nb_tiles_ratio_todo is greater than 1
curr_nb_tiles_ratio_todo = nb_tiles_ratio_target
curr_tiles_being_split = [tile.geometry]
while curr_nb_tiles_ratio_todo > 1:
# Check in how many parts the tiles are split in this iteration
divisor = 0
if round(curr_nb_tiles_ratio_todo) == 3:
divisor = 3
else:
divisor = 2
curr_nb_tiles_ratio_todo /= divisor
# Split all current tiles
tmp_tiles_after_split = []
for tile_to_split in curr_tiles_being_split:
xmin, ymin, xmax, ymax = tile_to_split.bounds
width = abs(xmax-xmin)
height = abs(ymax-ymin)
# Split in 2 or 3...
if divisor == 3:
if width > height:
split_line = sh_geom.LineString([
(xmin+width/3, ymin-1), (xmin+width/3, ymax+1),
(xmin+2*width/3, ymax+1), (xmin+2*width/3, ymin-1)])
else:
split_line = sh_geom.LineString([
(xmin-1, ymin+height/3), (xmax+1, ymin+height/3),
(xmax+1, ymin+2*height/3), (xmin-1, ymin+2*height/3)])
else:
if width > height:
split_line = sh_geom.LineString([(xmin+width/2, ymin-1), (xmin+width/2, ymax+1)])
else:
split_line = sh_geom.LineString([(xmin-1, ymin+height/2), (xmax+1, ymin+height/2)])
tmp_tiles_after_split.extend(sh_ops.split(tile_to_split, split_line))
curr_tiles_being_split = tmp_tiles_after_split
result_tiles.extend(curr_tiles_being_split)
# We should be ready...
return gpd.GeoDataFrame(geometry=result_tiles, crs=input_tiles.crs)
|
the-stack_0_13336 | import numpy as np
import pytest
import pandas as pd
import pandas.testing as tm
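# Tests for the dropna keyword of groupby (GH 3729): NA group keys are dropped by default
# and kept as their own group when dropna=False.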
@pytest.mark.parametrize(
"dropna, tuples, outputs",
[
(
True,
[["A", "B"], ["B", "A"]],
{"c": [13.0, 123.23], "d": [13.0, 123.0], "e": [13.0, 1.0]},
),
(
False,
[["A", "B"], ["A", np.nan], ["B", "A"]],
{
"c": [13.0, 12.3, 123.23],
"d": [13.0, 233.0, 123.0],
"e": [13.0, 12.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_one_group(
dropna, tuples, outputs, nulls_fixture
):
# GH 3729 this is to test that NA is in one group
df_list = [
["A", "B", 12, 12, 12],
["A", nulls_fixture, 12.3, 233.0, 12],
["B", "A", 123.23, 123, 1],
["A", "B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
mi = mi.set_levels(["A", "B", np.nan], level="b")
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"dropna, tuples, outputs",
[
(
True,
[["A", "B"], ["B", "A"]],
{"c": [12.0, 123.23], "d": [12.0, 123.0], "e": [12.0, 1.0]},
),
(
False,
[["A", "B"], ["A", np.nan], ["B", "A"], [np.nan, "B"]],
{
"c": [12.0, 13.3, 123.23, 1.0],
"d": [12.0, 234.0, 123.0, 1.0],
"e": [12.0, 13.0, 1.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_two_groups(
dropna, tuples, outputs, nulls_fixture, nulls_fixture2
):
    # GH 3729 this is to test NA values appearing in different groups, with different null representations
df_list = [
["A", "B", 12, 12, 12],
["A", nulls_fixture, 12.3, 233.0, 12],
["B", "A", 123.23, 123, 1],
[nulls_fixture2, "B", 1, 1, 1.0],
["A", nulls_fixture2, 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]])
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"dropna, idx, outputs",
[
(True, ["A", "B"], {"b": [123.23, 13.0], "c": [123.0, 13.0], "d": [1.0, 13.0]}),
(
False,
["A", "B", np.nan],
{
"b": [123.23, 13.0, 12.3],
"c": [123.0, 13.0, 233.0],
"d": [1.0, 13.0, 12.0],
},
),
],
)
def test_groupby_dropna_normal_index_dataframe(dropna, idx, outputs):
# GH 3729
df_list = [
["B", 12, 12, 12],
[None, 12.3, 233.0, 12],
["A", 123.23, 123, 1],
["B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d"])
grouped = df.groupby("a", dropna=dropna).sum()
expected = pd.DataFrame(outputs, index=pd.Index(idx, dtype="object", name="a"))
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"dropna, idx, expected",
[
(True, ["a", "a", "b", np.nan], pd.Series([3, 3], index=["a", "b"])),
(
False,
["a", "a", "b", np.nan],
pd.Series([3, 3, 3], index=["a", "b", np.nan]),
),
],
)
def test_groupby_dropna_series_level(dropna, idx, expected):
ser = pd.Series([1, 2, 3, 3], index=idx)
result = ser.groupby(level=0, dropna=dropna).sum()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, expected",
[
(True, pd.Series([210.0, 350.0], index=["a", "b"], name="Max Speed")),
(
False,
pd.Series([210.0, 350.0, 20.0], index=["a", "b", np.nan], name="Max Speed"),
),
],
)
def test_groupby_dropna_series_by(dropna, expected):
ser = pd.Series(
[390.0, 350.0, 30.0, 20.0],
index=["Falcon", "Falcon", "Parrot", "Parrot"],
name="Max Speed",
)
result = ser.groupby(["a", "b", "a", np.nan], dropna=dropna).mean()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, tuples, outputs",
[
(
True,
[["A", "B"], ["B", "A"]],
{"c": [13.0, 123.23], "d": [12.0, 123.0], "e": [1.0, 1.0]},
),
(
False,
[["A", "B"], ["A", np.nan], ["B", "A"]],
{
"c": [13.0, 12.3, 123.23],
"d": [12.0, 233.0, 123.0],
"e": [1.0, 12.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs):
# GH 3729
df_list = [
["A", "B", 12, 12, 12],
["A", None, 12.3, 233.0, 12],
["B", "A", 123.23, 123, 1],
["A", "B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
agg_dict = {"c": sum, "d": max, "e": "min"}
grouped = df.groupby(["a", "b"], dropna=dropna).agg(agg_dict)
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
mi = mi.set_levels(["A", "B", np.nan], level="b")
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"datetime1, datetime2",
[
(pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")),
(pd.Timedelta("-2 days"), pd.Timedelta("-1 days")),
(pd.Period("2020-01-01"), pd.Period("2020-02-01")),
],
)
@pytest.mark.parametrize(
"dropna, values", [(True, [12, 3]), (False, [12, 3, 6],)],
)
def test_groupby_dropna_datetime_like_data(
dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2
):
# 3729
df = pd.DataFrame(
{
"values": [1, 2, 3, 4, 5, 6],
"dt": [
datetime1,
unique_nulls_fixture,
datetime2,
unique_nulls_fixture2,
datetime1,
datetime1,
],
}
)
if dropna:
indexes = [datetime1, datetime2]
else:
indexes = [datetime1, datetime2, np.nan]
grouped = df.groupby("dt", dropna=dropna).agg({"values": sum})
expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt"))
tm.assert_frame_equal(grouped, expected)
|
the-stack_0_13337 | import argparse
import json
import torch
from scripts.default_config import (get_default_config, imagedata_kwargs,
model_kwargs, merge_from_files_with_base)
import torchreid
from torchreid.utils import collect_env_info, set_random_seed
from ptflops import get_model_complexity_info
def build_datamanager(cfg, classification_classes_filter=None):
return torchreid.data.ImageDataManager(filter_classes=classification_classes_filter, **imagedata_kwargs(cfg))
def reset_config(cfg, args):
if args.root:
cfg.data.root = args.root
if args.custom_roots:
cfg.custom_datasets.roots = args.custom_roots
if args.custom_types:
cfg.custom_datasets.types = args.custom_types
if args.custom_names:
cfg.custom_datasets.names = args.custom_names
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config-file', type=str, default='', required=True,
help='path to config file')
parser.add_argument('--custom-roots', type=str, nargs='+',
help='types or paths to annotation of custom datasets (delimited by space)')
parser.add_argument('--custom-types', type=str, nargs='+',
help='path of custom datasets (delimited by space)')
parser.add_argument('--custom-names', type=str, nargs='+',
help='names of custom datasets (delimited by space)')
parser.add_argument('--root', type=str, default='', help='path to data root')
parser.add_argument('--classes', type=str, nargs='+',
help='name of classes in classification dataset')
parser.add_argument('--out')
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
help='Modify config options using the command-line')
args = parser.parse_args()
cfg = get_default_config()
cfg.use_gpu = torch.cuda.is_available()
if args.config_file:
merge_from_files_with_base(cfg, args.config_file)
reset_config(cfg, args)
cfg.merge_from_list(args.opts)
set_random_seed(cfg.train.seed)
print('Show configuration\n{}\n'.format(cfg))
print('Collecting env info ...')
print('** System info **\n{}\n'.format(collect_env_info()))
if cfg.use_gpu:
torch.backends.cudnn.benchmark = True
datamanager = build_datamanager(cfg, args.classes)
num_train_classes = datamanager.num_train_pids
print('Building main model: {}'.format(cfg.model.name))
model = torchreid.models.build_model(**model_kwargs(cfg, num_train_classes))
macs, num_params = get_model_complexity_info(model, (3, cfg.data.height, cfg.data.width),
as_strings=False, verbose=False, print_per_layer_stat=False)
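    # ptflops returns multiply-accumulate counts (MACs); one MAC is counted as two FLOPs below, hence macs * 2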
print('Main model complexity: M params={:,} G flops={:,}'.format(num_params / 10**6, macs * 2 / 10**9))
if args.out:
out = list()
out.append({'key': 'size', 'display_name': 'Size', 'value': num_params / 10**6, 'unit': 'Mp'})
out.append({'key': 'complexity', 'display_name': 'Complexity', 'value': 2 * macs / 10**9,
'unit': 'GFLOPs'})
print('dump to' + args.out)
with open(args.out, 'w') as write_file:
json.dump(out, write_file, indent=4)
if __name__ == '__main__':
main()
|
the-stack_0_13340 | import torch
from torch.ao.quantization.observer import ObserverBase
class ModelReportObserver(ObserverBase):
r"""This observer is used to record additional information regarding keeping track
of S = average_batch_activation_range/epoch_activation_range.
The purpose of this information is to prepare a report to present to users on whether
Dynamic or Static Quantization is more appropriate for their model given the general
distributions of their data.
* :attr:`num_batches_tracked` specifies number of batches passed through the observer
* :attr:`average_batch_activation_range` defines average across the ranges of each batch passed through
* :attr:`epoch_activation_min` defines the minimum value passed through the observer
* :attr:`epoch_activation_max` defines the maximum value passed through the observer
Note: this tool is meant for FX Graph Mode Quantization
"""
def __init__(self):
super().__init__(torch.qint8)
self.num_batches_tracked = 0
# keep track of the min and mix of the range for average batch and epoch as a whole
self.average_batch_activation_range = torch.tensor(float(0))
self.epoch_activation_min = torch.tensor(float("inf"))
self.epoch_activation_max = torch.tensor(float("-inf"))
def forward(self, x):
x_copy = x.detach() # avoid keeping autograd tape
x_copy = x_copy.to(self.epoch_activation_min.dtype)
min_val_cur, max_val_cur = torch.aminmax(x_copy)
# calculate new epoch range values
epoch_min_val = torch.min(self.epoch_activation_min, min_val_cur)
epoch_max_val = torch.max(self.epoch_activation_max, max_val_cur)
self.epoch_activation_min.copy_(epoch_min_val)
self.epoch_activation_max.copy_(epoch_max_val)
# calculate the average batch activation range
current_batch_range = max_val_cur - min_val_cur
new_range = (
self.average_batch_activation_range * self.num_batches_tracked
+ current_batch_range
) / (self.num_batches_tracked + 1)
self.average_batch_activation_range = new_range
self.num_batches_tracked += 1 # new batch was processed
# return the passed in the value
return x
@torch.jit.export
def get_batch_to_epoch_ratio(self):
epoch_activation_range = self.epoch_activation_max - self.epoch_activation_min
if epoch_activation_range == torch.tensor(float(0)):
raise ValueError("Range for Epoch is 0")
elif epoch_activation_range == torch.tensor(float("inf")):
raise ValueError(
"No data has been run through observer or infinity value present"
)
else:
return self.average_batch_activation_range / epoch_activation_range
@torch.jit.export
def reset_batch_and_epoch_values(self):
# set all the values back to their original defaults for a new epoch
self.num_batches_tracked = 0
self.average_batch_activation_range = torch.tensor(float(0))
self.epoch_activation_min = torch.tensor(float("inf"))
self.epoch_activation_max = torch.tensor(float("-inf"))
@torch.jit.export
def calculate_qparams(self):
raise Exception(
"calculate_qparams should not be called for ModelReportObserver"
)
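# Rough usage sketch (hypothetical names, for illustration only):
#   obs = ModelReportObserver()
#   for batch in calibration_batches:      # tensors of activations
#       obs(batch)                         # updates per-batch average range and epoch min/max
#   s = obs.get_batch_to_epoch_ratio()     # S = average_batch_range / epoch_range, consumed by the model report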
|
the-stack_0_13341 | import difflib
import email.parser
import inspect
import json
import os
import re
import sys
import pytest
from .env import H2Conf
class TestPost:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env):
TestPost._local_dir = os.path.dirname(inspect.getfile(TestPost))
H2Conf(env).add_vhost_cgi().install()
assert env.apache_restart() == 0
def local_src(self, fname):
return os.path.join(TestPost._local_dir, fname)
# upload and GET again using curl, compare to original content
def curl_upload_and_verify(self, env, fname, options=None):
url = env.mkurl("https", "cgi", "/upload.py")
fpath = os.path.join(env.gen_dir, fname)
r = env.curl_upload(url, fpath, options=options)
assert r.exit_code == 0, f"{r}"
assert 200 <= r.response["status"] < 300
r2 = env.curl_get(r.response["header"]["location"])
assert r2.exit_code == 0
assert r2.response["status"] == 200
with open(self.local_src(fpath), mode='rb') as file:
src = file.read()
assert src == r2.response["body"]
def test_h2_004_01(self, env):
self.curl_upload_and_verify(env, "data-1k", ["-vvv", "--http1.1"])
self.curl_upload_and_verify(env, "data-1k", ["--http2"])
def test_h2_004_02(self, env):
self.curl_upload_and_verify(env, "data-10k", ["--http1.1"])
self.curl_upload_and_verify(env, "data-10k", ["--http2"])
def test_h2_004_03(self, env):
self.curl_upload_and_verify(env, "data-100k", ["--http1.1"])
self.curl_upload_and_verify(env, "data-100k", ["--http2"])
def test_h2_004_04(self, env):
self.curl_upload_and_verify(env, "data-1m", ["--http1.1"])
self.curl_upload_and_verify(env, "data-1m", ["--http2"])
def test_h2_004_05(self, env):
self.curl_upload_and_verify(env, "data-1k", ["-v", "--http1.1", "-H", "Expect: 100-continue"])
self.curl_upload_and_verify(env, "data-1k", ["-v", "--http2", "-H", "Expect: 100-continue"])
@pytest.mark.skipif(True, reason="python3 regresses in chunked inputs to cgi")
def test_h2_004_06(self, env):
self.curl_upload_and_verify(env, "data-1k", ["--http1.1", "-H", "Content-Length: "])
self.curl_upload_and_verify(env, "data-1k", ["--http2", "-H", "Content-Length: "])
@pytest.mark.parametrize("name, value", [
("HTTP2", "on"),
("H2PUSH", "off"),
("H2_PUSHED", ""),
("H2_PUSHED_ON", ""),
("H2_STREAM_ID", "1"),
("H2_STREAM_TAG", r'\d+-1'),
])
def test_h2_004_07(self, env, name, value):
url = env.mkurl("https", "cgi", "/env.py")
r = env.curl_post_value(url, "name", name)
assert r.exit_code == 0
assert r.response["status"] == 200
m = re.match("{0}=(.*)".format(name), r.response["body"].decode('utf-8'))
assert m
assert re.match(value, m.group(1))
# POST some data using nghttp and see it echo'ed properly back
def nghttp_post_and_verify(self, env, fname, options=None):
url = env.mkurl("https", "cgi", "/echo.py")
fpath = os.path.join(env.gen_dir, fname)
r = env.nghttp().upload(url, fpath, options=options)
assert r.exit_code == 0
assert r.response["status"] >= 200 and r.response["status"] < 300
with open(self.local_src(fpath), mode='rb') as file:
src = file.read()
assert 'request-length' in r.response["header"]
assert int(r.response["header"]['request-length']) == len(src)
if len(r.response["body"]) != len(src):
sys.stderr.writelines(difflib.unified_diff(
src.decode().splitlines(True),
r.response["body"].decode().splitlines(True),
fromfile='source',
tofile='response'
))
assert len(r.response["body"]) == len(src)
assert r.response["body"] == src, f"expected '{src}', got '{r.response['body']}'"
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m"
])
def test_h2_004_21(self, env, name):
self.nghttp_post_and_verify(env, name, [])
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m",
])
def test_h2_004_22(self, env, name, repeat):
self.nghttp_post_and_verify(env, name, ["--no-content-length"])
# upload and GET again using nghttp, compare to original content
def nghttp_upload_and_verify(self, env, fname, options=None):
url = env.mkurl("https", "cgi", "/upload.py")
fpath = os.path.join(env.gen_dir, fname)
r = env.nghttp().upload_file(url, fpath, options=options)
assert r.exit_code == 0
assert r.response["status"] >= 200 and r.response["status"] < 300
assert r.response["header"]["location"]
r2 = env.nghttp().get(r.response["header"]["location"])
assert r2.exit_code == 0
assert r2.response["status"] == 200
with open(self.local_src(fpath), mode='rb') as file:
src = file.read()
assert src == r2.response["body"]
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m"
])
def test_h2_004_23(self, env, name, repeat):
self.nghttp_upload_and_verify(env, name, [])
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m"
])
def test_h2_004_24(self, env, name, repeat):
self.nghttp_upload_and_verify(env, name, ["--expect-continue"])
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m"
])
def test_h2_004_25(self, env, name, repeat):
self.nghttp_upload_and_verify(env, name, ["--no-content-length"])
def test_h2_004_30(self, env):
# issue: #203
resource = "data-1k"
full_length = 1000
chunk = 200
self.curl_upload_and_verify(env, resource, ["-v", "--http2"])
logfile = os.path.join(env.server_logs_dir, "test_004_30")
if os.path.isfile(logfile):
os.remove(logfile)
H2Conf(env).add("""
LogFormat "{ \\"request\\": \\"%r\\", \\"status\\": %>s, \\"bytes_resp_B\\": %B, \\"bytes_tx_O\\": %O, \\"bytes_rx_I\\": %I, \\"bytes_rx_tx_S\\": %S }" issue_203
CustomLog logs/test_004_30 issue_203
""").add_vhost_cgi().install()
assert env.apache_restart() == 0
url = env.mkurl("https", "cgi", "/files/{0}".format(resource))
r = env.curl_get(url, 5, options=["--http2"])
assert r.response["status"] == 200
r = env.curl_get(url, 5, options=["--http1.1", "-H", "Range: bytes=0-{0}".format(chunk-1)])
assert 206 == r.response["status"]
assert chunk == len(r.response["body"].decode('utf-8'))
r = env.curl_get(url, 5, options=["--http2", "-H", "Range: bytes=0-{0}".format(chunk-1)])
assert 206 == r.response["status"]
assert chunk == len(r.response["body"].decode('utf-8'))
# now check what response lengths have actually been reported
lines = open(logfile).readlines()
log_h2_full = json.loads(lines[-3])
log_h1 = json.loads(lines[-2])
log_h2 = json.loads(lines[-1])
assert log_h2_full['bytes_rx_I'] > 0
assert log_h2_full['bytes_resp_B'] == full_length
assert log_h2_full['bytes_tx_O'] > full_length
assert log_h1['bytes_rx_I'] > 0 # input bytes received
assert log_h1['bytes_resp_B'] == chunk # response bytes sent (payload)
assert log_h1['bytes_tx_O'] > chunk # output bytes sent
assert log_h2['bytes_rx_I'] > 0
assert log_h2['bytes_resp_B'] == chunk
assert log_h2['bytes_tx_O'] > chunk
def test_h2_004_40(self, env):
# echo content using h2test_module "echo" handler
def post_and_verify(fname, options=None):
url = env.mkurl("https", "cgi", "/h2test/echo")
fpath = os.path.join(env.gen_dir, fname)
r = env.curl_upload(url, fpath, options=options)
assert r.exit_code == 0
assert r.response["status"] >= 200 and r.response["status"] < 300
ct = r.response["header"]["content-type"]
mail_hd = "Content-Type: " + ct + "\r\nMIME-Version: 1.0\r\n\r\n"
mime_msg = mail_hd.encode() + r.response["body"]
# this MIME API is from hell
body = email.parser.BytesParser().parsebytes(mime_msg)
assert body
assert body.is_multipart()
filepart = None
for part in body.walk():
if fname == part.get_filename():
filepart = part
assert filepart
with open(self.local_src(fpath), mode='rb') as file:
src = file.read()
assert src == filepart.get_payload(decode=True)
post_and_verify("data-1k", [])
|
the-stack_0_13343 | """
PyQt App that leverages completed model for image inpainting
"""
import sys
import os
import random
import torch
import argparse
from PIL import Image
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from torchvision.utils import make_grid
from torchvision.utils import save_image
from torchvision import transforms
from partial_conv_net import PartialConvUNet
from places2_train import unnormalize, MEAN, STDDEV
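# Points with y >= 250 are rejected so strokes stay on the 256x256 image and off the button row below it
# (the bound appears to be chosen from the widget layout).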
def exceeds_bounds(y):
if y >= 250:
return True
else:
return False
class Drawer(QWidget):
newPoint = pyqtSignal(QPoint)
def __init__(self, image_path, parent=None):
QWidget.__init__(self, parent)
self.path = QPainterPath()
self.image_path = image_path
def paintEvent(self, event):
painter = QPainter(self)
painter.drawPixmap(QRect(0, 0, 256, 256), QPixmap(self.image_path))
painter.setPen(QPen(Qt.black, 12))
painter.drawPath(self.path)
def mousePressEvent(self, event):
if exceeds_bounds(event.pos().y()):
return
self.path.moveTo(event.pos())
self.update()
def mouseMoveEvent(self, event):
if exceeds_bounds(event.pos().y()):
return
self.path.lineTo(event.pos())
self.newPoint.emit(event.pos())
self.update()
def sizeHint(self):
return QSize(256, 256)
def resetPath(self):
self.path = QPainterPath()
self.update()
class InpaintApp(QWidget):
def __init__(self, image_num):
super().__init__()
self.setLayout(QVBoxLayout())
self.title = 'Inpaint Application'
self.width = 276
self.height = 350
self.cwd = os.getcwd()
image_num = str(image_num).zfill(8)
image_path = self.cwd + "/val_256/Places365_val_{}.jpg".format(image_num)
self.save_path = self.cwd + "/test.jpg"
self.open_and_save_img(image_path, self.save_path)
self.drawer = Drawer(self.save_path, self)
self.setWindowTitle(self.title)
self.setGeometry(200, 200, self.width, self.height)
self.layout().addWidget(self.drawer)
self.layout().addWidget(QPushButton("Inpaint!", clicked=self.inpaint))
self.img_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(MEAN, STDDEV)])
self.mask_transform = transforms.ToTensor()
self.device = torch.device("cpu")
model_dict = torch.load(self.cwd + "/model_e1_i56358.pth", map_location="cpu")
model = PartialConvUNet()
model.load_state_dict(model_dict["model"])
model = model.to(self.device)
self.model = model
self.model.eval()
self.show()
def open_and_save_img(self, path, dest):
img = Image.open(path)
img.save(dest)
def inpaint(self):
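        # rasterize the drawn path as black strokes on a white 256x256 canvas to form the inpainting mask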
mask = QImage(256, 256, QImage.Format_RGB32)
mask.fill(qRgb(255, 255, 255))
painter = QPainter()
painter.begin(mask)
painter.setPen(QPen(Qt.black, 12))
painter.drawPath(self.drawer.path)
painter.end()
mask.save("mask.png", "png")
# open image and normalize before forward pass
mask = Image.open(self.cwd + "/mask.png")
mask = self.mask_transform(mask.convert("RGB"))
gt_img = Image.open(self.save_path)
gt_img = self.img_transform(gt_img.convert("RGB"))
img = gt_img * mask
# adds dimension of 1 (batch) to image
img.unsqueeze_(0)
gt_img.unsqueeze_(0)
mask.unsqueeze_(0)
# forward pass
with torch.no_grad():
output = self.model(img.to(self.device), mask.to(self.device))
        # composite: keep the known pixels from the input and use the prediction in the masked holes
output = mask * img + (1 - mask) * output
grid = make_grid(unnormalize(output))
save_image(grid, "test.jpg")
self.drawer.resetPath()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--img", type=int, default=1)
args = parser.parse_args()
app = QApplication(sys.argv)
ex = InpaintApp(args.img)
sys.exit(app.exec_()) |
the-stack_0_13345 | import ctypes
import os
from casual.xatmi.xatmi import tpalloc, tpfree, tperrno, tperrnostring, \
X_OCTET, CASUAL_BUFFER_BINARY_TYPE, CASUAL_BUFFER_BINARY_SUBTYPE, \
CASUAL_BUFFER_JSON_TYPE, CASUAL_BUFFER_JSON_SUBTYPE, \
CASUAL_BUFFER_YAML_TYPE, CASUAL_BUFFER_YAML_SUBTYPE, \
CASUAL_BUFFER_XML_TYPE, CASUAL_BUFFER_XML_SUBTYPE
from casual.server.exception import BufferError
BufferTypeMap = {
'x_octet': (X_OCTET, 0),
'binary': (CASUAL_BUFFER_BINARY_TYPE, CASUAL_BUFFER_BINARY_SUBTYPE),
'json': (CASUAL_BUFFER_JSON_TYPE, CASUAL_BUFFER_JSON_SUBTYPE),
'yaml': (CASUAL_BUFFER_YAML_TYPE, CASUAL_BUFFER_YAML_SUBTYPE),
'xml': (CASUAL_BUFFER_XML_TYPE, CASUAL_BUFFER_XML_SUBTYPE)
}
def x_octet():
return BufferTypeMap['x_octet']
def binary():
return BufferTypeMap['binary']
def json():
return BufferTypeMap['json']
def yaml():
return BufferTypeMap['yaml']
def xml():
return BufferTypeMap['xml']
def _convert(data):
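    # str payloads are encoded to bytes; bytes objects have no .encode() and raise AttributeError, so they pass through unchanged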
try:
data = data.encode()
is_bytes = False
except (UnicodeDecodeError, AttributeError):
is_bytes = True
return is_bytes, data
class Buffer(object):
def __init__(self, buffertype, subtype, data=None):
if data:
self.is_bytes, data = _convert( data)
self.size = ctypes.c_long(len(data))
self.holder = tpalloc(buffertype, subtype, self.size)
if self.holder:
self.set(data)
else:
raise BufferError(tperrnostring(tperrno()))
else:
self.size = ctypes.c_long(1024)
self.holder = tpalloc(buffertype, subtype, self.size)
def set(self, data):
ctypes.memmove(self.holder, data, len(data))
def raw(self):
return self.holder
def data(self):
return self.holder[0:self.size.value]
def __del__(self):
if self.holder and tpfree:
tpfree( self.holder)
#
# Supported buffer type
#
class JsonBuffer(Buffer):
def __init__(self, data = None):
buffertype, subtype = json()
try:
super().__init__(buffertype, subtype, data)
except TypeError:
super( JsonBuffer, self).__init__(buffertype, subtype, data)
#
# Supported buffer type
#
class XmlBuffer(Buffer):
def __init__(self, data = None):
buffertype, subtype = xml()
try:
super().__init__(buffertype, subtype, data)
except TypeError:
super( XmlBuffer, self).__init__(buffertype, subtype, data)
def create_buffer(buffer):
theType=type(buffer)
if theType is XmlBuffer:
return XmlBuffer()
elif theType is JsonBuffer:
return JsonBuffer()
else:
raise BufferError("Unknown buffer type")
|
the-stack_0_13348 | #
# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
#
# OpenArkCompiler is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#
from api import *
SUPO0 = {
"compile": [
C2ast(
clang="${OUT_ROOT}/tools/bin/clang",
include_path=[
"${OUT_ROOT}/${MAPLE_BUILD_TYPE}/lib/include",
"${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include",
"${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include",
"../lib"
],
option="--target=aarch64 -U __SIZEOF_INT128__",
infile="${APP}.c",
outfile="${APP}.ast"
),
Mplfe(
hir2mpl="${OUT_ROOT}/${MAPLE_BUILD_TYPE}/bin/hir2mpl",
infile="${APP}.ast",
outfile="${APP}.mpl"
),
Maple(
maple="${OUT_ROOT}/${MAPLE_BUILD_TYPE}/bin/maple",
run=["mplcg"],
option={
"mplcg": "--quiet"
},
global_option="",
infile="${APP}.mpl"
),
CLinker(
infile="${APP}.s",
front_option="-O2 -static -L../lib -std=c89 -s",
outfile="${APP}.out",
back_option="-lst -lm"
)
],
"run": [
Shell(
"${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc ${APP}.out > output.log 2>&1"
),
CheckFileEqual(
file1="output.log",
file2="expected.txt"
)
]
}
|
the-stack_0_13350 | # -*- coding: utf-8 -*-
import sys
import gc
from hypothesis import given
from hypothesis.extra import numpy as hynp
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
assert_raises_regex,
)
import textwrap
class TestArrayRepr:
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([nan, inf])')
def test_subclass(self):
class sub(np.ndarray): pass
# one dimensional
x1d = np.array([1, 2]).view(sub)
assert_equal(repr(x1d), 'sub([1, 2])')
# two dimensional
x2d = np.array([[1, 2], [3, 4]]).view(sub)
assert_equal(repr(x2d),
'sub([[1, 2],\n'
' [3, 4]])')
# two dimensional with flexible dtype
xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
assert_equal(repr(xstruct),
"sub([[(1,), (1,)],\n"
" [(1,), (1,)]], dtype=[('a', '<i4')])"
)
@pytest.mark.xfail(reason="See gh-10544")
def test_object_subclass(self):
class sub(np.ndarray):
def __new__(cls, inp):
obj = np.asarray(inp).view(cls)
return obj
def __getitem__(self, ind):
ret = super().__getitem__(ind)
return sub(ret)
# test that object + subclass is OK:
x = sub([None, None])
assert_equal(repr(x), 'sub([None, None], dtype=object)')
assert_equal(str(x), '[None None]')
x = sub([None, sub([None, None])])
assert_equal(repr(x),
'sub([None, sub([None, None], dtype=object)], dtype=object)')
assert_equal(str(x), '[None sub([None, None], dtype=object)]')
def test_0d_object_subclass(self):
# make sure that subclasses which return 0ds instead
# of scalars don't cause infinite recursion in str
class sub(np.ndarray):
def __new__(cls, inp):
obj = np.asarray(inp).view(cls)
return obj
def __getitem__(self, ind):
ret = super().__getitem__(ind)
return sub(ret)
x = sub(1)
assert_equal(repr(x), 'sub(1)')
assert_equal(str(x), '1')
x = sub([1, 1])
assert_equal(repr(x), 'sub([1, 1])')
assert_equal(str(x), '[1 1]')
# check it works properly with object arrays too
x = sub(None)
assert_equal(repr(x), 'sub(None, dtype=object)')
assert_equal(str(x), 'None')
# plus recursive object arrays (even depth > 1)
y = sub(None)
x[()] = y
y[()] = x
assert_equal(repr(x),
'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
assert_equal(str(x), '...')
x[()] = 0 # resolve circular references for garbage collector
# nested 0d-subclass-object
x = sub(None)
x[()] = sub(None)
assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
assert_equal(str(x), 'None')
# gh-10663
class DuckCounter(np.ndarray):
def __getitem__(self, item):
result = super().__getitem__(item)
if not isinstance(result, DuckCounter):
result = result[...].view(DuckCounter)
return result
def to_string(self):
return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
def __str__(self):
if self.shape == ():
return self.to_string()
else:
fmt = {'all': lambda x: x.to_string()}
return np.array2string(self, formatter=fmt)
dc = np.arange(5).view(DuckCounter)
assert_equal(str(dc), "[zero one two many many]")
assert_equal(str(dc[0]), "zero")
def test_self_containing(self):
arr0d = np.array(None)
arr0d[()] = arr0d
assert_equal(repr(arr0d),
'array(array(..., dtype=object), dtype=object)')
arr0d[()] = 0 # resolve recursion for garbage collector
arr1d = np.array([None, None])
arr1d[1] = arr1d
assert_equal(repr(arr1d),
'array([None, array(..., dtype=object)], dtype=object)')
arr1d[1] = 0 # resolve recursion for garbage collector
first = np.array(None)
second = np.array(None)
first[()] = second
second[()] = first
assert_equal(repr(first),
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
first[()] = 0 # resolve circular references for garbage collector
def test_containing_list(self):
        # printing square brackets directly would be ambiguous
arr1d = np.array([None, None])
arr1d[0] = [1, 2]
arr1d[1] = [3]
assert_equal(repr(arr1d),
'array([list([1, 2]), list([3])], dtype=object)')
def test_void_scalar_recursion(self):
# gh-9345
repr(np.void(b'test')) # RecursionError ?
def test_fieldless_structured(self):
# gh-10366
no_fields = np.dtype([])
arr_no_fields = np.empty(4, dtype=no_fields)
assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
class TestComplexArray:
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
'[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
'[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
'[0.+infj]', '[0.+infj]', '[0.+infj]',
'[0.-infj]', '[0.-infj]', '[0.-infj]',
'[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
'[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
'[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
'[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
'[1.+infj]', '[1.+infj]', '[1.+infj]',
'[1.-infj]', '[1.-infj]', '[1.-infj]',
'[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
'[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
'[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
'[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
'[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
'[inf+infj]', '[inf+infj]', '[inf+infj]',
'[inf-infj]', '[inf-infj]', '[inf-infj]',
'[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
'[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
'[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
'[nan+infj]', '[nan+infj]', '[nan+infj]',
'[nan-infj]', '[nan-infj]', '[nan-infj]',
'[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
for res, val in zip(actual, wanted):
assert_equal(res, val)
class TestArray2String:
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
def test_unexpected_kwarg(self):
# ensure than an appropriate TypeError
# is raised when array2string receives
# an unexpected kwarg
with assert_raises_regex(TypeError, 'nonsense'):
np.array2string(np.array([1, 2, 3]),
nonsense=None)
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
assert_(np.array2string(x, formatter={'all':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
x_hex)
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
"[('Sarah', [8., 7.]) ('John', [6., 7.])]")
np.set_printoptions(legacy='1.13')
try:
# for issue #5692
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
('NaT',) ('NaT',) ('NaT',)]""")
)
finally:
np.set_printoptions(legacy=False)
# same again, but with non-legacy behavior
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ( 'NaT',)
( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',)]""")
)
# and again, with timedeltas
A = np.full(10, 123456, dtype=[("A", "m8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
)
# See #8160
struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
assert_equal(np.array2string(struct_int),
"[([ 1, -1],) ([123, 1],)]")
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
dtype=[('B', 'i4', (2, 2))])
assert_equal(np.array2string(struct_2dint),
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
def test_unstructured_void_repr(self):
a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
assert_equal(repr(a),
r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
assert_equal(eval(repr(a), vars(np)), a)
assert_equal(eval(repr(a[0]), vars(np)), a[0])
def test_edgeitems_kwarg(self):
# previously the global print options would be taken over the kwarg
arr = np.zeros(3, int)
assert_equal(
np.array2string(arr, edgeitems=1, threshold=0),
"[0 ... 0]"
)
def test_summarize_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ... 998 999 1000]'
assert_equal(str(A), strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_equal(repr(A), reprA)
def test_summarize_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ... 498 499 500]\n' \
' [ 501 502 503 ... 999 1000 1001]]'
assert_equal(str(A), strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_equal(repr(A), reprA)
def test_linewidth(self):
a = np.full(6, 1)
def make_str(a, width, **kw):
return np.array2string(a, separator="", max_line_width=width, **kw)
assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
' 11]')
assert_equal(make_str(a, 8), '[111111]')
assert_equal(make_str(a, 7), '[11111\n'
' 1]')
assert_equal(make_str(a, 5), '[111\n'
' 111]')
b = a[None,None,:]
assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
' 1]]]')
assert_equal(make_str(b, 12), '[[[111111]]]')
assert_equal(make_str(b, 9), '[[[111\n'
' 111]]]')
assert_equal(make_str(b, 8), '[[[11\n'
' 11\n'
' 11]]]')
def test_wide_element(self):
a = np.array(['xxxxx'])
assert_equal(
np.array2string(a, max_line_width=5),
"['xxxxx']"
)
assert_equal(
np.array2string(a, max_line_width=5, legacy='1.13'),
"[ 'xxxxx']"
)
def test_multiline_repr(self):
class MultiLine:
def __repr__(self):
return "Line 1\nLine 2"
a = np.array([[None, MultiLine()], [MultiLine(), None]])
assert_equal(
np.array2string(a),
'[[None Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2 None]]'
)
assert_equal(
np.array2string(a, max_line_width=5),
'[[None\n'
' Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2\n'
' None]]'
)
assert_equal(
repr(a),
'array([[None, Line 1\n'
' Line 2],\n'
' [Line 1\n'
' Line 2, None]], dtype=object)'
)
class MultiLineLong:
def __repr__(self):
return "Line 1\nLooooooooooongestLine2\nLongerLine 3"
a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
assert_equal(
repr(a),
'array([[None, Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 , None]], dtype=object)'
)
assert_equal(
np.array_repr(a, 20),
'array([[None,\n'
' Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ,\n'
' None]],\n'
' dtype=object)'
)
def test_nested_array_repr(self):
a = np.empty((2, 2), dtype=object)
a[0, 0] = np.eye(2)
a[0, 1] = np.eye(3)
a[1, 0] = None
a[1, 1] = np.ones((3, 1))
assert_equal(
repr(a),
'array([[array([[1., 0.],\n'
' [0., 1.]]), array([[1., 0., 0.],\n'
' [0., 1., 0.],\n'
' [0., 0., 1.]])],\n'
' [None, array([[1.],\n'
' [1.],\n'
' [1.]])]], dtype=object)'
)
@given(hynp.from_dtype(np.dtype("U")))
def test_any_text(self, text):
# This test checks that, given any value that can be represented in an
# array of dtype("U") (i.e. unicode string), ...
a = np.array([text, text, text])
# casting a list of them to an array does not e.g. truncate the value
assert_equal(a[0], text)
# and that np.array2string puts a newline in the expected location
expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
assert_equal(result, expected_repr)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount(self):
# make sure we do not hold references to the array due to a recursive
# closure (gh-10620)
gc.disable()
a = np.arange(2)
r1 = sys.getrefcount(a)
np.array2string(a)
np.array2string(a)
r2 = sys.getrefcount(a)
gc.collect()
gc.enable()
assert_(r1 == r2)
class TestPrintOptions:
"""Test getting and setting global print options."""
def setup(self):
self.oldopts = np.get_printoptions()
def teardown(self):
np.set_printoptions(**self.oldopts)
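    # setup/teardown save and restore the global print options, so the
    # set_printoptions calls made by individual tests do not leak between them.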
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
def test_precision_zero(self):
np.set_printoptions(precision=0)
for values, string in (
([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
x = np.array(values)
assert_equal(repr(x), "array([%s])" % string)
def test_formatter(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
def test_formatter_reset(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'all':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int_kind':None})
assert_equal(repr(x), "array([0, 1, 2])")
x = np.arange(3.)
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([0., 1., 2.])")
def test_0d_arrays(self):
assert_equal(str(np.array(u'café', '<U4')), u'café')
assert_equal(repr(np.array('café', '<U4')),
"array('café', dtype='<U4')")
assert_equal(str(np.array('test', np.str_)), 'test')
a = np.zeros(1, dtype=[('a', '<i4', (3,))])
assert_equal(str(a[0]), '([0, 0, 0],)')
assert_equal(repr(np.datetime64('2005-02-25')[...]),
"array('2005-02-25', dtype='datetime64[D]')")
assert_equal(repr(np.timedelta64('10', 'Y')[...]),
"array(10, dtype='timedelta64[Y]')")
# repr of 0d arrays is affected by printoptions
x = np.array(1)
np.set_printoptions(formatter={'all':lambda x: "test"})
assert_equal(repr(x), "array(test)")
# str is unaffected
assert_equal(str(x), "1")
# check `style` arg raises
assert_warns(DeprecationWarning, np.array2string,
np.array(1.), style=repr)
# but not in legacy mode
np.array2string(np.array(1.), style=repr, legacy='1.13')
# gh-10934 style was broken in legacy mode, check it works
np.array2string(np.array(1.), legacy='1.13')
def test_float_spacing(self):
x = np.array([1., 2., 3.])
y = np.array([1., 2., -10.])
z = np.array([100., 2., -1.])
w = np.array([-100., 2., 1.])
assert_equal(repr(x), 'array([1., 2., 3.])')
assert_equal(repr(y), 'array([ 1., 2., -10.])')
assert_equal(repr(np.array(y[0])), 'array(1.)')
assert_equal(repr(np.array(y[-1])), 'array(-10.)')
assert_equal(repr(z), 'array([100., 2., -1.])')
assert_equal(repr(w), 'array([-100., 2., 1.])')
assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
x = np.array([np.inf, 100000, 1.1234])
y = np.array([np.inf, 100000, -1.1234])
z = np.array([np.inf, 1.1234, -1e120])
np.set_printoptions(precision=2)
assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
def test_bool_spacing(self):
assert_equal(repr(np.array([True, True])),
'array([ True, True])')
assert_equal(repr(np.array([True, False])),
'array([ True, False])')
assert_equal(repr(np.array([True])),
'array([ True])')
assert_equal(repr(np.array(True)),
'array(True)')
assert_equal(repr(np.array(False)),
'array(False)')
def test_sign_spacing(self):
a = np.arange(4.)
b = np.array([1.234e9])
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array(1.)')
assert_equal(repr(b), 'array([1.234e+09])')
assert_equal(repr(np.array([0.])), 'array([0.])')
assert_equal(repr(c),
"array([1. +1.j , 1.12345679+1.12345679j])")
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign=' ')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array( 1.)')
assert_equal(repr(b), 'array([ 1.234e+09])')
assert_equal(repr(c),
"array([ 1. +1.j , 1.12345679+1.12345679j])")
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign='+')
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
assert_equal(repr(np.array(1.)), 'array(+1.)')
assert_equal(repr(b), 'array([+1.234e+09])')
assert_equal(repr(c),
"array([+1. +1.j , +1.12345679+1.12345679j])")
np.set_printoptions(legacy='1.13')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(b), 'array([ 1.23400000e+09])')
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
assert_equal(repr(np.array(1.)), 'array(1.0)')
assert_equal(repr(np.array([0.])), 'array([ 0.])')
assert_equal(repr(c),
"array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
# gh-10383
assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
def test_float_overflow_nowarn(self):
# make sure internal computations in FloatingFormat don't
# warn about overflow
repr(np.array([1e4, 0.1], dtype='f2'))
def test_sign_spacing_structured(self):
a = np.ones(2, dtype='<f,<f')
assert_equal(repr(a),
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
assert_equal(repr(a[0]), "(1., 1.)")
def test_floatmode(self):
x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
y = np.array([0.2918820979355541, 0.5064172631089138,
0.2848750619642916, 0.4342965294660567,
0.7326538397312751, 0.3459503329096204,
0.0862072768214508, 0.39112753029631175],
dtype=np.float64)
z = np.arange(6, dtype=np.float16)/10
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
# also make sure 1e23 is right (is between two fp numbers)
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
# note: we construct w from the strings `1eXX` instead of doing
# `10.**arange(24)` because it turns out the two are not equivalent in
# python. On some architectures `1e23 != 10.**23`.
wp = np.array([1.234e1, 1e2, 1e123])
# unique mode
np.set_printoptions(floatmode='unique')
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
" 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
" 0.0862072768214508 , 0.39112753029631175])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w),
"array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
" 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
" 1.e+24])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1. +1.j , 1.123456789+1.123456789j])")
# maxprec mode, precision=8
np.set_printoptions(floatmode='maxprec', precision=8)
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1. +1.j , 1.12345679+1.12345679j])")
# fixed mode, precision=4
np.set_printoptions(floatmode='fixed', precision=4)
assert_equal(repr(x),
"array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
" 0.2383, 0.4226], dtype=float16)")
assert_equal(repr(y),
"array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
assert_equal(repr(z),
"array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
assert_equal(repr(c),
"array([1.0000+1.0000j, 1.1235+1.1235j])")
# for larger precision, representation error becomes more apparent:
np.set_printoptions(floatmode='fixed', precision=8)
assert_equal(repr(z),
"array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
" 0.50000000], dtype=float16)")
# maxprec_equal mode, precision=8
np.set_printoptions(floatmode='maxprec_equal', precision=8)
assert_equal(repr(x),
"array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
" 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
assert_equal(repr(y),
"array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
# test unique special case (gh-18609)
a = np.float64.fromhex('-1p-97')
assert_equal(np.float64(np.array2string(a, floatmode='unique')), a)
def test_legacy_mode_scalars(self):
# in legacy mode, str of floats get truncated, and complex scalars
# use * for non-finite imaginary part
np.set_printoptions(legacy='1.13')
assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
np.set_printoptions(legacy=False)
assert_equal(str(np.float64(1.123456789123456789)),
'1.1234567891234568')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
def test_legacy_stray_comma(self):
np.set_printoptions(legacy='1.13')
assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
np.set_printoptions(legacy=False)
assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
def test_dtype_linewidth_wrapping(self):
np.set_printoptions(linewidth=75)
assert_equal(repr(np.arange(10,20., dtype='f4')),
"array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
dtype=float32)"""))
styp = '<U4'
assert_equal(repr(np.ones(3, dtype=styp)),
"array(['1', '1', '1'], dtype='{}')".format(styp))
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
dtype='{}')""".format(styp)))
def test_linewidth_repr(self):
a = np.full(7, fill_value=2)
np.set_printoptions(linewidth=17)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2])""")
)
np.set_printoptions(linewidth=17, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2, 2])""")
)
a = np.full(8, fill_value=2)
np.set_printoptions(linewidth=18, legacy=False)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2, 2])""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2, 2,
2, 2, 2, 2])""")
)
def test_linewidth_str(self):
a = np.full(18, fill_value=2)
np.set_printoptions(linewidth=18)
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2
2 2]""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2]""")
)
def test_edgeitems(self):
np.set_printoptions(edgeitems=1, threshold=1)
a = np.arange(27).reshape((3, 3, 3))
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
b = np.zeros((3, 3, 1, 1))
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[0.]],
...,
[[0.]]],
...,
[[[0.]],
...,
[[0.]]]])""")
)
# 1.13 had extra trailing spaces, and was missing newlines
np.set_printoptions(legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[ 0.]],
...,
[[ 0.]]],
...,
[[[ 0.]],
...,
[[ 0.]]]])""")
)
def test_bad_args(self):
assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
assert_raises(TypeError, np.set_printoptions, threshold='1')
assert_raises(TypeError, np.set_printoptions, threshold=b'1')
assert_raises(TypeError, np.set_printoptions, precision='1')
assert_raises(TypeError, np.set_printoptions, precision=1.5)
def test_unicode_object_array():
expected = "array(['é'], dtype=object)"
x = np.array([u'\xe9'], dtype=object)
assert_equal(repr(x), expected)
class TestContextManager:
def test_ctx_mgr(self):
# test that context manager actually works
with np.printoptions(precision=2):
s = str(np.array([2.0]) / 3)
assert_equal(s, '[0.67]')
def test_ctx_mgr_restores(self):
        # test that print options are actually restored
opts = np.get_printoptions()
with np.printoptions(precision=opts['precision'] - 1,
linewidth=opts['linewidth'] - 4):
pass
assert_equal(np.get_printoptions(), opts)
def test_ctx_mgr_exceptions(self):
# test that print options are restored even if an exception is raised
opts = np.get_printoptions()
try:
with np.printoptions(precision=2, linewidth=11):
raise ValueError
except ValueError:
pass
assert_equal(np.get_printoptions(), opts)
def test_ctx_mgr_as_smth(self):
opts = {"precision": 2}
with np.printoptions(**opts) as ctx:
saved_opts = ctx.copy()
assert_equal({k: saved_opts[k] for k in opts}, opts)
the-stack_0_13352
#!/usr/bin/env python
# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <[email protected]>, 2012-2017
# - Mario Lassnig <[email protected]>, 2013-2018
# - Thomas Beermann <[email protected]>, 2013-2018
# - Martin Barisits <[email protected]>, 2013-2017
# - Cedric Serfon <[email protected]>, 2014-2017
# - Joaquin Bogado <[email protected]>, 2018
# - Hannes Hansen <[email protected]>, 2018-2019
# - Andrew Lister <[email protected]>, 2019
# - Patrick Austin <[email protected]>, 2020
#
# PY3K COMPATIBLE
from __future__ import print_function
from logging import getLogger, StreamHandler, DEBUG
from json import dumps, loads
from traceback import format_exc
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl
from web import application, ctx, data, header, Created, InternalError, OK, loadhook
from rucio.api.lock import get_replica_locks_for_rule_id
from rucio.api.rule import (add_replication_rule, delete_replication_rule, get_replication_rule, update_replication_rule,
reduce_replication_rule, list_replication_rule_history, list_replication_rule_full_history,
list_replication_rules, examine_replication_rule, move_replication_rule)
from rucio.common.exception import (InsufficientAccountLimit, RuleNotFound, AccessDenied, InvalidRSEExpression,
InvalidReplicationRule, RucioException, DataIdentifierNotFound, InsufficientTargetRSEs,
ReplicationRuleCreationTemporaryFailed, InvalidRuleWeight, StagingAreaRuleRequiresLifetime,
DuplicateRule, InvalidObject, AccountNotFound, RuleReplaceFailed, ScratchDiskLifetimeConflict,
ManualRuleApprovalBlocked, UnsupportedOperation)
from rucio.common.schema import get_schema_value
from rucio.common.utils import generate_http_error, render_json, APIEncoder
from rucio.web.rest.common import rucio_loadhook, check_accept_header_wrapper
LOGGER = getLogger("rucio.rule")
SH = StreamHandler()
SH.setLevel(DEBUG)
LOGGER.addHandler(SH)
URLS = ('/(.+)/locks', 'ReplicaLocks',
'/(.+)/reduce', 'ReduceRule',
'/(.+)/move', 'MoveRule',
'%s/history' % get_schema_value('SCOPE_NAME_REGEXP'), 'RuleHistoryFull',
'/(.+)/history', 'RuleHistory',
'/(.+)/analysis', 'RuleAnalysis',
'/', 'AllRule',
'/(.+)', 'Rule',)
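# web.py matches these regex routes in declaration order, so the specific
# sub-resources (locks, reduce, move, history, analysis) must precede the
# catch-all '/(.+)' that is handled by the Rule class.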
class Rule:
""" REST APIs for replication rules. """
@check_accept_header_wrapper(['application/json'])
def GET(self, rule_id):
""" get rule information for given rule id.
HTTP Success:
200 OK
HTTP Error:
401 Unauthorized
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: JSON dict containing information about the requested rule.
"""
header('Content-Type', 'application/json')
try:
estimate_ttc = False
json_data = data()
params = loads(json_data)
if 'estimate_ttc' in params:
estimate_ttc = params['estimate_ttc']
except ValueError:
estimate_ttc = False
try:
rule = get_replication_rule(rule_id, estimate_ttc=estimate_ttc, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
return render_json(**rule)
def PUT(self, rule_id):
"""
        Update the options of a replication rule (e.g. the locked flag).
HTTP Success:
200 OK
HTTP Error:
401 Unauthorized
404 Not Found
500 InternalError
"""
json_data = data()
try:
params = loads(json_data)
options = params['options']
update_replication_rule(rule_id=rule_id, options=options, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except AccessDenied as error:
raise generate_http_error(401, 'AccessDenied', error.args[0])
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except AccountNotFound as error:
raise generate_http_error(404, 'AccountNotFound', error.args[0])
except ScratchDiskLifetimeConflict as error:
raise generate_http_error(409, 'ScratchDiskLifetimeConflict', error.args[0])
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
except UnsupportedOperation as error:
raise generate_http_error(409, 'UnsupportedOperation', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
raise OK()
def DELETE(self, rule_id):
"""
        Delete a replication rule.
HTTP Success:
200 OK
HTTP Error:
401 Unauthorized
404 Not Found
500 Internal Error
"""
json_data = data()
try:
purge_replicas = None
params = loads(json_data)
if 'purge_replicas' in params:
purge_replicas = params['purge_replicas']
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
try:
delete_replication_rule(rule_id=rule_id, purge_replicas=purge_replicas, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except AccessDenied as error:
raise generate_http_error(401, 'AccessDenied', error.args[0])
except UnsupportedOperation as error:
raise generate_http_error(401, 'UnsupportedOperation', error.args[0])
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except Exception as error:
raise InternalError(error)
raise OK()
class AllRule:
""" REST APIs for all rules. """
@check_accept_header_wrapper(['application/x-json-stream'])
def GET(self):
"""
Return all rules of a given account.
HTTP Success:
200 OK
HTTP Error:
401 Unauthorized
404 Not Found
406 Not Acceptable
        :param filters: Optional query-string filters (e.g. account) used to select the rules.
"""
header('Content-Type', 'application/x-json-stream')
filters = {}
if ctx.query:
params = dict(parse_qsl(ctx.query[1:]))
filters.update(params)
try:
for rule in list_replication_rules(filters=filters, vo=ctx.env.get('vo')):
yield dumps(rule, cls=APIEncoder) + '\n'
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except Exception as error:
print(format_exc())
raise InternalError(error)
def POST(self):
"""
Create a new replication rule.
HTTP Success:
201 Created
HTTP Error:
400 Bad Request
401 Unauthorized
404 Not Found
409 Conflict
500 Internal Error
"""
json_data = data()
try:
grouping, weight, lifetime, locked, subscription_id, source_replica_expression, activity, notify,\
purge_replicas, ignore_availability, comment, ask_approval, asynchronous, priority,\
split_container, meta = 'DATASET', None, None, False, None, None, None, None, False, False, None,\
False, False, 3, False, None
params = loads(json_data)
dids = params['dids']
account = params['account']
copies = params['copies']
rse_expression = params['rse_expression']
if 'grouping' in params:
grouping = params['grouping']
if 'weight' in params:
weight = params['weight']
if 'lifetime' in params:
lifetime = params['lifetime']
if 'locked' in params:
locked = params['locked']
if 'subscription_id' in params:
subscription_id = params['subscription_id']
if 'source_replica_expression' in params:
source_replica_expression = params['source_replica_expression']
if 'activity' in params:
activity = params['activity']
if 'notify' in params:
notify = params['notify']
if 'purge_replicas' in params:
purge_replicas = params['purge_replicas']
if 'ignore_availability' in params:
ignore_availability = params['ignore_availability']
if 'comment' in params:
comment = params['comment']
if 'ask_approval' in params:
ask_approval = params['ask_approval']
if 'asynchronous' in params:
asynchronous = params['asynchronous']
if 'priority' in params:
priority = params['priority']
if 'split_container' in params:
split_container = params['split_container']
if 'meta' in params:
meta = params['meta']
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
try:
rule_ids = add_replication_rule(dids=dids,
copies=copies,
rse_expression=rse_expression,
weight=weight,
lifetime=lifetime,
grouping=grouping,
account=account,
locked=locked,
subscription_id=subscription_id,
source_replica_expression=source_replica_expression,
activity=activity,
notify=notify,
purge_replicas=purge_replicas,
ignore_availability=ignore_availability,
comment=comment,
ask_approval=ask_approval,
asynchronous=asynchronous,
priority=priority,
split_container=split_container,
meta=meta,
issuer=ctx.env.get('issuer'),
vo=ctx.env.get('vo'))
# TODO: Add all other error cases here
except InvalidReplicationRule as error:
raise generate_http_error(409, 'InvalidReplicationRule', error.args[0])
except DuplicateRule as error:
raise generate_http_error(409, 'DuplicateRule', error.args[0])
except InsufficientTargetRSEs as error:
raise generate_http_error(409, 'InsufficientTargetRSEs', error.args[0])
except InsufficientAccountLimit as error:
raise generate_http_error(409, 'InsufficientAccountLimit', error.args[0])
except InvalidRSEExpression as error:
raise generate_http_error(409, 'InvalidRSEExpression', error.args[0])
except DataIdentifierNotFound as error:
raise generate_http_error(404, 'DataIdentifierNotFound', error.args[0])
except ReplicationRuleCreationTemporaryFailed as error:
raise generate_http_error(409, 'ReplicationRuleCreationTemporaryFailed', error.args[0])
except InvalidRuleWeight as error:
raise generate_http_error(409, 'InvalidRuleWeight', error.args[0])
except StagingAreaRuleRequiresLifetime as error:
raise generate_http_error(409, 'StagingAreaRuleRequiresLifetime', error.args[0])
except ScratchDiskLifetimeConflict as error:
raise generate_http_error(409, 'ScratchDiskLifetimeConflict', error.args[0])
except ManualRuleApprovalBlocked as error:
raise generate_http_error(409, 'ManualRuleApprovalBlocked', error.args[0])
except InvalidObject as error:
raise generate_http_error(409, 'InvalidObject', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(format_exc())
raise InternalError(error)
raise Created(dumps(rule_ids))
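# Illustrative request body for AllRule.POST (keys mirror the params parsed
# above; the values are hypothetical):
#   {
#       "dids": [{"scope": "user.jdoe", "name": "dataset_1"}],
#       "account": "jdoe",
#       "copies": 2,
#       "rse_expression": "tier=1",
#       "lifetime": 86400,
#       "grouping": "DATASET"
#   }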
class ReplicaLocks:
""" REST APIs for replica locks. """
@check_accept_header_wrapper(['application/x-json-stream'])
def GET(self, rule_id):
""" get locks for a given rule_id.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: Stream of JSON-encoded replica locks for the rule.
"""
header('Content-Type', 'application/x-json-stream')
try:
locks = get_replica_locks_for_rule_id(rule_id)
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
for lock in locks:
yield dumps(lock, cls=APIEncoder) + '\n'
class ReduceRule:
""" REST APIs for reducing rules. """
def POST(self, rule_id):
"""
Reduce a replication rule.
HTTP Success:
201 Created
HTTP Error:
400 Bad Request
401 Unauthorized
404 Not Found
409 Conflict
500 Internal Error
"""
json_data = data()
try:
exclude_expression = None
params = loads(json_data)
copies = params['copies']
if 'exclude_expression' in params:
exclude_expression = params['exclude_expression']
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
try:
rule_ids = reduce_replication_rule(rule_id=rule_id,
copies=copies,
exclude_expression=exclude_expression,
issuer=ctx.env.get('issuer'),
vo=ctx.env.get('vo'))
# TODO: Add all other error cases here
except RuleReplaceFailed as error:
raise generate_http_error(409, 'RuleReplaceFailed', error.args[0])
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(error)
print(format_exc())
raise InternalError(error)
raise Created(dumps(rule_ids))
class MoveRule:
""" REST APIs for moving rules. """
def POST(self, rule_id):
"""
Move a replication rule.
HTTP Success:
201 Created
HTTP Error:
400 Bad Request
401 Unauthorized
404 Not Found
409 Conflict
500 Internal Error
"""
json_data = data()
try:
params = loads(json_data)
rule_id = params['rule_id']
rse_expression = params['rse_expression']
except ValueError:
raise generate_http_error(400, 'ValueError', 'Cannot decode json parameter list')
try:
rule_ids = move_replication_rule(rule_id=rule_id,
rse_expression=rse_expression,
issuer=ctx.env.get('issuer'),
vo=ctx.env.get('vo'))
except RuleReplaceFailed as error:
raise generate_http_error(409, 'RuleReplaceFailed', error.args[0])
except RuleNotFound as error:
raise generate_http_error(404, 'RuleNotFound', error.args[0])
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
print(error)
print(format_exc())
raise InternalError(error)
raise Created(dumps(rule_ids))
class RuleHistory:
""" REST APIs for rule history. """
@check_accept_header_wrapper(['application/x-json-stream'])
def GET(self, rule_id):
""" get history for a given rule_id.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: Stream of JSON-encoded history entries for the rule.
"""
header('Content-Type', 'application/x-json-stream')
try:
history = list_replication_rule_history(rule_id, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
for hist in history:
yield dumps(hist, cls=APIEncoder) + '\n'
class RuleHistoryFull:
""" REST APIs for rule history for DIDs. """
@check_accept_header_wrapper(['application/x-json-stream'])
def GET(self, scope, name):
""" get history for a given DID.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: Stream of JSON-encoded rule history entries for the given DID.
"""
header('Content-Type', 'application/x-json-stream')
try:
history = list_replication_rule_full_history(scope, name, vo=ctx.env.get('vo'))
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
for hist in history:
yield dumps(hist, cls=APIEncoder) + '\n'
class RuleAnalysis:
""" REST APIs for rule analysis. """
@check_accept_header_wrapper(['application/json'])
def GET(self, rule_id):
""" get analysis for given rule.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
406 Not Acceptable
500 InternalError
        :returns: JSON dict containing the analysis of the rule.
"""
header('Content-Type', 'application/json')
try:
analysis = examine_replication_rule(rule_id, issuer=ctx.env.get('issuer'), vo=ctx.env.get('vo'))
except RucioException as error:
raise generate_http_error(500, error.__class__.__name__, error.args[0])
except Exception as error:
raise InternalError(error)
return render_json(**analysis)
"""----------------------
Web service startup
----------------------"""
APP = application(URLS, globals())
APP.add_processor(loadhook(rucio_loadhook))
application = APP.wsgifunc()
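# 'application' is the module-level WSGI callable that a WSGI server
# (e.g. mod_wsgi or gunicorn) is expected to pick up.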
the-stack_0_13354
import os
import pickle
import copy
import json
from collections import defaultdict
import numpy as np
import random
import torch
from torch_geometric.data import Data, Dataset, Batch
from torch_geometric.utils import to_networkx
from torch_scatter import scatter
#from torch.utils.data import Dataset
import rdkit
from rdkit import Chem
from rdkit.Chem.rdchem import Mol, HybridizationType, BondType
from rdkit import RDLogger
import networkx as nx
from tqdm import tqdm
# import sidechainnet as scn
RDLogger.DisableLog('rdApp.*')
from .chem import BOND_TYPES, mol_to_smiles
def prepare_pdb2(scn_dir, data_path):
# step 1: filter and save pdb file.
train_data = []
cnt_fail = 0
def get_num_plusseg(msk):
tmp = [0]
for i in range(1, len(msk)):
if msk[i] == msk[i-1]:
tmp.append(0)
else:
tmp.append(1)
s = sum(tmp)
if msk[0] == '-':
return (s + 1) // 2
else:
return (s // 2) + 1
def get_plus_rate(msk):
cnt = sum([1 if x == '+' else 0 for x in msk])
return cnt / len(msk)
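    # SidechainNet masks use '+' for residues with resolved coordinates and '-'
    # for missing ones; the two helpers above measure the '+' fraction and the
    # number of contiguous '+' segments, which the loop below uses to keep
    # mostly-resolved, single-segment chains.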
d = scn.load(casp_version=12, thinning=30, scn_dir=scn_dir)
raw_data = d['train']
mask = raw_data['msk']
n_raw_data = len(mask)
cnt_seg = 0
cnt_success = 0
for i in tqdm(range(n_raw_data)):
if get_plus_rate(mask[i]) > 0.5 and get_num_plusseg(mask[i]) == 1:
cnt_seg += 1
mask_ = [1 if _ == '+' else 0 for _ in mask[i]]
if sum(mask_) < 200:
cnt_success += 1
seq = raw_data['seq'][i]
crd = raw_data['crd'][i]
name = raw_data['ids'][i]
mol = scn.StructureBuilder(seq, crd)
mol.to_pdb('./tmp.pdb')
data = pdb_to_data('./tmp.pdb', name)
if data is not None:
train_data.append(data)
else:
cnt_fail += 1
print('total n_raw_data: %d, cnt_seg: %d, cnt_success: %d' % (n_raw_data, cnt_seg, cnt_success))
n_data = len(train_data)
print('number of train samples: %d | number of fails: %d' % (n_data, cnt_fail))
os.makedirs(os.path.join(data_path), exist_ok=True)
with open(os.path.join(data_path, 'train_data_%dk.pkl' % (n_data // 1000)), "wb") as fout:
pickle.dump(train_data, fout)
print('save train %dk done' % (n_data // 1000))
def prepare_pdblarge(scn_dir, data_path):
# step 1: filter and save pdb file.
train_data = []
cnt_fail = 0
max_residue = 0
d = scn.load(casp_version=12, thinning=30, scn_dir=scn_dir)
raw_data = d['train']
mask = raw_data['msk']
n_raw_data = len(mask)
cnt_seg = 0
cnt_success = 0
for i in tqdm(range(n_raw_data)):
# if get_plus_rate(mask[i]) > 0.5 and get_num_plusseg(mask[i]) == 1:
if True:
cnt_seg += 1
mask_ = [1 if _ == '+' else 0 for _ in mask[i]]
if sum(mask_) < 400:
cnt_success += 1
seq = raw_data['seq'][i]
crd = raw_data['crd'][i]
name = raw_data['ids'][i]
mol = scn.StructureBuilder(seq, crd)
mol.to_pdb('./tmp.pdb')
data = pdb_to_data('./tmp.pdb', name)
if data is not None:
train_data.append(data)
max_residue = max(max_residue, sum(mask_))
else:
cnt_fail += 1
print('total n_raw_data: %d, cnt_seg: %d, cnt_success: %d, max_residue: %d' % (n_raw_data, cnt_seg, cnt_success, max_residue))
n_data = len(train_data)
print('number of train samples: %d | number of fails: %d' % (n_data, cnt_fail))
os.makedirs(os.path.join(data_path), exist_ok=True)
with open(os.path.join(data_path, 'train_data_%dk.pkl' % (n_data // 1000)), "wb") as fout:
pickle.dump(train_data, fout)
print('save train %dk done' % (n_data // 1000))
def prepare_pdb_valtest(scn_dir, data_path):
# step 1: filter and save pdb file.
val_data = []
test_data = []
all_data = []
cnt_fail = 0
max_residue = 0
n_raw_data = 0
cnt_success = 0
d = scn.load(casp_version=12, thinning=30, scn_dir=scn_dir)
fetch_dict = ['test', 'valid-10', 'valid-20', 'valid-30', 'valid-40', 'valid-50', 'valid-70', 'valid-90']
for dict_name in fetch_dict:
raw_data = d[dict_name]
mask = raw_data['msk']
n_raw_data += len(mask)
cnt_seg = 0
cnt_success = 0
for i in tqdm(range(len(mask))):
# if get_plus_rate(mask[i]) > 0.5 and get_num_plusseg(mask[i]) == 1:
if True:
mask_ = [1 if _ == '+' else 0 for _ in mask[i]]
if sum(mask_) < 400:
seq = raw_data['seq'][i]
crd = raw_data['crd'][i]
name = raw_data['ids'][i]
mol = scn.StructureBuilder(seq, crd)
mol.to_pdb('./tmp.pdb')
data = pdb_to_data('./tmp.pdb', name)
if data is not None:
cnt_success += 1
all_data.append(data)
max_residue = max(max_residue, sum(mask_))
else:
cnt_fail += 1
print('total n_raw_data: %d, cnt_success: %d, max_residue: %d' % (n_raw_data, cnt_success, max_residue))
random.shuffle(all_data)
n_val = len(all_data) // 2
n_test = len(all_data) - n_val
print('number of val samples: %d | number of test samples: %d | number of fails: %d' % (n_val, n_test, cnt_fail))
os.makedirs(os.path.join(data_path), exist_ok=True)
with open(os.path.join(data_path, 'val_data_%dk.pkl' % (n_val // 1000)), "wb") as fout:
pickle.dump(all_data[:n_val], fout)
print('save val %dk done' % (n_val // 1000))
with open(os.path.join(data_path, 'test_data_%dk.pkl' % (n_test // 1000)), "wb") as fout:
pickle.dump(all_data[n_val:], fout)
print('save test %dk done' % (n_test // 1000))
def pdb_to_data(pdb_path, name):
mol = Chem.rdmolfiles.MolFromPDBFile(pdb_path)
if mol is None:
return None
with open(pdb_path, 'r') as f:
pdb_infos = f.readlines()
pdb_infos = pdb_infos[1:-1]
assert mol.GetNumConformers() == 1
N = mol.GetNumAtoms()
# name = pdb_path.split('/')[-1].split('.')[0]
pos = torch.tensor(mol.GetConformer(0).GetPositions(), dtype=torch.float32)
atomic_number = []
aromatic = []
is_sidechain = []
is_alpha = []
atom2res = []
sp = []
sp2 = []
sp3 = []
num_hs = []
for index, atom in enumerate(mol.GetAtoms()):
atomic_number.append(atom.GetAtomicNum())
aromatic.append(1 if atom.GetIsAromatic() else 0)
hybridization = atom.GetHybridization()
sp.append(1 if hybridization == HybridizationType.SP else 0)
sp2.append(1 if hybridization == HybridizationType.SP2 else 0)
sp3.append(1 if hybridization == HybridizationType.SP3 else 0)
info = atom.GetPDBResidueInfo()
ref_info = pdb_infos[index]
ref_info = ref_info.split()
assert info.GetResidueName().strip() == ref_info[3]
assert info.GetName().strip() == ref_info[2]
assert info.GetResidueNumber() == int(ref_info[4])
if info.GetName().strip() == 'CA':
is_alpha.append(1)
else:
is_alpha.append(0)
if info.GetName().strip() in ['N', 'CA', 'C', 'O']:
is_sidechain.append(0)
else:
is_sidechain.append(1)
atom2res.append(info.GetResidueNumber() - 1)
num_res = len(set(atom2res))
atom2res = np.array(atom2res)
atom2res -= atom2res.min()
atom2res = torch.tensor(atom2res, dtype=torch.long)
is_sidechain = torch.tensor(is_sidechain).bool()
is_alpha = torch.tensor(is_alpha).bool()
dummy_index = torch.arange(pos.size(0))
alpha_index = dummy_index[is_alpha]
res2alpha_index = -torch.ones(5000, dtype=torch.long)
res2alpha_index[atom2res[is_alpha]] = alpha_index
atom2alpha_index = res2alpha_index[atom2res]
if is_sidechain.sum().item() == 0: # protein built solely on GLY can not be used for sidechain prediction
return None
# assert (4 * num_res == (len(is_sidechain) - sum(is_sidechain))),(4 * num_res, (len(is_sidechain) - sum(is_sidechain)))
z = torch.tensor(atomic_number, dtype=torch.long)
row, col, edge_type = [], [], []
for bond in mol.GetBonds():
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
row += [start, end]
col += [end, start]
edge_type += 2 * [BOND_TYPES[bond.GetBondType()]]
edge_index = torch.tensor([row, col], dtype=torch.long)
edge_type = torch.tensor(edge_type)
if edge_index.size(1) == 0: # only alpha carbon
return None
perm = (edge_index[0] * N + edge_index[1]).argsort()
edge_index = edge_index[:, perm]
edge_type = edge_type[perm]
row, col = edge_index
hs = (z == 1).to(torch.float32)
num_hs = scatter(hs[row], col, dim_size=N, reduce='sum').tolist()
# smiles = Chem.MolToSmiles(mol)
data = Data(atom_type=z, pos=pos, edge_index=edge_index, edge_type=edge_type, is_alpha=is_alpha,
rdmol=copy.deepcopy(mol), name=name, is_sidechain=is_sidechain, atom2res=atom2res, atom2alpha_index=atom2alpha_index)
#data.nx = to_networkx(data, to_undirected=True)
return data
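# Usage sketch (the PDB path and name below are placeholders):
#   data = pdb_to_data('./example.pdb', name='example')
#   if data is not None:
#       print(data.atom_type.size(0), 'atoms,', int(data.is_sidechain.sum().item()), 'sidechain atoms')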
def rdmol_to_data(mol:Mol, smiles=None, data_cls=Data):
assert mol.GetNumConformers() == 1
N = mol.GetNumAtoms()
pos = torch.tensor(mol.GetConformer(0).GetPositions(), dtype=torch.float32)
atomic_number = []
aromatic = []
sp = []
sp2 = []
sp3 = []
num_hs = []
for atom in mol.GetAtoms():
atomic_number.append(atom.GetAtomicNum())
aromatic.append(1 if atom.GetIsAromatic() else 0)
hybridization = atom.GetHybridization()
sp.append(1 if hybridization == HybridizationType.SP else 0)
sp2.append(1 if hybridization == HybridizationType.SP2 else 0)
sp3.append(1 if hybridization == HybridizationType.SP3 else 0)
z = torch.tensor(atomic_number, dtype=torch.long)
row, col, edge_type = [], [], []
for bond in mol.GetBonds():
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
row += [start, end]
col += [end, start]
edge_type += 2 * [BOND_TYPES[bond.GetBondType()]]
edge_index = torch.tensor([row, col], dtype=torch.long)
edge_type = torch.tensor(edge_type)
perm = (edge_index[0] * N + edge_index[1]).argsort()
edge_index = edge_index[:, perm]
edge_type = edge_type[perm]
row, col = edge_index
hs = (z == 1).to(torch.float32)
num_hs = scatter(hs[row], col, dim_size=N, reduce='sum').tolist()
if smiles is None:
smiles = Chem.MolToSmiles(mol)
data = data_cls(atom_type=z, pos=pos, edge_index=edge_index, edge_type=edge_type,
rdmol=copy.deepcopy(mol), smiles=smiles)
#data.nx = to_networkx(data, to_undirected=True)
return data
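# Usage sketch (hypothetical molecule; a conformer must already be embedded):
#   from rdkit.Chem import AllChem
#   mol = Chem.AddHs(Chem.MolFromSmiles('CCO'))
#   AllChem.EmbedMolecule(mol)
#   data = rdmol_to_data(mol)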
class MolClusterData(Data):
def __inc__(self, key, value):
if key == 'subgraph_index':
return self.subgraph_index.max().item() + 1
else:
return super().__inc__(key, value)
def rdmol_cluster_to_data(mol:Mol, smiles=None):
data = rdmol_to_data(mol, smiles, data_cls=MolClusterData)
data.subgraph_index = torch.zeros([data.atom_type.size(0)], dtype=torch.long)
for i, subgraph in enumerate(nx.connected_components(to_networkx(data, to_undirected=True))):
data.subgraph_index[list(subgraph)] = i
return data
def preprocess_iso17_dataset(base_path):
train_path = os.path.join(base_path, 'iso17_split-0_train.pkl')
test_path = os.path.join(base_path, 'iso17_split-0_test.pkl')
with open(train_path, 'rb') as fin:
raw_train = pickle.load(fin)
with open(test_path, 'rb') as fin:
raw_test = pickle.load(fin)
smiles_list_train = [mol_to_smiles(mol) for mol in raw_train]
smiles_set_train = list(set(smiles_list_train))
smiles_list_test = [mol_to_smiles(mol) for mol in raw_test]
smiles_set_test = list(set(smiles_list_test))
print('preprocess train...')
all_train = []
for i in tqdm(range(len(raw_train))):
smiles = smiles_list_train[i]
data = rdmol_to_data(raw_train[i], smiles=smiles)
all_train.append(data)
print('Train | find %d molecules with %d confs' % (len(smiles_set_train), len(all_train)))
print('preprocess test...')
all_test = []
for i in tqdm(range(len(raw_test))):
smiles = smiles_list_test[i]
data = rdmol_to_data(raw_test[i], smiles=smiles)
all_test.append(data)
print('Test | find %d molecules with %d confs' % (len(smiles_set_test), len(all_test)))
return all_train, all_test
def preprocess_GEOM_dataset(base_path, dataset_name, max_conf=5, train_size=0.8, max_size=9999999999, seed=None):
# set random seed
if seed is None:
seed = 2021
np.random.seed(seed)
random.seed(seed)
# read summary file
assert dataset_name in ['qm9', 'drugs']
summary_path = os.path.join(base_path, 'summary_%s.json' % dataset_name)
with open(summary_path, 'r') as f:
summ = json.load(f)
# filter valid pickle path
smiles_list = []
pickle_path_list = []
num_mols = 0
num_confs = 0
for smiles, meta_mol in tqdm(summ.items()):
u_conf = meta_mol.get('uniqueconfs')
if u_conf is None:
continue
pickle_path = meta_mol.get('pickle_path')
if pickle_path is None:
continue
num_mols += 1
num_confs += min(max_conf, u_conf)
smiles_list.append(smiles)
pickle_path_list.append(pickle_path)
if num_mols >= max_size:
break
print('pre-filter: find %d molecules with %d confs' % (num_mols, num_confs))
# 1. select maximal 'max_conf' confs of each qm9 molecule
# 2. split the dataset based on 2d-structure, i.e., test on unseen graphs
train_data, val_data, test_data = [], [], []
val_size = test_size = (1. - train_size) / 2
num_mols = np.zeros(4, dtype=int) # (tot, train, val, test)
num_confs = np.zeros(4, dtype=int) # (tot, train, val, test)
'''
# mol.get('uniqueconfs') != len(mol.get('conformers'))
with open(os.path.join(base_path, pickle_path_list[1878]), 'rb') as fin:
mol = pickle.load(fin)
print(mol.get('uniqueconfs'), len(mol.get('conformers')))
print(mol.get('conformers')[0]['rd_mol'].GetConformer(0).GetPositions())
print(mol.get('conformers')[1]['rd_mol'].GetConformer(0).GetPositions())
return
'''
bad_case = 0
for i in tqdm(range(len(pickle_path_list))):
with open(os.path.join(base_path, pickle_path_list[i]), 'rb') as fin:
mol = pickle.load(fin)
if mol.get('uniqueconfs') > len(mol.get('conformers')):
bad_case += 1
continue
if mol.get('uniqueconfs') <= 0:
bad_case += 1
continue
datas = []
smiles = mol.get('smiles')
if mol.get('uniqueconfs') <= max_conf:
# use all confs
conf_ids = np.arange(mol.get('uniqueconfs'))
else:
# filter the most probable 'max_conf' confs
all_weights = np.array([_.get('boltzmannweight', -1.) for _ in mol.get('conformers')])
descend_conf_id = (-all_weights).argsort()
conf_ids = descend_conf_id[:max_conf]
for conf_id in conf_ids:
conf_meta = mol.get('conformers')[conf_id]
data = rdmol_to_data(conf_meta.get('rd_mol'))
labels = {
'totalenergy': conf_meta['totalenergy'],
'boltzmannweight': conf_meta['boltzmannweight'],
}
for k, v in labels.items():
data[k] = torch.tensor([v], dtype=torch.float32)
datas.append(data)
# split
eps = np.random.rand()
if eps <= train_size:
train_data.extend(datas)
num_mols += [1, 1, 0, 0]
num_confs += [len(datas), len(datas), 0, 0]
elif eps <= train_size + val_size:
val_data.extend(datas)
num_mols += [1, 0, 1, 0]
num_confs += [len(datas), 0, len(datas), 0]
else:
test_data.extend(datas)
num_mols += [1, 0, 0, 1]
num_confs += [len(datas), 0, 0, len(datas)]
print('post-filter: find %d molecules with %d confs' % (num_mols[0], num_confs[0]))
print('train size: %d molecules with %d confs' % (num_mols[1], num_confs[1]))
print('val size: %d molecules with %d confs' % (num_mols[2], num_confs[2]))
print('test size: %d molecules with %d confs' % (num_mols[3], num_confs[3]))
print('bad case: %d' % bad_case)
print('done!')
return train_data, val_data, test_data
def preprocess_GEOM_dataset_with_fixed_num_conf(base_path, dataset_name, conf_per_mol=5, train_size=0.8, tot_mol_size=50000, seed=None):
"""
base_path: directory that contains GEOM dataset
dataset_name: dataset name, should be in [qm9, drugs]
conf_per_mol: keep mol that has at least conf_per_mol confs, and sampling the most probable conf_per_mol confs
    train_size: train split ratio; val = test = (1 - train_size) / 2
tot_mol_size: max num of mols. The total number of final confs should be tot_mol_size * conf_per_mol
seed: rand seed for RNG
"""
# set random seed
if seed is None:
seed = 2021
np.random.seed(seed)
random.seed(seed)
# read summary file
assert dataset_name in ['qm9', 'drugs']
summary_path = os.path.join(base_path, 'summary_%s.json' % dataset_name)
with open(summary_path, 'r') as f:
summ = json.load(f)
# filter valid pickle path
smiles_list = []
pickle_path_list = []
num_mols = 0
num_confs = 0
for smiles, meta_mol in tqdm(summ.items()):
u_conf = meta_mol.get('uniqueconfs')
if u_conf is None:
continue
pickle_path = meta_mol.get('pickle_path')
if pickle_path is None:
continue
if u_conf < conf_per_mol:
continue
num_mols += 1
num_confs += conf_per_mol
smiles_list.append(smiles)
pickle_path_list.append(pickle_path)
# we need do a shuffle and sample first max_size items here.
#if num_mols >= max_size:
# break
random.shuffle(pickle_path_list)
assert len(pickle_path_list) >= tot_mol_size, 'the length of all available mols is %d, which is smaller than tot mol size %d' % (len(pickle_path_list), tot_mol_size)
pickle_path_list = pickle_path_list[:tot_mol_size]
print('pre-filter: find %d molecules with %d confs, use %d molecules with %d confs' % (num_mols, num_confs, tot_mol_size, tot_mol_size*conf_per_mol))
# 1. select maximal 'max_conf' confs of each qm9 molecule
# 2. split the dataset based on 2d-structure, i.e., test on unseen graphs
train_data, val_data, test_data = [], [], []
val_size = test_size = (1. - train_size) / 2
# generate train, val, test split indexes
split_indexes = list(range(tot_mol_size))
random.shuffle(split_indexes)
index2split = {}
#print(int(len(split_indexes) * train_size), int(len(split_indexes) * (train_size + val_size)), len(split_indexes))
for i in range(0, int(len(split_indexes) * train_size)):
index2split[split_indexes[i]] = 'train'
for i in range(int(len(split_indexes) * train_size), int(len(split_indexes) * (train_size + val_size))):
index2split[split_indexes[i]] = 'val'
for i in range(int(len(split_indexes) * (train_size + val_size)), len(split_indexes)):
index2split[split_indexes[i]] = 'test'
num_mols = np.zeros(4, dtype=int) # (tot, train, val, test)
num_confs = np.zeros(4, dtype=int) # (tot, train, val, test)
bad_case = 0
for i in tqdm(range(len(pickle_path_list))):
with open(os.path.join(base_path, pickle_path_list[i]), 'rb') as fin:
mol = pickle.load(fin)
if mol.get('uniqueconfs') > len(mol.get('conformers')):
bad_case += 1
continue
if mol.get('uniqueconfs') <= 0:
bad_case += 1
continue
datas = []
smiles = mol.get('smiles')
if mol.get('uniqueconfs') == conf_per_mol:
# use all confs
conf_ids = np.arange(mol.get('uniqueconfs'))
else:
# filter the most probable 'max_conf' confs
all_weights = np.array([_.get('boltzmannweight', -1.) for _ in mol.get('conformers')])
descend_conf_id = (-all_weights).argsort()
conf_ids = descend_conf_id[:conf_per_mol]
for conf_id in conf_ids:
conf_meta = mol.get('conformers')[conf_id]
data = rdmol_to_data(conf_meta.get('rd_mol'), smiles=smiles)
labels = {
'totalenergy': conf_meta['totalenergy'],
'boltzmannweight': conf_meta['boltzmannweight'],
}
for k, v in labels.items():
data[k] = torch.tensor([v], dtype=torch.float32)
data['idx'] = torch.tensor([i], dtype=torch.long)
datas.append(data)
assert len(datas) == conf_per_mol
# split
'''
eps = np.random.rand()
if eps <= train_size:
train_data.extend(datas)
num_mols += [1, 1, 0, 0]
num_confs += [len(datas), len(datas), 0, 0]
elif eps <= train_size + val_size:
val_data.extend(datas)
num_mols += [1, 0, 1, 0]
num_confs += [len(datas), 0, len(datas), 0]
else:
test_data.extend(datas)
num_mols += [1, 0, 0, 1]
num_confs += [len(datas), 0, 0, len(datas)]
'''
if index2split[i] == 'train':
train_data.extend(datas)
num_mols += [1, 1, 0, 0]
num_confs += [len(datas), len(datas), 0, 0]
elif index2split[i] == 'val':
val_data.extend(datas)
num_mols += [1, 0, 1, 0]
num_confs += [len(datas), 0, len(datas), 0]
elif index2split[i] == 'test':
test_data.extend(datas)
num_mols += [1, 0, 0, 1]
num_confs += [len(datas), 0, 0, len(datas)]
else:
raise ValueError('unknown index2split value.')
print('post-filter: find %d molecules with %d confs' % (num_mols[0], num_confs[0]))
print('train size: %d molecules with %d confs' % (num_mols[1], num_confs[1]))
print('val size: %d molecules with %d confs' % (num_mols[2], num_confs[2]))
print('test size: %d molecules with %d confs' % (num_mols[3], num_confs[3]))
print('bad case: %d' % bad_case)
print('done!')
return train_data, val_data, test_data, index2split
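# Invocation sketch (the path and sizes below are placeholders):
#   train, val, test, index2split = preprocess_GEOM_dataset_with_fixed_num_conf(
#       '/path/to/GEOM/rdkit_folder', 'qm9', conf_per_mol=5, train_size=0.8,
#       tot_mol_size=50000, seed=2021)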
def get_test_set_with_large_num_conf(base_path, dataset_name, block, tot_mol_size=1000, seed=None, confmin=50, confmax=500):
"""
    base_path: directory that contains GEOM dataset
    dataset_name: dataset name, should be in [qm9, drugs]
    block: datasets (e.g. train/val) whose smiles are excluded from the sampled test molecules
    tot_mol_size: max num of molecules to sample for the test set
    confmin, confmax: only keep molecules whose number of unique confs lies in [confmin, confmax]
    seed: rand seed for RNG
"""
#block smiles in train / val
block_smiles = defaultdict(int)
for i in range(len(block)):
block_smiles[block[i].smiles] = 1
# set random seed
if seed is None:
seed = 2021
np.random.seed(seed)
random.seed(seed)
# read summary file
assert dataset_name in ['qm9', 'drugs']
summary_path = os.path.join(base_path, 'summary_%s.json' % dataset_name)
with open(summary_path, 'r') as f:
summ = json.load(f)
# filter valid pickle path
smiles_list = []
pickle_path_list = []
num_mols = 0
num_confs = 0
for smiles, meta_mol in tqdm(summ.items()):
u_conf = meta_mol.get('uniqueconfs')
if u_conf is None:
continue
pickle_path = meta_mol.get('pickle_path')
if pickle_path is None:
continue
if u_conf < confmin or u_conf > confmax:
continue
if block_smiles[smiles] == 1:
continue
num_mols += 1
num_confs += u_conf
smiles_list.append(smiles)
pickle_path_list.append(pickle_path)
# we need do a shuffle and sample first max_size items here.
#if num_mols >= tot_mol_size:
# break
random.shuffle(pickle_path_list)
assert len(pickle_path_list) >= tot_mol_size, 'the length of all available mols is %d, which is smaller than tot mol size %d' % (len(pickle_path_list), tot_mol_size)
pickle_path_list = pickle_path_list[:tot_mol_size]
print('pre-filter: find %d molecules with %d confs' % (num_mols, num_confs))
bad_case = 0
all_test_data = []
num_valid_mol = 0
num_valid_conf = 0
for i in tqdm(range(len(pickle_path_list))):
with open(os.path.join(base_path, pickle_path_list[i]), 'rb') as fin:
mol = pickle.load(fin)
if mol.get('uniqueconfs') > len(mol.get('conformers')):
bad_case += 1
continue
if mol.get('uniqueconfs') <= 0:
bad_case += 1
continue
datas = []
smiles = mol.get('smiles')
conf_ids = np.arange(mol.get('uniqueconfs'))
for conf_id in conf_ids:
conf_meta = mol.get('conformers')[conf_id]
data = rdmol_to_data(conf_meta.get('rd_mol'), smiles=smiles)
labels = {
'totalenergy': conf_meta['totalenergy'],
'boltzmannweight': conf_meta['boltzmannweight'],
}
for k, v in labels.items():
data[k] = torch.tensor([v], dtype=torch.float32)
data['idx'] = torch.tensor([i], dtype=torch.long)
datas.append(data)
all_test_data.extend(datas)
num_valid_mol += 1
num_valid_conf += len(datas)
print('poster-filter: find %d molecules with %d confs' % (num_valid_mol, num_valid_conf))
return all_test_data
class ConformationDataset(Dataset):
def __init__(self, path, transform=None):
super().__init__()
with open(path, 'rb') as f:
self.data = pickle.load(f)
self.transform = transform
self.atom_types = self._atom_types()
self.edge_types = self._edge_types()
def __getitem__(self, idx):
data = self.data[idx].clone()
if self.transform is not None:
data = self.transform(data)
return data
def __len__(self):
return len(self.data)
def _atom_types(self):
"""All atom types."""
atom_types = set()
for graph in self.data:
atom_types.update(graph.atom_type.tolist())
return sorted(atom_types)
def _edge_types(self):
"""All edge types."""
edge_types = set()
for graph in self.data:
edge_types.update(graph.edge_type.tolist())
return sorted(edge_types)
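# Consumption sketch (the pickle path is a placeholder; DataLoader lives in
# torch_geometric.data for older PyG releases and torch_geometric.loader for
# newer ones):
#   from torch_geometric.data import DataLoader
#   dataset = ConformationDataset('./train_data_40k.pkl', transform=None)
#   loader = DataLoader(dataset, batch_size=32, shuffle=True)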
class SidechainConformationDataset(ConformationDataset):
def __init__(self, path, transform=None, cutoff=10., max_residue=5000, fix_subgraph=False):
super().__init__(path, transform)
self.cutoff = cutoff
self.max_residue = max_residue
self.fix_subgraph = fix_subgraph
def __getitem__(self, idx):
data = self.data[idx].clone()
""" Subgraph sampling
1. sampling an atom from the backbone (residue)
2. Find all neighboring atoms within a cutoff
3. extend atoms to ensure the completeness of each residue
4. remap the index for subgraph
"""
is_sidechain = data.is_sidechain
pos = data.pos
edge_index = data.edge_index
atom2res = data.atom2res
dummy_index = torch.arange(pos.size(0))
backbone_index = dummy_index[~is_sidechain]
#stop=False
#while not stop:
# step 1
if self.fix_subgraph:
center_atom_index = backbone_index[backbone_index.size(0) // 2].view(1,)
else:
center_atom_index = backbone_index[torch.randint(low=0, high=backbone_index.size(0), size=(1, ))] # (1, )
pos_center_atom = pos[center_atom_index] # (1, 3)
# step 2
distance = (pos_center_atom - pos).norm(dim=-1)
mask = (distance <= self.cutoff)
# step 3
is_keep_residue = scatter(mask, atom2res, dim=-1, dim_size=self.max_residue, reduce='sum') # (max_residue, )
is_keep_atom = is_keep_residue[atom2res]
is_keep_edge = (is_keep_atom[edge_index[0]]) & (is_keep_atom[edge_index[1]])
# step 4
mapping = -torch.ones(pos.size(0), dtype=torch.long)
keep_index = dummy_index[is_keep_atom]
mapping[keep_index] = torch.arange(keep_index.size(0))
if (data.is_sidechain[is_keep_atom]).sum().item() == 0:
#stop = True
return None
# return subgraph data
subgraph_data = Data(atom_type=data.atom_type[is_keep_atom],
pos=data.pos[is_keep_atom],
edge_index=mapping[data.edge_index[:, is_keep_edge]],
edge_type=data.edge_type[is_keep_edge],
is_sidechain=data.is_sidechain[is_keep_atom],
atom2res=data.atom2res[is_keep_atom])
if self.transform is not None:
subgraph_data = self.transform(subgraph_data)
return subgraph_data
@staticmethod
def collate_fn(data):
batch = [_ for _ in data if _ is not None]
return Batch.from_data_list(batch)
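# SidechainConformationDataset.__getitem__ returns None when the sampled
# subgraph contains no sidechain atoms; collate_fn above drops those entries
# before building the Batch.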
def accumulate_grad_from_subgraph(model, atom_type, pos, bond_index, bond_type, batch, atom2res, batch_size=8, device='cuda:0',
is_sidechain=None, is_alpha=None, pos_gt=None, cutoff=10., max_residue=5000, transform=None):
"""
1. decompose the protein to subgraphs
2. evaluate subgraphs using trained models
3. accumulate atom-wise grads
4. return grads
"""
accumulated_grad = torch.zeros_like(pos)
    accumulated_time = torch.zeros(pos.size(0), device=pos.device)
all_subgraphs = []
dummy_index = torch.arange(pos.size(0))
# prepare subgraphs
    is_covered = torch.zeros(pos.size(0), device=pos.device).bool()
is_alpha_and_uncovered = is_alpha & (~is_covered)
while is_alpha_and_uncovered.sum().item() != 0:
alpha_index = dummy_index[is_alpha_and_uncovered]
center_atom_index = alpha_index[torch.randint(low=0, high=alpha_index.size(0), size=(1, ))] # (1, )
pos_center_atom = pos[center_atom_index] # (1, 3)
distance = (pos_center_atom - pos).norm(dim=-1)
mask = (distance <= cutoff)
is_keep_residue = scatter(mask, atom2res, dim=-1, dim_size=max_residue, reduce='sum') # (max_residue, )
is_keep_atom = is_keep_residue[atom2res]
is_keep_edge = (is_keep_atom[bond_index[0]]) & (is_keep_atom[bond_index[1]])
mapping = -torch.ones(pos.size(0), dtype=torch.long)
keep_index = dummy_index[is_keep_atom]
mapping[keep_index] = torch.arange(keep_index.size(0))
is_covered |= is_keep_atom
is_alpha_and_uncovered = is_alpha & (~is_covered)
if (is_sidechain[is_keep_atom]).sum().item() == 0:
continue
subgraph = Data(atom_type=atom_type[is_keep_atom],
pos=pos[is_keep_atom],
edge_index=mapping[bond_index[:, is_keep_edge]],
edge_type=bond_type[is_keep_edge],
is_sidechain=is_sidechain[is_keep_atom],
atom2res=atom2res[is_keep_atom],
mapping=keep_index)
if transform is not None:
subgraph = transform(subgraph)
all_subgraphs.append(subgraph)
# run model
tot_iters = (len(all_subgraphs) + batch_size - 1) // batch_size
for it in range(tot_iters):
        batch = Batch.from_data_list(all_subgraphs[it * batch_size: (it + 1) * batch_size]).to(device)
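        # Sketch of the remaining steps (2-4) from the docstring; the model call
        # below is an assumption about the API, not the actual signature:
        # node_grad = model(batch)                         # hypothetical per-atom gradient/score
        # accumulated_grad[batch.mapping] += node_grad
        # accumulated_time[batch.mapping] += 1
    # return accumulated_grad / accumulated_time.clamp(min=1).unsqueeze(-1)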
class PackedConformationDataset(ConformationDataset):
def __init__(self, path, transform=None):
super().__init__(path, transform)
#k:v = idx: data_obj
self._pack_data_by_mol()
def _pack_data_by_mol(self):
"""
pack confs with same mol into a single data object
"""
self._packed_data = defaultdict(list)
if hasattr(self.data, 'idx'):
for i in range(len(self.data)):
self._packed_data[self.data[i].idx.item()].append(self.data[i])
else:
for i in range(len(self.data)):
self._packed_data[self.data[i].smiles].append(self.data[i])
print('[Packed] %d Molecules, %d Conformations.' % (len(self._packed_data), len(self.data)))
new_data = []
# logic
# save graph structure for each mol once, but store all confs
cnt = 0
for k, v in self._packed_data.items():
data = copy.deepcopy(v[0])
all_pos = []
for i in range(len(v)):
all_pos.append(v[i].pos)
data.pos_ref = torch.cat(all_pos, 0) # (num_conf*num_node, 3)
data.num_pos_ref = torch.tensor([len(all_pos)], dtype=torch.long)
#del data.pos
if hasattr(data, 'totalenergy'):
del data.totalenergy
if hasattr(data, 'boltzmannweight'):
del data.boltzmannweight
new_data.append(data)
self.new_data = new_data
def __getitem__(self, idx):
data = self.new_data[idx].clone()
if self.transform is not None:
data = self.transform(data)
return data
def __len__(self):
return len(self.new_data)
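
# Usage sketch (path is a placeholder; each item packs all conformations of one molecule):
# packed = PackedConformationDataset("qm9_confs.pkl")
# sample = packed[0]
# n_conf = sample.num_pos_ref.item()
# sample.pos_ref.view(n_conf, -1, 3)   # reference positions, shape (num_conf, num_node, 3)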
|
the-stack_0_13358 |
import logging
log = logging.getLogger(__name__)
import numpy
import westpa
from oldtools.aframe import AnalysisMixin, ArgumentError
class IterRangeMixin(AnalysisMixin):
'''A mixin for limiting the range of data considered for a given analysis. This should go after
DataManagerMixin'''
def __init__(self):
super(IterRangeMixin,self).__init__()
self.first_iter = None
self.last_iter = None
self.iter_step = 1
include_args = self.include_args.setdefault('IterRangeMixin',{})
include_args.setdefault('first_iter', True)
include_args.setdefault('last_iter', True)
include_args.setdefault('iter_step',True)
def add_args(self, parser, upcall = True):
if upcall:
try:
upfunc = super(IterRangeMixin,self).add_args
except AttributeError:
pass
else:
upfunc(parser)
group = parser.add_argument_group('analysis range')
if self.include_args['IterRangeMixin']['first_iter']:
group.add_argument('--start', '--begin', '--first', dest='first_iter', type=int, metavar='N_ITER', default=1,
help='''Begin analysis at iteration N_ITER (default: %(default)d).''')
if self.include_args['IterRangeMixin']['last_iter']:
group.add_argument('--stop', '--end', '--last', dest='last_iter', type=int, metavar='N_ITER',
help='''Conclude analysis with N_ITER, inclusive (default: last completed iteration).''')
if self.include_args['IterRangeMixin']['iter_step']:
group.add_argument('--step', dest='iter_step', type=int, metavar='STEP',
help='''Analyze/report in blocks of STEP iterations.''')
def process_args(self, args, upcall = True):
if self.include_args['IterRangeMixin']['first_iter']:
self.first_iter = args.first_iter or 1
if self.include_args['IterRangeMixin']['last_iter']:
self.last_iter = args.last_iter
if self.include_args['IterRangeMixin']['iter_step']:
self.iter_step = args.iter_step or 1
if upcall:
try:
upfunc = super(IterRangeMixin,self).process_args
except AttributeError:
pass
else:
upfunc(args)
def check_iter_range(self):
assert hasattr(self, 'data_manager') and self.data_manager is not None
self.first_iter = int(max(self.first_iter, 1))
if self.last_iter is None or self.last_iter > self.data_manager.current_iteration - 1:
self.last_iter = int(self.data_manager.current_iteration - 1)
if self.first_iter == self.last_iter:
raise ArgumentError('first and last iterations are the same')
westpa.rc.pstatus('Processing iterations from {self.first_iter:d} to {self.last_iter:d}, inclusive (step size {self.iter_step:d})'.format(self=self))
def iter_block_iter(self):
'''Return an iterable of (block_first,block_last+1) over the blocks of iterations
selected by --first/--last/--step. NOTE WELL that the second of the pair follows Python
iterator conventions and returns one past the last element of the block.'''
for blkfirst in range(self.first_iter, self.last_iter+1, self.iter_step):
yield(blkfirst, min(self.last_iter, blkfirst+self.iter_step-1)+1)
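        # Example: first_iter=1, last_iter=10, iter_step=4 yields (1, 5), (5, 9), (9, 11).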
def n_iter_blocks(self):
'''Return the number of blocks of iterations (as returned by ``iter_block_iter``) selected by --first/--last/--step.'''
npoints = self.last_iter - self.first_iter + 1
if npoints % self.iter_step == 0:
return npoints // self.iter_step
else:
return npoints // self.iter_step + 1
def record_data_iter_range(self, h5object, first_iter = None, last_iter = None):
'''Store attributes ``first_iter`` and ``last_iter`` on the given HDF5 object (group/dataset)'''
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
h5object.attrs['first_iter'] = first_iter
h5object.attrs['last_iter'] = last_iter
def record_data_iter_step(self, h5object, iter_step = None):
'''Store attribute ``iter_step`` on the given HDF5 object (group/dataset).'''
iter_step = iter_step or self.iter_step
h5object.attrs['iter_step'] = iter_step
def check_data_iter_range_least(self, h5object, first_iter = None, last_iter = None):
'''Check that the given HDF5 object contains (as denoted by its ``first_iter``/``last_iter`` attributes) at least the
data range specified.'''
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
obj_first_iter = h5object.attrs.get('first_iter')
obj_last_iter = h5object.attrs.get('last_iter')
return (obj_first_iter <= first_iter and obj_last_iter >= last_iter)
def check_data_iter_range_equal(self, h5object, first_iter = None, last_iter = None):
'''Check that the given HDF5 object contains per-iteration data for exactly the specified iterations (as denoted by the
object's ``first_iter`` and ``last_iter`` attributes'''
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
obj_first_iter = h5object.attrs.get('first_iter')
obj_last_iter = h5object.attrs.get('last_iter')
return (obj_first_iter == first_iter and obj_last_iter == last_iter)
def check_data_iter_step_conformant(self, h5object, iter_step = None):
'''Check that the given HDF5 object contains per-iteration data at an iteration stride suitable for extracting data
with the given stride. (In other words, is the given ``iter_step`` a multiple of the stride with
which data was recorded.)'''
iter_step = iter_step or self.iter_step
obj_iter_step = h5object.attrs.get('iter_step')
return (obj_iter_step % iter_step == 0)
def check_data_iter_step_equal(self, h5object, iter_step = None):
'''Check that the given HDF5 object contains per-iteration data at an iteration stride the same as
that specified.'''
iter_step = iter_step or self.iter_step
obj_iter_step = h5object.attrs.get('iter_step')
return (obj_iter_step == iter_step)
def slice_per_iter_data(self, dataset, first_iter = None, last_iter = None, iter_step = None, axis=0):
'''Return the subset of the given dataset corresponding to the given iteration range and stride. Unless
otherwise specified, the first dimension of the dataset is the one sliced.'''
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
iter_step = iter_step or self.iter_step
ds_first_iter = dataset.attrs['first_iter']
ds_last_iter = dataset.attrs['last_iter']
ds_iter_step = dataset.attrs.get('iter_step', 1)
if first_iter < ds_first_iter or last_iter > ds_last_iter or ds_iter_step % iter_step > 0:
            raise IndexError(('Cannot slice requested iterations [{:d},{:d}] (stride={:d}) from dataset {!r} '
                              + 'with range [{:d},{:d}] (stride={:d}).').format(first_iter, last_iter, iter_step, dataset,
                                                                                ds_first_iter, ds_last_iter, ds_iter_step))
dimslices = []
for idim in range(len(dataset.shape)):
if idim == axis:
dimslices.append(slice(first_iter - ds_first_iter, last_iter - ds_first_iter + iter_step, iter_step))
else:
dimslices.append(slice(None,None,None))
dimslices = tuple(dimslices)
log.debug('slicing {!r} with {!r}'.format(dataset, dimslices))
data = dataset[dimslices]
log.debug('resulting data is of shape {!r}'.format(data.shape))
return data
def iter_range(self, first_iter = None, last_iter = None, iter_step = None):
first_iter = first_iter or self.first_iter
last_iter = last_iter or self.last_iter
iter_step = iter_step or self.iter_step
return numpy.arange(first_iter, last_iter + 1, iter_step)
|
the-stack_0_13361 | # this is the script that I used to create output videos and gifs
# simply put all the animations in the source folder, one animation per folder
import os
import subprocess
import logging
first_frame_duration = 1
last_frame_duration = 5
fps = 60
source = "frames"
videos_dir = "videos"
h264_videos_dir = "h264"
gifs_dir = "gifs"
completed = 0
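
# Expected input layout (an assumption based on the ffmpeg patterns below): one sub-folder
# per animation under "frames/", with zero-padded frames, e.g. frames/anim1/0000001.png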
logging.basicConfig(level=logging.INFO, filename="generate-videos.log", filemode="w+", format='%(asctime)s %(levelname)s %(message)s')
logging.info("Creating folders")
if not os.path.exists(videos_dir):
os.makedirs(videos_dir)
if not os.path.exists(h264_videos_dir):
os.makedirs(h264_videos_dir)
if not os.path.exists(gifs_dir):
os.makedirs(gifs_dir)
logging.info("Listing file")
dirs = os.listdir(source)
for dir in dirs:
logging.info(f"Started conversion for folder {dir}")
# LIST OF FILES
files = os.listdir(f"{source}/{dir}")
# create video
options = f"ffmpeg -y -r {fps} -i {source}/{dir}/%07d.png -loop 0 {videos_dir}/{dir}.mp4"
subprocess.run(options.split(" "))
logging.info("mp4 video created")
# create h264 video
options = f"ffmpeg -y -r {fps} -i {source}/{dir}/%07d.png -c:a aac -b:a 256k -ar 44100 -c:v libx264 -pix_fmt yuv420p -r {fps} {h264_videos_dir}/{dir}_h264.mp4"
subprocess.run(options.split(" "))
logging.info("h264 video created")
# create gif
options = f"ffmpeg -y -i {videos_dir}/{dir}.mp4 -loop 0 -filter_complex fps=25,scale=500:-1,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse {gifs_dir}/{dir}.gif"
subprocess.run(options.split(" "))
logging.info("gif video created")
logging.info(f"Completed folder {dir}! Folder {completed + 1}/{len(dirs)}")
completed += 1
logging.info("Removing temp folder")
logging.info("Everything completed")
|
the-stack_0_13362 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.errors",
marshal="google.ads.googleads.v10",
manifest={"RequestErrorEnum",},
)
class RequestErrorEnum(proto.Message):
r"""Container for enum describing possible request errors.
"""
class RequestError(proto.Enum):
r"""Enum describing possible request errors."""
UNSPECIFIED = 0
UNKNOWN = 1
RESOURCE_NAME_MISSING = 3
RESOURCE_NAME_MALFORMED = 4
BAD_RESOURCE_ID = 17
INVALID_CUSTOMER_ID = 16
OPERATION_REQUIRED = 5
RESOURCE_NOT_FOUND = 6
INVALID_PAGE_TOKEN = 7
EXPIRED_PAGE_TOKEN = 8
INVALID_PAGE_SIZE = 22
REQUIRED_FIELD_MISSING = 9
IMMUTABLE_FIELD = 11
TOO_MANY_MUTATE_OPERATIONS = 13
CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT = 14
CANNOT_MODIFY_FOREIGN_FIELD = 15
INVALID_ENUM_VALUE = 18
DEVELOPER_TOKEN_PARAMETER_MISSING = 19
LOGIN_CUSTOMER_ID_PARAMETER_MISSING = 20
VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN = 21
CANNOT_RETURN_SUMMARY_ROW_FOR_REQUEST_WITHOUT_METRICS = 29
CANNOT_RETURN_SUMMARY_ROW_FOR_VALIDATE_ONLY_REQUESTS = 30
INCONSISTENT_RETURN_SUMMARY_ROW_VALUE = 31
TOTAL_RESULTS_COUNT_NOT_ORIGINALLY_REQUESTED = 32
RPC_DEADLINE_TOO_SHORT = 33
__all__ = tuple(sorted(__protobuf__.manifest))
|
the-stack_0_13363 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.structures import ImageList
from .build import SSHEAD_REGISTRY
from .ss_layers import Flatten
class CycleEnergyHead(nn.Module):
def __init__(self, cfg, cin):
super(CycleEnergyHead, self).__init__()
self.name = 'cycle'
self.input = 'ROI'
self.device = torch.device(cfg.MODEL.DEVICE)
self.coef = cfg.MODEL.SS.COEF
self.enc1 = nn.Sequential(
nn.Conv2d(cin, 256, kernel_size=3, padding=0, bias=True),
# nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=0, bias=True),
# nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(1)
# nn.Flatten(start_dim=1, end_dim=-1)
)
self.map_back = nn.Linear(256, 256*49)
self.topk = 100
self.bs = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
self.scale = cfg.MODEL.SS.LOSS_SCALE
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 0)
def cal_pair_dist(self, feat_u, feat_v):
# finding the similarity score of feat_v
us = feat_u.size(0)
vs = feat_v.size(0)
fs = feat_u.size(1)
assert fs == feat_v.size(1)
uu = feat_u.unsqueeze(1).repeat(1, vs, 1).view(-1, fs)
vv = feat_v.repeat(us, 1)
diff = uu - vv
dist = (diff * diff).sum(dim=1).view(us, vs) * self.coef
score = F.softmax(dist, dim=1)
return dist, score
def computer_corr_softmax(self, feat_u, feat_v):
# track forward
# calculate the L2 distance between feat_u and feat_v
sim_dist, sim_score = self.cal_pair_dist(feat_u, feat_v)
soft_v = torch.matmul(sim_score, feat_v)
# track backward
back_dist, back_score = self.cal_pair_dist(soft_v, feat_u)
labels = torch.arange(len(feat_u)).long().to(back_dist.device)
loss = nn.CrossEntropyLoss()(back_dist, labels)
        if back_dist.size(1) == 0:  # there are no objects in the first frame.
print(back_dist.size(), feat_u.size(), feat_v.size(), loss)
correct = (back_dist.argmax(dim=1) == labels).float().sum()
count = len(back_dist)
return loss, correct, count, soft_v
def forward(self, features, prev_boxes=None):
features, idxs, proposals = features
total_loss = 0.0
corrects = 0
counts = 0
pos_fea= None
neg_fea = None
prev = 0
# since the number of proposals might be different for different pairs
if prev_boxes is not None:
feat_u = self.enc1(features)
feat_v = self.enc1(prev_boxes)
feat_u = feat_u.view(feat_u.size(0), feat_u.size(1))
feat_v = feat_v.view(feat_v.size(0), feat_v.size(1))
if feat_u.size(0) == 0:
print(feat_u, feat_v)
return {'loss_cycle': feat_u.sum() * self.scale}, 0.
total_loss, correct, cnt, _ = self.computer_corr_softmax(feat_u, feat_v)
# print('correct: ', correct, 'cnt: ', cnt)
total_acc = correct.item()/cnt
else:
for i in range(0, len(idxs), 2):
u = features[prev:idxs[i]]
v = features[idxs[i]: idxs[i+1]]
prev = idxs[i+1]
feat_u = self.enc1(u)
feat_v = self.enc1(v)
feat_u = feat_u.view(feat_u.size(0), feat_u.size(1))
feat_v = feat_v.view(feat_v.size(0), feat_v.size(1))
if feat_u.size(0) == 0:
print(feat_u.size(), feat_v.size())
loss = feat_u.sum()
correct = 0
cnt = 0
else:
loss, correct, cnt, soft_target = self.computer_corr_softmax(feat_u, feat_v)
if pos_fea is None:
pos_fea = self.map_back(feat_u)
neg_fea = self.map_back(soft_target)
else:
pos_fea = torch.cat([pos_fea, self.map_back(feat_u)], 0)
neg_fea = torch.cat([neg_fea, self.map_back(soft_target)], 0)
total_loss += loss*cnt
corrects += correct
counts += cnt
# breakpoint()
if counts != 0:
total_loss /= counts
total_acc = corrects/counts
else:
total_acc = 0.
if pos_fea is not None:
assert len(pos_fea) == len(neg_fea)
# print('total loss: {:.4f}\ttotal acc: {:.3f}'.format(total_loss, total_acc))
return {'loss_cycle': total_loss * self.scale}, total_acc, torch.cat([pos_fea, neg_fea], 0)
else:
return {'loss_cycle': total_loss * self.scale}, total_acc, None
@SSHEAD_REGISTRY.register()
def build_cycle_energy_head(cfg, input_shape):
in_channels = cfg.MODEL.FPN.OUT_CHANNELS
rot_head = CycleEnergyHead(cfg, in_channels)
return rot_head
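
# Usage sketch (assumes the registry exposes a detectron2-style ``get``; cfg/input_shape come from the caller):
# head = SSHEAD_REGISTRY.get("build_cycle_energy_head")(cfg, input_shape)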
|
the-stack_0_13364 | import argparse
import speakeasy
class DbgView(speakeasy.Speakeasy):
"""
Print debug port prints to the console
"""
def __init__(self, debug=False):
super(DbgView, self).__init__(debug=debug)
def debug_print_hook(self, emu, api_name, func, params):
# Call the DbgPrint* function and print the formatted string to the console
rv = func(params)
formatted_str = params[0]
print(formatted_str)
return rv
def debug_printex_hook(self, emu, api_name, func, params):
# Call the DbgPrintEx function and print the formatted string to the console
rv = func(params)
formatted_str = params[2]
print(formatted_str)
return rv
def main(args):
dbg = DbgView()
module = dbg.load_module(args.file)
dbg.add_api_hook(dbg.debug_print_hook, "ntoskrnl", "DbgPrint")
dbg.add_api_hook(dbg.debug_printex_hook, "ntoskrnl", "DbgPrintEx")
# Emulate the module
dbg.run_module(module, all_entrypoints=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Print debug port prints to the console"
)
parser.add_argument(
"-f",
"--file",
action="store",
dest="file",
required=True,
help="Path of driver to emulate",
)
args = parser.parse_args()
main(args)
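    # Example invocation (the driver path is a placeholder):
    #   python dbgview.py -f C:\drivers\example.sys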
|
the-stack_0_13365 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC parameter
Case Name : Set enable_hashjoin with gs_guc set and verify the expected result
Description :
    1. Query the default value of enable_hashjoin
    2. Set enable_hashjoin to off
    3. Restart the cluster so that the change takes effect
    4. Verify the expected result
    5. Restore the default value
Expect :
    1. Querying the default value of enable_hashjoin succeeds
    2. Setting enable_hashjoin to off succeeds
    3. Restarting the cluster succeeds
    4. The parameter value is off, as expected
    5. Restoring the default value succeeds
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
LOG = Logger()
class GucQueryplan(unittest.TestCase):
def setUp(self):
LOG.info('----this is setup------')
LOG.info(
'--------Opengauss_Function_Guc_Queryplan_Case0007--------')
self.comsh = CommonSH('PrimaryDbUser')
self.constant = Constant()
self.pv = ''
def test_Guc_queryplan(self):
LOG.info(
            '--------check the default value of enable_hashjoin-----')
msg = self.comsh.execut_db_sql('show enable_hashjoin;')
LOG.info(msg)
self.pv = msg.splitlines()[-2].strip()
LOG.info(
            '------set enable_hashjoin to off----')
msg = self.comsh.execute_gsguc('set', self.constant.GSGUC_SUCCESS_MSG,
'enable_hashjoin=off')
LOG.info(msg)
        LOG.info('-------restart the database------')
self.comsh.restart_db_cluster()
status = self.comsh.get_db_cluster_status()
self.assertTrue("Normal" in status or 'Degraded' in status)
LOG.info(
            '-------verify the expected result-------')
msg = self.comsh.execut_db_sql('show enable_hashjoin;')
LOG.info(msg)
res = msg.splitlines()[-2].strip()
self.assertIn(self.constant.BOOLEAN_VALUES[1], res)
def tearDown(self):
LOG.info(
'----this is tearDown-------')
LOG.info(
            '-------restore the default value------')
msg = self.comsh.execute_gsguc('set', self.constant.GSGUC_SUCCESS_MSG,
f'enable_hashjoin={self.pv}')
LOG.info(msg)
stopmsg = self.comsh.stop_db_cluster()
LOG.info(stopmsg)
startmsg = self.comsh.start_db_cluster()
LOG.info(startmsg)
LOG.info(
            '------Opengauss_Function_Guc_Queryplan_Case0007 finished------')
|
the-stack_0_13366 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from fairseq import utils
import torch
from . import FairseqCriterion, register_criterion
@register_criterion('cokd_loss')
class COKDCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.kd_alpha = args.kd_alpha
self.eps = args.label_smoothing
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--kd-alpha', default=0.5, type=float)
parser.add_argument('--num-teachers', default=1, type=int)
# fmt: on
def forward(self, model, sample, reduce=True, teachers = None):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
if teachers is None:
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
else:
net_output_teachers = [teacher(**sample['net_input']) for teacher in teachers]
loss, nll_loss = self.compute_kd_loss(model, net_output, net_output_teachers, sample, reduce=reduce)
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)#fairseq/models/fairseq_model.py:sample['target']
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = self.eps / lprobs.size(-1)
loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
def compute_kd_loss(self, model, net_output, net_output_teachers, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
teacher_probs = [model.get_normalized_probs(net_output_teacher, log_probs=False) for net_output_teacher in net_output_teachers]
teacher_prob = torch.mean(torch.stack(teacher_probs, dim = 0), dim = 0)
teacher_prob = teacher_prob.view(-1, teacher_prob.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
non_pad_mask = target.ne(self.padding_idx)
kd_loss = (-lprobs * teacher_prob).sum(dim = -1, keepdim=True)[non_pad_mask]
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
if reduce:
nll_loss = nll_loss.sum()
kd_loss = kd_loss.sum()
loss = nll_loss * (1 - self.kd_alpha) + kd_loss * self.kd_alpha
return loss, nll_loss
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
return {
'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2),
'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
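
# Training usage sketch (assumed command line; the criterion name matches the
# @register_criterion('cokd_loss') decorator above, the other flags are standard fairseq options):
#   fairseq-train data-bin/my-corpus --arch transformer \
#       --criterion cokd_loss --kd-alpha 0.5 --num-teachers 2 --label-smoothing 0.1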
|
the-stack_0_13368 | import math
import random
from collections import namedtuple, deque
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from rllite.common import ReplayBuffer2
USE_CUDA = torch.cuda.is_available()
class StochasticMDP:
def __init__(self):
self.end = False
self.current_state = 2
self.num_actions = 2
self.num_states = 6
self.p_right = 0.5
def reset(self):
self.end = False
self.current_state = 2
state = np.zeros(self.num_states)
state[self.current_state - 1] = 1.
return state
def step(self, action):
if self.current_state != 1:
if action == 1:
if random.random() < self.p_right and self.current_state < self.num_states:
self.current_state += 1
else:
self.current_state -= 1
if action == 0:
self.current_state -= 1
if self.current_state == self.num_states:
self.end = True
state = np.zeros(self.num_states)
state[self.current_state - 1] = 1.
if self.current_state == 1:
if self.end:
return state, 1.00, True, {}
else:
return state, 1.00 / 100.00, True, {}
else:
return state, 0.0, False, {}
class Net(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Net, self).__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.layers = nn.Sequential(
nn.Linear(num_inputs, 256),
nn.ReLU(),
nn.Linear(256, num_outputs)
)
def forward(self, x):
return self.layers(x)
def act(self, state, epsilon):
if random.random() > epsilon:
state = torch.FloatTensor(state).unsqueeze(0)
action = self.forward(state).max(1)[1]
return action.data[0]
else:
return random.randrange(self.num_outputs)
class HierarchicalDQN(object):
def __init__(self):
self.env = StochasticMDP()
self.num_goals = self.env.num_states
self.num_actions = self.env.num_actions
self.model = Net(2*self.num_goals, self.num_actions)
self.target_model = Net(2*self.num_goals, self.num_actions)
self.meta_model = Net(self.num_goals, self.num_goals)
self.target_meta_model = Net(self.num_goals, self.num_goals)
if USE_CUDA:
self.model = self.model.cuda()
self.target_model = self.target_model.cuda()
self.meta_model = self.meta_model.cuda()
self.target_meta_model = self.target_meta_model.cuda()
self.optimizer = optim.Adam(self.model.parameters())
self.meta_optimizer = optim.Adam(self.meta_model.parameters())
self.replay_buffer = ReplayBuffer2(10000)
self.meta_replay_buffer = ReplayBuffer2(10000)
def to_onehot(self, x):
oh = np.zeros(6)
oh[x - 1] = 1.
return oh
def update(self, model, optimizer, replay_buffer, batch_size):
if batch_size > len(replay_buffer):
return
state, action, reward, next_state, done = replay_buffer.sample(batch_size)
state = torch.FloatTensor(state)
next_state = torch.FloatTensor(next_state)
action = torch.LongTensor(action)
reward = torch.FloatTensor(reward)
done = torch.FloatTensor(done)
q_value = model(state)
q_value = q_value.gather(1, action.unsqueeze(1)).squeeze(1)
next_q_value = model(next_state).max(1)[0]
expected_q_value = reward + 0.99 * next_q_value * (1 - done)
loss = (q_value - expected_q_value).pow(2).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
def learn(self, num_frames=100000, epsilon_start=1.0, epsilon_final=0.01, epsilon_decay=500):
frame_idx = 1
state = self.env.reset()
done = False
all_rewards = []
episode_reward = 0
while frame_idx < num_frames:
goal = self.meta_model.act(state, epsilon_final + (epsilon_start - epsilon_final) * math.exp(-1. * frame_idx / epsilon_decay))
onehot_goal = self.to_onehot(goal)
meta_state = state
extrinsic_reward = 0
while not done and goal != np.argmax(state):
goal_state = np.concatenate([state, onehot_goal])
action = self.model.act(goal_state, epsilon_final + (epsilon_start - epsilon_final) * math.exp(-1. * frame_idx / epsilon_decay))
next_state, reward, done, _ = self.env.step(action)
episode_reward += reward
extrinsic_reward += reward
intrinsic_reward = 1.0 if goal == np.argmax(next_state) else 0.0
self.replay_buffer.push(goal_state, action, intrinsic_reward, np.concatenate([next_state, onehot_goal]),
done)
state = next_state
self.update(self.model, self.optimizer, self.replay_buffer, 32)
self.update(self.meta_model, self.meta_optimizer, self.meta_replay_buffer, 32)
frame_idx += 1
if frame_idx % 1000 == 0:
n = 100 # mean reward of last 100 episodes
plt.figure(figsize=(20, 5))
plt.title(frame_idx)
plt.plot([np.mean(all_rewards[i:i + n]) for i in range(0, len(all_rewards), n)])
plt.show()
self.meta_replay_buffer.push(meta_state, goal, extrinsic_reward, state, done)
if done:
state = self.env.reset()
done = False
all_rewards.append(episode_reward)
episode_reward = 0
print(frame_idx)
if __name__ == '__main__':
model = HierarchicalDQN()
model.learn()
|
the-stack_0_13369 | import numpy
try:
import cupy
xpy_default=cupy
junk_to_check_installed = cupy.array(5) # this will fail if GPU not installed correctly
except:
xpy_default=numpy
def TimeDelayFromEarthCenter(
detector_earthfixed_xyz_metres,
source_right_ascension_radians,
source_declination_radians,
greenwich_mean_sidereal_time,
xpy=xpy_default, dtype=numpy.float64,
):
"""
Parameters
----------
detector_earthfixed_xyz_metres : array_like, shape = det_shape + (3,)
Location of detector(s) relative to Earth's center in meters. May provide
multiple detectors, last axis must be (x,y,z) but other axes can take
whatever form is desired.
source_right_ascension_radians : array_like, shape = sample_shape
Right ascension of source in radians, can be an arbitrary dimensional
array.
source_declination_radians : array_like, shape = sample_shape
Declination of source in radians, can be an arbitrary dimensional array.
greenwich_mean_sidereal_time : float
Should be equivalent to XLALGreenwichMeanSiderealTime(gpstime).
Returns
-------
time_delay_from_earth_center : array_like, shape = det_shape + sample_shape
"""
negative_speed_of_light = xpy.asarray(-299792458.0)
det_shape = detector_earthfixed_xyz_metres.shape[:-1]
sample_shape = source_right_ascension_radians.shape
cos_dec = xpy.cos(source_declination_radians)
greenwich_hour_angle = (
greenwich_mean_sidereal_time - source_right_ascension_radians
)
ehat_src = xpy.empty(sample_shape + (3,), dtype=dtype)
ehat_src[...,0] = cos_dec * xpy.cos(greenwich_hour_angle)
ehat_src[...,1] = -cos_dec * xpy.sin(greenwich_hour_angle)
ehat_src[...,2] = xpy.sin(source_declination_radians)
neg_separation = xpy.inner(detector_earthfixed_xyz_metres, ehat_src)
return xpy.divide(
neg_separation, negative_speed_of_light,
out=neg_separation,
)
def ComputeDetAMResponse(
detector_response_matrix,
source_right_ascension_radians,
source_declination_radians,
source_polarization_radians,
greenwich_mean_sidereal_time,
xpy=xpy_default, dtype_real=numpy.float64, dtype_complex=numpy.complex128,
):
"""
Parameters
----------
detector_response_matrix : array_like, shape = det_shape + (3, 3)
Detector response matrix, or matrices for multiple detectors. Last two
axes must be 3-by-3 response matrix, and may include arbitrary axes before
that for various detectors.
source_right_ascension_radians : array_like, shape = sample_shape
Right ascension of source in radians, can be an arbitrary dimensional
array.
source_declination_radians : array_like, shape = sample_shape
Declination of source in radians, can be an arbitrary dimensional array.
source_polarization_radians : array_like, shape = sample_shape
Polarization angle of source in radians, can be an arbitrary dimensional
array.
greenwich_mean_sidereal_time : float
Should be equivalent to XLALGreenwichMeanSiderealTime(gpstime).
Returns
-------
F : array_like, shape = det_shape + sample_shape
"""
det_shape = detector_response_matrix.shape[:-1]
sample_shape = source_right_ascension_radians.shape
matrix_shape = 3, 3
# Initialize trig matrices.
X = xpy.empty(sample_shape+(3,), dtype=dtype_real)
Y = xpy.empty(sample_shape+(3,), dtype=dtype_real)
# Greenwich hour angle of source in radians.
source_greenwich_radians = (
greenwich_mean_sidereal_time - source_right_ascension_radians
)
# Pre-compute trig functions
cos_gha = xpy.cos(source_greenwich_radians)
sin_gha = xpy.sin(source_greenwich_radians)
cos_dec = xpy.cos(source_declination_radians)
sin_dec = xpy.sin(source_declination_radians)
cos_psi = xpy.cos(source_polarization_radians)
sin_psi = xpy.sin(source_polarization_radians)
# Populate trig matrices.
X[...,0] = -cos_psi*sin_gha - sin_psi*cos_gha*sin_dec
X[...,1] = -cos_psi*cos_gha + sin_psi*sin_gha*sin_dec
X[...,2] = sin_psi*cos_dec
Y[...,0] = sin_psi*sin_gha - cos_psi*cos_gha*sin_dec
Y[...,1] = sin_psi*cos_gha + cos_psi*sin_gha*sin_dec
Y[...,2] = cos_psi*cos_dec
# Compute F for each polarization state.
F_plus = (
X*xpy.inner(X, detector_response_matrix) -
Y*xpy.inner(Y, detector_response_matrix)
).sum(axis=-1)
F_cross = (
X*xpy.inner(Y, detector_response_matrix) +
Y*xpy.inner(X, detector_response_matrix)
).sum(axis=-1)
return F_plus + 1.0j*F_cross
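
# Usage sketch (all numeric values below are placeholders, not real detector data):
# det_xyz = numpy.array([4.5e6, 8.4e5, 4.4e6])      # detector position in metres from Earth's centre
# resp = numpy.zeros((3, 3))                        # 3x3 detector response matrix
# ra = numpy.array([1.0]); dec = numpy.array([-0.5]); psi = numpy.array([0.3])
# gmst = 2.1                                        # Greenwich mean sidereal time (radians)
# dt = TimeDelayFromEarthCenter(det_xyz, ra, dec, gmst, xpy=numpy)
# F = ComputeDetAMResponse(resp, ra, dec, psi, gmst, xpy=numpy)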
|
the-stack_0_13373 | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('hs_core', '0020_baseresource_collections'),
]
operations = [
migrations.CreateModel(
name='FundingAgency',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('agency_name', models.TextField()),
('award_title', models.TextField(null=True, blank=True)),
('award_number', models.TextField(null=True, blank=True)),
('agency_url', models.URLField(null=True, blank=True)),
('content_type', models.ForeignKey(related_name='hs_core_fundingagency_related', to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
),
]
|
the-stack_0_13374 | import requests
import json
from lxml import html
from lxml import etree
from bs4 import BeautifulSoup
import time
import numpy
import getpass
import os
clear = lambda: os.system('cls')
import json
"""
Most of these functions work through REST APIs, but due to the lack of documentation about some features,
this script also uses HTTP request scraping (for example in get_last_mark()).
This is why there are two login functions: liucLogin() logs in through HTTP requests,
while login() logs in through the REST APIs.
Further investigation into the API documentation should fix this and make the script work ONLY through the REST API.
The get_last_mark() function works through HTTP scraping and requires liucLogin().
Please note that the script works only with student accounts. """
#not all urls are from official REST API's endpoints
url_login = "https://sol.liuc.it/esse3/auth/Logon.do"
url_esiti = 'https://sol.liuc.it/esse3/auth/studente/Appelli/BachecaEsiti.do'
url_appelli = 'https://sol.liuc.it/e3rest/api/calesa-service-v1/appelli/'
url_login_end = "https://sol.liuc.it/e3rest/api/login/"
#average endpoint = url_average + matId (get this from login()) + "/medie"
url_average = 'http://sol.liuc.it/e3rest/api/libretto-service-v2/libretti/'
# example: ".../e3rest/api/libretto-service-v2/libretti/999/medie" returns the average of the student with matId 999
url_libretto = 'http://sol.liuc.it/e3rest/api/libretto-service-v2/libretti/'
#start requests session
session = requests.session()
session.get(url_login)
#login through API
#return basic info about the student
def login(username1, pwd):
response = session.get(url_login_end, auth=(username1, pwd))
user_details_json = json.loads(response.text)
user_details = []
matId = (user_details_json["user"]["trattiCarriera"][0]["matId"])
stuId = (user_details_json["user"]["trattiCarriera"][0]["stuId"])
matricola = (user_details_json["user"]["trattiCarriera"][0]["matricola"])
name = (user_details_json["user"]["firstName"])
surname = (user_details_json["user"]["lastName"])
user_details.append(matId)
user_details.append(stuId)
user_details.append(matricola)
user_details.append(name)
user_details.append(surname)
return user_details
#return a matrix with available exams and their details
#this function works through JSON REST API
def getAppelli(username1, pwd):
appelli = session.get(url_appelli, auth=(username1, pwd))
appelli_json = json.loads(appelli.text)
appelli_detail = [[]]
advanced_details_exam = [[]]
    # look for exam attributes, so I can search for exam descriptions
    # first endpoint = exam id
    # second endpoint = input(exam_id) -> output(exam_details)
for i in range(len(appelli_json)):
id_appello = appelli_json[i]["adDefAppId"]
id_corso = appelli_json[i]["cdsDefAppId"]
desc_appello = appelli_json[i]["adDes"]
appelli_detail.insert(i, [desc_appello, id_appello, id_corso])
#look for exam details, giving as input exam id
for i in range(len(appelli_detail) - 1):
detail_endpoints = url_appelli + str(appelli_detail[i][2]) + "/" + str(appelli_detail[i][1])
get_exam_info = session.get(detail_endpoints, auth=(username1, pwd))
exam_info_json = json.loads(get_exam_info.text)
""" print(exam_info_json)
print(detail_endpoints) """
for j in range(len(exam_info_json) - 1):
corso = exam_info_json[j]["adDes"]
data_appello = exam_info_json[j]["dataInizioApp"]
data_inizio = exam_info_json[j]["dataInizioIscr"]
data_fine = exam_info_json[j]["dataFineIscr"]
tipo_appello = exam_info_json[j]["desApp"]
advanced_details_exam.insert((j+i), [corso, data_appello, tipo_appello, data_inizio, data_fine])
return advanced_details_exam
#return average and most likely graduation grade
def get_media(username1, pwd):
matricola_id = login(username1, pwd)[0]
personal_url_average = url_average + str(matricola_id) + "/medie"
getAverage = session.get(personal_url_average, auth=(username1,pwd))
average_json = json.loads(getAverage.text)
average = average_json[1]["media"]
votolaurea = average_json[3]["media"]
return average, votolaurea
#return a matrix in which each line contains [exam name, exam grade]
#if an exam has not a grade, return [exam name, "---"]
def get_libretto(username1, pwd):
libretto = [[]]
matricola_id = login(username1, pwd)[0]
personal_url_libretto = url_libretto + str(matricola_id) + "/righe/"
response = session.get(personal_url_libretto, auth = (username1, pwd))
libretto_json = json.loads(response.text)
num_esami_da_dare = 0
for i in range(len(libretto_json)):
esame_libretto = libretto_json[i]["adDes"]
voto_libretto = libretto_json[i]["esito"]["voto"]
if voto_libretto == None:
voto_libretto = "---"
num_esami_da_dare = num_esami_da_dare + 1
libretto.insert(i, [esame_libretto, voto_libretto])
    # adding info about how many exams are finished
    num_esami_dati = len(libretto_json) - num_esami_da_dare
    # insert the info in the last line of the list
esami_dati_da_dare = [num_esami_dati, num_esami_da_dare]
return libretto, esami_dati_da_dare
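
# Usage sketch (credentials are placeholders; prompts the user and prints a few results):
# user = input("username: ")
# pwd = getpass.getpass()
# print(login(user, pwd))                 # [matId, stuId, matricola, name, surname]
# print(get_media(user, pwd))             # (average, expected graduation grade)
# libretto, progress = get_libretto(user, pwd)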
#----------------------------------------------------------------------------------------------------------------
def liucLogin(username1, pwd):
response = session.get(url_login, auth=(username1, pwd))
    # save the career selection page
tree = etree.HTML(response.text)
element = tree.xpath('//*[@id="gu_toolbar_sceltacarriera"]')
try:
content = etree.tostring(element[0])
url1 = content[108:113].decode('utf-8')
print("Accedo all'ultima carriera disponibile...")
url_carriera = "https://sol.liuc.it/esse3/auth/studente/SceltaCarrieraStudente.do?stu_id=" + url1
response = session.get(url_carriera, auth=(username1, pwd))
if (response.status_code) == 200:
print("Login riuscito. ")
else:
print("Login non riuscito. ")
except:
print("Login non riuscito ")
#check the last grades
def get_last_mark(username1, pwd):
response = session.get(url_esiti, auth=(username1,pwd))
html_esiti = BeautifulSoup(response.text, "html.parser")
    # the name needs to be fixed, I can't find the exact CSS selector, and it doesn't work with xpath either
    # check the e3rest documentation on esse3 for the JSON APIs
prof_esame_esito = html_esiti.select('td.detail_table:nth-child(3)')
data_esame_esito = html_esiti.select('td.detail_table:nth-child(1)')
voto_esame_esito = html_esiti.select('td.detail_table:nth-child(5) > form:nth-child(1)')
print(len(prof_esame_esito))
esiti = []
quanti_esiti = len(prof_esame_esito)
for i in range(quanti_esiti):
prof_esame_esito1 = prof_esame_esito[i].get_text()
data_esame_esito1 = data_esame_esito[i].get_text()
voto_esame_esito1 = voto_esame_esito[i].get_text()
info_esito = prof_esame_esito1 + " - " + data_esame_esito1 + " - " + voto_esame_esito1
info_esito = info_esito.replace("\n", "")
esiti.append(info_esito)
    return esiti
|
the-stack_0_13375 | #!/usr/bin/env python3
"""This example demonstrates using the file token manager for refresh tokens.
In order to run this program, you will first need to obtain a valid refresh token. You
can use the `obtain_refresh_token.py` example to help.
In this example, refresh tokens will be saved into a file `refresh_token.txt` relative
to your current working directory. If your current working directory is under version
control it is strongly encouraged you add `refresh_token.txt` to the version control
ignore list.
Usage:
EXPORT praw_client_id=<REDDIT_CLIENT_ID>
EXPORT praw_client_secret=<REDDIT_CLIENT_SECRET>
python3 use_file_token_manager.py
"""
import asyncio
import os
import sys
import aiofiles
import asyncpraw
from asyncpraw.util.token_manager import FileTokenManager
REFRESH_TOKEN_FILENAME = "refresh_token.txt"
async def initialize_refresh_token_file():
if os.path.isfile(REFRESH_TOKEN_FILENAME):
return
refresh_token = input("Initial refresh token value: ")
async with aiofiles.open(REFRESH_TOKEN_FILENAME, "w") as fp:
await fp.write(refresh_token)
async def main():
if "praw_client_id" not in os.environ:
sys.stderr.write("Environment variable ``praw_client_id`` must be defined\n")
return 1
if "praw_client_secret" not in os.environ:
sys.stderr.write(
"Environment variable ``praw_client_secret`` must be defined\n"
)
return 1
await initialize_refresh_token_file()
refresh_token_manager = FileTokenManager(REFRESH_TOKEN_FILENAME)
async with asyncpraw.Reddit(
token_manager=refresh_token_manager,
user_agent="use_file_token_manager/v0 by u/bboe",
) as reddit:
scopes = await reddit.auth.scopes()
if scopes == {"*"}:
print(f"{await reddit.user.me()} is authenticated with all scopes")
elif "identity" in scopes:
print(
f"{await reddit.user.me()} is authenticated with the following scopes:"
f" {scopes}"
)
else:
print(f"You are authenticated with the following scopes: {scopes}")
if __name__ == "__main__":
loop = asyncio.get_event_loop()
sys.exit(loop.run_until_complete(main()))
|
the-stack_0_13376 | import time
from rdfframes.knowledge_graph import KnowledgeGraph
from rdfframes.utils.constants import JoinType
from rdfframes.client.http_client import HttpClientDataFormat, HttpClient
# RDFPredicate is needed by movies_with_american_actors_optional(); the module path below is an assumption.
from rdfframes.rdfpredicate import RDFPredicate
def movies_with_american_actors_cache():
graph = KnowledgeGraph(graph_name='dbpedia')
dataset = graph.feature_domain_range('dbpp:starring', 'movie', 'actor')\
.expand('actor', [('dbpp:birthPlace', 'actor_country'), ('rdfs:label', 'actor_name')])\
.expand('movie', [('rdfs:label', 'movie_name'), ('dcterms:subject', 'subject'),
('dbpp:country', 'movie_country'), ('dbpp:genre', 'genre', True)])\
.cache()
# 26928 Rows. -- 4273 msec.
american_actors = dataset.filter({'actor_country': ['regex(str(?actor_country), "USA")']})
# 1606 Rows. -- 7659 msec.
prolific_actors = dataset.group_by(['actor'])\
.count('movie', 'movie_count', unique=True).filter({'movie_count': ['>= 200']})
#663,769 Rows. -- 76704 msec.
movies = american_actors.join(prolific_actors, join_col_name1='actor', join_type=JoinType.OuterJoin)\
.join(dataset, join_col_name1='actor')
#.select_cols(['movie_name', 'actor_name', 'genre'])
sparql_query = movies.to_sparql()
print(sparql_query)
def movies_with_american_actors():
graph = KnowledgeGraph(graph_name='dbpedia')
dataset1 = graph.feature_domain_range('dbpp:starring', 'movie1', 'actor')\
.expand('actor', [('dbpp:birthPlace', 'actor_country1'), ('rdfs:label', 'actor_name1')])\
.expand('movie1', [('rdfs:label', 'movie_name1'), ('dcterms:subject', 'subject1'),
('dbpp:country', 'movie_country1'), ('dbpp:genre', 'genre1', True)])
# 26928 Rows. -- 4273 msec.
american_actors = dataset1.filter({'actor_country1': ['regex(str(?actor_country1), "USA")']})
# 1606 Rows. -- 7659 msec.
dataset2 = graph.feature_domain_range('dbpp:starring', 'movie2', 'actor')\
.expand('actor', [('dbpp:birthPlace', 'actor_country2'), ('rdfs:label', 'actor_name2')])\
.expand('movie2', [('rdfs:label', 'movie_name2'), ('dcterms:subject', 'subject2'),
('dbpp:country', 'movie_country2'), ('dbpp:genre', 'genre2', True)])
prolific_actors = dataset2.group_by(['actor'])\
.count('movie2', 'movie_count2', unique=True).filter({'movie_count2': ['>= 200']})
#663,769 Rows. -- 76704 msec.
movies = american_actors.join(prolific_actors, join_col_name1='actor', join_type=JoinType.OuterJoin)\
# .join(dataset, join_col_name1='actor')
#.select_cols(['movie_name', 'actor_name', 'genre'])
sparql_query = movies.to_sparql()
print(sparql_query)
def movies_with_american_actors_optional():
graph = KnowledgeGraph(graph_uri='http://dbpedia.org',
prefixes={'dcterms': 'http://purl.org/dc/terms/',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
'dbpprop': 'http://dbpedia.org/property/',
'dbpr': 'http://dbpedia.org/resource/'})
dataset = graph.feature_domain_range('dbpprop:starring', domain_col_name='movie', range_col_name='actor')\
.expand('actor', [
RDFPredicate('dbpprop:birthPlace', 'actor_country', optional=True),
RDFPredicate('rdfs:label', 'actor_name', optional=True)])\
.expand('movie', [
RDFPredicate('rdfs:label', 'movie_name', optional=True),
RDFPredicate('dcterms:subject', 'subject', optional=True),
RDFPredicate('dbpprop:country', 'movie_country', optional=True)])\
.cache()
# 26928 Rows. -- 4273 msec.
american_actors = dataset.filter({'actor_country': ['regex(str(?actor_country), "USA")']})
# 1606 Rows. -- 7659 msec.
prolific_actors = dataset.group_by(['actor'])\
.count('movie', 'movie_count', unique=True).filter({'movie_count': ['>= 20', '<=30']})
# 663769 Rows. -- 76511 msec.
movies = american_actors.join(prolific_actors, join_col_name1='actor', join_type=JoinType.OuterJoin)\
.join(dataset, join_col_name1='actor')
sparql_query = movies.to_sparql()
print(sparql_query)
endpoint = 'http://10.161.202.101:8890/sparql/'
output_format = HttpClientDataFormat.PANDAS_DF
client = HttpClient(endpoint_url=endpoint, return_format=output_format)
df = dataset.execute(client, return_format=output_format)
print(df)
#movies_with_american_actors_optional()
start = time.time()
movies_with_american_actors()
duration = time.time()-start
print("Duration = {} sec".format(duration))
|
the-stack_0_13378 | ### Noisy DQN Procgen Config ###
env = {
# "name": it should be defined in the command. ex) python main.py --config config.AGENT.procgen --env.name coinrun
"render": False,
"gray_img": True,
"stack_frame": 4,
"no_op": False,
"reward_clip": True,
}
agent = {
"name": "noisy",
"network": "noisy",
"head": "cnn",
"gamma": 0.99,
"explore_ratio": 0.1,
"buffer_size": 1000000,
"batch_size": 32,
"start_train_step": 100000,
"target_update_period": 10000,
# noisy
"noise_type": "factorized", # [independent, factorized]
}
optim = {
"name": "adam",
"lr": 2.5e-4,
}
train = {
"training": True,
"load_path": None,
"run_step": 30000000,
"print_period": 10000,
"save_period": 100000,
"eval_iteration": 5,
"record": True,
"record_period": 300000,
# distributed setting
"update_period": 32,
"num_workers": 16,
}
|
the-stack_0_13380 | #!/usr/bin/env python3
""" Make satellite test data """
import os
from pathlib import Path
import numcodecs
import pandas as pd
import xarray as xr
import nowcasting_dataset
START = pd.Timestamp("2020-04-01T12:00")
END = pd.Timestamp("2020-04-01T14:00")
OUTPUT_PATH = Path(os.path.dirname(nowcasting_dataset.__file__)).parent / "tests" / "data"
print(f"{OUTPUT_PATH=}")
# HRV Path
HRV_SAT_FILENAME = (
"/mnt/storage_ssd_8tb/data/ocf/solar_pv_nowcasting/nowcasting_dataset_pipeline/"
"satellite/EUMETSAT/SEVIRI_RSS/zarr/v3/eumetsat_seviri_hrv_uk.zarr"
)
# Non-HRV path
SAT_FILENAME = (
"/mnt/storage_ssd_8tb/data/ocf/solar_pv_nowcasting/nowcasting_dataset_pipeline/"
"satellite/EUMETSAT/SEVIRI_RSS/zarr/v3/eumetsat_seviri_uk.zarr"
)
def generate_satellite_test_data():
"""Main function to make satelllite test data"""
# Create HRV data
output_filename = OUTPUT_PATH / "hrv_sat_data.zarr"
print("Opening", HRV_SAT_FILENAME)
print("Writing satellite tests data to", output_filename)
# This opens all the HRV satellite data
hrv_sat_data = xr.open_mfdataset(
HRV_SAT_FILENAME, chunks={}, mode="r", engine="zarr", concat_dim="time", combine="nested"
)
# v3 of the HRV data doesn't use variables. Instead the HRV data is in the 'data' DataArray.
# hrv_sat_data = hrv_sat_data.sel(variable=["HRV"], time=slice(START, END))
    # just take a bit of the time, to keep the size of the file down
hrv_sat_data = hrv_sat_data.sel(time=slice(START, END))
# Adds compression and chunking
encoding = {
"data": {"compressor": numcodecs.get_codec(dict(id="bz2", level=5))},
"time": {"units": "nanoseconds since 1970-01-01"},
}
# Write the HRV data to disk
hrv_sat_data.to_zarr(
output_filename, mode="w", consolidated=True, encoding=encoding, compute=True
)
# Now do the exact same with the non-HRV data
output_filename = OUTPUT_PATH / "sat_data.zarr"
print("Writing satellite tests data to", output_filename)
sat_data = xr.open_mfdataset(
SAT_FILENAME, chunks={}, mode="r", engine="zarr", concat_dim="time", combine="nested"
)
sat_data = sat_data.sel(variable=["IR_016"], time=slice(START, END))
sat_data.to_zarr(output_filename, mode="w", consolidated=True, encoding=encoding, compute=True)
if __name__ == "__main__":
generate_satellite_test_data()
|
the-stack_0_13382 | from primitiv import Device
from primitiv import tensor_functions as tF
from primitiv.devices import Naive
import numpy as np
import unittest
class TensorFunctionsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.device = Naive()
Device.set_default(self.device)
self.a = np.array([[1, 2], [3, 4]], np.float32)
self.b = np.array([[1, 1], [4, 8]], np.float32)
def tearDown(self):
pass
def test_tensor_pos(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((+x).to_ndarrays()[0] == self.a).all())
def test_tensor_neg(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((-x).to_ndarrays()[0] == -self.a).all())
def test_tensor_add(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x + y).to_ndarrays()[0] == np.array([[2, 3], [7, 12]])).all())
self.assertTrue(((x + 2).to_ndarrays()[0] == np.array([[3, 4], [5, 6]])).all())
self.assertTrue(((2 + x).to_ndarrays()[0] == np.array([[3, 4], [5, 6]])).all())
def test_tensor_sub(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x - y).to_ndarrays()[0] == np.array([[0, 1], [-1, -4]])).all())
self.assertTrue(((x - 2).to_ndarrays()[0] == np.array([[-1, 0], [1, 2]])).all())
self.assertTrue(((2 - x).to_ndarrays()[0] == np.array([[1, 0], [-1, -2]])).all())
def test_tensor_mul(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x * y).to_ndarrays()[0] == np.array([[1, 2], [12, 32]])).all())
self.assertTrue(((x * 2).to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
self.assertTrue(((2 * x).to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
def test_tensor_matmul(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x @ y).to_ndarrays()[0] == np.array([[9, 17], [19, 35]])).all())
self.assertRaises(TypeError, lambda: x @ 2)
self.assertRaises(TypeError, lambda: 2 @ x)
def test_tensor_truediv(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x / y).to_ndarrays()[0] == np.array([[1, 2], [0.75, 0.5]])).all())
self.assertTrue(((x / 2).to_ndarrays()[0] == np.array([[0.5, 1], [1.5, 2]])).all())
self.assertTrue(((2 / y).to_ndarrays()[0] == np.array([[2, 2], [0.5, 0.25]])).all())
def test_tensor_pow(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(np.isclose((x ** y).to_ndarrays()[0], np.array([[1, 2], [81, 65536]])).all())
self.assertTrue(np.isclose((x ** 2).to_ndarrays()[0], np.array([[1, 4], [9, 16]])).all())
self.assertTrue(np.isclose((2 ** x).to_ndarrays()[0], np.array([[2, 4], [8, 16]])).all())
self.assertTrue(np.isclose((x ** -2).to_ndarrays()[0], np.array([[1, 1/4], [1/9, 1/16]])).all())
input_arr = np.array([1, -1, 3, -3, 5, -5])
x = tF.input(input_arr)
self.assertTrue(((x ** 6).to_ndarrays()[0] == np.array([1, 1, 729, 729, 15625, 15625])).all())
self.assertTrue(((x ** 9).to_ndarrays()[0] == np.array([1, -1, 19683, -19683, 1953125, -1953125])).all())
input_arr = np.array([1, -1])
x = tF.input(input_arr)
self.assertTrue(((x ** 0x7fffffff).to_ndarrays()[0] == np.array([1, -1])).all())
self.assertTrue(((x ** -0x80000000).to_ndarrays()[0] == np.array([1, 1])).all())
self.assertRaises(TypeError, lambda: pow(x, y, 2))
def test_tensor_iadd(self):
x = tF.input(self.a)
y = tF.input(self.b)
x_tmp = x
x += y
self.assertIs(x, x_tmp)
self.assertTrue((x.to_ndarrays()[0] == np.array([[2, 3], [7, 12]])).all())
def test_tensor_isub(self):
x = tF.input(self.a)
y = tF.input(self.b)
x_tmp = x
x -= y
self.assertIs(x, x_tmp)
self.assertTrue((x.to_ndarrays()[0] == np.array([[0, 1], [-1, -4]])).all())
def test_tensor_imul(self):
x = tF.input(self.a)
x_tmp = x
x *= 2
self.assertIs(x, x_tmp)
self.assertTrue((x.to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
|
the-stack_0_13384 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://www.bggofurther.com/2015/01/create-an-interactive-command-line-menu-using-python/
# This tool won't work in Visual Studio Code (as an example).
# I don't know why this is the case but just run it in cmd.exe
import sys
import os
import collections
import ctypes
from subprocess import Popen, PIPE
import locale
import gui # <-- change name !!
import header
from hurry.filesize import alternative, size # pip install hurry.filesize
from prompt_toolkit import prompt
from prompt_toolkit.styles import style_from_dict
from prompt_toolkit.token import Token
# set locale to default to get thousands separators
locale.setlocale(locale.LC_ALL, '')
# Pointer to large unsigned integer
PULARGE_INTEGER = ctypes.POINTER(ctypes.c_ulonglong)
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
kernel32.GetDiskFreeSpaceExW.argtypes = (
ctypes.c_wchar_p,) + (PULARGE_INTEGER,) * 3
def get_size(start_path='.'):
"""
https://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
"""
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return size(total_size, system=alternative)
def get_size2(string):
value = size(string, system=alternative)
return value
def cutit(s, n):
"""
    cut function that removes the first n chars
    s = string
    n = number of leading chars to remove
"""
return s[n:]
class UsageTuple(collections.namedtuple('UsageTuple', 'total, used, free')):
def __str__(self):
# Add thousands separator to numbers displayed
return '{}, {}, {}'.format(*self)
def disk_usage(path):
try:
# allows str or bytes (or os.PathLike in Python 3.6+)
path = os.fsdecode(path)
except AttributeError: # fsdecode() not added until Python 3.2
pass
# Define variables to receive results when passed as "by reference" arguments
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), ctypes.c_ulonglong()
success = kernel32.GetDiskFreeSpaceExW(
path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
    if not success:
        error_code = ctypes.get_last_error()
        windows_error_message = ctypes.FormatError(error_code)
        raise ctypes.WinError(error_code, '{} {!r}'.format(
            windows_error_message, path))
used = total.value - free.value
return UsageTuple(total.value, used, free.value)
def drive_parser(letter):
total, used, free = disk_usage(letter)
total = get_size2(total)
free = get_size2(free)
return free, total
def get_bottom_toolbar_tokens(cli):
free, total = drive_parser('D:/')
return [(Token.Toolbar, ' app folder: {} patch folder: {} SDCard: {} of {} free'.format(get_size('app'), get_size('patch'), free, total))]
def input(string):  # it's intended to redefine input() XD
style = style_from_dict({
Token.Toolbar: '#ffffff bg:#333333',
})
output = prompt(
string, get_bottom_toolbar_tokens=get_bottom_toolbar_tokens, style=style)
return output
# Main definition - constants
menu_actions = {}
sub_menu = {}
selection = []
name, titleid = gui.send_variables()
# =======================
# MENUS FUNCTIONS
# =======================
def clearscreen(numlines=100):
"""
Clear the console.numlines is an optional argument used only as a fall-back.
"""
# Thanks to Steven D'Aprano, http://www.velocityreviews.com/forums
if os.name == "posix":
# Unix/Linux/MacOS/BSD/etc
os.system('clear')
elif os.name in ("nt", "dos", "ce"):
# DOS/Windows
os.system('CLS')
else:
# Fallback for other operating systems.
print('\n' * numlines)
def syscmd(cmd):
"""
    executes the given command in a better way than using
    os.system() (I don't know why but it seems to be bad practice !)
    It also returns the command's output instead of printing it :)
"""
cmoa = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
output, error = cmoa.communicate()
return output, error
# Main menu
def main_menu():
clearscreen()
print("1.Start the download")
print("2.Update Database")
print("3.Search for Games")
print("4.Load the queue from 'input.txt'")
print("5.View the queue")
print("6.Exit")
choice = input(">> ")
exec_menu(choice)
return
# Execute menu
def exec_menu(choice):
clearscreen()
ch = choice.lower()
if ch == '':
menu_actions['main_menu']()
else:
try:
menu_actions[ch]()
except KeyError:
print("Invalid selection, please try again.\n")
menu_actions['main_menu']()
return
def start_download():
clearscreen()
if selection == []:
print("Nothing to download.")
input('\n<press enter>')
menu_actions['main_menu']()
else:
for tid in selection:
header.start_download(tid, 'psv')
input('\n<press enter>')
menu_actions['main_menu']()
def update_database():
clearscreen()
header.initial_setup()
input('\n<press enter>')
menu_actions['main_menu']()
def search():
search_input, selected = gui.start_searching(None)
for item in selected:
selection.append(item)
menu_actions['main_menu']()
def load():
clearscreen()
if header.exists('input.txt') is False:
print("Enter the Filename:")
filename = header.input_txt(input(">> "))
else:
filename = 'input.txt'
list1 = header.input_txt(filename)
for item in list1:
selection.append(item)
input('\n<press enter>')
menu_actions['main_menu']()
def view():
for item in selection:
position = titleid.index(item)
print(name[position], '[' + item + ']')
input('\n<press enter>')
menu_actions['main_menu']()
# Exit program
def exit():
sys.exit()
# =======================
# MENUS DEFINITIONS
# =======================
# Menu definition
menu_actions = {
'main_menu': main_menu,
'1': start_download,
'2': update_database,
'3': search,
'4': load,
'5': view,
'6': exit,
}
sub_menu = {
'home': search,
}
# =======================
# MAIN PROGRAM
# =======================
# Main Program
if __name__ == "__main__":
# Launch main menu
main_menu()
|
the-stack_0_13386 | # -*- coding: utf-8 -*-
# This is for introducing **syntactic** local bindings, i.e. simple code splicing
# at macro expansion time. If you're looking for regular run-time let et al. macros,
# see letdo.py.
# TODO: Coverage of code using `with block` and `with expr` is not reported correctly.
#
# TODO: As this is a toy macro system within the real macro system, that is to be expected;
# TODO: `mcpyrate` goes to some degree of trouble to produce correct coverage reporting for
# TODO: the real macro system, and we haven't duplicated that effort here.
#
# TODO: With `mcpyrate`, we don't really need `let_syntax` and `abbrev` anymore, so we could
# TODO: actually remove them; but their tests exercise some code paths that would otherwise
# TODO: remain untested. As of v0.15.0, we're keeping them for now.
__all__ = ["let_syntax", "abbrev", "expr", "block"]
from mcpyrate.quotes import macros, q, a # noqa: F401
from ast import Name, Call, Subscript, Tuple, Starred, Expr, With
from copy import deepcopy
from functools import partial
import sys
from mcpyrate import parametricmacro
from mcpyrate.quotes import is_captured_value
from mcpyrate.utils import rename
from mcpyrate.walkers import ASTTransformer, ASTVisitor
from .letdo import _implicit_do, _destructure_and_apply_let
from .nameutil import is_unexpanded_block_macro
from .util import eliminate_ifones
from ..dynassign import dyn
# --------------------------------------------------------------------------------
# Macro interface
@parametricmacro
def let_syntax(tree, *, args, syntax, expander, **kw):
"""[syntax, expr/block] Introduce local **syntactic** bindings.
**Expression variant**::
let_syntax[lhs << rhs, ...][body]
let_syntax[lhs << rhs, ...][[body0, ...]]
Alternative haskelly syntax::
let_syntax[[lhs << rhs, ...] in body]
let_syntax[[lhs << rhs, ...] in [body0, ...]]
let_syntax[body, where[lhs << rhs, ...]]
let_syntax[[body0, ...], where[lhs << rhs, ...]]
**Block variant**::
with let_syntax:
with block as xs: # capture a block of statements - bare name
...
with block[a, ...] as xs: # capture a block of statements - template
...
with expr as x: # capture a single expression - bare name
...
with expr[a, ...] as x: # capture a single expression - template
...
body0
...
A single expression can be a ``do[]`` if multiple expressions are needed.
The bindings are applied **at macro expansion time**, substituting
the expression on the RHS for each instance of the corresponding LHS.
Each substitution gets a fresh copy.
This is useful to e.g. locally abbreviate long function names at macro
expansion time (with zero run-time overhead), or to splice in several
(possibly parametric) instances of a common pattern.
In the expression variant, ``lhs`` may be:
- A bare name (e.g. ``x``), or
- A simple template of the form ``f(x, ...)``. The names inside the
parentheses declare the formal parameters of the template (that can
then be used in the body).
In the block variant:
- The **as-part** specifies the name of the LHS.
- If a template, the formal parameters are declared on the ``block``
or ``expr``, not on the as-part (due to syntactic limitations).
**Templates**
To make parametric substitutions, use templates.
Templates support only positional arguments, with no default values.
Even in block templates, parameters are always expressions (because they
use the subscript syntax at the use site).
In the body of the ``let_syntax``, a template is used like an expr macro.
Just like in an actual macro invocation, when the template is substituted,
any instances of its formal parameters on its RHS get replaced by the
argument values from the invocation site.
Note each instance of the same formal parameter gets a fresh copy of the
corresponding argument value.
**Substitution order**
This is a two-step process. In the first step, we apply template substitutions.
In the second step, we apply bare name substitutions to the result of the
first step. (So RHSs of templates may use any of the bare-name definitions.)
Within each step, the substitutions are applied **in the order specified**.
So if the bindings are ``((x, y), (y, z))``, then ``x`` transforms to ``z``.
But if the bindings are ``((y, z), (x, y))``, then ``x`` transforms to ``y``,
and only an explicit ``y`` at the use site transforms to ``z``.
**Notes**
Inspired by Racket's ``let-syntax`` and ``with-syntax``, see:
https://docs.racket-lang.org/reference/let.html
https://docs.racket-lang.org/reference/stx-patterns.html
**CAUTION**: This is essentially a toy macro system inside the real
macro system, implemented with the real macro system.
The usual caveats of macro systems apply. Especially, we support absolutely
no form of hygiene. Be very, very careful to avoid name conflicts.
``let_syntax`` is meant only for simple local substitutions where the
elimination of repetition can shorten the code and improve readability.
If you need to do something complex, prefer writing a real macro directly
in `mcpyrate`.
"""
if syntax not in ("expr", "block"):
raise SyntaxError("let_syntax is an expr and block macro only") # pragma: no cover
if syntax == "block" and kw['optional_vars'] is not None:
raise SyntaxError("let_syntax (block mode) does not take an as-part") # pragma: no cover
if syntax == "expr":
_let_syntax_expr_inside_out = partial(_let_syntax_expr, expand_inside=True)
return _destructure_and_apply_let(tree, args, expander, _let_syntax_expr_inside_out, letsyntax_mode=True)
else: # syntax == "block":
with dyn.let(_macro_expander=expander):
return _let_syntax_block(block_body=tree, expand_inside=True)
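# Usage sketch (hypothetical; these forms mirror the docstring above and only expand
# when this module is imported through the mcpyrate macro expander):
#
#     from unpythonic.syntax import macros, let_syntax  # noqa: F401
#
#     y = let_syntax[f(a) << a + a][f[21]]          # template: expands to 21 + 21
#     z = let_syntax[x << 21][x + x]                # bare name: expands to 21 + 21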
@parametricmacro
def abbrev(tree, *, args, syntax, expander, **kw):
"""[syntax, expr/block] Exactly like ``let_syntax``, but expands outside in.
Because this variant expands before any macros in the body, it can locally
rename other macros, e.g.::
abbrev[m << macrowithverylongname][
m[tree1] if m[tree2] else m[tree3]]
**CAUTION**: Because ``abbrev`` expands outside-in, and does not respect
boundaries of any nested ``abbrev`` invocations, it will not lexically scope
the substitutions. Instead, the outermost ``abbrev`` expands first, and then
any inner ones expand with whatever substitutions they have remaining.
If the same name is used on the LHS in two or more nested ``abbrev``,
any inner ones will likely raise an error (unless the outer substitution
just replaces a name with another), because also the names on the LHS
in the inner ``abbrev`` will undergo substitution when the outer
``abbrev`` expands.
"""
if syntax not in ("expr", "block"):
raise SyntaxError("abbrev is an expr and block macro only") # pragma: no cover
if syntax == "block" and kw['optional_vars'] is not None:
raise SyntaxError("abbrev (block mode) does not take an as-part") # pragma: no cover
# DON'T expand inner macro invocations first - outside-in ordering is the default, so we simply do nothing.
if syntax == "expr":
_let_syntax_expr_outside_in = partial(_let_syntax_expr, expand_inside=False)
return _destructure_and_apply_let(tree, args, expander, _let_syntax_expr_outside_in,
letsyntax_mode=True)
else:
with dyn.let(_macro_expander=expander):
return _let_syntax_block(block_body=tree, expand_inside=False)
@parametricmacro
def expr(tree, *, syntax, **kw):
"""[syntax, block] ``with expr:`` inside a ``with let_syntax:``."""
if syntax != "block":
raise SyntaxError("`expr` is a block macro only") # pragma: no cover
raise SyntaxError("`expr` is only valid at the top level of a block-mode `let_syntax` or `abbrev`") # pragma: no cover, not intended to hit the expander
@parametricmacro
def block(tree, *, syntax, **kw):
"""[syntax, block] ``with block:`` inside a ``with let_syntax:``."""
if syntax != "block":
raise SyntaxError("`block` is a block macro only") # pragma: no cover
raise SyntaxError("`block` is only valid at the top level of a block-mode `let_syntax` or `abbrev`") # pragma: no cover, not intended to hit the expander
# --------------------------------------------------------------------------------
# Syntax transformers
# let_syntax[lhs << rhs, ...][body]
# let_syntax[lhs << rhs, ...][[body0, ...]]
# let_syntax[[lhs << rhs, ...] in body]
# let_syntax[[lhs << rhs, ...] in [body0, ...]]
# let_syntax[body, where[lhs << rhs, ...]]
# let_syntax[[body0, ...], where[lhs << rhs, ...]]
#
# This transformer takes destructured input, with the bindings subform
# and the body already extracted, and supplied separately.
#
# bindings: sequence of ast.Tuple: (k1, v1), (k2, v2), ..., (kn, vn)
# expand_inside: if True, expand inside-out. If False, expand outside-in.
def _let_syntax_expr(bindings, body, *, expand_inside):
body = _implicit_do(body) # support the extra bracket syntax
if not bindings: # Optimize out a `let_syntax` with no bindings.
return body # pragma: no cover
names_seen = set()
templates = []
barenames = []
def register_bindings():
for line in bindings:
key, value = line.elts
name, args = _analyze_lhs(key)
if name in names_seen:
raise SyntaxError(f"duplicate '{name}'; names defined in the same let_syntax expr must be unique") # pragma: no cover
names_seen.add(name)
target = templates if args else barenames
target.append((name, args, value, "expr"))
if expand_inside:
bindings = dyn._macro_expander.visit_recursively(bindings)
body = dyn._macro_expander.visit_recursively(body)
register_bindings()
body = _substitute_templates(templates, body)
body = _substitute_barenames(barenames, body)
return body
# block version:
#
# with let_syntax:
# with block as xs:
# ...
# with block[a, ...] as xs:
# ...
# with expr as x:
# ...
# with expr[a, ...] as x:
# ...
# body0
# ...
#
# expand_inside: if True, expand inside-out. If False, expand outside-in.
def _let_syntax_block(block_body, *, expand_inside):
is_let_syntax = partial(is_unexpanded_block_macro, let_syntax, dyn._macro_expander)
is_abbrev = partial(is_unexpanded_block_macro, abbrev, dyn._macro_expander)
is_expr_declaration = partial(is_unexpanded_block_macro, expr, dyn._macro_expander)
is_block_declaration = partial(is_unexpanded_block_macro, block, dyn._macro_expander)
is_helper_macro = lambda tree: is_expr_declaration(tree) or is_block_declaration(tree)
def check_strays(ismatch, tree):
class StrayHelperMacroChecker(ASTVisitor): # TODO: refactor this?
def examine(self, tree):
if is_captured_value(tree):
return # don't recurse!
elif is_let_syntax(tree) or is_abbrev(tree):
return # don't recurse!
elif ismatch(tree):
# Expand the stray helper macro invocation, to trigger its `SyntaxError`
# with a useful message, and *make the expander generate a use site traceback*.
#
# (If we just `raise` here directly, the expander won't see the use site
# of the `with expr` or `with block`, but just that of the `do[]`.)
dyn._macro_expander.visit(tree)
self.generic_visit(tree)
StrayHelperMacroChecker().visit(tree)
check_stray_blocks_and_exprs = partial(check_strays, is_helper_macro)
names_seen = set()
def destructure_binding(withstmt, mode, kind):
assert mode in ("block", "expr")
assert kind in ("barename", "template")
ctxmanager = withstmt.items[0].context_expr
optvars = withstmt.items[0].optional_vars
if not optvars:
raise SyntaxError(f"'with {mode}:': expected an as-part") # pragma: no cover
if type(optvars) is not Name:
raise SyntaxError(f"'with {mode}:': expected exactly one name in the as-part") # pragma: no cover
name = optvars.id
if name in names_seen:
raise SyntaxError(f"duplicate '{name}'; as-parts in the same let_syntax block must be unique") # pragma: no cover
if kind == "template":
_, args = _analyze_lhs(ctxmanager) # syntactic limitation, can't place formal parameter list on the as-part
else: # kind == "barename":
args = []
if mode == "block":
with q as value:
if 1:
with a:
withstmt.body
else: # mode == "expr":
if len(withstmt.body) != 1:
raise SyntaxError("'with expr:' expected a one-item body (use a do[] if need more)") # pragma: no cover
theexpr = withstmt.body[0]
if type(theexpr) is not Expr:
raise SyntaxError("'with expr:' expected an expression body, got a statement") # pragma: no cover
value = theexpr.value # discard Expr wrapper in definition
names_seen.add(name)
return name, args, value, mode
def isbinding(tree):
for mode in ("block", "expr"):
if not (type(tree) is With and len(tree.items) == 1):
continue
ctxmanager = tree.items[0].context_expr
if type(ctxmanager) is Name and ctxmanager.id == mode:
return mode, "barename"
# expr[...], block[...]
if type(ctxmanager) is Subscript and type(ctxmanager.value) is Name and ctxmanager.value.id == mode:
return mode, "template"
# expr(...), block(...)
# parenthesis syntax for macro arguments TODO: Python 3.9+: remove once we bump minimum Python to 3.9
if type(ctxmanager) is Call and type(ctxmanager.func) is Name and ctxmanager.func.id == mode:
return mode, "template"
return False
templates = []
barenames = []
new_block_body = []
for stmt in block_body:
# `let_syntax` mode (expand_inside): respect lexical scoping of nested `let_syntax`/`abbrev`
expanded = False
if expand_inside and (is_let_syntax(stmt) or is_abbrev(stmt)):
stmt = dyn._macro_expander.visit_recursively(stmt)
expanded = True
stmt = _substitute_templates(templates, stmt)
stmt = _substitute_barenames(barenames, stmt)
binding_data = isbinding(stmt)
if binding_data:
name, args, value, mode = destructure_binding(stmt, *binding_data)
check_stray_blocks_and_exprs(value) # before expanding it!
if expand_inside and not expanded:
value = dyn._macro_expander.visit_recursively(value)
target = templates if args else barenames
target.append((name, args, value, mode))
else:
check_stray_blocks_and_exprs(stmt) # before expanding it!
if expand_inside and not expanded:
stmt = dyn._macro_expander.visit_recursively(stmt)
new_block_body.append(stmt)
new_block_body = eliminate_ifones(new_block_body)
if not new_block_body:
raise SyntaxError("let_syntax: expected at least one statement beside definitions") # pragma: no cover
return new_block_body
# -----------------------------------------------------------------------------
def _get_subscript_args(tree):
if sys.version_info >= (3, 9, 0): # Python 3.9+: the Index wrapper is gone.
theslice = tree.slice
else:
theslice = tree.slice.value
if type(theslice) is Tuple:
args = theslice.elts
else:
args = [theslice]
return args
# x --> "x", []
# f[a, b, c] --> "f", ["a", "b", "c"]
# f(a, b, c) --> "f", ["a", "b", "c"]
def _analyze_lhs(tree):
if type(tree) is Name: # bare name
name = tree.id
args = []
elif type(tree) is Subscript and type(tree.value) is Name: # template f[x, ...]
name = tree.value.id
args = [a.id for a in _get_subscript_args(tree)]
# parenthesis syntax for macro arguments TODO: Python 3.9+: remove once we bump minimum Python to 3.9
elif type(tree) is Call and type(tree.func) is Name: # template f(x, ...)
name = tree.func.id
if any(type(a) is Starred for a in tree.args): # *args (Python 3.5+)
raise SyntaxError("in template, only positional parameters supported (no *args)") # pragma: no cover
args = [a.id for a in tree.args]
if tree.keywords:
raise SyntaxError("in template, only positional parameters supported (no named args or **kwargs)") # pragma: no cover
else:
raise SyntaxError("expected a name (e.g. x) or a template (e.g. f(x, ...)) on the LHS") # pragma: no cover
return name, args
def _substitute_barename(name, value, tree, mode):
def isthisname(tree):
return type(tree) is Name and tree.id == name
def splice(tree):
class Splicer(ASTTransformer):
def transform(self, tree):
if is_captured_value(tree):
return tree # don't recurse!
def subst():
# Copy just to be on the safe side. Different instances may be
# edited differently by other macros expanded later.
return deepcopy(value)
# discard Expr wrapper (identifying a statement position) at use site
# when performing a block substitution
if mode == "block" and type(tree) is Expr and isthisname(tree.value):
tree = subst()
return tree
elif isthisname(tree):
if mode == "block":
raise SyntaxError(f"cannot substitute block '{name}' into expression position") # pragma: no cover
tree = subst()
return self.generic_visit(tree)
return self.generic_visit(tree)
return Splicer().visit(tree)
# If the new value is also bare name, perform the substitution (now as a string)
# also in the name part of def and similar, to support human intuition of "renaming".
if type(value) is Name:
postproc = partial(rename, name, value.id)
else:
postproc = lambda x: x
return postproc(splice(tree))
def _substitute_barenames(barenames, tree):
for name, _noformalparams, value, mode in barenames:
tree = _substitute_barename(name, value, tree, mode)
return tree
def _substitute_templates(templates, tree):
for name, formalparams, value, mode in templates:
def isthisfunc(tree):
if type(tree) is Subscript and type(tree.value) is Name and tree.value.id == name:
return True
# parenthesis syntax for macro arguments TODO: Python 3.9+: remove once we bump minimum Python to 3.9
if type(tree) is Call and type(tree.func) is Name and tree.func.id == name:
return True
return False
def subst(tree):
if type(tree) is Subscript:
theargs = _get_subscript_args(tree)
elif type(tree) is Call:
theargs = tree.args
else:
assert False
if len(theargs) != len(formalparams):
raise SyntaxError(f"let_syntax template '{name}' expected {len(formalparams)} arguments, got {len(theargs)}") # pragma: no cover
# make a fresh deep copy of the RHS to avoid destroying the template.
tree = deepcopy(value) # expand the f itself in f[x, ...] or f(x, ...)
for k, v in zip(formalparams, theargs): # expand the x, ... in the expanded form of f
# can't put statements in a Subscript or in a Call, so always treat args as expressions.
tree = _substitute_barename(k, v, tree, "expr")
return tree
def splice(tree):
class Splicer(ASTTransformer):
def transform(self, tree):
if is_captured_value(tree):
return tree # don't recurse!
# discard Expr wrapper (identifying a statement position) at use site
# when performing a block substitution
if mode == "block" and type(tree) is Expr and isthisfunc(tree.value):
tree = subst(tree.value)
return tree
elif isthisfunc(tree):
if mode == "block":
raise SyntaxError(f"cannot substitute block '{name}' into expression position") # pragma: no cover
tree = subst(tree)
return self.generic_visit(tree)
return self.generic_visit(tree)
return Splicer().visit(tree)
tree = splice(tree)
return tree
|
the-stack_0_13387 | from __future__ import unicode_literals
import json
from django import forms
from django.utils.safestring import mark_safe
from .conf import settings
class MediumEditorTextarea(forms.Textarea):
def render(self, name, value, attrs=None, renderer=None):
if attrs is None:
attrs = {}
attrs.update({'class': 'django-mediumeditor-input'})
identifier = attrs.get('id', 'id_{}'.format(name))
params = {
'data-mediumeditor-textarea': identifier,
'class': 'django-mediumeditor-editable',
'id': '{}_editable'.format(identifier),
}
param_str = ' '.join('{}="{}"'.format(k, v) for k, v in params.items())
html = super(MediumEditorTextarea, self).render(name, value, attrs)
options = json.dumps(settings.MEDIUM_EDITOR_OPTIONS)
html = mark_safe(u'''{}
<div {}></div>
<script type="text/javascript">
MediumEditorOptions={};
</script>'''.format(html, param_str, options))
return html
class Media:
css = {'all': (
'//cdn.jsdelivr.net/medium-editor/latest/css/'
'medium-editor.min.css',
'css/mediumeditor/django-mediumeditor.css',
'//cdn.jsdelivr.net/medium-editor/latest/css/themes/{}.min.css'.format(
settings.MEDIUM_EDITOR_THEME
)
)}
js = (
'//cdn.jsdelivr.net/medium-editor/latest/js/medium-editor.min.js',
'js/mediumeditor/django-mediumeditor.js', )
|
the-stack_0_13388 | import os
import tensorflow as tf
from configparser import ConfigParser
from utilities.set_dirs import get_conf_dir
conf_dir = get_conf_dir(debug=False)
parser = ConfigParser(os.environ)
parser.read(os.path.join(conf_dir, 'neural_network.ini'))
# AdamOptimizer
beta1 = parser.getfloat('optimizer', 'beta1')
beta2 = parser.getfloat('optimizer', 'beta2')
epsilon = parser.getfloat('optimizer', 'epsilon')
learning_rate = parser.getfloat('optimizer', 'learning_rate')
def variable_on_cpu(name, shape, initializer):
"""
    Utility function used to create (or fetch) a variable pinned to CPU memory,
    so that these variables are placed on the /cpu:0 device during graph creation.
"""
# Use the /cpu:0 device for scoped operations
with tf.device('/cpu:0'):
        # Create or get the requested variable
var = tf.get_variable(name=name, shape=shape, initializer=initializer)
return var
def create_optimizer():
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1,
beta2=beta2,
epsilon=epsilon)
return optimizer
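# Usage sketch (hyperparameters are read from conf/neural_network.ini above;
# `loss` is a hypothetical scalar tensor defined elsewhere):
#
#     optimizer = create_optimizer()
#     # train_op = optimizer.minimize(loss)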
|
the-stack_0_13389 |
from .expression import Params, ParamsExpression
class Function(ParamsExpression):
__visit_name__ = 'function'
def __init__(self, filter=None, weight=None, **kwargs):
self.filter = filter
self.weight = weight
super(Function, self).__init__(**kwargs)
class Weight(Function):
__func_name__ = 'weight'
__visit_name__ = 'weight_function'
def __init__(self, weight, filter=None):
super(Weight, self).__init__(filter=filter, weight=weight)
class FieldValueFactor(Function):
__func_name__ = 'field_value_factor'
def __init__(
self, field, factor=None, modifier=None, missing=None,
filter=None, **kwargs
):
super(FieldValueFactor, self).__init__(
field=field, factor=factor, modifier=modifier, missing=missing,
filter=filter, **kwargs
)
Factor = FieldValueFactor
class ScriptScore(Function):
__func_name__ = 'script_score'
def __init__(self, script, filter=None, **kwargs):
super(ScriptScore, self).__init__(
script=script, filter=filter, **kwargs
)
class RandomScore(Function):
__func_name__ = 'random_score'
def __init__(self, seed=None, filter=None, **kwargs):
super(RandomScore, self).__init__(seed=seed, filter=filter, **kwargs)
class DecayFunction(Function):
__visit_name__ = 'decay_function'
def __init__(
self, field, origin, scale, offset=None, decay=None,
multi_value_mode=None, **kwargs
):
self.field = field
self.decay_params = Params(
origin=origin, scale=scale, offset=offset, decay=decay,
)
super(DecayFunction, self).__init__(
multi_value_mode=multi_value_mode, **kwargs
)
class Gauss(DecayFunction):
__func_name__ = 'gauss'
class Exp(DecayFunction):
__func_name__ = 'exp'
class Linear(DecayFunction):
__func_name__ = 'linear'
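# Usage sketch (these function objects are meant to be combined into a
# function_score query elsewhere in this package; field names are hypothetical):
#
#     Weight(2)
#     Factor('popularity', modifier='log1p')
#     Gauss('publish_date', origin='now', scale='10d', decay=0.5)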
|
the-stack_0_13390 | import functools
import requests
import pyvo
import pyvo.auth.authsession
import warnings
from rubin_jupyter_utils.helpers import get_access_token
from rubin_jupyter_utils.config import RubinConfig
def deprecated(new_name=''):
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning) # turn off filter
if new_name:
warnings.warn(f"Call to deprecated function {func.__name__}. " +
"This function may be removed at any point in the future. " +
f"Please use {new_name} instead.",
category=DeprecationWarning,
stacklevel=2)
else:
warnings.warn(f"Call to deprecated function {func.__name__}. " +
"This function may be removed at any point in the future.",
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
return deprecated
def _get_tap_url():
rc = RubinConfig()
tapurl = rc.external_tap_url or (rc.external_instance_url +
rc.tap_route)
return tapurl
def _get_auth():
tap_url = _get_tap_url()
s = requests.Session()
s.headers["Authorization"] = "Bearer " + get_access_token()
auth = pyvo.auth.authsession.AuthSession()
auth.credentials.set("lsst-token", s)
auth.add_security_method_for_url(tap_url, "lsst-token")
auth.add_security_method_for_url(tap_url + "/sync", "lsst-token")
auth.add_security_method_for_url(tap_url + "/async", "lsst-token")
auth.add_security_method_for_url(tap_url + "/tables", "lsst-token")
return auth
def get_tap_service():
return pyvo.dal.TAPService(_get_tap_url(), _get_auth())
@deprecated(new_name="get_tap_service")
def get_catalog():
return get_tap_service()
def retrieve_query(query_url):
return pyvo.dal.AsyncTAPJob(query_url, _get_auth())
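# Usage sketch (assumes a Rubin Science Platform session where get_access_token()
# can supply a token; the ADQL query is hypothetical):
#
#     service = get_tap_service()
#     # results = service.search("SELECT TOP 10 * FROM tap_schema.tables")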
|
the-stack_0_13392 | from collections import defaultdict, Sized
import numpy as np
import pandas as pd
from pandas._libs.lib import fast_zip
from pandas._libs.parsers import union_categoricals
from pandas.core.dtypes.common import is_numeric_dtype
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph._traversal import connected_components
def get_sequence_length(obj):
if isinstance(obj, str) or not isinstance(obj, Sized):
return -1
elif isinstance(obj, Sized) and all(not isinstance(i, Sized) and pd.isnull(i) for i in obj):
return -2
else:
return len(obj)
def flatten(frame,
index_name=None,
as_index=False,
keep_na=False,
columns=None,
tile_index=False):
"""
Flatten the input before the transformation
Parameters
----------
frame: pandas.DataFrame
index_name: str
Name of the index to append to indentify each item uniquely
keep_na: bool or str
        Should non-sequence elements (or sequences full of None) be kept in the dataframe
as an empty row (value given is None and new index value is None also)
columns: tuple of str
Flatten only sequence in these columns if not None
    tile_index: bool
        If True, the appended index restarts at 0 for each original row instead of
        being a single global range
    Returns
    -------
    pandas.DataFrame or pandas.Series
        The flattened input, with one row per sequence item (a Series is returned
        when the input was a Series)
    """
if isinstance(as_index, bool):
as_column = not as_index
elif isinstance(as_index, str) and index_name is None:
index_name = as_index
as_column = False
else:
raise Exception("as_index must be str or bool, and if str, index_name must be None")
if isinstance(frame, pd.Series):
res = flatten(pd.DataFrame({"X": frame}), index_name, as_column, keep_na, columns, tile_index)
new_frame = res["X"]
new_frame.name = frame.name
return new_frame
if keep_na is True:
keep_na = 'null_index'
elif keep_na is False:
keep_na = 'remove'
assert keep_na in ('null_index', 'as_single_item', 'remove')
assert isinstance(frame, pd.DataFrame), "Can only flatten DataFrame"
if columns is None:
columns = frame.columns
elif not isinstance(columns, (tuple, list)):
columns = [columns]
else:
columns = list(columns)
lengths = frame[columns].applymap(lambda seq: get_sequence_length(seq))
for col in frame.columns:
if col not in columns:
lengths[col] = -1
result_lengths = lengths.max(axis=1)
# Each column element will be expanded on multiple rows,
# even if it is a non-iterable object
    # We must know beforehand how many rows the expansion will take,
    # and we take this length from the maximum sequence size
if keep_na == 'remove':
bool_row_selector = result_lengths > 0
result_lengths = result_lengths[bool_row_selector]
selected_lengths = lengths[bool_row_selector]
frame = frame[bool_row_selector]
nulls = None
else:
nulls = result_lengths < 0
# Non sequence or sequence full of None will give rise to 1 row
result_lengths[nulls] = 1
selected_lengths = lengths
nulls = result_lengths.cumsum()[nulls] - 1
categoricals = {}
frame = frame.copy()
for col in frame.columns:
if hasattr(frame[col], 'cat'):
categoricals[col] = frame[col].cat.categories
frame[col] = frame[col].cat.codes
flattened = {col: [] for col in frame.columns}
for col_name, col in frame.iteritems():
for obj, res_length, length in zip(col.values, result_lengths, selected_lengths[col_name]):
if length >= 0: # we have a normal sequence
flattened[col_name].append(obj if isinstance(obj, pd.Series) else pd.Series(obj))
# Otherwise it a non sequence, create as many rows as needed for it
else:
# -2 means sequence full of None, we put a None instead here
if length == -2:
obj = None
if res_length == 1:
flattened[col_name].append(pd.Series([obj]))
else:
flattened[col_name].append(pd.Series([obj] * res_length))
index = frame.index.repeat(result_lengths) if index_name is not None else None
for col_name in flattened:
flattened[col_name] = pd.concat(flattened[col_name], ignore_index=True)
if index is not None:
flattened[col_name].index = index
flattened = pd.DataFrame(flattened)
# flattened = pd.DataFrame(
# data={col_name: pd.concat(flattened[col_name], ignore_index=True) for col_name in flattened},
# index=frame.index.repeat(result_lengths) if index_name is not None else None)
for name, categories in categoricals.items():
flattened[name] = pd.Categorical.from_codes(flattened[name], categories=categories)
# Adds an index under the name `self.index_name` to identify uniquely every row
# of the frame
if index_name is not None:
if index_name in flattened.columns:
flattened.set_index(index_name, append=True, inplace=True)
else:
if tile_index:
new_index_values = np.concatenate([np.arange(s) for s in result_lengths])
flattened[index_name] = new_index_values
else:
new_index_values = np.arange(len(flattened))
flattened[index_name] = new_index_values
flattened[index_name] = flattened[index_name]
flattened.set_index(index_name, append=True, inplace=True)
if keep_na == 'null_index' and nulls is not None:
new_labels = np.arange(len(flattened))
# noinspection PyUnresolvedReferences
new_labels[nulls.values] = -1
flattened.index.set_codes(
new_labels, level=index_name, inplace=True)
if as_column:
flattened.reset_index(index_name, inplace=True)
flattened.reset_index(inplace=True, drop=True)
# flattened.index = flattened.index.remove_unused_levels()
return flattened
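# Example sketch (hypothetical columns): flatten a column of token lists into
# one row per token, adding a "token_id" column to identify each item.
#
#     df = pd.DataFrame({"doc_id": [0, 1], "tokens": [["a", "b"], ["c"]]})
#     flat = flatten(df, index_name="token_id", as_index=False)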
def make_merged_names(left_span_names, right_span_names, left_on, right_on, left_columns, right_columns,
suffixes=('_x', '_y')):
right_columns = set(right_columns) - set(right_on)
left_columns = set(left_columns) - set(left_on)
left_merged = [name + (suffixes[0] if name in right_columns else '') for name in left_span_names]
right_merged = [name + (suffixes[1] if name in left_columns else '') for name in right_span_names]
return left_merged, right_merged
def make_merged_names_map(left_columns, right_columns, left_on, right_on, suffixes=('_x', '_y')):
right_columns = set(right_columns) - set(right_on)
left_columns = set(left_columns) - set(left_on)
left_merged = [name + (suffixes[0] if name in right_columns else '') for name in left_columns]
right_merged = [name + (suffixes[1] if name in left_columns else '') for name in right_columns]
return dict(zip(left_columns, left_merged)), dict(zip(right_columns, right_merged))
def merge_with_spans(
left, right=None,
how='inner',
on=None,
left_on=None,
right_on=None,
suffixes=('_x', '_y'),
span_policy='partial_strict',
placeholder_columns=(),
**kwargs):
"""
Just like pandas.merge, but handles the merging of spans
    Any tuple in the "on" parameter will be considered a (begin, end) span;
    the `span_policy` parameter controls how those spans are matched
Parameters
----------
left: pd.DataFrame
right: pd.DataFrame
how: str
"inner", "outer", "left", "right"
on: list of (str or tuple of str)
left_on: list of (str or tuple of str)
right_on: list of (str or tuple of str)
suffixes: list of str
span_policy: str
How to merge spans ?
One of: "partial", "exact", "partial_strict"
placeholder_columns:
Zero will be put as a value instead of nan for any empty cell in those columns after the merge
kwargs: any
Any kwargs for the pd.merge function
Returns
-------
pd.DataFrame
"""
if right is None:
right = left
left = left.copy()
right = right.copy()
if isinstance(on, str):
on = [on]
if left_on is None:
left_on = on
if right_on is None:
right_on = on
left_columns = left.columns if hasattr(left, 'columns') else [left.name]
right_columns = right.columns if hasattr(right, 'columns') else [right.name]
if left_on is None and right_on is None:
left_on = right_on = list(set(left_columns) & set(right_columns))
left_on_spans = [o for o in left_on if isinstance(o, tuple)]
right_on_spans = [o for o in right_on if isinstance(o, tuple)]
left_on = [c for c in left_on if not isinstance(c, tuple)] # flatten_sequence(left_on)
right_on = [c for c in right_on if not isinstance(c, tuple)] # flatten_sequence(right_on)
left_names, right_names = make_merged_names(
left_columns, right.columns,
left_on=left_on,
right_on=right_on,
left_columns=left_columns, right_columns=right_columns, suffixes=suffixes)
left_names_map = dict(zip(left_columns, left_names))
right_names_map = dict(zip(right_columns, right_names))
categoricals = {}
for left_col, right_col in zip(left_on, right_on):
left_cat = getattr(left[left_col] if hasattr(left, 'columns') else left, 'cat', None)
right_cat = getattr(right[right_col] if hasattr(right, 'columns') else right, 'cat', None)
if left_cat is not None or right_cat is not None:
if (left_cat and right_cat and not (left_cat.categories is right_cat.categories)) or (
(left_cat is None) != (right_cat is None)):
left[left_col] = left[left_col].astype('category')
right[right_col] = right[right_col].astype('category')
cat_merge = union_categoricals([left[left_col], right[right_col]])
if hasattr(left, 'columns'):
left[left_col] = cat_merge[:len(left)]
else:
left = cat_merge[:len(left)]
if hasattr(right, 'columns'):
right[right_col] = cat_merge[len(left):]
else:
right = cat_merge[len(left):]
categoricals[left_names_map[left_col]] = left[left_col].cat.categories
categoricals[right_names_map[right_col]] = right[right_col].cat.categories
if hasattr(left, 'columns'):
left[left_col] = left[left_col].cat.codes
else:
left = left.cat.codes
if hasattr(right, 'columns'):
right[right_col] = right[right_col].cat.codes
else:
right = right.cat.codes
if len(left_on_spans) == 0:
merged = pd.merge(left, right, left_on=left_on, right_on=right_on, suffixes=suffixes, how=how, **kwargs)
else:
if how != 'inner':
left['_left_index'] = np.arange(len(left))
right['_right_index'] = np.arange(len(right))
merged = pd.merge(left, right, left_on=left_on, right_on=right_on, suffixes=suffixes, how='inner', **kwargs)
for i, (left_span_names, right_span_names) in enumerate(zip(left_on_spans, right_on_spans)):
(left_begin, left_end), (right_begin, right_end) = make_merged_names(
left_span_names, right_span_names, left_on=left_on, right_on=right_on,
left_columns=left.columns, right_columns=right_columns, suffixes=suffixes)
merged[f'overlap_size_{i}'] = np.minimum(merged[left_end], merged[right_end]) - np.maximum(merged[left_begin], merged[right_begin])
if span_policy != "none":
results = []
chunk_size = 1000000
for chunk_i in range(0, len(merged), chunk_size):
if span_policy == "partial_strict":
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(f'({right_end} > {left_begin} and {left_end} > {right_begin})'))
elif span_policy == "partial":
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(f'({right_end} >= {left_begin} and {left_end} >= {right_begin})'))
elif span_policy == "exact":
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(f'({left_begin} == {right_begin} and {left_end} == {right_end})'))
else:
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(span_policy))
if len(results):
merged = pd.concat(results, sort=False, ignore_index=True)
else:
merged = merged.iloc[:0]
elif span_policy == "none":
pass
else:
raise Exception(f"Unrecognized policy {span_policy}")
if how != 'inner':
if how in ('left', 'outer'):
missing = left[~left['_left_index'].isin(merged['_left_index'])].copy()
missing = missing.rename(left_names_map, axis=1)
for col in right.columns:
if hasattr(right[col], 'cat') and right_names_map[col] not in missing.columns:
missing[right_names_map[col]] = pd.Categorical([None] * len(missing),
categories=right[col].cat.categories)
for col in placeholder_columns:
if col not in left_on and right_names_map.get(col, col) not in left.columns:
missing[right_names_map.get(col, col)] = 0 # -np.arange(len(missing)) - 1
merged = pd.concat([merged, missing.rename(dict(zip(left.columns, left_names)), axis=1)], sort=False,
ignore_index=True)
if how in ('right', 'outer'):
missing = right[~right['_right_index'].isin(merged['_right_index'])].copy()
missing = missing.rename(right_names_map, axis=1)
for col in left.columns:
if hasattr(left[col], 'cat') and left_names_map[col] not in missing.columns:
missing[left_names_map[col]] = pd.Categorical([None] * len(missing),
categories=left[col].cat.categories)
for col in placeholder_columns:
if col not in right_on and left_names_map.get(col, col) not in right.columns:
missing[left_names_map.get(col, col)] = 0 # -np.arange(len(missing)) - 1
merged = pd.concat([merged, missing.rename(dict(zip(right.columns, right_names)), axis=1)], sort=False,
ignore_index=True)
merged = merged.sort_values(['_left_index', '_right_index'])
del merged['_left_index']
del merged['_right_index']
merged = merged.reset_index(drop=True)
for col, categories in categoricals.items():
merged[col] = pd.Categorical.from_codes(merged[col].fillna(-1).astype(int), categories=categories)
return merged
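# Example sketch (hypothetical frames/columns): join mention rows to sentence rows
# when their (begin, end) spans overlap within the same document.
#
#     merged = merge_with_spans(
#         mentions, sentences,
#         on=["doc_id", ("begin", "end")],
#         how="inner", span_policy="partial_strict")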
def make_id_from_merged(*indices_arrays, same_ids=False, apply_on=None):
"""
Compute new ids from connected components by looking at `indices_arrays`
Parameters
----------
indices_arrays: collections.Sequence
1d array of positive integers
same_ids: bool
Do the multiple arrays represent the same ids ? (a 3 in one column should therefore be
        connected to a 3 in another, even if they are not on the same row)
apply_on: list of (int, any)
Return the new ids matching old ids
for each (index, vector) in apply_on:
return new_ids matching those in vector that should be considered the same
of those of the vector number `index` in the `indices_arrays`
Returns
-------
list of np.ndarray
"""
if not same_ids:
indices_arrays, unique_objects = zip(*(factorize_rows(array, return_categories=True) for array in indices_arrays))
else:
indices_arrays, unique_objects = factorize_rows(indices_arrays, return_categories=True)
unique_objects = [unique_objects] * len(indices_arrays)
offset = max(indices_array.max() for indices_array in indices_arrays) + 1
N = offset * (len(indices_arrays) + 1)
if same_ids:
N = offset
offset = 0
offseted_ids = [s + i * offset for i, s in enumerate(indices_arrays)]
left_ids, right_ids = zip(*[(offseted_ids[i], offseted_ids[j])
for i in range(0, len(indices_arrays) - 1)
for j in range(i + 1, len(indices_arrays))])
left_ids = np.concatenate(left_ids)
right_ids = np.concatenate(right_ids)
_, matches = connected_components(csr_matrix((np.ones(len(left_ids)), (left_ids, right_ids)), shape=(N, N)))
matches = pd.factorize(matches)[0]
if apply_on is None:
return [
matches[s]
for s in offseted_ids
]
else:
return [
matches[factorize_rows(s, categories=unique_objects[i], return_categories=False) + i * offset]
for i, s in apply_on
]
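# Example sketch (hypothetical columns): give a single shared id to every group of
# ids that were connected by a merge (connected components over the id pairs).
#
#     left_new, right_new = make_id_from_merged(
#         merged["mention_id_x"], merged["mention_id_y"], same_ids=True)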
def df_to_csr(rows, cols, data=None, n_rows=None, n_cols=None):
"""
Transforms a dataframe into a csr_matrix
Parameters
----------
data: pd.Series
Data column (column full one True will be used if None)
rows: pd.Series
Column containing row indices (can be Categorical and then codes will be used)
cols: pd.Series
Column containing column indices (can be Categorical and then codes will be used)
n_rows: int
n_cols: int
Returns
-------
csr_matrix
"""
if data is None:
data = np.ones(len(rows), dtype=bool)
if hasattr(rows, 'cat'):
n_rows = len(rows.cat.categories)
rows, rows_cat = rows.cat.codes, rows.cat.categories
else:
n_rows = n_rows or (rows.max() + 1 if len(rows) > 0 else 0)
if hasattr(cols, 'cat'):
n_cols = len(cols.cat.categories)
cols, cols_cat = cols.cat.codes, cols.cat.categories
else:
n_cols = n_cols or (cols.max() + 1 if len(cols) > 0 else 0)
return csr_matrix((np.asarray(data), (np.asarray(rows), np.asarray(cols))), shape=(n_rows, n_cols))
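# Example sketch (hypothetical columns): build a boolean document x token matrix.
#
#     mat = df_to_csr(tokens["doc_id"], tokens["token_id"])
#     # mat[i, j] is True when token j occurs in document i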
def df_to_flatarray(rows, data, n_rows=None):
"""
Transforms a dataframe into a flat array
Parameters
----------
data: pd.Series
        Data column (a column full of True will be used if None)
rows: pd.Series
Column containing row indices (can be Categorical and then codes will be used)
n_rows: int
Returns
-------
np.ndarray
"""
if hasattr(rows, 'cat'):
n_rows = len(rows.cat.categories)
rows, rows_cat = rows.cat.codes, rows.cat.categories
else:
n_rows = n_rows or (rows.max() + 1)
res = np.zeros(n_rows, dtype=data.dtype)
res[rows] = np.asarray(data)
return res
def csr_to_df(csr, row_categories=None, col_categories=None, row_name=None, col_name=None, value_name=None):
"""
Convert a csr_matrix to a dataframe
Parameters
----------
csr: csr_matrix
row_categories: any
Categories to rebuild the real object from their row indices
col_categories: any
Categories to rebuild the real object from their col indices
row_name: str
What name to give to the column built from the row indices
col_name: str
What name to give to the column built from the col indices
value_name:
What name to give to the column built from the values
If None, no value column will be built
Returns
-------
pd.DataFrame
"""
csr = csr.tocoo()
rows, cols, values = csr.row, csr.col, csr.data
if isinstance(row_categories, pd.DataFrame):
rows_df = row_categories.iloc[rows]
elif isinstance(row_categories, pd.Series):
rows_df = pd.DataFrame({row_categories.name: row_categories.iloc[rows]})
elif isinstance(row_categories, pd.CategoricalDtype):
rows_df = pd.DataFrame({row_name: pd.Categorical.from_codes(rows, dtype=row_categories)})
else:
rows_df = pd.DataFrame({row_name: rows})
if isinstance(col_categories, pd.DataFrame):
cols_df = col_categories.iloc[cols]
elif isinstance(col_categories, pd.Series):
cols_df = pd.DataFrame({col_categories.name: col_categories.iloc[cols]})
elif isinstance(col_categories, pd.CategoricalDtype):
cols_df = pd.DataFrame({col_name: pd.Categorical.from_codes(cols, dtype=col_categories)})
else:
cols_df = pd.DataFrame({col_name: cols})
res = (rows_df.reset_index(drop=True), cols_df.reset_index(drop=True))
if value_name is not None:
res = res + (pd.DataFrame({value_name: values}),)
return pd.concat(res, axis=1)
def factorize_rows(rows, categories=None, group_nans=True, subset=None, freeze_categories=True, return_categories=True):
if not isinstance(rows, list):
was_list = False
all_rows = [rows]
else:
all_rows = rows
was_list = True
del rows
not_null_subset = (subset if subset is not None else all_rows[0].columns if hasattr(all_rows[0], 'columns') else [all_rows[0].name])
cat_arrays = [[] for _ in not_null_subset]
for rows in (categories, *all_rows) if categories is not None else all_rows:
for (col_name, col), dest in zip(([(0, rows)] if len(rows.shape) == 1 else rows[subset].items() if subset is not None else rows.items()), cat_arrays):
dest.append(np.asarray(col))
cat_arrays = [np.concatenate(arrays) for arrays in cat_arrays]
is_not_nan = None
if not group_nans:
is_not_nan = ~pd.isna(np.stack(cat_arrays, axis=1)).any(1)
cat_arrays = [arrays[is_not_nan] for arrays in cat_arrays]
if len(cat_arrays) > 1:
relative_values, unique_values = pd.factorize(fast_zip(cat_arrays))
else:
relative_values, unique_values = pd.factorize(cat_arrays[0])
if freeze_categories and categories is not None:
relative_values[relative_values >= len(categories)] = -1
if not group_nans:
new_relative_values = np.full(is_not_nan.shape, fill_value=-1, dtype=relative_values.dtype)
new_relative_values[is_not_nan] = relative_values
new_relative_values[~is_not_nan] = len(unique_values) + np.arange((~is_not_nan).sum())
relative_values = new_relative_values
offset = len(categories) if categories is not None else 0
res = []
for rows in all_rows:
new_rows = relative_values[offset:offset + len(rows)]
if isinstance(rows, (pd.DataFrame, pd.Series)):
new_rows = pd.Series(new_rows)
new_rows.index = rows.index
new_rows.name = "+".join(not_null_subset)
res.append(new_rows)
offset += len(rows)
if categories is None and return_categories:
if isinstance(all_rows[0], pd.DataFrame):
if len(cat_arrays) > 1:
categories = pd.DataFrame(dict(zip(not_null_subset, [np.asarray(l) for l in zip(*unique_values)])))
else:
categories = pd.DataFrame({not_null_subset[0]: unique_values})
categories = categories.astype({k: dtype for k, dtype in next(rows for rows in all_rows if len(rows)).dtypes.items() if k in not_null_subset})
elif isinstance(all_rows[0], pd.Series):
categories = pd.Series(unique_values)
categories.name = all_rows[0].name
categories = categories.astype(next(rows.dtype for rows in all_rows if len(rows)))
else:
categories = np.asarray([l for l in zip(*unique_values)])
if not was_list:
res = res[0]
if not return_categories:
return res
return res, categories
def normalize_vocabularies(dfs, vocabularies=None, train_vocabularies=True, unk=None, verbose=0):
"""
Categorize the columns of the dataframes so that they share the same
categories if they share the same columns
    If a column's name ends with '_id', do not categorize it since it is not something we want to train on
Parameters
----------
dfs: list of pd.DataFrame
DataFrame whose columns will be categorized
vocabularies: dict or None
Existing vocabulary to use if any
train_vocabularies: bool or dict of (str, bool)
Which category to extend/create in the voc ?
unk: dict of (str, any)
Which filler should we put for an unknown object if we cannot train the corresponding voc ?
verbose: int
Returns
-------
list of pd.DataFrame, dict
"""
# Define label vocabulary
if unk is None:
unk = {}
if vocabularies is None:
vocabularies = {}
voc_order = list(vocabularies.keys())
if train_vocabularies is False:
train_vocabularies = defaultdict(lambda: False)
else:
train_vocabularies_ = defaultdict(lambda: True)
if isinstance(train_vocabularies, dict):
train_vocabularies_.update(train_vocabularies)
train_vocabularies = train_vocabularies_
del train_vocabularies_
for col_name in vocabularies:
if col_name not in train_vocabularies:
train_vocabularies[col_name] = False
for df in dfs:
for col_name in df:
if not col_name.endswith('_id') and not is_numeric_dtype(df[col_name].dtype):
if train_vocabularies[col_name]:
train_vocabularies[col_name] = True
else:
train_vocabularies[col_name] = False
for col_name, will_train in train_vocabularies.items():
if will_train and verbose:
print(f"Will train vocabulary for {col_name}")
for df in dfs:
for col_name in df:
if hasattr(df[col_name], 'cat') and col_name not in vocabularies and not col_name.endswith('_id'):
if verbose:
print(f"Discovered existing vocabulary ({len(df[col_name].cat.categories)} entities) for {col_name}")
vocabularies[col_name] = list(df[col_name].dtype.categories)
for voc_name, train_voc in train_vocabularies.items():
if train_voc:
voc = list(vocabularies.get(voc_name, []))
if voc_name in unk and unk[voc_name] not in voc:
voc.append(unk[voc_name])
if hasattr(voc, 'categories'):
voc = list(voc.categories)
for df in dfs:
if voc_name in df:
voc.extend(df[voc_name].astype("category").cat.categories)
voc = pd.factorize(voc)[1]
dtype = pd.CategoricalDtype(pd.factorize(voc)[1])
for df in dfs:
if voc_name in df:
df[voc_name] = df[voc_name].astype(dtype)
vocabularies[voc_name] = voc
if voc_name in unk:
df[voc_name].fillna(unk[voc_name], inplace=True)
else:
voc = vocabularies.get(voc_name)
if not hasattr(voc, 'categories'):
voc = pd.CategoricalDtype(voc)
for df in dfs:
if voc_name in df:
df[voc_name] = df[voc_name].astype(voc)
if verbose:
unk_msg = f"unk {unk[voc_name]}" if voc_name in unk else "no unk"
print(f"Normalized {voc_name}, with given vocabulary and {unk_msg}")
if voc_name in unk:
df[voc_name].fillna(unk[voc_name], inplace=True)
# Reorder vocabularies to keep same order as the vocabulary passed in parameters
vocabularies = dict((*((c, vocabularies[c]) for c in voc_order if c in vocabularies),
*((c, vocabularies[c]) for c in vocabularies if c not in voc_order)))
# Reorder dataframes according to vocabulary order
dfs = [
df[[*(c for c in vocabularies if c in df.columns), *(c for c in df.columns if c not in vocabularies)]]
for df in dfs
]
return dfs, vocabularies
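# Example sketch (hypothetical frames/columns): share a trained "label" vocabulary
# between two frames, filling unknown values with an "O" placeholder.
#
#     (train_df, test_df), vocabularies = normalize_vocabularies(
#         [train_df, test_df],
#         train_vocabularies={"label": True},
#         unk={"label": "O"})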
class FasterGroupBy:
def __init__(self, groupby_object, dtypes, name=None):
self.groupby_object = groupby_object
self.dtypes = dtypes
self.name = name
def _retype(self, res):
if self.name is None:
return res.astype(self.dtypes)
return (res.astype(self.dtypes) if self.dtypes is not None else res).reset_index().rename({0: self.name}, axis=1)
def agg(self, *args, **kwargs):
return self._retype(self.groupby_object.agg(*args, **kwargs))
def apply(self, *args, **kwargs):
return self._retype(self.groupby_object.apply(*args, **kwargs))
def __getitem__(self, item):
return FasterGroupBy(self.groupby_object[item], self.dtypes.get(item, None), item if not isinstance(item, (list, tuple)) else None)
class NLStructAccessor(object):
def __init__(self, pandas_obj):
self._obj = pandas_obj
def factorize(self, subset=None, categories=None, group_nans=False,
return_categories=False, freeze_categories=True):
return factorize_rows(self._obj,
subset=subset,
categories=categories,
group_nans=group_nans,
return_categories=return_categories, freeze_categories=freeze_categories)
def flatten(self, *args, **kwargs):
return flatten(self._obj, *args, **kwargs)
def to_flatarray(self, row_column, data_column, n_rows=None):
return df_to_flatarray(self._obj[row_column], self._obj[data_column], n_rows=n_rows)
def to_csr(self, row_column, col_column, data_column=None, n_rows=None, n_cols=None):
return df_to_csr(self._obj[row_column], self._obj[col_column], self._obj[data_column] if data_column is not None else None,
n_rows=n_rows, n_cols=n_cols)
def groupby(self, by, *args, decategorize=None, as_index=False, observed=True, **kwargs):
if not as_index:
if decategorize is None:
decategorize = by
new_dtypes = {k: v if not hasattr(v, 'categories') else v.categories.dtype for k, v in self._obj.dtypes[decategorize].items()}
return FasterGroupBy(self._obj.astype(new_dtypes).groupby(by=by, *args, as_index=as_index, observed=observed, **kwargs), self._obj.dtypes[decategorize])
else:
return self._obj.groupby(by=by, *args, as_index=as_index, **kwargs)
def groupby_assign(self, by, agg, as_index=False, observed=True, **kwargs):
res = self._obj.assign(_index=np.arange(len(self._obj)))
res = res.drop(columns=list(agg.keys())).merge(
# .astype({key: "category" for key in mentions_cluster_ids})
res.groupby(by, observed=observed, **kwargs)
.agg({**agg, "_index": tuple}).reset_index(drop=True)
.nlstruct.flatten("_index"),
how='left',
on='_index',
).drop(columns=["_index"])
if as_index:
res = res.set_index(by)
return res
pd.api.extensions.register_dataframe_accessor("nlstruct")(NLStructAccessor)
pd.api.extensions.register_series_accessor("nlstruct")(NLStructAccessor)
|
the-stack_0_13393 | '''
Function:
    Qianqian Music (千千音乐) downloader: http://music.taihe.com/
Author:
    Charles
WeChat official account:
    Charles的皮卡丘
Disclaimer:
    For learning and exchange only; commercial or illegal use is forbidden.
'''
import os
import click
import requests
from contextlib import closing
'''
Input:
    -mode: search (search mode) / download (download mode)
    --search mode:
    ----songname: name of the song to search for
    --download mode:
    ----need_down_list: list of song names to download
    ----savepath: directory in which downloaded songs are saved
Return:
    -search mode:
    --search_results: the search results
    -download mode:
    --downed_list: list of song names that were downloaded successfully
'''
class qianqian():
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
'referer': 'http://music.baidu.com/'
}
self.search_url = "http://musicapi.qianqian.com/v1/restserver/ting"
self.player_url = 'http://music.baidu.com/data/music/links'
self.search_results = {}
    '''External entry point: dispatch to search or download mode.'''
def get(self, mode='search', **kwargs):
if mode == 'search':
songname = kwargs.get('songname')
self.search_results = self.__searchBySongname(songname)
return self.search_results
elif mode == 'download':
need_down_list = kwargs.get('need_down_list')
downed_list = []
savepath = kwargs.get('savepath') if kwargs.get('savepath') is not None else './results'
if need_down_list is not None:
for download_name in need_down_list:
songid = self.search_results.get(download_name)
params = {"songIds": songid}
res = requests.get(self.player_url, params=params, headers=self.headers)
if not res.json().get('data').get('songList'):
continue
download_url = res.json().get('data').get('songList')[0].get('songLink')
if not download_url:
continue
res = self.__download(download_name, download_url, savepath)
if res:
downed_list.append(download_name)
return downed_list
else:
raise ValueError('mode in qianqian().get must be <search> or <download>...')
    '''Download a single song.'''
def __download(self, download_name, download_url, savepath):
if not os.path.exists(savepath):
os.mkdir(savepath)
download_name = download_name.replace('<', '').replace('>', '').replace('\\', '').replace('/', '') \
.replace('?', '').replace(':', '').replace('"', '').replace(':', '') \
.replace('|', '').replace('?', '').replace('*', '')
savename = 'qianqian_{}'.format(download_name)
count = 0
while os.path.isfile(os.path.join(savepath, savename+'.mp3')):
count += 1
savename = 'qianqian_{}_{}'.format(download_name, count)
savename += '.mp3'
try:
            print('[qianqian-INFO]: downloading --> %s' % savename.split('.')[0])
with closing(requests.get(download_url, headers=self.headers, stream=True, verify=False)) as res:
total_size = int(res.headers['content-length'])
if res.status_code == 200:
label = '[FileSize]:%0.2f MB' % (total_size/(1024*1024))
with click.progressbar(length=total_size, label=label) as progressbar:
with open(os.path.join(savepath, savename), "wb") as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
progressbar.update(1024)
else:
raise RuntimeError('Connect error...')
return True
except:
return False
    '''Search by song name.'''
def __searchBySongname(self, songname):
params = {
"query": songname,
"method": "baidu.ting.search.common",
"format": "json",
"page_no": 1,
"page_size": 15
}
res = requests.get(self.search_url, params=params, headers=self.headers)
results = {}
for song in res.json()['song_list']:
songid = song.get('song_id')
singers = song.get('author').replace("<em>", "").replace("</em>", "")
album = song.get('album_title').replace("<em>", "").replace("</em>", "")
download_name = '%s--%s--%s' % (song.get('title').replace("<em>", "").replace("</em>", ""), singers, album)
count = 0
while download_name in results:
count += 1
download_name = '%s(%d)--%s--%s' % (song.get('title'), count, singers, album)
results[download_name] = songid
return results
'''For testing.'''
if __name__ == '__main__':
qianqian_downloader = qianqian()
res = qianqian_downloader.get(mode='search', songname='尾戒')
qianqian_downloader.get(mode='download', need_down_list=list(res.keys())[:2]) |
the-stack_0_13395 | # Written by Dr Daniel Buscombe, Marda Science LLC
#
# MIT License
#
# Copyright (c) 2020, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
os.environ["TF_DETERMINISTIC_OPS"] = "1"
##calcs
import tensorflow as tf #numerical operations on gpu
import numpy as np
import matplotlib.pyplot as plt
SEED=42
np.random.seed(SEED)
AUTO = tf.data.experimental.AUTOTUNE # used in tf.data.Dataset API
tf.random.set_seed(SEED)
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print('GPU name: ', tf.config.experimental.list_physical_devices('GPU'))
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
TARGET_SIZE = 1024
BATCH_SIZE = 4
@tf.autograph.experimental.do_not_convert
#-----------------------------------
def read_seg_tfrecord_multiclass(example):
"""
"read_seg_tfrecord_multiclass(example)"
This function reads an example from a TFrecord file into a single image and label
This is the "multiclass" version for imagery, where the classes are mapped as follows:
INPUTS:
* TFRecord example object
OPTIONAL INPUTS: None
GLOBAL INPUTS: TARGET_SIZE
OUTPUTS:
* image [tensor array]
* class_label [tensor array]
"""
features = {
"image": tf.io.FixedLenFeature([], tf.string), # tf.string = bytestring (not text string)
"label": tf.io.FixedLenFeature([], tf.string), # shape [] means scalar
}
# decode the TFRecord
example = tf.io.parse_single_example(example, features)
image = tf.image.decode_png(example['image'], channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [TARGET_SIZE,TARGET_SIZE, 3])
#image = tf.reshape(tf.image.rgb_to_grayscale(image), [TARGET_SIZE,TARGET_SIZE, 1])
label = tf.image.decode_png(example['label'], channels=1)
label = tf.cast(label, tf.uint8)#/ 255.0
label = tf.reshape(label, [TARGET_SIZE,TARGET_SIZE, 1])
cond = tf.equal(label, tf.ones(tf.shape(label),dtype=tf.uint8)*7)
label = tf.where(cond, tf.ones(tf.shape(label),dtype=tf.uint8)*6, label)
label = tf.one_hot(tf.cast(label, tf.uint8), 6) #6 = 5 classes (undamaged, minor, major, destroyed, unclass) + null (0)
label = tf.squeeze(label)
image = tf.reshape(image, (image.shape[0], image.shape[1], image.shape[2]))
#image = tf.image.per_image_standardization(image)
return image, label
#-----------------------------------
def get_batched_dataset(filenames):
"""
"get_batched_dataset(filenames)"
This function defines a workflow for the model to read data from
tfrecord files by defining the degree of parallelism, batch size, pre-fetching, etc
and also formats the imagery properly for model training
INPUTS:
* filenames [list]
OPTIONAL INPUTS: None
GLOBAL INPUTS: BATCH_SIZE, AUTO
OUTPUTS: tf.data.Dataset object
"""
option_no_order = tf.data.Options()
option_no_order.experimental_deterministic = True
dataset = tf.data.Dataset.list_files(filenames)
dataset = dataset.with_options(option_no_order)
dataset = dataset.interleave(tf.data.TFRecordDataset, cycle_length=16, num_parallel_calls=AUTO)
dataset = dataset.map(read_seg_tfrecord_multiclass, num_parallel_calls=AUTO)
#dataset = dataset.cache() # This dataset fits in RAM
dataset = dataset.repeat()
#dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) # drop_remainder will be needed on TPU
dataset = dataset.prefetch(AUTO) #
return dataset
# from tensorflow.python.client import device_lib
#
# def get_available_devices():
# local_device_protos = device_lib.list_local_devices()
# return [x.name for x in local_device_protos if x.device_type == 'GPU' or x.device_type == 'CPU']
#==================================================================
for storm in ['matthew', 'michael', 'florence', 'harvey']:
imdir = '/media/marda/TWOTB1/xBD/hurricanes/images/'+storm
lab_path = '/media/marda/TWOTB1/xBD/hurricanes/labels2D/'+storm
tfrecord_dir = '/media/marda/TWOTB1/xBD/hurricanes/tfrecords/'+storm+'/imseg'
# # Run inference on CPU
# with tf.device('/cpu:0'):
##test
filenames = sorted(tf.io.gfile.glob(tfrecord_dir+'/*.jpg'))
dataset = get_batched_dataset(filenames)
B = []
for imgs,lbls in dataset.take(1):
for count,(im,lab) in enumerate(zip(imgs,lbls)):
print(np.shape(lab))
lab= np.argmax(lab,axis=-1)
B.append(np.bincount(lab.flatten(),minlength=6))
plt.subplot(int(BATCH_SIZE/2),int(BATCH_SIZE/2),count+1)
plt.imshow(im)
del im
plt.imshow(lab, alpha=0.5, cmap='bwr')
plt.axis('off')
del lab
plt.show()
np.sum(np.vstack(B),axis=0)
|
the-stack_0_13396 | ############################################################
# log
############################################################
# Contains the custom logger object to be used.
import logging
import sys
import os
def setup_custom_logger():
"""Setups the custom logger to be used globally.
The logger object can be referenced via 'root' in logging.getLogger().
Returns:
The logger object to be used in the script.
"""
    logging.basicConfig(filename=os.path.join(os.getcwd(), 'output.log'),
                        filemode='a',
                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.INFO)
log = logging.getLogger()
log.setLevel(logging.INFO)
stdout_handler = logging.StreamHandler(sys.stdout)
log.addHandler(stdout_handler)
return log
def get_logger():
"""Returns the logger object to be used.
"""
log = setup_custom_logger() if not logging.getLogger('root').hasHandlers() \
else logging.getLogger('root')
return log
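# Illustrative usage sketch (not part of the original module). The messages below are
# hypothetical; the point is that callers just import get_logger() and reuse the same
# handlers, since setup_custom_logger() only runs when no handlers are registered yet.
def _example_usage():
    log = get_logger()
    log.info('Pipeline started')      # written to output.log and echoed to stdout
    log.warning('Low disk space')     # same handlers, no reconfiguration needed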
|
the-stack_0_13398 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from read_excel_MIK import read_excel_MIK
from read_excel_Seichitech import read_excel_Seichitech
from openpyxl import load_workbook
import pandas
class read_write_excel:
"""
    Read, parse, and write Excel files
"""
def __init__(self, filename):
'''
        supports both the MIK and Seichitech platforms
'''
self.filename = filename
fd = pandas.ExcelFile(self.filename)
sheet_names = fd.sheet_names
fd.close()
if "Result Data" in sheet_names and "Graph" in sheet_names and "Raw Data" in sheet_names:
self.fd = read_excel_MIK(self.filename)
else:
self.fd = read_excel_Seichitech(self.filename)
def read_config(self):
'''
read boundary, max_x, max_y information
'''
return self.fd.read_config()
def read_report_time(self):
'''
read report time for line test
'''
return self.fd.read_report_time()
def read_target(self):
'''
get target from excel
'''
return self.fd.read_target()
def read_measure(self):
'''
get measure data from excel
'''
return self.fd.read_measure()
def read_target_and_measure(self):
'''
read target and measure at the same time
'''
target_list = self.read_target()
measure_list_mm, measure_list_pixel = self.read_measure()
return target_list, measure_list_mm, measure_list_pixel
def write_excel(self, write_data):
'''
write output to new sheet
'''
df = pandas.DataFrame(write_data)
book = load_workbook(self.filename)
sheet_names = book.sheetnames
for name in sheet_names:
if "analysis_output" in name:
book.remove(book[name])
writer = pandas.ExcelWriter(self.filename, engine = 'openpyxl')
writer.book = book
df.to_excel(writer, sheet_name='analysis_output', index=False)
work_sheet = book["analysis_output"]
for col in work_sheet.columns:
max_length = 0
column = col[0].column # Get the column name
for cell in col:
try: # Necessary to avoid error on empty cells
if len(str(cell.value)) > max_length:
max_length = len(cell.value)
except:
pass
adjusted_width = (max_length + 2) * 1.2
work_sheet.column_dimensions[column].width = adjusted_width
writer.save()
writer.close()
def destroy(self):
'''
destroy excel fd
'''
self.fd.destroy()
if __name__ == "__main__":
"""
This is for test purpose
"""
fd = read_write_excel("../H_Line/Result.xlsx")
test_type, max_x, max_y, boundary_range = fd.read_config()
target_list, measure_list = fd.read_target_and_measure()
# print("This is %s, max_x = %f, max_y = %f, boundary_range = %f" % ( test_type, max_x, max_y, boundary_range ))
# for i in range(len(target_list)):
# print("\nNO.%d target line ---------------------- (%f, %f) -> (%f, %f)" % ( i + 1, target_list[i][0], target_list[i][1], target_list[i][2], target_list[i][3] ))
# for j in range(len(measure_list[i])):
# print("\tNO.%d measured point ---------------------------- (%f, %f)" % ( j + 1, measure_list[i][j][0], measure_list[i][j][1] ))
# print("\n")
test_dict = {'a':[1,2,3], 'b':[2,3,4], 'c':[3,4,5]}
fd.write_excel(test_dict)
fd = read_write_excel("../POINTS/20180918175024.xlsx")
test_type, max_x, max_y, boundary_range = fd.read_config()
target_list, measure_list = fd.read_target_and_measure()
# print("This is %s, max_x = %f, max_y = %f, boundary_range = %f" % ( test_type, max_x, max_y, boundary_range ))
# for i in range(len(target_list)):
# print("\nNO.%d target point ---------------------- (%f, %f)" % ( i + 1, target_list[i][0], target_list[i][1] ))
# for j in range(len(measure_list[i])):
# print("\tRepeat %d" % ( j + 1 ))
# for k in range(len(measure_list[i][j])):
# print("\t\tNO.%d measured point ---------------------------- (%f, %f)" % ( k + 1, measure_list[i][j][k][0], measure_list[i][j][k][1] ))
# print("\n")
|
the-stack_0_13399 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ntp_auth
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages NTP authentication.
description:
- Manages NTP authentication.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If C(state=absent), the module will remove the given key configuration if it exists.
- If C(state=absent) and C(authentication=on), authentication will be turned off.
options:
key_id:
description:
- Authentication key identifier (numeric).
md5string:
description:
- MD5 String.
auth_type:
description:
- Whether the given md5string is in cleartext or
has been encrypted. If in cleartext, the device
will encrypt it before storing it.
default: text
choices: ['text', 'encrypt']
trusted_key:
description:
- Whether the given key is required to be supplied by a time source
for the device to synchronize to the time source.
choices: [ 'false', 'true' ]
default: 'false'
authentication:
description:
- Turns NTP authentication on or off.
choices: ['on', 'off']
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP authentication configuration
- nxos_ntp_auth:
key_id: 32
md5string: hello
auth_type: text
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
if 'show run' not in command:
command = {
'command': command,
'output': 'json',
}
else:
command = {
'command': command,
'output': 'text',
}
return run_commands(module, [command])
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_ntp_auth(module):
command = 'show ntp authentication-status'
body = execute_show_command(command, module)[0]
ntp_auth_str = body['authentication']
if 'enabled' in ntp_auth_str:
ntp_auth = True
else:
ntp_auth = False
return ntp_auth
def get_ntp_trusted_key(module):
trusted_key_list = []
command = 'show run | inc ntp.trusted-key'
trusted_key_str = execute_show_command(command, module)[0]
if trusted_key_str:
trusted_keys = trusted_key_str.splitlines()
else:
trusted_keys = []
for line in trusted_keys:
if line:
trusted_key_list.append(str(line.split()[2]))
return trusted_key_list
def get_ntp_auth_key(key_id, module):
authentication_key = {}
command = 'show run | inc ntp.authentication-key.{0}'.format(key_id)
auth_regex = (r".*ntp\sauthentication-key\s(?P<key_id>\d+)\s"
r"md5\s(?P<md5string>\S+)\s(?P<atype>\S+).*")
body = execute_show_command(command, module)[0]
try:
match_authentication = re.match(auth_regex, body, re.DOTALL)
group_authentication = match_authentication.groupdict()
authentication_key['key_id'] = group_authentication['key_id']
authentication_key['md5string'] = group_authentication['md5string']
if group_authentication['atype'] == '7':
authentication_key['auth_type'] = 'encrypt'
else:
authentication_key['auth_type'] = 'text'
except (AttributeError, TypeError):
authentication_key = {}
return authentication_key
def get_ntp_auth_info(key_id, module):
auth_info = get_ntp_auth_key(key_id, module)
trusted_key_list = get_ntp_trusted_key(module)
auth_power = get_ntp_auth(module)
if key_id in trusted_key_list:
auth_info['trusted_key'] = 'true'
else:
auth_info['trusted_key'] = 'false'
if auth_power:
auth_info['authentication'] = 'on'
else:
auth_info['authentication'] = 'off'
return auth_info
def auth_type_to_num(auth_type):
if auth_type == 'encrypt':
return '7'
else:
return '0'
def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
ntp_auth_cmds = []
if key_id and md5string:
auth_type_num = auth_type_to_num(auth_type)
ntp_auth_cmds.append(
'ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if trusted_key == 'true':
ntp_auth_cmds.append(
'ntp trusted-key {0}'.format(key_id))
elif trusted_key == 'false':
ntp_auth_cmds.append(
'no ntp trusted-key {0}'.format(key_id))
if authentication == 'on':
ntp_auth_cmds.append(
'ntp authenticate')
elif authentication == 'off':
ntp_auth_cmds.append(
'no ntp authenticate')
return ntp_auth_cmds
def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
auth_remove_cmds = []
if key_id:
auth_type_num = auth_type_to_num(auth_type)
auth_remove_cmds.append(
'no ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if authentication:
auth_remove_cmds.append(
'no ntp authenticate')
return auth_remove_cmds
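# Illustrative sketch (not part of the original module): shows the command lists the two
# builders above produce for a hypothetical key; real values come from the task
# parameters handled in main().
def _example_command_builders():
    add_cmds = set_ntp_auth_key('32', 'helloWorld', 'text', 'true', 'on')
    # ['ntp authentication-key 32 md5 helloWorld 0', 'ntp trusted-key 32',
    #  'ntp authenticate']
    del_cmds = remove_ntp_auth_key('32', 'helloWorld', 'text', 'true', True)
    # ['no ntp authentication-key 32 md5 helloWorld 0', 'no ntp authenticate']
    return add_cmds, del_cmds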
def main():
argument_spec = dict(
key_id=dict(type='str'),
md5string=dict(type='str'),
auth_type=dict(choices=['text', 'encrypt'], default='text'),
trusted_key=dict(choices=['true', 'false'], default='false'),
authentication=dict(choices=['on', 'off']),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
key_id = module.params['key_id']
md5string = module.params['md5string']
auth_type = module.params['auth_type']
trusted_key = module.params['trusted_key']
authentication = module.params['authentication']
state = module.params['state']
if key_id:
if not trusted_key and not md5string:
module.fail_json(msg='trusted_key or md5string MUST be specified')
args = dict(key_id=key_id, md5string=md5string,
auth_type=auth_type, trusted_key=trusted_key,
authentication=authentication)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_ntp_auth_info(key_id, module)
end_state = existing
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if state == 'present':
if delta:
command = set_ntp_auth_key(
key_id, md5string, delta.get('auth_type'),
delta.get('trusted_key'), delta.get('authentication'))
if command:
commands.append(command)
elif state == 'absent':
auth_toggle = None
if existing.get('authentication') == 'on':
auth_toggle = True
if not existing.get('key_id'):
key_id = None
command = remove_ntp_auth_key(
key_id, md5string, auth_type, trusted_key, auth_toggle)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
end_state = get_ntp_auth_info(key_id, module)
delta = dict(set(end_state.items()).difference(existing.items()))
if delta or (len(existing) != len(end_state)):
changed = True
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
|
the-stack_0_13400 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
from fairseq import utils
from fairseq.models.roberta import (
RobertaModel,
RobertaLMHead,
roberta_base_architecture,
roberta_large_architecture,
)
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
@register_model('mpnet')
class MPNet(RobertaModel):
def __init__(self, args, encoder):
super().__init__(args, encoder)
def task_compute(self, task='mlm', **kwargs):
if task == 'mlm':
return self.compute_mlm(**kwargs)
elif task == 'plm':
return self.compute_plm(**kwargs)
else:
return self.compute_mpnet(**kwargs)
def compute_mlm(self, src_tokens, src_lengths, positions, pred_size, **kwargs):
sz = src_tokens.size(1)
emb = self.encode_emb(self.decoder.sentence_encoder, src_tokens, positions)
x = reverse_tensor(emb)
positions_bias = self.encode_relative_emb(self.decoder.sentence_encoder, positions)
for layer in self.decoder.sentence_encoder.layers:
x, _ = layer(x, positions_bias=positions_bias)
x = self.maybe_final_norm(self.decoder.sentence_encoder, x)
x = reverse_tensor(x)
x = self.output_layer(x[:, sz-pred_size:])
return x
def compute_plm(self, src_tokens, src_lengths, positions, pred_size, **kwargs):
emb = self.encode_emb(self.decoder.sentence_encoder, src_tokens, positions)
x = reverse_tensor(emb)
c, q = split_tensor(x, pred_size)
content_position_bias = self.encode_relative_emb(
self.decoder.sentence_encoder, positions[:, :-pred_size]
)
if content_position_bias is not None:
query_position_bias = content_position_bias[:, -pred_size:].contiguous()
else:
query_position_bias = None
sz = c.size(0)
query_mask, content_mask = make_query_and_content_mask(src_tokens, sz, pred_size, kind='PLM')
for i, layer in enumerate(self.decoder.sentence_encoder.layers):
c, q = encode_two_stream_attn(
layer, c, q, content_mask, query_mask, content_position_bias, query_position_bias,
)
q = self.maybe_final_norm(self.decoder.sentence_encoder, q)
q = reverse_tensor(q)
x = self.output_layer(q)
return x
def compute_mpnet(self, src_tokens, src_lengths, positions, pred_size, return_mlm=False, **kwargs):
emb = self.encode_emb(self.decoder.sentence_encoder, src_tokens, positions)
x = reverse_tensor(emb)
c, q = split_tensor(x, pred_size)
content_position_bias = self.encode_relative_emb(self.decoder.sentence_encoder, positions[:, :-pred_size])
if content_position_bias is not None:
query_position_bias = content_position_bias[:, -pred_size:].contiguous()
else:
query_position_bias = None
sz = c.size(0) - pred_size
query_mask, content_mask = make_query_and_content_mask(src_tokens, sz, pred_size)
for i, layer in enumerate(self.decoder.sentence_encoder.layers):
c, q = encode_two_stream_attn(
layer, c, q, content_mask, query_mask, content_position_bias, query_position_bias,
)
q = self.maybe_final_norm(self.decoder.sentence_encoder, q)
q = reverse_tensor(q)
x = self.output_layer(q)
if return_mlm is True:
c = c[-pred_size:]
c = self.maybe_final_norm(self.decoder.sentence_encoder, c)
c = reverse_tensor(c)
c = self.output_layer(c)
return x, c
return x
@staticmethod
def encode_emb(self, src_tokens, positions=None):
x = self.embed_tokens(src_tokens)
if self.embed_scale is not None:
x *= self.embed_scale
if positions is not None:
x += F.embedding(positions + 2, self.embed_positions.weight, self.padding_idx)
if self.emb_layer_norm is not None and not self.normalize_before:
x = self.emb_layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return x
@staticmethod
def maybe_final_norm(self, x):
if self.emb_layer_norm is not None and self.normalize_before:
return self.emb_layer_norm(x)
return x
@staticmethod
def encode_relative_emb(self, positions):
if not self.relative_attention_bias:
return None
qlen, klen = positions.size(1), positions.size(1)
context_position = positions[:, :, None]
memory_position = positions[:, None, :]
relative_position = memory_position - context_position
rp_bucket = self.relative_position_bucket(
relative_position,
num_buckets=self.relative_attention_num_buckets,
)
rp_bucket = rp_bucket.to(positions.device)
values = self.relative_attention_bias(rp_bucket)
values = values.permute(0, 3, 1, 2).contiguous() # [bsz, head, qlen, klen]
values = values.view(-1, qlen, klen)
return values
def reverse_tensor(x):
return x.transpose(0, 1)
def split_tensor(x, split_size):
sz = x.size(0) - split_size
return x[:sz].contiguous(), x[sz:].contiguous()
def encode_two_stream_attn(
self,
c,
q,
content_mask: torch.Tensor = None,
query_mask: torch.Tensor = None,
content_position_bias: torch.Tensor = None,
query_position_bias: torch.Tensor = None,
):
def reuse_fn(x, residual):
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
return x
residual_c = c
residual_q = q
c = self.maybe_layer_norm(self.self_attn_layer_norm, c, before=True)
q = self.maybe_layer_norm(self.self_attn_layer_norm, q, before=True)
c, q = two_stream_self_attention(
self.self_attn,
query=[c, q],
key=c,
value=c,
query_mask=query_mask,
content_mask=content_mask,
query_position_bias=query_position_bias,
content_position_bias=content_position_bias,
)
c = reuse_fn(c, residual_c)
q = reuse_fn(q, residual_q)
return c, q
def two_stream_self_attention(
self,
query: torch.Tensor,
key: torch.Tensor = None,
value: torch.Tensor = None,
query_mask: torch.Tensor = None,
content_mask: torch.Tensor = None,
query_position_bias: torch.Tensor = None,
content_position_bias: torch.Tensor = None,
):
c, q = query
bsz, embed_dim = key.size(1), key.size(2)
def transpose_fn(x):
return x.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
def fill_mask(attn_weights, attn_mask):
return attn_weights.masked_fill(
attn_mask.unsqueeze(0),
float('-inf')
)
def attn_fn(_q, k, v, mask=None, bias=None):
_q = transpose_fn(self.scaling * self.in_proj_q(_q))
attn_weights = torch.bmm(_q, k.transpose(1, 2))
if bias is not None:
attn_weights += bias
if mask is not None:
attn_weights = fill_mask(attn_weights, mask)
attn_weights = utils.softmax(
attn_weights, dim=-1,
).type_as(attn_weights)
attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn = torch.bmm(attn_weights, v)
attn = attn.transpose(0, 1).contiguous().view(-1, bsz, embed_dim)
return self.out_proj(attn)
k = transpose_fn(self.in_proj_k(key))
v = transpose_fn(self.in_proj_v(value))
c = attn_fn(c, k, v, mask=content_mask, bias=content_position_bias)
q = attn_fn(q, k, v, mask=query_mask, bias=query_position_bias)
return c, q
def make_query_and_content_mask(tensor, a, b, kind='MPLM'):
'''
Query Mask:
| <- PLM -> | | <- MPNet -> |
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 0 1 1 ] [ 0 0 0 0 0 1 1 1 0 0 ]
[ 0 0 0 0 0 0 1 ] [ 0 0 0 0 0 0 1 1 1 0 ]
Content Mask:
| <- PLM -> | | <- MPNet -> |
x x x x x x x m m m
1 2 3 4 5 6 7 5 6 7
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 ] [ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 0 1 1 ] [ 0 0 0 0 0 1 1 1 0 0 ]
[ 0 0 0 0 0 0 1 ] [ 0 0 0 0 0 0 1 1 1 0 ]
[ 0 0 0 0 0 0 0 ] [ 0 0 0 0 0 0 0 1 1 1 ]
[ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 0 0 0 ]
[ 0 0 0 0 1 1 1 0 0 0 ]
'''
def make_query_mask():
mask = torch.triu(torch.ones(b, b), 0)
        mask = (torch.ones(b, a - b), 1 - mask) if kind == 'PLM' else (torch.ones(b, a - b), 1 - mask, mask)
return torch.cat(mask, dim=-1).eq(0)
def make_content_mask():
mask = [torch.zeros(a - b, b), torch.tril(torch.ones(b, b), 0)]
        if kind != 'PLM':
mask.append(torch.zeros(b, b))
mask = torch.cat(mask, dim=0)
        mask = (torch.ones(a, a - b), mask) if kind == 'PLM' else (torch.ones(a + b, a - b), mask, 1 - mask)
return torch.cat(mask, dim=-1).eq(0)
return make_query_mask().to(tensor.device), make_content_mask().to(tensor.device)
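# Illustrative sketch (not part of the original model code): materialises the masks drawn
# in the docstring above for toy sizes. The values of a and b are made up; in training,
# a is the content length and b the number of predicted positions.
def _example_mask_shapes():
    dummy = torch.zeros(1)  # only used by the helper to pick a device
    q_mpnet, c_mpnet = make_query_and_content_mask(dummy, 7, 3)          # MPNet masks
    q_plm, c_plm = make_query_and_content_mask(dummy, 7, 3, kind='PLM')  # PLM masks
    # MPNet: query mask (3, 10), content mask (10, 10); PLM: (3, 7) and (7, 7)
    return q_mpnet.shape, c_mpnet.shape, q_plm.shape, c_plm.shape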
@register_model_architecture('mpnet', 'mpnet_base')
def mpnet_base_architecture(args):
roberta_base_architecture(args)
@register_model_architecture('mpnet', 'mpnet_rel_base')
def mpnet_rel_base_architecture(args):
args.use_relative_positions = getattr(args, 'use_relative_positions', True)
mpnet_base_architecture(args)
@register_model_architecture('mpnet', 'mpnet_large')
def mpnet_large_architecture(args):
roberta_large_architecture(args)
|
the-stack_0_13401 |
# Unicode and Emoji
# importing necessary library
from tkinter import * # from tkinter we import everything
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageTk
import tkinter.messagebox as mbox
import emoji
import pandas as pd
data = pd.read_csv('emoji_df.csv')
emoji1 = data['emoji'].tolist()
code1 = data['codepoints'].tolist()
# Main Window
frame = Tk()
frame.title('Unicode and Emoji')
frame.geometry('950x700')
# frame.configure(bg = "white")
# image on the main window
path = "Images/front.jpg"
# Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.
img1 = ImageTk.PhotoImage(Image.open(path))
# The Label widget is a standard Tkinter widget used to display a text or image on the screen.
panel = tk.Label(frame, image = img1)
panel.place(x = 55, y = 110)
# starting label
start1 = Label(frame, text='UNICODE & EMOJI', font=("Arial", 55,"underline"),fg="magenta")
start1.place(x=130,y=10)
def start_fun():
frame.destroy()
# creating the start button
prevB = Button(frame, text='START', command=start_fun, font=("Arial", 25), bg = "light green", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 120, y = 590)
# defined exit_win function, to show a exit dialog box when tried to exit
def exit_win():
if mbox.askokcancel("Exit", "Do you want to exit?"):
frame.destroy()
# creating an exit button
prevB = Button(frame, text='EXIT', command=exit_win, font=("Arial", 25), bg = "red", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 700, y = 590)
# show the exit dialog box when the main window is closed via the top-right close button of the title bar
frame.protocol("WM_DELETE_WINDOW", exit_win)
frame.mainloop()
# Main Window
frame1 = Tk()
frame1.title('Unicode and Emoji')
frame1.geometry('950x700')
# image on the main window
path1 = "Images/second.jpg"
# Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.
img2 = ImageTk.PhotoImage(Image.open(path1))
# The Label widget is a standard Tkinter widget used to display a text or image on the screen.
panel1 = tk.Label(frame1, image = img2)
panel1.place(x = 465, y = 110)
# starting label
start1 = Label(frame1, text='UNICODE & EMOJI', font=("Arial", 55,"underline"),fg="magenta")
start1.place(x=130,y=10)
# starting label
start1 = Label(frame1, text='Emoji to\nUnicode', font=("Arial", 40),fg="green")
start1.place(x=100,y=120)
# starting label
start1 = Label(frame1, text='Emoji', font=("Arial", 30),fg="brown")
start1.place(x=50,y=250)
# emoji Box
l1_entry = Entry(frame1, font=("Arial", 25), fg='brown', bg="light yellow", borderwidth=3, width=18)
l1_entry.place(x=50, y=300)
# starting label
start1 = Label(frame1, text='Unicode', font=("Arial", 30),fg="brown")
start1.place(x=50,y=400)
# unicode Box
l2_entry = Entry(frame1, font=("Arial", 25), fg='brown', bg="light yellow", borderwidth=3, width=18)
l2_entry.place(x=50, y=450)
# starting label
start1 = Label(frame1, text='Unicode\nto Emoji', font=("Arial", 40),fg="green")
start1.place(x=620,y=120)
# starting label
start1 = Label(frame1, text='Unicode', font=("Arial", 30),fg="brown")
start1.place(x=550,y=250)
# unicode Box
r1_entry = Entry(frame1, font=("Arial", 25), fg='brown', bg="light yellow", borderwidth=3, width=18)
r1_entry.place(x=550, y=300)
# starting label
start1 = Label(frame1, text='Emoji', font=("Arial", 30),fg="brown")
start1.place(x=550,y=400)
# emoji Box
r2_entry = Entry(frame1, font=("Arial", 25), fg='brown', bg="light yellow", borderwidth=3, width=18)
r2_entry.place(x=550, y=450)
def uni_fun():
# emoji_entered = str(l1_entry.get())
# uc_sentence = emoji_entered.encode('unicode-escape')
# l2_entry.insert(0,uc_sentence)
emoji_entered = str(l1_entry.get())
for i in range(0,len(emoji1)):
if emoji1[i]==emoji_entered:
l2_entry.delete(0,END)
l2_entry.insert(0, code1[i])
break
def emo_fun():
code_entered = str(r1_entry.get())
for i in range(0, len(code1)):
if code1[i] == code_entered:
r2_entry.delete(0,END)
r2_entry.insert(0, emoji1[i])
break
# creating the get-unicode button
prevB = Button(frame1, text='GET UNICODE', command=uni_fun, font=("Arial", 25), bg = "orange", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 70, y = 550)
# creating the get-emoji button
prevB = Button(frame1, text='GET EMOJI', command=emo_fun, font=("Arial", 25), bg = "orange", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 650, y = 550)
# exit_win1 function shows a confirmation dialog when the user tries to exit
def exit_win1():
if mbox.askokcancel("Exit", "Do you want to exit?"):
frame1.destroy()
# creating an exit button
prevB = Button(frame1, text='EXIT', command=exit_win1, font=("Arial", 25), bg = "red", fg = "blue", borderwidth=3, relief="raised")
prevB.place(x = 420, y = 600)
# show the exit dialog box when the main window is closed via the top-right close button of the title bar
frame1.protocol("WM_DELETE_WINDOW", exit_win1)
frame1.mainloop() |
the-stack_0_13403 | import zmq
import unittest
from http import client as http
from .simple import Base, TimeoutError
CONFIG='test/wlimit.yaml'
CHAT_FW = "ipc:///tmp/zerogw-test-chatfw"
class Wlimit(Base):
timeout = 2 # in zmq.select units (seconds)
config = CONFIG
def setUp(self):
self.zmq = zmq.Context(1)
self.addCleanup(self.zmq.term)
super().setUp()
self.chatfw = self.zmq.socket(zmq.PULL)
self.addCleanup(self.chatfw.close)
self.chatfw.connect(CHAT_FW)
def backend_recv(self, backend=None):
if backend is None:
sock = self.chatfw
else:
sock = self.minigame
if (([sock], [], []) !=
zmq.select([sock], [], [], timeout=self.timeout)):
raise TimeoutError()
val = sock.recv_multipart()
if val[1] == b'heartbeat':
return self.backend_recv(backend=backend)
return val
def testWorking(self):
ws1 = self.websock()
ws1.connect()
ws1.client_send('hello1') # checks backend delivery itself
ws2 = self.websock()
ws2.connect()
ws2.client_send('hello2')
ws1.client_send('hello3')
ws3 = self.websock()
ws3.connect()
ws3.client_send('hello1') # checks backend delivery itself
ws4 = self.websock()
ws4.connect()
ws4.client_send('hello2')
ws1.close()
ws5 = self.websock()
ws5.connect()
ws5.client_send("hello4")
ws2.client_send("fifth_hello")
ws2.close()
ws3.close()
ws4.close()
ws5.close()
def testNoMoreSlots(self):
ws1 = self.websock()
ws1.connect()
self.addCleanup(ws1.close)
ws1.client_send('hello1') # checks backend delivery itself
ws2 = self.websock()
ws2.connect()
self.addCleanup(ws2.close)
ws2.client_send('hello2')
ws1.client_send('hello3')
ws3 = self.websock()
ws3.connect()
self.addCleanup(ws3.close)
ws3.client_send('hello1') # checks backend delivery itself
ws4 = self.websock()
ws4.connect()
self.addCleanup(ws4.close)
ws4.client_send('hello2')
ws5 = self.websock()
with self.assertRaisesRegex(http.BadStatusLine, "''"):
ws5.connect()
self.addCleanup(ws5.http.close)
ws2.client_send("fifth_hello")
if __name__ == '__main__':
unittest.main()
|
the-stack_0_13407 | # Copyright 2016 - Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import pecan
from oslo_log import log
from oslo_utils.strutils import bool_from_string
from osprofiler import profiler
from pecan.core import abort
from vitrage.api.controllers.rest import RootRestController
from vitrage.api.policy import enforce
LOG = log.getLogger(__name__)
# noinspection PyBroadException
@profiler.trace_cls("rca controller",
info={}, hide_args=False, trace_private=False)
class RCAController(RootRestController):
@pecan.expose('json')
def index(self, alarm_id, all_tenants=False):
return self.get(alarm_id, all_tenants)
@pecan.expose('json')
def get(self, alarm_id, all_tenants=False):
all_tenants = bool_from_string(all_tenants)
if all_tenants:
enforce('get rca:all_tenants', pecan.request.headers,
pecan.request.enforcer, {})
else:
enforce('get rca', pecan.request.headers,
pecan.request.enforcer, {})
LOG.info('received show rca with alarm id %s', alarm_id)
return self.get_rca(alarm_id, all_tenants)
@staticmethod
def get_rca(alarm_id, all_tenants):
try:
graph_data = pecan.request.client.call(pecan.request.context,
'get_rca',
root=alarm_id,
all_tenants=all_tenants)
LOG.info(graph_data)
graph = json.loads(graph_data)
return graph
except Exception:
LOG.exception('Failed to get RCA.')
abort(404, 'Failed to get RCA')
|
the-stack_0_13408 | from __future__ import unicode_literals
import os
import sys
import urllib.request
import shutil
from contextlib import closing
#import gzip
import datetime
from dateutil import parser
import logging
#import subprocess
from netCDF4 import Dataset
import rasterio as rio
import eeUtil
import numpy as np
LOG_LEVEL = logging.INFO
CLEAR_COLLECTION_FIRST = False
DOWNLOAD_FILE = True
# constants for bleaching alerts
SOURCE_URL = 'http://soton.eead.csic.es/spei/nc/{filename}'
SOURCE_FILENAME = 'spei{month_lag}.nc'
FILENAME = 'cli_039_lag{lag}_{date}'
SDS_NAME = 'NETCDF:\"{nc_name}\":{var_name}'
VAR_NAME = 'spei'
TIME_NAME = 'time'
TIMELAGS = ['06']
# Read from dataset
NODATA_VALUE = None
DATA_TYPE = 'Byte' # Byte/Int16/UInt16/UInt32/Int32/Float32/Float64/CInt16/CInt32/CFloat32/CFloat64
MISSING_VALUE_NAME = "missing_value"
DATA_DIR = 'data/'
GS_FOLDER = 'cli_039_spei'
EE_COLLECTION = 'cli_039_spei'
MAX_ASSETS = 36
DATE_FORMAT = '%Y%m15'
TIMESTEP = {'days': 30}
def getAssetName(date, lag):
'''get asset name from datestamp'''
return os.path.join(EE_COLLECTION, FILENAME.format(date=date, lag=lag))
def getDate(filename):
'''get last 8 chrs of filename'''
return os.path.splitext(os.path.basename(filename))[0][-8:]
def getNewTargetDates(exclude_dates):
'''Get new dates excluding existing'''
new_dates = []
date = datetime.date.today()
    date = date.replace(day=15)
for i in range(MAX_ASSETS):
date -= datetime.timedelta(**TIMESTEP)
        date = date.replace(day=15)
datestr = date.strftime(DATE_FORMAT)
if datestr not in exclude_dates + new_dates:
new_dates.append(datestr)
return new_dates
def fetch(filename, lag):
'''Fetch files by datestamp'''
# New data may not yet be posted
sourceUrl = SOURCE_URL.format(filename=SOURCE_FILENAME.format(month_lag=lag))
try:
urllib.request.urlretrieve(sourceUrl, filename)
except Exception as e:
logging.warning('Could not fetch {}'.format(sourceUrl))
logging.error(e)
return filename
def extract_metadata(nc_file):
nc = Dataset(nc_file)
logging.debug(nc)
logging.debug(nc.variables)
logging.debug(nc[VAR_NAME])
dtype = str(nc[VAR_NAME].dtype)
nodata = float(nc[VAR_NAME].getncattr("_FillValue"))
#nodata = float(nc[VAR_NAME].getncattr(MISSING_VALUE_NAME))
del nc
return dtype, nodata
def retrieve_formatted_dates(nc_file, date_pattern=DATE_FORMAT):
'''
Inputs:
* pointer to a netcdf file
Outputs:
* dates formatted according to DATE_FORMAT
'''
# Extract time variable range
nc = Dataset(nc_file)
time_displacements = nc[TIME_NAME]
del nc
# Identify time units
# fuzzy=True allows the parser to pick the date out from a string with other text
time_units = time_displacements.getncattr('units')
logging.debug("Time units: {}".format(time_units))
ref_time = parser.parse(time_units, fuzzy=True)
logging.debug("Reference time: {}".format(ref_time))
# Format times to DATE_FORMAT
###
## REPLACE W/ MAP FUNCTION
###
formatted_dates = [(ref_time + datetime.timedelta(days=int(time_disp))).strftime(date_pattern) for time_disp in time_displacements]
logging.debug('Dates available: {}'.format(formatted_dates))
return(formatted_dates)
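# Illustrative sketch (not part of the original script): reproduces the date handling
# above without needing a netCDF file. The units string and day offsets are made up;
# real values come from the SPEI file's time variable.
def _example_time_decoding():
    time_units = 'days since 1900-01-01'              # hypothetical units attribute
    ref_time = parser.parse(time_units, fuzzy=True)   # -> datetime(1900, 1, 1)
    day_offsets = [44209, 44240]                      # hypothetical time displacements
    return [(ref_time + datetime.timedelta(days=int(d))).strftime(DATE_FORMAT)
            for d in day_offsets]                     # e.g. ['20210115', '20210215']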
def extract_subdata_by_date(nc_file, lag, dtype, nodata, available_dates, target_dates):
'''
new_dates should be a list of tuples of form (date, index_in_netcdf)
'''
nc = Dataset(nc_file)
sub_tifs = []
for date in target_dates:
# Find index in available dates, if not available, skip this date
try:
date_ix = available_dates.index(date)
logging.info("Date {} found! Processing...".format(date))
except:
logging.error("Date {} not found in available dates".format(date))
continue
# Extract data
data = nc[VAR_NAME][date_ix,:,:]
# Create profile/tif metadata
south_lat = -90
north_lat = 90
west_lon = -180
east_lon = 180
# Transformation function
transform = rio.transform.from_bounds(west_lon, south_lat, east_lon, north_lat, data.shape[1], data.shape[0])
# Profile
profile = {
'driver':'GTiff',
'height':data.shape[0],
'width':data.shape[1],
'count':1,
'dtype':dtype,
'crs':'EPSG:4326',
'transform':transform,
'compress':'lzw',
'nodata':nodata
}
# Set filename
sub_tif = DATA_DIR + '{}.tif'.format(FILENAME.format(date=date, lag=lag))
logging.info(sub_tif)
with rio.open(sub_tif, 'w', **profile) as dst:
## Need to flip array, original data comes in upside down
flipped_array = np.flipud(data.astype(dtype))
dst.write(flipped_array, indexes=1)
sub_tifs.append(sub_tif)
del nc
return sub_tifs
def processNewData(existing_dates, lag):
'''fetch, process, upload, and clean new data'''
# 1. Determine which years to read from the netCDF file
target_dates = getNewTargetDates(existing_dates)
# 2. Fetch datafile
logging.info('Fetching files')
nc_file = fetch(DATA_DIR + 'nc_file.nc', lag)
available_dates = retrieve_formatted_dates(nc_file)
dtype, nodata = extract_metadata(nc_file)
logging.info('type: ' + dtype)
logging.info('nodata val: ' + str(nodata))
if target_dates:
# 3. Convert new files
logging.info('Converting files')
sub_tifs = extract_subdata_by_date(nc_file, lag, dtype, nodata, available_dates, target_dates)
logging.info(sub_tifs)
# 4. Upload new files
logging.info('Uploading files')
dates = [getDate(tif) for tif in sub_tifs]
datestamps = [datetime.datetime.strptime(date, DATE_FORMAT)
for date in dates]
assets = [getAssetName(date, lag) for date in dates]
eeUtil.uploadAssets(sub_tifs, assets, GS_FOLDER, datestamps)
# 5. Delete local files
logging.info('Cleaning local files')
os.remove(nc_file)
for tif in sub_tifs:
logging.debug('deleting: ' + tif)
os.remove(tif)
return assets
return []
def checkCreateCollection(collection):
'''List assests in collection else create new collection'''
if eeUtil.exists(collection):
return eeUtil.ls(collection)
else:
logging.info('{} does not exist, creating'.format(collection))
eeUtil.createFolder(collection, imageCollection=True, public=True)
return []
def deleteExcessAssets(dates, max_assets):
'''Delete assets if too many'''
# oldest first
dates.sort()
if len(dates) > max_assets:
for date in dates[:-max_assets]:
eeUtil.removeAsset(getAssetName(date, TIMELAGS[0]))
def main():
'''Ingest new data into EE and delete old data'''
logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL)
logging.info('STARTING')
# Initialize eeUtil
eeUtil.initJson()
# 1. Check if collection exists and create
if CLEAR_COLLECTION_FIRST:
if eeUtil.exists(EE_COLLECTION):
eeUtil.removeAsset(EE_COLLECTION, recursive=True)
existing_assets = checkCreateCollection(EE_COLLECTION)
existing_dates = [getDate(a) for a in existing_assets]
# 2. Fetch, process, stage, ingest, clean
new_assets = []
for lag in TIMELAGS:
new_assets.extend(processNewData(existing_dates, lag))
new_dates = [getDate(a) for a in new_assets]
# 3. Delete old assets
existing_dates = existing_dates + new_dates
logging.info('Existing assets: {}, new: {}, max: {}'.format(
len(existing_dates), len(new_dates), MAX_ASSETS))
deleteExcessAssets(existing_dates, MAX_ASSETS)
###
logging.info('SUCCESS')
|
the-stack_0_13409 | """Test the cloud.iot module."""
import asyncio
from unittest.mock import patch, MagicMock, PropertyMock
from aiohttp import WSMsgType, client_exceptions, web
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components.cloud import (
Cloud, iot, auth_api, MODE_DEV)
from homeassistant.components.cloud.const import (
PREF_ENABLE_ALEXA, PREF_ENABLE_GOOGLE)
from homeassistant.util import dt as dt_util
from tests.components.alexa import test_smart_home as test_alexa
from tests.common import mock_coro, async_fire_time_changed
from . import mock_cloud_prefs
@pytest.fixture
def mock_client():
"""Mock the IoT client."""
client = MagicMock()
type(client).closed = PropertyMock(side_effect=[False, True])
# Trigger cancelled error to avoid reconnect.
with patch('asyncio.sleep', side_effect=asyncio.CancelledError), \
patch('homeassistant.components.cloud.iot'
'.async_get_clientsession') as session:
session().ws_connect.return_value = mock_coro(client)
yield client
@pytest.fixture
def mock_handle_message():
"""Mock handle message."""
with patch('homeassistant.components.cloud.iot'
'.async_handle_message') as mock:
yield mock
@pytest.fixture
def mock_cloud():
"""Mock cloud class."""
return MagicMock(subscription_expired=False)
@asyncio.coroutine
def test_cloud_calling_handler(mock_client, mock_handle_message, mock_cloud):
"""Test we call handle message with correct info."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.text,
json=MagicMock(return_value={
'msgid': 'test-msg-id',
'handler': 'test-handler',
'payload': 'test-payload'
})
))
mock_handle_message.return_value = mock_coro('response')
mock_client.send_json.return_value = mock_coro(None)
yield from conn.connect()
# Check that we sent message to handler correctly
assert len(mock_handle_message.mock_calls) == 1
p_hass, p_cloud, handler_name, payload = \
mock_handle_message.mock_calls[0][1]
assert p_hass is mock_cloud.hass
assert p_cloud is mock_cloud
assert handler_name == 'test-handler'
assert payload == 'test-payload'
# Check that we forwarded response from handler to cloud
assert len(mock_client.send_json.mock_calls) == 1
assert mock_client.send_json.mock_calls[0][1][0] == {
'msgid': 'test-msg-id',
'payload': 'response'
}
@asyncio.coroutine
def test_connection_msg_for_unknown_handler(mock_client, mock_cloud):
"""Test a msg for an unknown handler."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.text,
json=MagicMock(return_value={
'msgid': 'test-msg-id',
'handler': 'non-existing-handler',
'payload': 'test-payload'
})
))
mock_client.send_json.return_value = mock_coro(None)
yield from conn.connect()
# Check that we sent the correct error
assert len(mock_client.send_json.mock_calls) == 1
assert mock_client.send_json.mock_calls[0][1][0] == {
'msgid': 'test-msg-id',
'error': 'unknown-handler',
}
@asyncio.coroutine
def test_connection_msg_for_handler_raising(mock_client, mock_handle_message,
mock_cloud):
"""Test we sent error when handler raises exception."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.text,
json=MagicMock(return_value={
'msgid': 'test-msg-id',
'handler': 'test-handler',
'payload': 'test-payload'
})
))
mock_handle_message.side_effect = Exception('Broken')
mock_client.send_json.return_value = mock_coro(None)
yield from conn.connect()
# Check that we sent the correct error
assert len(mock_client.send_json.mock_calls) == 1
assert mock_client.send_json.mock_calls[0][1][0] == {
'msgid': 'test-msg-id',
'error': 'exception',
}
@asyncio.coroutine
def test_handler_forwarding():
"""Test we forward messages to correct handler."""
handler = MagicMock()
handler.return_value = mock_coro()
hass = object()
cloud = object()
with patch.dict(iot.HANDLERS, {'test': handler}):
yield from iot.async_handle_message(
hass, cloud, 'test', 'payload')
assert len(handler.mock_calls) == 1
r_hass, r_cloud, payload = handler.mock_calls[0][1]
assert r_hass is hass
assert r_cloud is cloud
assert payload == 'payload'
async def test_handling_core_messages_logout(hass, mock_cloud):
"""Test handling core messages."""
mock_cloud.logout.return_value = mock_coro()
await iot.async_handle_cloud(hass, mock_cloud, {
'action': 'logout',
'reason': 'Logged in at two places.'
})
assert len(mock_cloud.logout.mock_calls) == 1
async def test_handling_core_messages_refresh_auth(hass, mock_cloud):
"""Test handling core messages."""
mock_cloud.hass = hass
with patch('random.randint', return_value=0) as mock_rand, patch(
'homeassistant.components.cloud.auth_api.check_token'
) as mock_check:
await iot.async_handle_cloud(hass, mock_cloud, {
'action': 'refresh_auth',
'seconds': 230,
})
async_fire_time_changed(hass, dt_util.utcnow())
await hass.async_block_till_done()
assert len(mock_rand.mock_calls) == 1
assert mock_rand.mock_calls[0][1] == (0, 230)
assert len(mock_check.mock_calls) == 1
assert mock_check.mock_calls[0][1][0] is mock_cloud
@asyncio.coroutine
def test_cloud_getting_disconnected_by_server(mock_client, caplog, mock_cloud):
"""Test server disconnecting instance."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.CLOSING,
))
with patch('asyncio.sleep', side_effect=[None, asyncio.CancelledError]):
yield from conn.connect()
assert 'Connection closed' in caplog.text
@asyncio.coroutine
def test_cloud_receiving_bytes(mock_client, caplog, mock_cloud):
"""Test server disconnecting instance."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.BINARY,
))
yield from conn.connect()
assert 'Connection closed: Received non-Text message' in caplog.text
@asyncio.coroutine
def test_cloud_sending_invalid_json(mock_client, caplog, mock_cloud):
"""Test cloud sending invalid JSON."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.return_value = mock_coro(MagicMock(
type=WSMsgType.TEXT,
json=MagicMock(side_effect=ValueError)
))
yield from conn.connect()
assert 'Connection closed: Received invalid JSON.' in caplog.text
@asyncio.coroutine
def test_cloud_check_token_raising(mock_client, caplog, mock_cloud):
"""Test cloud unable to check token."""
conn = iot.CloudIoT(mock_cloud)
mock_cloud.hass.async_add_job.side_effect = auth_api.CloudError("BLA")
yield from conn.connect()
assert 'Unable to refresh token: BLA' in caplog.text
@asyncio.coroutine
def test_cloud_connect_invalid_auth(mock_client, caplog, mock_cloud):
"""Test invalid auth detected by server."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.side_effect = \
client_exceptions.WSServerHandshakeError(None, None, status=401)
yield from conn.connect()
assert 'Connection closed: Invalid auth.' in caplog.text
@asyncio.coroutine
def test_cloud_unable_to_connect(mock_client, caplog, mock_cloud):
"""Test unable to connect error."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.side_effect = client_exceptions.ClientError(None, None)
yield from conn.connect()
assert 'Unable to connect:' in caplog.text
@asyncio.coroutine
def test_cloud_random_exception(mock_client, caplog, mock_cloud):
"""Test random exception."""
conn = iot.CloudIoT(mock_cloud)
mock_client.receive.side_effect = Exception
yield from conn.connect()
assert 'Unexpected error' in caplog.text
@asyncio.coroutine
def test_refresh_token_before_expiration_fails(hass, mock_cloud):
"""Test that we don't connect if token is expired."""
mock_cloud.subscription_expired = True
mock_cloud.hass = hass
conn = iot.CloudIoT(mock_cloud)
with patch('homeassistant.components.cloud.auth_api.check_token',
return_value=mock_coro()) as mock_check_token, \
patch.object(hass.components.persistent_notification,
'async_create') as mock_create:
yield from conn.connect()
assert len(mock_check_token.mock_calls) == 1
assert len(mock_create.mock_calls) == 1
@asyncio.coroutine
def test_handler_alexa(hass):
"""Test handler Alexa."""
hass.states.async_set(
'switch.test', 'on', {'friendly_name': "Test switch"})
hass.states.async_set(
'switch.test2', 'on', {'friendly_name': "Test switch 2"})
with patch('homeassistant.components.cloud.Cloud.async_start',
return_value=mock_coro()):
setup = yield from async_setup_component(hass, 'cloud', {
'cloud': {
'alexa': {
'filter': {
'exclude_entities': 'switch.test2'
},
'entity_config': {
'switch.test': {
'name': 'Config name',
'description': 'Config description',
'display_categories': 'LIGHT'
}
}
}
}
})
assert setup
mock_cloud_prefs(hass)
resp = yield from iot.async_handle_alexa(
hass, hass.data['cloud'],
test_alexa.get_new_request('Alexa.Discovery', 'Discover'))
endpoints = resp['event']['payload']['endpoints']
assert len(endpoints) == 1
device = endpoints[0]
assert device['description'] == 'Config description'
assert device['friendlyName'] == 'Config name'
assert device['displayCategories'] == ['LIGHT']
assert device['manufacturerName'] == 'Home Assistant'
@asyncio.coroutine
def test_handler_alexa_disabled(hass, mock_cloud_fixture):
"""Test handler Alexa when user has disabled it."""
mock_cloud_fixture[PREF_ENABLE_ALEXA] = False
resp = yield from iot.async_handle_alexa(
hass, hass.data['cloud'],
test_alexa.get_new_request('Alexa.Discovery', 'Discover'))
assert resp['event']['header']['namespace'] == 'Alexa'
assert resp['event']['header']['name'] == 'ErrorResponse'
assert resp['event']['payload']['type'] == 'BRIDGE_UNREACHABLE'
@asyncio.coroutine
def test_handler_google_actions(hass):
"""Test handler Google Actions."""
hass.states.async_set(
'switch.test', 'on', {'friendly_name': "Test switch"})
hass.states.async_set(
'switch.test2', 'on', {'friendly_name': "Test switch 2"})
hass.states.async_set(
'group.all_locks', 'on', {'friendly_name': "Evil locks"})
with patch('homeassistant.components.cloud.Cloud.async_start',
return_value=mock_coro()):
setup = yield from async_setup_component(hass, 'cloud', {
'cloud': {
'google_actions': {
'filter': {
'exclude_entities': 'switch.test2'
},
'entity_config': {
'switch.test': {
'name': 'Config name',
'aliases': 'Config alias',
'room': 'living room'
}
}
}
}
})
assert setup
mock_cloud_prefs(hass)
reqid = '5711642932632160983'
data = {'requestId': reqid, 'inputs': [{'intent': 'action.devices.SYNC'}]}
with patch('homeassistant.components.cloud.Cloud._decode_claims',
return_value={'cognito:username': 'myUserName'}):
resp = yield from iot.async_handle_google_actions(
hass, hass.data['cloud'], data)
assert resp['requestId'] == reqid
payload = resp['payload']
assert payload['agentUserId'] == 'myUserName'
devices = payload['devices']
assert len(devices) == 1
device = devices[0]
assert device['id'] == 'switch.test'
assert device['name']['name'] == 'Config name'
assert device['name']['nicknames'] == ['Config alias']
assert device['type'] == 'action.devices.types.SWITCH'
assert device['roomHint'] == 'living room'
async def test_handler_google_actions_disabled(hass, mock_cloud_fixture):
"""Test handler Google Actions when user has disabled it."""
mock_cloud_fixture[PREF_ENABLE_GOOGLE] = False
with patch('homeassistant.components.cloud.Cloud.async_start',
return_value=mock_coro()):
assert await async_setup_component(hass, 'cloud', {})
reqid = '5711642932632160983'
data = {'requestId': reqid, 'inputs': [{'intent': 'action.devices.SYNC'}]}
resp = await iot.async_handle_google_actions(
hass, hass.data['cloud'], data)
assert resp['requestId'] == reqid
assert resp['payload']['errorCode'] == 'deviceTurnedOff'
async def test_refresh_token_expired(hass):
"""Test handling Unauthenticated error raised if refresh token expired."""
cloud = Cloud(hass, MODE_DEV, None, None)
with patch('homeassistant.components.cloud.auth_api.check_token',
side_effect=auth_api.Unauthenticated) as mock_check_token, \
patch.object(hass.components.persistent_notification,
'async_create') as mock_create:
await cloud.iot.connect()
assert len(mock_check_token.mock_calls) == 1
assert len(mock_create.mock_calls) == 1
async def test_webhook_msg(hass):
"""Test webhook msg."""
cloud = Cloud(hass, MODE_DEV, None, None)
await cloud.prefs.async_initialize()
await cloud.prefs.async_update(cloudhooks={
'hello': {
'webhook_id': 'mock-webhook-id',
'cloudhook_id': 'mock-cloud-id'
}
})
received = []
async def handler(hass, webhook_id, request):
"""Handle a webhook."""
received.append(request)
return web.json_response({'from': 'handler'})
hass.components.webhook.async_register(
'test', 'Test', 'mock-webhook-id', handler)
response = await iot.async_handle_webhook(hass, cloud, {
'cloudhook_id': 'mock-cloud-id',
'body': '{"hello": "world"}',
'headers': {
'content-type': 'application/json'
},
'method': 'POST',
'query': None,
})
assert response == {
'status': 200,
'body': '{"from": "handler"}',
'headers': {
'Content-Type': 'application/json'
}
}
assert len(received) == 1
assert await received[0].json() == {
'hello': 'world'
}
async def test_send_message_not_connected(mock_cloud):
"""Test sending a message that expects no answer."""
cloud_iot = iot.CloudIoT(mock_cloud)
with pytest.raises(iot.NotConnected):
await cloud_iot.async_send_message('webhook', {'msg': 'yo'})
async def test_send_message_no_answer(mock_cloud):
"""Test sending a message that expects no answer."""
cloud_iot = iot.CloudIoT(mock_cloud)
cloud_iot.state = iot.STATE_CONNECTED
cloud_iot.client = MagicMock(send_json=MagicMock(return_value=mock_coro()))
await cloud_iot.async_send_message('webhook', {'msg': 'yo'},
expect_answer=False)
assert not cloud_iot._response_handler
assert len(cloud_iot.client.send_json.mock_calls) == 1
msg = cloud_iot.client.send_json.mock_calls[0][1][0]
assert msg['handler'] == 'webhook'
assert msg['payload'] == {'msg': 'yo'}
async def test_send_message_answer(loop, mock_cloud):
"""Test sending a message that expects no answer."""
cloud_iot = iot.CloudIoT(mock_cloud)
cloud_iot.state = iot.STATE_CONNECTED
cloud_iot.client = MagicMock(send_json=MagicMock(return_value=mock_coro()))
uuid = 5
with patch('homeassistant.components.cloud.iot.uuid.uuid4',
return_value=MagicMock(hex=uuid)):
send_task = loop.create_task(cloud_iot.async_send_message(
'webhook', {'msg': 'yo'}))
await asyncio.sleep(0)
assert len(cloud_iot.client.send_json.mock_calls) == 1
assert len(cloud_iot._response_handler) == 1
msg = cloud_iot.client.send_json.mock_calls[0][1][0]
assert msg['handler'] == 'webhook'
assert msg['payload'] == {'msg': 'yo'}
cloud_iot._response_handler[uuid].set_result({'response': True})
response = await send_task
assert response == {'response': True}
|
the-stack_0_13411 | import sys
sys.path.append("..")
import pickle
import json, gzip
import datetime
import numpy as np
import config as cfg
from utils import log
##################### GLOBAL VARS #######################
GRID = []
CODES = []
STEP = 0.25
###################### LOAD DATA ########################
def load():
global GRID
global CODES
global STEP
if len(GRID) == 0:
# Status
log.p('LOADING eBIRD GRID DATA...', new_line=False)
# Load pickled or zipped grid data
if cfg.EBIRD_MDATA.rsplit('.', 1)[-1] == 'gz':
with gzip.open(cfg.EBIRD_MDATA, 'rt') as pfile:
GRID = json.load(pfile)
else:
with open(cfg.EBIRD_MDATA, 'rb') as pfile:
GRID = pickle.load(pfile)
# Load species codes
with open(cfg.EBIRD_SPECIES_CODES, 'r') as jfile:
CODES = json.load(jfile)
STEP = cfg.GRID_STEP_SIZE
log.p(('DONE!', len(GRID), 'GRID CELLS'))
#################### PROBABILITIES ######################
def getCellData(lat, lon):
# Find nearest cell
for cell in GRID:
if lat > cell['lat'] - STEP and lat < cell['lat'] + STEP and lon > cell['lon'] - STEP and lon < cell['lon'] + STEP:
return cell
# No cell
return None
def getWeek():
    # Map the current ISO calendar week (1-52) onto eBird's 48-week frequency bins
    w = datetime.datetime.now().isocalendar()[1]
    return min(48, max(1, int(48.0 * w / 52.0)))
def getWeekFromDate(y, m, d):
    # Same 48-week mapping as getWeek(), but for an explicit date
    w = datetime.date(int(y), int(m), int(d)).isocalendar()[1]
    return min(48, max(1, int(48.0 * w / 52.0)))
def getSpeciesProbabilities(lat=-1, lon=-1, week=-1):
# Dummy array
p = np.zeros((len(cfg.CLASSES)), dtype='float32')
# No coordinates?
if lat == -1 or lon == -1:
return p + 1.0
else:
# Get checklist data for nearest cell
cdata = getCellData(lat, lon)
# No cell data?
if cdata == None:
return p + 1.0
else:
# Get probabilities from checklist frequencies
for entry in cdata['data']:
for species in entry:
try:
# Get class index from species code
for i in range(len(cfg.CLASSES)):
if cfg.CLASSES[i].split('_')[0] == CODES[species].split('_')[0]:
# Do we want a specific week?
if week >= 1 and week <= 48:
p[i] = entry[species][week - 1] / 100.0
# If not, simply return the max frequency
else:
p[i] = max(entry[species]) / 100.0
break
except:
pass
return p
def getSpeciesLists(lat=-1, lon=-1, week=-1, threshold=0.02):
    # Get species probabilities for the given date and location
p = getSpeciesProbabilities(lat, lon, week)
# Parse probabilities and create white list and black list
white_list, black_list = [], []
for i in range(p.shape[0]):
if p[i] >= threshold:
white_list.append(cfg.CLASSES[i])
else:
black_list.append(cfg.CLASSES[i])
return white_list, black_list |
the-stack_0_13412 | from __future__ import annotations
from collections import defaultdict
from typing import TYPE_CHECKING
from typing import DefaultDict
from poetry.console.commands.command import Command
if TYPE_CHECKING:
from poetry.core.packages.package import Package
class PluginShowCommand(Command):
name = "plugin show"
description = "Shows information about the currently installed plugins."
def handle(self) -> int:
from poetry.plugins.application_plugin import ApplicationPlugin
from poetry.plugins.plugin import Plugin
from poetry.plugins.plugin_manager import PluginManager
from poetry.repositories.installed_repository import InstalledRepository
from poetry.utils.env import EnvManager
from poetry.utils.helpers import canonicalize_name
from poetry.utils.helpers import pluralize
plugins: DefaultDict[str, dict[str, Package | list[str]]] = defaultdict(
lambda: {
"package": None,
"plugins": [],
"application_plugins": [],
}
)
entry_points = (
PluginManager(ApplicationPlugin.group).get_plugin_entry_points()
+ PluginManager(Plugin.group).get_plugin_entry_points()
)
system_env = EnvManager.get_system_env(naive=True)
installed_repository = InstalledRepository.load(
system_env, with_dependencies=True
)
packages_by_name = {pkg.name: pkg for pkg in installed_repository.packages}
for entry_point in entry_points:
plugin = entry_point.load()
category = "plugins"
if issubclass(plugin, ApplicationPlugin):
category = "application_plugins"
package = packages_by_name[canonicalize_name(entry_point.distro.name)]
plugins[package.pretty_name]["package"] = package
plugins[package.pretty_name][category].append(entry_point)
for name, info in plugins.items():
package = info["package"]
description = " " + package.description if package.description else ""
self.line("")
self.line(f" • <c1>{name}</c1> (<c2>{package.version}</c2>){description}")
provide_line = " "
if info["plugins"]:
count = len(info["plugins"])
provide_line += f" <info>{count}</info> plugin{pluralize(count)}"
if info["application_plugins"]:
if info["plugins"]:
provide_line += " and"
count = len(info["application_plugins"])
provide_line += (
f" <info>{count}</info> application plugin{pluralize(count)}"
)
self.line(provide_line)
if package.requires:
self.line("")
self.line(" <info>Dependencies</info>")
for dependency in package.requires:
self.line(
f" - {dependency.pretty_name}"
f" (<c2>{dependency.pretty_constraint}</c2>)"
)
return 0
|
the-stack_0_13413 | # pylint: disable=W0611
'''
Android Joystick Input Provider
===============================
This module is based on the PyGame JoyStick Input Provider. For more
information, please refer to
`<http://www.pygame.org/docs/ref/joystick.html>`_
'''
__all__ = ('AndroidMotionEventProvider', )
import os
try:
import android # NOQA
except ImportError:
if 'KIVY_DOC' not in os.environ:
raise Exception('android lib not found.')
from kivy.logger import Logger
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.input.shape import ShapeRect
from kivy.input.motionevent import MotionEvent
import pygame.joystick
class AndroidMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.profile = ['pos', 'pressure', 'shape']
self.sx, self.sy, self.pressure, radius = args
self.shape = ShapeRect()
self.shape.width = radius
self.shape.height = radius
super(AndroidMotionEvent, self).depack(args)
class AndroidMotionEventProvider(MotionEventProvider):
def __init__(self, device, args):
super(AndroidMotionEventProvider, self).__init__(device, args)
self.joysticks = []
self.touches = {}
self.uid = 0
self.window = None
def create_joystick(self, index):
Logger.info('Android: create joystick <%d>' % index)
js = pygame.joystick.Joystick(index)
js.init()
if js.get_numbuttons() == 0:
Logger.info('Android: discard joystick <%d> cause no button' %
index)
return
self.joysticks.append(js)
def start(self):
pygame.joystick.init()
Logger.info('Android: found %d joystick' % pygame.joystick.get_count())
for i in range(pygame.joystick.get_count()):
self.create_joystick(i)
def stop(self):
self.joysticks = []
def update(self, dispatch_fn):
if not self.window:
from kivy.core.window import Window
self.window = Window
w, h = self.window.system_size
touches = self.touches
for joy in self.joysticks:
jid = joy.get_id()
pressed = joy.get_button(0)
x = joy.get_axis(0) * 32768. / w
y = 1. - (joy.get_axis(1) * 32768. / h)
# python for android do * 1000.
pressure = joy.get_axis(2) / 1000.
radius = joy.get_axis(3) / 1000.
            # new touch?
if pressed and jid not in touches:
self.uid += 1
touch = AndroidMotionEvent(self.device, self.uid,
[x, y, pressure, radius])
touches[jid] = touch
dispatch_fn('begin', touch)
# update touch
elif pressed:
touch = touches[jid]
# avoid same touch position
if touch.sx == x and touch.sy == y \
and touch.pressure == pressure:
continue
touch.move([x, y, pressure, radius])
dispatch_fn('update', touch)
            # disappear
elif not pressed and jid in touches:
touch = touches[jid]
touch.move([x, y, pressure, radius])
touch.update_time_end()
dispatch_fn('end', touch)
touches.pop(jid)
MotionEventFactory.register('android', AndroidMotionEventProvider)
|
the-stack_0_13414 | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from threading import RLock
from rosgraph.names import ns_join, GLOBALNS, SEP, is_global, is_private, canonicalize_name
import os
import json
def _get_param_names(names, key, d):
"""
helper recursive routine for getParamNames()
@param names: list of param names to append to
@type names: [str]
@param d: parameter tree node
@type d: dict
@param key: parameter key for tree node d
@type key: str
"""
#TODOXXX
for k,v in d.items():
if type(v) == dict:
_get_param_names(names, ns_join(key, k), v)
else:
names.append(ns_join(key, k))
class ParamDictionary(object):
def __init__(self, reg_manager):
"""
ctor.
        @param reg_manager: registration manager providing the parameter subscribers
        @type reg_manager: RegistrationManager
"""
self.lock = RLock()
self.parameters = {}
self.reg_manager = reg_manager
self.snapshot = False
if "ROS_MASTER_SNAPSHOT" in os.environ:
try:
self.snapshot = True
self.snapshot_file = os.path.join(os.environ["ROS_ROOT"], ".master_snapshot")
with open(self.snapshot_file, "r") as f:
self.parameters = json.loads(f.read())
del self.parameters["run_id"]
except IOError:
pass
except KeyError:
pass
def get_param_names(self):
"""
Get list of all parameter names stored on this server.
@return: [code, statusMessage, parameterNameList]
@rtype: [int, str, [str]]
"""
try:
self.lock.acquire()
param_names = []
_get_param_names(param_names, '/', self.parameters)
finally:
self.lock.release()
return param_names
def search_param(self, ns, key):
"""
Search for matching parameter key for search param
key. Search for key starts at ns and proceeds upwards to
the root. As such, search_param should only be called with a
relative parameter name.
search_param's behavior is to search for the first partial match.
For example, imagine that there are two 'robot_description' parameters:
- /robot_description
- /robot_description/arm
- /robot_description/base
- /pr2/robot_description
- /pr2/robot_description/base
If I start in the namespace /pr2/foo and search for
'robot_description', search_param will match
/pr2/robot_description. If I search for 'robot_description/arm'
it will return /pr2/robot_description/arm, even though that
parameter does not exist (yet).
@param ns: namespace to begin search from.
@type ns: str
@param key: Parameter key.
@type key: str
@return: key of matching parameter or None if no matching
parameter.
@rtype: str
"""
if not key or is_private(key):
raise ValueError("invalid key")
if not is_global(ns):
raise ValueError("namespace must be global")
if is_global(key):
if self.has_param(key):
return key
else:
return None
        # there are more efficient implementations, but our hierarchy
# is not very deep and this is fairly clean code to read.
# - we only search for the first namespace in the key to check for a match
key_namespaces = [x for x in key.split(SEP) if x]
key_ns = key_namespaces[0]
# - corner case: have to test initial namespace first as
# negative indices won't work with 0
search_key = ns_join(ns, key_ns)
if self.has_param(search_key):
# resolve to full key
return ns_join(ns, key)
namespaces = [x for x in ns.split(SEP) if x]
for i in range(1, len(namespaces)+1):
search_key = SEP + SEP.join(namespaces[0:-i] + [key_ns])
if self.has_param(search_key):
# we have a match on the namespace of the key, so
# compose the full key and return it
full_key = SEP + SEP.join(namespaces[0:-i] + [key])
return full_key
return None
def get_param(self, key):
"""
Get the parameter in the parameter dictionary.
@param key: parameter key
@type key: str
@return: parameter value
"""
try:
self.lock.acquire()
val = self.parameters
if key != GLOBALNS:
# split by the namespace separator, ignoring empty splits
namespaces = [x for x in key.split(SEP)[1:] if x]
for ns in namespaces:
if not type(val) == dict:
raise KeyError(val)
val = val[ns]
return val
finally:
self.lock.release()
def set_param(self, key, value, notify_task=None):
"""
Set the parameter in the parameter dictionary.
@param key: parameter key
@type key: str
@param value: parameter value
@param notify_task: function to call with
subscriber updates. updates is of the form
[(subscribers, param_key, param_value)*]. The empty dictionary
represents an unset parameter.
@type notify_task: fn(updates)
"""
try:
self.lock.acquire()
if key == GLOBALNS:
if type(value) != dict:
raise TypeError("cannot set root of parameter tree to non-dictionary")
self.parameters = value
else:
namespaces = [x for x in key.split(SEP) if x]
# - last namespace is the actual key we're storing in
value_key = namespaces[-1]
namespaces = namespaces[:-1]
d = self.parameters
# - descend tree to the node we're setting
for ns in namespaces:
if not ns in d:
new_d = {}
d[ns] = new_d
d = new_d
else:
val = d[ns]
# implicit type conversion of value to namespace
if type(val) != dict:
d[ns] = val = {}
d = val
d[value_key] = value
# ParamDictionary needs to queue updates so that the updates are thread-safe
if notify_task:
updates = compute_param_updates(self.reg_manager.param_subscribers, key, value)
if updates:
notify_task(updates)
finally:
self.lock.release()
if self.snapshot:
with open(self.snapshot_file, 'w') as f:
f.write(json.dumps(self.parameters))
def subscribe_param(self, key, registration_args):
"""
@param key: parameter key
@type key: str
@param registration_args: additional args to pass to
subscribers.register. First parameter is always the parameter
key.
@type registration_args: tuple
"""
if key != SEP:
key = canonicalize_name(key) + SEP
try:
self.lock.acquire()
# fetch parameter value
try:
val = self.get_param(key)
except KeyError:
# parameter not set yet
val = {}
self.reg_manager.register_param_subscriber(key, *registration_args)
return val
finally:
self.lock.release()
def unsubscribe_param(self, key, unregistration_args):
"""
@param key str: parameter key
@type key: str
@param unregistration_args: additional args to pass to
subscribers.unregister. i.e. unregister will be called with
(key, *unregistration_args)
@type unregistration_args: tuple
@return: return value of subscribers.unregister()
"""
if key != SEP:
key = canonicalize_name(key) + SEP
return self.reg_manager.unregister_param_subscriber(key, *unregistration_args)
def delete_param(self, key, notify_task=None):
"""
Delete the parameter in the parameter dictionary.
@param key str: parameter key
@param notify_task fn(updates): function to call with
subscriber updates. updates is of the form
[(subscribers, param_key, param_value)*]. The empty dictionary
represents an unset parameter.
"""
try:
self.lock.acquire()
if key == GLOBALNS:
raise KeyError("cannot delete root of parameter tree")
else:
# key is global, so first split is empty
namespaces = [x for x in key.split(SEP) if x]
# - last namespace is the actual key we're deleting
value_key = namespaces[-1]
namespaces = namespaces[:-1]
d = self.parameters
# - descend tree to the node we're setting
for ns in namespaces:
if type(d) != dict or not ns in d:
raise KeyError(key)
else:
d = d[ns]
if not value_key in d:
raise KeyError(key)
else:
del d[value_key]
# ParamDictionary needs to queue updates so that the updates are thread-safe
if notify_task:
updates = compute_param_updates(self.reg_manager.param_subscribers, key, {})
if updates:
notify_task(updates)
finally:
self.lock.release()
def has_param(self, key):
"""
Test for parameter existence
@param key: parameter key
@type key: str
@return: True if parameter set, False otherwise
@rtype: bool
"""
try:
# more efficient implementations are certainly possible,
# but this guarantees correctness for now
self.get_param(key)
return True
except KeyError:
return False
def _compute_all_keys(param_key, param_value, all_keys=None):
"""
Compute which subscribers should be notified based on the parameter update
@param param_key: key of updated parameter
@type param_key: str
@param param_value: value of updated parameter
@param all_keys: (internal use only) list of parameter keys
to append to for recursive calls.
@type all_keys: [str]
@return: list of parameter keys. All keys will be canonicalized with trailing slash.
@rtype: [str]
"""
if all_keys is None:
all_keys = []
for k, v in param_value.items():
new_k = ns_join(param_key, k) + SEP
all_keys.append(new_k)
if type(v) == dict:
_compute_all_keys(new_k, v, all_keys)
return all_keys
def compute_param_updates(subscribers, param_key, param_value):
"""
Compute subscribers that should be notified based on the parameter update
@param subscribers: parameter subscribers
@type subscribers: Registrations
@param param_key: parameter key
@type param_key: str
@param param_value: parameter value
@type param_value: str
"""
# logic correct for both updates and deletions
if not subscribers:
return []
# end with a trailing slash to optimize startswith check from
# needing an extra equals check
if param_key != SEP:
param_key = canonicalize_name(param_key) + SEP
# compute all the updated keys
if type(param_value) == dict:
all_keys = _compute_all_keys(param_key, param_value)
else:
all_keys = None
updates = []
# subscriber gets update if anything in the subscribed namespace is updated or if its deleted
for sub_key in subscribers.iterkeys():
ns_key = sub_key
if ns_key[-1] != SEP:
ns_key = sub_key + SEP
if param_key.startswith(ns_key):
node_apis = subscribers[sub_key]
updates.append((node_apis, param_key, param_value))
elif all_keys is not None and ns_key.startswith(param_key) \
and not sub_key in all_keys:
# parameter was deleted
node_apis = subscribers[sub_key]
updates.append((node_apis, sub_key, {}))
# add updates for exact matches within tree
if all_keys is not None:
# #586: iterate over parameter tree for notification
for key in all_keys:
if key in subscribers:
# compute actual update value
sub_key = key[len(param_key):]
namespaces = [x for x in sub_key.split(SEP) if x]
val = param_value
for ns in namespaces:
val = val[ns]
updates.append((subscribers[key], key, val))
return updates
|
the-stack_0_13415 | #!/usr/bin/env python3
"""
MSFT Bonsai SDK3 Template for Simulator Integration using Python
Copyright 2020 Microsoft
Usage:
For registering simulator with the Bonsai service for training:
python __main__.py \
--workspace <workspace_id> \
--accesskey="<access_key> \
Then connect your registered simulator to a Brain via UI
Alternatively, one can set the SIM_ACCESS_KEY and SIM_WORKSPACE as
environment variables.
"""
import json
import time
from typing import Dict, Any, Optional
from microsoft_bonsai_api.simulator.client import BonsaiClientConfig, BonsaiClient
from microsoft_bonsai_api.simulator.generated.models import (
SimulatorState,
SimulatorInterface,
)
import argparse
from sim.qube_simulator import QubeSimulator
class TemplateSimulatorSession():
def __init__(self, render):
## Initialize python api for simulator
self.simulator = QubeSimulator()
self.render = render
def get_state(self) -> Dict[str, Any]:
"""Called to retreive the current state of the simulator. """
return {
## Add simulator state as dictionary
"theta": float(self.simulator.state[0]),
"alpha": float(self.simulator.state[1]),
"theta_dot": float(self.simulator.state[2]),
"alpha_dot": float(self.simulator.state[3])
}
def episode_start(self, config: Dict[str, Any]):
""" Called at the start of each episode """
## Add simulator reset api here using config from desired lesson in inkling
self.simulator.reset(config)
def episode_step(self, action: Dict[str, Any]):
""" Called for each step of the episode """
## Add simulator step api here using action from Bonsai platform
self.simulator.step(action['Vm'])
if self.render:
self.simulator.view()
def halted(self) -> bool:
"""
Should return True if the simulator cannot continue for some reason
"""
return (
False
)
def main(render = False):
# Grab standardized way to interact with sim API
    sim = TemplateSimulatorSession(render=render)
# Configure client to interact with Bonsai service
config_client = BonsaiClientConfig()
client = BonsaiClient(config_client)
# Load json file as simulator integration config type file
with open('interface.json') as file:
interface = json.load(file)
# Create simulator session and init sequence id
registration_info = SimulatorInterface(
name=interface['name'],
timeout=interface['timeout'],
simulator_context=config_client.simulator_context,
)
registered_session = client.session.create(
workspace_name=config_client.workspace,
body=registration_info
)
print("Registered simulator.")
sequence_id = 1
try:
while True:
# Advance by the new state depending on the event type
sim_state = SimulatorState(
sequence_id=sequence_id, state=sim.get_state(),
halted=sim.halted()
)
event = client.session.advance(
workspace_name=config_client.workspace,
session_id=registered_session.session_id,
body=sim_state
)
sequence_id = event.sequence_id
print("[{}] Last Event: {}".format(time.strftime('%H:%M:%S'),
event.type))
# Event loop
if event.type == 'Idle':
time.sleep(event.idle.callback_time)
print('Idling...')
elif event.type == 'EpisodeStart':
sim.episode_start(event.episode_start.config)
elif event.type == 'EpisodeStep':
sim.episode_step(event.episode_step.action)
elif event.type == 'EpisodeFinish':
print('Episode Finishing...')
elif event.type == 'Unregister':
client.session.delete(
workspace_name=config_client.workspace,
session_id=registered_session.session_id
)
print("Unregistered simulator.")
else:
pass
except KeyboardInterrupt:
# Gracefully unregister with keyboard interrupt
client.session.delete(
workspace_name=config_client.workspace,
session_id=registered_session.session_id
)
print("Unregistered simulator.")
except Exception as err:
# Gracefully unregister for any other exceptions
client.session.delete(
workspace_name=config_client.workspace,
session_id=registered_session.session_id
)
print("Unregistered simulator because: {}".format(err))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='args for sim integration',
allow_abbrev=False)
parser.add_argument('--render', action='store_true')
args, _ = parser.parse_known_args()
main(render=args.render) |
the-stack_0_13416 | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from MDSplus import TdiCompile, TreeNode
import os
import sys
import numpy
example = '/image/%s/-1?expr=ADD(ZERO([100,100],0WU),2000WU)&bit=12' % os.environ.get(
'EXPT', 'main')
def doImage(self):
if len(self.path_parts) > 2:
tree = self.openTree(self.path_parts[1], self.path_parts[2])
_tdi = tree.tdiCompile
else:
_tdi = TdiCompile
expr = self.args['expr'][-1]
obj = _tdi(expr)
idx = int(self.args['idx'][-1]) if 'idx' in self.args else 0
if isinstance(obj, TreeNode) and obj.getNumSegments() > 0:
d = obj.getSegment(idx)
isseg = True
else:
d = obj.evaluate()
isseg = False
try:
im = d.getImage()
except:
from PIL import Image
import io
raw = d.data()
        if 'bit' in self.args:
            # rescale data of the requested bit depth so it maps onto 8-bit pixel values
            bit = 8-int(self.args['bit'][-1])
            if bit != 0:
                if raw.itemsize == 1:
                    raw = raw.astype('uint16')
                if bit > 0:
                    raw = (((raw+1) << (bit))-1).astype('uint8')
                elif bit < 0:
                    raw = (((raw-1) >> (-bit))+1).astype('uint8')
            else:
                raw = raw.astype("uint8")
if raw.ndim > 2 and ((not isseg) or raw.shape[0]):
raw = raw[0] if isseg else raw[idx]
if raw.ndim == 2:
img = Image.new("L", raw.T.shape, "gray")
elif raw.ndim == 3:
img = Image.new("RGB", raw.T.shape[:2])
raw = numpy.rollaxis(raw, 0, 3)
else:
raise
fmt = self.args['format'][-1].lower() if 'format' in self.args else 'jpeg'
img.frombytes(raw.tostring())
stream = io.BytesIO()
img.save(stream, format=fmt.upper())
return ('200 OK', [('Content-type', 'image/%s' % fmt)], stream.getvalue())
else:
if im.format == "MPEG":
response_headers = [('Content-type', 'video/mpeg'),
('Content-Disposition', 'inline; filename="%s.mpeg"' % (expr,))]
else: # covers gif, jpeg, and png
fmt = im.format.lower()
response_headers = [('Content-type', 'image/%s' % fmt),
('Content-Disposition', 'inline; filename="%s.%s"' % (expr, fmt))]
output = str(d.data().data)
status = '200 OK'
return (status, response_headers, output)
|
the-stack_0_13417 | ############################################################################
#
# Copyright (c) Mamba Developers. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
############################################################################
""" Mamba generic utility functions """
import os
import re
import inspect
from typing import List, Iterator, Dict, Callable, Any
from types import ModuleType
from importlib import import_module
from pkgutil import iter_modules
from shutil import ignore_patterns, copy2, copystat
from mamba.core.context import Context
from mamba.core.exceptions import ComposeFileException
def get_properties_dict(configuration: Dict[str, dict]) -> Dict[str, Any]:
"""Return a dictionary of properties with default values composed from
a configuration file.
Args:
configuration: The path string formatted in windows or linux style.
Returns:
The dictionary of properties.
"""
if 'device' in configuration and 'properties' in \
configuration['device']:
properties_dict = {
key: value.get('default')
for key, value in configuration['device']['properties'].items()
}
else:
properties_dict = {}
return properties_dict
def path_from_string(path_str: str) -> str:
"""Return a valid path from a given path string, formatted with windows
or linux slashes.
Args:
path_str: The path string formatted in windows or linux style.
Returns:
The valid path string.
"""
path = os.path.join(*re.split(r' |/|\\', path_str))
if path_str[0] == '/': # Fix for absolute path
path = '/' + path
return path
def get_classes_from_module(module: str,
search_class: type) -> Dict[str, Callable]:
"""Return a dictionary with all classes 'search_class' defined in the
given module that can be instantiated.
"""
classes_dict: Dict[str, Callable] = {}
for cls in _iter_classes(module, search_class):
cls_name = cls.__module__.split('.')[-1]
classes_dict[cls_name] = cls
return classes_dict
def get_components(used_components: Dict[str, dict], modules: List[str],
component_type: type,
context: Context) -> Dict[str, object]:
"""Returns a dictionary of instantiated component with context.
Args:
used_components: The dictionary of used component.
modules: The folders where to look for the component.
component_type: The class type of the component.
context: The application context to instantiate
the component with.
Returns:
The instantiated dictionary of component.
Raises:
ComposeFileException: If a given component id is not found.
"""
all_components_by_type: Dict[str, Callable] = {}
for module in modules:
components_in_module = get_classes_from_module(module, component_type)
for key, value in components_in_module.items():
if key not in all_components_by_type:
all_components_by_type[key] = value
dict_used_components = {}
for component_name, args in used_components.items():
if args is None or 'component' not in args:
raise ComposeFileException(
f"'{component_name}: missing component property")
if args['component'] in all_components_by_type:
args['name'] = component_name
dict_used_components[component_name] = all_components_by_type[
args['component']](context, args)
else:
raise ComposeFileException(
f"{component_name}: component {args['component']}' is not a "
f"valid component identifier")
return dict_used_components
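# Example of a compose-file style input (hypothetical names): passing
# used_components = {'my_driver': {'component': 'some_driver', 'port': 5}} looks up the
# component class defined in module 'some_driver', adds 'name': 'my_driver' to the args
# and instantiates it as component_class(context, args) under the key 'my_driver'.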
def merge_dicts(dict_1, dict_2):
"""
Merge dictionary dict_2 into dict_1. In case of conflict dict_1
has precedence
"""
if dict_1 is None:
return dict_2
if dict_2 is None:
return dict_1
result = dict_1
for key in dict_2:
        if key in dict_1:
            if isinstance(dict_1[key], dict) and isinstance(dict_2[key], dict):
                merge_dicts(dict_1[key], dict_2[key])
        else:
            result[key] = dict_2[key]
return result
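# Example with hypothetical inputs: merge_dicts({'a': 1, 'n': {'x': 1}},
# {'a': 2, 'b': 3, 'n': {'y': 2}}) returns {'a': 1, 'n': {'x': 1, 'y': 2}, 'b': 3}:
# 'a' keeps dict_1's value, 'b' is added from dict_2, and nested dicts are merged.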
def copytree(src, dst, ignore_pattern=ignore_patterns('*.pyc', '.svn')):
"""
Since the original function always creates the directory, to resolve
the issue a new function had to be created. It's a simple copy and
was reduced for this case.
"""
ignore = ignore_pattern
names = os.listdir(src)
ignored_names = ignore(src, names)
if not os.path.exists(dst):
os.makedirs(dst)
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if os.path.isdir(srcname):
copytree(srcname, dstname)
else:
copy2(srcname, dstname)
copystat(src, dst)
def _walk_modules(path: str) -> List[ModuleType]:
"""Loads a module and all its submodules from the given module path and
returns them. If *any* module throws an exception while importing, that
exception is thrown back.
For example: walk_modules('mamba.mock')
"""
mods = []
mod = import_module(path)
mods.append(mod)
# Any module that contains a __path__ attribute is considered a package.
if hasattr(mod, '__path__'):
for _, subpath, ispkg in iter_modules(getattr(mod, '__path__')):
fullpath = path + '.' + subpath
if ispkg:
mods += _walk_modules(fullpath)
else:
submod = import_module(fullpath)
mods.append(submod)
return mods
def _iter_classes(module_name: str, search_class: type) -> Iterator[Callable]:
"""Return an iterator over all classes 'search_class' defined in the given
module that can be instantiated.
"""
for module in _walk_modules(module_name):
for obj in vars(module).values():
if inspect.isclass(obj) and \
issubclass(obj, search_class) and \
obj.__module__ == module.__name__ and \
not obj == search_class:
yield obj
|
the-stack_0_13421 | import statistics
import time
import pytest
import streamz
from metrix import MElement, MStream, MSinkPrinter
from metrix import MCoordinator as MC
@pytest.fixture(scope="module")
def test_elements():
return [
{"name": "m1", "value": 1, "tags": None},
{"name": "m2", "value": 2, "tags": {"foo": "bar"}},
{"name": "m1", "value": 3, "tags": None},
{"name": "m2", "value": 1, "tags": {"bat": "baz"}},
{"name": "m2", "value": 2, "tags": {"foo": "bar"}},
{"name": "m1", "value": 3, "tags": None},
{"name": "m2", "value": 1, "tags": {"foo": "bar"}},
{"name": "m1", "value": 1, "tags": {"foo": "bar"}},
{"name": "m1", "value": 3, "tags": {"bat": "baz"}},
{"name": "m2", "value": 2, "tags": None},
]
class MSinkToList:
"""Ad-hoc metric sink -- useful for testing, but not production."""
def __init__(self):
self.data = []
def __call__(self, me: MElement):
self.data.append(me)
@pytest.mark.parametrize(
"mstreams,msinks,rate_limit",
[
(
[MStream("m1", agg=sum, batch_size=1)],
[MSinkPrinter()],
1.0,
),
(
[MStream("m1", agg=sum, batch_size=1)],
[MSinkPrinter()],
None,
),
(
[
MStream("m1", agg=sum, batch_size=1),
MStream("m2", agg=[min, max], batch_size=1)
],
[MSinkPrinter(), MSinkPrinter()],
[1.0, 0.5],
),
(None, None, None),
]
)
def test_metric_coordinator_init(mstreams, msinks, rate_limit):
mc = MC(mstreams=mstreams, msinks=msinks, rate_limit=rate_limit)
assert str(mc)
assert all(hasattr(mc, attr) for attr in ["stream", "metric_mstreams", "msinks"])
if mstreams:
assert (
len(mc.metric_mstreams) == len(mc.stream.upstreams) == len(mstreams) and
sorted(mc.metric_mstreams.keys()) == sorted(mstream.name for mstream in mstreams)
)
if msinks:
assert len(mc.msinks) == len(mc.stream.downstreams) == len(msinks)
if rate_limit:
assert all(isinstance(ds, streamz.core.rate_limit) for ds in mc.stream.downstreams)
if isinstance(rate_limit, list):
assert all(ds.interval == rl for ds, rl in zip(mc.stream.downstreams, rate_limit))
else:
assert all(ds.interval == rate_limit for ds in mc.stream.downstreams)
else:
assert all(isinstance(ds, streamz.core.buffer) for ds in mc.stream.downstreams)
@pytest.mark.parametrize(
"mstreams,msinks,rate_limit",
[
(
[MStream("m1", agg=sum, batch_size=1)],
[MSinkPrinter()],
[1.0, 1.0],
),
(
[MStream("m1", agg=sum, batch_size=1)],
[MSinkPrinter()],
"10s",
),
]
)
def test_metric_coordinator_bad_init(mstreams, msinks, rate_limit):
with pytest.raises((ValueError, TypeError)):
_ = MC(mstreams=mstreams, msinks=msinks, rate_limit=rate_limit)
@pytest.mark.parametrize(
"mstreams",
[
[MStream("m1", agg=sum, batch_size=1)],
[
MStream("m1", agg=sum, batch_size=1),
MStream("m2", agg=[min, max], batch_size=1)
],
]
)
def test_metric_coordinator_add_mstream(mstreams):
mc = MC()
for mstream in mstreams:
mc.add_mstream(mstream)
assert (
len(mc.metric_mstreams) == len(mc.stream.upstreams) == len(mstreams) and
sorted(mc.metric_mstreams.keys()) == sorted(mstream.name for mstream in mstreams)
)
@pytest.mark.parametrize(
"msinks,rate_limits",
[
([MSinkPrinter()], [1.0]),
([MSinkPrinter(), MSinkPrinter()], [1.0, 0.5]),
([MSinkPrinter()], [None]),
]
)
def test_metric_coordinator_add_msink(msinks, rate_limits):
mc = MC()
for sink, rate_limit in zip(msinks, rate_limits):
mc.add_msink(sink, rate_limit)
assert len(mc.msinks) == len(mc.stream.downstreams) == len(msinks)
@pytest.mark.slow
@pytest.mark.parametrize(
"init_kwargs,exp_results",
[
(
{
"mstreams": [
MStream("m1", agg=sum, batch_size=5),
MStream("m2", agg=statistics.mean, batch_size=5)
],
"msinks": [MSinkToList(), MSinkToList()]
},
[
MElement(name="m1.sum", value=7, tags=None),
MElement(name="m1.sum", value=1, tags={"foo": "bar"}),
MElement(name="m1.sum", value=3, tags={"bat": "baz"}),
MElement(name="m2.mean", value=1.6666666666666667, tags={"foo": "bar"}),
MElement(name="m2.mean", value=1, tags={"bat": "baz"}),
MElement(name="m2.mean", value=2, tags=None),
],
),
(
{
"mstreams": [
MStream("m1", agg=sum, batch_size=1),
MStream("m2", agg=statistics.mean, batch_size=1)
],
"msinks": [MSinkToList(), MSinkToList()]
},
[
MElement(name="m1.sum", value=1, tags=None),
MElement(name="m2.mean", value=2, tags={"foo": "bar"}),
MElement(name="m1.sum", value=3, tags=None),
MElement(name="m2.mean", value=1, tags={"bat": "baz"}),
MElement(name="m2.mean", value=2, tags={"foo": "bar"}),
MElement(name="m1.sum", value=3, tags=None),
MElement(name="m2.mean", value=1, tags={"foo": "bar"}),
MElement(name="m1.sum", value=1, tags={"foo": "bar"}),
MElement(name="m1.sum", value=3, tags={"bat": "baz"}),
MElement(name="m2.mean", value=2, tags=None),
],
),
]
)
def test_metric_stream_send(init_kwargs, exp_results, test_elements):
mc = MC(**init_kwargs)
for te in test_elements:
mc.send(**te)
time.sleep(0.01)
time.sleep(0.2)
assert all(msink.data == exp_results for msink in mc.msinks)
|
the-stack_0_13423 | from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
from vkbottle import ABCView, BaseReturnManager
from vkbottle.dispatch.handlers import FromFuncHandler
from vkbottle.framework.bot import BotLabeler
from vkbottle.modules import logger
from vkbottle_types.events import MessageEvent as _MessageEvent
from vkbottle_callback.rules import *
from vkbottle_callback.types import MessageEvent
if TYPE_CHECKING:
from vkbottle import ABCAPI, ABCStateDispenser
from vkbottle.dispatch.rules import ABCRule
from vkbottle.dispatch.views import ABCView
from vkbottle.dispatch.views.bot import ABCBotMessageView, RawBotEventView
from vkbottle.framework.bot.labeler.abc import LabeledMessageHandler
class MessageEventReturnHandler(BaseReturnManager):
@BaseReturnManager.instance_of(str)
async def str_handler(self, value: str, event: MessageEvent, _: dict):
await event.show_snackbar(value)
def message_event_min(event: dict, ctx_api: "ABCAPI") -> "MessageEvent":
update = _MessageEvent(**event)
message_event = MessageEvent(
**update.object.dict(),
group_id=update.group_id,
)
setattr(message_event, "unprepared_ctx_api", ctx_api)
return message_event
class MessageEventView(ABCView):
def __init__(self):
super().__init__()
self.handler_return_manager = MessageEventReturnHandler()
async def process_event(self, event: dict) -> bool:
return event["type"] == "message_event"
async def handle_event(
self, event: dict, ctx_api: "ABCAPI", state_dispenser: "ABCStateDispenser"
) -> None:
logger.debug("Handling event ({}) with message_event view".format(event.get("event_id")))
context_variables: dict = {}
message_event = message_event_min(event, ctx_api)
message_event.state_peer = await state_dispenser.cast(message_event.peer_id)
mw_instances = await self.pre_middleware(message_event, context_variables) # type: ignore
if mw_instances is None:
logger.info("Handling stopped, pre_middleware returned error")
return
handle_responses = []
handlers = []
for handler in self.handlers:
result = await handler.filter(message_event) # type: ignore
logger.debug("Handler {} returned {}".format(handler, result))
if result is False:
continue
elif isinstance(result, dict):
context_variables.update(result)
handler_response = await handler.handle(message_event, **context_variables) # type: ignore
handle_responses.append(handler_response)
handlers.append(handler)
return_handler = self.handler_return_manager.get_handler(handler_response)
if return_handler is not None:
await return_handler(
self.handler_return_manager, handler_response, message_event, context_variables
)
if handler.blocking:
break
await self.post_middleware(mw_instances, handle_responses, handlers)
LabeledMessageEventHandler = Callable[..., Callable[[MessageEvent], Any]]
DEFAULT_CUSTOM_RULES: Dict[str, Type[ABCMessageEventRule]] = {
"from_chat": PeerRule,
"peer_ids": FromPeerRule,
"payload": PayloadRule,
"payload_contains": PayloadContainsRule,
"payload_map": PayloadMapRule,
"func": FuncRule,
"coro": CoroutineRule,
"coroutine": CoroutineRule,
"state": StateRule
}
class MessageEventLabeler(BotLabeler):
def __init__(
self,
message_view: Optional["ABCBotMessageView"] = None,
raw_event_view: Optional["RawBotEventView"] = None,
custom_rules: Optional[Dict[str, Type["ABCRule"]]] = None,
auto_rules: Optional[List["ABCRule"]] = None,
message_event_view: Optional["MessageEventView"] = None
):
super().__init__(message_view, raw_event_view, custom_rules, auto_rules)
self.custom_rules = custom_rules or DEFAULT_CUSTOM_RULES
self.message_event_view = message_event_view or MessageEventView()
def message_event(
self, *rules: "ABCRule", blocking: bool = True, **custom_rules
) -> "LabeledMessageHandler":
def decorator(func):
self.message_event_view.handlers.append(
FromFuncHandler(
func,
*rules,
*self.auto_rules,
*self.get_custom_rules(custom_rules),
blocking=blocking,
)
)
return func
return decorator
def load(self, labeler: Union[BotLabeler, "MessageEventLabeler"]):
if type(labeler) is MessageEventLabeler:
self.message_event_view.handlers.extend(labeler.message_event_view.handlers)
self.message_event_view.middlewares.update(labeler.message_event_view.middlewares)
self.message_view.handlers.extend(labeler.message_view.handlers)
self.message_view.middlewares.update(labeler.message_view.middlewares)
for event, handler_basements in labeler.raw_event_view.handlers.items():
event_handlers = self.raw_event_view.handlers.get(event)
if event_handlers:
event_handlers.extend(handler_basements)
else:
self.raw_event_view.handlers[event] = handler_basements
self.raw_event_view.middlewares.update(labeler.raw_event_view.middlewares)
def views(self) -> Dict[str, "ABCView"]:
return {
"message": self.message_view,
"message_event": self.message_event_view,
"raw": self.raw_event_view
}
__all__ = (
"MessageEventView",
"MessageEventLabeler"
)
|
the-stack_0_13424 | import os
import sys
import pandas as pd
import numpy as np
from sklearn.datasets import make_classification
from keras import backend as K
from keras import initializers, layers
from keras.utils import to_categorical
from keras.constraints import non_neg, max_norm
from keras.initializers import Zeros
from keras.constraints import Constraint
import tensorflow as tf
import time
from datetime import datetime
from keras.callbacks import Callback
from keras.layers import Input, Dense
from keras.models import Model
from decision_tree import *
def split_pred(df, label):
return df[[x for x in df.columns if x != label]], df[label]
# add sys.args
if len(sys.argv) == 1:
ntree=5
last_only=True
else:
_, ntree, last_only = sys.argv
last_only = last_only == "1"
ntree = int(ntree)
depth = 5
dim_size = 16
num_class=26
path = "clean_data"
train_adult = pd.read_csv(path+'/letter_train_scale.csv')
test_adult = pd.read_csv(path+'/letter_test_scale.csv')
x, y = split_pred(train_adult, "lettr")
x_test, y_test = split_pred(test_adult, "lettr")
y = to_categorical(y)
y_test = to_categorical(y_test)
save_dir = os.path.join(os.getcwd(), 'saved_models')
save_dir = "letter_benchmark"
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
tree = Tree() # this keeps the state of the current decision tree...
input_dim = dim_size
nepochs = 200
class TimingCallback(Callback):
def on_train_begin(self, logs={}):
self.times = []
def on_epoch_begin(self, batch, logs={}):
self.epoch_time_start = time.time()
def on_epoch_end(self, batch, logs={}):
# write stuff to disc here...
self.times.append(time.time() - self.epoch_time_start)
def gen_states(tree, tree_list=[0], target_idx=None, return_tree_list=False):
def size_0(dct):
for key, val in dct.items():
if len(val) > 0:
return False
return True
tree_index = max(tree_list)
if target_idx is None:
curr_list = [tree_index+1, tree_index+2, tree_index+3]
else:
curr_list = [tree_index+1, target_idx, tree_index+2]
tree_list.extend(curr_list)
d0, s0 = tree.prune()
d1 = tree.tree.copy()
d2, s2 = tree.graft()
if size_0(d0):
# reset
d0 = Tree().tree.copy()
state_info = {'prune': (d0, curr_list[0]),
'base': (d1, curr_list[1]),
'graft': (d2, curr_list[2]),
'state': {
'prune': s0, 'graft': s2
}}
if return_tree_list:
return state_info, tree_list, curr_list
else:
return state_info
# In[6]:
def outputshape(input_shape):
return [(input_shape[0], input_shape[1]) for _ in range(input_shape[2])]
def normalise_pred(x):
x = tf.stack(x)
x = tf.transpose(x, [1, 0, 2])
return x
def normalise_pred_shape(input_shape):
    shape = list(input_shape[0])
    num_trees = len(input_shape)
    return tuple([shape[0], num_trees, shape[1]])
# In[7]:
def softmax_tau(proba, tau=0.1):
"""
    This is a softmax which goes towards one-hot encoding over time.
We want to decay tau from 1.0 to 0.1 roughly
"""
from scipy.special import logit, expit
out = expit(logit(proba)/tau)
return out/np.sum(out)
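# Illustration with a made-up probability vector: softmax_tau(np.array([0.3, 0.7]), tau=1.0)
# returns roughly [0.3, 0.7], while softmax_tau(np.array([0.3, 0.7]), tau=0.1) pushes almost
# all mass onto the larger entry, approaching a one-hot vector.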
def get_layer_weights(model, name='hwy', sample=False, tau=1.0):
out = K.eval([x for x in model.layers if x.name == name][0].weights[0]).flatten()
return normalise_weights(out, sample, tau)
def normalise_weights(out, sample=False, tau=1.0):
out = np.abs(out)
out = out/np.sum(out)
if sample and tau >= 1.0:
draw = np.random.choice(range(out.shape[0]), 1, p=out)
return draw[0]
elif sample:
draw = np.random.choice(range(out.shape[0]), 1, p=softmax_tau(out, tau))
return draw[0]
elif tau >= 1.0:
return out
else:
return softmax_tau(out, tau)
# In[8]:
def calculate_routes(adj_list=None):
"""
Calculates routes given a provided adjancency list,
assume that root node is always 0.
Assume this is a binary tree as well...
Test cases:
{0:[1, 2], 1:[], 2:[]} --> [(0, 0), (1, 0),
(0, 0), (1, 1),
(0, 1), (2, 0),
(0, 1), (2, 1)]
{0:[1], 1:[2], 2:[]} --> [(0, 0), (1, 0), (2, 0),
(0, 0), (1, 0), (2, 1),
(0, 0), (1, 1),
(0, 1)]
calculate_routes({0:[1,2], 1:[], 2:[]})
calculate_routes({0:[1], 1:[2], 2:[]})
"""
if adj_list is None:
raise Exception("Adj_list cannot be none")
def get_next(path):
next_paths = adj_list[path[-1]]
if len(next_paths) > 0:
for p in next_paths:
get_next(path + [p])
else:
all_paths.append(path)
all_paths = []
get_next([0])
# convert paths to indices...
path_indx = []
for path in all_paths:
cur_path = []
for cur_node, nxt_node in zip(path, path[1:]+[None]):
# print(cur_node, nxt_node)
pos_dir = np.array(sorted(adj_list[cur_node]))
pos_idx = np.argwhere(pos_dir==nxt_node).flatten().tolist()
if len(pos_idx) > 0 and len(pos_dir) == 2: # i.e. has 2 children
cur_path.append((cur_node, pos_idx[0]))
elif len(pos_idx) > 0 and len(pos_dir) == 1: # i.e. has 1 child
path_indx.append(cur_path + [(cur_node, 1)]) # then it will have a leaf!
cur_path.append((cur_node, pos_idx[0]))
elif nxt_node is not None:
cur_path.append((cur_node, pos_dir.shape[0]))
else:
path_indx.append(cur_path + [(cur_node, 0)])
path_indx.append(cur_path + [(cur_node, 1)])
return path_indx
def build_tree(main_input, depth, tree_number=0, last_only=True):
"""
Builds a single decision tree, returns all the specs needed to preserve tree state...
"""
# main_input = Input(shape=(dim_size,), name='main_input')
tree_nodes = DecisionTreeNode(depth=depth, name=f'decision_tree{tree_number}')(main_input)
tree_route = DecisionTreeRouting(depth=depth, name=f'decision_route{tree_number}')([main_input, tree_nodes])
leaf_layers = layers.Lambda(lambda x: [tf.squeeze(y) for y in tf.split(x, [1 for _ in range(K.int_shape(x)[2])], axis=2)], output_shape=outputshape)(tree_route)
pred_layer_tree = [Dense(num_class, activation='softmax', name="t{}_tree_l{}".format(tree_number, idx))(x) for idx, x in enumerate(leaf_layers)]
stack_pred = layers.Lambda(normalise_pred, output_shape=normalise_pred_shape)(pred_layer_tree)
tree_d = DecisionPredRouting(depth=depth)([stack_pred, tree_nodes])
if last_only:
return [tree_d]
else:
return [tree_d], [tree_d]+pred_layer_tree
def normalise_pred2(x):
x = tf.stack(x)
x = tf.transpose(x, [1, 0, 2])
cl = K.sum(x, axis=1)
cl = cl/tf.norm(cl, ord=1, axis=1, keepdims=True)
return cl
def normalise_pred_shape2(input_shape):
shape = list(input_shape[0])
return tuple([shape[0], num_class])
main_input = Input(shape=(dim_size,), name='main_input')
tree = []
out_list = []
for idx in range(ntree):
if last_only:
tree.append(build_tree(main_input, depth, idx, last_only))
else:
t_, out = build_tree(main_input, depth, idx, last_only)
tree.append(t_)
out_list.extend(out)
stack_pred = layers.Lambda(normalise_pred2, output_shape=normalise_pred_shape2)([x[0] for x in tree])
if last_only:
outputs = [stack_pred]
else:
outputs = [stack_pred] + out_list
model = Model(inputs=main_input, outputs=outputs)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
time_cb = TimingCallback()
print("Running model with {} layers".format(len(model.layers)))
hist = model.fit([x], [y for _ in range(len(outputs))],
validation_data=([x_test], [y_test for _ in range(len(outputs))]),
epochs=nepochs, verbose=2,
callbacks = [time_cb])
hist_df = pd.DataFrame(hist.history)
print(pd.DataFrame(hist.history).iloc[-1])
hist_df['times'] = time_cb.times[-hist_df.shape[0]:]
hist_df.to_csv('{}/benchmark_rf{}_lastonly{}_{}.csv'.format(save_dir, ntree, last_only, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')), index=True)
|
the-stack_0_13425 | """
MonteCarlo rejection of Models
==========================
The created geological models with gempy were exported as SHEMAT-Suite input files. `SHEMAT-Suite <https://git.rwth-aachen.de/SHEMAT-Suite/SHEMAT-Suite-open>`_ [1] is a code for
solving coupled heat transport in porous media. It is written in fortran and uses a finite differences scheme in a hexahedral grid.
In this example, we will load a heat transport simulation from the base POC model we created in "Geological model creation and gravity simulation". We will demonstrate methods contained
in OpenWF for loading the result file, displaying the parameters it contains and how to visualize these parameters. Finally, we will calculate the conductive heat flow and plot it.
"""
#%%
# import libraries
import warnings
warnings.filterwarnings("ignore")
import h5py
import numpy as np
import pandas as pd
import os,sys
import glob
from scipy import stats
import random
import gempy as gp
from gempy.bayesian.fields import probability, information_entropy
import matplotlib.pyplot as plt
plt.style.use(['seaborn-talk'])
sys.path.append('../models/20210319_MC_no_middle_filling/')
print(f"Run mit GemPy version {gp.__version__}")
# In[2]:
def c_rmse(predicted, target):
rm_sq_diff = np.sqrt((predicted.sub(target, axis=0)**2).mean())
return rm_sq_diff
def rejection(rmse, rnseed=np.random.seed(0), verbose=True):
    # rnseed is only evaluated for its seeding side effect; the statement below is a no-op.
    # The acceptance test further down relies on the module-level error u_g, which has to be
    # defined before this function is called.
    rnseed
    Ref = rmse[0]
accept = []
P = []
k = 0
for i in range(1,len(rmse)):
if rmse[i] < Ref:
Ref = rmse[i]
accept.append(i)
elif random.random() < np.exp(-(rmse[i] - Ref)/(u_g)):
P.append(np.exp(-(rmse[i] - Ref)/(u_g)))
Ref = rmse[i]
accept.append(i)
k +=1
if verbose==True:
print(f"{len(accept)} realizations were accepted.")
return accept, P
def fahrenheit_to_celsius(temp_fahrenheit, difference=False):
if not difference:
return (temp_fahrenheit - 32) * 5 / 9
else:
return temp_fahrenheit * 5 / 9
def extTui(datafile, dimension=3, direction='x'):
f = h5py.File(datafile,'r')
z, y, x = f['temp'].shape
if dimension==3:
temp = f['temp'][:,:,:]
uindex = f['uindex'][:,:,:]
elif dimension==2:
if direction=='x':
temp = f['temp'][:,:,x//2]
uindex = f['uindex'][:,:,x//2]
elif direction=='y':
temp = f['temp'][:,y//2,:]
uindex = f['uindex'][:,y//2,:]
elif direction=='z':
temp = f['temp'][z//2,:,:]
uindex = f['uindex'][z//2,:,:]
return temp,uindex
#%%
# Rejection algorithm based on random walk
# ----------------------------------------
# We created a tiny ensemble of 10 different SHEMAT-Suite models in the previous step and will use a rejection algorithm to get a posterior ensemble of models.
# For this, we "borrow" the Metropolis acceptance probability which is defined as:
#
# .. math::
# \alpha(x_{t-1},z) = \begin{cases} min\big(\frac{p(z)}{p(x_{t-1})},1\big), & \text{if } p(x_{t-1}) > 0\\
# 1, & \text{if } p(x_{t-1}) = 0 \end{cases}
#
# A different approach would be to assess the missfit (as RMS error) of each realisation.
# .. math::
# \alpha(x_{t-1},z) = \begin{cases} exp\big(-\frac{S(z) - S(x_{t-1}) }{u_T}\big), & \text{if } S(z) > S(x_{t-1})\\
# 1, & \text{otherwise } \end{cases}
#
# We will use the second approach for now. As discretization error, we take a value from Elison(2015), $u_{T-discr} = 0.7$ K, an estimate of error. This error should
# be estimated to best knowledge.
#
# Using Gauss error propagation, we assess a potential error for the realisations.
# .. math::
# u_T = \sqrt{\big(\frac{\partial T}{\partial x_1}u_1 \big)^2 + ... + \big(\frac{\partial T}{\partial x_n}u_n \big)^2}
#%%
# Literature sources for log-errors:
# ----------------------------------
# _The lower part of the disturbed log profile (below the cross-over point) was rotated to match these corrected tempera-tures. In the upper part of the profile, the same correction as for method A was applied. The quality of this correction method strongly depends on the correct calculation of the lowermost profile temperatures. According to Förster (2001), most of the corrected tem-peratures have errors of ± 3 to 5 K._ https://doi.org/10.1186/s40517-020-00181-w
#
#
# _The effective accuracy of commercial temperature logs is ±0.5ºC (Blackwell and Spafford, 1987)._ http://www.sprensky.com/publishd/temper2.html
#
# _More normal accuracies are +- 0.25 °C over 0-200 °C_ Keith Geothermal Energy lecture
#
# For errors as a function of e.g. logging speed, measurement response time etc, look https://doi.org/10.1016/j.petrol.2020.107727
# import DTM
dtm = np.load('../../models/20210319_MC_no_middle_filling/Graben_base_model/Graben_base_model_topography.npy')
# In[4]:
# load base model
model_path = '../models/2021-06-04_POC_base_model/'
geo_model = gp.load_model('POC_PCT_model',
path=model_path, recompile=False)
# In[5]:
# get delx and dely of the model, so cell sizes
delx = geo_model._grid.regular_grid.dx
dely = geo_model._grid.regular_grid.dy
delz = geo_model._grid.regular_grid.dz
# In[6]:
# import gravity data and borehole locations
g_data = pd.read_csv('../models/20210319_MC_no_middle_filling/2021-06-16_grav_of_POC_base_model.csv')
bhole = np.array([[31, 14],
[78, 22],
[53, 34],
[49, 44]])
# In[7]:
# plot the map
fig = plt.figure(figsize=[15,7])
cs = plt.contourf(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],20, cmap='gist_earth')
plt.contour(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],10, colors='gray', zorder=1)
plt.scatter(g_data['X'], g_data['Y'], marker='s', s=150, c='brown', edgecolor='k',
label='gravity stations', zorder=2)
plt.scatter(bhole[:,0]*delx, bhole[:,1]*dely, marker='^', s=200, c='k', label='boreholes',
zorder=3)
plt.colorbar(cs, label='elevation [m]')
plt.legend(frameon=True)
plt.xlabel('X [m]')
plt.ylabel('Y [m]');
#fig.savefig('../imgs/Model_topography_and_grav_stations.png', dpi=300, bbox_inches='tight')
# ## Load the Lithology Blocks
# First let's load the lithology block of all 1000 models, looking at the probabilities of the graben unit and at the model entropy.
# In[8]:
# load and calculate Probability and Entropy using GemPy bayesian field functions
full_ens = np.load('../../../data_var/lith_block_samples_all_1000real.npy')
prior_prob = probability(full_ens)
prior_entr = information_entropy(prior_prob)
# In[9]:
layer = 5
# upper filling
gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=prior_prob[layer],
kwargs_regular_grid={'cmap': 'viridis',
'norm': None})
# lower filling
gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=prior_prob[layer+1],
kwargs_regular_grid={'cmap': 'viridis',
'norm': None});
# In[16]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=prior_entr,
kwargs_regular_grid={'cmap': 'magma',
'norm': None,
'colorbar': True}
)
# The Information entropy plot shows where the maximal Uncertainty is in our model, i.e. where the contacts are between the graben units and the basement. A lot of uncertainty is visible in the right part of the model (between around 16000 and 20000), where the main graben unit may or may not be present.
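# For reference, the information entropy shown above is computed cell-wise from the unit
# probabilities p_u as H = -sum_u p_u * log2(p_u) (Shannon entropy; gempy's
# information_entropy implements a measure of this form). A minimal sketch with a made-up
# probability column for a single cell that can contain three possible units:
p_example = np.array([0.2, 0.3, 0.5])
H_example = -np.sum(p_example * np.log2(p_example))
print(f"Entropy of the example cell: {H_example:.3f} bits")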
# # Gravity rejection
# In a first stage, we take a look at the gravity signal of each realization. The gravity signal is "recorded" at each of the squares you see in the plot above. Comparing the recorded gravity signals of each realization with the ones of the base model (which we regard as the "true" observations), we can differentiate between fitting and non-fitting ensemble members.
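# The misfit of realization j is measured as a root-mean-square error over the n gravity
# stations, RMSE_j = sqrt((1/n) * sum_i (g_sim[i, j] - g_obs[i])^2), which is what the
# c_rmse() helper defined above computes column-wise on the simulation DataFrame.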
# In[18]:
g_simu = pd.read_csv('../models/20210319_MC_no_middle_filling/MC_grav_simulations_run01and02_1000_reals_rseed0_250+50mstd.csv',
sep=';')
# In[27]:
add_noise = True
if add_noise==True:
np.random.seed(27)
noise = np.random.normal(0, 1., size=15)
g_data_noise = g_data.copy()
g_data_noise['grav'] = g_data_noise['grav'] + noise
print(np.mean(noise))
u_g = np.mean(noise)
# In[20]:
#calculate stdeviation and mean of the prior ensemble
g_simu_stdev = g_simu.std(axis=1)
g_simu_mean = g_simu.mean(axis=1)
# In[21]:
fig = plt.figure(figsize=[15,7])
cs = plt.contourf(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],20, cmap='gist_earth')
plt.contour(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],10, colors='gray', zorder=1)
cs = plt.scatter(g_data['X'], g_data['Y'], c=g_simu_stdev, marker='s',
s=100, zorder=2, cmap='magma')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.colorbar(cs, label='standard deviation');
# In[22]:
g_simu_stdev
# We see that stations 0 and 14 are not sensitive to changes in the PCT depth, so they do not really help in the rejection but still influence the RMSE. By focusing on the sensitive locations, we are likely to increase the performance of the rejection algorithm.
# In[23]:
# drop the first and last entry which do not show variation
simu_drop = g_simu.drop(labels=[0,14], axis=0)
simu_drop_std = simu_drop.std(axis=1)
#station_drop = g_coordinates.drop(labels=[0,14], axis=0)
g_data_drop = g_data.drop(labels=[0,14], axis=0)
g_data_noise_drop = g_data_noise.drop(labels=[0,14], axis=0)
# In[24]:
fig = plt.figure(figsize=[15,7])
cs = plt.contourf(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],20, cmap='gist_earth')
plt.contour(dtm[:,:,0], dtm[:,:,1], dtm[:,:,2],10, colors='gray', zorder=1)
cs = plt.scatter(g_data_drop['X'], g_data_drop['Y'], c=simu_drop_std, marker='s', s=100,
cmap='magma', zorder=2)
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.colorbar(cs, label='standard deviation');
# In[28]:
seed = random.seed(4)
rmse = c_rmse(g_simu, g_data_drop['grav'])
accept, P = rejection(rmse=rmse, rnseed=seed)
# In[29]:
accepted_reals = full_ens[accept, :]
grav_prob = probability(accepted_reals)
grav_entr = information_entropy(grav_prob)
# In[30]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=grav_entr,
kwargs_regular_grid={'cmap': 'magma',
'norm': None}
)
plt.savefig('../imgs/POC_grav_posterior_IE.png', dpi=300, bbox_inches='tight')
# In[32]:
np.save('../../../data_var/lith_blocks_samples_run01and02.npy', full_ens)
np.save('../../../data_var/lith_blocks_accepted_23042021.npy', accepted_reals)
np.save('../../../data_var/accepted_realizations_23042021.npy', accept)
# ## With noisy data
# What if we add noise to our data?
# In[200]:
random.seed(4)
rmse_noise = c_rmse(g_simu, g_data_noise_drop['grav'])
accepted_noise, P_noise = rejection(rmse=rmse_noise, rnseed=random.seed(40))
# In[171]:
accepted_reals_n = full_ens[accepted_noise, :]
grav_prob_n = probability(accepted_reals_n)
grav_entr_n = information_entropy(grav_prob_n)
# In[172]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=grav_entr_n,
kwargs_regular_grid={'cmap': 'magma',
'norm': None}
)
# We see that here, more realizations get accepted, in this case around 16 % more.
# ## Temperature rejection
# The black triangles in the map plot are the locations of 4 different boreholes in the model. Temperature data from these boreholes is now used in a similar fashion to further reduce the ensemble to realizations that fit both the gravity and the temperature signal.
# In[31]:
f = h5py.File('../models/20210219_MC_ensemble/PCT_base_model_final.h5','r')
# In[32]:
z,y,x = f['uindex'].shape
# In[33]:
# define uT
T_error = 0.25 # temperature error tool accuracy
s_error = fahrenheit_to_celsius(1.25, difference=True) # sensor response time of 2 sec and 1 year after drilling
l_error = fahrenheit_to_celsius(1.25, difference=True) # logging speed of 20/ft after 1 year
d_error = 1.0 # estimated temperature error by discretization
#u_T = np.sqrt(T_error[0]**2 + T_error[1]**2 + T_error[2]**2 + T_error[3]**2 + d_error**2)
#u_T = np.sum(T_error**2)/4
u_T = np.sqrt(T_error**2 + s_error**2 + l_error**2 + d_error**2)
print(u_T)
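# Note: `fahrenheit_to_celsius` is defined earlier in this workflow and not shown in this
# excerpt. A minimal sketch, assuming `difference=True` converts a temperature *difference*
# (delta_C = delta_F * 5/9) instead of an absolute temperature:
def fahrenheit_to_celsius_sketch(value, difference=False):
    if difference:
        return value * 5.0 / 9.0
    return (value - 32.0) * 5.0 / 9.0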
# In[34]:
# load Simulation outputs. Those outputs get written by SHEMAT-Suite if runmode = 1
outp_path = '../models/20210319_MC_no_middle_filling/SHEMAT_MC/'
#accepted = accept
accepted = np.loadtxt('../models/20210319_MC_no_middle_filling/accepted_realizations_01042021').astype(int)
diffs = np.loadtxt(outp_path+'PCT_MC_1dat_cor_final.dat',skiprows=3,usecols=(8,),dtype=float)
for i in accepted[1:]:
n = np.loadtxt(outp_path+f'PCT_MC_{i}dat_cor_final.dat',skiprows=3,usecols=(8,),dtype=float)
diffs=np.vstack([diffs,n])
# In[35]:
# calculate RMSE of each realisation.
n = diffs.shape[1] # number of temperature data points per realization (columns of diffs)
diffs_sq = diffs**2
ssr = diffs_sq.sum(axis=1)
rmse = np.sqrt((diffs_sq.sum(axis=1)/n))
# In[36]:
# this is a matrix with all vectors: the first columns are the temperature differences at the well data points,
# the next column is the SSR, and the final column is the RMSE
tot_diffs = np.column_stack((diffs,ssr,rmse))
print(tot_diffs.shape)
# add index to the realizations
ind = np.array(range(tot_diffs.shape[0]))
tot_diffs = np.column_stack((tot_diffs,accepted))
# ## Rejection sampling
# We now start with a random sample and go randomly through the pool, accepting and rejecting realizations.
# The algorithm starts with one reference sample `Ref`. Then, iteratively, samples (= realizations) get accepted or rejected based on their RMSE values. That is why we use the RMSE column of `tot_diffs`. Alternatively, one could also just use the `rmse` array.
# In[37]:
# Chronological implementation - start from 1 to N
# Can be used here, if samples generated are already in a random order and not correlated.
# That is usually the case with GemPy exports to SHEMAT-Suite.
random.seed(42)
col = 129
Ref = tot_diffs[0,col]
accept = []
P = []
k=0
for i in range(1,tot_diffs.shape[0]):
if tot_diffs[i,col] < Ref:
Ref = tot_diffs[i,col]
accept.append(i)
elif random.random() < np.exp(-(tot_diffs[i,col] - Ref)/(u_T)):
P.append(np.exp(-(tot_diffs[i,col] - Ref)/(u_T)))
Ref = tot_diffs[i,col]
accept.append(i)
k += 1
print(len(accept))
# In[38]:
accepted_reals_T = accepted_reals[accept, :]
grav_T_prob = probability(accepted_reals_T)
grav_T_entr = information_entropy(grav_T_prob)
# In[39]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=grav_T_entr,
kwargs_regular_grid={'cmap': 'magma',
'norm': None}
)
plt.savefig('../imgs/POC_grav_temp_posterior_IE.png', dpi=300, bbox_inches='tight')
# In[48]:
np.savetxt('../models/20210319_MC_no_middle_filling/accepted_after_temp_rejection', accept)
# ## Rejection sampling
# We now start with a random sample and go randomly through the pool, accepting and rejecting realizations.
# The algorithm starts with one reference sample `Ref`. Then, iteratively, samples (= realizations) get accepted or rejected based on their RMSE values. That is why we use the RMSE column of `tot_diffs`. Alternatively, one could also just use the `rmse` array.
# In[40]:
# have a look at sensitive data points
st = np.std(diffs, axis=0)
st.shape
# In[41]:
plt.hist(st)
# We see that many of the data points are not sensitive
# In[53]:
indices = np.where(st < 0.5)
# In[54]:
diffs_red = np.delete(diffs, obj=indices, axis=1)
# Now let's see how the removal of relatively robust datapoints helps:
# In[55]:
# calculate RMSE of each realisation.
n = diffs_red.shape[1] # number of remaining temperature data points per realization
diffs_sq = diffs_red**2
ssr = diffs_sq.sum(axis=1)
rmse = np.sqrt((diffs_sq.sum(axis=1)/n))
# In[56]:
# this is a matrix with all vectors: the first columns are the differences at the remaining (sensitive) data points,
# the next column is the SSR, and the final column is the RMSE
tot_diffs = np.column_stack((diffs_red,ssr,rmse))
print(tot_diffs.shape)
# add index to the realizations
ind = np.array(range(tot_diffs.shape[0]))
tot_diffs = np.column_stack((tot_diffs,accepted))
# In[57]:
# Chronological implementation - start from 1 to N
# Can be used here, if samples generated are already in a random order and not correlated.
# That is usually the case with GemPy exports to SHEMAT-Suite.
random.seed(42)
col = 54
Ref = tot_diffs[0,col]
accept = []
P = []
k=0
for i in range(1,tot_diffs.shape[0]):
if tot_diffs[i,col] < Ref:
Ref = tot_diffs[i,col]
accept.append(i)
elif random.random() < np.exp(-(tot_diffs[i,col] - Ref)/(u_T)):
P.append(np.exp(-(tot_diffs[i,col] - Ref)/(u_T)))
Ref = tot_diffs[i,col]
accept.append(i)
k += 1
print(len(accept))
#print(accept)
# In[58]:
accepted_reals_Ts = accepted_reals[accept, :]
grav_Ts_prob = probability(accepted_reals_Ts)
grav_Ts_entr = information_entropy(grav_Ts_prob)
# In[59]:
p2dp = gp.plot_2d(geo_model,
show_lith=False, show_boundaries=False, show_data=False,
regular_grid=grav_Ts_entr,
kwargs_regular_grid={'cmap': 'magma',
'norm': None}
)
plt.savefig('../imgs/POC_grav_temp_red_posterior_IE.png', dpi=300, bbox_inches='tight')
# In[56]:
np.savetxt('../models/20210319_MC_no_middle_filling/accepted_after_temp_rejection_reduced_datapoints', accept)
# And we see that temperature data is not sensitive to changes in the PCT-depth.
#
# But what if we also treat the thermal conductivity as an uncertain parameter?
# *Then the rejection is way more rigorous.*
# In[60]:
fids = glob.glob('H:PCT_SHEMAT/20210219_MC_outputs/*.h5')
# In[70]:
outpath = 'H:PCT_SHEMAT/20210219_MC_outputs\\'
poTemp = []
poUi = []
dicfil = {}
for fn in fids:
for i in accept:
if fn == outpath+f"PCT_MC_{i}var_TCt_final.h5":
dT,dui = extTui(fn, dimension=2, direction='y')
poTemp.append(dT)
poUi.append(dui)
dicfil[fn.split('/')[-1]] = dui
# In[71]:
poTempa = np.asarray(poTemp)
poUia = np.asarray(poUi)
accepta = np.asarray(accept)
print(poUia.shape,poTempa.shape,accepta.shape)
np.savetxt('accepted_realisations',accepta,fmt='%i',delimiter=' ',newline='\n')
#np.savetxt('posterior_Temps',poTempa,fmt='%.5f',delimiter=' ',newline='\n',header=" posterior 61 realizations for Temperature")
#np.savetxt('posterior_Uindex',poUia,fmt='%i',delimiter=' ',newline='\n')
# In[72]:
# calculate mean temperature field and mean posterior uindex
mTemp = np.mean(poTempa,axis=0)
mUi = np.mean(poUia,axis=0)
# import y and z for visualising
plfn = h5py.File('../models/20210219_MC_ensemble/PCT_base_model_final.h5','r')
x = plfn['x'][0,0,:]
y = plfn['y'][0,:,0]
z = plfn['z'][:,0,0]
refT = plfn['temp'][:,25,:]
# In[73]:
poUi[0].shape
# In[79]:
fig = plt.figure(figsize=(20,8))
cs = plt.contourf(x,z-6500.,mUi,cmap='viridis')
plt.contour(x,z-6500.,mUi,5, colors='gray', zorder=1)
plt.tick_params(axis='both',labelsize=14)
plt.xlabel('x [m]',fontsize=16)
plt.ylabel('depth[m]',fontsize=16)
plt.savefig('../imgs/POC_mean_uindex.png', dpi=300, bbox_inches='tight')
# In[80]:
fig = plt.figure(figsize=(20,8))
cs = plt.pcolor(x,z-6500.,mTemp,cmap='viridis', shading='auto')
plt.tick_params(axis='both',labelsize=14)
plt.xlabel('x [m]',fontsize=16)
plt.ylabel('depth[m]',fontsize=16)
cbar = plt.colorbar(cs,orientation='vertical')
cbar.set_label('Temperature [$^\circ$C]',fontsize=16)
cbar.ax.tick_params(labelsize=14)
# In[86]:
#plot ssr of mean posterior and reference model
fig = plt.figure(figsize=(20,8))
cs = plt.pcolor(x,z-6500.,np.abs((refT-mTemp)),cmap='RdBu', shading='auto')
plt.tick_params(axis='both',labelsize=14)
plt.xlabel('x [m]',fontsize=16)
plt.ylabel('depth[m]',fontsize=16)
cbar = plt.colorbar(cs,orientation='vertical')
cbar.set_label('Temperature [$^\circ$C]',fontsize=16)
cbar.ax.tick_params(labelsize=14)
plt.savefig('../imgs/POC_absolut_differences_reference_ensemble_mean.png', dpi=300, bbox_inches='tight')
# In[ ]:
|
the-stack_0_13426 | """Low-level api to work with relationships"""
import functools
import itertools
class BaseFilter:
"Base filter that accepts one argument"
def __init__(self, **query):
assert len(query) == 1
for key, value in query.items():
self.key = key
self.value = value
@staticmethod
def parse_key(key):
"Parses the key to remove the __ if there is one"
return key.split("__")[0]
def match(self, value):
"Checks wether value matches this filter"
parsed_key = self.parse_key(self.key)
if parsed_key != "_self":
value = getattr(value, parsed_key, None)
if value is None:
return False
if self.key.endswith("__gt"):
return value > self.value
elif self.key.endswith("__gte"):
return value >= self.value
elif self.key.endswith("__lt"):
return value < self.value
elif self.key.endswith("__lte"):
return value <= self.value
elif self.key.endswith("__ne"):
return value != self.value
else:
return value == self.value
class AndFilter(BaseFilter):
"Composite filter that combines two filters"
def __init__(self, filters, **query):
self.filters = filters
for key, value in query.items():
self.filters.append(BaseFilter(**{key:value}))
def match(self, value):
is_match = True
for _filter in self.filters:
is_match &= _filter.match(value)
return is_match
class OrFilter(AndFilter):
"Composite filter that combines two filters via an or"
def match(self, value):
is_match = False
for _filter in self.filters:
is_match |= _filter.match(value)
return is_match
class Vertex:
"Represents a dependency link"
def __init__(self, vertex_type:str, from_node:str, to_node:str, **attributes):
# Ensures that we won't override our parameters...
assert "from_node" not in attributes
assert "to_node" not in attributes
assert "vertex_type" not in attributes
self.vertex_type = vertex_type
self.from_node = from_node
self.to_node = to_node
for key, value in attributes.items():
setattr(self, key, value)
    def __eq__(self, other):
        if isinstance(other, Vertex):
            return (self.vertex_type == other.vertex_type
                    and self.from_node == other.from_node
                    and self.to_node == other.to_node)
        return NotImplemented
def __hash__(self):
return hash((self.vertex_type, self.from_node, self.to_node))
def __str__(self):
return "(%s) --> (%s)" % (self.from_node, self.to_node)
class CircularDependencyError(BaseException):
pass
class DependencyGraph:
def __init__(self, nodes, plugins):
self.nodes = set(nodes)
self.plugins = plugins
def generate_vertices():
nodes = list(self.nodes)
while nodes:
yield from self.build_vertices(nodes.pop())
self.vertices = set(generate_vertices())
if not self.is_acyclic():
raise CircularDependencyError()
def __str__(self):
return "\n".join([str(vertex) for vertex in self.vertices])
def is_acyclic(self):
"Checks for circular dependencies"
nodes = []
# We build an index
connected_to = {node: set() for node in self.nodes}
from_nodes = {node: set() for node in self.nodes}
for vertice in self.vertices:
connected_to[vertice.to_node].add(vertice)
from_nodes[vertice.from_node].add(vertice)
for node in self.nodes:
# Only nodes that don't have someone dependent on
if len(connected_to[node]) == 0:
nodes.append(node)
vertices = list(self.vertices)
deleted_vertices = set()
while nodes:
node = nodes.pop()
connected = from_nodes[node] - deleted_vertices
for vertice in connected:
deleted_vertices.add(vertice)
if not connected_to[node] - deleted_vertices:
nodes.append(vertice.to_node)
return len(vertices) == len(deleted_vertices)
def build_vertices(self, node):
plugins = filter(lambda p: p.can_create_vertex(node), self.plugins)
for plugin in plugins:
for vertex in plugin.vertices(node):
if vertex.to_node not in self.nodes:
self.nodes.add(vertex.to_node)
# I know, we are limited by recursions.
# Fix it when it is a problem
yield from self.build_vertices(vertex.to_node)
yield vertex
def dependencies(self, node, follow=False):
"Returns dependencies of a node, either all or direct"
vertices = filter(lambda v: v.from_node == node, self.vertices)
for vertex in vertices:
yield vertex.to_node
if follow:
yield from self.dependencies(vertex.to_node, follow)
class Plugin:
"Represents a plugin"
file_extensions = "*"
def __init__(self, **kwargs):
        for key, value in kwargs.items():
setattr(self, key, value)
def vertices(self, node):
"Yields vertices for a node"
raise NotImplementedError()
def can_create_vertex(self, node):
"Checks if this plugin can create links for this type of node"
if self.file_extensions == "*":
return True
if isinstance(self.file_extensions, str):
return node.name.endswith(self.file_extensions)
else:
# If the file extension of the node name is in the plugins file ext
ends_with = False
for file_ext in self.file_extensions:
ends_with = ends_with or node.name.endswith(file_ext)
return ends_with
class StaticDependencies(Plugin):
"Plugin to illustrate manual dependencies"
# Format of a dependency:
# ("A", ("B", "C", "D"))
def __init__(self, dependencies, **kwargs):
self.dependencies = dependencies
def vertices(self, node):
for deps in self.dependencies:
if deps[0] == node:
for sub_node in deps[1]:
yield Vertex("static", node, sub_node)
|
the-stack_0_13427 | """A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <[email protected]>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <[email protected]> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <[email protected]> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import re, socket
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = b'\r'
LF = b'\n'
CRLF = CR+LF
class POP3:
"""This class supports both the minimal and optional command sets.
Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well.)
Minimal Command Set:
USER name user(name)
PASS string pass_(string)
STAT stat()
LIST [msg] list(msg = None)
RETR msg retr(msg)
DELE msg dele(msg)
NOOP noop()
RSET rset()
QUIT quit()
Optional Commands (some servers support these):
RPOP name rpop(name)
APOP name digest apop(name, digest)
TOP msg n top(msg, n)
UIDL [msg] uidl(msg = None)
Raises one exception: 'error_proto'.
Instantiate with:
POP3(hostname, port=110)
NB: the POP protocol locks the mailbox from user
authorization until QUIT, so be sure to get in, suck
the messages, and quit, each time you access the
mailbox.
POP is a line-based protocol, which means large mail
messages consume lots of python cycles reading them
line-by-line.
If it's available on your mail server, use IMAP4
instead, it doesn't suffer from the two problems
above.
"""
encoding = 'UTF-8'
def __init__(self, host, port=POP3_PORT,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.port = port
self.sock = self._create_socket(timeout)
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
def _create_socket(self, timeout):
return socket.create_connection((self.host, self.port), timeout)
def _putline(self, line):
if self._debugging > 1: print('*put*', repr(line))
self.sock.sendall(line + CRLF)
# Internal: send one command to the server (through _putline())
def _putcmd(self, line):
if self._debugging: print('*cmd*', repr(line))
line = bytes(line, self.encoding)
self._putline(line)
# Internal: return one line from the server, stripping CRLF.
# This is where all the CPU time of this module is consumed.
# Raise error_proto('-ERR EOF') if the connection is closed.
def _getline(self):
line = self.file.readline()
if self._debugging > 1: print('*get*', repr(line))
if not line: raise error_proto('-ERR EOF')
octets = len(line)
# server can send any combination of CR & LF
# however, 'readline()' returns lines ending in LF
# so only possibilities are ...LF, ...CRLF, CR...LF
if line[-2:] == CRLF:
return line[:-2], octets
        if line[:1] == CR:
return line[1:-1], octets
return line[:-1], octets
# Internal: get a response from the server.
# Raise 'error_proto' if the response doesn't start with '+'.
def _getresp(self):
resp, o = self._getline()
if self._debugging > 1: print('*resp*', repr(resp))
if not resp.startswith(b'+'):
raise error_proto(resp)
return resp
# Internal: get a response plus following text from the server.
def _getlongresp(self):
resp = self._getresp()
list = []; octets = 0
line, o = self._getline()
while line != b'.':
if line.startswith(b'..'):
o = o-1
line = line[1:]
octets = octets + o
list.append(line)
line, o = self._getline()
return resp, list, octets
# Internal: send a command and get the response
def _shortcmd(self, line):
self._putcmd(line)
return self._getresp()
# Internal: send a command and get the response plus following text
def _longcmd(self, line):
self._putcmd(line)
return self._getlongresp()
# These can be useful:
def getwelcome(self):
return self.welcome
def set_debuglevel(self, level):
self._debugging = level
# Here are all the POP commands:
def user(self, user):
"""Send user name, return response
(should indicate password required).
"""
return self._shortcmd('USER %s' % user)
def pass_(self, pswd):
"""Send password, return response
(response includes message count, mailbox size).
NB: mailbox is locked by server from here to 'quit()'
"""
return self._shortcmd('PASS %s' % pswd)
def stat(self):
"""Get mailbox status.
Result is tuple of 2 ints (message count, mailbox size)
"""
retval = self._shortcmd('STAT')
rets = retval.split()
if self._debugging: print('*stat*', repr(rets))
numMessages = int(rets[1])
sizeMessages = int(rets[2])
return (numMessages, sizeMessages)
def list(self, which=None):
"""Request listing, return result.
Result without a message number argument is in form
['response', ['mesg_num octets', ...], octets].
Result when a message number argument is given is a
single response: the "scan listing" for that message.
"""
if which is not None:
return self._shortcmd('LIST %s' % which)
return self._longcmd('LIST')
def retr(self, which):
"""Retrieve whole message number 'which'.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('RETR %s' % which)
def dele(self, which):
"""Delete message number 'which'.
Result is 'response'.
"""
return self._shortcmd('DELE %s' % which)
def noop(self):
"""Does nothing.
One supposes the response indicates the server is alive.
"""
return self._shortcmd('NOOP')
def rset(self):
"""Unmark all messages marked for deletion."""
return self._shortcmd('RSET')
def quit(self):
"""Signoff: commit changes on server, unlock mailbox, close connection."""
try:
resp = self._shortcmd('QUIT')
except error_proto as val:
resp = val
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
#__del__ = quit
# optional commands:
def rpop(self, user):
"""Not sure what this does."""
return self._shortcmd('RPOP %s' % user)
timestamp = re.compile(br'\+OK.*(<[^>]+>)')
def apop(self, user, password):
"""Authorisation
- only possible if server has supplied a timestamp in initial greeting.
Args:
user - mailbox user;
password - mailbox password.
NB: mailbox is locked by server from here to 'quit()'
"""
secret = bytes(password, self.encoding)
m = self.timestamp.match(self.welcome)
if not m:
raise error_proto('-ERR APOP not supported by server')
import hashlib
digest = m.group(1)+secret
digest = hashlib.md5(digest).hexdigest()
return self._shortcmd('APOP %s %s' % (user, digest))
def top(self, which, howmuch):
"""Retrieve message header of message number 'which'
and first 'howmuch' lines of message body.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('TOP %s %s' % (which, howmuch))
def uidl(self, which=None):
"""Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets]
"""
if which is not None:
return self._shortcmd('UIDL %s' % which)
return self._longcmd('UIDL')
try:
import ssl
except ImportError:
pass
else:
class POP3_SSL(POP3):
"""POP3 client class over SSL connection
Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None)
hostname - the hostname of the pop3 over ssl server
port - port number
        keyfile - PEM formatted file that contains your private key
certfile - PEM formatted certificate chain file
See the methods of the parent class POP3 for more documentation.
"""
def __init__(self, host, port=POP3_SSL_PORT,
keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.keyfile = keyfile
self.certfile = certfile
POP3.__init__(self, host, port, timeout)
def _create_socket(self, timeout):
sock = POP3._create_socket(self, timeout)
return ssl.wrap_socket(sock, self.keyfile, self.certfile)
__all__.append("POP3_SSL")
if __name__ == "__main__":
import sys
a = POP3(sys.argv[1])
print(a.getwelcome())
a.user(sys.argv[2])
a.pass_(sys.argv[3])
a.list()
(numMsgs, totalSize) = a.stat()
for i in range(1, numMsgs + 1):
(header, msg, octets) = a.retr(i)
print("Message %d:" % i)
for line in msg:
print(' ' + line)
print('-----------------------')
a.quit()
|
the-stack_0_13429 | import pygame
import ctypes
import os
import queue
import sys
import random
import Main
import Functions
import Screens
pygame.init()
# getting the size of user's screen
user32 = ctypes.windll.user32
screensize = user32.GetSystemMetrics(78), user32.GetSystemMetrics(79)
scaled_screen_height = int(screensize[1] * .9 * .75)
# setting the size of snake and fields.
scale_factor = 2 # can be changed to a whole number; the bigger the number, the smaller the snake
body_size = int(scaled_screen_height / 12 / scale_factor)
block_size = body_size * scale_factor #: base variable for setting all the sizes - important one!
# creating the game window
game_width, game_height = 12 * block_size, 16 * block_size #: size of the game - adapted to background image
icon = pygame.image.load('media/heads/head_down.jpg')
pygame.display.set_caption('Snake')
pygame.display.set_icon(icon)
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (int((screensize[0] - 12 * block_size) / 2), 32)
# all images in the game
screen = pygame.display.set_mode((game_width, game_height))
surface = pygame.image.load("media/screens/background_800.png").convert()
surface = pygame.transform.scale(surface, (block_size*12, block_size*16))
head = Functions.load_img('media/heads/head_down.png', 1.5)
head_eat = Functions.load_img('media/heads/head_eat_down.png', 1.5)
head_dead = Functions.load_img('media/heads/head_down_dead.png', 1.5)
food = Functions.load_img('media/food/mouse.png', 1.2)
lost = pygame.image.load("media/screens/lost.png").convert()
lost = pygame.transform.scale(lost, (block_size*6, block_size*6))
welcome = pygame.image.load("media/screens/welcome.png").convert()
welcome = pygame.transform.scale(welcome, (block_size*6, block_size*6))
# movement constants
UP = (0, -1)
DOWN = (0, 1)
LEFT = (-1, 0)
RIGHT = (1, 0)
PAUSE = (0, 0)
# colors
dark_gray = (30, 30, 30)
black = (0, 0, 0)
orange = (219, 106, 15)
# sad variables
is_eaten = False
is_dead = False
# eaten mouses
points = 0
high_score = list()
score_multi = 1
# fonts
large_text = pygame.font.SysFont('Calibri', int(block_size*2), True, False)
normal_text = pygame.font.SysFont('Calibri', int(block_size*0.8), True, False)
small_text = pygame.font.SysFont('Calibri', int(block_size*0.4), True, False)
# time
clock = pygame.time.Clock()
fps = 10 # frames per second
screen.blit(surface, [0, 0])
# queue with movements
q = queue.Queue()
x_tmp = 1
y_tmp = 0
starting = True
resume_dir = RIGHT
|
the-stack_0_13432 | # Dual Annealing implementation.
# Copyright (c) 2018 Sylvain Gubian <[email protected]>,
# Yang Xiang <[email protected]>
# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
"""
A Dual Annealing global optimization algorithm
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize
from scipy.special import gammaln
from scipy._lib._util import check_random_state
__all__ = ['dual_annealing']
class VisitingDistribution(object):
"""
Class used to generate new coordinates based on the distorted
Cauchy-Lorentz distribution. Depending on the steps within the strategy
chain, the class implements the strategy for generating new location
changes.
Parameters
----------
lb : array_like
A 1-D numpy ndarray containing lower bounds of the generated
components. Neither NaN or inf are allowed.
ub : array_like
A 1-D numpy ndarray containing upper bounds for the generated
components. Neither NaN or inf are allowed.
visiting_param : float
Parameter for visiting distribution. Default value is 2.62.
        Higher values give the visiting distribution a heavier tail, which
makes the algorithm jump to a more distant region.
        The value range is (0, 3]. Its value is fixed for the life of the
object.
rand_state : `~numpy.random.mtrand.RandomState` object
A `~numpy.random.mtrand.RandomState` object for using the current state
of the created random generator container.
"""
TAIL_LIMIT = 1.e8
MIN_VISIT_BOUND = 1.e-10
def __init__(self, lb, ub, visiting_param, rand_state):
# if you wish to make _visiting_param adjustable during the life of
# the object then _factor2, _factor3, _factor5, _d1, _factor6 will
# have to be dynamically calculated in `visit_fn`. They're factored
# out here so they don't need to be recalculated all the time.
self._visiting_param = visiting_param
self.rand_state = rand_state
self.lower = lb
self.upper = ub
self.bound_range = ub - lb
# these are invariant numbers unless visiting_param changes
self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
self._visiting_param - 1.0))
self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
/ (self._visiting_param - 1.0))
self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
3.0 - self._visiting_param))
self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
self._d1 = 2.0 - self._factor5
self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
def visiting(self, x, step, temperature):
""" Based on the step in the strategy chain, new coordinated are
generated by changing all components is the same time or only
one of them, the new values are computed with visit_fn method
"""
dim = x.size
if step < dim:
# Changing all coordinates with a new visiting value
visits = self.visit_fn(temperature, dim)
upper_sample = self.rand_state.random_sample()
lower_sample = self.rand_state.random_sample()
visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
x_visit = visits + x
a = x_visit - self.lower
b = np.fmod(a, self.bound_range) + self.bound_range
x_visit = np.fmod(b, self.bound_range) + self.lower
x_visit[np.fabs(
x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
else:
# Changing only one coordinate at a time based on strategy
# chain step
x_visit = np.copy(x)
visit = self.visit_fn(temperature, 1)
if visit > self.TAIL_LIMIT:
visit = self.TAIL_LIMIT * self.rand_state.random_sample()
elif visit < -self.TAIL_LIMIT:
visit = -self.TAIL_LIMIT * self.rand_state.random_sample()
index = step - dim
x_visit[index] = visit + x[index]
a = x_visit[index] - self.lower[index]
b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
x_visit[index] = np.fmod(b, self.bound_range[
index]) + self.lower[index]
if np.fabs(x_visit[index] - self.lower[
index]) < self.MIN_VISIT_BOUND:
x_visit[index] += self.MIN_VISIT_BOUND
return x_visit
def visit_fn(self, temperature, dim):
""" Formula Visita from p. 405 of reference [2] """
x, y = self.rand_state.normal(size=(dim, 2)).T
factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
factor4 = self._factor4_p * factor1
# sigmax
x *= np.exp(-(self._visiting_param - 1.0) * np.log(
self._factor6 / factor4) / (3.0 - self._visiting_param))
den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
(3.0 - self._visiting_param))
return x / den
class EnergyState(object):
"""
    Class used to record the energy state. At any time, it knows the
currently used coordinates and the most recent best location.
Parameters
----------
lower : array_like
A 1-D numpy ndarray containing lower bounds for generating an initial
random components in the `reset` method.
upper : array_like
A 1-D numpy ndarray containing upper bounds for generating an initial
        random components in the `reset` method. Neither NaN nor inf values
        are allowed.
callback : callable, ``callback(x, f, context)``, optional
A callback function which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and `context` has value in [0, 1, 2]
"""
    # Maximum number of trials for generating a valid starting point
MAX_REINIT_COUNT = 1000
def __init__(self, lower, upper, callback=None):
self.ebest = None
self.current_energy = None
self.current_location = None
self.xbest = None
self.lower = lower
self.upper = upper
self.callback = callback
def reset(self, func_wrapper, rand_state, x0=None):
"""
        Initialize the current location in the search domain. If `x0` is not
provided, a random location within the bounds is generated.
"""
if x0 is None:
self.current_location = self.lower + rand_state.random_sample(
len(self.lower)) * (self.upper - self.lower)
else:
self.current_location = np.copy(x0)
init_error = True
reinit_counter = 0
while init_error:
self.current_energy = func_wrapper.fun(self.current_location)
if self.current_energy is None:
raise ValueError('Objective function is returning None')
if (not np.isfinite(self.current_energy) or np.isnan(
self.current_energy)):
if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
init_error = False
message = (
'Stopping algorithm because function '
'create NaN or (+/-) infinity values even with '
'trying new random parameters'
)
raise ValueError(message)
self.current_location = self.lower + rand_state.random_sample(
self.lower.size) * (self.upper - self.lower)
reinit_counter += 1
else:
init_error = False
# If first time reset, initialize ebest and xbest
if self.ebest is None and self.xbest is None:
self.ebest = self.current_energy
self.xbest = np.copy(self.current_location)
# Otherwise, we keep them in case of reannealing reset
def update_best(self, e, x, context):
self.ebest = e
self.xbest = np.copy(x)
if self.callback is not None:
val = self.callback(x, e, context)
if val is not None:
if val:
return('Callback function requested to stop early by '
'returning True')
def update_current(self, e, x):
self.current_energy = e
self.current_location = np.copy(x)
class StrategyChain(object):
"""
Class that implements within a Markov chain the strategy for location
acceptance and local search decision making.
Parameters
----------
acceptance_param : float
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
visit_dist : VisitingDistribution
Instance of `VisitingDistribution` class.
func_wrapper : ObjectiveFunWrapper
Instance of `ObjectiveFunWrapper` class.
minimizer_wrapper: LocalSearchWrapper
Instance of `LocalSearchWrapper` class.
rand_state : `~numpy.random.mtrand.RandomState` object
A `~numpy.random.mtrand.RandomState` object for using the current state
of the created random generator container.
energy_state: EnergyState
Instance of `EnergyState` class.
"""
def __init__(self, acceptance_param, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state):
# Local strategy chain minimum energy and location
self.emin = energy_state.current_energy
self.xmin = np.array(energy_state.current_location)
# Global optimizer state
self.energy_state = energy_state
# Acceptance parameter
self.acceptance_param = acceptance_param
# Visiting distribution instance
self.visit_dist = visit_dist
# Wrapper to objective function
self.func_wrapper = func_wrapper
# Wrapper to the local minimizer
self.minimizer_wrapper = minimizer_wrapper
self.not_improved_idx = 0
self.not_improved_max_idx = 1000
self._rand_state = rand_state
self.temperature_step = 0
self.K = 100 * len(energy_state.current_location)
def accept_reject(self, j, e, x_visit):
r = self._rand_state.random_sample()
pqv_temp = (self.acceptance_param - 1.0) * (
e - self.energy_state.current_energy) / (
self.temperature_step + 1.)
if pqv_temp <= 0.:
pqv = 0.
else:
pqv = np.exp(np.log(pqv_temp) / (
1. - self.acceptance_param))
if r <= pqv:
# We accept the new location and update state
self.energy_state.update_current(e, x_visit)
self.xmin = np.copy(self.energy_state.current_location)
# No improvement for a long time
if self.not_improved_idx >= self.not_improved_max_idx:
if j == 0 or self.energy_state.current_energy < self.emin:
self.emin = self.energy_state.current_energy
self.xmin = np.copy(self.energy_state.current_location)
def run(self, step, temperature):
self.temperature_step = temperature / float(step + 1)
self.not_improved_idx += 1
for j in range(self.energy_state.current_location.size * 2):
if j == 0:
if step == 0:
self.energy_state_improved = True
else:
self.energy_state_improved = False
x_visit = self.visit_dist.visiting(
self.energy_state.current_location, j, temperature)
# Calling the objective function
e = self.func_wrapper.fun(x_visit)
if e < self.energy_state.current_energy:
# We have got a better energy value
self.energy_state.update_current(e, x_visit)
if e < self.energy_state.ebest:
val = self.energy_state.update_best(e, x_visit, 0)
if val is not None:
if val:
return val
self.energy_state_improved = True
self.not_improved_idx = 0
else:
# We have not improved but do we accept the new location?
self.accept_reject(j, e, x_visit)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during annealing')
# End of StrategyChain loop
def local_search(self):
# Decision making for performing a local search
# based on strategy chain results
# If energy has been improved or no improvement since too long,
# performing a local search with the best strategy chain location
if self.energy_state_improved:
# Global energy has improved, let's see if LS improves further
e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
self.energy_state.ebest)
if e < self.energy_state.ebest:
self.not_improved_idx = 0
val = self.energy_state.update_best(e, x, 1)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during local search')
# Check probability of a need to perform a LS even if no improvement
do_ls = False
if self.K < 90 * len(self.energy_state.current_location):
pls = np.exp(self.K * (
self.energy_state.ebest - self.energy_state.current_energy) /
self.temperature_step)
if pls >= self._rand_state.random_sample():
do_ls = True
# Global energy not improved, let's see what LS gives
# on the best strategy chain location
if self.not_improved_idx >= self.not_improved_max_idx:
do_ls = True
if do_ls:
e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
self.xmin = np.copy(x)
self.emin = e
self.not_improved_idx = 0
self.not_improved_max_idx = self.energy_state.current_location.size
if e < self.energy_state.ebest:
val = self.energy_state.update_best(
self.emin, self.xmin, 2)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during dual annealing')
class ObjectiveFunWrapper(object):
def __init__(self, func, maxfun=1e7, *args):
self.func = func
self.args = args
# Number of objective function evaluations
self.nfev = 0
# Number of gradient function evaluation if used
self.ngev = 0
# Number of hessian of the objective function if used
self.nhev = 0
self.maxfun = maxfun
def fun(self, x):
self.nfev += 1
return self.func(x, *self.args)
class LocalSearchWrapper(object):
"""
Class used to wrap around the minimizer used for local search
Default local minimizer is SciPy minimizer L-BFGS-B
"""
LS_MAXITER_RATIO = 6
LS_MAXITER_MIN = 100
LS_MAXITER_MAX = 1000
def __init__(self, bounds, func_wrapper, **kwargs):
self.func_wrapper = func_wrapper
self.kwargs = kwargs
self.minimizer = minimize
bounds_list = list(zip(*bounds))
self.lower = np.array(bounds_list[0])
self.upper = np.array(bounds_list[1])
# If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
if not self.kwargs:
n = len(self.lower)
ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
self.LS_MAXITER_MIN),
self.LS_MAXITER_MAX)
self.kwargs['method'] = 'L-BFGS-B'
self.kwargs['options'] = {
'maxiter': ls_max_iter,
}
self.kwargs['bounds'] = list(zip(self.lower, self.upper))
def local_search(self, x, e):
# Run local search from the given x location where energy value is e
x_tmp = np.copy(x)
mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
if 'njev' in mres.keys():
self.func_wrapper.ngev += mres.njev
if 'nhev' in mres.keys():
self.func_wrapper.nhev += mres.nhev
# Check if is valid value
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
in_bounds = np.all(mres.x >= self.lower) and np.all(
mres.x <= self.upper)
is_valid = is_finite and in_bounds
# Use the new point only if it is valid and return a better results
if is_valid and mres.fun < e:
return mres.fun, mres.x
else:
return e, x_tmp
def dual_annealing(func, bounds, args=(), maxiter=1000,
local_search_options={}, initial_temp=5230.,
restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
maxfun=1e7, seed=None, no_local_search=False,
callback=None, x0=None):
"""
Find the global minimum of a function using Dual Annealing.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence, shape (n, 2)
Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
defining bounds for the objective function parameter.
args : tuple, optional
Any additional fixed parameters needed to completely specify the
objective function.
maxiter : int, optional
The maximum number of global search iterations. Default value is 1000.
local_search_options : dict, optional
Extra keyword arguments to be passed to the local minimizer
(`minimize`). Some important options could be:
``method`` for the minimizer method to use and ``args`` for
objective function additional arguments.
initial_temp : float, optional
The initial temperature, use higher values to facilitates a wider
search of the energy landscape, allowing dual_annealing to escape
local minima that it is trapped in. Default value is 5230. Range is
(0.01, 5.e4].
restart_temp_ratio : float, optional
During the annealing process, temperature is decreasing, when it
reaches ``initial_temp * restart_temp_ratio``, the reannealing process
is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
visit : float, optional
Parameter for visiting distribution. Default value is 2.62. Higher
values give the visiting distribution a heavier tail, this makes
the algorithm jump to a more distant region. The value range is (0, 3].
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
maxfun : int, optional
Soft limit for the number of objective function calls. If the
algorithm is in the middle of a local search, this number will be
exceeded, the algorithm will stop just after the local search is
done. Default value is 1e7.
seed : {int or `~numpy.random.mtrand.RandomState` instance}, optional
If `seed` is not specified the `~numpy.random.mtrand.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``RandomState`` instance, then that
instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the visiting distribution
function and new coordinates generation.
no_local_search : bool, optional
If `no_local_search` is set to True, a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
callback : callable, optional
A callback function with signature ``callback(x, f, context)``,
which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and ``context`` has value in [0, 1, 2], with the
following meaning:
- 0: minimum detected in the annealing process.
        - 1: detection occurred in the local search process.
- 2: detection done in the dual annealing process.
If the callback implementation returns True, the algorithm will stop.
x0 : ndarray, shape(n,), optional
Coordinates of a single n-dimensional starting point.
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
This function implements the Dual Annealing optimization. This stochastic
approach derived from [3]_ combines the generalization of CSA (Classical
Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
to a strategy for applying a local search on accepted locations [4]_.
An alternative implementation of this same algorithm is described in [5]_
and benchmarks are presented in [6]_. This approach introduces an advanced
method to refine the solution found by the generalized annealing
process. This algorithm uses a distorted Cauchy-Lorentz visiting
distribution, with its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
Where :math:`q_{a}` is a acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 1.2.0
References
----------
.. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
statistics. Journal of Statistical Physics, 52, 479-487 (1998).
.. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
Physica A, 233, 395-406 (1996).
.. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model.
Physics Letters A, 233, 216-220 (1997).
.. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
Annealing. Physical Review E, 62, 4473 (2000).
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R. The R Journal, Volume 5/1 (2013).
.. [6] Mullen, K. Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45, (2014). DOI:10.18637/jss.v060.i06
Examples
--------
The following example is a 10-dimensional problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
>>> from scipy.optimize import dual_annealing
>>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
>>> print("global minimum: xmin = {0}, f(xmin) = {1:.6f}".format(
... ret.x, ret.fun))
global minimum: xmin = [-4.26437714e-09 -3.91699361e-09 -1.86149218e-09 -3.97165720e-09
-6.29151648e-09 -6.53145322e-09 -3.93616815e-09 -6.55623025e-09
-6.05775280e-09 -5.00668935e-09], f(xmin) = 0.000000
""" # noqa: E501
if x0 is not None and not len(x0) == len(bounds):
raise ValueError('Bounds size does not match x0')
lu = list(zip(*bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
# Check that restart temperature ratio is correct
if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
raise ValueError('Restart temperature ratio has to be in range (0, 1)')
# Checking bounds are valid
if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
np.isnan(lower)) or np.any(np.isnan(upper))):
raise ValueError('Some bounds values are inf values or nan values')
# Checking that bounds are consistent
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent min < max')
# Checking that bounds are the same length
if not len(lower) == len(upper):
raise ValueError('Bounds do not have the same dimensions')
# Wrapper for the objective function
func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
    # Wrapper for the minimizer
minimizer_wrapper = LocalSearchWrapper(
bounds, func_wrapper, **local_search_options)
# Initialization of RandomState for reproducible runs if seed provided
rand_state = check_random_state(seed)
# Initialization of the energy state
energy_state = EnergyState(lower, upper, callback)
energy_state.reset(func_wrapper, rand_state, x0)
# Minimum value of annealing temperature reached to perform
# re-annealing
temperature_restart = initial_temp * restart_temp_ratio
# VisitingDistribution instance
visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
# Strategy chain instance
strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state)
need_to_stop = False
iteration = 0
message = []
# OptimizeResult object to be returned
optimize_res = OptimizeResult()
optimize_res.success = True
optimize_res.status = 0
t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
# Run the search loop
while(not need_to_stop):
for i in range(maxiter):
# Compute temperature for this step
s = float(i) + 2.0
t2 = np.exp((visit - 1) * np.log(s)) - 1.0
temperature = initial_temp * t1 / t2
if iteration >= maxiter:
message.append("Maximum number of iteration reached")
need_to_stop = True
break
# Need a re-annealing process?
if temperature < temperature_restart:
energy_state.reset(func_wrapper, rand_state)
break
# starting strategy chain
val = strategy_chain.run(i, temperature)
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
# Possible local search at the end of the strategy chain
if not no_local_search:
val = strategy_chain.local_search()
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
iteration += 1
# Setting the OptimizeResult values
optimize_res.x = energy_state.xbest
optimize_res.fun = energy_state.ebest
optimize_res.nit = iteration
optimize_res.nfev = func_wrapper.nfev
optimize_res.njev = func_wrapper.ngev
optimize_res.nhev = func_wrapper.nhev
optimize_res.message = message
return optimize_res
|
the-stack_0_13433 | from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.io as io
import tensorflow as tf
import align.detect_face
#ref = io.loadmat('pnet_dbg.mat')
with tf.Graph().as_default():
sess = tf.compat.v1.Session()
with sess.as_default():
with tf.compat.v1.variable_scope('pnet'):
# data = tf.compat.v1.placeholder(tf.float32, (None,None,None,3), 'input')
data = tf.compat.v1.placeholder(tf.float32, (1, 1610, 1901, 3), 'input')
pnet = align.detect_face.PNet({'data': data})
pnet.load('../../data/det1.npy', sess)
# with tf.compat.v1.variable_scope('rnet'):
# data = tf.compat.v1.placeholder(tf.float32, (None,24,24,3), 'input')
# rnet = align.detect_face.RNet({'data':data})
# rnet.load('../../data/det2.npy', sess)
# with tf.compat.v1.variable_scope('onet'):
# data = tf.compat.v1.placeholder(tf.float32, (None,48,48,3), 'input')
# onet = align.detect_face.ONet({'data':data})
# onet.load('../../data/det3.npy', sess)
def pnet_fun(img): return sess.run(
('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0': img})
# rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
# onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
ref = io.loadmat('pnet_dbg.mat')
img_x = np.expand_dims(ref['im_data'], 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = pnet_fun(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
# np.where(abs(out0[0,:,:,:]-ref['out0'])>1e-18)
qqq3 = np.where(abs(out1[0, :, :, :]-ref['out1'])
> 1e-7) # 3390 diffs with softmax2
print(qqq3[0].shape)
np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
# prob1=sess1.run('prob1:0', feed_dict={data:img})
# print(prob1[0,0,0,:])
# conv42=sess1.run('conv4-2/BiasAdd:0', feed_dict={data:img})
# print(conv42[0,0,0,:])
# conv42, prob1 = pnet_fun(img)
# print(prob1[0,0,0,:])
# print(conv42[0,0,0,:])
# [ 0.9929 0.0071] prob1, caffe
# [ 0.9929 0.0071] prob1, tensorflow
# [ 0.1207 -0.0116 -0.1231 -0.0463] conv4-2, caffe
# [ 0.1207 -0.0116 -0.1231 -0.0463] conv4-2, tensorflow
# g2 = tf.Graph()
# with g2.as_default():
# data = tf.compat.v1.placeholder(tf.float32, (None,24,24,3), 'input')
# rnet = align.detect_face.RNet({'data':data})
# sess2 = tf.compat.v1.Session(graph=g2)
# rnet.load('../../data/det2.npy', sess2)
# rnet_fun = lambda img : sess2.run(('conv5-2/conv5-2:0', 'prob1:0'), feed_dict={'input:0':img})
# np.random.seed(666)
# img = np.random.rand(73,3,24,24)
# img = np.transpose(img, (0,2,3,1))
# np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
#
# prob1=sess2.run('prob1:0', feed_dict={data:img})
# print(prob1[0,:])
#
# conv52=sess2.run('conv5-2/conv5-2:0', feed_dict={data:img})
# print(conv52[0,:])
# [ 0.9945 0.0055] prob1, caffe
# [ 0.1108 -0.0038 -0.1631 -0.0890] conv5-2, caffe
# [ 0.9945 0.0055] prob1, tensorflow
# [ 0.1108 -0.0038 -0.1631 -0.0890] conv5-2, tensorflow
# g3 = tf.Graph()
# with g3.as_default():
# data = tf.compat.v1.placeholder(tf.float32, (None,48,48,3), 'input')
# onet = align.detect_face.ONet({'data':data})
# sess3 = tf.compat.v1.Session(graph=g3)
# onet.load('../../data/det3.npy', sess3)
# onet_fun = lambda img : sess3.run(('conv6-2/conv6-2:0', 'conv6-3/conv6-3:0', 'prob1:0'), feed_dict={'input:0':img})
# np.random.seed(666)
# img = np.random.rand(11,3,48,48)
# img = np.transpose(img, (0,2,3,1))
# np.set_printoptions(formatter={'float': '{: 0.4f}'.format})
#
# prob1=sess3.run('prob1:0', feed_dict={data:img})
# print(prob1[0,:])
# print('prob1, tensorflow')
#
# conv62=sess3.run('conv6-2/conv6-2:0', feed_dict={data:img})
# print(conv62[0,:])
# print('conv6-2, tensorflow')
#
# conv63=sess3.run('conv6-3/conv6-3:0', feed_dict={data:img})
# print(conv63[0,:])
# print('conv6-3, tensorflow')
# [ 0.9988 0.0012] prob1, caffe
# [ 0.0446 -0.0968 -0.1091 -0.0212] conv6-2, caffe
# [ 0.2429 0.6104 0.4074 0.3104 0.5939 0.2729 0.2132 0.5462 0.7863 0.7568] conv6-3, caffe
# [ 0.9988 0.0012] prob1, tensorflow
# [ 0.0446 -0.0968 -0.1091 -0.0212] conv6-2, tensorflow
# [ 0.2429 0.6104 0.4074 0.3104 0.5939 0.2729 0.2132 0.5462 0.7863 0.7568] conv6-3, tensorflow
#pnet_fun = lambda img : sess1.run(('conv4-2/BiasAdd:0', 'prob1:0'), feed_dict={'input:0':img})
|
the-stack_0_13436 | """This module contains the meta information of OrgResolveLogicalParents ExternalMethod."""
from ..ucscentralcoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("OrgResolveLogicalParents", "orgResolveLogicalParents", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"dn": MethodPropertyMeta("Dn", "dn", "ReferenceObject", "Version142b", "InputOutput", False),
"in_hierarchical": MethodPropertyMeta("InHierarchical", "inHierarchical", "Xs:string", "Version142b", "Input", False),
"in_single_level": MethodPropertyMeta("InSingleLevel", "inSingleLevel", "Xs:string", "Version142b", "Input", False),
"out_configs": MethodPropertyMeta("OutConfigs", "outConfigs", "ConfigMap", "Version142b", "Output", True),
}
prop_map = {
"cookie": "cookie",
"dn": "dn",
"inHierarchical": "in_hierarchical",
"inSingleLevel": "in_single_level",
"outConfigs": "out_configs",
}
|
the-stack_0_13437 | """Parent class for every Overkiz device."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from pyoverkiz.enums import OverkizAttribute, OverkizState
from pyoverkiz.models import Device
from homeassistant.components.sensor import SensorEntityDescription
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import OverkizDataUpdateCoordinator
from .executor import OverkizExecutor
class OverkizEntity(CoordinatorEntity):
"""Representation of an Overkiz device entity."""
coordinator: OverkizDataUpdateCoordinator
def __init__(
self, device_url: str, coordinator: OverkizDataUpdateCoordinator
) -> None:
"""Initialize the device."""
super().__init__(coordinator)
self.device_url = device_url
self.base_device_url, *_ = self.device_url.split("#")
self.executor = OverkizExecutor(device_url, coordinator)
self._attr_assumed_state = not self.device.states
self._attr_available = self.device.available
self._attr_unique_id = self.device.device_url
self._attr_name = self.device.label
self._attr_device_info = self.generate_device_info()
@property
def device(self) -> Device:
"""Return Overkiz device linked to this entity."""
return self.coordinator.data[self.device_url]
def generate_device_info(self) -> DeviceInfo:
"""Return device registry information for this entity."""
        # Some devices, such as the Smart Thermostat, expose several logical devices in one
        # physical device, sharing the same device URL terminated by '#' and a number.
        # In this case, we use the base device URL as the device identifier.
if "#" in self.device_url and not self.device_url.endswith("#1"):
# Only return the url of the base device, to inherit device name and model from parent device.
return {
"identifiers": {(DOMAIN, self.executor.base_device_url)},
}
manufacturer = (
self.executor.select_attribute(OverkizAttribute.CORE_MANUFACTURER)
or self.executor.select_state(OverkizState.CORE_MANUFACTURER_NAME)
or self.coordinator.client.server.manufacturer
)
model = (
self.executor.select_state(
OverkizState.CORE_MODEL,
OverkizState.CORE_PRODUCT_MODEL_NAME,
OverkizState.IO_MODEL,
)
or self.device.widget
)
return DeviceInfo(
identifiers={(DOMAIN, self.executor.base_device_url)},
name=self.device.label,
manufacturer=manufacturer,
model=model,
sw_version=self.executor.select_attribute(
OverkizAttribute.CORE_FIRMWARE_REVISION
),
hw_version=self.device.controllable_name,
suggested_area=self.coordinator.areas[self.device.place_oid],
via_device=self.executor.get_gateway_id(),
configuration_url=self.coordinator.client.server.configuration_url,
)
@dataclass
class OverkizSensorDescription(SensorEntityDescription):
"""Class to describe an Overkiz sensor."""
native_value: Callable[
[str | int | float], str | int | float
] | None = lambda val: val
class OverkizDescriptiveEntity(OverkizEntity):
"""Representation of a Overkiz device entity based on a description."""
def __init__(
self,
device_url: str,
coordinator: OverkizDataUpdateCoordinator,
description: OverkizSensorDescription,
) -> None:
"""Initialize the device."""
super().__init__(device_url, coordinator)
self.entity_description = description
self._attr_name = f"{super().name} {self.entity_description.name}"
self._attr_unique_id = f"{super().unique_id}-{self.entity_description.key}"
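# Illustrative sketch only (not part of the integration): how a description based on the
# dataclass above might be declared. The key, name and rounding rule are hypothetical.
_EXAMPLE_SENSOR_DESCRIPTION = OverkizSensorDescription(
    key="core:TemperatureState",  # hypothetical Overkiz state key
    name="Temperature",
    native_value=lambda value: round(float(value), 1),  # normalize the raw state value
)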
|
the-stack_0_13439 | import torch
import os
import numpy as np
from tqdm import tqdm
from data_package.smoke_dataset import SmokeDataset
from model_package.mlp_mixer import MLPMixer
from model_package.resnet import resnet18, resnet34,resnext50_32x4d
from torch.utils.data import DataLoader
from data_package.data_transform import VideoTransform
root_dirs = [
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/base_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/DeAn_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/ZhangYe_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/XinXiang_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/HeNeng_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/TongHua_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/GuRun_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/YunJing_dataset",
# "/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/WanZai_dataset",
# "D:\data\smoke_car\MLP_data\\base_dataset",
# "D:\data\smoke_car\MLP_data\\DeAn_dataset",
# "D:\data\smoke_car\MLP_data\\ZhangYe_dataset",
# "D:\data\smoke_car\MLP_data\\XinXiang_dataset",
# "D:\data\smoke_car\MLP_data\\HeNeng_dataset",
# "D:\data\smoke_car\MLP_data\\TongHua_dataset",
# "D:\data\smoke_car\MLP_data\\GuRun_dataset",
# "D:\data\smoke_car\MLP_data\\YunJing_dataset"
]
test_dris = [
# "D:\data\smoke_car\MLP_data\\WanZai_dataset",
"/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/WanZai_dataset",
# "/home/liuky/HDD_1/data/smoke/train_data/smoke_classification_data/test_dataset",
]
load_weight = "weights/ResNet_C2_E60.snap"
save_model_name = "ResNet"
batch_size = 16
init_lr = 0.01
lr_steps = [50, 100, 150, 200, 250]
start_epoch = 0
max_epoch = 300
use_cuda = False
def train():
# model = MLPMixer(in_channels=96,
# num_patch=25 * 25,
# patch_size=25,
# num_classes=2,
# dim=512,
# depth=8,
# token_dim=256,
# channel_dim=2048
# )
    model = resnext50_32x4d(
        True,  # pretrained flag; the original expression (True if start_epoch > 0 else True) always evaluated to True
        num_classes=2)
if use_cuda:
model = model.cuda()
if len(load_weight) > 0 and start_epoch > 0:
print("|INFO|loading model:%s|" % load_weight)
static_dict = torch.load(load_weight)
model.load_state_dict(static_dict)
criterion = torch.nn.CrossEntropyLoss()
optim = torch.optim.Adam([{"params": model.parameters(), "initial_lr": init_lr}], lr=init_lr)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, lr_steps, 0.1, last_epoch=start_epoch)
dataset = SmokeDataset(root_dirs=root_dirs,
transform=VideoTransform(size=100, flip_p=0.5, std=255.,
use_bright_contrast=True,
horizontal_flip=True,
vertical_flip=True,
random_sample=False)
)
data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, collate_fn=SmokeDataset.collate)
test_dataset = SmokeDataset(root_dirs=test_dris,
transform=VideoTransform(size=100, flip_p=0.5, std=255.,
use_bright_contrast=False,
horizontal_flip=False,
vertical_flip=False,
random_sample=False)
)
test_data_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False,
collate_fn=SmokeDataset.collate)
for epoch in range(start_epoch, max_epoch):
process_bar = tqdm(data_loader, ncols=180)
total_loss = 0
total_acc = 0
model.eval()
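        # NOTE: this validation pass runs at the start of every epoch, before training, and is
        # not wrapped in torch.no_grad(), so autograd state is built for its forward passes.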
for idx, (data, label) in enumerate(test_data_loader):
if use_cuda:
data = data.cuda()
label = label.cuda()
logit = model(data)
pred = torch.argmax(logit, 1)
acc = pred == label
            acc = acc.sum() / label.size(0)  # use the actual batch size; the last batch can be smaller
total_acc += acc
print("\n|INFO|acc:%.4f|" % (total_acc / (idx + 1)))
model.train()
for idx, (data, label) in enumerate(process_bar):
if use_cuda:
data = data.cuda()
label = label.cuda()
logit = model(data)
optim.zero_grad()
            loss = criterion(logit, label)  # call the module directly rather than .forward()
loss.backward()
optim.step()
total_loss += loss.data
process_bar.desc = "|INFO|epoch:%d|step:%d|loss:%.4f/%.4f|lr:%f|" % (
epoch, idx, loss.data, total_loss.data / (idx + 1), optim.param_groups[0]["lr"])
lr_scheduler.step()
        # periodically save a checkpoint
        if (epoch % 10 == 0) and epoch > 0:
save_path = os.path.abspath('weights/%s_C2_E%d.snap' % (save_model_name,epoch))
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
torch.save(model.state_dict(), save_path)
print("\n|INFO|save model in %s|" % save_path)
if __name__ == '__main__':
train()
from torchvision.models import resnext50_32x4d  # NOTE: likely leftover; this re-binds the name imported from model_package.resnet above
|
the-stack_0_13440 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class _Info(object):
def __init__(self, name, _type=None, entry_type=None):
self._name = name
self._type = _type
if entry_type is not None and self._type != 'GenericSet':
raise ValueError(
'entry_type should only be specified if _type is GenericSet')
self._entry_type = entry_type
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def entry_type(self):
return self._entry_type
ANGLE_REVISIONS = _Info('angleRevisions', 'GenericSet', str)
ARCHITECTURES = _Info('architectures', 'GenericSet', str)
BENCHMARKS = _Info('benchmarks', 'GenericSet', str)
BENCHMARK_START = _Info('benchmarkStart', 'DateRange')
BENCHMARK_DESCRIPTIONS = _Info('benchmarkDescriptions', 'GenericSet', str)
BOTS = _Info('bots', 'GenericSet', str)
BUG_COMPONENTS = _Info('bugComponents', 'GenericSet', str)
BUILD_URLS = _Info('buildUrls', 'GenericSet', str)
BUILDS = _Info('builds', 'GenericSet', int)
CATAPULT_REVISIONS = _Info('catapultRevisions', 'GenericSet', str)
CHROMIUM_COMMIT_POSITIONS = _Info('chromiumCommitPositions', 'GenericSet', int)
CHROMIUM_REVISIONS = _Info('chromiumRevisions', 'GenericSet', str)
DESCRIPTION = _Info('description', 'GenericSet', str)
DEVICE_IDS = _Info('deviceIds', 'GenericSet', str)
DOCUMENTATION_URLS = _Info('documentationLinks', 'GenericSet', str)
FUCHSIA_GARNET_REVISIONS = _Info('fuchsiaGarnetRevisions', 'GenericSet', str)
FUCHSIA_PERIDOT_REVISIONS = _Info('fuchsiaPeridotRevisions', 'GenericSet', str)
FUCHSIA_TOPAZ_REVISIONS = _Info('fuchsiaTopazRevisions', 'GenericSet', str)
FUCHSIA_ZIRCON_REVISIONS = _Info('fuchsiaZirconRevisions', 'GenericSet', str)
GPUS = _Info('gpus', 'GenericSet', str)
HAD_FAILURES = _Info('hadFailures', 'GenericSet', bool)
IS_REFERENCE_BUILD = _Info('isReferenceBuild', 'GenericSet', bool)
LABELS = _Info('labels', 'GenericSet', str)
LOG_URLS = _Info('logUrls', 'GenericSet', str)
MASTERS = _Info('masters', 'GenericSet', str)
MEMORY_AMOUNTS = _Info('memoryAmounts', 'GenericSet', int)
OS_NAMES = _Info('osNames', 'GenericSet', str)
OS_VERSIONS = _Info('osVersions', 'GenericSet', str)
OWNERS = _Info('owners', 'GenericSet', str)
POINT_ID = _Info('pointId', 'GenericSet', int)
PRODUCT_VERSIONS = _Info('productVersions', 'GenericSet', str)
REVISION_TIMESTAMPS = _Info('revisionTimestamps', 'DateRange')
SKIA_REVISIONS = _Info('skiaRevisions', 'GenericSet', str)
STATISTICS_NAMES = _Info('statisticsNames', 'GenericSet', str)
STORIES = _Info('stories', 'GenericSet', str)
STORYSET_REPEATS = _Info('storysetRepeats', 'GenericSet', int)
STORY_TAGS = _Info('storyTags', 'GenericSet', str)
SUMMARY_KEYS = _Info('summaryKeys', 'GenericSet', str)
TEST_PATH = _Info('testPath', 'GenericSet', str)
TRACE_START = _Info('traceStart', 'DateRange')
TRACE_URLS = _Info('traceUrls', 'GenericSet', str)
V8_COMMIT_POSITIONS = _Info('v8CommitPositions', 'DateRange')
V8_REVISIONS = _Info('v8Revisions', 'GenericSet', str)
WEBRTC_REVISIONS = _Info('webrtcRevisions', 'GenericSet', str)
WEBRTC_INTERNAL_REVISIONS = _Info('webrtcInternalRevisions', 'GenericSet', str)
def _CreateCachedInfoTypes():
info_types = {}
for info in globals().values():
if isinstance(info, _Info):
info_types[info.name] = info
return info_types
_CACHED_INFO_TYPES = _CreateCachedInfoTypes()
def GetTypeForName(name):
info = _CACHED_INFO_TYPES.get(name)
if info:
return info.type
def AllInfos():
for info in _CACHED_INFO_TYPES.values():
yield info
def AllNames():
for info in AllInfos():
yield info.name
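# Minimal usage sketch (not part of the original module): the helpers above are typically
# used to look up the diagnostic type registered for a reserved name.
def _print_reserved_info_types():
  for info in AllInfos():
    print('%s: %s (entry type: %s)' % (info.name, info.type, info.entry_type))
  # GetTypeForName returns None for names that are not reserved.
  assert GetTypeForName('bots') == 'GenericSet'
  assert GetTypeForName('notReserved') is None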
|
the-stack_0_13442 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from uuid import uuid4
import pytest
import helpers
import upload_model_explain_tabular_managed_container_sample
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
IMAGE_URI = "gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest"
ARTIFACT_URI = "gs://ucaip-samples-us-central1/model/boston_housing/"
DISPLAY_NAME = f"temp_upload_model_test_{uuid4()}"
INPUT_TENSOR_NAME = "dense_input"
OUTPUT_TENSOR_NAME = "dense_2"
@pytest.fixture(scope="function", autouse=True)
def teardown(teardown_model):
yield
@pytest.mark.skip(reason="https://github.com/googleapis/java-aiplatform/issues/420")
def test_ucaip_generated_upload_model_explain_tabular_managed_constainer_sample(capsys, shared_state):
upload_model_explain_tabular_managed_container_sample.upload_model_explain_tabular_managed_container_sample(
display_name=DISPLAY_NAME,
artifact_uri=ARTIFACT_URI,
container_spec_image_uri=IMAGE_URI,
project=PROJECT_ID,
input_tensor_name=INPUT_TENSOR_NAME,
output_tensor_name=OUTPUT_TENSOR_NAME,
feature_names=["crim", "zn", "indus", "chas", "nox", "rm", "age",
"dis", "rad", "tax", "ptratio", "b", "lstat"]
)
out, _ = capsys.readouterr()
shared_state["model_name"] = helpers.get_name(out, key="model")
|
the-stack_0_13443 | import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules.Module import ModuleError, Module
class Glue(Module):
def __init__(self, modules=None, fwdGlue=None, bwdGlue=None, fwdShapeGlue=None, bwdShapeGlue=None, name=None):
super().__init__(name)
if modules is not None and not isinstance(modules, dict):
raise ModuleError("Modules object must be non-empty dictionary")
self.modules = modules
self.fwdGlue = fwdGlue
self.bwdGlue = bwdGlue
self.fwdShapeGlue = fwdShapeGlue
self.bwdShapeGlue = bwdShapeGlue
def updateData(self, data):
self.data = self.fwdGlue(data, self.modules)
def updateGrad(self, grad):
self.grad = self.bwdGlue(grad, self.modules)
def dataShapeFrom(self, shape):
if self.fwdShapeGlue is not None:
return self.fwdShapeGlue(shape)
else:
raise ModuleError("Forward shape glue hook is not installed")
def gradShapeFrom(self, shape):
if self.bwdShapeGlue is not None:
return self.bwdShapeGlue(shape)
else:
raise ModuleError("Backward shape glue hook is not installed")
def unittest():
data1 = gpuarray.to_gpu(np.random.randn(10, 2, 3, 3).astype(np.float32))
data2 = gpuarray.to_gpu(np.random.randn(10, 2, 3, 3).astype(np.float32))
data3 = gpuarray.to_gpu(np.random.randn(10, 10).astype(np.float32))
def fwdGlue(data, modules):
dat1, dat2, dat3 = data
split = modules["split"]
out1, out2 = split(data3)
return [dat1 + dat2, out1, out2]
def bwdGlue(grad, modules):
gr1, gr2, gr3 = grad
split = modules["split"]
split.backward([gr2, gr3])
return [gr1, gr1, split.grad]
from PuzzleLib.Modules.Split import Split
glue = Glue(fwdGlue=fwdGlue, bwdGlue=bwdGlue, modules={"split": Split(axis=1, sections=(5, 5))})
glue([data1, data2, data3])
grad1 = gpuarray.to_gpu(np.random.randn(*glue.data[0].shape).astype(np.float32))
grad2 = gpuarray.to_gpu(np.random.randn(*glue.data[1].shape).astype(np.float32))
grad3 = gpuarray.to_gpu(np.random.randn(*glue.data[2].shape).astype(np.float32))
glue.backward([grad1, grad2, grad3])
if __name__ == "__main__":
unittest()
|
the-stack_0_13445 | import os
import re
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
RECEIVED_FILE_CHAR_LIMIT = 50 * 1000
# The limit in number of characters of files to accept
def new_file(basename, content):
"""
The method creates a new File object or derived object from File based on the basename file extension. This should
be used to create Files instead of using the File object constructors. This also ensures that the file is not longer
than the file char limit.
:param basename:
The file name
:param content:
The file content
:return:
Returns a File or derived File object
"""
if len(content) > RECEIVED_FILE_CHAR_LIMIT:
raise Exception(f"File {basename} exceeds size limits")
fn, ext = os.path.splitext(basename)
if ext == ".adb" or ext == ".ads":
return AdaFile(basename, content)
elif ext == ".c" or ext == ".h":
return CFile(basename, content)
elif ext == ".cpp" or ext == ".hh":
return CPPFile(basename, content)
elif ext == ".gpr":
return ProjectFile(basename, content)
else:
return File(basename, content)
def find_mains(filelist):
"""
This checks a list of files to find files that can be considered mains. For Ada files, the criteria is that the
adb file does not have a corresponding ads file. For C files, we use the CFile.is_main() method.
:param filelist:
The list of files to check for mains
:return:
The list of files that have mains
"""
mains = []
for f in filelist:
logger.debug(f"Checking {f.get_name()} for main")
if f.language() == "Ada":
filename = f.get_name()
base, ext = os.path.splitext(filename)
if ext == ".adb":
logger.debug(f"Looking for spec for {f.get_name()}")
if not next((x for x in filelist if x.get_name() == (base + ".ads")), None):
logger.debug(f"Found main in {f.get_name()}")
mains.append(filename)
else:
if f.is_main():
mains.append(f.get_name())
logger.debug(f"Found main in {f.get_name()}")
return mains
class File:
"""
This is the base File class used to represent generic Files.
Attributes
----------
basename : str
The file name
content : str
        The file content
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
        Returns the coding language for the file if any
is_main()
Checks if the file is a main
"""
def __init__(self, basename, content):
"""
Constructor for File. THIS SHOULD NOT BE CALLED DIRECTLY!! Use new_file method instead.
:param basename:
File name
:param content:
File content
"""
self.basename = basename
self.content = content
def get_name(self):
"""
Returns the name of the file
:return:
The file name
"""
return self.basename
def get_content(self):
"""
Returns the content of the file
:return:
The file content
"""
return self.content
def language(self):
"""
Returns the language for the file
:return:
Returns the file language or None
"""
return None
def is_main(self):
"""
Returns if the file is/has a main. Valid for C or CPP only now.
:return:
Returns True if the file has/is a main
"""
return False
class AdaFile(File):
"""
Class for an Ada file. Inherits from File.
Attributes
----------
basename : str
The file name
content : str
        The file content
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
        Returns the coding language for the file if any
is_main()
Checks if the file is a main
"""
def is_main(self):
"""
This should check if the Ada file is a main. This is unimplemented and shouldn't be used
"""
# TODO: figure out how to do this
raise NotImplementedError
def language(self):
"""
Returns "Ada"
:return:
The language string
"""
return "Ada"
class CFile(File):
"""
Class for a C file. Inherits from File.
Attributes
----------
basename : str
The file name
content : str
        The file content
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
        Returns the coding language for the file if any
is_main()
Checks if the file is a main
"""
def is_main(self):
"""
Uses a regex to compute if the C file has the right function layout/name for a main
:return:
True if the regex matches
"""
        main_re = re.compile(r"^(?:void|int) +main\(.*\)(?: |\n)*{", re.MULTILINE)
return main_re.findall(self.content)
def language(self):
"""
Returns "c"
:return:
The language string
"""
return "c"
class CPPFile(CFile):
"""
Class for a CPP file. Inherits from CFile.
Attributes
----------
basename : str
The file name
content : str
        The file content
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
        Returns the coding language for the file if any
is_main()
Checks if the file is a main
"""
def language(self):
"""
Returns "c++"
:return:
The language string
"""
return "c++"
class ProjectFile(File):
"""
Class for a Project file. Inherits from File.
Attributes
----------
basename : str
The file name
content : str
        The file content
    allowed_switches : dict
        the list of allowed switches to apply to gpr packages
Methods
-------
get_name()
Returns the name of the file
get_content()
Returns the content of the file
language()
        Returns the coding language for the file if any
is_main()
Checks if the file is a main
    insert_languages(languages)
Inserts the languages for the project into the project file
define_mains(mains)
Inserts the mains for the project into the project file
"""
allowed_switches = {
'Builder': ['-g'],
'Compiler': ['-g', '-O0', '-gnata', '-gnatwa', '-gnato', '-gnato0', '-gnato11', '-gnato23',
'-gnato21', '-gnato22']
}
def insert_languages(self, languages):
"""
Inserts languages into the correct place in the project file
:param languages:
The list of languages to add to the project
"""
lang_list = [f'"{x}"' for x in languages]
to_insert = f"for Languages use ({', '.join(lang_list)});"
self.content = self.content.replace("--LANGUAGE_PLACEHOLDER--", to_insert)
def define_mains(self, mains):
"""
Inserts the mains into the correct place in the project file
:param mains:
The list of mains to add to the project
"""
main_list = [f'"{x}"' for x in mains]
to_insert = f"for Main use ({', '.join(main_list)});"
self.content = self.content.replace("--MAIN_PLACEHOLDER--", to_insert)
def insert_switches(self, switch_list):
sw_dict = {}
regex = re.compile(r'(Builder|Compiler)\((.+)\)')
for sec in switch_list:
match = regex.search(sec)
if match:
pkg_name = match.group(1)
switches = set(match.group(2).split(','))
if pkg_name in sw_dict.keys():
sw_dict[pkg_name] = sw_dict[pkg_name] | switches
else:
sw_dict[pkg_name] = switches
for pkg, unfiltered_switches in sw_dict.items():
filtered_switches = []
for switch in unfiltered_switches:
if switch in self.allowed_switches[pkg]:
filtered_switches.append('"' + switch + '"')
else:
logger.error(f"Illegal switch requested in pkg {pkg}: {switch}")
if filtered_switches:
placeholder_str = "--" + pkg.upper() + "_SWITCHES_PLACEHOLDER--"
switches_str = ', '.join(filtered_switches)
line_str = f'for Switches ("Ada") use ({switches_str});'
logger.debug(f"Adding line {line_str} to pkg {pkg}")
self.content = self.content.replace(placeholder_str, line_str)
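# Minimal usage sketch (illustrative only; the file names and contents below are made up):
# new_file() picks the right class from the extension and find_mains() then reports which
# of the resulting files look like entry points.
if __name__ == "__main__":
    example_sources = [
        new_file("main.c", "int main(void) {\n    return 0;\n}\n"),
        new_file("helper.h", "void helper(void);\n"),
    ]
    print(find_mains(example_sources))  # expected to print ['main.c']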
|
the-stack_0_13448 | import unittest
from conans.client import tools
from conans.client.build.visual_environment import VisualStudioBuildEnvironment
from conans.test.utils.mocks import MockSettings, MockConanfile
from conans.test.utils.tools import TestClient
class VisualStudioBuildEnvironmentTest(unittest.TestCase):
def test_visual(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"compiler.runtime": "MDd"})
conanfile = MockConanfile(settings)
conanfile.deps_cpp_info.include_paths.append("/one/include/path")
conanfile.deps_cpp_info.include_paths.append("/two/include/path")
conanfile.deps_cpp_info.lib_paths.append("/one/lib/path")
conanfile.deps_cpp_info.lib_paths.append("/two/lib/path")
conanfile.deps_cpp_info.cflags.append("-mycflag")
conanfile.deps_cpp_info.cflags.append("-mycflag2")
conanfile.deps_cpp_info.cxxflags.append("-mycxxflag")
conanfile.deps_cpp_info.cxxflags.append("-mycxxflag2")
conanfile.deps_cpp_info.exelinkflags.append("-myexelinkflag")
conanfile.deps_cpp_info.sharedlinkflags.append("-mysharedlinkflag")
conanfile.deps_cpp_info.libs.extend(['gdi32', 'user32.lib'])
tool = VisualStudioBuildEnvironment(conanfile)
self.assertEqual(tool.vars_dict, {
"CL": ["-I/one/include/path", "-I/two/include/path",
'-MDd',
'-mycflag',
'-mycflag2',
'-Zi',
'-Ob0',
'-Od',
'-mycxxflag',
'-mycxxflag2'],
"LIB": ["/one/lib/path", "/two/lib/path"],
"UseEnv": "True",
"_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
})
tool.parallel = True
self.assertEqual(tool.vars_dict, {
"CL": ["-I/one/include/path", "-I/two/include/path",
'-MDd',
'-mycflag',
'-mycflag2',
'-Zi',
'-Ob0',
'-Od',
'-mycxxflag',
'-mycxxflag2',
'/MP%s' % tools.cpu_count(output=conanfile.output)],
"LIB": ["/one/lib/path", "/two/lib/path"],
"UseEnv": "True",
"_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
})
tool.parallel = False
# Now alter the paths before the vars_dict call
tool.include_paths.append("/three/include/path")
tool.lib_paths.append("/three/lib/path")
self.assertEqual(tool.vars_dict, {
"CL": ["-I/one/include/path",
"-I/two/include/path",
"-I/three/include/path",
'-MDd',
'-mycflag',
'-mycflag2',
'-Zi',
'-Ob0',
'-Od',
'-mycxxflag',
'-mycxxflag2'],
"LIB": ["/one/lib/path", "/two/lib/path", "/three/lib/path"],
"UseEnv": "True",
"_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
})
# Now try appending to environment
with tools.environment_append({"CL": "-I/four/include/path -I/five/include/path",
"LIB": "/four/lib/path;/five/lib/path"}):
self.assertEqual(tool.vars_dict, {
"CL": ["-I/one/include/path", "-I/two/include/path",
"-I/three/include/path",
'-MDd',
'-mycflag',
'-mycflag2',
'-Zi',
'-Ob0',
'-Od',
'-mycxxflag',
'-mycxxflag2',
"-I/four/include/path -I/five/include/path"],
"LIB": ["/one/lib/path", "/two/lib/path", "/three/lib/path",
"/four/lib/path;/five/lib/path"],
"UseEnv": "True",
"_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
})
self.assertEqual(tool.vars, {
"CL": '-I"/one/include/path" -I"/two/include/path" -I"/three/include/path" -MDd '
'-mycflag -mycflag2 -Zi -Ob0 -Od '
'-mycxxflag -mycxxflag2 '
'-I/four/include/path -I/five/include/path',
"LIB": "/one/lib/path;/two/lib/path;/three/lib/path;/four/lib/path;/five/lib/path",
"UseEnv": "True",
"_LINK_": "-myexelinkflag -mysharedlinkflag gdi32.lib user32.lib"
})
def test_build_type_toolset(self):
profile = """
[settings]
os=Windows
compiler=Visual Studio
compiler.version=15
build_type=Release
"""
profile_toolset = """
[settings]
os=Windows
compiler=Visual Studio
compiler.version=15
compiler.toolset=v141
build_type=Release
"""
profile_toolset_clang = """
[settings]
os=Windows
compiler=Visual Studio
compiler.version=15
build_type=Release
compiler.toolset=v141_clang_c2
"""
conanfile = """
from conans import ConanFile, VisualStudioBuildEnvironment
class TestConan(ConanFile):
name = "testlib"
version = "1.0"
settings = "compiler", "build_type", "os"
def build(self):
env_build = VisualStudioBuildEnvironment(self)
self.output.info(env_build.flags)
"""
client = TestClient()
client.save({"profile": profile,
"profile_toolset": profile_toolset,
"profile_toolset_clang": profile_toolset_clang,
"conanfile.py": conanfile})
result = {"Debug": "['-Zi', '-Ob0', '-Od']",
"Release": "['-DNDEBUG', '-O2', '-Ob2']",
"RelWithDebInfo": "['-DNDEBUG', '-Zi', '-O2', '-Ob1']",
"MinSizeRel": "['-DNDEBUG', '-O1', '-Ob1']"}
result_toolset_clang = {"Debug": "['-gline-tables-only', '-fno-inline', '-O0']",
"Release": "['-DNDEBUG', '-O2']",
"RelWithDebInfo": "['-DNDEBUG', '-gline-tables-only', '-O2', '-fno-inline']",
"MinSizeRel": "['-DNDEBUG']"}
for build_type in ["Debug", "Release", "RelWithDebInfo", "MinSizeRel"]:
client.run("create . danimtb/testing -pr=profile -s build_type=%s" % build_type)
self.assertIn(result[build_type], client.out)
client.run("create . danimtb/testing -pr=profile_toolset -s build_type=%s" % build_type)
self.assertIn(result[build_type], client.out)
client.run("create . danimtb/testing -pr=profile_toolset_clang -s build_type=%s" %
build_type)
self.assertIn(result_toolset_clang[build_type], client.out)
|