max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5) |
---|---|---|---|---|---|---|---|---|---|---|
accalib/utils.py | pj0620/acca-video-series | 0 | 5900 | from manimlib.imports import *
from manimlib.utils import bezier
import numpy as np
class VectorInterpolator:
def __init__(self,points):
self.points = points
self.n = len(self.points)
self.dists = [0]
for i in range(len(self.points)):
self.dists += [np.linalg.norm(
self.points[i] -
self.points[(i+1) % self.n]
)+self.dists[i]]
def interpolate(self,alpha):
dist = alpha*self.dists[-1]
idx = self.interpolate_index(dist)
mult = (dist - self.dists[idx])/np.linalg.norm(self.points[(idx+1)%self.n]-self.points[idx])
return self.points[idx] + \
mult*(self.points[(idx+1)%self.n]-self.points[idx])
def interpolate_index(self,dist):
def is_solution(idx):
if idx == self.n-1:
return self.dists[idx] <= dist
else:
                return ((self.dists[idx] <= dist) and
                        (self.dists[(idx+1)%self.n] >= dist))
# binary search
step_size=int(self.n / 4)
cur=int(self.n / 2)
while not is_solution(cur):
if self.dists[cur] > dist:
cur -= step_size
else:
cur += step_size
step_size = max(int(step_size/2), 1)
return cur | from manimlib.imports import *
from manimlib.utils import bezier
import numpy as np
class VectorInterpolator:
def __init__(self,points):
self.points = points
self.n = len(self.points)
self.dists = [0]
for i in range(len(self.points)):
self.dists += [np.linalg.norm(
self.points[i] -
self.points[(i+1) % self.n]
)+self.dists[i]]
def interpolate(self,alpha):
dist = alpha*self.dists[-1]
idx = self.interpolate_index(dist)
mult = (dist - self.dists[idx])/np.linalg.norm(self.points[(idx+1)%self.n]-self.points[idx])
return self.points[idx] + \
mult*(self.points[(idx+1)%self.n]-self.points[idx])
def interpolate_index(self,dist):
def is_solution(idx):
if idx == self.n-1:
return self.dists[idx] <= dist
else:
                return ((self.dists[idx] <= dist) and
                        (self.dists[(idx+1)%self.n] >= dist))
# binary search
step_size=int(self.n / 4)
cur=int(self.n / 2)
while not is_solution(cur):
if self.dists[cur] > dist:
cur -= step_size
else:
cur += step_size
step_size = max(int(step_size/2), 1)
return cur | en | 0.677804 | # binary search | 2.834894 | 3 |
setup.py | def-mycroft/rapid-plotly | 1 | 5901 | from setuptools import setup
setup(name='rapid_plotly',
version='0.1',
description='Convenience functions to rapidly create beautiful Plotly graphs',
author='<NAME>',
author_email='<EMAIL>',
packages=['rapid_plotly'],
zip_safe=False)
| from setuptools import setup
setup(name='rapid_plotly',
version='0.1',
description='Convenience functions to rapidly create beautiful Plotly graphs',
author='<NAME>',
author_email='<EMAIL>',
packages=['rapid_plotly'],
zip_safe=False)
| none | 1 | 1.206795 | 1 |
|
dodo.py | enerqi/bridge-bidding-systems | 2 | 5902 | <filename>dodo.py
#! /usr/bin/doit -f
# https://pydoit.org
# `pip install [--user] doit` adds `doit.exe` to the PATH
# - Note `doit auto`, the file watcher only works on Linux/Mac
# - All commands are relative to dodo.py (doit runs in the working dir of dodo.py
# even if ran from a different directory `doit -f path/to/dodo.py`)
from glob import glob
import json
from os import environ
from os.path import abspath, basename, dirname, exists, expanduser, join, splitext
from shutil import copyfile
from typing import Iterator, List, NewType, Optional
from doit.tools import title_with_actions
Path = NewType("Path", str)
home = Path(expanduser("~"))
bml_tools_dir = Path(environ.get("BML_TOOLS_DIRECTORY", join(home, "dev/bml")))
bml_includes_cache_file = ".include-deps.json"
def bml_include_dependencies(bml_path: Path) -> List[Path]:
# bml files can include others, so spend time scanning every bml file
# for new include directives every time a bml file is saved
def includes(file_handle) -> Iterator[Path]:
for line in file_handle.readlines():
line = line.strip()
if line.startswith("#INCLUDE"):
include_directive_tokens = line.split(maxsplit=1)
if len(include_directive_tokens) > 1:
# We assume the file name is not quoted, just a free form path string
included_file = include_directive_tokens[1].strip()
yield Path(included_file)
with open(bml_path, encoding='utf-8') as f:
unique_deps = {include for include in includes(f) if include != bml_path}
return list(unique_deps)
def read_bml_includes_cache(bml_path: Path) -> Optional[List[Path]]:
if not exists(bml_includes_cache_file):
return None
with open(bml_includes_cache_file, encoding='utf-8') as f:
try:
existing_deps = json.load(f)
except Exception:
# Manually edited messed up json perhaps
return None
if bml_path in existing_deps:
return existing_deps[bml_path]
else:
return None # Manually edited perhaps (assuming we got the task order correct)
def update_bml_includes_cache(bml_path: Path, bml_deps: List[Path]):
existing_deps = {}
if exists(bml_includes_cache_file):
with open(bml_includes_cache_file, encoding='utf-8') as f:
try:
existing_deps = json.load(f)
except Exception:
pass
existing_deps[bml_path] = bml_deps
with open(bml_includes_cache_file, "w", encoding='utf-8') as f:
json.dump(existing_deps, f, indent=4)
def task_bml_include_cache():
"""Populate the bml include cache."""
input_bml_file_paths = glob("*.bml")
def calc_include_deps_and_cache(file_dep) -> None:
bml_path = Path(file_dep)
bml_deps = bml_include_dependencies(bml_path)
update_bml_includes_cache(bml_path, bml_deps)
for bml_path in input_bml_file_paths:
# We don't use a target as doit cannot deal with more than one input file affecting the same output file
# and we are using a single cache file instead of one cache file per input file.
# This does mean that we are using the order of the tasks in this file to have the include cache updated
# before the html task reads the include cache as part of determining changing file dependencies
# The html task itself cannot use the include cache file as a doit file_dep dependency as it is being updated
# by other unrelated bml file changes.
        # Actually, using a different notion of an update (not just tracking file modifications) is another feature of
# doit that could be applied if interested enough.
yield {
'name': basename(bml_path),
'actions': [(calc_include_deps_and_cache, [bml_path])],
'file_dep': [bml_path],
'title': title_with_actions
}
def task_bml2html():
"""Create html file from bridge bidding markup language file."""
bml2html_path = Path(join(bml_tools_dir, "bml2html.py"))
input_bml_file_paths = glob("*.bml")
def html_output_path(bml_path: Path) -> Path:
return Path(splitext(bml_path)[0] + ".html")
for bml_path in input_bml_file_paths:
bml_deps = read_bml_includes_cache(bml_path)
if bml_deps is None:
bml_deps = bml_include_dependencies(bml_path)
update_bml_includes_cache(bml_path, bml_deps)
yield {
'name': basename(bml_path),
'actions': [f"python {bml2html_path} {bml_path}"],
'file_dep': [bml_path] + bml_deps,
'targets': [html_output_path(bml_path)],
'title': title_with_actions
}
def task_bmlcss():
"""Copy the bml CSS style sheet to this directory."""
css_basename = "bml.css"
src_css_file = Path(join(bml_tools_dir, css_basename))
def copy_file() -> None:
# OS neutral compared to running a shell command
copyfile(src_css_file, css_basename)
return {
'actions': [copy_file],
'file_dep': [src_css_file],
'targets': [css_basename],
'title': title_with_actions
}
def task_publish_main_bidding():
"""Copy the main bidding html and css document to the web server root."""
src_file = "bidding-system.html"
dst_file = f"W:/{src_file}"
css_file = "bml.css"
dst_css = f"W:/{css_file}"
def copy_file(dependencies, targets) -> None:
copyfile(dependencies[0], targets[0])
for src, dst in [(src_file, dst_file), (css_file, dst_css)]:
yield {
'name': basename(src),
'actions': [copy_file],
'file_dep': [src],
'targets': [dst],
'title': title_with_actions
}
| <filename>dodo.py
#! /usr/bin/doit -f
# https://pydoit.org
# `pip install [--user] doit` adds `doit.exe` to the PATH
# - Note `doit auto`, the file watcher only works on Linux/Mac
# - All commands are relative to dodo.py (doit runs in the working dir of dodo.py
# even if ran from a different directory `doit -f path/to/dodo.py`)
from glob import glob
import json
from os import environ
from os.path import abspath, basename, dirname, exists, expanduser, join, splitext
from shutil import copyfile
from typing import Iterator, List, NewType, Optional
from doit.tools import title_with_actions
Path = NewType("Path", str)
home = Path(expanduser("~"))
bml_tools_dir = Path(environ.get("BML_TOOLS_DIRECTORY", join(home, "dev/bml")))
bml_includes_cache_file = ".include-deps.json"
def bml_include_dependencies(bml_path: Path) -> List[Path]:
# bml files can include others, so spend time scanning every bml file
# for new include directives every time a bml file is saved
def includes(file_handle) -> Iterator[Path]:
for line in file_handle.readlines():
line = line.strip()
if line.startswith("#INCLUDE"):
include_directive_tokens = line.split(maxsplit=1)
if len(include_directive_tokens) > 1:
# We assume the file name is not quoted, just a free form path string
included_file = include_directive_tokens[1].strip()
yield Path(included_file)
with open(bml_path, encoding='utf-8') as f:
unique_deps = {include for include in includes(f) if include != bml_path}
return list(unique_deps)
def read_bml_includes_cache(bml_path: Path) -> Optional[List[Path]]:
if not exists(bml_includes_cache_file):
return None
with open(bml_includes_cache_file, encoding='utf-8') as f:
try:
existing_deps = json.load(f)
except Exception:
# Manually edited messed up json perhaps
return None
if bml_path in existing_deps:
return existing_deps[bml_path]
else:
return None # Manually edited perhaps (assuming we got the task order correct)
def update_bml_includes_cache(bml_path: Path, bml_deps: List[Path]):
existing_deps = {}
if exists(bml_includes_cache_file):
with open(bml_includes_cache_file, encoding='utf-8') as f:
try:
existing_deps = json.load(f)
except Exception:
pass
existing_deps[bml_path] = bml_deps
with open(bml_includes_cache_file, "w", encoding='utf-8') as f:
json.dump(existing_deps, f, indent=4)
def task_bml_include_cache():
"""Populate the bml include cache."""
input_bml_file_paths = glob("*.bml")
def calc_include_deps_and_cache(file_dep) -> None:
bml_path = Path(file_dep)
bml_deps = bml_include_dependencies(bml_path)
update_bml_includes_cache(bml_path, bml_deps)
for bml_path in input_bml_file_paths:
# We don't use a target as doit cannot deal with more than one input file affecting the same output file
# and we are using a single cache file instead of one cache file per input file.
# This does mean that we are using the order of the tasks in this file to have the include cache updated
# before the html task reads the include cache as part of determining changing file dependencies
# The html task itself cannot use the include cache file as a doit file_dep dependency as it is being updated
# by other unrelated bml file changes.
        # Actually, using a different notion of an update (not just tracking file modifications) is another feature of
# doit that could be applied if interested enough.
yield {
'name': basename(bml_path),
'actions': [(calc_include_deps_and_cache, [bml_path])],
'file_dep': [bml_path],
'title': title_with_actions
}
def task_bml2html():
"""Create html file from bridge bidding markup language file."""
bml2html_path = Path(join(bml_tools_dir, "bml2html.py"))
input_bml_file_paths = glob("*.bml")
def html_output_path(bml_path: Path) -> Path:
return Path(splitext(bml_path)[0] + ".html")
for bml_path in input_bml_file_paths:
bml_deps = read_bml_includes_cache(bml_path)
if bml_deps is None:
bml_deps = bml_include_dependencies(bml_path)
update_bml_includes_cache(bml_path, bml_deps)
yield {
'name': basename(bml_path),
'actions': [f"python {bml2html_path} {bml_path}"],
'file_dep': [bml_path] + bml_deps,
'targets': [html_output_path(bml_path)],
'title': title_with_actions
}
def task_bmlcss():
"""Copy the bml CSS style sheet to this directory."""
css_basename = "bml.css"
src_css_file = Path(join(bml_tools_dir, css_basename))
def copy_file() -> None:
# OS neutral compared to running a shell command
copyfile(src_css_file, css_basename)
return {
'actions': [copy_file],
'file_dep': [src_css_file],
'targets': [css_basename],
'title': title_with_actions
}
def task_publish_main_bidding():
"""Copy the main bidding html and css document to the web server root."""
src_file = "bidding-system.html"
dst_file = f"W:/{src_file}"
css_file = "bml.css"
dst_css = f"W:/{css_file}"
def copy_file(dependencies, targets) -> None:
copyfile(dependencies[0], targets[0])
for src, dst in [(src_file, dst_file), (css_file, dst_css)]:
yield {
'name': basename(src),
'actions': [copy_file],
'file_dep': [src],
'targets': [dst],
'title': title_with_actions
}
| en | 0.884034 | #! /usr/bin/doit -f # https://pydoit.org # `pip install [--user] doit` adds `doit.exe` to the PATH # - Note `doit auto`, the file watcher only works on Linux/Mac # - All commands are relative to dodo.py (doit runs in the working dir of dodo.py # even if ran from a different directory `doit -f path/to/dodo.py`) # bml files can include others, so spend time scanning every bml file # for new include directives every time a bml file is saved # We assume the file name is not quoted, just a free form path string # Manually edited messed up json perhaps # Manually edited perhaps (assuming we got the task order correct) Populate the bml include cache. # We don't use a target as doit cannot deal with more than one input file affecting the same output file # and we are using a single cache file instead of one cache file per input file. # This does mean that we are using the order of the tasks in this file to have the include cache updated # before the html task reads the include cache as part of determining changing file dependencies # The html task itself cannot use the include cache file as a doit file_dep dependency as it is being updated # by other unrelated bml file changes. # Actually, using a different notion of an update (not just tracking file modifications) if another feature of # doit that could be applied if interested enough. Create html file from bridge bidding markup language file. Copy the bml CSS style sheet to this directory. # OS neutral compared to running a shell command Copy the main bidding html and css document to the web server root. | 2.260204 | 2 |
learn/hard-way/EmptyFileError.py | hustbill/Python-auto | 0 | 5903 | class EmptyFileError(Exception):
pass
filenames = ["myfile1", "nonExistent", "emptyFile", "myfile2"]
for file in filenames:
try:
f = open(file, 'r')
line = f.readline()
if line == "":
f.close()
raise EmptyFileError("%s: is empty" % file)
# except IOError as error:
# print("%s: could not be opened: %s" % (file, error.strerror)
## except EmptyFileError as error:
# print(error)
# else:
# print("%s: %s" % (file, f.readline()))
# finally:
# print("Done processing", file)
| class EmptyFileError(Exception):
pass
filenames = ["myfile1", "nonExistent", "emptyFile", "myfile2"]
for file in filenames:
try:
f = open(file, 'r')
line = f.readline()
if line == "":
f.close()
raise EmptyFileError("%s: is empty" % file)
# except IOError as error:
# print("%s: could not be opened: %s" % (file, error.strerror)
## except EmptyFileError as error:
# print(error)
# else:
# print("%s: %s" % (file, f.readline()))
# finally:
# print("Done processing", file)
| en | 0.541682 | # except IOError as error: # print("%s: could not be opened: %s" % (file, error.strerror) ## except EmptyFileError as error: # print(error) # else: # print("%s: %s" % (file, f.readline())) # finally: # print("Done processing", file) | 3.678347 | 4 |
plugins/crumbling_in.py | jimconner/digital_sky | 2 | 5904 | <reponame>jimconner/digital_sky<filename>plugins/crumbling_in.py
# Crumbling In
# Like randomised coloured dots and then they
# increase on both sides getting closer and closer into the middle.
import sys, traceback, random
from numpy import array,full
class animation():
def __init__(self,datastore):
self.max_led = datastore.LED_COUNT
self.pos = 0
self.direction=0
self.cols = [ \
[255,0,0,0], \
[0,255,0,0], \
[0,0,255,0], \
[0,0,0,255], \
[255,255,0,0], \
[255,0,255,0], \
[0,255,255,0], \
[0,0,255,64], \
]
self.row=full((self.max_led,4),0)
def emit_row(self):
try:
if self.pos >= self.max_led/2:
self.direction=1
if self.pos <= 0:
self.direction=0
col=self.cols[random.randint(0,7)]
if self.direction==1:
col=[0,0,0,0]
self.row[self.pos]=col
self.row[(self.max_led-1)-self.pos]=col
if self.direction==0:
self.pos+=1
else:
self.pos-=1
return self.row
except Exception as err:
print(err)
traceback.print_exc(file=sys.stdout)
| # Crumbling In
# Like randomised coloured dots and then they
# increase on both sides getting closer and closer into the middle.
import sys, traceback, random
from numpy import array,full
class animation():
def __init__(self,datastore):
self.max_led = datastore.LED_COUNT
self.pos = 0
self.direction=0
self.cols = [ \
[255,0,0,0], \
[0,255,0,0], \
[0,0,255,0], \
[0,0,0,255], \
[255,255,0,0], \
[255,0,255,0], \
[0,255,255,0], \
[0,0,255,64], \
]
self.row=full((self.max_led,4),0)
def emit_row(self):
try:
if self.pos >= self.max_led/2:
self.direction=1
if self.pos <= 0:
self.direction=0
col=self.cols[random.randint(0,7)]
if self.direction==1:
col=[0,0,0,0]
self.row[self.pos]=col
self.row[(self.max_led-1)-self.pos]=col
if self.direction==0:
self.pos+=1
else:
self.pos-=1
return self.row
except Exception as err:
print(err)
traceback.print_exc(file=sys.stdout) | en | 0.924035 | # Crumbling In # Like randomised coloured dots and then they # increase on both sides getting closer and closer into the middle. | 2.947889 | 3 |
pybleau/app/plotting/tests/test_plot_config.py | KBIbiopharma/pybleau | 4 | 5905 | from __future__ import division
from unittest import skipIf, TestCase
import os
from pandas import DataFrame
import numpy as np
from numpy.testing import assert_array_equal
BACKEND_AVAILABLE = os.environ.get("ETS_TOOLKIT", "qt4") != "null"
if BACKEND_AVAILABLE:
from app_common.apptools.testing_utils import assert_obj_gui_works
from pybleau.app.plotting.plot_config import HeatmapPlotConfigurator, \
HEATMAP_PLOT_TYPE, HistogramPlotConfigurator, HIST_PLOT_TYPE, \
LinePlotConfigurator, BarPlotConfigurator, ScatterPlotConfigurator, \
SCATTER_PLOT_TYPE, CMAP_SCATTER_PLOT_TYPE, LINE_PLOT_TYPE, \
BAR_PLOT_TYPE
LEN = 16
TEST_DF = DataFrame({"a": [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
"b": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],
"c": [1, 2, 3, 4, 2, 3, 1, 1, 4, 4, 5, 6, 4, 4, 5, 6],
"d": list("ababcabcdabcdeab"),
"e": np.random.randn(LEN),
"f": range(LEN),
# Highly repetitive column to split the entire data into 2
"g": np.array(["0", "1"] * (LEN // 2)),
"h": np.array([0, 1] * (LEN // 2), dtype=bool),
})
class BasePlotConfig(object):
def test_creation_fails_if_no_df(self):
with self.assertRaises(ValueError):
config = self.configurator()
config.to_dict()
def test_bring_up(self):
obj = self.configurator(data_source=TEST_DF)
assert_obj_gui_works(obj)
# Assertion utilities -----------------------------------------------------
def assert_editor_options(self, editor):
editor_options = editor.values
if self.numerical_cols_only:
for col in editor_options:
if col != "index":
self.assertIn(TEST_DF[col].dtype, (np.int64, np.float64))
else:
self.assertEqual(set(editor_options),
set(TEST_DF.columns) | {"index"})
class BaseXYPlotConfig(BasePlotConfig):
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
self.assertIn("y_arr", config_dict)
assert_array_equal(config_dict["y_arr"], TEST_DF["b"].values)
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
def test_plot_colored_by_str_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
d_values = TEST_DF["d"].unique()
self.assertEqual(set(config_dict["x_arr"].keys()), set(d_values))
for arr in config_dict["x_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["x_arr"]["c"], np.array([1, 4, 4]))
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), set(d_values))
for arr in config_dict["y_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["y_arr"]["c"], np.array([2, 2, 3]))
def test_plot_colored_by_bool_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="h")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
hue_values = set(TEST_DF["h"])
self.assertEqual(set(config_dict["x_arr"].keys()), hue_values)
assert_array_equal(config_dict["x_arr"][False], TEST_DF["a"][::2])
assert_array_equal(config_dict["x_arr"][True], TEST_DF["a"][1::2])
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), hue_values)
assert_array_equal(config_dict["y_arr"][False], TEST_DF["b"][::2])
assert_array_equal(config_dict["y_arr"][True], TEST_DF["b"][1::2])
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestScatterPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = ScatterPlotConfigurator
self.basic_type = SCATTER_PLOT_TYPE
self.numerical_cols_only = True
def test_plot_scatter_colored_by_int_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="c")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_plot_scatter_colored_by_float_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_style_colorize_by_float_changes_on_color_column_change(self):
""" The dtype of the column to colorize controls colorize_by_float.
"""
# Color by a string:
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertFalse(config.plot_style.colorize_by_float)
# Color by a float:
config.z_col_name = "e"
self.assertTrue(config.plot_style.colorize_by_float)
def test_scatter_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_scatter_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestLinePlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = LinePlotConfigurator
self.basic_type = LINE_PLOT_TYPE
self.numerical_cols_only = True
def test_line_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_line_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestBarPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = BarPlotConfigurator
self.basic_type = BAR_PLOT_TYPE
self.numerical_cols_only = False
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[3].content[0].content[0].editor
self.assert_editor_options(x_editor)
def test_melt_mode_no_effect(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True)
self.assertEqual(config.plot_type, self.basic_type)
# No columns to melt, so no transformation:
self.assertIs(config.data_source, TEST_DF)
self.assertIs(config.transformed_data, TEST_DF)
def test_melt_mode_with_melted_columns(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"])
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"]*LEN+["f"]*LEN)
assert_array_equal(config.x_arr, x_values)
self.assertEqual(config.x_col_name, "variable")
self.assertEqual(len(config.y_arr), 2 * LEN)
self.assertEqual(config.y_col_name, "value")
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], x_values)
self.assertIn("y_arr", config_dict)
self.assertEqual(len(config_dict["y_arr"]), 2 * LEN)
def test_melt_mode_with_melted_columns_and_str_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="g")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["g"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("g", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
def test_melt_mode_with_melted_columns_and_bool_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="h")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["h"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("h", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHistogramPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HistogramPlotConfigurator
self.basic_type = HIST_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
def test_plot_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF,
x_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHeatmapPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HeatmapPlotConfigurator
self.basic_type = HEATMAP_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
Passing non-numerical
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
| from __future__ import division
from unittest import skipIf, TestCase
import os
from pandas import DataFrame
import numpy as np
from numpy.testing import assert_array_equal
BACKEND_AVAILABLE = os.environ.get("ETS_TOOLKIT", "qt4") != "null"
if BACKEND_AVAILABLE:
from app_common.apptools.testing_utils import assert_obj_gui_works
from pybleau.app.plotting.plot_config import HeatmapPlotConfigurator, \
HEATMAP_PLOT_TYPE, HistogramPlotConfigurator, HIST_PLOT_TYPE, \
LinePlotConfigurator, BarPlotConfigurator, ScatterPlotConfigurator, \
SCATTER_PLOT_TYPE, CMAP_SCATTER_PLOT_TYPE, LINE_PLOT_TYPE, \
BAR_PLOT_TYPE
LEN = 16
TEST_DF = DataFrame({"a": [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
"b": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],
"c": [1, 2, 3, 4, 2, 3, 1, 1, 4, 4, 5, 6, 4, 4, 5, 6],
"d": list("ababcabcdabcdeab"),
"e": np.random.randn(LEN),
"f": range(LEN),
# Highly repetitive column to split the entire data into 2
"g": np.array(["0", "1"] * (LEN // 2)),
"h": np.array([0, 1] * (LEN // 2), dtype=bool),
})
class BasePlotConfig(object):
def test_creation_fails_if_no_df(self):
with self.assertRaises(ValueError):
config = self.configurator()
config.to_dict()
def test_bring_up(self):
obj = self.configurator(data_source=TEST_DF)
assert_obj_gui_works(obj)
# Assertion utilities -----------------------------------------------------
def assert_editor_options(self, editor):
editor_options = editor.values
if self.numerical_cols_only:
for col in editor_options:
if col != "index":
self.assertIn(TEST_DF[col].dtype, (np.int64, np.float64))
else:
self.assertEqual(set(editor_options),
set(TEST_DF.columns) | {"index"})
class BaseXYPlotConfig(BasePlotConfig):
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
self.assertIn("y_arr", config_dict)
assert_array_equal(config_dict["y_arr"], TEST_DF["b"].values)
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
def test_plot_colored_by_str_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
d_values = TEST_DF["d"].unique()
self.assertEqual(set(config_dict["x_arr"].keys()), set(d_values))
for arr in config_dict["x_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["x_arr"]["c"], np.array([1, 4, 4]))
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), set(d_values))
for arr in config_dict["y_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["y_arr"]["c"], np.array([2, 2, 3]))
def test_plot_colored_by_bool_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="h")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
hue_values = set(TEST_DF["h"])
self.assertEqual(set(config_dict["x_arr"].keys()), hue_values)
assert_array_equal(config_dict["x_arr"][False], TEST_DF["a"][::2])
assert_array_equal(config_dict["x_arr"][True], TEST_DF["a"][1::2])
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), hue_values)
assert_array_equal(config_dict["y_arr"][False], TEST_DF["b"][::2])
assert_array_equal(config_dict["y_arr"][True], TEST_DF["b"][1::2])
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestScatterPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = ScatterPlotConfigurator
self.basic_type = SCATTER_PLOT_TYPE
self.numerical_cols_only = True
def test_plot_scatter_colored_by_int_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="c")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_plot_scatter_colored_by_float_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_style_colorize_by_float_changes_on_color_column_change(self):
""" The dtype of the column to colorize controls colorize_by_float.
"""
# Color by a string:
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertFalse(config.plot_style.colorize_by_float)
# Color by a float:
config.z_col_name = "e"
self.assertTrue(config.plot_style.colorize_by_float)
def test_scatter_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_scatter_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestLinePlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = LinePlotConfigurator
self.basic_type = LINE_PLOT_TYPE
self.numerical_cols_only = True
def test_line_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_line_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestBarPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = BarPlotConfigurator
self.basic_type = BAR_PLOT_TYPE
self.numerical_cols_only = False
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[3].content[0].content[0].editor
self.assert_editor_options(x_editor)
def test_melt_mode_no_effect(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True)
self.assertEqual(config.plot_type, self.basic_type)
# No columns to melt, so no transformation:
self.assertIs(config.data_source, TEST_DF)
self.assertIs(config.transformed_data, TEST_DF)
def test_melt_mode_with_melted_columns(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"])
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"]*LEN+["f"]*LEN)
assert_array_equal(config.x_arr, x_values)
self.assertEqual(config.x_col_name, "variable")
self.assertEqual(len(config.y_arr), 2 * LEN)
self.assertEqual(config.y_col_name, "value")
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], x_values)
self.assertIn("y_arr", config_dict)
self.assertEqual(len(config_dict["y_arr"]), 2 * LEN)
def test_melt_mode_with_melted_columns_and_str_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="g")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["g"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("g", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
def test_melt_mode_with_melted_columns_and_bool_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="h")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["h"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("h", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHistogramPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HistogramPlotConfigurator
self.basic_type = HIST_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
def test_plot_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF,
x_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHeatmapPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HeatmapPlotConfigurator
self.basic_type = HEATMAP_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
Passing non-numerical
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
| en | 0.626471 | # Highly repetitive column to split the entire data into 2 # Assertion utilities ----------------------------------------------------- Make sure different configurators provide the right data choices. # Color by a column filled with boolean values # For example: # For example: # Color by a column filled with boolean values The dtype of the column to colorize controls colorize_by_float. # Color by a string: # Color by a float: Make sure different configurators provide the right data choices. # No columns to melt, so no transformation: # Pulling the x_arr forces a reset of the x_col_name # Pulling the x_arr forces a reset of the x_col_name # Pulling the x_arr forces a reset of the x_col_name # Tests ------------------------------------------------------------------- Make sure different configurators provide the right data choices. # Tests ------------------------------------------------------------------- Make sure different configurators provide the right data choices. Passing non-numerical | 2.382178 | 2 |
test/integration/languages/test_mixed.py | thomasrockhu/bfg9000 | 72 | 5906 | import os.path
from .. import *
class TestMixed(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed'), *args, **kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello from c++!\n')
class TestMixedLibrary(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed_library'), *args,
**kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello, library!\n')
@skip_if('fortran' not in test_features, 'skipping fortran tests')
# XXX: This fails on macOS, probably because of a version mismatch somewhere.
@skip_if(env.host_platform.genus == 'darwin', 'fortran on os x is weird')
class TestMixedFortran(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed_fortran'), *args,
**kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello from f77!\n')
| import os.path
from .. import *
class TestMixed(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed'), *args, **kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello from c++!\n')
class TestMixedLibrary(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed_library'), *args,
**kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello, library!\n')
@skip_if('fortran' not in test_features, 'skipping fortran tests')
# XXX: This fails on macOS, probably because of a version mismatch somewhere.
@skip_if(env.host_platform.genus == 'darwin', 'fortran on os x is weird')
class TestMixedFortran(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'mixed_fortran'), *args,
**kwargs)
def test_build(self):
self.build(executable('program'))
self.assertOutput([executable('program')], 'hello from f77!\n')
| en | 0.896357 | # XXX: This fails on macOS, probably because of a version mismatch somewhere. | 2.051531 | 2 |
code/7/collections/namedtupe_example.py | TeamLab/introduction_to_pythoy_TEAMLAB_MOOC | 65 | 5907 | from collections import namedtuple
# Basic example
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0] + p[1])
x, y = p
print(x, y)
print(p.x + p.y)
print(Point(x=11, y=22))
from collections import namedtuple
import csv
f = open("users.csv", "r")
next(f)
reader = csv.reader(f)
student_list = []
for row in reader:
student_list.append(row)
print(row)
print(student_list)
columns = ["user_id", "integration_id", "login_id", "password", "first_name",
"last_name", "full_name", "sortable_name", "short_name",
"email", "status"]
Student = namedtuple('Student', columns)
student_namedtupe_list = []
for row in student_list:
student = Student(*row)
student_namedtupe_list.append(student)
print(student_namedtupe_list[0])
print(student_namedtupe_list[0].full_name)
| from collections import namedtuple
# Basic example
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0] + p[1])
x, y = p
print(x, y)
print(p.x + p.y)
print(Point(x=11, y=22))
from collections import namedtuple
import csv
f = open("users.csv", "r")
next(f)
reader = csv.reader(f)
student_list = []
for row in reader:
student_list.append(row)
print(row)
print(student_list)
columns = ["user_id", "integration_id", "login_id", "password", "first_name",
"last_name", "full_name", "sortable_name", "short_name",
"email", "status"]
Student = namedtuple('Student', columns)
student_namedtupe_list = []
for row in student_list:
student = Student(*row)
student_namedtupe_list.append(student)
print(student_namedtupe_list[0])
print(student_namedtupe_list[0].full_name)
| en | 0.2883 | # Basic example | 3.998087 | 4 |
test/helper_tools/benchtool.py | dotnes/mitmproxy | 4 | 5908 | <reponame>dotnes/mitmproxy
# Profile mitmdump with apachebench and
# yappi (https://code.google.com/p/yappi/)
#
# Requirements:
# - Apache Bench "ab" binary
# - pip install click yappi
from mitmproxy.main import mitmdump
from os import system
from threading import Thread
import time
import yappi
import click
class ApacheBenchThread(Thread):
def __init__(self, concurrency):
self.concurrency = concurrency
super().__init__()
def run(self):
time.sleep(2)
system(
"ab -n 1024 -c {} -X 127.0.0.1:8080 http://example.com/".format(self.concurrency))
@click.command()
@click.option('--profiler', default="none", type=click.Choice(['none', 'yappi']))
@click.option('--clock-type', default="cpu", type=click.Choice(['wall', 'cpu']))
@click.option('--concurrency', default=1, type=click.INT)
def main(profiler, clock_type, concurrency):
outfile = "callgrind.mitmdump-{}-c{}".format(clock_type, concurrency)
a = ApacheBenchThread(concurrency)
a.start()
if profiler == "yappi":
yappi.set_clock_type(clock_type)
yappi.start(addons=True)
print("Start mitmdump...")
mitmdump(["-k", "-q", "-S", "1024example"])
print("mitmdump stopped.")
print("Save profile information...")
if profiler == "yappi":
yappi.stop()
stats = yappi.get_func_stats()
stats.save(outfile, type='callgrind')
print("Done.")
if __name__ == '__main__':
main()
| # Profile mitmdump with apachebench and
# yappi (https://code.google.com/p/yappi/)
#
# Requirements:
# - Apache Bench "ab" binary
# - pip install click yappi
from mitmproxy.main import mitmdump
from os import system
from threading import Thread
import time
import yappi
import click
class ApacheBenchThread(Thread):
def __init__(self, concurrency):
self.concurrency = concurrency
super().__init__()
def run(self):
time.sleep(2)
system(
"ab -n 1024 -c {} -X 127.0.0.1:8080 http://example.com/".format(self.concurrency))
@click.command()
@click.option('--profiler', default="none", type=click.Choice(['none', 'yappi']))
@click.option('--clock-type', default="cpu", type=click.Choice(['wall', 'cpu']))
@click.option('--concurrency', default=1, type=click.INT)
def main(profiler, clock_type, concurrency):
outfile = "callgrind.mitmdump-{}-c{}".format(clock_type, concurrency)
a = ApacheBenchThread(concurrency)
a.start()
if profiler == "yappi":
yappi.set_clock_type(clock_type)
yappi.start(addons=True)
print("Start mitmdump...")
mitmdump(["-k", "-q", "-S", "1024example"])
print("mitmdump stopped.")
print("Save profile information...")
if profiler == "yappi":
yappi.stop()
stats = yappi.get_func_stats()
stats.save(outfile, type='callgrind')
print("Done.")
if __name__ == '__main__':
main() | en | 0.679446 | # Profile mitmdump with apachebench and # yappi (https://code.google.com/p/yappi/) # # Requirements: # - Apache Bench "ab" binary # - pip install click yappi | 2.188335 | 2 |
pivpy/graphics.py | alexliberzonlab/pivpy | 1 | 5909 | <filename>pivpy/graphics.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Various plots
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation, FFMpegWriter
from matplotlib.colors import LogNorm
import xarray as xr
import os
def quiver(data, arrScale = 25.0, threshold = None, nthArr = 1,
contourLevels = None, colbar = True, logscale = False,
aspectratio='equal', colbar_orient = 'vertical', units = None):
"""
Generates a quiver plot of a 'data' xarray DataArray object (single frame from a dataset)
Inputs:
data - xarray DataArray of the type defined in pivpy, one of the frames in the Dataset
selected by default using .isel(t=0)
threshold - values above the threshold will be set equal to threshold
arrScale - use to change arrow scales
nthArr - use to plot only every nth arrow from the array
contourLevels - use to specify the maximum value (abs) of contour plots
        colbar - True/False whether to generate a colorbar or not
logscale - if true then colorbar is on log scale
        aspectratio - set auto or equal for the plot's appearance
colbar_orient - 'horizontal' or 'vertical' orientation of the colorbar (if colbar is True)
Outputs:
none
Usage:
graphics.quiver(data, arrScale = 0.2, threshold = Inf, n)
"""
data = dataset_to_array(data)
x = data.x
y = data.y
u = data.u
v = data.v
if units is not None:
lUnits = units[0] # ['m' 'm' 'mm/s' 'mm/s']
velUnits = units[2]
tUnits = velUnits.split('/')[1] # make it 's' or 'dt'
else:
lUnits, velUnits, tUnits = '', '', ''
if threshold is not None:
data['u'] = xr.where(data['u']>threshold, threshold, data['u'])
data['v'] = xr.where(data['v']>threshold, threshold, data['v'])
S = np.array(np.sqrt(u**2 + v**2))
    if len(plt.get_fignums()) == 0: # if no figure is open
        fig, ax = plt.subplots() # open a new figure
    else:
        fig, ax = plt.gcf(), plt.gca() # reuse the current figure and axes
if contourLevels is None:
levels = np.linspace(0, np.max(S.flatten()), 30) # default contour levels up to max of S
else:
levels = np.linspace(0, contourLevels, 30)
if logscale:
c = ax.contourf(x,y,S,alpha=0.8,
cmap = plt.get_cmap("Blues"),
                 levels = levels, norm = LogNorm())
else:
c = ax.contourf(x,y,S,alpha=0.8,
cmap = plt.get_cmap("Blues"),
levels=levels)
if colbar:
cbar = plt.colorbar(c, orientation=colbar_orient)
cbar.set_label(r'$\left| \, V \, \right|$ ['+ lUnits +' $\cdot$ '+ tUnits +'$^{-1}$]')
ax.quiver(x[::nthArr],y[::nthArr],
u[::nthArr,::nthArr],v[::nthArr,::nthArr],units='width',
scale = np.max(S*arrScale),headwidth=2)
ax.set_xlabel('x (' + lUnits + ')')
ax.set_ylabel('y (' + lUnits + ')')
ax.set_aspect(aspectratio)
return fig,ax
def histogram(data, normed = False):
"""
this function will plot a normalized histogram of
the velocity data.
Input:
data : xarray DataSet with ['u','v'] attrs['units']
normed : (optional) default is False to present normalized
histogram
"""
u = np.asarray(data.u).flatten()
v = np.asarray(data.v).flatten()
units = data.attrs['units']
f,ax = plt.subplots(2)
    ax[0].hist(u,bins=int(np.sqrt(len(u))*0.5),density=normed)
ax[0].set_xlabel('u ['+units[2]+']')
ax[1] = plt.subplot2grid((2,1),(1,0))
    ax[1].hist(v,bins=int(np.sqrt(len(v)*0.5)),density=normed)
ax[1].set_xlabel('v ['+units[2]+']')
plt.tight_layout()
return f, ax
def contour_plot(data, threshold = None, contourLevels = None,
colbar = True, logscale = False, aspectration='equal', units=None):
""" contourf ajusted for the xarray PIV dataset, creates a
contour map for the data['w'] property.
Input:
data : xarray PIV DataArray, converted automatically using .isel(t=0)
threshold : a threshold value, default is None (no data clipping)
contourLevels : number of contour levels, default is None
colbar : boolean (default is True) show/hide colorbar
logscale : boolean (True is default) create in linear/log scale
aspectration : string, 'equal' is the default
"""
data = dataset_to_array(data)
if units is not None:
lUnits = units[0] # ['m' 'm' 'mm/s' 'mm/s']
# velUnits = units[2]
# tUnits = velUnits.split('/')[1] # make it 's' or 'dt'
else:
# lUnits, velUnits = '', ''
lUnits = ''
f,ax = plt.subplots()
if threshold is not None:
data['w'] = xr.where(data['w']>threshold, threshold, data['w'])
m = np.amax(abs(data['w']))
    if contourLevels is None:
levels = np.linspace(-m, m, 30)
else:
levels = np.linspace(-contourLevels, contourLevels, 30)
if logscale:
        c = ax.contourf(data.x,data.y,np.abs(data['w']), levels=levels,
                        cmap = plt.get_cmap('RdYlBu'), norm=LogNorm())
else:
c = ax.contourf(data.x,data.y,data['w'], levels=levels,
cmap = plt.get_cmap('RdYlBu'))
plt.xlabel('x [' + lUnits + ']')
plt.ylabel('y [' + lUnits + ']')
if colbar:
cbar = plt.colorbar(c)
cbar.set_label(r'$\omega$ [s$^{-1}$]')
ax.set_aspect(aspectration)
return f,ax
def showf(data, variables=None, units=None, fig=None):
"""
showf(data, var, units)
Arguments:
data : xarray.DataSet that contains dimensions of t,x,y
and variables u,v and maybe w (scalar)
"""
if variables is None:
xlabel = ' '
ylabel = ' '
else:
xlabel = variables[0]
ylabel = variables[1]
if units is not None:
xlabel += ' ' + units[0]
ylabel += ' ' + units[1]
fig = plt.figure(None if fig is None else fig.number)
for t in data['t']:
d = data.isel(t=t)
plt.quiver(d['x'],d['y'],d['u'],d['v'],d['u']**2 + d['v']**2)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.draw()
plt.pause(0.1)
plt.show()
def showscal(data, property='ken'):
"""
showf(data, var, units)
Arguments:
data : xarray.DataSet that contains dimensions of t,x,y
and a variable w (scalar)
"""
# fig = plt.figure(None if fig is None else fig.number)
# import pdb; pdb.set_trace()
# xlabel = (None if var is None else var[0]) + ' [' + (None if units is None else units[0])+']'
# ylabel = (None if var is None else var[1]) + ' [' + (None if units is None else units[1])+']'
data = data.piv.vec2scal(property=property)
contour_plot(data)
def animate(data, arrowscale=1, savepath=None):
""" animates the quiver plot for the dataset (multiple frames)
Input:
data : xarray PIV type of DataSet
arrowscale : [optional] integer, default is 1
savepath : [optional] path to save the MP4 animation, default is None
Output:
if savepath is None, then only an image display of the animation
if savepath is an existing path, a file named im.mp4 is saved
"""
X, Y = data.x, data.y
U, V = data.u[:,:,0], data.v[:,:,0] # first frame
fig, ax = plt.subplots(1,1)
M = np.sqrt(U**2 + V**2)
Q = ax.quiver(X[::3,::3], Y[::3,::3],
U[::3,::3], V[::3,::3], M[::3,::3],
units='inches', scale=arrowscale)
cb = plt.colorbar(Q)
units = data.attrs['units']
cb.ax.set_ylabel('velocity (' + units[2] + ')')
text = ax.text(0.2,1.05, '1/'+str(len(data.t)), ha='center', va='center',
transform=ax.transAxes)
def update_quiver(num,Q,data,text):
U,V = data.u[:,:,num],data.v[:,:,num]
M = np.sqrt(U[::3,::3]**2 + V[::3,::3]**2)
        Q.set_UVC(U[::3,::3],V[::3,::3],M)
text.set_text(str(num+1)+'/'+str(len(data.t)))
return Q
anim = FuncAnimation(fig, update_quiver, fargs=(Q,data,text),
frames = len(data.t), blit=False)
mywriter = FFMpegWriter()
if savepath:
p = os.getcwd()
os.chdir(savepath)
anim.save('im.mp4', writer=mywriter)
os.chdir(p)
else: anim.save('im.mp4', writer=mywriter)
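# Illustrative usage sketch (assumption): with ``data`` being a pivpy Dataset
# that has a time dimension and ffmpeg available on the PATH,
#
#     animate(data, arrowscale=2, savepath='/tmp')
#
# writes /tmp/im.mp4, while savepath=None writes im.mp4 into the current
# working directory (see the save branches above).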
def dataset_to_array(data,N=0):
""" converts xarray Dataset to array """
if 't' in data.dims:
print('Warning: function for a single frame, using first frame, supply data.isel(t=N)')
data = data.isel(t=N)
    return data
configs/my_config/vit_base_aspp.py | BostonCrayfish/mmsegmentation | 0 | 5910 | # model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='pretrain/vit_base_patch16_224.pth',
backbone=dict(
type='VisionTransformer',
img_size=(224, 224),
patch_size=16,
in_channels=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
# out_indices=(2, 5, 8, 11),
qkv_bias=True,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
with_cls_token=True,
norm_cfg=dict(type='LN', eps=1e-6),
act_cfg=dict(type='GELU'),
norm_eval=False,
interpolate_mode='bicubic'),
neck=None,
decode_head=dict(
type='ASPPHead',
in_channels=768,
# in_index=3,
channels=512,
dilations=(1, 6, 12, 18),
dropout_ratio=0.1,
num_classes=21,
contrast=True,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=None,
# model training and testing settings
train_cfg=dict(),
    test_cfg=dict(mode='whole'))  # yapf: disable
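# Illustrative note (assumption, not part of the original config): configs of
# this form are normally passed to the mmsegmentation entry points, e.g.
#     python tools/train.py configs/my_config/vit_base_aspp.py
# with the exact script location depending on the mmsegmentation checkout.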
tripleo_ansible/ansible_plugins/modules/podman_container.py | smolar/tripleo-ansible | 0 | 5911 | <filename>tripleo_ansible/ansible_plugins/modules/podman_container.py
#!/usr/bin/python
# Copyright (c) 2019 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# flake8: noqa: E501
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
from distutils.version import LooseVersion
import yaml
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
module: podman_container
author:
- "<NAME> (@sshnaidm)"
version_added: '2.9'
short_description: Manage podman containers
notes: []
description:
- Start, stop, restart and manage Podman containers
requirements:
- "Podman installed on host"
options:
name:
description:
- Name of the container
required: True
type: str
executable:
description:
- Path to C(podman) executable if it is not in the C($PATH) on the
machine running C(podman)
default: 'podman'
type: str
state:
description:
- I(absent) - A container matching the specified name will be stopped and
removed.
- I(present) - Asserts the existence of a container matching the name and
any provided configuration parameters. If no container matches the
name, a container will be created. If a container matches the name but
the provided configuration does not match, the container will be
updated, if it can be. If it cannot be updated, it will be removed and
re-created with the requested config. Image version will be taken into
account when comparing configuration. Use the recreate option to force
the re-creation of the matching container.
- I(started) - Asserts there is a running container matching the name and
any provided configuration. If no container matches the name, a
container will be created and started. Use recreate to always re-create
a matching container, even if it is running. Use force_restart to force
a matching container to be stopped and restarted.
- I(stopped) - Asserts that the container is first I(present), and then
if the container is running moves it to a stopped state.
type: str
default: started
choices:
- absent
- present
- stopped
- started
image:
description:
- Repository path (or image name) and tag used to create the container.
If an image is not found, the image will be pulled from the registry.
If no tag is included, C(latest) will be used.
- Can also be an image ID. If this is the case, the image is assumed to
be available locally.
type: str
annotation:
description:
- Add an annotation to the container. The format is key value, multiple
times.
type: dict
authfile:
description:
- Path of the authentication file. Default is
``${XDG_RUNTIME_DIR}/containers/auth.json``
(Not available for remote commands) You can also override the default
path of the authentication file by setting the ``REGISTRY_AUTH_FILE``
environment variable. ``export REGISTRY_AUTH_FILE=path``
type: path
blkio_weight:
description:
- Block IO weight (relative weight) accepts a weight value between 10 and
1000
type: int
blkio_weight_device:
description:
- Block IO weight (relative device weight, format DEVICE_NAME[:]WEIGHT).
type: dict
cap_add:
description:
- List of capabilities to add to the container.
type: list
elements: str
cap_drop:
description:
- List of capabilities to drop from the container.
type: list
elements: str
cgroup_parent:
description:
- Path to cgroups under which the cgroup for the container will be
created.
If the path is not absolute, the path is considered to be relative to
the cgroups path of the init process. Cgroups will be created if they
do not already exist.
type: path
cgroupns:
description:
      - Set the cgroup namespace mode for the container, for example
        host, private, ns:<path> or container:<name|id>.
type: str
cgroups:
description:
- Determines whether the container will create CGroups.
        Valid values are enabled and disabled, with the default being enabled.
The disabled option will force the container to not create CGroups,
and thus conflicts with CGroup options cgroupns and cgroup-parent.
type: str
choices:
- default
- disabled
cidfile:
description:
- Write the container ID to the file
type: path
cmd_args:
description:
      - Any additional command options you want to pass to podman command,
cmd_args - ['--other-param', 'value']
Be aware module doesn't support idempotency if this is set.
type: list
elements: str
conmon_pidfile:
description:
- Write the pid of the conmon process to a file.
conmon runs in a separate process than Podman,
so this is necessary when using systemd to restart Podman containers.
type: path
command:
description:
- Override command of container. Can be a string or a list.
type: raw
cpu_period:
description:
- Limit the CPU real-time period in microseconds
type: int
cpu_rt_period:
description:
- Limit the CPU real-time period in microseconds.
        Limit the container's Real Time CPU usage. This flag tells the kernel to
restrict the container's Real Time CPU usage to the period you specify.
type: int
cpu_rt_runtime:
description:
- Limit the CPU real-time runtime in microseconds.
This flag tells the kernel to limit the amount of time in a given CPU
period Real Time tasks may consume.
type: int
cpu_shares:
description:
- CPU shares (relative weight)
type: int
cpus:
description:
- Number of CPUs. The default is 0.0 which means no limit.
type: str
cpuset_cpus:
description:
- CPUs in which to allow execution (0-3, 0,1)
type: str
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
effective on NUMA systems.
type: str
detach:
description:
- Run container in detach mode
type: bool
default: True
debug:
description:
- Return additional information which can be helpful for investigations.
type: bool
default: False
detach_keys:
description:
- Override the key sequence for detaching a container. Format is a single
character or ctrl-value
type: str
device:
description:
- Add a host device to the container.
The format is <device-on-host>[:<device-on-container>][:<permissions>]
(e.g. device /dev/sdc:/dev/xvdc:rwm)
type: list
elements: str
device_read_bps:
description:
- Limit read rate (bytes per second) from a device
(e.g. device-read-bps /dev/sda:1mb)
type: list
device_read_iops:
description:
- Limit read rate (IO per second) from a device
(e.g. device-read-iops /dev/sda:1000)
type: list
device_write_bps:
description:
- Limit write rate (bytes per second) to a device
(e.g. device-write-bps /dev/sda:1mb)
type: list
device_write_iops:
description:
- Limit write rate (IO per second) to a device
(e.g. device-write-iops /dev/sda:1000)
type: list
dns:
description:
- Set custom DNS servers
type: list
elements: str
dns_option:
description:
- Set custom DNS options
type: str
dns_search:
description:
- Set custom DNS search domains (Use dns_search with '' if you don't wish
to set the search domain)
type: str
entrypoint:
description:
- Overwrite the default ENTRYPOINT of the image
type: str
env:
description:
- Set environment variables.
This option allows you to specify arbitrary environment variables that
are available for the process that will be launched inside of the
container.
type: dict
env_file:
description:
- Read in a line delimited file of environment variables
type: path
env_host:
description:
- Use all current host environment variables in container.
Defaults to false.
type: bool
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the
dictionary. Each host name will be added to the container's
``/etc/hosts`` file.
type: dict
aliases:
- add_hosts
expose:
description:
- Expose a port, or a range of ports (e.g. expose "3300-3310") to set up
port redirection on the host system.
type: list
elements: str
aliases:
- exposed
- exposed_ports
force_restart:
description:
- Force restart of container.
type: bool
default: False
aliases:
- restart
gidmap:
description:
- Run the container in a new user namespace using the supplied mapping.
type: str
group_add:
description:
- Add additional groups to run as
type: list
healthcheck:
description:
- Set or alter a healthcheck command for a container.
type: str
healthcheck_interval:
description:
- Set an interval for the healthchecks
(a value of disable results in no automatic timer setup)
(default "30s")
type: str
healthcheck_retries:
description:
- The number of retries allowed before a healthcheck is considered to be
unhealthy. The default value is 3.
type: int
healthcheck_start_period:
description:
- The initialization time needed for a container to bootstrap.
The value can be expressed in time format like 2m3s. The default value
is 0s
type: str
healthcheck_timeout:
description:
- The maximum time allowed to complete the healthcheck before an interval
is considered failed. Like start-period, the value can be expressed in
a time format such as 1m22s. The default value is 30s
type: str
hostname:
description:
- Container host name. Sets the container host name that is available
inside the container.
type: str
http_proxy:
description:
- By default proxy environment variables are passed into the container if
set for the podman process. This can be disabled by setting the
http_proxy option to false. The environment variables passed in
include http_proxy, https_proxy, ftp_proxy, no_proxy, and also the
upper case versions of those.
Defaults to true
type: bool
image_volume:
description:
- Tells podman how to handle the builtin image volumes.
The options are bind, tmpfs, or ignore (default bind)
type: str
choices:
- 'bind'
- 'tmpfs'
- 'ignore'
image_strict:
description:
- Whether to compare images in idempotency by taking into account a full
name with registry and namespaces.
type: bool
default: False
init:
description:
- Run an init inside the container that forwards signals and reaps
processes.
type: str
init_path:
description:
- Path to the container-init binary.
type: str
interactive:
description:
      - Keep STDIN open even if not attached. The default is false.
type: bool
ip:
description:
- Specify a static IP address for the container, for example
'10.88.64.128'.
Can only be used if no additional CNI networks to join were specified
via 'network:', and if the container is not joining another container's
network namespace via 'network container:<name|id>'.
The address must be within the default CNI network's pool
(default 10.88.0.0/16).
type: str
ipc:
description:
- Default is to create a private IPC namespace (POSIX SysV IPC) for the
container
type: str
kernel_memory:
description:
- Kernel memory limit
(format <number>[<unit>], where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
label:
description:
- Add metadata to a container, pass dictionary of label names and values
type: dict
label_file:
description:
- Read in a line delimited file of labels
type: str
log_driver:
description:
- Logging driver. Used to set the log driver for the container.
For example log_driver "k8s-file".
type: str
choices:
- k8s-file
- journald
- json-file
log_opt:
description:
- Logging driver specific options. Used to set the path to the container
log file. For example log_opt
"path=/var/log/container/mycontainer.json"
type: str
aliases:
- log_options
memory:
description:
- Memory limit (format 10k, where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
memory_reservation:
description:
- Memory soft limit (format 100m, where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
memory_swap:
description:
- A limit value equal to memory plus swap. Must be used with the -m
(--memory) flag.
The swap LIMIT should always be larger than -m (--memory) value.
By default, the swap LIMIT will be set to double the value of --memory
Note - idempotency is supported for integers only.
type: str
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer
between 0 and 100.
type: int
mount:
description:
- Attach a filesystem mount to the container. bind or tmpfs
For example mount
"type=bind,source=/path/on/host,destination=/path/in/container"
type: str
network:
description:
- Set the Network mode for the container
* bridge create a network stack on the default bridge
* none no networking
* container:<name|id> reuse another container's network stack
* host use the podman host network stack.
* <network-name>|<network-id> connect to a user-defined network
* ns:<path> path to a network namespace to join
* slirp4netns use slirp4netns to create a user network stack.
This is the default for rootless containers
type: list
elements: str
aliases:
- net
no_hosts:
description:
- Do not create /etc/hosts for the container
Default is false.
type: bool
oom_kill_disable:
description:
- Whether to disable OOM Killer for the container or not.
Default is false.
type: bool
oom_score_adj:
description:
- Tune the host's OOM preferences for containers (accepts -1000 to 1000)
type: int
pid:
description:
- Set the PID mode for the container
type: str
pids_limit:
description:
- Tune the container's pids limit. Set -1 to have unlimited pids for the
container.
type: str
pod:
description:
- Run container in an existing pod.
        If you want podman to make the pod for you, prefix the pod name
with "new:"
type: str
privileged:
description:
- Give extended privileges to this container. The default is false.
type: bool
publish:
description:
- Publish a container's port, or range of ports, to the host.
Format - ip:hostPort:containerPort | ip::containerPort |
hostPort:containerPort | containerPort
type: list
elements: str
aliases:
- ports
- published
- published_ports
publish_all:
description:
- Publish all exposed ports to random ports on the host interfaces. The
default is false.
type: bool
read_only:
description:
- Mount the container's root filesystem as read only. Default is false
type: bool
read_only_tmpfs:
description:
- If container is running in --read-only mode, then mount a read-write
tmpfs on /run, /tmp, and /var/tmp. The default is true
type: bool
recreate:
description:
- Use with present and started states to force the re-creation of an
existing container.
type: bool
default: False
restart_policy:
description:
- Restart policy to follow when containers exit.
Restart policy will not take effect if a container is stopped via the
podman kill or podman stop commands. Valid values are
* no - Do not restart containers on exit
* on-failure[:max_retries] - Restart containers when they exit with a
non-0 exit code, retrying indefinitely
or until the optional max_retries count is hit
* always - Restart containers when they exit, regardless of status,
retrying indefinitely
type: str
rm:
description:
- Automatically remove the container when it exits. The default is false.
type: bool
aliases:
- remove
rootfs:
description:
- If true, the first argument refers to an exploded container on the file
        system. The default is false.
type: bool
security_opt:
description:
- Security Options. For example security_opt "seccomp=unconfined"
type: list
elements: str
shm_size:
description:
- Size of /dev/shm. The format is <number><unit>. number must be greater
than 0.
Unit is optional and can be b (bytes), k (kilobytes), m(megabytes), or
g (gigabytes).
If you omit the unit, the system uses bytes. If you omit the size
entirely, the system uses 64m
type: str
sig_proxy:
description:
- Proxy signals sent to the podman run command to the container process.
SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is true.
type: bool
stop_signal:
description:
- Signal to stop a container. Default is SIGTERM.
type: int
stop_timeout:
description:
- Timeout (in seconds) to stop a container. Default is 10.
type: int
subgidname:
description:
- Run the container in a new user namespace using the map with 'name' in
the /etc/subgid file.
type: str
subuidname:
description:
- Run the container in a new user namespace using the map with 'name' in
the /etc/subuid file.
type: str
sysctl:
description:
- Configure namespaced kernel parameters at runtime
type: dict
systemd:
description:
- Run container in systemd mode. The default is true.
type: bool
tmpfs:
description:
- Create a tmpfs mount. For example tmpfs
"/tmp" "rw,size=787448k,mode=1777"
type: dict
tty:
description:
- Allocate a pseudo-TTY. The default is false.
type: bool
uidmap:
description:
- Run the container in a new user namespace using the supplied mapping.
type: list
ulimit:
description:
- Ulimit options
type: list
user:
description:
- Sets the username or UID used and optionally the groupname or GID for
the specified command.
type: str
userns:
description:
- Set the user namespace mode for the container.
It defaults to the PODMAN_USERNS environment variable.
An empty value means user namespaces are disabled.
type: str
uts:
description:
- Set the UTS mode for the container
type: str
volume:
description:
- Create a bind mount. If you specify, volume /HOST-DIR:/CONTAINER-DIR,
podman bind mounts /HOST-DIR in the host to /CONTAINER-DIR in the
podman container.
type: list
elements: str
aliases:
- volumes
volumes_from:
description:
- Mount volumes from the specified container(s).
type: list
elements: str
workdir:
description:
- Working directory inside the container.
The default working directory for running binaries within a container
is the root directory (/).
type: str
"""
EXAMPLES = """
- name: Run container
podman_container:
name: container
image: quay.io/bitnami/wildfly
state: started
- name: Create a data container
podman_container:
name: mydata
image: busybox
volume:
- /tmp/data
- name: Re-create a redis container
podman_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
expose:
- 6379
volumes_from:
- mydata
- name: Restart a container
podman_container:
name: myapplication
image: redis
state: started
restart: yes
etc_hosts:
other: "127.0.0.1"
restart_policy: "no"
device: "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: "ssssh"
BOOLEAN_KEY: "yes"
- name: Container present
podman_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: "sleep 1d"
- name: Stop a container
podman_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
podman_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
podman_container:
name: ohno
state: absent
- name: Writing output
podman_container:
name: myservice
image: busybox
log_options: path=/var/log/container/mycontainer.json
log_driver: k8s-file
"""
RETURN = """
container:
description:
- Facts representing the current state of the container. Matches the
podman inspection output.
- Note that facts are part of the registered vars since Ansible 2.8. For
compatibility reasons, the facts
are also accessible directly as C(podman_container). Note that the
returned fact will be removed in Ansible 2.12.
- Empty if C(state) is I(absent).
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [
"sh"
],
"BoundingCaps": [
"CAP_CHOWN",
...
],
"Config": {
"Annotations": {
"io.kubernetes.cri-o.ContainerType": "sandbox",
"io.kubernetes.cri-o.TTY": "false"
},
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"sh"
],
"Domainname": "",
"Entrypoint": "",
"Env": [
"PATH=/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
"HOSTNAME=",
"container=podman"
],
"Hostname": "",
"Image": "docker.io/library/busybox:latest",
"Labels": null,
"OpenStdin": false,
"StdinOnce": false,
"StopSignal": 15,
"Tty": false,
"User": {
"gid": 0,
"uid": 0
},
"Volumes": null,
"WorkingDir": "/"
},
"ConmonPidFile": "...",
"Created": "2019-06-17T19:13:09.873858307+03:00",
"Dependencies": [],
"Driver": "overlay",
"EffectiveCaps": [
"CAP_CHOWN",
...
],
"ExecIDs": [],
"ExitCommand": [
"/usr/bin/podman",
"--root",
...
],
"GraphDriver": {
...
},
"HostConfig": {
...
},
"HostnamePath": "...",
"HostsPath": "...",
"ID": "...",
"Image": "...",
"ImageName": "docker.io/library/busybox:latest",
"IsInfra": false,
"LogPath": "/tmp/container/mycontainer.json",
"MountLabel": "system_u:object_r:container_file_t:s0:c282,c782",
"Mounts": [
...
],
"Name": "myservice",
"Namespace": "",
"NetworkSettings": {
"Bridge": "",
...
},
"Path": "sh",
"ProcessLabel": "system_u:system_r:container_t:s0:c282,c782",
"ResolvConfPath": "...",
"RestartCount": 0,
"Rootfs": "",
"State": {
"Dead": false,
"Error": "",
"ExitCode": 0,
"FinishedAt": "2019-06-17T19:13:10.157518963+03:00",
"Healthcheck": {
"FailingStreak": 0,
"Log": null,
"Status": ""
},
"OOMKilled": false,
"OciVersion": "1.0.1-dev",
"Paused": false,
"Pid": 4083,
"Restarting": false,
"Running": false,
"StartedAt": "2019-06-17T19:13:10.152479729+03:00",
"Status": "exited"
},
"StaticDir": "..."
...
}'
"""
class PodmanModuleParams:
"""Creates list of arguments for podman CLI command.
Arguments:
action {str} -- action type from 'run', 'stop', 'create', 'delete',
'start'
params {dict} -- dictionary of module parameters
"""
def __init__(self, action, params, podman_version, module):
self.params = params
self.action = action
self.podman_version = podman_version
self.module = module
def construct_command_from_params(self):
"""Create a podman command from given module parameters.
Returns:
list -- list of byte strings for Popen command
"""
if self.action in ['start', 'stop', 'delete']:
return self.start_stop_delete()
if self.action in ['create', 'run']:
cmd = [self.action, '--name', self.params['name']]
all_param_methods = [func for func in dir(self)
if callable(getattr(self, func))
and func.startswith("addparam")]
params_set = (i for i in self.params if self.params[i] is not None)
for param in params_set:
func_name = "_".join(["addparam", param])
if func_name in all_param_methods:
cmd = getattr(self, func_name)(cmd)
cmd.append(self.params['image'])
if self.params['command']:
if isinstance(self.params['command'], list):
cmd += self.params['command']
else:
cmd += self.params['command'].split()
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
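    # Illustrative example (assumption, nothing is executed here): for
    # action='run' and params like {'name': 'web', 'image': 'nginx',
    # 'env': {'A': 'b'}}, the addparam_* dispatch above assembles roughly
    #     [b'run', b'--name', b'web', b'--env', b'A=b', b'nginx']
    # i.e. one addparam_<param> method is looked up per non-None parameter.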
def start_stop_delete(self):
if self.action in ['stop', 'start']:
cmd = [self.action, self.params['name']]
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
if self.action == 'delete':
cmd = ['rm', '-f', self.params['name']]
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
def check_version(self, param, minv=None, maxv=None):
if minv and LooseVersion(minv) > LooseVersion(
self.podman_version):
self.module.fail_json(msg="Parameter %s is supported from podman "
"version %s only! Current version is %s" % (
param, minv, self.podman_version))
if maxv and LooseVersion(maxv) < LooseVersion(
self.podman_version):
self.module.fail_json(msg="Parameter %s is supported till podman "
"version %s only! Current version is %s" % (
param, minv, self.podman_version))
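    # Illustrative example (assumption): check_version('--cgroups',
    # minv='1.6.0') calls fail_json() when the detected podman version is,
    # say, 1.5.1, so flags unsupported by the local podman never reach the CLI.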
def addparam_annotation(self, c):
for annotate in self.params['annotation'].items():
c += ['--annotation', '='.join(annotate)]
return c
def addparam_authfile(self, c):
return c + ['--authfile', self.params['authfile']]
def addparam_blkio_weight(self, c):
return c + ['--blkio-weight', self.params['blkio_weight']]
def addparam_blkio_weight_device(self, c):
for blkio in self.params['blkio_weight_device'].items():
c += ['--blkio-weight-device', ':'.join(blkio)]
return c
def addparam_cap_add(self, c):
for cap_add in self.params['cap_add']:
c += ['--cap-add', cap_add]
return c
def addparam_cap_drop(self, c):
for cap_drop in self.params['cap_drop']:
c += ['--cap-drop', cap_drop]
return c
def addparam_cgroups(self, c):
self.check_version('--cgroups', minv='1.6.0')
return c + ['--cgroups=%s' % self.params['cgroups']]
def addparam_cgroupns(self, c):
self.check_version('--cgroupns', minv='1.6.2')
return c + ['--cgroupns=%s' % self.params['cgroupns']]
def addparam_cgroup_parent(self, c):
return c + ['--cgroup-parent', self.params['cgroup_parent']]
def addparam_cidfile(self, c):
return c + ['--cidfile', self.params['cidfile']]
def addparam_conmon_pidfile(self, c):
return c + ['--conmon-pidfile', self.params['conmon_pidfile']]
def addparam_cpu_period(self, c):
return c + ['--cpu-period', self.params['cpu_period']]
def addparam_cpu_rt_period(self, c):
return c + ['--cpu-rt-period', self.params['cpu_rt_period']]
def addparam_cpu_rt_runtime(self, c):
return c + ['--cpu-rt-runtime', self.params['cpu_rt_runtime']]
def addparam_cpu_shares(self, c):
return c + ['--cpu-shares', self.params['cpu_shares']]
def addparam_cpus(self, c):
return c + ['--cpus', self.params['cpus']]
def addparam_cpuset_cpus(self, c):
return c + ['--cpuset-cpus', self.params['cpuset_cpus']]
def addparam_cpuset_mems(self, c):
return c + ['--cpuset-mems', self.params['cpuset_mems']]
def addparam_detach(self, c):
return c + ['--detach=%s' % self.params['detach']]
def addparam_detach_keys(self, c):
return c + ['--detach-keys', self.params['detach_keys']]
def addparam_device(self, c):
for dev in self.params['device']:
c += ['--device', dev]
return c
def addparam_device_read_bps(self, c):
for dev in self.params['device_read_bps']:
c += ['--device-read-bps', dev]
return c
def addparam_device_read_iops(self, c):
for dev in self.params['device_read_iops']:
c += ['--device-read-iops', dev]
return c
def addparam_device_write_bps(self, c):
for dev in self.params['device_write_bps']:
c += ['--device-write-bps', dev]
return c
def addparam_device_write_iops(self, c):
for dev in self.params['device_write_iops']:
c += ['--device-write-iops', dev]
return c
def addparam_dns(self, c):
return c + ['--dns', ','.join(self.params['dns'])]
def addparam_dns_option(self, c):
return c + ['--dns-option', self.params['dns_option']]
def addparam_dns_search(self, c):
return c + ['--dns-search', self.params['dns_search']]
def addparam_entrypoint(self, c):
return c + ['--entrypoint', self.params['entrypoint']]
def addparam_env(self, c):
for env_value in self.params['env'].items():
c += ['--env',
b"=".join([to_bytes(k, errors='surrogate_or_strict')
for k in env_value])]
return c
def addparam_env_file(self, c):
return c + ['--env-file', self.params['env_file']]
def addparam_env_host(self, c):
self.check_version('--env-host', minv='1.5.0')
return c + ['--env-host=%s' % self.params['env_host']]
def addparam_etc_hosts(self, c):
for host_ip in self.params['etc_hosts'].items():
c += ['--add-host', ':'.join(host_ip)]
return c
def addparam_expose(self, c):
for exp in self.params['expose']:
c += ['--expose', exp]
return c
def addparam_gidmap(self, c):
return c + ['--gidmap', self.params['gidmap']]
def addparam_group_add(self, c):
for g in self.params['group_add']:
c += ['--group-add', g]
return c
def addparam_healthcheck(self, c):
return c + ['--healthcheck-command', self.params['healthcheck']]
def addparam_healthcheck_interval(self, c):
return c + ['--healthcheck-interval',
self.params['healthcheck_interval']]
def addparam_healthcheck_retries(self, c):
return c + ['--healthcheck-retries',
self.params['healthcheck_retries']]
def addparam_healthcheck_start_period(self, c):
return c + ['--healthcheck-start-period',
self.params['healthcheck_start_period']]
def addparam_healthcheck_timeout(self, c):
return c + ['--healthcheck-timeout',
self.params['healthcheck_timeout']]
def addparam_hostname(self, c):
return c + ['--hostname', self.params['hostname']]
def addparam_http_proxy(self, c):
return c + ['--http-proxy=%s' % self.params['http_proxy']]
def addparam_image_volume(self, c):
return c + ['--image-volume', self.params['image_volume']]
def addparam_init(self, c):
return c + ['--init', self.params['init']]
def addparam_init_path(self, c):
return c + ['--init-path', self.params['init_path']]
def addparam_interactive(self, c):
return c + ['--interactive=%s' % self.params['interactive']]
def addparam_ip(self, c):
return c + ['--ip', self.params['ip']]
def addparam_ipc(self, c):
return c + ['--ipc', self.params['ipc']]
def addparam_kernel_memory(self, c):
return c + ['--kernel-memory', self.params['kernel_memory']]
def addparam_label(self, c):
for label in self.params['label'].items():
c += ['--label', b'='.join([to_bytes(l, errors='surrogate_or_strict')
for l in label])]
return c
def addparam_label_file(self, c):
return c + ['--label-file', self.params['label_file']]
def addparam_log_driver(self, c):
return c + ['--log-driver', self.params['log_driver']]
def addparam_log_opt(self, c):
return c + ['--log-opt', self.params['log_opt']]
def addparam_memory(self, c):
return c + ['--memory', self.params['memory']]
def addparam_memory_reservation(self, c):
return c + ['--memory-reservation', self.params['memory_reservation']]
def addparam_memory_swap(self, c):
return c + ['--memory-swap', self.params['memory_swap']]
def addparam_memory_swappiness(self, c):
return c + ['--memory-swappiness', self.params['memory_swappiness']]
def addparam_mount(self, c):
return c + ['--mount', self.params['mount']]
def addparam_network(self, c):
return c + ['--network', ",".join(self.params['network'])]
def addparam_no_hosts(self, c):
return c + ['--no-hosts=%s' % self.params['no_hosts']]
def addparam_oom_kill_disable(self, c):
return c + ['--oom-kill-disable=%s' % self.params['oom_kill_disable']]
def addparam_oom_score_adj(self, c):
return c + ['--oom-score-adj', self.params['oom_score_adj']]
def addparam_pid(self, c):
return c + ['--pid', self.params['pid']]
def addparam_pids_limit(self, c):
return c + ['--pids-limit', self.params['pids_limit']]
def addparam_pod(self, c):
return c + ['--pod', self.params['pod']]
def addparam_privileged(self, c):
return c + ['--privileged=%s' % self.params['privileged']]
def addparam_publish(self, c):
for pub in self.params['publish']:
c += ['--publish', pub]
return c
def addparam_publish_all(self, c):
return c + ['--publish-all=%s' % self.params['publish_all']]
def addparam_read_only(self, c):
return c + ['--read-only=%s' % self.params['read_only']]
def addparam_read_only_tmpfs(self, c):
return c + ['--read-only-tmpfs=%s' % self.params['read_only_tmpfs']]
def addparam_restart_policy(self, c):
return c + ['--restart=%s' % self.params['restart_policy']]
def addparam_rm(self, c):
if self.params['rm']:
c += ['--rm']
return c
def addparam_rootfs(self, c):
return c + ['--rootfs=%s' % self.params['rootfs']]
def addparam_security_opt(self, c):
for secopt in self.params['security_opt']:
c += ['--security-opt', secopt]
return c
def addparam_shm_size(self, c):
return c + ['--shm-size', self.params['shm_size']]
def addparam_sig_proxy(self, c):
return c + ['--sig-proxy=%s' % self.params['sig_proxy']]
def addparam_stop_signal(self, c):
return c + ['--stop-signal', self.params['stop_signal']]
def addparam_stop_timeout(self, c):
return c + ['--stop-timeout', self.params['stop_timeout']]
def addparam_subgidname(self, c):
return c + ['--subgidname', self.params['subgidname']]
def addparam_subuidname(self, c):
return c + ['--subuidname', self.params['subuidname']]
def addparam_sysctl(self, c):
for sysctl in self.params['sysctl'].items():
c += ['--sysctl',
b"=".join([to_bytes(k, errors='surrogate_or_strict')
for k in sysctl])]
return c
def addparam_systemd(self, c):
return c + ['--systemd=%s' % self.params['systemd']]
def addparam_tmpfs(self, c):
for tmpfs in self.params['tmpfs'].items():
c += ['--tmpfs', ':'.join(tmpfs)]
return c
def addparam_tty(self, c):
return c + ['--tty=%s' % self.params['tty']]
def addparam_uidmap(self, c):
for uidmap in self.params['uidmap']:
c += ['--uidmap', uidmap]
return c
def addparam_ulimit(self, c):
for u in self.params['ulimit']:
c += ['--ulimit', u]
return c
def addparam_user(self, c):
return c + ['--user', self.params['user']]
def addparam_userns(self, c):
return c + ['--userns', self.params['userns']]
def addparam_uts(self, c):
return c + ['--uts', self.params['uts']]
def addparam_volume(self, c):
for vol in self.params['volume']:
if vol:
c += ['--volume', vol]
return c
def addparam_volumes_from(self, c):
for vol in self.params['volumes_from']:
c += ['--volumes-from', vol]
return c
def addparam_workdir(self, c):
return c + ['--workdir', self.params['workdir']]
# Add your own args for podman command
def addparam_cmd_args(self, c):
return c + self.params['cmd_args']
class PodmanDefaults:
def __init__(self, module, podman_version):
self.module = module
self.version = podman_version
self.defaults = {
"blkio_weight": 0,
"cgroups": "default",
"cgroup_parent": "",
"cidfile": "",
"cpus": 0.0,
"cpu_shares": 0,
"cpu_quota": 0,
"cpu_period": 0,
"cpu_rt_runtime": 0,
"cpu_rt_period": 0,
"cpuset_cpus": "",
"cpuset_mems": "",
"detach": True,
"device": [],
"env_host": False,
"etc_hosts": {},
"group_add": [],
"healthcheck": "",
"ipc": "",
"kernelmemory": "0",
"log_driver": "k8s-file",
"memory": "0",
"memory_swap": "0",
"memory_reservation": "0",
# "memory_swappiness": -1,
"no_hosts": False,
# libpod issue with networks in inspection
"network": ["default"],
"oom_score_adj": 0,
"pid": "",
"privileged": False,
"rm": False,
"security_opt": [],
"stop_signal": 15,
"tty": False,
"user": "",
"uts": "",
"volume": [],
"workdir": "/",
}
def default_dict(self):
# make here any changes to self.defaults related to podman version
return self.defaults
class PodmanContainerDiff:
def __init__(self, module, info, podman_version):
self.module = module
self.version = podman_version
self.default_dict = None
self.info = yaml.safe_load(json.dumps(info).lower())
self.params = self.defaultize()
self.diff = {'before': {}, 'after': {}}
self.non_idempotent = {
'env_file',
'env_host',
"ulimit", # Defaults depend on user and platform, impossible to guess
}
def defaultize(self):
params_with_defaults = {}
self.default_dict = PodmanDefaults(
self.module, self.version).default_dict()
for p in self.module.params:
if self.module.params[p] is None and p in self.default_dict:
params_with_defaults[p] = self.default_dict[p]
else:
params_with_defaults[p] = self.module.params[p]
return params_with_defaults
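    # Illustrative example (assumption): if a task omits ``log_driver``,
    # defaultize() fills in "k8s-file" from PodmanDefaults, so the comparison
    # below runs against that default instead of against None.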
def _diff_update_and_compare(self, param_name, before, after):
if before != after:
self.diff['before'].update({param_name: before})
self.diff['after'].update({param_name: after})
return True
return False
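    # Illustrative example (assumption): if the running container reports
    # memory "0" and the task asks for "1g", a diffparam method calls
    # _diff_update_and_compare('memory', '0', '1g'), which records
    #     {'before': {'memory': '0'}, 'after': {'memory': '1g'}}
    # and returns True, marking the container as changed.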
def diffparam_annotation(self):
before = self.info['config']['annotations'] or {}
after = before.copy()
if self.module.params['annotation'] is not None:
after.update(self.params['annotation'])
return self._diff_update_and_compare('annotation', before, after)
def diffparam_env_host(self):
        # It's impossible to get from inspect, recreate it if not default
before = False
after = self.params['env_host']
return self._diff_update_and_compare('env_host', before, after)
def diffparam_blkio_weight(self):
before = self.info['hostconfig']['blkioweight']
after = self.params['blkio_weight']
return self._diff_update_and_compare('blkio_weight', before, after)
def diffparam_blkio_weight_device(self):
before = self.info['hostconfig']['blkioweightdevice']
if before == [] and self.module.params['blkio_weight_device'] is None:
after = []
else:
after = self.params['blkio_weight_device']
return self._diff_update_and_compare('blkio_weight_device', before, after)
def diffparam_cap_add(self):
before = self.info['effectivecaps'] or []
after = []
if self.module.params['cap_add'] is not None:
after += ["cap_" + i.lower()
for i in self.module.params['cap_add']]
after += before
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('cap_add', before, after)
def diffparam_cap_drop(self):
before = self.info['effectivecaps'] or []
after = before[:]
if self.module.params['cap_drop'] is not None:
for c in ["cap_" + i.lower() for i in self.module.params['cap_drop']]:
if c in after:
after.remove(c)
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('cap_drop', before, after)
def diffparam_cgroup_parent(self):
before = self.info['hostconfig']['cgroupparent']
after = self.params['cgroup_parent']
return self._diff_update_and_compare('cgroup_parent', before, after)
def diffparam_cgroups(self):
# Cgroups output is not supported in all versions
if 'cgroups' in self.info['hostconfig']:
before = self.info['hostconfig']['cgroups']
after = self.params['cgroups']
return self._diff_update_and_compare('cgroups', before, after)
return False
def diffparam_cidfile(self):
before = self.info['hostconfig']['containeridfile']
after = self.params['cidfile']
return self._diff_update_and_compare('cidfile', before, after)
def diffparam_command(self):
# TODO(sshnaidm): to inspect image to get the default command
if self.module.params['command'] is not None:
before = self.info['config']['cmd']
after = self.params['command']
if isinstance(after, str):
after = [i.lower() for i in after.split()]
elif isinstance(after, list):
after = [i.lower() for i in after]
return self._diff_update_and_compare('command', before, after)
return False
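    # Illustrative example (assumption): a task value of command "sleep 1d" is
    # normalized to ['sleep', '1d'] before being compared with the inspected
    # 'cmd' list, so string and list forms do not cause false differences.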
def diffparam_conmon_pidfile(self):
before = self.info['conmonpidfile']
if self.module.params['conmon_pidfile'] is None:
after = before
else:
after = self.params['conmon_pidfile']
return self._diff_update_and_compare('conmon_pidfile', before, after)
def diffparam_cpu_period(self):
before = self.info['hostconfig']['cpuperiod']
after = self.params['cpu_period']
return self._diff_update_and_compare('cpu_period', before, after)
def diffparam_cpu_rt_period(self):
before = self.info['hostconfig']['cpurealtimeperiod']
after = self.params['cpu_rt_period']
return self._diff_update_and_compare('cpu_rt_period', before, after)
def diffparam_cpu_rt_runtime(self):
before = self.info['hostconfig']['cpurealtimeruntime']
after = self.params['cpu_rt_runtime']
return self._diff_update_and_compare('cpu_rt_runtime', before, after)
def diffparam_cpu_shares(self):
before = self.info['hostconfig']['cpushares']
after = self.params['cpu_shares']
return self._diff_update_and_compare('cpu_shares', before, after)
def diffparam_cpus(self):
before = int(self.info['hostconfig']['nanocpus']) / 1000000000
after = self.params['cpus']
return self._diff_update_and_compare('cpus', before, after)
def diffparam_cpuset_cpus(self):
before = self.info['hostconfig']['cpusetcpus']
after = self.params['cpuset_cpus']
return self._diff_update_and_compare('cpuset_cpus', before, after)
def diffparam_cpuset_mems(self):
before = self.info['hostconfig']['cpusetmems']
after = self.params['cpuset_mems']
return self._diff_update_and_compare('cpuset_mems', before, after)
def diffparam_device(self):
before = [":".join([i['pathonhost'], i['pathincontainer']])
for i in self.info['hostconfig']['devices']]
after = [":".join(i.split(":")[:2]) for i in self.params['device']]
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('devices', before, after)
def diffparam_device_read_bps(self):
before = self.info['hostconfig']['blkiodevicereadbps'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_read_bps'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_read_bps', before, after)
def diffparam_device_read_iops(self):
before = self.info['hostconfig']['blkiodevicereadiops'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_read_iops'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_read_iops', before, after)
def diffparam_device_write_bps(self):
before = self.info['hostconfig']['blkiodevicewritebps'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_write_bps'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_write_bps', before, after)
def diffparam_device_write_iops(self):
before = self.info['hostconfig']['blkiodevicewriteiops'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_write_iops'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_write_iops', before, after)
# Limited idempotency, it can't guess default values
def diffparam_env(self):
env_before = self.info['config']['env'] or {}
before = {i.split("=")[0]: i.split("=")[1] for i in env_before}
after = before.copy()
if self.params['env']:
after.update({
str(k).lower(): str(v).lower()
for k, v in self.params['env'].items()
})
return self._diff_update_and_compare('env', before, after)
def diffparam_etc_hosts(self):
if self.info['hostconfig']['extrahosts']:
before = dict([i.split(":") for i in self.info['hostconfig']['extrahosts']])
else:
before = {}
after = self.params['etc_hosts']
return self._diff_update_and_compare('etc_hosts', before, after)
def diffparam_group_add(self):
before = self.info['hostconfig']['groupadd']
after = self.params['group_add']
return self._diff_update_and_compare('group_add', before, after)
# Healthcheck is only defined in container config if a healthcheck
# was configured; otherwise the config key isn't part of the config.
def diffparam_healthcheck(self):
if 'healthcheck' in self.info['config']:
# the "test" key is a list of 2 items where the first one is
# "CMD-SHELL" and the second one is the actual healthcheck command.
before = self.info['config']['healthcheck']['test'][1]
else:
before = ''
after = self.params['healthcheck'] or before
return self._diff_update_and_compare('healthcheck', before, after)
# Because the hostname is randomly generated, this parameter has only partial idempotency.
def diffparam_hostname(self):
before = self.info['config']['hostname']
after = self.params['hostname'] or before
return self._diff_update_and_compare('hostname', before, after)
def diffparam_image(self):
# TODO(sshnaidm): for strict image compare mode use SHAs
before = self.info['config']['image']
after = self.params['image']
mode = self.params['image_strict']
if mode is None or not mode:
# In idempotency 'lite mode', assume all images from different registries are the same
before = before.replace(":latest", "")
after = after.replace(":latest", "")
before = before.split("/")[-1]
after = after.split("/")[-1]
return self._diff_update_and_compare('image', before, after)
def diffparam_ipc(self):
before = self.info['hostconfig']['ipcmode']
after = self.params['ipc']
return self._diff_update_and_compare('ipc', before, after)
def diffparam_label(self):
before = self.info['config']['labels'] or {}
after = before.copy()
if self.params['label']:
after.update({
str(k).lower(): str(v).lower()
for k, v in self.params['label'].items()
})
return self._diff_update_and_compare('label', before, after)
def diffparam_log_driver(self):
before = self.info['hostconfig']['logconfig']['type']
after = self.params['log_driver']
return self._diff_update_and_compare('log_driver', before, after)
# Parameter has limited idempotency, unable to guess the default log_path
def diffparam_log_opt(self):
before = self.info['logpath']
if self.module.params['log_opt'] in [None, '']:
after = before
else:
after = self.params['log_opt'].split("=")[1]
return self._diff_update_and_compare('log_opt', before, after)
def diffparam_memory(self):
before = str(self.info['hostconfig']['memory'])
after = self.params['memory']
return self._diff_update_and_compare('memory', before, after)
def diffparam_memory_swap(self):
# By default it's twice memory parameter
before = str(self.info['hostconfig']['memoryswap'])
after = self.params['memory_swap']
if (self.module.params['memory_swap'] is None
and self.params['memory'] != 0
and self.params['memory'].isdigit()):
after = str(int(self.params['memory']) * 2)
return self._diff_update_and_compare('memory_swap', before, after)
def diffparam_memory_reservation(self):
before = str(self.info['hostconfig']['memoryreservation'])
after = self.params['memory_reservation']
return self._diff_update_and_compare('memory_reservation', before, after)
def diffparam_network(self):
before = [self.info['hostconfig']['networkmode']]
after = self.params['network']
return self._diff_update_and_compare('network', before, after)
def diffparam_no_hosts(self):
before = not bool(self.info['hostspath'])
after = self.params['no_hosts']
if self.params['network'] == ['none']:
after = True
return self._diff_update_and_compare('no_hosts', before, after)
def diffparam_oom_score_adj(self):
before = self.info['hostconfig']['oomscoreadj']
after = self.params['oom_score_adj']
return self._diff_update_and_compare('oom_score_adj', before, after)
def diffparam_privileged(self):
before = self.info['hostconfig']['privileged']
after = self.params['privileged']
return self._diff_update_and_compare('privileged', before, after)
def diffparam_pid(self):
before = self.info['hostconfig']['pidmode']
after = self.params['pid']
return self._diff_update_and_compare('pid', before, after)
def diffparam_rm(self):
before = self.info['hostconfig']['autoremove']
after = self.params['rm']
return self._diff_update_and_compare('rm', before, after)
def diffparam_security_opt(self):
before = self.info['hostconfig']['securityopt']
after = self.params['security_opt']
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('security_opt', before, after)
def diffparam_stop_signal(self):
before = self.info['config']['stopsignal']
after = self.params['stop_signal']
return self._diff_update_and_compare('stop_signal', before, after)
def diffparam_tty(self):
before = self.info['config']['tty']
after = self.params['tty']
return self._diff_update_and_compare('tty', before, after)
def diffparam_user(self):
before = self.info['config']['user']
if self.module.params['user'] is None and before:
after = before
else:
after = self.params['user']
return self._diff_update_and_compare('user', before, after)
def diffparam_uts(self):
before = self.info['hostconfig']['utsmode']
after = self.params['uts']
return self._diff_update_and_compare('uts', before, after)
def diffparam_volume(self):
before = self.info['mounts']
if before:
volumes = []
for m in before:
if m['type'] == 'volume':
volumes.append([m['name'], m['destination']])
else:
volumes.append([m['source'], m['destination']])
before = [":".join(v) for v in volumes]
# Ignore volumes option for idempotency
after = [":".join(v.split(":")[:2]) for v in self.params['volume']]
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('volume', before, after)
def diffparam_volumes_from(self):
before = self.info['hostconfig']['volumesfrom'] or []
after = self.params['volumes_from'] or []
return self._diff_update_and_compare('volumes_from', before, after)
def diffparam_workdir(self):
before = self.info['config']['workingdir']
after = self.params['workdir']
return self._diff_update_and_compare('workdir', before, after)
def is_different(self):
diff_func_list = [func for func in dir(self)
if callable(getattr(self, func)) and func.startswith(
"diffparam")]
fail_fast = not bool(self.module._diff)
different = False
for func_name in diff_func_list:
dff_func = getattr(self, func_name)
if dff_func():
if fail_fast:
return True
else:
different = True
# Check non idempotent parameters
for p in self.non_idempotent:
if self.module.params[p] is not None and self.module.params[p] not in [{}, [], '']:
different = True
return different
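# Illustrative flow (not from the original source): ensure_image_exists(module, 'busybox')
# runs `podman image exists busybox` and, if that returns a non-zero rc,
# `podman image pull busybox`, failing the module when the pull also fails.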
def ensure_image_exists(module, image):
"""If image is passed, ensure it exists, if not - pull it or fail.
Arguments:
module {obj} -- ansible module object
image {str} -- name of image
Returns:
list -- list of image actions - if it pulled or nothing was done
"""
image_actions = []
module_exec = module.params['executable']
if not image:
return image_actions
rc, out, err = module.run_command([module_exec, 'image', 'exists', image])
if rc == 0:
return image_actions
rc, out, err = module.run_command([module_exec, 'image', 'pull', image])
if rc != 0:
module.fail_json(msg="Can't pull image %s" % image, stdout=out,
stderr=err)
image_actions.append("pulled image %s" % image)
return image_actions
class PodmanContainer:
"""Perform container tasks.
Manages a podman container, inspects it and checks its current state.
"""
def __init__(self, module, name):
"""Initialize PodmanContainer class.
Arguments:
module {obj} -- ansible module object
name {str} -- name of container
"""
super(PodmanContainer, self).__init__()
self.module = module
self.name = name
self.stdout, self.stderr = '', ''
self.info = self.get_info()
self.version = self._get_podman_version()
self.diff = {}
self.actions = []
@property
def exists(self):
"""Check if container exists."""
return bool(self.info != {})
@property
def different(self):
"""Check if container is different."""
diffcheck = PodmanContainerDiff(self.module, self.info, self.version)
is_different = diffcheck.is_different()
diffs = diffcheck.diff
if self.module._diff and is_different and diffs['before'] and diffs['after']:
self.diff['before'] = "\n".join(
["%s - %s" % (k, v) for k, v in sorted(
diffs['before'].items())]) + "\n"
self.diff['after'] = "\n".join(
["%s - %s" % (k, v) for k, v in sorted(
diffs['after'].items())]) + "\n"
return is_different
@property
def running(self):
"""Return True if container is running now."""
return self.exists and self.info['State']['Running']
@property
def stopped(self):
"""Return True if container exists and is not running now."""
return self.exists and not self.info['State']['Running']
def get_info(self):
"""Inspect container and gather info about it."""
rc, out, err = self.module.run_command(
[self.module.params['executable'], b'container', b'inspect', self.name])
return json.loads(out)[0] if rc == 0 else {}
def _get_podman_version(self):
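# Parses `podman --version` output, e.g. 'podman version 1.6.4' -> '1.6.4'.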
rc, out, err = self.module.run_command(
[self.module.params['executable'], b'--version'])
if rc != 0 or not out or "version" not in out:
self.module.fail_json(msg="%s run failed!" % self.module.params['executable'])
return out.split("version")[1].strip()
def _perform_action(self, action):
"""Perform action with container.
Arguments:
action {str} -- action to perform - start, create, stop, run,
delete
"""
b_command = PodmanModuleParams(action,
self.module.params,
self.version,
self.module,
).construct_command_from_params()
full_cmd = " ".join([self.module.params['executable']]
+ [to_native(i) for i in b_command])
self.module.log("PODMAN-CONTAINER-DEBUG: %s" % full_cmd)
self.actions.append(full_cmd)
if not self.module.check_mode:
rc, out, err = self.module.run_command(
[self.module.params['executable'], b'container'] + b_command,
expand_user_and_vars=False)
self.stdout = out
self.stderr = err
if rc != 0:
self.module.fail_json(
msg="Can't %s container %s" % (action, self.name),
stdout=out, stderr=err)
def run(self):
"""Run the container."""
self._perform_action('run')
def delete(self):
"""Delete the container."""
self._perform_action('delete')
def stop(self):
"""Stop the container."""
self._perform_action('stop')
def start(self):
"""Start the container."""
self._perform_action('start')
def create(self):
"""Create the container."""
self._perform_action('create')
def recreate(self):
"""Recreate the container."""
self.delete()
self.run()
def restart(self):
"""Restart the container."""
self.stop()
self.run()
class PodmanManager:
"""Module manager class.
Defines, based on the module parameters, which actions should be applied to the container
"""
def __init__(self, module):
"""Initialize PodmanManager class.
Arguments:
module {obj} -- ansible module object
"""
super(PodmanManager, self).__init__()
self.module = module
self.results = {
'changed': False,
'actions': [],
'container': {},
}
self.name = self.module.params['name']
self.executable = \
self.module.get_bin_path(self.module.params['executable'],
required=True)
self.image = self.module.params['image']
image_actions = ensure_image_exists(self.module, self.image)
self.results['actions'] += image_actions
self.state = self.module.params['state']
self.restart = self.module.params['force_restart']
self.recreate = self.module.params['recreate']
self.container = PodmanContainer(self.module, self.name)
def update_container_result(self, changed=True):
"""Inspect the current container, update results with last info, exit.
Keyword Arguments:
changed {bool} -- whether any action was performed
(default: {True})
"""
facts = self.container.get_info() if changed else self.container.info
out, err = self.container.stdout, self.container.stderr
self.results.update({'changed': changed, 'container': facts,
'podman_actions': self.container.actions},
stdout=out, stderr=err)
if self.container.diff:
self.results.update({'diff': self.container.diff})
if self.module.params['debug']:
self.results.update({'podman_version': self.container.version})
self.module.exit_json(**self.results)
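# Decision sketch for state 'started' (mirrors the branches below):
#   running and (different or recreate)  -> recreate the container
#   running and unchanged                -> restart only if force_restart is set
#   not existing                         -> run it
#   stopped and different                -> recreate the container
#   stopped and unchanged                -> start it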
def make_started(self):
"""Run actions if desired state is 'started'."""
if self.container.running and \
(self.container.different or self.recreate):
self.container.recreate()
self.results['actions'].append('recreated %s' %
self.container.name)
self.update_container_result()
elif self.container.running and not self.container.different:
if self.restart:
self.container.restart()
self.results['actions'].append('restarted %s' %
self.container.name)
self.update_container_result()
self.update_container_result(changed=False)
elif not self.container.exists:
self.container.run()
self.results['actions'].append('started %s' % self.container.name)
self.update_container_result()
elif self.container.stopped and self.container.different:
self.container.recreate()
self.results['actions'].append('recreated %s' %
self.container.name)
self.update_container_result()
elif self.container.stopped and not self.container.different:
self.container.start()
self.results['actions'].append('started %s' % self.container.name)
self.update_container_result()
def make_stopped(self):
"""Run actions if desired state is 'stopped'."""
if not self.container.exists and not self.image:
self.module.fail_json(msg='Cannot create container when image'
' is not specified!')
if not self.container.exists:
self.container.create()
self.results['actions'].append('created %s' % self.container.name)
self.update_container_result()
if self.container.stopped:
self.update_container_result(changed=False)
elif self.container.running:
self.container.stop()
self.results['actions'].append('stopped %s' % self.container.name)
self.update_container_result()
def make_absent(self):
"""Run actions if desired state is 'absent'."""
if not self.container.exists:
self.results.update({'changed': False})
elif self.container.exists:
self.container.delete()
self.results['actions'].append('deleted %s' % self.container.name)
self.results.update({'changed': True})
self.results.update({'container': {},
'podman_actions': self.container.actions})
self.module.exit_json(**self.results)
def execute(self):
"""Execute the desired action according to map of actions & states."""
states_map = {
'present': self.make_started,
'started': self.make_started,
'absent': self.make_absent,
'stopped': self.make_stopped
}
process_action = states_map[self.state]
process_action()
self.module.fail_json(msg="Unexpected logic error happened, "
"please contact maintainers ASAP!")
def main():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
mutually_exclusive=(
['no_hosts', 'etc_hosts'],
),
supports_check_mode=True,
)
# work on input vars
if module.params['state'] in ['started', 'present'] and \
not module.params['image']:
module.fail_json(msg="State '%s' required image to be configured!" %
module.params['state'])
PodmanManager(module).execute()
if __name__ == '__main__':
main()
| <filename>tripleo_ansible/ansible_plugins/modules/podman_container.py
#!/usr/bin/python
# Copyright (c) 2019 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# flake8: noqa: E501
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
from distutils.version import LooseVersion
import yaml
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
module: podman_container
author:
- "<NAME> (@sshnaidm)"
version_added: '2.9'
short_description: Manage podman containers
notes: []
description:
- Start, stop, restart and manage Podman containers
requirements:
- "Podman installed on host"
options:
name:
description:
- Name of the container
required: True
type: str
executable:
description:
- Path to C(podman) executable if it is not in the C($PATH) on the
machine running C(podman)
default: 'podman'
type: str
state:
description:
- I(absent) - A container matching the specified name will be stopped and
removed.
- I(present) - Asserts the existence of a container matching the name and
any provided configuration parameters. If no container matches the
name, a container will be created. If a container matches the name but
the provided configuration does not match, the container will be
updated, if it can be. If it cannot be updated, it will be removed and
re-created with the requested config. Image version will be taken into
account when comparing configuration. Use the recreate option to force
the re-creation of the matching container.
- I(started) - Asserts there is a running container matching the name and
any provided configuration. If no container matches the name, a
container will be created and started. Use recreate to always re-create
a matching container, even if it is running. Use force_restart to force
a matching container to be stopped and restarted.
- I(stopped) - Asserts that the container is first I(present), and then
if the container is running moves it to a stopped state.
type: str
default: started
choices:
- absent
- present
- stopped
- started
image:
description:
- Repository path (or image name) and tag used to create the container.
If an image is not found, the image will be pulled from the registry.
If no tag is included, C(latest) will be used.
- Can also be an image ID. If this is the case, the image is assumed to
be available locally.
type: str
annotation:
description:
- Add an annotation to the container. The format is key value, multiple
times.
type: dict
authfile:
description:
- Path of the authentication file. Default is
``${XDG_RUNTIME_DIR}/containers/auth.json``
(Not available for remote commands) You can also override the default
path of the authentication file by setting the ``REGISTRY_AUTH_FILE``
environment variable. ``export REGISTRY_AUTH_FILE=path``
type: path
blkio_weight:
description:
- Block IO weight (relative weight) accepts a weight value between 10 and
1000
type: int
blkio_weight_device:
description:
- Block IO weight (relative device weight, format DEVICE_NAME:WEIGHT).
type: dict
cap_add:
description:
- List of capabilities to add to the container.
type: list
elements: str
cap_drop:
description:
- List of capabilities to drop from the container.
type: list
elements: str
cgroup_parent:
description:
- Path to cgroups under which the cgroup for the container will be
created.
If the path is not absolute, the path is considered to be relative to
the cgroups path of the init process. Cgroups will be created if they
do not already exist.
type: path
cgroupns:
description:
- Set the cgroup namespace mode for the container.
type: str
cgroups:
description:
- Determines whether the container will create CGroups.
Valid values are enabled and disabled, with the default being enabled.
The disabled option will force the container to not create CGroups,
and thus conflicts with CGroup options cgroupns and cgroup-parent.
type: str
choices:
- default
- disabled
cidfile:
description:
- Write the container ID to the file
type: path
cmd_args:
description:
- Any additional command options you want to pass to the podman command,
cmd_args - ['--other-param', 'value']
Be aware the module doesn't support idempotency if this is set.
type: list
elements: str
conmon_pidfile:
description:
- Write the pid of the conmon process to a file.
conmon runs in a separate process than Podman,
so this is necessary when using systemd to restart Podman containers.
type: path
command:
description:
- Override command of container. Can be a string or a list.
type: raw
cpu_period:
description:
- Limit the CPU CFS (Completely Fair Scheduler) period in microseconds
type: int
cpu_rt_period:
description:
- Limit the CPU real-time period in microseconds.
Limit the container's Real Time CPU usage. This flag tells the kernel to
restrict the container's Real Time CPU usage to the period you specify.
type: int
cpu_rt_runtime:
description:
- Limit the CPU real-time runtime in microseconds.
This flag tells the kernel to limit the amount of time in a given CPU
period Real Time tasks may consume.
type: int
cpu_shares:
description:
- CPU shares (relative weight)
type: int
cpus:
description:
- Number of CPUs. The default is 0.0 which means no limit.
type: str
cpuset_cpus:
description:
- CPUs in which to allow execution (0-3, 0,1)
type: str
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
effective on NUMA systems.
type: str
detach:
description:
- Run container in detach mode
type: bool
default: True
debug:
description:
- Return additional information which can be helpful for investigations.
type: bool
default: False
detach_keys:
description:
- Override the key sequence for detaching a container. Format is a single
character or ctrl-value
type: str
device:
description:
- Add a host device to the container.
The format is <device-on-host>[:<device-on-container>][:<permissions>]
(e.g. device /dev/sdc:/dev/xvdc:rwm)
type: list
elements: str
device_read_bps:
description:
- Limit read rate (bytes per second) from a device
(e.g. device-read-bps /dev/sda:1mb)
type: list
device_read_iops:
description:
- Limit read rate (IO per second) from a device
(e.g. device-read-iops /dev/sda:1000)
type: list
device_write_bps:
description:
- Limit write rate (bytes per second) to a device
(e.g. device-write-bps /dev/sda:1mb)
type: list
device_write_iops:
description:
- Limit write rate (IO per second) to a device
(e.g. device-write-iops /dev/sda:1000)
type: list
dns:
description:
- Set custom DNS servers
type: list
elements: str
dns_option:
description:
- Set custom DNS options
type: str
dns_search:
description:
- Set custom DNS search domains (Use dns_search with '' if you don't wish
to set the search domain)
type: str
entrypoint:
description:
- Overwrite the default ENTRYPOINT of the image
type: str
env:
description:
- Set environment variables.
This option allows you to specify arbitrary environment variables that
are available for the process that will be launched inside of the
container.
type: dict
env_file:
description:
- Read in a line delimited file of environment variables
type: path
env_host:
description:
- Use all current host environment variables in container.
Defaults to false.
type: bool
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the
dictionary. Each host name will be added to the container's
``/etc/hosts`` file.
type: dict
aliases:
- add_hosts
expose:
description:
- Expose a port, or a range of ports (e.g. expose "3300-3310") to set up
port redirection on the host system.
type: list
elements: str
aliases:
- exposed
- exposed_ports
force_restart:
description:
- Force restart of container.
type: bool
default: False
aliases:
- restart
gidmap:
description:
- Run the container in a new user namespace using the supplied mapping.
type: str
group_add:
description:
- Add additional groups to run as
type: list
healthcheck:
description:
- Set or alter a healthcheck command for a container.
type: str
healthcheck_interval:
description:
- Set an interval for the healthchecks
(a value of disable results in no automatic timer setup)
(default "30s")
type: str
healthcheck_retries:
description:
- The number of retries allowed before a healthcheck is considered to be
unhealthy. The default value is 3.
type: int
healthcheck_start_period:
description:
- The initialization time needed for a container to bootstrap.
The value can be expressed in time format like 2m3s. The default value
is 0s
type: str
healthcheck_timeout:
description:
- The maximum time allowed to complete the healthcheck before an interval
is considered failed. Like start-period, the value can be expressed in
a time format such as 1m22s. The default value is 30s
type: str
hostname:
description:
- Container host name. Sets the container host name that is available
inside the container.
type: str
http_proxy:
description:
- By default proxy environment variables are passed into the container if
set for the podman process. This can be disabled by setting the
http_proxy option to false. The environment variables passed in
include http_proxy, https_proxy, ftp_proxy, no_proxy, and also the
upper case versions of those.
Defaults to true
type: bool
image_volume:
description:
- Tells podman how to handle the builtin image volumes.
The options are bind, tmpfs, or ignore (default bind)
type: str
choices:
- 'bind'
- 'tmpfs'
- 'ignore'
image_strict:
description:
- Whether to compare images in idempotency by taking into account a full
name with registry and namespaces.
type: bool
default: False
init:
description:
- Run an init inside the container that forwards signals and reaps
processes.
type: str
init_path:
description:
- Path to the container-init binary.
type: str
interactive:
description:
- Keep STDIN open even if not attached. The default is false.
type: bool
ip:
description:
- Specify a static IP address for the container, for example
'10.88.64.128'.
Can only be used if no additional CNI networks to join were specified
via 'network:', and if the container is not joining another container's
network namespace via 'network container:<name|id>'.
The address must be within the default CNI network's pool
(default 10.88.0.0/16).
type: str
ipc:
description:
- Default is to create a private IPC namespace (POSIX SysV IPC) for the
container
type: str
kernel_memory:
description:
- Kernel memory limit
(format <number>[<unit>], where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
label:
description:
- Add metadata to a container, pass dictionary of label names and values
type: dict
label_file:
description:
- Read in a line delimited file of labels
type: str
log_driver:
description:
- Logging driver. Used to set the log driver for the container.
For example log_driver "k8s-file".
type: str
choices:
- k8s-file
- journald
- json-file
log_opt:
description:
- Logging driver specific options. Used to set the path to the container
log file. For example log_opt
"path=/var/log/container/mycontainer.json"
type: str
aliases:
- log_options
memory:
description:
- Memory limit (format 10k, where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
memory_reservation:
description:
- Memory soft limit (format 100m, where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
memory_swap:
description:
- A limit value equal to memory plus swap. Must be used with the -m
(--memory) flag.
The swap LIMIT should always be larger than -m (--memory) value.
By default, the swap LIMIT will be set to double the value of --memory
Note - idempotency is supported for integers only.
type: str
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer
between 0 and 100.
type: int
mount:
description:
- Attach a filesystem mount to the container. bind or tmpfs
For example mount
"type=bind,source=/path/on/host,destination=/path/in/container"
type: str
network:
description:
- Set the Network mode for the container
* bridge create a network stack on the default bridge
* none no networking
* container:<name|id> reuse another container's network stack
* host use the podman host network stack.
* <network-name>|<network-id> connect to a user-defined network
* ns:<path> path to a network namespace to join
* slirp4netns use slirp4netns to create a user network stack.
This is the default for rootless containers
type: list
elements: str
aliases:
- net
no_hosts:
description:
- Do not create /etc/hosts for the container
Default is false.
type: bool
oom_kill_disable:
description:
- Whether to disable OOM Killer for the container or not.
Default is false.
type: bool
oom_score_adj:
description:
- Tune the host's OOM preferences for containers (accepts -1000 to 1000)
type: int
pid:
description:
- Set the PID mode for the container
type: str
pids_limit:
description:
- Tune the container's pids limit. Set -1 to have unlimited pids for the
container.
type: str
pod:
description:
- Run container in an existing pod.
If you want podman to make the pod for you, prefix the pod name
with "new:"
type: str
privileged:
description:
- Give extended privileges to this container. The default is false.
type: bool
publish:
description:
- Publish a container's port, or range of ports, to the host.
Format - ip:hostPort:containerPort | ip::containerPort |
hostPort:containerPort | containerPort
type: list
elements: str
aliases:
- ports
- published
- published_ports
publish_all:
description:
- Publish all exposed ports to random ports on the host interfaces. The
default is false.
type: bool
read_only:
description:
- Mount the container's root filesystem as read only. Default is false
type: bool
read_only_tmpfs:
description:
- If container is running in --read-only mode, then mount a read-write
tmpfs on /run, /tmp, and /var/tmp. The default is true
type: bool
recreate:
description:
- Use with present and started states to force the re-creation of an
existing container.
type: bool
default: False
restart_policy:
description:
- Restart policy to follow when containers exit.
Restart policy will not take effect if a container is stopped via the
podman kill or podman stop commands. Valid values are
* no - Do not restart containers on exit
* on-failure[:max_retries] - Restart containers when they exit with a
non-0 exit code, retrying indefinitely
or until the optional max_retries count is hit
* always - Restart containers when they exit, regardless of status,
retrying indefinitely
type: str
rm:
description:
- Automatically remove the container when it exits. The default is false.
type: bool
aliases:
- remove
rootfs:
description:
- If true, the first argument refers to an exploded container on the file
system. The default is false.
type: bool
security_opt:
description:
- Security Options. For example security_opt "seccomp=unconfined"
type: list
elements: str
shm_size:
description:
- Size of /dev/shm. The format is <number><unit>. number must be greater
than 0.
Unit is optional and can be b (bytes), k (kilobytes), m(megabytes), or
g (gigabytes).
If you omit the unit, the system uses bytes. If you omit the size
entirely, the system uses 64m
type: str
sig_proxy:
description:
- Proxy signals sent to the podman run command to the container process.
SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is true.
type: bool
stop_signal:
description:
- Signal to stop a container. Default is SIGTERM.
type: int
stop_timeout:
description:
- Timeout (in seconds) to stop a container. Default is 10.
type: int
subgidname:
description:
- Run the container in a new user namespace using the map with 'name' in
the /etc/subgid file.
type: str
subuidname:
description:
- Run the container in a new user namespace using the map with 'name' in
the /etc/subuid file.
type: str
sysctl:
description:
- Configure namespaced kernel parameters at runtime
type: dict
systemd:
description:
- Run container in systemd mode. The default is true.
type: bool
tmpfs:
description:
- Create a tmpfs mount. For example tmpfs
"/tmp" "rw,size=787448k,mode=1777"
type: dict
tty:
description:
- Allocate a pseudo-TTY. The default is false.
type: bool
uidmap:
description:
- Run the container in a new user namespace using the supplied mapping.
type: list
ulimit:
description:
- Ulimit options
type: list
user:
description:
- Sets the username or UID used and optionally the groupname or GID for
the specified command.
type: str
userns:
description:
- Set the user namespace mode for the container.
It defaults to the PODMAN_USERNS environment variable.
An empty value means user namespaces are disabled.
type: str
uts:
description:
- Set the UTS mode for the container
type: str
volume:
description:
- Create a bind mount. If you specify, volume /HOST-DIR:/CONTAINER-DIR,
podman bind mounts /HOST-DIR in the host to /CONTAINER-DIR in the
podman container.
type: list
elements: str
aliases:
- volumes
volumes_from:
description:
- Mount volumes from the specified container(s).
type: list
elements: str
workdir:
description:
- Working directory inside the container.
The default working directory for running binaries within a container
is the root directory (/).
type: str
"""
EXAMPLES = """
- name: Run container
podman_container:
name: container
image: quay.io/bitnami/wildfly
state: started
- name: Create a data container
podman_container:
name: mydata
image: busybox
volume:
- /tmp/data
- name: Re-create a redis container
podman_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
expose:
- 6379
volumes_from:
- mydata
- name: Restart a container
podman_container:
name: myapplication
image: redis
state: started
restart: yes
etc_hosts:
other: "127.0.0.1"
restart_policy: "no"
device: "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: "ssssh"
BOOLEAN_KEY: "yes"
- name: Container present
podman_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: "sleep 1d"
- name: Stop a container
podman_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
podman_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
podman_container:
name: ohno
state: absent
- name: Writing output
podman_container:
name: myservice
image: busybox
log_options: path=/var/log/container/mycontainer.json
log_driver: k8s-file
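# Additional illustrative examples (not part of the original docs); the
# parameters used here follow the option descriptions above.
- name: Run a container with a healthcheck (illustrative)
  podman_container:
    name: web
    image: nginx
    state: started
    healthcheck: "curl -f http://localhost/ || exit 1"
    healthcheck_interval: 30s
    healthcheck_retries: 3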
"""
RETURN = """
container:
description:
- Facts representing the current state of the container. Matches the
podman inspection output.
- Note that facts are part of the registered vars since Ansible 2.8. For
compatibility reasons, the facts
are also accessible directly as C(podman_container). Note that the
returned fact will be removed in Ansible 2.12.
- Empty if C(state) is I(absent).
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [
"sh"
],
"BoundingCaps": [
"CAP_CHOWN",
...
],
"Config": {
"Annotations": {
"io.kubernetes.cri-o.ContainerType": "sandbox",
"io.kubernetes.cri-o.TTY": "false"
},
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"sh"
],
"Domainname": "",
"Entrypoint": "",
"Env": [
"PATH=/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
"HOSTNAME=",
"container=podman"
],
"Hostname": "",
"Image": "docker.io/library/busybox:latest",
"Labels": null,
"OpenStdin": false,
"StdinOnce": false,
"StopSignal": 15,
"Tty": false,
"User": {
"gid": 0,
"uid": 0
},
"Volumes": null,
"WorkingDir": "/"
},
"ConmonPidFile": "...",
"Created": "2019-06-17T19:13:09.873858307+03:00",
"Dependencies": [],
"Driver": "overlay",
"EffectiveCaps": [
"CAP_CHOWN",
...
],
"ExecIDs": [],
"ExitCommand": [
"/usr/bin/podman",
"--root",
...
],
"GraphDriver": {
...
},
"HostConfig": {
...
},
"HostnamePath": "...",
"HostsPath": "...",
"ID": "...",
"Image": "...",
"ImageName": "docker.io/library/busybox:latest",
"IsInfra": false,
"LogPath": "/tmp/container/mycontainer.json",
"MountLabel": "system_u:object_r:container_file_t:s0:c282,c782",
"Mounts": [
...
],
"Name": "myservice",
"Namespace": "",
"NetworkSettings": {
"Bridge": "",
...
},
"Path": "sh",
"ProcessLabel": "system_u:system_r:container_t:s0:c282,c782",
"ResolvConfPath": "...",
"RestartCount": 0,
"Rootfs": "",
"State": {
"Dead": false,
"Error": "",
"ExitCode": 0,
"FinishedAt": "2019-06-17T19:13:10.157518963+03:00",
"Healthcheck": {
"FailingStreak": 0,
"Log": null,
"Status": ""
},
"OOMKilled": false,
"OciVersion": "1.0.1-dev",
"Paused": false,
"Pid": 4083,
"Restarting": false,
"Running": false,
"StartedAt": "2019-06-17T19:13:10.152479729+03:00",
"Status": "exited"
},
"StaticDir": "..."
...
}'
"""
class PodmanModuleParams:
"""Creates list of arguments for podman CLI command.
Arguments:
action {str} -- action type from 'run', 'stop', 'create', 'delete',
'start'
params {dict} -- dictionary of module parameters
"""
def __init__(self, action, params, podman_version, module):
self.params = params
self.action = action
self.podman_version = podman_version
self.module = module
def construct_command_from_params(self):
"""Create a podman command from given module parameters.
Returns:
list -- list of byte strings for Popen command
"""
if self.action in ['start', 'stop', 'delete']:
return self.start_stop_delete()
if self.action in ['create', 'run']:
cmd = [self.action, '--name', self.params['name']]
all_param_methods = [func for func in dir(self)
if callable(getattr(self, func))
and func.startswith("addparam")]
params_set = (i for i in self.params if self.params[i] is not None)
for param in params_set:
func_name = "_".join(["addparam", param])
if func_name in all_param_methods:
cmd = getattr(self, func_name)(cmd)
cmd.append(self.params['image'])
if self.params['command']:
if isinstance(self.params['command'], list):
cmd += self.params['command']
else:
cmd += self.params['command'].split()
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
def start_stop_delete(self):
if self.action in ['stop', 'start']:
cmd = [self.action, self.params['name']]
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
if self.action == 'delete':
cmd = ['rm', '-f', self.params['name']]
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
def check_version(self, param, minv=None, maxv=None):
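# e.g. check_version('--cgroups', minv='1.6.0') fails the module when the
# detected podman version is older than 1.6.0.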
if minv and LooseVersion(minv) > LooseVersion(
self.podman_version):
self.module.fail_json(msg="Parameter %s is supported from podman "
"version %s only! Current version is %s" % (
param, minv, self.podman_version))
if maxv and LooseVersion(maxv) < LooseVersion(
self.podman_version):
self.module.fail_json(msg="Parameter %s is supported till podman "
"version %s only! Current version is %s" % (
param, minv, self.podman_version))
def addparam_annotation(self, c):
for annotate in self.params['annotation'].items():
c += ['--annotation', '='.join(annotate)]
return c
def addparam_authfile(self, c):
return c + ['--authfile', self.params['authfile']]
def addparam_blkio_weight(self, c):
return c + ['--blkio-weight', self.params['blkio_weight']]
def addparam_blkio_weight_device(self, c):
for blkio in self.params['blkio_weight_device'].items():
c += ['--blkio-weight-device', ':'.join(blkio)]
return c
def addparam_cap_add(self, c):
for cap_add in self.params['cap_add']:
c += ['--cap-add', cap_add]
return c
def addparam_cap_drop(self, c):
for cap_drop in self.params['cap_drop']:
c += ['--cap-drop', cap_drop]
return c
def addparam_cgroups(self, c):
self.check_version('--cgroups', minv='1.6.0')
return c + ['--cgroups=%s' % self.params['cgroups']]
def addparam_cgroupns(self, c):
self.check_version('--cgroupns', minv='1.6.2')
return c + ['--cgroupns=%s' % self.params['cgroupns']]
def addparam_cgroup_parent(self, c):
return c + ['--cgroup-parent', self.params['cgroup_parent']]
def addparam_cidfile(self, c):
return c + ['--cidfile', self.params['cidfile']]
def addparam_conmon_pidfile(self, c):
return c + ['--conmon-pidfile', self.params['conmon_pidfile']]
def addparam_cpu_period(self, c):
return c + ['--cpu-period', self.params['cpu_period']]
def addparam_cpu_rt_period(self, c):
return c + ['--cpu-rt-period', self.params['cpu_rt_period']]
def addparam_cpu_rt_runtime(self, c):
return c + ['--cpu-rt-runtime', self.params['cpu_rt_runtime']]
def addparam_cpu_shares(self, c):
return c + ['--cpu-shares', self.params['cpu_shares']]
def addparam_cpus(self, c):
return c + ['--cpus', self.params['cpus']]
def addparam_cpuset_cpus(self, c):
return c + ['--cpuset-cpus', self.params['cpuset_cpus']]
def addparam_cpuset_mems(self, c):
return c + ['--cpuset-mems', self.params['cpuset_mems']]
def addparam_detach(self, c):
return c + ['--detach=%s' % self.params['detach']]
def addparam_detach_keys(self, c):
return c + ['--detach-keys', self.params['detach_keys']]
def addparam_device(self, c):
for dev in self.params['device']:
c += ['--device', dev]
return c
def addparam_device_read_bps(self, c):
for dev in self.params['device_read_bps']:
c += ['--device-read-bps', dev]
return c
def addparam_device_read_iops(self, c):
for dev in self.params['device_read_iops']:
c += ['--device-read-iops', dev]
return c
def addparam_device_write_bps(self, c):
for dev in self.params['device_write_bps']:
c += ['--device-write-bps', dev]
return c
def addparam_device_write_iops(self, c):
for dev in self.params['device_write_iops']:
c += ['--device-write-iops', dev]
return c
def addparam_dns(self, c):
return c + ['--dns', ','.join(self.params['dns'])]
def addparam_dns_option(self, c):
return c + ['--dns-option', self.params['dns_option']]
def addparam_dns_search(self, c):
return c + ['--dns-search', self.params['dns_search']]
def addparam_entrypoint(self, c):
return c + ['--entrypoint', self.params['entrypoint']]
def addparam_env(self, c):
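# e.g. env={'SECRET_KEY': 'ssssh'} appends ['--env', b'SECRET_KEY=ssssh'].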
for env_value in self.params['env'].items():
c += ['--env',
b"=".join([to_bytes(k, errors='surrogate_or_strict')
for k in env_value])]
return c
def addparam_env_file(self, c):
return c + ['--env-file', self.params['env_file']]
def addparam_env_host(self, c):
self.check_version('--env-host', minv='1.5.0')
return c + ['--env-host=%s' % self.params['env_host']]
def addparam_etc_hosts(self, c):
for host_ip in self.params['etc_hosts'].items():
c += ['--add-host', ':'.join(host_ip)]
return c
def addparam_expose(self, c):
for exp in self.params['expose']:
c += ['--expose', exp]
return c
def addparam_gidmap(self, c):
return c + ['--gidmap', self.params['gidmap']]
def addparam_group_add(self, c):
for g in self.params['group_add']:
c += ['--group-add', g]
return c
def addparam_healthcheck(self, c):
return c + ['--healthcheck-command', self.params['healthcheck']]
def addparam_healthcheck_interval(self, c):
return c + ['--healthcheck-interval',
self.params['healthcheck_interval']]
def addparam_healthcheck_retries(self, c):
return c + ['--healthcheck-retries',
self.params['healthcheck_retries']]
def addparam_healthcheck_start_period(self, c):
return c + ['--healthcheck-start-period',
self.params['healthcheck_start_period']]
def addparam_healthcheck_timeout(self, c):
return c + ['--healthcheck-timeout',
self.params['healthcheck_timeout']]
def addparam_hostname(self, c):
return c + ['--hostname', self.params['hostname']]
def addparam_http_proxy(self, c):
return c + ['--http-proxy=%s' % self.params['http_proxy']]
def addparam_image_volume(self, c):
return c + ['--image-volume', self.params['image_volume']]
def addparam_init(self, c):
return c + ['--init', self.params['init']]
def addparam_init_path(self, c):
return c + ['--init-path', self.params['init_path']]
def addparam_interactive(self, c):
return c + ['--interactive=%s' % self.params['interactive']]
def addparam_ip(self, c):
return c + ['--ip', self.params['ip']]
def addparam_ipc(self, c):
return c + ['--ipc', self.params['ipc']]
def addparam_kernel_memory(self, c):
return c + ['--kernel-memory', self.params['kernel_memory']]
def addparam_label(self, c):
for label in self.params['label'].items():
c += ['--label', b'='.join([to_bytes(l, errors='surrogate_or_strict')
for l in label])]
return c
def addparam_label_file(self, c):
return c + ['--label-file', self.params['label_file']]
def addparam_log_driver(self, c):
return c + ['--log-driver', self.params['log_driver']]
def addparam_log_opt(self, c):
return c + ['--log-opt', self.params['log_opt']]
def addparam_memory(self, c):
return c + ['--memory', self.params['memory']]
def addparam_memory_reservation(self, c):
return c + ['--memory-reservation', self.params['memory_reservation']]
def addparam_memory_swap(self, c):
return c + ['--memory-swap', self.params['memory_swap']]
def addparam_memory_swappiness(self, c):
return c + ['--memory-swappiness', self.params['memory_swappiness']]
def addparam_mount(self, c):
return c + ['--mount', self.params['mount']]
def addparam_network(self, c):
return c + ['--network', ",".join(self.params['network'])]
def addparam_no_hosts(self, c):
return c + ['--no-hosts=%s' % self.params['no_hosts']]
def addparam_oom_kill_disable(self, c):
return c + ['--oom-kill-disable=%s' % self.params['oom_kill_disable']]
def addparam_oom_score_adj(self, c):
return c + ['--oom-score-adj', self.params['oom_score_adj']]
def addparam_pid(self, c):
return c + ['--pid', self.params['pid']]
def addparam_pids_limit(self, c):
return c + ['--pids-limit', self.params['pids_limit']]
def addparam_pod(self, c):
return c + ['--pod', self.params['pod']]
def addparam_privileged(self, c):
return c + ['--privileged=%s' % self.params['privileged']]
def addparam_publish(self, c):
for pub in self.params['publish']:
c += ['--publish', pub]
return c
def addparam_publish_all(self, c):
return c + ['--publish-all=%s' % self.params['publish_all']]
def addparam_read_only(self, c):
return c + ['--read-only=%s' % self.params['read_only']]
def addparam_read_only_tmpfs(self, c):
return c + ['--read-only-tmpfs=%s' % self.params['read_only_tmpfs']]
def addparam_restart_policy(self, c):
return c + ['--restart=%s' % self.params['restart_policy']]
def addparam_rm(self, c):
if self.params['rm']:
c += ['--rm']
return c
def addparam_rootfs(self, c):
return c + ['--rootfs=%s' % self.params['rootfs']]
def addparam_security_opt(self, c):
for secopt in self.params['security_opt']:
c += ['--security-opt', secopt]
return c
def addparam_shm_size(self, c):
return c + ['--shm-size', self.params['shm_size']]
def addparam_sig_proxy(self, c):
return c + ['--sig-proxy=%s' % self.params['sig_proxy']]
def addparam_stop_signal(self, c):
return c + ['--stop-signal', self.params['stop_signal']]
def addparam_stop_timeout(self, c):
return c + ['--stop-timeout', self.params['stop_timeout']]
def addparam_subgidname(self, c):
return c + ['--subgidname', self.params['subgidname']]
def addparam_subuidname(self, c):
return c + ['--subuidname', self.params['subuidname']]
def addparam_sysctl(self, c):
for sysctl in self.params['sysctl'].items():
c += ['--sysctl',
b"=".join([to_bytes(k, errors='surrogate_or_strict')
for k in sysctl])]
return c
def addparam_systemd(self, c):
return c + ['--systemd=%s' % self.params['systemd']]
def addparam_tmpfs(self, c):
for tmpfs in self.params['tmpfs'].items():
c += ['--tmpfs', ':'.join(tmpfs)]
return c
def addparam_tty(self, c):
return c + ['--tty=%s' % self.params['tty']]
def addparam_uidmap(self, c):
for uidmap in self.params['uidmap']:
c += ['--uidmap', uidmap]
return c
def addparam_ulimit(self, c):
for u in self.params['ulimit']:
c += ['--ulimit', u]
return c
def addparam_user(self, c):
return c + ['--user', self.params['user']]
def addparam_userns(self, c):
return c + ['--userns', self.params['userns']]
def addparam_uts(self, c):
return c + ['--uts', self.params['uts']]
def addparam_volume(self, c):
for vol in self.params['volume']:
if vol:
c += ['--volume', vol]
return c
def addparam_volumes_from(self, c):
for vol in self.params['volumes_from']:
c += ['--volumes-from', vol]
return c
def addparam_workdir(self, c):
return c + ['--workdir', self.params['workdir']]
# Add your own args for podman command
def addparam_cmd_args(self, c):
return c + self.params['cmd_args']
class PodmanDefaults:
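# Holds the values podman applies when an option is not given; PodmanContainerDiff
# uses them to fill unset module parameters before comparing against inspect output.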
def __init__(self, module, podman_version):
self.module = module
self.version = podman_version
self.defaults = {
"blkio_weight": 0,
"cgroups": "default",
"cgroup_parent": "",
"cidfile": "",
"cpus": 0.0,
"cpu_shares": 0,
"cpu_quota": 0,
"cpu_period": 0,
"cpu_rt_runtime": 0,
"cpu_rt_period": 0,
"cpuset_cpus": "",
"cpuset_mems": "",
"detach": True,
"device": [],
"env_host": False,
"etc_hosts": {},
"group_add": [],
"healthcheck": "",
"ipc": "",
"kernelmemory": "0",
"log_driver": "k8s-file",
"memory": "0",
"memory_swap": "0",
"memory_reservation": "0",
# "memory_swappiness": -1,
"no_hosts": False,
# libpod issue with networks in inspection
"network": ["default"],
"oom_score_adj": 0,
"pid": "",
"privileged": False,
"rm": False,
"security_opt": [],
"stop_signal": 15,
"tty": False,
"user": "",
"uts": "",
"volume": [],
"workdir": "/",
}
def default_dict(self):
# make here any changes to self.defaults related to podman version
return self.defaults
class PodmanContainerDiff:
def __init__(self, module, info, podman_version):
self.module = module
self.version = podman_version
self.default_dict = None
self.info = yaml.safe_load(json.dumps(info).lower())
self.params = self.defaultize()
self.diff = {'before': {}, 'after': {}}
self.non_idempotent = {
'env_file',
'env_host',
"ulimit", # Defaults depend on user and platform, impossible to guess
}
def defaultize(self):
params_with_defaults = {}
self.default_dict = PodmanDefaults(
self.module, self.version).default_dict()
for p in self.module.params:
if self.module.params[p] is None and p in self.default_dict:
params_with_defaults[p] = self.default_dict[p]
else:
params_with_defaults[p] = self.module.params[p]
return params_with_defaults
def _diff_update_and_compare(self, param_name, before, after):
if before != after:
self.diff['before'].update({param_name: before})
self.diff['after'].update({param_name: after})
return True
return False
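# Each diffparam_<name>() below compares a single option: 'before' comes from the
# lower-cased inspect data in self.info, 'after' from the defaultized module params.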
def diffparam_annotation(self):
before = self.info['config']['annotations'] or {}
after = before.copy()
if self.module.params['annotation'] is not None:
after.update(self.params['annotation'])
return self._diff_update_and_compare('annotation', before, after)
def diffparam_env_host(self):
# It's impossible to get this from inspect; recreate it if it's not the default
before = False
after = self.params['env_host']
return self._diff_update_and_compare('env_host', before, after)
def diffparam_blkio_weight(self):
before = self.info['hostconfig']['blkioweight']
after = self.params['blkio_weight']
return self._diff_update_and_compare('blkio_weight', before, after)
def diffparam_blkio_weight_device(self):
before = self.info['hostconfig']['blkioweightdevice']
if before == [] and self.module.params['blkio_weight_device'] is None:
after = []
else:
after = self.params['blkio_weight_device']
return self._diff_update_and_compare('blkio_weight_device', before, after)
def diffparam_cap_add(self):
before = self.info['effectivecaps'] or []
after = []
if self.module.params['cap_add'] is not None:
after += ["cap_" + i.lower()
for i in self.module.params['cap_add']]
after += before
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('cap_add', before, after)
def diffparam_cap_drop(self):
before = self.info['effectivecaps'] or []
after = before[:]
if self.module.params['cap_drop'] is not None:
for c in ["cap_" + i.lower() for i in self.module.params['cap_drop']]:
if c in after:
after.remove(c)
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('cap_drop', before, after)
def diffparam_cgroup_parent(self):
before = self.info['hostconfig']['cgroupparent']
after = self.params['cgroup_parent']
return self._diff_update_and_compare('cgroup_parent', before, after)
def diffparam_cgroups(self):
# Cgroups output is not supported in all versions
if 'cgroups' in self.info['hostconfig']:
before = self.info['hostconfig']['cgroups']
after = self.params['cgroups']
return self._diff_update_and_compare('cgroups', before, after)
return False
def diffparam_cidfile(self):
before = self.info['hostconfig']['containeridfile']
after = self.params['cidfile']
return self._diff_update_and_compare('cidfile', before, after)
def diffparam_command(self):
# TODO(sshnaidm): to inspect image to get the default command
if self.module.params['command'] is not None:
before = self.info['config']['cmd']
after = self.params['command']
if isinstance(after, str):
after = [i.lower() for i in after.split()]
elif isinstance(after, list):
after = [i.lower() for i in after]
return self._diff_update_and_compare('command', before, after)
return False
def diffparam_conmon_pidfile(self):
before = self.info['conmonpidfile']
if self.module.params['conmon_pidfile'] is None:
after = before
else:
after = self.params['conmon_pidfile']
return self._diff_update_and_compare('conmon_pidfile', before, after)
def diffparam_cpu_period(self):
before = self.info['hostconfig']['cpuperiod']
after = self.params['cpu_period']
return self._diff_update_and_compare('cpu_period', before, after)
def diffparam_cpu_rt_period(self):
before = self.info['hostconfig']['cpurealtimeperiod']
after = self.params['cpu_rt_period']
return self._diff_update_and_compare('cpu_rt_period', before, after)
def diffparam_cpu_rt_runtime(self):
before = self.info['hostconfig']['cpurealtimeruntime']
after = self.params['cpu_rt_runtime']
return self._diff_update_and_compare('cpu_rt_runtime', before, after)
def diffparam_cpu_shares(self):
before = self.info['hostconfig']['cpushares']
after = self.params['cpu_shares']
return self._diff_update_and_compare('cpu_shares', before, after)
def diffparam_cpus(self):
before = int(self.info['hostconfig']['nanocpus']) / 1000000000
after = self.params['cpus']
return self._diff_update_and_compare('cpus', before, after)
def diffparam_cpuset_cpus(self):
before = self.info['hostconfig']['cpusetcpus']
after = self.params['cpuset_cpus']
return self._diff_update_and_compare('cpuset_cpus', before, after)
def diffparam_cpuset_mems(self):
before = self.info['hostconfig']['cpusetmems']
after = self.params['cpuset_mems']
return self._diff_update_and_compare('cpuset_mems', before, after)
def diffparam_device(self):
before = [":".join([i['pathonhost'], i['pathincontainer']])
for i in self.info['hostconfig']['devices']]
after = [":".join(i.split(":")[:2]) for i in self.params['device']]
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('devices', before, after)
def diffparam_device_read_bps(self):
before = self.info['hostconfig']['blkiodevicereadbps'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_read_bps'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_read_bps', before, after)
def diffparam_device_read_iops(self):
before = self.info['hostconfig']['blkiodevicereadiops'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_read_iops'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_read_iops', before, after)
def diffparam_device_write_bps(self):
before = self.info['hostconfig']['blkiodevicewritebps'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_write_bps'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_write_bps', before, after)
def diffparam_device_write_iops(self):
before = self.info['hostconfig']['blkiodevicewriteiops'] or []
before = ["%s:%s" % (i['path'], i['rate']) for i in before]
after = self.params['device_write_iops'] or []
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('device_write_iops', before, after)
# Limited idempotency: it can't guess default values
def diffparam_env(self):
env_before = self.info['config']['env'] or {}
before = {i.split("=")[0]: i.split("=")[1] for i in env_before}
after = before.copy()
if self.params['env']:
after.update({
str(k).lower(): str(v).lower()
for k, v in self.params['env'].items()
})
return self._diff_update_and_compare('env', before, after)
def diffparam_etc_hosts(self):
if self.info['hostconfig']['extrahosts']:
before = dict([i.split(":") for i in self.info['hostconfig']['extrahosts']])
else:
before = {}
after = self.params['etc_hosts']
return self._diff_update_and_compare('etc_hosts', before, after)
def diffparam_group_add(self):
before = self.info['hostconfig']['groupadd']
after = self.params['group_add']
return self._diff_update_and_compare('group_add', before, after)
# Healthcheck is only defined in container config if a healthcheck
# was configured; otherwise the config key isn't part of the config.
def diffparam_healthcheck(self):
if 'healthcheck' in self.info['config']:
# the "test" key is a list of 2 items where the first one is
# "CMD-SHELL" and the second one is the actual healthcheck command.
before = self.info['config']['healthcheck']['test'][1]
else:
before = ''
after = self.params['healthcheck'] or before
return self._diff_update_and_compare('healthcheck', before, after)
    # Because the hostname is randomly generated, this parameter has only partial idempotency.
def diffparam_hostname(self):
before = self.info['config']['hostname']
after = self.params['hostname'] or before
return self._diff_update_and_compare('hostname', before, after)
def diffparam_image(self):
# TODO(sshnaidm): for strict image compare mode use SHAs
before = self.info['config']['image']
after = self.params['image']
mode = self.params['image_strict']
if mode is None or not mode:
            # In idempotency 'lite mode', assume all images from different registries are the same
before = before.replace(":latest", "")
after = after.replace(":latest", "")
before = before.split("/")[-1]
after = after.split("/")[-1]
return self._diff_update_and_compare('image', before, after)
def diffparam_ipc(self):
before = self.info['hostconfig']['ipcmode']
after = self.params['ipc']
return self._diff_update_and_compare('ipc', before, after)
def diffparam_label(self):
before = self.info['config']['labels'] or {}
after = before.copy()
if self.params['label']:
after.update({
str(k).lower(): str(v).lower()
for k, v in self.params['label'].items()
})
return self._diff_update_and_compare('label', before, after)
def diffparam_log_driver(self):
before = self.info['hostconfig']['logconfig']['type']
after = self.params['log_driver']
return self._diff_update_and_compare('log_driver', before, after)
# Parameter has limited idempotency, unable to guess the default log_path
def diffparam_log_opt(self):
before = self.info['logpath']
if self.module.params['log_opt'] in [None, '']:
after = before
else:
after = self.params['log_opt'].split("=")[1]
return self._diff_update_and_compare('log_opt', before, after)
def diffparam_memory(self):
before = str(self.info['hostconfig']['memory'])
after = self.params['memory']
return self._diff_update_and_compare('memory', before, after)
def diffparam_memory_swap(self):
        # By default it's twice the memory parameter
before = str(self.info['hostconfig']['memoryswap'])
after = self.params['memory_swap']
if (self.module.params['memory_swap'] is None
and self.params['memory'] != 0
and self.params['memory'].isdigit()):
after = str(int(self.params['memory']) * 2)
return self._diff_update_and_compare('memory_swap', before, after)
def diffparam_memory_reservation(self):
before = str(self.info['hostconfig']['memoryreservation'])
after = self.params['memory_reservation']
return self._diff_update_and_compare('memory_reservation', before, after)
def diffparam_network(self):
before = [self.info['hostconfig']['networkmode']]
after = self.params['network']
return self._diff_update_and_compare('network', before, after)
def diffparam_no_hosts(self):
before = not bool(self.info['hostspath'])
after = self.params['no_hosts']
if self.params['network'] == ['none']:
after = True
return self._diff_update_and_compare('no_hosts', before, after)
def diffparam_oom_score_adj(self):
before = self.info['hostconfig']['oomscoreadj']
after = self.params['oom_score_adj']
return self._diff_update_and_compare('oom_score_adj', before, after)
def diffparam_privileged(self):
before = self.info['hostconfig']['privileged']
after = self.params['privileged']
return self._diff_update_and_compare('privileged', before, after)
def diffparam_pid(self):
before = self.info['hostconfig']['pidmode']
after = self.params['pid']
return self._diff_update_and_compare('pid', before, after)
def diffparam_rm(self):
before = self.info['hostconfig']['autoremove']
after = self.params['rm']
return self._diff_update_and_compare('rm', before, after)
def diffparam_security_opt(self):
before = self.info['hostconfig']['securityopt']
after = self.params['security_opt']
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('security_opt', before, after)
def diffparam_stop_signal(self):
before = self.info['config']['stopsignal']
after = self.params['stop_signal']
return self._diff_update_and_compare('stop_signal', before, after)
def diffparam_tty(self):
before = self.info['config']['tty']
after = self.params['tty']
return self._diff_update_and_compare('tty', before, after)
def diffparam_user(self):
before = self.info['config']['user']
if self.module.params['user'] is None and before:
after = before
else:
after = self.params['user']
return self._diff_update_and_compare('user', before, after)
def diffparam_uts(self):
before = self.info['hostconfig']['utsmode']
after = self.params['uts']
return self._diff_update_and_compare('uts', before, after)
def diffparam_volume(self):
before = self.info['mounts']
if before:
volumes = []
for m in before:
if m['type'] == 'volume':
volumes.append([m['name'], m['destination']])
else:
volumes.append([m['source'], m['destination']])
before = [":".join(v) for v in volumes]
# Ignore volumes option for idempotency
after = [":".join(v.split(":")[:2]) for v in self.params['volume']]
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('volume', before, after)
def diffparam_volumes_from(self):
before = self.info['hostconfig']['volumesfrom'] or []
after = self.params['volumes_from'] or []
return self._diff_update_and_compare('volumes_from', before, after)
def diffparam_workdir(self):
before = self.info['config']['workingdir']
after = self.params['workdir']
return self._diff_update_and_compare('workdir', before, after)
def is_different(self):
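        # Every diffparam_* method is collected by reflection; each one compares a
        # single container parameter between the running container and the task args.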
diff_func_list = [func for func in dir(self)
if callable(getattr(self, func)) and func.startswith(
"diffparam")]
fail_fast = not bool(self.module._diff)
different = False
for func_name in diff_func_list:
dff_func = getattr(self, func_name)
if dff_func():
if fail_fast:
return True
else:
different = True
# Check non idempotent parameters
for p in self.non_idempotent:
if self.module.params[p] is not None and self.module.params[p] not in [{}, [], '']:
different = True
return different
def ensure_image_exists(module, image):
"""If image is passed, ensure it exists, if not - pull it or fail.
Arguments:
module {obj} -- ansible module object
image {str} -- name of image
Returns:
list -- list of image actions - if it pulled or nothing was done
"""
image_actions = []
module_exec = module.params['executable']
if not image:
return image_actions
rc, out, err = module.run_command([module_exec, 'image', 'exists', image])
if rc == 0:
return image_actions
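    # The image is not available locally, so try to pull it from a registry.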
rc, out, err = module.run_command([module_exec, 'image', 'pull', image])
if rc != 0:
module.fail_json(msg="Can't pull image %s" % image, stdout=out,
stderr=err)
image_actions.append("pulled image %s" % image)
return image_actions
class PodmanContainer:
"""Perform container tasks.
Manages podman container, inspects it and checks its current state
"""
def __init__(self, module, name):
"""Initialize PodmanContainer class.
Arguments:
module {obj} -- ansible module object
name {str} -- name of container
"""
super(PodmanContainer, self).__init__()
self.module = module
self.name = name
self.stdout, self.stderr = '', ''
self.info = self.get_info()
self.version = self._get_podman_version()
self.diff = {}
self.actions = []
@property
def exists(self):
"""Check if container exists."""
return bool(self.info != {})
@property
def different(self):
"""Check if container is different."""
diffcheck = PodmanContainerDiff(self.module, self.info, self.version)
is_different = diffcheck.is_different()
diffs = diffcheck.diff
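        # For --diff output, render the collected values as sorted "key - value" lines.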
if self.module._diff and is_different and diffs['before'] and diffs['after']:
self.diff['before'] = "\n".join(
["%s - %s" % (k, v) for k, v in sorted(
diffs['before'].items())]) + "\n"
self.diff['after'] = "\n".join(
["%s - %s" % (k, v) for k, v in sorted(
diffs['after'].items())]) + "\n"
return is_different
@property
def running(self):
"""Return True if container is running now."""
return self.exists and self.info['State']['Running']
@property
def stopped(self):
"""Return True if container exists and is not running now."""
return self.exists and not self.info['State']['Running']
def get_info(self):
"""Inspect container and gather info about it."""
rc, out, err = self.module.run_command(
[self.module.params['executable'], b'container', b'inspect', self.name])
return json.loads(out)[0] if rc == 0 else {}
def _get_podman_version(self):
rc, out, err = self.module.run_command(
[self.module.params['executable'], b'--version'])
if rc != 0 or not out or "version" not in out:
self.module.fail_json(msg="%s run failed!" % self.module.params['executable'])
return out.split("version")[1].strip()
def _perform_action(self, action):
"""Perform action with container.
Arguments:
action {str} -- action to perform - start, create, stop, run,
delete
"""
b_command = PodmanModuleParams(action,
self.module.params,
self.version,
self.module,
).construct_command_from_params()
full_cmd = " ".join([self.module.params['executable']]
+ [to_native(i) for i in b_command])
self.module.log("PODMAN-CONTAINER-DEBUG: %s" % full_cmd)
self.actions.append(full_cmd)
if not self.module.check_mode:
rc, out, err = self.module.run_command(
[self.module.params['executable'], b'container'] + b_command,
expand_user_and_vars=False)
self.stdout = out
self.stderr = err
if rc != 0:
self.module.fail_json(
msg="Can't %s container %s" % (action, self.name),
stdout=out, stderr=err)
def run(self):
"""Run the container."""
self._perform_action('run')
def delete(self):
"""Delete the container."""
self._perform_action('delete')
def stop(self):
"""Stop the container."""
self._perform_action('stop')
def start(self):
"""Start the container."""
self._perform_action('start')
def create(self):
"""Create the container."""
self._perform_action('create')
def recreate(self):
"""Recreate the container."""
self.delete()
self.run()
def restart(self):
"""Restart the container."""
self.stop()
self.run()
class PodmanManager:
"""Module manager class.
Defines according to parameters what actions should be applied to container
"""
def __init__(self, module):
"""Initialize PodmanManager class.
Arguments:
module {obj} -- ansible module object
"""
super(PodmanManager, self).__init__()
self.module = module
self.results = {
'changed': False,
'actions': [],
'container': {},
}
self.name = self.module.params['name']
self.executable = \
self.module.get_bin_path(self.module.params['executable'],
required=True)
self.image = self.module.params['image']
image_actions = ensure_image_exists(self.module, self.image)
self.results['actions'] += image_actions
self.state = self.module.params['state']
self.restart = self.module.params['force_restart']
self.recreate = self.module.params['recreate']
self.container = PodmanContainer(self.module, self.name)
def update_container_result(self, changed=True):
"""Inspect the current container, update results with last info, exit.
Keyword Arguments:
changed {bool} -- whether any action was performed
(default: {True})
"""
facts = self.container.get_info() if changed else self.container.info
out, err = self.container.stdout, self.container.stderr
self.results.update({'changed': changed, 'container': facts,
'podman_actions': self.container.actions},
stdout=out, stderr=err)
if self.container.diff:
self.results.update({'diff': self.container.diff})
if self.module.params['debug']:
self.results.update({'podman_version': self.container.version})
self.module.exit_json(**self.results)
def make_started(self):
"""Run actions if desired state is 'started'."""
if self.container.running and \
(self.container.different or self.recreate):
self.container.recreate()
self.results['actions'].append('recreated %s' %
self.container.name)
self.update_container_result()
elif self.container.running and not self.container.different:
if self.restart:
self.container.restart()
self.results['actions'].append('restarted %s' %
self.container.name)
self.update_container_result()
self.update_container_result(changed=False)
elif not self.container.exists:
self.container.run()
self.results['actions'].append('started %s' % self.container.name)
self.update_container_result()
elif self.container.stopped and self.container.different:
self.container.recreate()
self.results['actions'].append('recreated %s' %
self.container.name)
self.update_container_result()
elif self.container.stopped and not self.container.different:
self.container.start()
self.results['actions'].append('started %s' % self.container.name)
self.update_container_result()
def make_stopped(self):
"""Run actions if desired state is 'stopped'."""
if not self.container.exists and not self.image:
self.module.fail_json(msg='Cannot create container when image'
' is not specified!')
if not self.container.exists:
self.container.create()
self.results['actions'].append('created %s' % self.container.name)
self.update_container_result()
if self.container.stopped:
self.update_container_result(changed=False)
elif self.container.running:
self.container.stop()
self.results['actions'].append('stopped %s' % self.container.name)
self.update_container_result()
def make_absent(self):
"""Run actions if desired state is 'absent'."""
if not self.container.exists:
self.results.update({'changed': False})
elif self.container.exists:
self.container.delete()
self.results['actions'].append('deleted %s' % self.container.name)
self.results.update({'changed': True})
self.results.update({'container': {},
'podman_actions': self.container.actions})
self.module.exit_json(**self.results)
def execute(self):
"""Execute the desired action according to map of actions & states."""
states_map = {
'present': self.make_started,
'started': self.make_started,
'absent': self.make_absent,
'stopped': self.make_stopped
}
process_action = states_map[self.state]
process_action()
self.module.fail_json(msg="Unexpected logic error happened, "
"please contact maintainers ASAP!")
def main():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
mutually_exclusive=(
['no_hosts', 'etc_hosts'],
),
supports_check_mode=True,
)
# work on input vars
if module.params['state'] in ['started', 'present'] and \
not module.params['image']:
module.fail_json(msg="State '%s' required image to be configured!" %
module.params['state'])
PodmanManager(module).execute()
if __name__ == '__main__':
main()
| en | 0.68882 | #!/usr/bin/python # Copyright (c) 2019 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # flake8: noqa: E501 module: podman_container author: - "<NAME> (@sshnaidm)" version_added: '2.9' short_description: Manage podman containers notes: [] description: - Start, stop, restart and manage Podman containers requirements: - "Podman installed on host" options: name: description: - Name of the container required: True type: str executable: description: - Path to C(podman) executable if it is not in the C($PATH) on the machine running C(podman) default: 'podman' type: str state: description: - I(absent) - A container matching the specified name will be stopped and removed. - I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no container matches the name, a container will be created. If a container matches the name but the provided configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created with the requested config. Image version will be taken into account when comparing configuration. Use the recreate option to force the re-creation of the matching container. - I(started) - Asserts there is a running container matching the name and any provided configuration. If no container matches the name, a container will be created and started. Use recreate to always re-create a matching container, even if it is running. Use force_restart to force a matching container to be stopped and restarted. - I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped state. type: str default: started choices: - absent - present - stopped - started image: description: - Repository path (or image name) and tag used to create the container. If an image is not found, the image will be pulled from the registry. If no tag is included, C(latest) will be used. - Can also be an image ID. If this is the case, the image is assumed to be available locally. type: str annotation: description: - Add an annotation to the container. The format is key value, multiple times. type: dict authfile: description: - Path of the authentication file. Default is ``${XDG_RUNTIME_DIR}/containers/auth.json`` (Not available for remote commands) You can also override the default path of the authentication file by setting the ``REGISTRY_AUTH_FILE`` environment variable. ``export REGISTRY_AUTH_FILE=path`` type: path blkio_weight: description: - Block IO weight (relative weight) accepts a weight value between 10 and 1000 type: int blkio_weight_device: description: - Block IO weight (relative device weight, format DEVICE_NAME[:]WEIGHT). type: dict cap_add: description: - List of capabilities to add to the container. type: list elements: str cap_drop: description: - List of capabilities to drop from the container. 
type: list elements: str cgroup_parent: description: - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. type: path cgroupns: description: - Path to cgroups under which the cgroup for the container will be created. type: str cgroups: description: - Determines whether the container will create CGroups. Valid values are enabled and disabled, which the default being enabled. The disabled option will force the container to not create CGroups, and thus conflicts with CGroup options cgroupns and cgroup-parent. type: str choices: - default - disabled cidfile: description: - Write the container ID to the file type: path cmd_args: description: - Any additionl command options you want to pass to podman command, cmd_args - ['--other-param', 'value'] Be aware module doesn't support idempotency if this is set. type: list elements: str conmon_pidfile: description: - Write the pid of the conmon process to a file. conmon runs in a separate process than Podman, so this is necessary when using systemd to restart Podman containers. type: path command: description: - Override command of container. Can be a string or a list. type: raw cpu_period: description: - Limit the CPU real-time period in microseconds type: int cpu_rt_period: description: - Limit the CPU real-time period in microseconds. Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. type: int cpu_rt_runtime: description: - Limit the CPU real-time runtime in microseconds. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. type: int cpu_shares: description: - CPU shares (relative weight) type: int cpus: description: - Number of CPUs. The default is 0.0 which means no limit. type: str cpuset_cpus: description: - CPUs in which to allow execution (0-3, 0,1) type: str cpuset_mems: description: - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. type: str detach: description: - Run container in detach mode type: bool default: True debug: description: - Return additional information which can be helpful for investigations. type: bool default: False detach_keys: description: - Override the key sequence for detaching a container. Format is a single character or ctrl-value type: str device: description: - Add a host device to the container. The format is <device-on-host>[:<device-on-container>][:<permissions>] (e.g. device /dev/sdc:/dev/xvdc:rwm) type: list elements: str device_read_bps: description: - Limit read rate (bytes per second) from a device (e.g. device-read-bps /dev/sda:1mb) type: list device_read_iops: description: - Limit read rate (IO per second) from a device (e.g. device-read-iops /dev/sda:1000) type: list device_write_bps: description: - Limit write rate (bytes per second) to a device (e.g. device-write-bps /dev/sda:1mb) type: list device_write_iops: description: - Limit write rate (IO per second) to a device (e.g. 
device-write-iops /dev/sda:1000) type: list dns: description: - Set custom DNS servers type: list elements: str dns_option: description: - Set custom DNS options type: str dns_search: description: - Set custom DNS search domains (Use dns_search with '' if you don't wish to set the search domain) type: str entrypoint: description: - Overwrite the default ENTRYPOINT of the image type: str env: description: - Set environment variables. This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container. type: dict env_file: description: - Read in a line delimited file of environment variables type: path env_host: description: - Use all current host environment variables in container. Defaults to false. type: bool etc_hosts: description: - Dict of host-to-IP mappings, where each host name is a key in the dictionary. Each host name will be added to the container's ``/etc/hosts`` file. type: dict aliases: - add_hosts expose: description: - Expose a port, or a range of ports (e.g. expose "3300-3310") to set up port redirection on the host system. type: list elements: str aliases: - exposed - exposed_ports force_restart: description: - Force restart of container. type: bool default: False aliases: - restart gidmap: description: - Run the container in a new user namespace using the supplied mapping. type: str group_add: description: - Add additional groups to run as type: list healthcheck: description: - Set or alter a healthcheck command for a container. type: str healthcheck_interval: description: - Set an interval for the healthchecks (a value of disable results in no automatic timer setup) (default "30s") type: str healthcheck_retries: description: - The number of retries allowed before a healthcheck is considered to be unhealthy. The default value is 3. type: int healthcheck_start_period: description: - The initialization time needed for a container to bootstrap. The value can be expressed in time format like 2m3s. The default value is 0s type: str healthcheck_timeout: description: - The maximum time allowed to complete the healthcheck before an interval is considered failed. Like start-period, the value can be expressed in a time format such as 1m22s. The default value is 30s type: str hostname: description: - Container host name. Sets the container host name that is available inside the container. type: str http_proxy: description: - By default proxy environment variables are passed into the container if set for the podman process. This can be disabled by setting the http_proxy option to false. The environment variables passed in include http_proxy, https_proxy, ftp_proxy, no_proxy, and also the upper case versions of those. Defaults to true type: bool image_volume: description: - Tells podman how to handle the builtin image volumes. The options are bind, tmpfs, or ignore (default bind) type: str choices: - 'bind' - 'tmpfs' - 'ignore' image_strict: description: - Whether to compare images in idempotency by taking into account a full name with registry and namespaces. type: bool default: False init: description: - Run an init inside the container that forwards signals and reaps processes. type: str init_path: description: - Path to the container-init binary. type: str interactive: description: - Keep STDIN open even if not attached. The default is false. When set to true, keep stdin open even if not attached. The default is false. 
type: bool ip: description: - Specify a static IP address for the container, for example '10.88.64.128'. Can only be used if no additional CNI networks to join were specified via 'network:', and if the container is not joining another container's network namespace via 'network container:<name|id>'. The address must be within the default CNI network's pool (default 10.88.0.0/16). type: str ipc: description: - Default is to create a private IPC namespace (POSIX SysV IPC) for the container type: str kernel_memory: description: - Kernel memory limit (format <number>[<unit>], where unit = b, k, m or g) Note - idempotency is supported for integers only. type: str label: description: - Add metadata to a container, pass dictionary of label names and values type: dict label_file: description: - Read in a line delimited file of labels type: str log_driver: description: - Logging driver. Used to set the log driver for the container. For example log_driver "k8s-file". type: str choices: - k8s-file - journald - json-file log_opt: description: - Logging driver specific options. Used to set the path to the container log file. For example log_opt "path=/var/log/container/mycontainer.json" type: str aliases: - log_options memory: description: - Memory limit (format 10k, where unit = b, k, m or g) Note - idempotency is supported for integers only. type: str memory_reservation: description: - Memory soft limit (format 100m, where unit = b, k, m or g) Note - idempotency is supported for integers only. type: str memory_swap: description: - A limit value equal to memory plus swap. Must be used with the -m (--memory) flag. The swap LIMIT should always be larger than -m (--memory) value. By default, the swap LIMIT will be set to double the value of --memory Note - idempotency is supported for integers only. type: str memory_swappiness: description: - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. type: int mount: description: - Attach a filesystem mount to the container. bind or tmpfs For example mount "type=bind,source=/path/on/host,destination=/path/in/container" type: str network: description: - Set the Network mode for the container * bridge create a network stack on the default bridge * none no networking * container:<name|id> reuse another container's network stack * host use the podman host network stack. * <network-name>|<network-id> connect to a user-defined network * ns:<path> path to a network namespace to join * slirp4netns use slirp4netns to create a user network stack. This is the default for rootless containers type: list elements: str aliases: - net no_hosts: description: - Do not create /etc/hosts for the container Default is false. type: bool oom_kill_disable: description: - Whether to disable OOM Killer for the container or not. Default is false. type: bool oom_score_adj: description: - Tune the host's OOM preferences for containers (accepts -1000 to 1000) type: int pid: description: - Set the PID mode for the container type: str pids_limit: description: - Tune the container's pids limit. Set -1 to have unlimited pids for the container. type: str pod: description: - Run container in an existing pod. If you want podman to make the pod for you, preference the pod name with "new:" type: str privileged: description: - Give extended privileges to this container. The default is false. type: bool publish: description: - Publish a container's port, or range of ports, to the host. 
Format - ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort type: list elements: str aliases: - ports - published - published_ports publish_all: description: - Publish all exposed ports to random ports on the host interfaces. The default is false. type: bool read_only: description: - Mount the container's root filesystem as read only. Default is false type: bool read_only_tmpfs: description: - If container is running in --read-only mode, then mount a read-write tmpfs on /run, /tmp, and /var/tmp. The default is true type: bool recreate: description: - Use with present and started states to force the re-creation of an existing container. type: bool default: False restart_policy: description: - Restart policy to follow when containers exit. Restart policy will not take effect if a container is stopped via the podman kill or podman stop commands. Valid values are * no - Do not restart containers on exit * on-failure[:max_retries] - Restart containers when they exit with a non-0 exit code, retrying indefinitely or until the optional max_retries count is hit * always - Restart containers when they exit, regardless of status, retrying indefinitely type: str rm: description: - Automatically remove the container when it exits. The default is false. type: bool aliases: - remove rootfs: description: - If true, the first argument refers to an exploded container on the file system. The dafault is false. type: bool security_opt: description: - Security Options. For example security_opt "seccomp=unconfined" type: list elements: str shm_size: description: - Size of /dev/shm. The format is <number><unit>. number must be greater than 0. Unit is optional and can be b (bytes), k (kilobytes), m(megabytes), or g (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses 64m type: str sig_proxy: description: - Proxy signals sent to the podman run command to the container process. SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is true. type: bool stop_signal: description: - Signal to stop a container. Default is SIGTERM. type: int stop_timeout: description: - Timeout (in seconds) to stop a container. Default is 10. type: int subgidname: description: - Run the container in a new user namespace using the map with 'name' in the /etc/subgid file. type: str subuidname: description: - Run the container in a new user namespace using the map with 'name' in the /etc/subuid file. type: str sysctl: description: - Configure namespaced kernel parameters at runtime type: dict systemd: description: - Run container in systemd mode. The default is true. type: bool tmpfs: description: - Create a tmpfs mount. For example tmpfs "/tmp" "rw,size=787448k,mode=1777" type: dict tty: description: - Allocate a pseudo-TTY. The default is false. type: bool uidmap: description: - Run the container in a new user namespace using the supplied mapping. type: list ulimit: description: - Ulimit options type: list user: description: - Sets the username or UID used and optionally the groupname or GID for the specified command. type: str userns: description: - Set the user namespace mode for the container. It defaults to the PODMAN_USERNS environment variable. An empty value means user namespaces are disabled. type: str uts: description: - Set the UTS mode for the container type: str volume: description: - Create a bind mount. If you specify, volume /HOST-DIR:/CONTAINER-DIR, podman bind mounts /HOST-DIR in the host to /CONTAINER-DIR in the podman container. 
type: list elements: str aliases: - volumes volumes_from: description: - Mount volumes from the specified container(s). type: list elements: str workdir: description: - Working directory inside the container. The default working directory for running binaries within a container is the root directory (/). type: str - name: Run container podman_container: name: container image: quay.io/bitnami/wildfly state: started - name: Create a data container podman_container: name: mydata image: busybox volume: - /tmp/data - name: Re-create a redis container podman_container: name: myredis image: redis command: redis-server --appendonly yes state: present recreate: yes expose: - 6379 volumes_from: - mydata - name: Restart a container podman_container: name: myapplication image: redis state: started restart: yes etc_hosts: other: "127.0.0.1" restart_policy: "no" device: "/dev/sda:/dev/xvda:rwm" ports: - "8080:9000" - "127.0.0.1:8081:9001/udp" env: SECRET_KEY: "ssssh" BOOLEAN_KEY: "yes" - name: Container present podman_container: name: mycontainer state: present image: ubuntu:14.04 command: "sleep 1d" - name: Stop a container podman_container: name: mycontainer state: stopped - name: Start 4 load-balanced containers podman_container: name: "container{{ item }}" recreate: yes image: someuser/anotherappimage command: sleep 1d with_sequence: count=4 - name: remove container podman_container: name: ohno state: absent - name: Writing output podman_container: name: myservice image: busybox log_options: path=/var/log/container/mycontainer.json log_driver: k8s-file container: description: - Facts representing the current state of the container. Matches the podman inspection output. - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts are also accessible directly as C(podman_container). Note that the returned fact will be removed in Ansible 2.12. - Empty if C(state) is I(absent). returned: always type: dict sample: '{ "AppArmorProfile": "", "Args": [ "sh" ], "BoundingCaps": [ "CAP_CHOWN", ... ], "Config": { "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": [ "sh" ], "Domainname": "", "Entrypoint": "", "Env": [ "PATH=/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=", "container=podman" ], "Hostname": "", "Image": "docker.io/library/busybox:latest", "Labels": null, "OpenStdin": false, "StdinOnce": false, "StopSignal": 15, "Tty": false, "User": { "gid": 0, "uid": 0 }, "Volumes": null, "WorkingDir": "/" }, "ConmonPidFile": "...", "Created": "2019-06-17T19:13:09.873858307+03:00", "Dependencies": [], "Driver": "overlay", "EffectiveCaps": [ "CAP_CHOWN", ... ], "ExecIDs": [], "ExitCommand": [ "/usr/bin/podman", "--root", ... ], "GraphDriver": { ... }, "HostConfig": { ... }, "HostnamePath": "...", "HostsPath": "...", "ID": "...", "Image": "...", "ImageName": "docker.io/library/busybox:latest", "IsInfra": false, "LogPath": "/tmp/container/mycontainer.json", "MountLabel": "system_u:object_r:container_file_t:s0:c282,c782", "Mounts": [ ... ], "Name": "myservice", "Namespace": "", "NetworkSettings": { "Bridge": "", ... 
}, "Path": "sh", "ProcessLabel": "system_u:system_r:container_t:s0:c282,c782", "ResolvConfPath": "...", "RestartCount": 0, "Rootfs": "", "State": { "Dead": false, "Error": "", "ExitCode": 0, "FinishedAt": "2019-06-17T19:13:10.157518963+03:00", "Healthcheck": { "FailingStreak": 0, "Log": null, "Status": "" }, "OOMKilled": false, "OciVersion": "1.0.1-dev", "Paused": false, "Pid": 4083, "Restarting": false, "Running": false, "StartedAt": "2019-06-17T19:13:10.152479729+03:00", "Status": "exited" }, "StaticDir": "..." ... }' Creates list of arguments for podman CLI command. Arguments: action {str} -- action type from 'run', 'stop', 'create', 'delete', 'start' params {dict} -- dictionary of module parameters Create a podman command from given module parameters. Returns: list -- list of byte strings for Popen command # Add your own args for podman command # "memory_swappiness": -1, # libpod issue with networks in inspection # make here any changes to self.defaults related to podman version # Defaults depend on user and platform, impossible to guess # It's impossible to get from inspest, recreate it if not default # Cgroups output is not supported in all versions # TODO(sshnaidm): to inspect image to get the default command # Limited idempotency, it can't guess default values # Healthcheck is only defined in container config if a healthcheck # was configured; otherwise the config key isn't part of the config. # the "test" key is a list of 2 items where the first one is # "CMD-SHELL" and the second one is the actual healthcheck command. # Because of hostname is random generated, this parameter has partial idempotency only. # TODO(sshnaidm): for strict image compare mode use SHAs # In a idempotency 'lite mode' assume all images from different registries are the same # Parameter has limited idempotency, unable to guess the default log_path # By default it's twice memory parameter # Ignore volumes option for idempotency # Check non idempotent parameters If image is passed, ensure it exists, if not - pull it or fail. Arguments: module {obj} -- ansible module object image {str} -- name of image Returns: list -- list of image actions - if it pulled or nothing was done Perform container tasks. Manages podman container, inspects it and checks its current state Initialize PodmanContainer class. Arguments: module {obj} -- ansible module object name {str} -- name of container Check if container exists. Check if container is different. Return True if container is running now. Return True if container exists and is not running now. Inspect container and gather info about it. Perform action with container. Arguments: action {str} -- action to perform - start, create, stop, run, delete Run the container. Delete the container. Stop the container. Start the container. Create the container. Recreate the container. Restart the container. Module manager class. Defines according to parameters what actions should be applied to container Initialize PodmanManager class. Arguments: module {obj} -- ansible module object Inspect the current container, update results with last info, exit. Keyword Arguments: changed {bool} -- whether any action was performed (default: {True}) Run actions if desired state is 'started'. Run actions if desired state is 'stopped'. Run actions if desired state is 'absent'. Execute the desired action according to map of actions & states. # work on input vars | 1.557913 | 2 |
setup.py | UdoGi/dark-matter | 0 | 5912 | #!/usr/bin/env python
from setuptools import setup
# Modified from http://stackoverflow.com/questions/2058802/
# how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
def version():
import os
import re
init = os.path.join('dark', '__init__.py')
with open(init) as fp:
initData = fp.read()
match = re.search(r"^__version__ = ['\"]([^'\"]+)['\"]",
initData, re.M)
if match:
return match.group(1)
else:
raise RuntimeError('Unable to find version string in %r.' % init)
# Explicitly list bin scripts to be installed, seeing as I have a few local
# bin files that are not (yet) part of the distribution.
scripts = [
'bin/aa-info.py',
'bin/aa-to-dna.py',
'bin/aa-to-properties.py',
'bin/adaptor-distances.py',
'bin/alignment-panel-civ.py',
'bin/alignments-per-read.py',
'bin/bit-score-to-e-value.py',
'bin/cat-json-blast-records.py',
'bin/check-fasta-json-blast-consistency.py',
'bin/codon-distance.py',
'bin/compare-consensuses.py',
'bin/compare-sequences.py',
'bin/convert-blast-xml-to-json.py',
'bin/convert-diamond-to-json.py',
'bin/convert-diamond-to-sam.py',
'bin/convert-sam-to-fastq.sh',
'bin/create-newick-relabeling-output.py',
'bin/dark-matter-version.py',
'bin/describe-protein-database.py',
'bin/dna-to-aa.py',
'bin/download-genbank.sh',
'bin/e-value-to-bit-score.py',
'bin/extract-ORFs.py',
'bin/fasta-base-indices.py',
'bin/fasta-count.py',
'bin/fasta-diff.sh',
'bin/fasta-identity-table.py',
'bin/fasta-ids.py',
'bin/fasta-join.py',
'bin/fasta-lengths.py',
'bin/fasta-sequences.py',
'bin/fasta-sort.py',
'bin/fasta-split-by-id.py',
'bin/fasta-subset.py',
'bin/fasta-subtraction.py',
'bin/fasta-to-phylip.py',
'bin/fasta-variable-sites.py',
'bin/filter-fasta-by-complexity.py',
'bin/filter-fasta-by-taxonomy.py',
'bin/filter-fasta.py',
'bin/filter-hits-to-fasta.py',
'bin/filter-reads-alignments.py',
'bin/filter-sam.py',
'bin/find-hits.py',
'bin/format-fasta.py',
'bin/genome-protein-summary.py',
'bin/get-features.py',
'bin/get-hosts.py',
'bin/get-reads.py',
'bin/get-taxonomy.py',
'bin/graph-evalues.py',
'bin/local-align.py',
'bin/make-consensus.py',
'bin/make-fasta-database.py',
'bin/make-protein-database.py',
'bin/ncbi-fetch-id.py',
'bin/newick-to-ascii.py',
'bin/noninteractive-alignment-panel.py',
'bin/parse-genbank-flat-file.py',
'bin/position-summary.py',
'bin/pre-commit.sh',
'bin/print-blast-xml-for-derek.py',
'bin/print-blast-xml.py',
'bin/print-read-lengths.py',
'bin/proteins-to-pathogens.py',
'bin/proteins-to-pathogens-civ.py',
'bin/randomize-fasta.py',
'bin/read-blast-json.py',
'bin/read-blast-xml.py',
'bin/relabel-newick-tree.py',
'bin/run-bwa.py',
'bin/run-bowtie2.py',
'bin/sam-coverage.py',
'bin/sam-coverage-depth.py',
'bin/sam-to-fasta-alignment.py',
'bin/sam-reference-read-counts.py',
'bin/sam-references.py',
'bin/sff-to-fastq.py',
'bin/split-fasta-by-adaptors.py',
'bin/subset-protein-database.py',
'bin/summarize-fasta-bases.py',
'bin/summarize-reads.py',
'bin/trim-primers.py',
'bin/trim-reads.py',
'bin/write-htcondor-job-spec.py',
]
setup(name='dark-matter',
version=version(),
packages=['dark', 'dark.blast', 'dark.diamond', 'dark.civ'],
url='https://github.com/acorg/dark-matter',
download_url='https://github.com/acorg/dark-matter',
author='<NAME>, <NAME>, <NAME>, <NAME>',
author_email='<EMAIL>',
keywords=['virus discovery'],
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='MIT',
description='Python classes for working with genetic sequence data',
scripts=scripts,
install_requires=[
'biopython>=1.71',
'bz2file>=0.98',
'Cython>=0.29.16',
'ipython>=3.1.0',
'matplotlib>=1.4.3',
'mysql-connector-python==8.0.11',
'numpy>=1.14.2',
'pysam>=0.15.2',
'pyfaidx>=0.4.8.4',
'pyzmq>=14.3.1',
'requests>=2.18.4',
'cachetools>=3.1.0',
'simplejson>=3.5.3',
'six>=1.11.0',
])
| #!/usr/bin/env python
from setuptools import setup
# Modified from http://stackoverflow.com/questions/2058802/
# how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
def version():
import os
import re
init = os.path.join('dark', '__init__.py')
with open(init) as fp:
initData = fp.read()
match = re.search(r"^__version__ = ['\"]([^'\"]+)['\"]",
initData, re.M)
if match:
return match.group(1)
else:
raise RuntimeError('Unable to find version string in %r.' % init)
# Explicitly list bin scripts to be installed, seeing as I have a few local
# bin files that are not (yet) part of the distribution.
scripts = [
'bin/aa-info.py',
'bin/aa-to-dna.py',
'bin/aa-to-properties.py',
'bin/adaptor-distances.py',
'bin/alignment-panel-civ.py',
'bin/alignments-per-read.py',
'bin/bit-score-to-e-value.py',
'bin/cat-json-blast-records.py',
'bin/check-fasta-json-blast-consistency.py',
'bin/codon-distance.py',
'bin/compare-consensuses.py',
'bin/compare-sequences.py',
'bin/convert-blast-xml-to-json.py',
'bin/convert-diamond-to-json.py',
'bin/convert-diamond-to-sam.py',
'bin/convert-sam-to-fastq.sh',
'bin/create-newick-relabeling-output.py',
'bin/dark-matter-version.py',
'bin/describe-protein-database.py',
'bin/dna-to-aa.py',
'bin/download-genbank.sh',
'bin/e-value-to-bit-score.py',
'bin/extract-ORFs.py',
'bin/fasta-base-indices.py',
'bin/fasta-count.py',
'bin/fasta-diff.sh',
'bin/fasta-identity-table.py',
'bin/fasta-ids.py',
'bin/fasta-join.py',
'bin/fasta-lengths.py',
'bin/fasta-sequences.py',
'bin/fasta-sort.py',
'bin/fasta-split-by-id.py',
'bin/fasta-subset.py',
'bin/fasta-subtraction.py',
'bin/fasta-to-phylip.py',
'bin/fasta-variable-sites.py',
'bin/filter-fasta-by-complexity.py',
'bin/filter-fasta-by-taxonomy.py',
'bin/filter-fasta.py',
'bin/filter-hits-to-fasta.py',
'bin/filter-reads-alignments.py',
'bin/filter-sam.py',
'bin/find-hits.py',
'bin/format-fasta.py',
'bin/genome-protein-summary.py',
'bin/get-features.py',
'bin/get-hosts.py',
'bin/get-reads.py',
'bin/get-taxonomy.py',
'bin/graph-evalues.py',
'bin/local-align.py',
'bin/make-consensus.py',
'bin/make-fasta-database.py',
'bin/make-protein-database.py',
'bin/ncbi-fetch-id.py',
'bin/newick-to-ascii.py',
'bin/noninteractive-alignment-panel.py',
'bin/parse-genbank-flat-file.py',
'bin/position-summary.py',
'bin/pre-commit.sh',
'bin/print-blast-xml-for-derek.py',
'bin/print-blast-xml.py',
'bin/print-read-lengths.py',
'bin/proteins-to-pathogens.py',
'bin/proteins-to-pathogens-civ.py',
'bin/randomize-fasta.py',
'bin/read-blast-json.py',
'bin/read-blast-xml.py',
'bin/relabel-newick-tree.py',
'bin/run-bwa.py',
'bin/run-bowtie2.py',
'bin/sam-coverage.py',
'bin/sam-coverage-depth.py',
'bin/sam-to-fasta-alignment.py',
'bin/sam-reference-read-counts.py',
'bin/sam-references.py',
'bin/sff-to-fastq.py',
'bin/split-fasta-by-adaptors.py',
'bin/subset-protein-database.py',
'bin/summarize-fasta-bases.py',
'bin/summarize-reads.py',
'bin/trim-primers.py',
'bin/trim-reads.py',
'bin/write-htcondor-job-spec.py',
]
setup(name='dark-matter',
version=version(),
packages=['dark', 'dark.blast', 'dark.diamond', 'dark.civ'],
url='https://github.com/acorg/dark-matter',
download_url='https://github.com/acorg/dark-matter',
author='<NAME>, <NAME>, <NAME>, <NAME>',
author_email='<EMAIL>',
keywords=['virus discovery'],
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='MIT',
description='Python classes for working with genetic sequence data',
scripts=scripts,
install_requires=[
'biopython>=1.71',
'bz2file>=0.98',
'Cython>=0.29.16',
'ipython>=3.1.0',
'matplotlib>=1.4.3',
'mysql-connector-python==8.0.11',
'numpy>=1.14.2',
'pysam>=0.15.2',
'pyfaidx>=0.4.8.4',
'pyzmq>=14.3.1',
'requests>=2.18.4',
'cachetools>=3.1.0',
'simplejson>=3.5.3',
'six>=1.11.0',
])
| en | 0.813584 | #!/usr/bin/env python # Modified from http://stackoverflow.com/questions/2058802/ # how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package # Explicitly list bin scripts to be installed, seeing as I have a few local # bin files that are not (yet) part of the distribution. | 2.083428 | 2 |
authenticationApp/templatetags/timetags.py | FilipBali/VirtualPortfolio-WebApplication | 0 | 5913 | <reponame>FilipBali/VirtualPortfolio-WebApplication<gh_stars>0
# ======================================================================================================================
# Fakulta informacnich technologii VUT v Brne
# Bachelor thesis
# Author: <NAME> (xbalif00)
# License: MIT
# ======================================================================================================================
from django import template
import datetime
import time
from portfolioApp.models import NotificationEvent
register = template.Library()
import pandas as pd
def print_timestamp(timestamp):
    # The timestamp is expected in milliseconds since the epoch.
    return time.strftime('%Y-%m-%d', time.gmtime(timestamp / 1000.0))
def print_timestamp_analysis(timestamp):
return str(timestamp.year) + '-' + str(timestamp.month) +'-' + str(timestamp.day)
def print_timestamp_notifications(timestamp):
return str(timestamp.year) + '-' + str(timestamp.month) +'-' + str(timestamp.day)
def print_notification_text(type):
if type == 1:
return 'At a price change equal/above/below'
elif type == 2:
return 'Percentage increase current price'
elif type == 3:
return 'Percentage decrease current price'
def print_symbol_notifications(notification_id):
object = NotificationEvent.objects.get(id=notification_id)
symbol = str(object.company.symbol)
return symbol
def print_type_notifications(notification_type):
if notification_type == 1:
return 'Interday'
elif notification_type == 2:
return 'Intraday'
register.filter(print_timestamp)
register.filter(print_timestamp_analysis)
register.filter(print_timestamp_notifications)
register.filter(print_notification_text)
register.filter(print_symbol_notifications)
register.filter(print_type_notifications) | # ======================================================================================================================
# Fakulta informacnich technologii VUT v Brne
# Bachelor thesis
# Author: <NAME> (xbalif00)
# License: MIT
# ======================================================================================================================
from django import template
import datetime
import time
from portfolioApp.models import NotificationEvent
register = template.Library()
import pandas as pd
def print_timestamp(timestamp):
    # The timestamp is expected in milliseconds since the epoch.
    return time.strftime('%Y-%m-%d', time.gmtime(timestamp / 1000.0))
def print_timestamp_analysis(timestamp):
return str(timestamp.year) + '-' + str(timestamp.month) +'-' + str(timestamp.day)
def print_timestamp_notifications(timestamp):
return str(timestamp.year) + '-' + str(timestamp.month) +'-' + str(timestamp.day)
def print_notification_text(type):
if type == 1:
return 'At a price change equal/above/below'
elif type == 2:
return 'Percentage increase current price'
elif type == 3:
return 'Percentage decrease current price'
def print_symbol_notifications(notification_id):
object = NotificationEvent.objects.get(id=notification_id)
symbol = str(object.company.symbol)
return symbol
def print_type_notifications(notification_type):
if notification_type == 1:
return 'Interday'
elif notification_type == 2:
return 'Intraday'
register.filter(print_timestamp)
register.filter(print_timestamp_analysis)
register.filter(print_timestamp_notifications)
register.filter(print_notification_text)
register.filter(print_symbol_notifications)
register.filter(print_type_notifications) | en | 0.324205 | # ====================================================================================================================== # Fakulta informacnich technologii VUT v Brne # Bachelor thesis # Author: <NAME> (xbalif00) # License: MIT # ====================================================================================================================== | 2.303008 | 2 |
pycfmodel/model/resources/properties/policy.py | donatoaz/pycfmodel | 23 | 5914 | <reponame>donatoaz/pycfmodel
from pycfmodel.model.resources.properties.policy_document import PolicyDocument
from pycfmodel.model.resources.properties.property import Property
from pycfmodel.model.types import Resolvable, ResolvableStr
class Policy(Property):
"""
Contains information about an attached policy.
Properties:
- PolicyDocument: A [policy document][pycfmodel.model.resources.properties.policy_document.PolicyDocument] object.
- PolicyName: The friendly name (not ARN) identifying the policy.
"""
PolicyName: ResolvableStr
PolicyDocument: Resolvable[PolicyDocument]
| from pycfmodel.model.resources.properties.policy_document import PolicyDocument
from pycfmodel.model.resources.properties.property import Property
from pycfmodel.model.types import Resolvable, ResolvableStr
class Policy(Property):
"""
Contains information about an attached policy.
Properties:
- PolicyDocument: A [policy document][pycfmodel.model.resources.properties.policy_document.PolicyDocument] object.
- PolicyName: The friendly name (not ARN) identifying the policy.
"""
PolicyName: ResolvableStr
PolicyDocument: Resolvable[PolicyDocument] | en | 0.632898 | Contains information about an attached policy. Properties: - PolicyDocument: A [policy document][pycfmodel.model.resources.properties.policy_document.PolicyDocument] object. - PolicyName: The friendly name (not ARN) identifying the policy. | 2.184726 | 2 |
stlearn/__init__.py | mrahim/stacked-learn | 2 | 5915 | from .stacking import StackingClassifier, stack_features
from .multitask import MultiTaskEstimator
| from .stacking import StackingClassifier, stack_features
from .multitask import MultiTaskEstimator
| none | 1 | 1.058068 | 1 |
|
samples/workload/XNNPACK/toolchain/emscripten_toolchain_config.bzl | utsavm9/wasm-micro-runtime | 2 | 5916 | # Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool_path",
)
all_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
]
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
def _impl(ctx):
tool_paths = [
tool_path(
name = "gcc",
path = "/opt/emsdk/upstream/emscripten/emcc",
),
tool_path(
name = "ld",
path = "/opt/emsdk/upstream/emscripten/emcc",
),
tool_path(
name = "ar",
path = "/opt/emsdk/upstream/emscripten/emar",
),
tool_path(
name = "cpp",
path = "/opt/emsdk/upstream/emscripten/em++",
),
tool_path(
name = "gcov",
path = "/bin/false",
),
tool_path(
name = "nm",
path = "/bin/false",
),
tool_path(
name = "objdump",
path = "/bin/false",
),
tool_path(
name = "strip",
path = "/bin/false",
),
]
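    # The same flags are applied to compile and link actions: -msimd128 enables
    # WebAssembly SIMD, USE_PTHREADS=0 disables threading, and STANDALONE_WASM=1
    # produces a module that can run outside Emscripten's JS runtime.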
features = [ # NEW
feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_compile_actions,
flag_groups = ([
flag_group(
flags = [
"-O3",
"-msimd128",
"-s",
"USE_PTHREADS=0",
"-s",
"ERROR_ON_UNDEFINED_SYMBOLS=0",
"-s",
"STANDALONE_WASM=1",
],
),
]),
),
],
),
feature(
name = "default_linker_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([
flag_group(
flags = [
"-O3",
"-msimd128",
"-s",
"USE_PTHREADS=0",
"-s",
"ERROR_ON_UNDEFINED_SYMBOLS=0",
"-s",
"STANDALONE_WASM=1",
"-Wl,--export=__heap_base",
"-Wl,--export=__data_end",
],
),
]),
),
],
),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features, # NEW
cxx_builtin_include_directories = [
"/opt/emsdk/upstream/emscripten/system/include/libcxx",
"/opt/emsdk/upstream/emscripten/system/lib/libcxxabi/include",
"/opt/emsdk/upstream/emscripten/system/include",
"/opt/emsdk/upstream/emscripten/system/include/libc",
"/opt/emsdk/upstream/emscripten/system/lib/libc/musl/arch/emscripten",
"/opt/emsdk/upstream/lib/clang/12.0.0/include/",
],
toolchain_identifier = "wasm-emsdk",
host_system_name = "i686-unknown-linux-gnu",
target_system_name = "wasm32-unknown-emscripten",
target_cpu = "wasm32",
target_libc = "unknown",
compiler = "emsdk",
abi_version = "unknown",
abi_libc_version = "unknown",
tool_paths = tool_paths,
)
emsdk_toolchain_config = rule(
implementation = _impl,
attrs = {},
provides = [CcToolchainConfigInfo],
)
| # Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool_path",
)
all_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
]
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
def _impl(ctx):
tool_paths = [
tool_path(
name = "gcc",
path = "/opt/emsdk/upstream/emscripten/emcc",
),
tool_path(
name = "ld",
path = "/opt/emsdk/upstream/emscripten/emcc",
),
tool_path(
name = "ar",
path = "/opt/emsdk/upstream/emscripten/emar",
),
tool_path(
name = "cpp",
path = "/opt/emsdk/upstream/emscripten/em++",
),
tool_path(
name = "gcov",
path = "/bin/false",
),
tool_path(
name = "nm",
path = "/bin/false",
),
tool_path(
name = "objdump",
path = "/bin/false",
),
tool_path(
name = "strip",
path = "/bin/false",
),
]
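    # The same flags are applied to compile and link actions: -msimd128 enables
    # WebAssembly SIMD, USE_PTHREADS=0 disables threading, and STANDALONE_WASM=1
    # produces a module that can run outside Emscripten's JS runtime.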
features = [ # NEW
feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_compile_actions,
flag_groups = ([
flag_group(
flags = [
"-O3",
"-msimd128",
"-s",
"USE_PTHREADS=0",
"-s",
"ERROR_ON_UNDEFINED_SYMBOLS=0",
"-s",
"STANDALONE_WASM=1",
],
),
]),
),
],
),
feature(
name = "default_linker_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([
flag_group(
flags = [
"-O3",
"-msimd128",
"-s",
"USE_PTHREADS=0",
"-s",
"ERROR_ON_UNDEFINED_SYMBOLS=0",
"-s",
"STANDALONE_WASM=1",
"-Wl,--export=__heap_base",
"-Wl,--export=__data_end",
],
),
]),
),
],
),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features, # NEW
cxx_builtin_include_directories = [
"/opt/emsdk/upstream/emscripten/system/include/libcxx",
"/opt/emsdk/upstream/emscripten/system/lib/libcxxabi/include",
"/opt/emsdk/upstream/emscripten/system/include",
"/opt/emsdk/upstream/emscripten/system/include/libc",
"/opt/emsdk/upstream/emscripten/system/lib/libc/musl/arch/emscripten",
"/opt/emsdk/upstream/lib/clang/12.0.0/include/",
],
toolchain_identifier = "wasm-emsdk",
host_system_name = "i686-unknown-linux-gnu",
target_system_name = "wasm32-unknown-emscripten",
target_cpu = "wasm32",
target_libc = "unknown",
compiler = "emsdk",
abi_version = "unknown",
abi_libc_version = "unknown",
tool_paths = tool_paths,
)
emsdk_toolchain_config = rule(
implementation = _impl,
attrs = {},
provides = [CcToolchainConfigInfo],
)
| en | 0.571494 | # Copyright (C) 2019 Intel Corporation. All rights reserved. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # NEW # NEW | 1.349997 | 1 |
cloud_storages/gdrive/gdrive.py | toplenboren/safezone | 0 | 5917 | <gh_stars>0
from __future__ import print_function
import json
from typing import List
from functools import lru_cache
from cloud_storages.http_shortcuts import *
from database.database import Database
from models.models import StorageMetaInfo, Resource, Size
from cloud_storages.storage import Storage
from cloud_storages.gdrive.client_config import GOOGLE_DRIVE_CONFIG, SCOPES
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
GOOGLE_DRIVE_DB_KEY = 'google'
class GDriveStorage(Storage):
def __init__(self, token):
self.token = token
@lru_cache(maxsize=None)
def _get_folder_id_by_name(self, name: str) -> str:
"""
Google drive has a quirk - you can't really use normal os-like paths - first you need to get an ID of the folder
This function searches for folders with specified name
"""
response = get_with_OAuth(
f"https://www.googleapis.com/drive/v3/files",
params={
'fields': '*',
'q': f"name = '{name}' and mimeType = 'application/vnd.google-apps.folder'"
},
token=self.token
)
if response.status_code == 200:
response_as_json = response.json()
try:
result = response_as_json['files'][0]['id']
return result
except IndexError as e:
raise ValueError(f"Something went wrong with GD: Error: {e}")
else:
raise ValueError(f"Something went wrong with GD: Response: "
f"{str(response.status_code)} — {response.json()}")
@classmethod
# todo (toplenboren) remove database argument dependency :(
def auth(cls, db: Database):
creds = None
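        # Load cached OAuth credentials from the local DB; refresh them if expired, otherwise run the consent flow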
creds_from_db = db.get(GOOGLE_DRIVE_DB_KEY)
if creds_from_db:
creds = Credentials.from_authorized_user_info(json.loads(creds_from_db), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_config(GOOGLE_DRIVE_CONFIG, SCOPES)
creds = flow.run_local_server(port=0)
        # Store the full credentials JSON so it can be re-loaded via from_authorized_user_info() above
        db.set(GOOGLE_DRIVE_DB_KEY, creds.to_json())
@classmethod
def _deserialize_resource(cls, json: dict) -> Resource or None:
"""
        Tries to parse a Google Drive API file resource (JSON) into a Resource object
:param json:
:return:
"""
try:
is_file = True
if 'folder' in json['mimeType']:
is_file = False
# You don't have pathes in google drive, instead -- you have an id
path = json['id']
except KeyError:
return None
res = Resource(is_file, path)
res.size = Size(json.get('size'), 'b') if json.get('size') else None
res.name = json.get('name')
res.url = json.get('webContentLink')
res.updated = json.get('modifiedTime')
res.md5 = json.get('md5Checksum')
return res
def list_resources_on_path(self, remote_path: str) -> List[Resource]:
"""
List all items in directory
:param path: path to the resource
"""
folder_id = self._get_folder_id_by_name(remote_path)
response = get_with_OAuth(
f"https://www.googleapis.com/drive/v3/files",
params={
'fields': '*',
'q': f"'{folder_id}' in parents"
},
token=self.token
)
if response.status_code == 200:
result = []
response_as_json = response.json()
files = response_as_json['files']
for resource in files:
res: Resource or None = self._deserialize_resource(resource)
if res is not None:
result.append(res)
return result
else:
raise ValueError(f"Something went wrong with YD: Response: "
f"{str(response.status_code)} — {response.json()['message']}")
def get_meta_info(self) -> StorageMetaInfo:
response = get_with_OAuth('https://www.googleapis.com/drive/v3/about?fields=*', token=self.token)
if response.status_code == 200:
response_read = response.json()
used_space = response_read.get('storageQuota', {}).get('usage')
total_space = response_read.get('storageQuota', {}).get('limit')
return StorageMetaInfo(int(used_space), int(total_space))
else:
raise ValueError(f"Something went wrong with GD: Response: "
f"{str(response.status_code)} — {response.json()['message']}")
def create_path(self, remote_path: List[str]) -> None:
"""
Creates the remote path on yandex disk
"""
print(f'[{__name__}] Trying to create directory {"/".join(remote_path)} on remote...')
dir_to_create = []
for dir in remote_path:
dir_to_create.append(dir)
path_to_create = '/'.join(dir_to_create)
response = put_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={path_to_create}',
token=self.token)
if 199 < response.status_code < 401:
print(f'[{__name__}] Created directory {path_to_create}')
continue
elif response.status_code == 409 and 'уже существует' in response.json().get('message', ''):
continue
return
def save_resource_to_path(self, resource: Resource, remote_path: str, overwrite: bool, _rec_call:bool = False) -> Resource or None:
"""
Put an Item to the directory
:param resource: resource on the local fs
:param remote_path: string, path to resource on remote fs
:param _rec_call: bool, a system parameter, whether or not this function was called as a recursive call
:return: saved resource or raises exception
"""
upload_successful_flag = False
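        # Two-step upload: first ask the API for an upload href, then PUT the file bytes to that href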
response = get_with_OAuth(
            f'https://cloud-api.yandex.net/v1/disk/resources/upload?path={remote_path}&overwrite={overwrite}',
token=self.token
)
if response.status_code == 200:
response_read = response.json()
upload_link = response_read['href']
with open(resource.path, 'rb') as f:
files = f
response = put_with_OAuth(upload_link, data=files)
if 199 < response.status_code < 401:
upload_successful_flag = True
response = get_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={remote_path}',
token=self.token)
resource_metainfo = self._deserialize_resource(response.json())
if 199 < response.status_code < 401:
return resource_metainfo
elif upload_successful_flag:
return resource
# This dir is not present in the storage
# We use _rec_call to tell that the next call was made as recursive call, so we don't cause SO
elif response.status_code == 409 and not _rec_call:
# We don't need to create a folder with the name equal to the filename, so we do [:-1]
self.create_path(remote_path.split('/')[:-1])
return self.save_resource_to_path(resource, remote_path, overwrite, _rec_call=True)
raise ValueError(f"Something went wrong with YD: Response: "
f"{str(response.status_code)} — {response.json().get('message', '')}")
def download_resource(self, remote_path, local_path) -> str:
response = get_with_OAuth(
f'https://cloud-api.yandex.net/v1/disk/resources/download?path={remote_path}',
token=self.token
)
if response.status_code == 200:
response_read = response.json()
dl_url = response_read.get('href')
else:
raise ValueError(f"[{__name__}] Something went wrong with YD: Response: "
f"{str(response.status_code)} — {response.json()['message']}")
file = requests.get(dl_url)
open(local_path, 'wb').write(file.content)
return local_path
def main():
storage = GDriveStorage(None)
db = Database('../storage.db')
storage.auth(db)
authed_storage = GDriveStorage(json.loads(db.get(GOOGLE_DRIVE_DB_KEY))['token'])
result = authed_storage.list_resources_on_path('savezone')
print(result)
if __name__ == '__main__':
main() | from __future__ import print_function
import json
from typing import List
from functools import lru_cache
from cloud_storages.http_shortcuts import *
from database.database import Database
from models.models import StorageMetaInfo, Resource, Size
from cloud_storages.storage import Storage
from cloud_storages.gdrive.client_config import GOOGLE_DRIVE_CONFIG, SCOPES
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
GOOGLE_DRIVE_DB_KEY = 'google'
class GDriveStorage(Storage):
def __init__(self, token):
self.token = token
@lru_cache(maxsize=None)
def _get_folder_id_by_name(self, name: str) -> str:
"""
Google drive has a quirk - you can't really use normal os-like paths - first you need to get an ID of the folder
This function searches for folders with specified name
"""
response = get_with_OAuth(
f"https://www.googleapis.com/drive/v3/files",
params={
'fields': '*',
'q': f"name = '{name}' and mimeType = 'application/vnd.google-apps.folder'"
},
token=self.token
)
if response.status_code == 200:
response_as_json = response.json()
try:
result = response_as_json['files'][0]['id']
return result
except IndexError as e:
raise ValueError(f"Something went wrong with GD: Error: {e}")
else:
raise ValueError(f"Something went wrong with GD: Response: "
f"{str(response.status_code)} — {response.json()}")
@classmethod
# todo (toplenboren) remove database argument dependency :(
def auth(cls, db: Database):
creds = None
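        # Load cached OAuth credentials from the local DB; refresh them if expired, otherwise run the consent flow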
creds_from_db = db.get(GOOGLE_DRIVE_DB_KEY)
if creds_from_db:
creds = Credentials.from_authorized_user_info(json.loads(creds_from_db), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_config(GOOGLE_DRIVE_CONFIG, SCOPES)
creds = flow.run_local_server(port=0)
        # Store the full credentials JSON so it can be re-loaded via from_authorized_user_info() above
        db.set(GOOGLE_DRIVE_DB_KEY, creds.to_json())
@classmethod
def _deserialize_resource(cls, json: dict) -> Resource or None:
"""
        Tries to parse a Google Drive API file resource (JSON) into a Resource object
:param json:
:return:
"""
try:
is_file = True
if 'folder' in json['mimeType']:
is_file = False
# You don't have pathes in google drive, instead -- you have an id
path = json['id']
except KeyError:
return None
res = Resource(is_file, path)
res.size = Size(json.get('size'), 'b') if json.get('size') else None
res.name = json.get('name')
res.url = json.get('webContentLink')
res.updated = json.get('modifiedTime')
res.md5 = json.get('md5Checksum')
return res
def list_resources_on_path(self, remote_path: str) -> List[Resource]:
"""
List all items in directory
:param path: path to the resource
"""
folder_id = self._get_folder_id_by_name(remote_path)
response = get_with_OAuth(
f"https://www.googleapis.com/drive/v3/files",
params={
'fields': '*',
'q': f"'{folder_id}' in parents"
},
token=self.token
)
if response.status_code == 200:
result = []
response_as_json = response.json()
files = response_as_json['files']
for resource in files:
res: Resource or None = self._deserialize_resource(resource)
if res is not None:
result.append(res)
return result
else:
raise ValueError(f"Something went wrong with YD: Response: "
f"{str(response.status_code)} — {response.json()['message']}")
def get_meta_info(self) -> StorageMetaInfo:
response = get_with_OAuth('https://www.googleapis.com/drive/v3/about?fields=*', token=self.token)
if response.status_code == 200:
response_read = response.json()
used_space = response_read.get('storageQuota', {}).get('usage')
total_space = response_read.get('storageQuota', {}).get('limit')
return StorageMetaInfo(int(used_space), int(total_space))
else:
raise ValueError(f"Something went wrong with GD: Response: "
f"{str(response.status_code)} — {response.json()['message']}")
def create_path(self, remote_path: List[str]) -> None:
"""
Creates the remote path on yandex disk
"""
print(f'[{__name__}] Trying to create directory {"/".join(remote_path)} on remote...')
dir_to_create = []
for dir in remote_path:
dir_to_create.append(dir)
path_to_create = '/'.join(dir_to_create)
response = put_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={path_to_create}',
token=self.token)
if 199 < response.status_code < 401:
print(f'[{__name__}] Created directory {path_to_create}')
continue
elif response.status_code == 409 and 'уже существует' in response.json().get('message', ''):
continue
return
def save_resource_to_path(self, resource: Resource, remote_path: str, overwrite: bool, _rec_call:bool = False) -> Resource or None:
"""
Put an Item to the directory
:param resource: resource on the local fs
:param remote_path: string, path to resource on remote fs
:param _rec_call: bool, a system parameter, whether or not this function was called as a recursive call
:return: saved resource or raises exception
"""
upload_successful_flag = False
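        # Two-step upload: first ask the API for an upload href, then PUT the file bytes to that href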
response = get_with_OAuth(
            f'https://cloud-api.yandex.net/v1/disk/resources/upload?path={remote_path}&overwrite={overwrite}',
token=self.token
)
if response.status_code == 200:
response_read = response.json()
upload_link = response_read['href']
with open(resource.path, 'rb') as f:
files = f
response = put_with_OAuth(upload_link, data=files)
if 199 < response.status_code < 401:
upload_successful_flag = True
response = get_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={remote_path}',
token=self.token)
resource_metainfo = self._deserialize_resource(response.json())
if 199 < response.status_code < 401:
return resource_metainfo
elif upload_successful_flag:
return resource
# This dir is not present in the storage
# We use _rec_call to tell that the next call was made as recursive call, so we don't cause SO
elif response.status_code == 409 and not _rec_call:
# We don't need to create a folder with the name equal to the filename, so we do [:-1]
self.create_path(remote_path.split('/')[:-1])
return self.save_resource_to_path(resource, remote_path, overwrite, _rec_call=True)
raise ValueError(f"Something went wrong with YD: Response: "
f"{str(response.status_code)} — {response.json().get('message', '')}")
def download_resource(self, remote_path, local_path) -> str:
response = get_with_OAuth(
f'https://cloud-api.yandex.net/v1/disk/resources/download?path={remote_path}',
token=self.token
)
if response.status_code == 200:
response_read = response.json()
dl_url = response_read.get('href')
else:
raise ValueError(f"[{__name__}] Something went wrong with YD: Response: "
f"{str(response.status_code)} — {response.json()['message']}")
file = requests.get(dl_url)
open(local_path, 'wb').write(file.content)
return local_path
def main():
storage = GDriveStorage(None)
db = Database('../storage.db')
storage.auth(db)
authed_storage = GDriveStorage(json.loads(db.get(GOOGLE_DRIVE_DB_KEY))['token'])
result = authed_storage.list_resources_on_path('savezone')
print(result)
if __name__ == '__main__':
main() | en | 0.888741 | Google drive has a quirk - you can't really use normal os-like paths - first you need to get an ID of the folder This function searches for folders with specified name # todo (toplenboren) remove database argument dependency :( Tries to parse Resource from YD to Resource object :param json: :return: # You don't have pathes in google drive, instead -- you have an id List all items in directory :param path: path to the resource Creates the remote path on yandex disk Put an Item to the directory :param resource: resource on the local fs :param remote_path: string, path to resource on remote fs :param _rec_call: bool, a system parameter, whether or not this function was called as a recursive call :return: saved resource or raises exception # This dir is not present in the storage # We use _rec_call to tell that the next call was made as recursive call, so we don't cause SO # We don't need to create a folder with the name equal to the filename, so we do [:-1] | 2.693186 | 3 |
index/urls.py | darkestmidnight/fedcodeathon2018 | 1 | 5918 | <gh_stars>1-10
from django.urls import re_path, include
from . import views
app_name='logged'
# url mappings for the webapp.
urlpatterns = [
re_path(r'^$', views.logged_count, name="logged_count"),
re_path(r'^loggedusers/', views.logged, name="logged_users"),
re_path(r'^settings/', views.user_settings, name="update_info"),
re_path(r'^administrators/', views.post_alert, name="post_alert"),
re_path(r'^alerts/$', views.list_alert, name="list_alert"),
re_path(r'^alerts/(?P<slug>[\w-]+)/$', views.view_alert, name="view_alert"),
re_path(r'^display/', views.display, name="display"),
re_path(r'^doorselection/', views.doors_election, name="door_selecttion")
] | from django.urls import re_path, include
from . import views
app_name='logged'
# url mappings for the webapp.
urlpatterns = [
re_path(r'^$', views.logged_count, name="logged_count"),
re_path(r'^loggedusers/', views.logged, name="logged_users"),
re_path(r'^settings/', views.user_settings, name="update_info"),
re_path(r'^administrators/', views.post_alert, name="post_alert"),
re_path(r'^alerts/$', views.list_alert, name="list_alert"),
re_path(r'^alerts/(?P<slug>[\w-]+)/$', views.view_alert, name="view_alert"),
re_path(r'^display/', views.display, name="display"),
re_path(r'^doorselection/', views.doors_election, name="door_selecttion")
] | en | 0.52175 | # url mappings for the webapp. | 1.935392 | 2 |
scout/dao/item.py | uw-it-aca/scout | 7 | 5919 | <filename>scout/dao/item.py<gh_stars>1-10
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from scout.dao.space import get_spots_by_filter, _get_spot_filters, \
_get_extended_info_by_key
import copy
def get_item_by_id(item_id):
spot = get_spots_by_filter([
('item:id', item_id),
('extended_info:app_type', 'tech')
])
if spot:
spot = _filter_spot_items(item_id, spot[0])
return spot
def _filter_spot_items(item_id, spot):
for item in spot.items:
if item.item_id == item_id:
spot.item = item
return spot
def add_item_info(spot):
for item in spot.items:
item.model = _get_extended_info_by_key("i_model",
item.extended_info)
item.brand = _get_extended_info_by_key("i_brand",
item.extended_info)
item.checkout_period = _get_extended_info_by_key(
"i_checkout_period",
item.extended_info
)
item.reservation_notes = _get_extended_info_by_key(
"i_reservation_notes",
item.extended_info
)
item.is_active = _get_extended_info_by_key(
"i_is_active",
item.extended_info
)
item.quantity = _get_extended_info_by_key(
"i_quantity",
item.extended_info
)
item.description = _get_extended_info_by_key(
"i_description",
item.extended_info
)
item.reserve_url = _get_extended_info_by_key(
"i_reserve_url",
item.extended_info
)
item.manual_url = _get_extended_info_by_key(
"i_manual_url",
item.extended_info
)
item.owner = _get_extended_info_by_key(
"i_owner",
item.extended_info
)
item.is_stf = _get_extended_info_by_key(
"i_is_stf",
item.extended_info
)
item.cte_type_id = _get_extended_info_by_key(
"cte_type_id",
item.extended_info
)
return spot
def get_filtered_items(spots, request):
parameter_list = _get_spot_filters(request)
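    # Split the requested filters into brand/subcategory lists and an is_active flag,
    # then keep only the items that satisfy every requested filter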
brand = []
subcategory = []
is_active = False
for param in parameter_list:
if param[0] == "item:extended_info:i_brand":
brand.append(param[1])
elif param[0] == "item:subcategory":
subcategory.append(param[1])
elif param[0] == "item:extended_info:i_is_active":
is_active = True
new_spots = []
for spot in spots:
new_spot = copy.deepcopy(spot)
new_spot.items = []
for item in spot.items:
if is_active and not item.is_active:
continue
if len(subcategory) > 0 and item.subcategory not in subcategory:
continue
if len(brand) > 0 and item.brand not in brand:
continue
new_spot.items.append(item)
new_spots.append(new_spot)
return new_spots
def get_item_count(spots):
item_count = 0
for spot in spots:
item_count += len(spot.items)
return item_count
| <filename>scout/dao/item.py<gh_stars>1-10
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from scout.dao.space import get_spots_by_filter, _get_spot_filters, \
_get_extended_info_by_key
import copy
def get_item_by_id(item_id):
spot = get_spots_by_filter([
('item:id', item_id),
('extended_info:app_type', 'tech')
])
if spot:
spot = _filter_spot_items(item_id, spot[0])
return spot
def _filter_spot_items(item_id, spot):
for item in spot.items:
if item.item_id == item_id:
spot.item = item
return spot
def add_item_info(spot):
for item in spot.items:
item.model = _get_extended_info_by_key("i_model",
item.extended_info)
item.brand = _get_extended_info_by_key("i_brand",
item.extended_info)
item.checkout_period = _get_extended_info_by_key(
"i_checkout_period",
item.extended_info
)
item.reservation_notes = _get_extended_info_by_key(
"i_reservation_notes",
item.extended_info
)
item.is_active = _get_extended_info_by_key(
"i_is_active",
item.extended_info
)
item.quantity = _get_extended_info_by_key(
"i_quantity",
item.extended_info
)
item.description = _get_extended_info_by_key(
"i_description",
item.extended_info
)
item.reserve_url = _get_extended_info_by_key(
"i_reserve_url",
item.extended_info
)
item.manual_url = _get_extended_info_by_key(
"i_manual_url",
item.extended_info
)
item.owner = _get_extended_info_by_key(
"i_owner",
item.extended_info
)
item.is_stf = _get_extended_info_by_key(
"i_is_stf",
item.extended_info
)
item.cte_type_id = _get_extended_info_by_key(
"cte_type_id",
item.extended_info
)
return spot
def get_filtered_items(spots, request):
parameter_list = _get_spot_filters(request)
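    # Split the requested filters into brand/subcategory lists and an is_active flag,
    # then keep only the items that satisfy every requested filter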
brand = []
subcategory = []
is_active = False
for param in parameter_list:
if param[0] == "item:extended_info:i_brand":
brand.append(param[1])
elif param[0] == "item:subcategory":
subcategory.append(param[1])
elif param[0] == "item:extended_info:i_is_active":
is_active = True
new_spots = []
for spot in spots:
new_spot = copy.deepcopy(spot)
new_spot.items = []
for item in spot.items:
if is_active and not item.is_active:
continue
if len(subcategory) > 0 and item.subcategory not in subcategory:
continue
if len(brand) > 0 and item.brand not in brand:
continue
new_spot.items.append(item)
new_spots.append(new_spot)
return new_spots
def get_item_count(spots):
item_count = 0
for spot in spots:
item_count += len(spot.items)
return item_count
| en | 0.374447 | # Copyright 2021 UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 | 1.989632 | 2 |
juriscraper/opinions/united_states/state/minnctapp.py | umeboshi2/juriscraper | 0 | 5920 | <filename>juriscraper/opinions/united_states/state/minnctapp.py
#Scraper for Minnesota Court of Appeals Published Opinions
#CourtID: minnctapp
#Court Short Name: MN
#Author: mlr
#Date: 2016-06-03
from juriscraper.opinions.united_states.state import minn
class Site(minn.Site):
# Only subclasses minn for the _download method.
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.court_filters = ['/ctapun/', '/ctappub/']
| <filename>juriscraper/opinions/united_states/state/minnctapp.py
#Scraper for Minnesota Court of Appeals Published Opinions
#CourtID: minnctapp
#Court Short Name: MN
#Author: mlr
#Date: 2016-06-03
from juriscraper.opinions.united_states.state import minn
class Site(minn.Site):
# Only subclasses minn for the _download method.
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.court_filters = ['/ctapun/', '/ctappub/']
| en | 0.832351 | #Scraper for Minnesota Court of Appeals Published Opinions #CourtID: minnctapp #Court Short Name: MN #Author: mlr #Date: 2016-06-03 # Only subclasses minn for the _download method. | 1.819755 | 2 |
monty/os/__init__.py | JosephMontoya-TRI/monty | 0 | 5921 | from __future__ import absolute_import
import os
import errno
from contextlib import contextmanager
__author__ = '<NAME>'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '1/24/14'
@contextmanager
def cd(path):
"""
A Fabric-inspired cd context that temporarily changes directory for
performing some tasks, and returns to the original working directory
afterwards. E.g.,
with cd("/my/path/"):
do_something()
Args:
path: Path to cd to.
"""
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
def makedirs_p(path, **kwargs):
"""
Wrapper for os.makedirs that does not raise an exception if the directory already exists, in the fashion of
"mkdir -p" command. The check is performed in a thread-safe way
Args:
path: path of the directory to create
kwargs: standard kwargs for os.makedirs
"""
try:
os.makedirs(path, **kwargs)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise | from __future__ import absolute_import
import os
import errno
from contextlib import contextmanager
__author__ = '<NAME>'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '1/24/14'
@contextmanager
def cd(path):
"""
A Fabric-inspired cd context that temporarily changes directory for
performing some tasks, and returns to the original working directory
afterwards. E.g.,
with cd("/my/path/"):
do_something()
Args:
path: Path to cd to.
"""
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
def makedirs_p(path, **kwargs):
"""
Wrapper for os.makedirs that does not raise an exception if the directory already exists, in the fashion of
"mkdir -p" command. The check is performed in a thread-safe way
Args:
path: path of the directory to create
kwargs: standard kwargs for os.makedirs
"""
try:
os.makedirs(path, **kwargs)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise | en | 0.777034 | A Fabric-inspired cd context that temporarily changes directory for performing some tasks, and returns to the original working directory afterwards. E.g., with cd("/my/path/"): do_something() Args: path: Path to cd to. Wrapper for os.makedirs that does not raise an exception if the directory already exists, in the fashion of "mkdir -p" command. The check is performed in a thread-safe way Args: path: path of the directory to create kwargs: standard kwargs for os.makedirs | 2.612193 | 3 |
{{ cookiecutter.repo_name }}/tests/test_environment.py | FrancisMudavanhu/cookiecutter-data-science | 0 | 5922 | import sys
REQUIRED_PYTHON = "python3"
required_major = 3
def main():
system_major = sys.version_info.major
if system_major != required_major:
raise TypeError(
f"This project requires Python {required_major}."
f" Found: Python {sys.version}")
else:
print(">>> Development environment passes all tests!")
if __name__ == '__main__':
main()
| import sys
REQUIRED_PYTHON = "python3"
required_major = 3
def main():
system_major = sys.version_info.major
if system_major != required_major:
raise TypeError(
f"This project requires Python {required_major}."
f" Found: Python {sys.version}")
else:
print(">>> Development environment passes all tests!")
if __name__ == '__main__':
main()
| none | 1 | 3.304125 | 3 |
|
documents/aws-doc-sdk-examples/python/example_code/kda/kda-python-datagenerator-stockticker.py | siagholami/aws-documentation | 5 | 5923 | # snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[kda-python-datagenerator-stockticker.py demonstrates how to generate sample data for Amazon Kinesis Data Analytics SQL applications.]
# snippet-service:[kinesisanalytics]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[Amazon Kinesis Data Analytics]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-29]
# snippet-sourceauthor:[fletpatr (AWS)]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[kinesisanalytics.python.datagenerator.stockticker]
import json
import boto3
import random
import datetime
kinesis = boto3.client('kinesis')
def getReferrer():
data = {}
now = datetime.datetime.now()
str_now = now.isoformat()
data['EVENT_TIME'] = str_now
data['TICKER'] = random.choice(['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV'])
price = random.random() * 100
data['PRICE'] = round(price, 2)
return data
while True:
data = json.dumps(getReferrer())
print(data)
kinesis.put_record(
StreamName="ExampleInputStream",
Data=data,
PartitionKey="partitionkey")
# snippet-end:[kinesisanalytics.python.datagenerator.stockticker]
| # snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[kda-python-datagenerator-stockticker.py demonstrates how to generate sample data for Amazon Kinesis Data Analytics SQL applications.]
# snippet-service:[kinesisanalytics]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[Amazon Kinesis Data Analytics]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-29]
# snippet-sourceauthor:[fletpatr (AWS)]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[kinesisanalytics.python.datagenerator.stockticker]
import json
import boto3
import random
import datetime
kinesis = boto3.client('kinesis')
def getReferrer():
data = {}
now = datetime.datetime.now()
str_now = now.isoformat()
data['EVENT_TIME'] = str_now
data['TICKER'] = random.choice(['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV'])
price = random.random() * 100
data['PRICE'] = round(price, 2)
return data
while True:
data = json.dumps(getReferrer())
print(data)
kinesis.put_record(
StreamName="ExampleInputStream",
Data=data,
PartitionKey="partitionkey")
# snippet-end:[kinesisanalytics.python.datagenerator.stockticker]
| en | 0.723809 | # snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.] # snippet-sourcedescription:[kda-python-datagenerator-stockticker.py demonstrates how to generate sample data for Amazon Kinesis Data Analytics SQL applications.] # snippet-service:[kinesisanalytics] # snippet-keyword:[Python] # snippet-sourcesyntax:[python] # snippet-sourcesyntax:[python] # snippet-keyword:[Amazon Kinesis Data Analytics] # snippet-keyword:[Code Sample] # snippet-sourcetype:[full-example] # snippet-sourcedate:[2019-01-29] # snippet-sourceauthor:[fletpatr (AWS)] # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # This file is licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. A copy of the # License is located at # # http://aws.amazon.com/apache2.0/ # # This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. # snippet-start:[kinesisanalytics.python.datagenerator.stockticker] # snippet-end:[kinesisanalytics.python.datagenerator.stockticker] | 2.416657 | 2 |
backend/app.py | alexespejo/project-argus | 1 | 5924 | <reponame>alexespejo/project-argus
import face_recognition
from flask import Flask, request, redirect, Response
import camera
import firestore as db
# You can change this to any folder on your system
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
app = Flask(__name__)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def detect_faces_in_image(name, access, file_stream):
# Load the uploaded image filed
img = face_recognition.load_image_file(file_stream)
# Get face encodings for any faces in the uploaded image
unknown_face_encodings = face_recognition.face_encodings(img)[0].tolist()
db.add_member(name, access, unknown_face_encodings)
return ('', 204)
@app.route('/')
def root():
return ('', 204)
@app.route('/upload', methods=['GET', 'POST'])
def upload_image():
db.encoding.update()
name = request.form.get("name")
access = request.form.get("access")
access = int(access)
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
if file and allowed_file(file.filename):
return detect_faces_in_image(name, access, file)
return redirect('/video_feed')
@app.route('/update', methods=['GET', 'POST'])
def update():
db.encoding.update()
member = request.form.get("updateMember")
changeName = request.form.get("changeName")
changeAccess = request.form.get("changeAccess")
    if changeAccess is None:
changeAccess = ""
db.update_member(member, changeName, changeAccess)
return ('', 204)
@app.route('/configuration', methods=['GET', 'POST'])
def config():
db.config_camera_interval(int(request.form.get('cameraDuration')))
return('', 204)
@app.route('/members')
def members():
print(type(db.encoding.get_names()))
return str(db.encoding.get_names())
@app.route('/video_feed')
def video_feed():
print('CAMERA RUN')
return Response(camera.gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/recent_person')
def recent_person():
return db.history_log.get_most_recent_member()
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5001, debug=True)
| import face_recognition
from flask import Flask, request, redirect, Response
import camera
import firestore as db
# You can change this to any folder on your system
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
app = Flask(__name__)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def detect_faces_in_image(name, access, file_stream):
# Load the uploaded image filed
img = face_recognition.load_image_file(file_stream)
# Get face encodings for any faces in the uploaded image
unknown_face_encodings = face_recognition.face_encodings(img)[0].tolist()
db.add_member(name, access, unknown_face_encodings)
return ('', 204)
@app.route('/')
def root():
return ('', 204)
@app.route('/upload', methods=['GET', 'POST'])
def upload_image():
db.encoding.update()
name = request.form.get("name")
access = request.form.get("access")
access = int(access)
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
if file and allowed_file(file.filename):
return detect_faces_in_image(name, access, file)
return redirect('/video_feed')
@app.route('/update', methods=['GET', 'POST'])
def update():
db.encoding.update()
member = request.form.get("updateMember")
changeName = request.form.get("changeName")
changeAccess = request.form.get("changeAccess")
    if changeAccess is None:
changeAccess = ""
db.update_member(member, changeName, changeAccess)
return ('', 204)
@app.route('/configuration', methods=['GET', 'POST'])
def config():
db.config_camera_interval(int(request.form.get('cameraDuration')))
return('', 204)
@app.route('/members')
def members():
print(type(db.encoding.get_names()))
return str(db.encoding.get_names())
@app.route('/video_feed')
def video_feed():
print('CAMERA RUN')
return Response(camera.gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/recent_person')
def recent_person():
return db.history_log.get_most_recent_member()
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5001, debug=True) | en | 0.975204 | # You can change this to any folder on your system # Load the uploaded image filed # Get face encodings for any faces in the uploaded image | 2.978071 | 3 |
module/classification_package/src/utils.py | fishial/Object-Detection-Model | 1 | 5925 | import numpy as np
import logging
import numbers
import torch
import math
import json
import sys
from torch.optim.lr_scheduler import LambdaLR
from torchvision.transforms.functional import pad
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
""" Linear warmup and then constant.
Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
Keeps learning rate schedule equal to 1. after warmup_steps.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
return 1.
class WarmupLinearSchedule(LambdaLR):
""" Linear warmup and then linear decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
class WarmupCosineSchedule(LambdaLR):
""" Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
def get_padding(image):
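    # Compute (left, top, right, bottom) padding that squares the image to its larger dimension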
w, h = image.size
max_wh = np.max([w, h])
h_padding = (max_wh - w) / 2
v_padding = (max_wh - h) / 2
l_pad = h_padding if h_padding % 1 == 0 else h_padding + 0.5
t_pad = v_padding if v_padding % 1 == 0 else v_padding + 0.5
r_pad = h_padding if h_padding % 1 == 0 else h_padding - 0.5
b_pad = v_padding if v_padding % 1 == 0 else v_padding - 0.5
padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad))
return padding
class NewPad(object):
def __init__(self, fill=0, padding_mode='constant'):
assert isinstance(fill, (numbers.Number, str, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image.
"""
return pad(img, get_padding(img), self.fill, self.padding_mode)
def __repr__(self):
        return self.__class__.__name__ + '(fill={0}, padding_mode={1})'. \
format(self.fill, self.padding_mode)
def find_device():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
return device
def read_json(data):
with open(data) as f:
return json.load(f)
def save_json(data, path):
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f)
def setup_logger():
logger = logging.getLogger('train')
logger.setLevel(logging.INFO)
if len(logger.handlers) == 0:
formatter = logging.Formatter('%(asctime)s | %(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_checkpoint(model, path):
torch.save(model.state_dict(), path)
def reverse_norm_image(image):
MEAN = torch.tensor([0.485, 0.456, 0.406])
STD = torch.tensor([0.229, 0.224, 0.225])
reverse_image = image * STD[:, None, None] + MEAN[:, None, None]
return reverse_image.permute(1, 2, 0).cpu().numpy() | import numpy as np
import logging
import numbers
import torch
import math
import json
import sys
from torch.optim.lr_scheduler import LambdaLR
from torchvision.transforms.functional import pad
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
""" Linear warmup and then constant.
Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
Keeps learning rate schedule equal to 1. after warmup_steps.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
return 1.
class WarmupLinearSchedule(LambdaLR):
""" Linear warmup and then linear decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
class WarmupCosineSchedule(LambdaLR):
""" Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve.
If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
def get_padding(image):
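    # Compute (left, top, right, bottom) padding that squares the image to its larger dimension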
w, h = image.size
max_wh = np.max([w, h])
h_padding = (max_wh - w) / 2
v_padding = (max_wh - h) / 2
l_pad = h_padding if h_padding % 1 == 0 else h_padding + 0.5
t_pad = v_padding if v_padding % 1 == 0 else v_padding + 0.5
r_pad = h_padding if h_padding % 1 == 0 else h_padding - 0.5
b_pad = v_padding if v_padding % 1 == 0 else v_padding - 0.5
padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad))
return padding
class NewPad(object):
def __init__(self, fill=0, padding_mode='constant'):
assert isinstance(fill, (numbers.Number, str, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image.
"""
return pad(img, get_padding(img), self.fill, self.padding_mode)
def __repr__(self):
        return self.__class__.__name__ + '(fill={0}, padding_mode={1})'. \
format(self.fill, self.padding_mode)
def find_device():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
return device
def read_json(data):
with open(data) as f:
return json.load(f)
def save_json(data, path):
with open(path, 'w', encoding='utf-8') as f:
json.dump(data, f)
def setup_logger():
logger = logging.getLogger('train')
logger.setLevel(logging.INFO)
if len(logger.handlers) == 0:
formatter = logging.Formatter('%(asctime)s | %(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_checkpoint(model, path):
torch.save(model.state_dict(), path)
def reverse_norm_image(image):
MEAN = torch.tensor([0.485, 0.456, 0.406])
STD = torch.tensor([0.229, 0.224, 0.225])
reverse_image = image * STD[:, None, None] + MEAN[:, None, None]
return reverse_image.permute(1, 2, 0).cpu().numpy() | en | 0.867836 | Computes and stores the average and current value Constant learning rate schedule. Linear warmup and then constant. Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps. Keeps learning rate schedule equal to 1. after warmup_steps. Linear warmup and then linear decay. Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps. Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps. Linear warmup and then cosine decay. Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps. Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve. If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup. # progress after warmup Args: img (PIL Image): Image to be padded. Returns: PIL Image: Padded image. Sets the learning rate to the initial LR decayed by 10 every 30 epochs | 2.576838 | 3 |
tests/pylint_plugins/test_assert_raises_without_msg.py | L-Net-1992/mlflow | 0 | 5926 | import pytest
from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable
pytestmark = skip_if_pylint_unavailable()
@pytest.fixture(scope="module")
def test_case():
import pylint.testutils
from pylint_plugins import AssertRaisesWithoutMsg
class TestAssertRaisesWithoutMsg(pylint.testutils.CheckerTestCase):
CHECKER_CLASS = AssertRaisesWithoutMsg
test_case = TestAssertRaisesWithoutMsg()
test_case.setup_method()
return test_case
def test_assert_raises_without_msg(test_case):
node = extract_node("self.assertRaises(Exception)")
with test_case.assertAddsMessages(create_message(test_case.CHECKER_CLASS.name, node)):
test_case.walk(node)
node = extract_node("self.assertRaises(Exception, msg='test')")
with test_case.assertNoMessages():
test_case.walk(node)
node = extract_node("pandas.assertRaises(Exception)")
with test_case.assertNoMessages():
test_case.walk(node)
| import pytest
from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable
pytestmark = skip_if_pylint_unavailable()
@pytest.fixture(scope="module")
def test_case():
import pylint.testutils
from pylint_plugins import AssertRaisesWithoutMsg
class TestAssertRaisesWithoutMsg(pylint.testutils.CheckerTestCase):
CHECKER_CLASS = AssertRaisesWithoutMsg
test_case = TestAssertRaisesWithoutMsg()
test_case.setup_method()
return test_case
def test_assert_raises_without_msg(test_case):
node = extract_node("self.assertRaises(Exception)")
with test_case.assertAddsMessages(create_message(test_case.CHECKER_CLASS.name, node)):
test_case.walk(node)
node = extract_node("self.assertRaises(Exception, msg='test')")
with test_case.assertNoMessages():
test_case.walk(node)
node = extract_node("pandas.assertRaises(Exception)")
with test_case.assertNoMessages():
test_case.walk(node)
| none | 1 | 2.180046 | 2 |
|
SVassembly/plot_bcs_across_bkpts.py | AV321/SVPackage | 0 | 5927 | import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors
import csv
from scipy.stats import mode
import math as m
import os
import collections
#set working directory
#os.chdir("/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
#bkpt_name = "1"
#example: plot_bcs_bkpt("1", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
def plot_bcs_bkpt(bkpt_name, infolder, outfolder):
if infolder[-1] != '/':
infolder = infolder + '/'
file_1 = infolder + bkpt_name + "_1.bc_windows.txt"
file_2 = infolder + bkpt_name + "_2.bc_windows.txt"
file_hap = infolder + bkpt_name + "_hap_bcs.txt"
df_1 = pd.read_table(file_1)
df_2 = pd.read_table(file_2)
hap_bcs = pd.read_table(file_hap)
# bkpt_name = "1"
# file_1 = bkpt_name + "_1.bc_windows.txt"
# file_2 = bkpt_name + "_2.bc_windows.txt"
# file_hap = bkpt_name + "_hap_bcs.txt"
# #sort barcodes by where they map (lowest coordinate to highest)
# #read in data frames
# df_1 = pd.read_table(file_1)
# df_2 = pd.read_table(file_2)
# hap_bcs = pd.read_table(file_hap)
hap_bcs = hap_bcs.transpose()
bcs_hap_dict = {}
for key in df_1.keys():
if key != "chrom" and key != "window_start" and key != "window_end":
key = key[:-2]
bcs_hap_dict[key] = 'unassigned'
for key, values in hap_bcs.iteritems():
if values[0] != 'bcs':
hap = values[1]
bcs_hap_dict[values[0]] = hap
df_1 = df_1.sort_values('window_start')
df_2 = df_2.sort_values('window_start')
chrom_1 = df_1.at[0, 'chrom']
chrom_2 = df_2.at[0, 'chrom']
x_values_1_1 = []
x_values_1_2 = []
x_values_1_unassigned = []
y_values_1_1 = []
y_values_1_2 = []
y_values_1_unassigned = []
x_values_2_1 = []
x_values_2_2 = []
x_values_2_unassigned = []
y_values_2_1 = []
y_values_2_2 = []
y_values_2_unassigned = []
i1 = 0
window_start_arr1 = df_1['window_start']
for name, values in df_1.iteritems(): #go through columns (so each barcode)
if name != "chrom" and name != "window_start" and name != "window_end":
i1 += 1
name = name[:-2]
hap = bcs_hap_dict[name]
#print type(hap) int
for indx, window in values.iteritems():
if window != 0:
if hap == 1:
y_values_1_1.append(i1)
x_values_1_1.append(window_start_arr1[indx])
elif hap == 2:
y_values_1_2.append(i1)
x_values_1_2.append(window_start_arr1[indx])
else:
y_values_1_unassigned.append(i1)
x_values_1_unassigned.append(window_start_arr1[indx])
i2 = 0
window_start_arr2 = df_2['window_start']
for name, values in df_2.iteritems():
if name != "chrom" and name != "window_start" and name != "window_end":
i2 += 1
name = name[:-2]
hap = bcs_hap_dict[name]
for indx, window in values.iteritems():
if window != 0:
if hap == 1:
y_values_2_1.append(i2)
x_values_2_1.append(window_start_arr2[indx])
elif hap == 2:
y_values_2_2.append(i2)
x_values_2_2.append(window_start_arr2[indx])
elif hap == 'unassigned':
y_values_2_unassigned.append(i2)
x_values_2_unassigned.append(window_start_arr2[indx])
fig = plt.figure()
figL = fig.add_subplot(121)
figL.scatter(x_values_1_1, y_values_1_1, s=0.2, color='b') #this doesn't seem to contain anything
figL.scatter(x_values_1_2, y_values_1_2, s=0.2, color='r') #same
figL.scatter(x_values_1_unassigned, y_values_1_unassigned, s=0.2, color='g')
figL.set_title("")
figL.set_xlabel("chr %d (Mb)" %chrom_1)
figL.set_ylabel("SV-specific barcode")
figR = fig.add_subplot(122)
figR.scatter(x_values_2_1, y_values_2_1, s=0.2, color='b') #same
figR.scatter(x_values_2_2, y_values_2_2, s=0.2, color='r') #same
figR.scatter(x_values_2_unassigned, y_values_2_unassigned, s=0.2, color='g')
figR.set_title("")
figR.set_xlabel("chr %d (Mb)" %chrom_2)
figR.set_ylabel("")
brkpt1 = min(df_1['window_start']) + ((max(df_1['window_end']) - min(df_1['window_start']))/2)
brkpt2 = min(df_2['window_start']) + ((max(df_2['window_end']) - min(df_2['window_start']))/2)
figL.axvline(x=brkpt1, linewidth=1, color = 'black')
figR.axvline(x=brkpt2, linewidth=1, color = 'black')
path = outfolder + 'bcs_bkpt_map'
plt.savefig(path)
| import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors
import csv
from scipy.stats import mode
import math as m
import os
import collections
#set working directory
#os.chdir("/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
#bkpt_name = "1"
#example: plot_bcs_bkpt("1", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
def plot_bcs_bkpt(bkpt_name, infolder, outfolder):
if infolder[-1] != '/':
infolder = infolder + '/'
file_1 = infolder + bkpt_name + "_1.bc_windows.txt"
file_2 = infolder + bkpt_name + "_2.bc_windows.txt"
file_hap = infolder + bkpt_name + "_hap_bcs.txt"
df_1 = pd.read_table(file_1)
df_2 = pd.read_table(file_2)
hap_bcs = pd.read_table(file_hap)
# bkpt_name = "1"
# file_1 = bkpt_name + "_1.bc_windows.txt"
# file_2 = bkpt_name + "_2.bc_windows.txt"
# file_hap = bkpt_name + "_hap_bcs.txt"
# #sort barcodes by where they map (lowest coordinate to highest)
# #read in data frames
# df_1 = pd.read_table(file_1)
# df_2 = pd.read_table(file_2)
# hap_bcs = pd.read_table(file_hap)
hap_bcs = hap_bcs.transpose()
bcs_hap_dict = {}
for key in df_1.keys():
if key != "chrom" and key != "window_start" and key != "window_end":
key = key[:-2]
bcs_hap_dict[key] = 'unassigned'
for key, values in hap_bcs.iteritems():
if values[0] != 'bcs':
hap = values[1]
bcs_hap_dict[values[0]] = hap
df_1 = df_1.sort_values('window_start')
df_2 = df_2.sort_values('window_start')
chrom_1 = df_1.at[0, 'chrom']
chrom_2 = df_2.at[0, 'chrom']
x_values_1_1 = []
x_values_1_2 = []
x_values_1_unassigned = []
y_values_1_1 = []
y_values_1_2 = []
y_values_1_unassigned = []
x_values_2_1 = []
x_values_2_2 = []
x_values_2_unassigned = []
y_values_2_1 = []
y_values_2_2 = []
y_values_2_unassigned = []
i1 = 0
window_start_arr1 = df_1['window_start']
for name, values in df_1.iteritems(): #go through columns (so each barcode)
if name != "chrom" and name != "window_start" and name != "window_end":
i1 += 1
name = name[:-2]
hap = bcs_hap_dict[name]
#print type(hap) int
for indx, window in values.iteritems():
if window != 0:
if hap == 1:
y_values_1_1.append(i1)
x_values_1_1.append(window_start_arr1[indx])
elif hap == 2:
y_values_1_2.append(i1)
x_values_1_2.append(window_start_arr1[indx])
else:
y_values_1_unassigned.append(i1)
x_values_1_unassigned.append(window_start_arr1[indx])
i2 = 0
window_start_arr2 = df_2['window_start']
for name, values in df_2.iteritems():
if name != "chrom" and name != "window_start" and name != "window_end":
i2 += 1
name = name[:-2]
hap = bcs_hap_dict[name]
for indx, window in values.iteritems():
if window != 0:
if hap == 1:
y_values_2_1.append(i2)
x_values_2_1.append(window_start_arr2[indx])
elif hap == 2:
y_values_2_2.append(i2)
x_values_2_2.append(window_start_arr2[indx])
elif hap == 'unassigned':
y_values_2_unassigned.append(i2)
x_values_2_unassigned.append(window_start_arr2[indx])
fig = plt.figure()
figL = fig.add_subplot(121)
figL.scatter(x_values_1_1, y_values_1_1, s=0.2, color='b') #this doesn't seem to contain anything
figL.scatter(x_values_1_2, y_values_1_2, s=0.2, color='r') #same
figL.scatter(x_values_1_unassigned, y_values_1_unassigned, s=0.2, color='g')
figL.set_title("")
figL.set_xlabel("chr %d (Mb)" %chrom_1)
figL.set_ylabel("SV-specific barcode")
figR = fig.add_subplot(122)
figR.scatter(x_values_2_1, y_values_2_1, s=0.2, color='b') #same
figR.scatter(x_values_2_2, y_values_2_2, s=0.2, color='r') #same
figR.scatter(x_values_2_unassigned, y_values_2_unassigned, s=0.2, color='g')
figR.set_title("")
figR.set_xlabel("chr %d (Mb)" %chrom_2)
figR.set_ylabel("")
brkpt1 = min(df_1['window_start']) + ((max(df_1['window_end']) - min(df_1['window_start']))/2)
brkpt2 = min(df_2['window_start']) + ((max(df_2['window_end']) - min(df_2['window_start']))/2)
figL.axvline(x=brkpt1, linewidth=1, color = 'black')
figR.axvline(x=brkpt2, linewidth=1, color = 'black')
path = outfolder + 'bcs_bkpt_map'
plt.savefig(path)
| en | 0.489823 | #set working directory #os.chdir("/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr") #bkpt_name = "1" #example: plot_bcs_bkpt("1", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr") # bkpt_name = "1" # file_1 = bkpt_name + "_1.bc_windows.txt" # file_2 = bkpt_name + "_2.bc_windows.txt" # file_hap = bkpt_name + "_hap_bcs.txt" # #sort barcodes by where they map (lowest coordinate to highest) # #read in data frames # df_1 = pd.read_table(file_1) # df_2 = pd.read_table(file_2) # hap_bcs = pd.read_table(file_hap) #go through columns (so each barcode) #print type(hap) int #this doesn't seem to contain anything #same #same #same | 2.159135 | 2 |
bites/bite029.py | ChidinmaKO/Chobe-bitesofpy | 0 | 5928 | <reponame>ChidinmaKO/Chobe-bitesofpy
def get_index_different_char(chars):
alnum = []
not_alnum = []
for index, char in enumerate(chars):
if str(char).isalnum():
alnum.append(index)
else:
not_alnum.append(index)
result = alnum[0] if len(alnum) < len(not_alnum) else not_alnum[0]
return result
# tests
def test_wrong_char():
inputs = (
['A', 'f', '.', 'Q', 2],
['.', '{', ' ^', '%', 'a'],
[1, '=', 3, 4, 5, 'A', 'b', 'a', 'b', 'c'],
['=', '=', '', '/', '/', 9, ':', ';', '?', '¡'],
list(range(1,9)) + ['}'] + list('abcde'), # noqa E231
)
expected = [2, 4, 1, 5, 8]
for arg, exp in zip(inputs, expected):
err = f'get_index_different_char({arg}) should return index {exp}'
assert get_index_different_char(arg) == exp, err | def get_index_different_char(chars):
alnum = []
not_alnum = []
for index, char in enumerate(chars):
if str(char).isalnum():
alnum.append(index)
else:
not_alnum.append(index)
result = alnum[0] if len(alnum) < len(not_alnum) else not_alnum[0]
return result
# tests
def test_wrong_char():
inputs = (
['A', 'f', '.', 'Q', 2],
['.', '{', ' ^', '%', 'a'],
[1, '=', 3, 4, 5, 'A', 'b', 'a', 'b', 'c'],
['=', '=', '', '/', '/', 9, ':', ';', '?', '¡'],
list(range(1,9)) + ['}'] + list('abcde'), # noqa E231
)
expected = [2, 4, 1, 5, 8]
for arg, exp in zip(inputs, expected):
err = f'get_index_different_char({arg}) should return index {exp}'
assert get_index_different_char(arg) == exp, err | en | 0.340442 | # tests # noqa E231 | 3.616755 | 4 |
language-detection-webapp/blueprints/langid.py | derlin/SwigSpot_Schwyzertuutsch-Spotting | 6 | 5929 | import logging
from flask import Blueprint
from flask import Flask, render_template, request, flash
from flask_wtf import FlaskForm
from wtforms import StringField, validators, SelectField, BooleanField
from wtforms.fields.html5 import IntegerRangeField
from wtforms.widgets import TextArea
import langid
from utils.utils import templated
blueprint_langid = Blueprint('langid', __name__)
class UrlForm(FlaskForm):
url = StringField(
'URL',
validators=[validators.DataRequired(), validators.URL(message='Sorry, this is not a valid URL,')])
wMin = IntegerRangeField(
'Min. words',
default=5,
validators=[validators.DataRequired(), validators.NumberRange(min=1, max=20)])
extractor_class = SelectField(
'Extractor',
default=langid.EXTRACTORS[0],
choices=[(i, i) for i in langid.EXTRACTORS],
validators=[validators.DataRequired()])
model_class = SelectField(
'Model',
default=langid.MODELS[0],
choices=[(i, i) for i in langid.MODELS],
validators=[validators.DataRequired()])
return_raw = BooleanField(
'Display raw sentences',
default=False
)
class TextForm(FlaskForm):
text = StringField(
'Text',
widget=TextArea(),
validators=[validators.DataRequired()])
model_class = SelectField(
'Model',
default=langid.MODELS[0],
choices=[(i, i) for i in langid.MODELS],
validators=[validators.DataRequired()])
@blueprint_langid.route('/', methods=['GET', 'POST'])
@templated('index.html')
def crawl():
form = UrlForm(request.form)
if request.method == 'GET':
return dict(form=form)
elif not form.validate():
for f, errs in form.errors.items():
flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
return dict(form=form)
try:
results = langid.mixed_sentences_from_urls(
form.url.data.strip(), extractor_name=form.extractor_class.data, model=form.model_class.data,
with_proba=True, min_words=form.wMin.data, return_raw=form.return_raw.data)
except Exception as e:
flash('Something went wrong %s' % e, 'danger')
logging.exception(e)
return dict(form=form)
return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
@blueprint_langid.route('/text', methods=['GET', 'POST'])
@templated('langid.html')
def predict_text():
form = TextForm(request.form)
if request.method == 'GET':
return dict(form=form)
elif not form.validate():
for f, errs in form.errors.items():
flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
return dict(form=form)
results = [[r] for r in langid.lang_of_text(
form.text.data, model=form.model_class.data, with_proba=True)]
return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
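# Hedged usage sketch (added for illustration, not part of the original module):
# the blueprint above still has to be registered on an application. The secret key
# and run settings below are placeholder assumptions.
if __name__ == '__main__':
    demo_app = Flask(__name__)
    demo_app.config['SECRET_KEY'] = 'change-me'  # FlaskForm requires a CSRF secret
    demo_app.register_blueprint(blueprint_langid)
    demo_app.run(debug=False)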
| import logging
from flask import Blueprint
from flask import Flask, render_template, request, flash
from flask_wtf import FlaskForm
from wtforms import StringField, validators, SelectField, BooleanField
from wtforms.fields.html5 import IntegerRangeField
from wtforms.widgets import TextArea
import langid
from utils.utils import templated
blueprint_langid = Blueprint('langid', __name__)
class UrlForm(FlaskForm):
url = StringField(
'URL',
validators=[validators.DataRequired(), validators.URL(message='Sorry, this is not a valid URL,')])
wMin = IntegerRangeField(
'Min. words',
default=5,
validators=[validators.DataRequired(), validators.NumberRange(min=1, max=20)])
extractor_class = SelectField(
'Extractor',
default=langid.EXTRACTORS[0],
choices=[(i, i) for i in langid.EXTRACTORS],
validators=[validators.DataRequired()])
model_class = SelectField(
'Model',
default=langid.MODELS[0],
choices=[(i, i) for i in langid.MODELS],
validators=[validators.DataRequired()])
return_raw = BooleanField(
'Display raw sentences',
default=False
)
class TextForm(FlaskForm):
text = StringField(
'Text',
widget=TextArea(),
validators=[validators.DataRequired()])
model_class = SelectField(
'Model',
default=langid.MODELS[0],
choices=[(i, i) for i in langid.MODELS],
validators=[validators.DataRequired()])
@blueprint_langid.route('/', methods=['GET', 'POST'])
@templated('index.html')
def crawl():
form = UrlForm(request.form)
if request.method == 'GET':
return dict(form=form)
elif not form.validate():
for f, errs in form.errors.items():
flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
return dict(form=form)
try:
results = langid.mixed_sentences_from_urls(
form.url.data.strip(), extractor_name=form.extractor_class.data, model=form.model_class.data,
with_proba=True, min_words=form.wMin.data, return_raw=form.return_raw.data)
except Exception as e:
flash('Something went wrong %s' % e, 'danger')
logging.exception(e)
return dict(form=form)
return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
@blueprint_langid.route('/text', methods=['GET', 'POST'])
@templated('langid.html')
def predict_text():
form = TextForm(request.form)
if request.method == 'GET':
return dict(form=form)
elif not form.validate():
for f, errs in form.errors.items():
flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
return dict(form=form)
results = [[r] for r in langid.lang_of_text(
form.text.data, model=form.model_class.data, with_proba=True)]
return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
| none | 1 | 2.227976 | 2 |
|
var/spack/repos/builtin/packages/r-xts/package.py | kehw/spack | 2 | 5930 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RXts(RPackage):
"""Provide for uniform handling of R's different time-based data classes by
extending zoo, maximizing native format information preservation and
allowing for user level customization and extension, while simplifying
cross-class interoperability."""
homepage = "http://r-forge.r-project.org/projects/xts/"
url = "https://cloud.r-project.org/src/contrib/xts_0.11-2.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/xts"
version('0.11-2', sha256='12772f6a66aab5b84b0665c470f11a3d8d8a992955c027261cfe8e6077ee13b8')
version('0.9-7', sha256='f11f7cb98f4b92b7f6632a2151257914130880c267736ef5a264b5dc2dfb7098')
depends_on('[email protected]:', type=('build', 'run'))
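    # Hedged usage note (illustrative, not part of the original recipe): once this
    # file sits in a Spack package repository, the library is installed with
    # `spack install r-xts`, and a specific release can be pinned, e.g.
    # `spack install [email protected]`.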
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RXts(RPackage):
"""Provide for uniform handling of R's different time-based data classes by
extending zoo, maximizing native format information preservation and
allowing for user level customization and extension, while simplifying
cross-class interoperability."""
homepage = "http://r-forge.r-project.org/projects/xts/"
url = "https://cloud.r-project.org/src/contrib/xts_0.11-2.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/xts"
version('0.11-2', sha256='12772f6a66aab5b84b0665c470f11a3d8d8a992955c027261cfe8e6077ee13b8')
version('0.9-7', sha256='f11f7cb98f4b92b7f6632a2151257914130880c267736ef5a264b5dc2dfb7098')
depends_on('[email protected]:', type=('build', 'run'))
| en | 0.813529 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Provide for uniform handling of R's different time-based data classes by extending zoo, maximizing native format information preservation and allowing for user level customization and extension, while simplifying cross-class interoperability. | 1.385264 | 1 |
sandbox/lib/jumpscale/Jumpscale/core/BASECLASSES/JSConfigsBCDB.py | threefoldtech/threebot_prebuilt | 0 | 5931 | <gh_stars>0
# Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/
# In case TF TECH NV ceases to exist (e.g. because of bankruptcy)
# then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018
# and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe
# This file is part of jumpscale at <https://github.com/threefoldtech>.
# jumpscale is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jumpscale is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v3 for more details.
#
# You should have received a copy of the GNU General Public License
# along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>.
# LICENSE END
from Jumpscale import j
from .JSConfigBCDBBase import JSConfigBCDBBase
class JSConfigsBCDB(JSConfigBCDBBase):
def _childclass_selector(self, jsxobject, **kwargs):
"""
allow custom implementation of which child class to use
:return:
"""
return self.__class__._CHILDCLASS
def new(self, name, jsxobject=None, autosave=True, **kwargs):
"""
        if it exists it will first be deleted when delete == True
:param name:
:param jsxobject:
:param autosave: sets the autosave argument on the data and also saves the object before the function returns. If set to False, you need to explicitly save the object.
:param kwargs:
:return:
"""
if self.exists(name=name):
raise j.exceptions.Base("cannot do new object, exists")
jsconfig = self._new(name=name, jsxobject=jsxobject, autosave=autosave, **kwargs)
self._check(jsconfig)
return jsconfig
def _check_children(self):
if not self._cache_use:
assert self._children == {}
def _check(self, jsconfig):
if jsconfig._id is None:
# model has never been saved no check required yet
return
# lets do some tests (maybe in future can be removed, but for now the safe bet)
assert jsconfig._id > 0
mother_id = jsconfig._mother_id_get()
if mother_id:
assert jsconfig.mother_id == mother_id
assert jsconfig._model.schema._md5 == self._model.schema._md5
def _new(self, name, jsxobject=None, autosave=True, **kwargs):
"""
:param name: for the CONFIG item (is a unique name for the service, client, ...)
:param jsxobject: you can right away specify the jsxobject
        :param kwargs: the data elements which will be given to the JSXObject underneath (given to constructor)
:return: the service
"""
kwargs_to_class = {}
if not jsxobject:
if kwargs:
kwargs_to_obj_new = {}
props = [i.name for i in self._model.schema.properties]
for key, val in kwargs.items():
if key in props:
kwargs_to_obj_new[key] = val
else:
kwargs_to_class[key] = val
jsxobject = self._model.new(data=kwargs_to_obj_new)
else:
jsxobject = self._model.new()
jsxobject.name = name
# means we need to remember the parent id
mother_id = self._mother_id_get()
if mother_id:
if jsxobject.mother_id != mother_id:
jsxobject.mother_id = mother_id
jsconfig_klass = self._childclass_selector(jsxobject=jsxobject)
jsconfig = jsconfig_klass(parent=self, jsxobject=jsxobject, **kwargs_to_class)
jsconfig._triggers_call(jsconfig, "new")
jsconfig._autosave = autosave
self._children[name] = jsconfig
if autosave:
self._children[name].save()
jsxobject._autosave = autosave
return self._children[name]
def get(self, name="main", id=None, needexist=False, autosave=True, reload=False, **kwargs):
"""
:param name: of the object
"""
# will reload if needed (not in self._children)
rc, jsconfig = self._get(name=name, id=id, die=needexist, reload=reload)
if not jsconfig:
self._log_debug("NEW OBJ:%s:%s" % (name, self._classname))
jsconfig = self._new(name=name, autosave=autosave, **kwargs)
else:
# check that the stored values correspond with kwargs given
# means comes from the database
if not jsconfig._data._model.schema._md5 == jsconfig._model.schema._md5:
# means data came from DB and schema is not same as config mgmt class
j.shell()
changed = False
jsconfig._data._autosave = False
for key, val in kwargs.items():
if not getattr(jsconfig, key) == val:
changed = True
setattr(jsconfig, key, val)
if changed and autosave:
try:
jsconfig.save()
except Exception as e:
print("CHECK WHY ERROR")
j.shell()
jsconfig._autosave = autosave
# lets do some tests (maybe in future can be removed, but for now the safe bet)
self._check(jsconfig)
jsconfig._triggers_call(jsconfig, "get")
return jsconfig
def _get(self, name="main", id=None, die=True, reload=False, autosave=True):
if id:
obj = self._model.get(id)
name = obj.name
return 1, self._new(name, obj)
obj = self._validate_child(name)
if obj:
if reload:
obj.load()
return 1, obj
self._log_debug("get child:'%s'from '%s'" % (name, self._classname))
# new = False
res = self.find(name=name)
if len(res) < 1:
if not die:
return 3, None
raise j.exceptions.Base(
"Did not find instance for:%s, name searched for:%s" % (self.__class__._location, name)
)
elif len(res) > 1:
raise j.exceptions.Base(
"Found more than 1 service for :%s, name searched for:%s" % (self.__class__._location, name)
)
else:
jsxconfig = res[0]
jsxconfig._autosave = autosave
return 2, jsxconfig
def reset(self):
"""
        will destroy all data in the DB, be careful
:return:
"""
self._log_debug("reset all data")
for item in self.find():
try:
item.delete()
except Exception as e:
j.shell()
if not self._mother_id_get():
self._model.index.destroy()
def _children_names_get(self, filter=None):
condition = False
Item = self._model.index.sql
mother_id = self._mother_id_get()
if mother_id:
condition = Item.mother_id == mother_id
if filter and filter != "*":
condition = Item.name.startswith(filter) and condition if condition else Item.name.startswith(filter)
if condition:
res = [i.name for i in Item.select().where(condition)]
else:
res = [i.name for i in Item.select()]
if len(res) > 50:
return []
return res
def find(self, reload=False, **kwargs):
"""
:param kwargs: e.g. color="red",...
:return: list of the config objects
"""
res = []
ids_done = []
for key, item in list(self._children.items()):
match = True
for key, val in kwargs.items():
if item._hasattr(key):
if val != getattr(item, key):
match = False
else:
match = False
if match:
if reload:
item.load()
res.append(item)
if item.id not in ids_done:
ids_done.append(item.id)
kwargs = self._kwargs_update(kwargs)
# this is more efficient no need to go to backend stor if the objects are already in mem
ids = self._model.find_ids(**kwargs)
for id in ids:
if id not in ids_done:
item = self.get(id=id, reload=reload, autosave=False)
res.append(item)
return res
def _kwargs_update(self, kwargs):
mother_id = self._mother_id_get()
if mother_id:
kwargs["mother_id"] = mother_id
return kwargs
def count(self, **kwargs):
"""
:param kwargs: e.g. color="red",...
:return: list of the config objects
"""
kwargs = self._kwargs_update(kwargs)
# TODO do proper count query
return len(list(self._model.find_ids(**kwargs)))
def _findData(self, **kwargs):
"""
:param kwargs: e.g. color="red",...
:return: list of the data objects (the data of the model)
"""
kwargs = self._kwargs_update(kwargs)
return self._model.find(**kwargs)
def save(self):
for item in self._children_get():
if item._hasattr("save"):
item.save()
def delete(self, name=None):
"""
:param name:
:return:
"""
self._delete(name=name)
def _delete(self, name=None):
if name:
_, child = self._get(name=name, die=False)
if child:
return child.delete()
else:
return self.reset()
if not name and self._parent:
if self._classname in self._parent._children:
if not isinstance(self._parent, j.baseclasses.factory):
# only delete when not a factory means is a custom class we're building
del self._parent._children[self._data.name]
def exists(self, name="main"):
"""
:param name: of the object
"""
obj = self._validate_child(name)
if obj:
return True
# will only use the index
return self.count(name=name) == 1
def _children_get(self, filter=None):
"""
:param filter: is '' then will show all, if None will ignore _
when * at end it will be considered a prefix
        when * at start it will be considered an end-of-line filter (endswith)
        when R as first char it is considered to be a regex
everything else is a full match
:return:
"""
# TODO implement filter properly
x = []
for _, item in self._children.items():
x.append(item)
x = self._filter(filter=filter, llist=x, nameonly=False)
# be smarter in how we use the index
for item in self.find():
if item not in x:
x.append(item)
return x
def __str__(self):
return "jsxconfigobj:collection:%s" % self._model.schema.url
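# Hedged usage sketch (illustrative; the subclass and property names below are
# assumptions, not part of this file). A concrete factory subclasses JSConfigsBCDB,
# points _CHILDCLASS at its JSConfigBCDB child, and is then used roughly as:
#   clients = MyClients()                       # hypothetical subclass instance
#   cl = clients.get("main", host="10.0.0.1")   # created and autosaved if missing
#   matches = clients.find(host="10.0.0.1")     # index-backed lookup
#   cl.delete()                                 # removes it from the BCDB index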
| # Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/
# In case TF TECH NV ceases to exist (e.g. because of bankruptcy)
# then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018
# and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe
# This file is part of jumpscale at <https://github.com/threefoldtech>.
# jumpscale is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jumpscale is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v3 for more details.
#
# You should have received a copy of the GNU General Public License
# along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>.
# LICENSE END
from Jumpscale import j
from .JSConfigBCDBBase import JSConfigBCDBBase
class JSConfigsBCDB(JSConfigBCDBBase):
def _childclass_selector(self, jsxobject, **kwargs):
"""
allow custom implementation of which child class to use
:return:
"""
return self.__class__._CHILDCLASS
def new(self, name, jsxobject=None, autosave=True, **kwargs):
"""
        if it exists it will first be deleted when delete == True
:param name:
:param jsxobject:
:param autosave: sets the autosave argument on the data and also saves the object before the function returns. If set to False, you need to explicitly save the object.
:param kwargs:
:return:
"""
if self.exists(name=name):
raise j.exceptions.Base("cannot do new object, exists")
jsconfig = self._new(name=name, jsxobject=jsxobject, autosave=autosave, **kwargs)
self._check(jsconfig)
return jsconfig
def _check_children(self):
if not self._cache_use:
assert self._children == {}
def _check(self, jsconfig):
if jsconfig._id is None:
# model has never been saved no check required yet
return
# lets do some tests (maybe in future can be removed, but for now the safe bet)
assert jsconfig._id > 0
mother_id = jsconfig._mother_id_get()
if mother_id:
assert jsconfig.mother_id == mother_id
assert jsconfig._model.schema._md5 == self._model.schema._md5
def _new(self, name, jsxobject=None, autosave=True, **kwargs):
"""
:param name: for the CONFIG item (is a unique name for the service, client, ...)
:param jsxobject: you can right away specify the jsxobject
        :param kwargs: the data elements which will be given to the JSXObject underneath (given to constructor)
:return: the service
"""
kwargs_to_class = {}
if not jsxobject:
if kwargs:
kwargs_to_obj_new = {}
props = [i.name for i in self._model.schema.properties]
for key, val in kwargs.items():
if key in props:
kwargs_to_obj_new[key] = val
else:
kwargs_to_class[key] = val
jsxobject = self._model.new(data=kwargs_to_obj_new)
else:
jsxobject = self._model.new()
jsxobject.name = name
# means we need to remember the parent id
mother_id = self._mother_id_get()
if mother_id:
if jsxobject.mother_id != mother_id:
jsxobject.mother_id = mother_id
jsconfig_klass = self._childclass_selector(jsxobject=jsxobject)
jsconfig = jsconfig_klass(parent=self, jsxobject=jsxobject, **kwargs_to_class)
jsconfig._triggers_call(jsconfig, "new")
jsconfig._autosave = autosave
self._children[name] = jsconfig
if autosave:
self._children[name].save()
jsxobject._autosave = autosave
return self._children[name]
def get(self, name="main", id=None, needexist=False, autosave=True, reload=False, **kwargs):
"""
:param name: of the object
"""
# will reload if needed (not in self._children)
rc, jsconfig = self._get(name=name, id=id, die=needexist, reload=reload)
if not jsconfig:
self._log_debug("NEW OBJ:%s:%s" % (name, self._classname))
jsconfig = self._new(name=name, autosave=autosave, **kwargs)
else:
# check that the stored values correspond with kwargs given
# means comes from the database
if not jsconfig._data._model.schema._md5 == jsconfig._model.schema._md5:
# means data came from DB and schema is not same as config mgmt class
j.shell()
changed = False
jsconfig._data._autosave = False
for key, val in kwargs.items():
if not getattr(jsconfig, key) == val:
changed = True
setattr(jsconfig, key, val)
if changed and autosave:
try:
jsconfig.save()
except Exception as e:
print("CHECK WHY ERROR")
j.shell()
jsconfig._autosave = autosave
# lets do some tests (maybe in future can be removed, but for now the safe bet)
self._check(jsconfig)
jsconfig._triggers_call(jsconfig, "get")
return jsconfig
def _get(self, name="main", id=None, die=True, reload=False, autosave=True):
if id:
obj = self._model.get(id)
name = obj.name
return 1, self._new(name, obj)
obj = self._validate_child(name)
if obj:
if reload:
obj.load()
return 1, obj
self._log_debug("get child:'%s'from '%s'" % (name, self._classname))
# new = False
res = self.find(name=name)
if len(res) < 1:
if not die:
return 3, None
raise j.exceptions.Base(
"Did not find instance for:%s, name searched for:%s" % (self.__class__._location, name)
)
elif len(res) > 1:
raise j.exceptions.Base(
"Found more than 1 service for :%s, name searched for:%s" % (self.__class__._location, name)
)
else:
jsxconfig = res[0]
jsxconfig._autosave = autosave
return 2, jsxconfig
def reset(self):
"""
        will destroy all data in the DB, be careful
:return:
"""
self._log_debug("reset all data")
for item in self.find():
try:
item.delete()
except Exception as e:
j.shell()
if not self._mother_id_get():
self._model.index.destroy()
def _children_names_get(self, filter=None):
condition = False
Item = self._model.index.sql
mother_id = self._mother_id_get()
if mother_id:
condition = Item.mother_id == mother_id
if filter and filter != "*":
condition = Item.name.startswith(filter) and condition if condition else Item.name.startswith(filter)
if condition:
res = [i.name for i in Item.select().where(condition)]
else:
res = [i.name for i in Item.select()]
if len(res) > 50:
return []
return res
def find(self, reload=False, **kwargs):
"""
:param kwargs: e.g. color="red",...
:return: list of the config objects
"""
res = []
ids_done = []
for key, item in list(self._children.items()):
match = True
for key, val in kwargs.items():
if item._hasattr(key):
if val != getattr(item, key):
match = False
else:
match = False
if match:
if reload:
item.load()
res.append(item)
if item.id not in ids_done:
ids_done.append(item.id)
kwargs = self._kwargs_update(kwargs)
# this is more efficient no need to go to backend stor if the objects are already in mem
ids = self._model.find_ids(**kwargs)
for id in ids:
if id not in ids_done:
item = self.get(id=id, reload=reload, autosave=False)
res.append(item)
return res
def _kwargs_update(self, kwargs):
mother_id = self._mother_id_get()
if mother_id:
kwargs["mother_id"] = mother_id
return kwargs
def count(self, **kwargs):
"""
:param kwargs: e.g. color="red",...
:return: list of the config objects
"""
kwargs = self._kwargs_update(kwargs)
# TODO do proper count query
return len(list(self._model.find_ids(**kwargs)))
def _findData(self, **kwargs):
"""
:param kwargs: e.g. color="red",...
:return: list of the data objects (the data of the model)
"""
kwargs = self._kwargs_update(kwargs)
return self._model.find(**kwargs)
def save(self):
for item in self._children_get():
if item._hasattr("save"):
item.save()
def delete(self, name=None):
"""
:param name:
:return:
"""
self._delete(name=name)
def _delete(self, name=None):
if name:
_, child = self._get(name=name, die=False)
if child:
return child.delete()
else:
return self.reset()
if not name and self._parent:
if self._classname in self._parent._children:
if not isinstance(self._parent, j.baseclasses.factory):
# only delete when not a factory means is a custom class we're building
del self._parent._children[self._data.name]
def exists(self, name="main"):
"""
:param name: of the object
"""
obj = self._validate_child(name)
if obj:
return True
# will only use the index
return self.count(name=name) == 1
def _children_get(self, filter=None):
"""
:param filter: is '' then will show all, if None will ignore _
when * at end it will be considered a prefix
        when * at start it will be considered an end-of-line filter (endswith)
        when R as first char it is considered to be a regex
everything else is a full match
:return:
"""
# TODO implement filter properly
x = []
for _, item in self._children.items():
x.append(item)
x = self._filter(filter=filter, llist=x, nameonly=False)
# be smarter in how we use the index
for item in self.find():
if item not in x:
x.append(item)
return x
def __str__(self):
return "jsxconfigobj:collection:%s" % self._model.schema.url | en | 0.860863 | # Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/ # In case TF TECH NV ceases to exist (e.g. because of bankruptcy) # then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018 # and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe # This file is part of jumpscale at <https://github.com/threefoldtech>. # jumpscale is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # jumpscale is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License v3 for more details. # # You should have received a copy of the GNU General Public License # along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>. # LICENSE END allow custom implementation of which child class to use :return: it it exists will delete if first when delete == True :param name: :param jsxobject: :param autosave: sets the autosave argument on the data and also saves the object before the function returns. If set to False, you need to explicitly save the object. :param kwargs: :return: # model has never been saved no check required yet # lets do some tests (maybe in future can be removed, but for now the safe bet) :param name: for the CONFIG item (is a unique name for the service, client, ...) :param jsxobject: you can right away specify the jsxobject :param kwargs: the data elements which will be given to JSXObject underneith (given to constructor) :return: the service # means we need to remember the parent id :param name: of the object # will reload if needed (not in self._children) # check that the stored values correspond with kwargs given # means comes from the database # means data came from DB and schema is not same as config mgmt class # lets do some tests (maybe in future can be removed, but for now the safe bet) # new = False will destroy all data in the DB, be carefull :return: :param kwargs: e.g. color="red",... :return: list of the config objects # this is more efficient no need to go to backend stor if the objects are already in mem :param kwargs: e.g. color="red",... :return: list of the config objects # TODO do proper count query :param kwargs: e.g. color="red",... :return: list of the data objects (the data of the model) :param name: :return: # only delete when not a factory means is a custom class we're building :param name: of the object # will only use the index :param filter: is '' then will show all, if None will ignore _ when * at end it will be considered a prefix when * at start it will be considered a end of line filter (endswith) when R as first char its considered to be a regex everything else is a full match :return: # TODO implement filter properly # be smarter in how we use the index | 1.675858 | 2 |
source/tree.py | holderekt/regression-tree | 0 | 5932 | import utils as utl
import error_measures as err
# Regression Tree Node
class Node:
def __init__(self, parent, node_id, index=None, value=None, examples=None, prediction=0):
self.index = index
self.id = node_id
self.prediction = prediction
self.value = value
self.parent = parent
self.examples = examples
self.right = None
self.left = None
self.ssr = 0
self.leaves = 0
self.ssr_as_root = 0
def is_leaf(self):
if(self.right == None and self.left == None):
return True
return False
def leafs_id(self):
if(not self.is_leaf()):
return self._leafs_search(self.left) + self._leafs_search(self.right)
return [1]
def n_leafs(self):
return len(self.leafs_id())
def _leafs_search(self, node):
if node.is_leaf():
return [node.id]
return self._leafs_search(node.left) + self._leafs_search(node.right)
def __str__(self):
return str(self.id)
# Regression Tree
class Regression_Tree:
def __init__(self, y_train, root):
self.y = y_train
self.root = root
# Generate Prediction given a test example
def predict(self, example, deleted=[]):
current_node = self.root
while(not current_node.is_leaf() and ((current_node in deleted) == False)):
if(example[current_node.index] <= current_node.value):
current_node = current_node.left
else:
current_node = current_node.right
return current_node.prediction
# Generate Sum Square Residuals of a given node on training data
def node_ssr(self, node):
ssr = 0
for example in node.examples:
ssr = ssr + pow((self.y[example] - node.prediction) , 2)
return ssr
def leafs_id(self):
return self.root.leafs_id()
def n_leafs(self):
return len(self.leafs_id())
def __str__(self):
return self._print(self.root)
def print_leaf(self, node):
if(node.is_leaf()):
print(len(node.examples))
else:
self.print_leaf(node.left)
self.print_leaf(node.right)
def _print(self, node):
node_id = str(node.id)
r_string = node_id + " " + str(node.ssr)
if(not node.is_leaf()):
r_string = r_string + "\nLeft : " + node_id + "\n" + self._print(node.left)
r_string = r_string + "\nRight: " + node_id + "\n" + self._print(node.right)
return r_string
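# Hedged usage note (illustrative, not part of the original file): predict() walks from
# the root, going left when example[node.index] <= node.value and right otherwise, and
# returns the prediction stored on the node where it stops; nodes passed in the `deleted`
# list are treated as leaves, which allows evaluating a pruned subtree without rebuilding it.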
| import utils as utl
import error_measures as err
# Regression Tree Node
class Node:
def __init__(self, parent, node_id, index=None, value=None, examples=None, prediction=0):
self.index = index
self.id = node_id
self.prediction = prediction
self.value = value
self.parent = parent
self.examples = examples
self.right = None
self.left = None
self.ssr = 0
self.leaves = 0
self.ssr_as_root = 0
def is_leaf(self):
if(self.right == None and self.left == None):
return True
return False
def leafs_id(self):
if(not self.is_leaf()):
return self._leafs_search(self.left) + self._leafs_search(self.right)
return [1]
def n_leafs(self):
return len(self.leafs_id())
def _leafs_search(self, node):
if node.is_leaf():
return [node.id]
return self._leafs_search(node.left) + self._leafs_search(node.right)
def __str__(self):
return str(self.id)
# Regression Tree
class Regression_Tree:
def __init__(self, y_train, root):
self.y = y_train
self.root = root
# Generate Prediction given a test example
def predict(self, example, deleted=[]):
current_node = self.root
while(not current_node.is_leaf() and ((current_node in deleted) == False)):
if(example[current_node.index] <= current_node.value):
current_node = current_node.left
else:
current_node = current_node.right
return current_node.prediction
# Generate Sum Square Residuals of a given node on training data
def node_ssr(self, node):
ssr = 0
for example in node.examples:
ssr = ssr + pow((self.y[example] - node.prediction) , 2)
return ssr
def leafs_id(self):
return self.root.leafs_id()
def n_leafs(self):
return len(self.leafs_id())
def __str__(self):
return self._print(self.root)
def print_leaf(self, node):
if(node.is_leaf()):
print(len(node.examples))
else:
self.print_leaf(node.left)
self.print_leaf(node.right)
def _print(self, node):
node_id = str(node.id)
r_string = node_id + " " + str(node.ssr)
if(not node.is_leaf()):
r_string = r_string + "\nLeft : " + node_id + "\n" + self._print(node.left)
r_string = r_string + "\nRight: " + node_id + "\n" + self._print(node.right)
return r_string
| en | 0.581112 | # Regression Tree Node # Regression Tree # Generate Prediction given a test example # Generate Sum Square Residuals of a given node on training data | 2.904119 | 3 |
src/site/config.py | ninaamorim/sentiment-analysis-2018-president-election | 39 | 5933 | <reponame>ninaamorim/sentiment-analysis-2018-president-election
from starlette.applications import Starlette
from starlette.middleware.gzip import GZipMiddleware
from starlette.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles
app = Starlette(debug=False, template_directory='src/site/templates')
app.add_middleware(GZipMiddleware, minimum_size=500)
app.add_middleware(CORSMiddleware, allow_origins=['*'])
app.mount('/static', StaticFiles(directory='src/site/media'), name='static')
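# Hedged run note (illustrative; the module path is inferred from the file location):
# this ASGI app is typically served with uvicorn, e.g.
#   uvicorn src.site.config:app --host 0.0.0.0 --port 8000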
| from starlette.applications import Starlette
from starlette.middleware.gzip import GZipMiddleware
from starlette.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles
app = Starlette(debug=False, template_directory='src/site/templates')
app.add_middleware(GZipMiddleware, minimum_size=500)
app.add_middleware(CORSMiddleware, allow_origins=['*'])
app.mount('/static', StaticFiles(directory='src/site/media'), name='static') | none | 1 | 1.508425 | 2 |
|
loadbalanceRL/lib/__init__.py | fqzhou/LoadBalanceControl-RL | 11 | 5934 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Contains core logic for Rainman2
"""
__author__ = '<NAME> (<EMAIL>), <NAME>(<EMAIL>)'
__date__ = 'Wednesday, February 14th 2018, 11:42:09 am'
| #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Contains core logic for Rainman2
"""
__author__ = '<NAME> (<EMAIL>), <NAME>(<EMAIL>)'
__date__ = 'Wednesday, February 14th 2018, 11:42:09 am'
| en | 0.4564 | #! /usr/bin/env python3 # -*- coding: utf-8 -*- Contains core logic for Rainman2 | 1.744781 | 2 |
openff/bespokefit/__init__.py | openforcefield/bespoke-f | 12 | 5935 | """
BespokeFit
Creating bespoke parameters for individual molecules.
"""
import logging
import sys
from ._version import get_versions
versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions
# Silence verbose messages when running the CLI otherwise you can't read the output
# without seeing tens of 'Unable to load AmberTools' or don't import simtk warnings...
if sys.argv[0].endswith("openff-bespoke"):
from openff.bespokefit.utilities.logging import DeprecationWarningFilter
# if "openff-bespoke"
logging.getLogger("openff.toolkit").setLevel(logging.ERROR)
logging.getLogger().addFilter(DeprecationWarningFilter())
| """
BespokeFit
Creating bespoke parameters for individual molecules.
"""
import logging
import sys
from ._version import get_versions
versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions
# Silence verbose messages when running the CLI otherwise you can't read the output
# without seeing tens of 'Unable to load AmberTools' or don't import simtk warnings...
if sys.argv[0].endswith("openff-bespoke"):
from openff.bespokefit.utilities.logging import DeprecationWarningFilter
# if "openff-bespoke"
logging.getLogger("openff.toolkit").setLevel(logging.ERROR)
logging.getLogger().addFilter(DeprecationWarningFilter())
| en | 0.697239 | BespokeFit Creating bespoke parameters for individual molecules. # Silence verbose messages when running the CLI otherwise you can't read the output # without seeing tens of 'Unable to load AmberTools' or don't import simtk warnings... # if "openff-bespoke" | 1.747885 | 2 |
TRANSFORM/Resources/python/2006LUT_to_SDF.py | greenwoodms/TRANSFORM-Library | 29 | 5936 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 03 11:06:37 2018
@author: vmg
"""
import sdf
import numpy as np
# Load 2006 LUT for interpolation
# 2006 Groeneveld Look-Up Table as presented in
# "2006 CHF Look-Up Table", Nuclear Engineering and Design 237, pp. 1909-1922.
# This file requires the file 2006LUTdata.txt
# Pressure range [MPa] from 2006 LUT, convert to [Pa]
P = np.array((0.10,0.30,0.50,1.0,2.0,3.0,5.0,7.0,10.0,12.0,14.0,16.0,18.0,20.0,21.0))*1e6
# Mass Flux range [kg/m^2-s] from 2006 .LUT.
G = np.array((0.,50.,100.,300.,500.,750.,1000.,1500.,2000.,2500.,3000.,3500.,4000.,4500.,5000.,5500.,6000.,6500.,7000.,7500.,8000.))
# Quality range from 2006 LUT
x = np.array((-0.50,-0.40,-0.30,-0.20,-0.15,-0.10,-0.05,0.00,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.60,0.70,0.80,0.90,1.00))
# Critical heat flux [kW/m^2] from 2006 LUT, convert to [W/m^2]
q_raw=np.loadtxt('../Data/2006LUTdata.txt')*1e3
# Convert the imported array into a (MxNxQ) where:
# M is number of mass flux divisions
# N is number of quality divisions
# Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG,lenx,lenP))
for i in range(lenG):
    for j in range(lenx):
        for k in range(lenP):
q[i,j,k] = q_raw[i + k*lenG,j]
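# Illustrative sanity check (added, not part of the original script): q_raw stacks the
# G-by-x blocks vertically, one block per pressure level, so element (i, j, k) of q
# must equal row i + k*lenG, column j of the raw table.
assert q[0, 0, 0] == q_raw[0, 0]
assert q[2, 3, 1] == q_raw[2 + 1*lenG, 3]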
# Create the datasets:
ds_G = sdf.Dataset('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name='Mass Flux')
ds_x = sdf.Dataset('x', data=x, unit='1', is_scale=True, display_name='Quality')
ds_P = sdf.Dataset('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G,ds_x,ds_P])
# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G,ds_x,ds_P,ds_q])
sdf.save('../Data/2006LUT.sdf', g) | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 03 11:06:37 2018
@author: vmg
"""
import sdf
import numpy as np
# Load 2006 LUT for interpolation
# 2006 Groeneveld Look-Up Table as presented in
# "2006 CHF Look-Up Table", Nuclear Engineering and Design 237, pp. 1909-1922.
# This file requires the file 2006LUTdata.txt
# Pressure range [MPa] from 2006 LUT, convert to [Pa]
P = np.array((0.10,0.30,0.50,1.0,2.0,3.0,5.0,7.0,10.0,12.0,14.0,16.0,18.0,20.0,21.0))*1e6
# Mass Flux range [kg/m^2-s] from 2006 .LUT.
G = np.array((0.,50.,100.,300.,500.,750.,1000.,1500.,2000.,2500.,3000.,3500.,4000.,4500.,5000.,5500.,6000.,6500.,7000.,7500.,8000.))
# Quality range from 2006 LUT
x = np.array((-0.50,-0.40,-0.30,-0.20,-0.15,-0.10,-0.05,0.00,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.60,0.70,0.80,0.90,1.00))
# Critical heat flux [kW/m^2] from 2006 LUT, convert to [W/m^2]
q_raw=np.loadtxt('../Data/2006LUTdata.txt')*1e3
# Convert the imported array into a (MxNxQ) where:
# M is number of mass flux divisions
# N is number of quality divisions
# Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG,lenx,lenP))
for i in range(lenG):
    for j in range(lenx):
        for k in range(lenP):
q[i,j,k] = q_raw[i + k*lenG,j]
# Create the datasets:
ds_G = sdf.Dataset('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name='Mass Flux')
ds_x = sdf.Dataset('x', data=x, unit='1', is_scale=True, display_name='Quality')
ds_P = sdf.Dataset('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G,ds_x,ds_P])
# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G,ds_x,ds_P,ds_q])
sdf.save('../Data/2006LUT.sdf', g) | en | 0.805239 | # -*- coding: utf-8 -*- Created on Tue Apr 03 11:06:37 2018 @author: vmg # Load 2006 LUT for interpolation # 2006 Groeneveld Look-Up Table as presented in # "2006 CHF Look-Up Table", Nuclear Engineering and Design 237, pp. 190-1922. # This file requires the file 2006LUTdata.txt # Pressure range [MPa] from 2006 LUT, convert to [Pa] # Mass Flux range [kg/m^2-s] from 2006 .LUT. # Quality range from 2006 LUT # Critical heat flux [kW/m^2] from 2006 LUT, convert to [W/m^2] # Convert the imported array into a (MxNxQ) where: # M is number of mass flux divisions # N is number of quality divisions # Q is number of pressure divisions # Create the datasets: # Create the root group and write the file: | 2.097189 | 2 |
test/asserting/policy.py | tmsanrinsha/vint | 2 | 5937 | import unittest
from pathlib import Path
from pprint import pprint
from vint.compat.itertools import zip_longest
from vint.linting.linter import Linter
from vint.linting.config.config_default_source import ConfigDefaultSource
class PolicyAssertion(unittest.TestCase):
class StubPolicySet(object):
def __init__(self, *policies):
self._policies = policies
def get_enabled_policies(self):
return self._policies
def update_by_config(self, policy_enabling_map):
pass
class StubConfigContainer(object):
def __init__(self, policy_names_to_enable):
default_config_dict = ConfigDefaultSource(None).get_config_dict()
policy_options = default_config_dict.get('policies', {})
for policy, options in policy_options.items():
options['enabled'] = False
for policy in policy_names_to_enable:
options = policy_options.setdefault(policy, {})
options['enabled'] = True
self._config_dict = {
'policies': policy_options,
}
def append_config_source(self, config_source):
# Ignore a comment config source
pass
def get_config_dict(self):
return self._config_dict
def assertFoundNoViolations(self, path, Policy, policy_options=None):
self.assertFoundViolationsEqual(path, Policy, [], policy_options)
def assertFoundViolationsEqual(self, path, Policy, expected_violations, policy_options=None):
policy_to_test = Policy()
policy_name = Policy.__name__
policy_set = PolicyAssertion.StubPolicySet(policy_to_test)
config = PolicyAssertion.StubConfigContainer(policy_name)
if policy_options is not None:
config.get_config_dict()['policies'][policy_name].update(policy_options)
linter = Linter(policy_set, config.get_config_dict())
violations = linter.lint_file(path)
pprint(violations)
assert len(violations) == len(expected_violations)
for violation, expected_violation in zip_longest(violations, expected_violations):
self.assertViolation(violation, expected_violation)
def assertViolation(self, actual_violation, expected_violation):
self.assertIsNot(actual_violation, None)
self.assertIsNot(expected_violation, None)
pprint(actual_violation)
assert actual_violation['name'] == expected_violation['name']
assert actual_violation['position'] == expected_violation['position']
assert actual_violation['level'] == expected_violation['level']
self.assertIsInstance(actual_violation['description'], str)
def get_fixture_path(*filename):
return Path('test', 'fixture', 'policy', *filename)
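# Hedged usage sketch (illustrative; the policy class and fixture names are assumptions):
#   class TestProhibitSomething(PolicyAssertion):
#       def test_valid_file(self):
#           self.assertFoundNoViolations(get_fixture_path('prohibit_something_valid.vim'),
#                                        ProhibitSomething)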
| import unittest
from pathlib import Path
from pprint import pprint
from vint.compat.itertools import zip_longest
from vint.linting.linter import Linter
from vint.linting.config.config_default_source import ConfigDefaultSource
class PolicyAssertion(unittest.TestCase):
class StubPolicySet(object):
def __init__(self, *policies):
self._policies = policies
def get_enabled_policies(self):
return self._policies
def update_by_config(self, policy_enabling_map):
pass
class StubConfigContainer(object):
def __init__(self, policy_names_to_enable):
default_config_dict = ConfigDefaultSource(None).get_config_dict()
policy_options = default_config_dict.get('policies', {})
for policy, options in policy_options.items():
options['enabled'] = False
for policy in policy_names_to_enable:
options = policy_options.setdefault(policy, {})
options['enabled'] = True
self._config_dict = {
'policies': policy_options,
}
def append_config_source(self, config_source):
# Ignore a comment config source
pass
def get_config_dict(self):
return self._config_dict
def assertFoundNoViolations(self, path, Policy, policy_options=None):
self.assertFoundViolationsEqual(path, Policy, [], policy_options)
def assertFoundViolationsEqual(self, path, Policy, expected_violations, policy_options=None):
policy_to_test = Policy()
policy_name = Policy.__name__
policy_set = PolicyAssertion.StubPolicySet(policy_to_test)
config = PolicyAssertion.StubConfigContainer(policy_name)
if policy_options is not None:
config.get_config_dict()['policies'][policy_name].update(policy_options)
linter = Linter(policy_set, config.get_config_dict())
violations = linter.lint_file(path)
pprint(violations)
assert len(violations) == len(expected_violations)
for violation, expected_violation in zip_longest(violations, expected_violations):
self.assertViolation(violation, expected_violation)
def assertViolation(self, actual_violation, expected_violation):
self.assertIsNot(actual_violation, None)
self.assertIsNot(expected_violation, None)
pprint(actual_violation)
assert actual_violation['name'] == expected_violation['name']
assert actual_violation['position'] == expected_violation['position']
assert actual_violation['level'] == expected_violation['level']
self.assertIsInstance(actual_violation['description'], str)
def get_fixture_path(*filename):
return Path('test', 'fixture', 'policy', *filename)
| en | 0.262934 | # Ignore a comment config source | 2.321222 | 2 |
dataprofiler/labelers/character_level_cnn_model.py | gliptak/DataProfiler | 0 | 5938 | import copy
import json
import logging
import os
import sys
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
from sklearn import decomposition
from .. import dp_logging
from . import labeler_utils
from .base_model import AutoSubRegistrationMeta, BaseModel, BaseTrainableModel
_file_dir = os.path.dirname(os.path.abspath(__file__))
logger = dp_logging.get_child_logger(__name__)
class NoV1ResourceMessageFilter(logging.Filter):
"""Removes TF2 warning for using TF1 model which has resources."""
def filter(self, record):
msg = 'is a problem, consider rebuilding the SavedModel after ' + \
'running tf.compat.v1.enable_resource_variables()'
return msg not in record.getMessage()
tf_logger = logging.getLogger('tensorflow')
tf_logger.addFilter(NoV1ResourceMessageFilter())
@tf.keras.utils.register_keras_serializable()
class FBetaScore(tf.keras.metrics.Metric):
r"""Computes F-Beta score.
Adapted and slightly modified from https://github.com/tensorflow/addons/blob/v0.12.0/tensorflow_addons/metrics/f_scores.py#L211-L283
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
It is the weighted harmonic mean of precision
and recall. Output range is `[0, 1]`. Works for
both multi-class and multi-label classification.
$$
    F_{\beta} = (1 + \beta^2) \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{(\beta^2 \cdot \textrm{precision}) + \textrm{recall}}
$$
Args:
num_classes: Number of unique classes in the dataset.
average: Type of averaging to be performed on data.
Acceptable values are `None`, `micro`, `macro` and
`weighted`. Default value is None.
beta: Determines the weight of precision and recall
in harmonic mean. Determines the weight given to the
precision and recall. Default value is 1.
threshold: Elements of `y_pred` greater than threshold are
converted to be 1, and the rest 0. If threshold is
None, the argmax is converted to 1, and the rest 0.
name: (Optional) String name of the metric instance.
dtype: (Optional) Data type of the metric result.
Returns:
F-Beta Score: float.
"""
# Modification: remove the run-time type checking for functions
def __init__(self, num_classes, average=None, beta=1.0, threshold=None,
name="fbeta_score", dtype=None, **kwargs):
super().__init__(name=name, dtype=dtype)
if average not in (None, "micro", "macro", "weighted"):
raise ValueError(
"Unknown average type. Acceptable values "
"are: [None, 'micro', 'macro', 'weighted']"
)
if not isinstance(beta, float):
raise TypeError("The value of beta should be a python float")
if beta <= 0.0:
raise ValueError("beta value should be greater than zero")
if threshold is not None:
if not isinstance(threshold, float):
raise TypeError("The value of threshold should be a python float")
if threshold > 1.0 or threshold <= 0.0:
raise ValueError("threshold should be between 0 and 1")
self.num_classes = num_classes
self.average = average
self.beta = beta
self.threshold = threshold
self.axis = None
self.init_shape = []
if self.average != "micro":
self.axis = 0
self.init_shape = [self.num_classes]
def _zero_wt_init(name):
return self.add_weight(
name, shape=self.init_shape, initializer="zeros", dtype=self.dtype
)
self.true_positives = _zero_wt_init("true_positives")
self.false_positives = _zero_wt_init("false_positives")
self.false_negatives = _zero_wt_init("false_negatives")
self.weights_intermediate = _zero_wt_init("weights_intermediate")
def update_state(self, y_true, y_pred, sample_weight=None):
if self.threshold is None:
threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
# make sure [0, 0, 0] doesn't become [1, 1, 1]
# Use abs(x) > eps, instead of x != 0 to check for zero
y_pred = tf.logical_and(y_pred >= threshold, tf.abs(y_pred) > 1e-12)
else:
y_pred = y_pred > self.threshold
y_true = tf.cast(y_true, self.dtype)
y_pred = tf.cast(y_pred, self.dtype)
def _weighted_sum(val, sample_weight):
if sample_weight is not None:
val = tf.math.multiply(val, tf.expand_dims(sample_weight, 1))
return tf.reduce_sum(val, axis=self.axis)
self.true_positives.assign_add(_weighted_sum(y_pred * y_true, sample_weight))
self.false_positives.assign_add(
_weighted_sum(y_pred * (1 - y_true), sample_weight)
)
self.false_negatives.assign_add(
_weighted_sum((1 - y_pred) * y_true, sample_weight)
)
self.weights_intermediate.assign_add(_weighted_sum(y_true, sample_weight))
def result(self):
precision = tf.math.divide_no_nan(
self.true_positives, self.true_positives + self.false_positives
)
recall = tf.math.divide_no_nan(
self.true_positives, self.true_positives + self.false_negatives
)
mul_value = precision * recall
add_value = (tf.math.square(self.beta) * precision) + recall
mean = tf.math.divide_no_nan(mul_value, add_value)
f1_score = mean * (1 + tf.math.square(self.beta))
if self.average == "weighted":
weights = tf.math.divide_no_nan(
self.weights_intermediate, tf.reduce_sum(self.weights_intermediate)
)
f1_score = tf.reduce_sum(f1_score * weights)
elif self.average is not None: # [micro, macro]
f1_score = tf.reduce_mean(f1_score)
return f1_score
def get_config(self):
"""Returns the serializable config of the metric."""
config = {
"num_classes": self.num_classes,
"average": self.average,
"beta": self.beta,
"threshold": self.threshold,
}
base_config = super().get_config()
return {**base_config, **config}
def reset_states(self):
reset_value = tf.zeros(self.init_shape, dtype=self.dtype)
tf.keras.backend.batch_set_value([(v, reset_value) for v in self.variables])
@tf.keras.utils.register_keras_serializable()
class F1Score(FBetaScore):
r"""Computes F-1 Score.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
It is the harmonic mean of precision and recall.
Output range is `[0, 1]`. Works for both multi-class
and multi-label classification.
$$
F_1 = 2 \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{\textrm{precision} + \textrm{recall}}
$$
Args:
num_classes: Number of unique classes in the dataset.
average: Type of averaging to be performed on data.
Acceptable values are `None`, `micro`, `macro`
and `weighted`. Default value is None.
threshold: Elements of `y_pred` above threshold are
considered to be 1, and the rest 0. If threshold is
None, the argmax is converted to 1, and the rest 0.
name: (Optional) String name of the metric instance.
dtype: (Optional) Data type of the metric result.
Returns:
F-1 Score: float.
"""
# Modification: remove the run-time type checking for functions
def __init__(self, num_classes, average=None, threshold=None,
name="f1_score", dtype=None):
super().__init__(num_classes, average, 1.0, threshold, name=name, dtype=dtype)
def get_config(self):
base_config = super().get_config()
del base_config["beta"]
return base_config
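# Hedged usage sketch (illustrative; the optimizer, loss and label count are assumptions):
# both metrics plug into a Keras model at compile time, e.g.
#   model.compile(optimizer="adam",
#                 loss="categorical_crossentropy",
#                 metrics=[F1Score(num_classes=10, average="micro")])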
def build_embd_dictionary(filename):
"""
Returns a numpy embedding dictionary from embed file with GloVe-like format
:param filename: Path to the embed file for loading
:type filename: str
"""
embd_table = dict()
with open(filename, 'r') as embds:
for line in embds:
line = line.strip().split()
embd_table[line[0]] = np.asarray(line[1:])
return embd_table
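# Illustrative call (added; the file name assumes the reduced embeddings already exist):
#   embd = build_embd_dictionary("embeddings/glove-reduced-64D.txt")  # char -> np.ndarray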
def create_glove_char(n_dims, source_file=None):
"""
    Reduces the GloVe character embeddings from the source file to n_dims
    principal components and writes them to a new file
:param n_dims: Final number of principal component dims of the embeddings
:type n_dims: int
:param source_file: Location of original embeddings to factor down
:type source_file: str
"""
if source_file is None:
source_file = os.path.join(_file_dir,
"embeddings/glove.840B.300d-char.txt")
# get embedding table first and vectors as array
embd_table = build_embd_dictionary(source_file)
embd_words, embd_matrix = [
np.asarray(ls) if i > 0 else list(ls)
for i, ls in enumerate(zip(*embd_table.items()))]
# get PCA embedder
pca = decomposition.PCA(n_components=n_dims)
reduced_embds = pca.fit_transform(embd_matrix)
# write to file
dir_name = os.path.dirname(source_file)
embd_file_name = os.path.join(dir_name,
'glove-reduced-{}D.txt'.format(n_dims))
with open(embd_file_name, 'w') as file:
for word, embd in zip(embd_words, reduced_embds):
file.write(word + " " + ' '.join(str(num) for num in embd) + "\n")
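# Illustrative call: reducing the bundled 300-D character embeddings to 64
# principal components writes `embeddings/glove-reduced-64D.txt` next to the
# source file, which _construct_model() later loads:
#
#   create_glove_char(64)  # defaults to embeddings/glove.840B.300d-char.txt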
class CharacterLevelCnnModel(BaseTrainableModel,
metaclass=AutoSubRegistrationMeta):
    # Boolean indicating whether the label mapping must reserve index 0
    # (used for the 'PAD' label)
requires_zero_mapping = True
def __init__(self, label_mapping=None, parameters=None):
"""
        CNN model initializer. Initializes the epoch id, default parameters,
        and label mapping.
:param label_mapping: maps labels to their encoded integers
:type label_mapping: dict
:param parameters: Contains all the appropriate parameters for the
model. Must contain num_labels. Other possible parameters are:
max_length, max_char_encoding_id, dim_embed, size_fc
dropout, size_conv, num_fil, optimizer, default_label
:type parameters: dict
:return: None
"""
# parameter initialization
if not parameters:
parameters = {}
parameters.setdefault('max_length', 3400)
parameters.setdefault('max_char_encoding_id', 127)
parameters.setdefault('dim_embed', 64)
parameters.setdefault('size_fc', [96, 96])
parameters.setdefault('dropout', 0.073)
parameters.setdefault('size_conv', 13)
parameters.setdefault('default_label', "UNKNOWN")
parameters.setdefault('num_fil', [48 for _ in range(4)])
parameters['pad_label'] = 'PAD'
self._epoch_id = 0
# reconstruct flags for model
self._model_num_labels = 0
self._model_default_ind = -1
BaseModel.__init__(self, label_mapping, parameters)
def __eq__(self, other):
"""
        Checks whether two models are equal. Only the important attributes
        (parameters and label mapping) are compared, not the underlying
        Keras model itself.
:param self: a model
:param other: a model
:type self: BaseModel
:type other: BaseModel
:return: Whether or not self and other are equal
:rtype: bool
"""
if self._parameters != other._parameters \
or self._label_mapping != other._label_mapping:
return False
return True
def _validate_parameters(self, parameters):
"""
Validate the parameters sent in. Raise error if invalid parameters are
present.
:param parameters: parameter dict containing the following parameters:
max_length: Maximum char length in a sample
max_char_encoding_id: Maximum integer value for encoding the input
dim_embed: Number of embedded dimensions
size_fc: Size of each fully connected layers
dropout: Ratio of dropout in the model
size_conv: Convolution kernel size
default_label: Key for label_mapping that is the default label
pad_label: Key for entities_dict that is the pad label
num_fil: Number of filters in each convolution layer
:type parameters: dict
:return: None
"""
errors = []
list_of_necessary_params = ['max_length', 'max_char_encoding_id',
'dim_embed', 'size_fc', 'dropout',
'size_conv', 'default_label', 'pad_label',
'num_fil']
# Make sure the necessary parameters are present and valid.
for param in parameters:
if param in ['max_length', 'max_char_encoding_id', 'dim_embed',
'size_conv']:
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0:
errors.append(param + " must be a valid integer or float "
"greater than 0.")
elif param == 'dropout':
if not isinstance(parameters[param], (int, float)) \
or parameters[param] < 0 or parameters[param] > 1:
errors.append(param + " must be a valid integer or float "
"from 0 to 1.")
elif param == 'size_fc' or param == 'num_fil':
if not isinstance(parameters[param], list) \
or len(parameters[param]) == 0:
errors.append(param + " must be a non-empty list of "
"integers.")
else:
for item in parameters[param]:
if not isinstance(item, int):
errors.append(param + " must be a non-empty "
"list of integers.")
break
elif param == 'default_label':
if not isinstance(parameters[param], str):
error = str(param) + " must be a string."
errors.append(error)
# Error if there are extra parameters thrown in
for param in parameters:
if param not in list_of_necessary_params:
errors.append(param + " is not an accepted parameter.")
if errors:
raise ValueError('\n'.join(errors))
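    # Example (illustrative): a parameters dict that passes this validation,
    # mirroring the defaults set in __init__:
    #
    #   {'max_length': 3400, 'max_char_encoding_id': 127, 'dim_embed': 64,
    #    'size_fc': [96, 96], 'dropout': 0.073, 'size_conv': 13,
    #    'default_label': 'UNKNOWN', 'pad_label': 'PAD',
    #    'num_fil': [48, 48, 48, 48]}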
def set_label_mapping(self, label_mapping):
"""
Sets the labels for the model
:param label_mapping: label mapping of the model
:type label_mapping: dict
:return: None
"""
if not isinstance(label_mapping, (list, dict)):
raise TypeError("Labels must either be a non-empty encoding dict "
"which maps labels to index encodings or a list.")
label_mapping = copy.deepcopy(label_mapping)
if 'PAD' not in label_mapping:
if isinstance(label_mapping, list): # if list missing PAD
label_mapping = ['PAD'] + label_mapping
elif 0 not in label_mapping.values(): # if dict missing PAD and 0
label_mapping.update({'PAD': 0})
if (isinstance(label_mapping, dict)
and label_mapping.get('PAD', None) != 0): # dict with bad PAD
raise ValueError("`PAD` must map to index zero.")
if self._parameters['default_label'] not in label_mapping:
raise ValueError("The `default_label` of {} must exist in the "
"label mapping.".format(
self._parameters['default_label']))
super().set_label_mapping(label_mapping)
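    # Example (illustrative, assuming the base class enumerates list labels in
    # order): both calls below yield a mapping with 'PAD' reserved at index 0.
    #
    #   model.set_label_mapping(['UNKNOWN', 'ADDRESS'])
    #   model.set_label_mapping({'PAD': 0, 'UNKNOWN': 1, 'ADDRESS': 2})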
def _need_to_reconstruct_model(self):
"""
Determines whether or not the model needs to be reconstructed.
:return: bool of whether or not the model needs to reconstruct.
"""
if not self._model:
return False
default_ind = self.label_mapping[self._parameters['default_label']]
return self.num_labels != self._model_num_labels or \
default_ind != self._model_default_ind
def save_to_disk(self, dirpath):
"""
Saves whole model to disk with weights
:param dirpath: directory path where you want to save the model to
:type dirpath: str
:return: None
"""
if not self._model:
self._construct_model()
elif self._need_to_reconstruct_model():
self._reconstruct_model()
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'w') as fp:
json.dump(self._parameters, fp)
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'w') as fp:
json.dump(self.label_mapping, fp)
self._model.save(os.path.join(dirpath))
@classmethod
def load_from_disk(cls, dirpath):
"""
Loads whole model from disk with weights
:param dirpath: directory path where you want to load the model from
:type dirpath: str
        :return: loaded model
        :rtype: CharacterLevelCnnModel
"""
# load parameters
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'r') as fp:
parameters = json.load(fp)
# load label_mapping
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'r') as fp:
label_mapping = json.load(fp)
# use f1 score metric
custom_objects = {
"F1Score": F1Score(
num_classes=max(label_mapping.values()) + 1,
average='micro'),
"CharacterLevelCnnModel": cls,
}
with tf.keras.utils.custom_object_scope(custom_objects):
tf_model = tf.keras.models.load_model(dirpath)
loaded_model = cls(label_mapping, parameters)
loaded_model._model = tf_model
# Tensorflow v1 Model weights need to be transferred.
if not callable(tf_model):
loaded_model._construct_model()
tf1_weights = []
for var in tf_model.variables:
if 'training' not in var.name:
tf1_weights.append(var.value())
loaded_model._construct_model()
tf1_weights.append(loaded_model._model.weights[-1].value())
loaded_model._model.set_weights(tf1_weights)
# load self
loaded_model._model_num_labels = loaded_model.num_labels
loaded_model._model_default_ind = loaded_model.label_mapping[
loaded_model._parameters['default_label']
]
return loaded_model
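    # Illustrative round trip (the directory name is an assumption):
    #
    #   model.save_to_disk("./char_cnn_labeler")
    #   restored = CharacterLevelCnnModel.load_from_disk("./char_cnn_labeler")
    #
    # The directory holds the SavedModel files plus the model_parameters.json
    # and label_mapping.json written by save_to_disk.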
@staticmethod
def _char_encoding_layer(input_str_tensor, max_char_encoding_id, max_len):
"""
Character encoding for the list of sentences
:param input_str_tensor: input list of sentences converted to tensor
:type input_str_tensor: tf.tensor
:param max_char_encoding_id: Maximum integer value for encoding the
input
:type max_char_encoding_id: int
:param max_len: Maximum char length in a sample
:type max_len: int
:return : tensor containing encoded list of input sentences
:rtype: tf.Tensor
"""
# convert characters to indices
input_str_flatten = tf.reshape(input_str_tensor, [-1])
sentences_encode = tf.strings.unicode_decode(input_str_flatten,
input_encoding='UTF-8')
sentences_encode = tf.add(tf.cast(1, tf.int32), sentences_encode)
sentences_encode = tf.math.minimum(sentences_encode,
max_char_encoding_id + 1)
# padding
sentences_encode_pad = sentences_encode.to_tensor(shape=[None, max_len])
return sentences_encode_pad
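    # Example (illustrative): with max_char_encoding_id=127 and max_len=5, the
    # string "ab" becomes its unicode code points shifted by one and
    # right-padded with zeros (zero is reserved for padding):
    #
    #   _char_encoding_layer(tf.constant(["ab"]), 127, 5)  # -> [[98, 99, 0, 0, 0]]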
@staticmethod
def _argmax_threshold_layer(num_labels, threshold=0.0, default_ind=1):
"""
Adds an argmax threshold layer to the model. This layer's output will be
the argmax value if the confidence for that argmax meets the threshold
for its label, otherwise it will be the default label index.
:param num_labels: number of entities
:type num_labels: int
:param threshold: default set to 0 so all confidences pass.
:type threshold: float
:param default_ind: default index
:type default_ind: int
:return: final argmax threshold layer for the model
"""
# Initialize the thresholds vector variable and create the threshold
# matrix.
class ThreshArgMaxLayer(tf.keras.layers.Layer):
def __init__(self, threshold_, num_labels_):
super(ThreshArgMaxLayer, self).__init__()
thresh_init = tf.constant_initializer(threshold_)
self.thresh_vec = tf.Variable(
name='ThreshVec',
initial_value=thresh_init(shape=[num_labels_]),
trainable=False)
def call(self, argmax_layer, confidence_layer):
threshold_at_argmax = tf.gather(self.thresh_vec, argmax_layer)
confidence_max_layer = tf.keras.backend.max(confidence_layer,
axis=2)
# Check if the confidences meet the threshold minimum.
argmax_mask = tf.keras.backend.cast(
tf.keras.backend.greater_equal(confidence_max_layer,
threshold_at_argmax),
dtype=argmax_layer.dtype)
# Create a vector the same size as the batch_size which
# represents the background label
bg_label_tf = tf.keras.backend.constant(
default_ind, dtype=argmax_layer.dtype)
# Generate the final predicted output using the function:
final_predicted_layer = tf.add(
bg_label_tf,
tf.multiply(
tf.subtract(argmax_layer, bg_label_tf),
argmax_mask
), name='ThreshArgMax'
)
return final_predicted_layer
return ThreshArgMaxLayer(threshold, num_labels)
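    # Example (illustrative; the tensors are hypothetical): with threshold=0.5
    # and default_ind=1, a character whose argmax confidence is 0.4 falls back
    # to label index 1, while one at 0.9 keeps its argmax label.
    #
    #   thresh_layer = CharacterLevelCnnModel._argmax_threshold_layer(
    #       num_labels=3, threshold=0.5, default_ind=1)
    #   final_labels = thresh_layer(argmax_tensor, softmax_tensor)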
def _construct_model(self):
"""
Model constructor for the data labeler. This also serves as a weight
reset.
:return: None
"""
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
# Reset model
tf.keras.backend.clear_session()
# generate glove embedding
create_glove_char(self._parameters['dim_embed'])
# generate model
self._model = tf.keras.models.Sequential()
# default parameters
max_length = self._parameters['max_length']
max_char_encoding_id = self._parameters['max_char_encoding_id']
# Encoding layer
def encoding_function(input_str):
char_in_vector = CharacterLevelCnnModel._char_encoding_layer(
input_str, max_char_encoding_id, max_length)
return char_in_vector
self._model.add(tf.keras.layers.Input(shape=(None,), dtype=tf.string))
self._model.add(
tf.keras.layers.Lambda(encoding_function,
output_shape=tuple([max_length])))
# Create a pre-trained weight matrix
# character encoding indices range from 0 to max_char_encoding_id,
# we add one extra index for out-of-vocabulary character
embed_file = os.path.join(
_file_dir, "embeddings/glove-reduced-{}D.txt".format(
self._parameters['dim_embed']))
embedding_matrix = np.zeros((max_char_encoding_id + 2,
self._parameters['dim_embed']))
embedding_dict = build_embd_dictionary(embed_file)
input_shape = tuple([max_length])
# Fill in the weight matrix: let pad and space be 0s
for ascii_num in range(max_char_encoding_id):
if chr(ascii_num) in embedding_dict:
embedding_matrix[ascii_num + 1] = embedding_dict[chr(ascii_num)]
self._model.add(tf.keras.layers.Embedding(
max_char_encoding_id + 2,
self._parameters['dim_embed'],
weights=[embedding_matrix],
input_length=input_shape[0],
trainable=True))
# Add the convolutional layers
for fil in self._parameters['num_fil']:
self._model.add(tf.keras.layers.Conv1D(
filters=fil, kernel_size=self._parameters['size_conv'],
activation='relu', padding='same'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
        # Add batch normalization (note: fused kernels are disabled here)
self._model.add(
tf.keras.layers.BatchNormalization(fused=False, scale=True))
# Add the fully connected layers
for size in self._parameters['size_fc']:
self._model.add(
tf.keras.layers.Dense(units=size, activation='relu'))
if self._parameters['dropout']:
self._model.add(
tf.keras.layers.Dropout(self._parameters['dropout']))
# Add the final Softmax layer
self._model.add(
tf.keras.layers.Dense(num_labels, activation='softmax'))
        # Take the argmax over the softmax output to get per-character label indices
argmax_layer = tf.keras.backend.argmax(self._model.output)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = self._model.outputs + \
[argmax_layer,
final_predicted_layer(argmax_layer, self._model.output)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
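    # With the default parameters, the constructed network is, per input string:
    #   raw string -> char-encode Lambda (length 3400) -> 64-D char Embedding
    #   -> Conv1D(48 filters, kernel 13, relu) blocks (one per entry in
    #      num_fil) with Dropout and BatchNormalization -> Dense(96, relu) +
    #      Dropout per entry in size_fc -> Dense(num_labels, softmax)
    # and the model exposes three outputs: softmax confidences, the raw argmax,
    # and the thresholded argmax from _argmax_threshold_layer.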
def reset_weights(self):
"""
Reset the weights of the model.
:return: None
"""
self._construct_model()
def _reconstruct_model(self):
"""
        Reconstruct the appropriate output layers if the number of labels is
        altered
:return: None
"""
# Reset model
tf.keras.backend.clear_session()
num_labels = self.num_labels
default_ind = self.label_mapping[self._parameters['default_label']]
        # Remove the 3 output layers ('dense_2', 'tf_op_layer_ArgMax',
        # 'thresh_arg_max_layer')
for _ in range(3):
self._model.layers.pop()
# Add the final Softmax layer to the previous spot
final_softmax_layer = tf.keras.layers.Dense(
num_labels, activation='softmax', name="dense_2")(
self._model.layers[-4].output)
        # Take the argmax over the new softmax output
argmax_layer = tf.keras.backend.argmax(final_softmax_layer)
# Create confidence layers
final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
num_labels, threshold=0.0, default_ind=default_ind)
argmax_outputs = [final_softmax_layer] + \
[argmax_layer,
final_predicted_layer(argmax_layer,
final_softmax_layer)]
self._model = tf.keras.Model(self._model.inputs, argmax_outputs)
# Compile the model
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
losses = {softmax_output_layer_name: "categorical_crossentropy"}
# use f1 score metric
f1_score_training = F1Score(num_classes=num_labels, average='micro')
metrics = {softmax_output_layer_name: ['acc', f1_score_training]}
self._model.compile(loss=losses,
optimizer="adam",
metrics=metrics)
self._epoch_id = 0
self._model_num_labels = num_labels
self._model_default_ind = default_ind
def fit(self, train_data, val_data=None, batch_size=32, label_mapping=None,
reset_weights=False, verbose=True):
"""
Train the current model with the training data and validation data
:param train_data: Training data used to train model
:type train_data: Union[list, np.ndarray]
:param val_data: Validation data used to validate the training
:type val_data: Union[list, np.ndarray]
:param batch_size: Used to determine number of samples in each batch
:type batch_size: int
:param label_mapping: maps labels to their encoded integers
:type label_mapping: Union[dict, None]
:param reset_weights: Flag to determine whether to reset the weights or
not
:type reset_weights: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
        :return: tuple of (training history, f1 score, f1 report)
"""
if label_mapping is not None:
self.set_label_mapping(label_mapping)
if not self._model:
self._construct_model()
else:
if self._need_to_reconstruct_model():
self._reconstruct_model()
if reset_weights:
self.reset_weights()
history = defaultdict()
f1 = None
f1_report = []
self._model.reset_metrics()
softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
start_time = time.time()
batch_id = 0
for x_train, y_train in train_data:
model_results = self._model.train_on_batch(
x_train, {softmax_output_layer_name: y_train})
sys.stdout.flush()
if verbose:
sys.stdout.write(
"\rEPOCH %d, batch_id %d: loss: %f - acc: %f - "
"f1_score %f" %
(self._epoch_id, batch_id, *model_results[1:]))
batch_id += 1
for i, metric_label in enumerate(self._model.metrics_names):
history[metric_label] = model_results[i]
if val_data:
f1, f1_report = self._validate_training(val_data)
history['f1_report'] = f1_report
val_f1 = f1_report['weighted avg']['f1-score'] \
if f1_report else np.NAN
val_precision = f1_report['weighted avg']['precision'] \
if f1_report else np.NAN
val_recall = f1_report['weighted avg']['recall'] \
if f1_report else np.NAN
epoch_time = time.time() - start_time
logger.info("\rEPOCH %d (%ds), loss: %f - acc: %f - f1_score %f -- "
"val_f1: %f - val_precision: %f - val_recall %f" %
(self._epoch_id, epoch_time, *model_results[1:],
val_f1, val_precision, val_recall))
self._epoch_id += 1
return history, f1, f1_report
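    # Illustrative call (the generators are assumptions): `train_data` and
    # `val_data` should yield (batch_of_strings, one-hot label tensor) pairs,
    # e.g. from a tf.data pipeline or a custom generator.
    #
    #   history, f1, f1_report = model.fit(train_gen, val_gen, batch_size=32)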
def _validate_training(self, val_data, batch_size_test=32,
verbose_log=True, verbose_keras=False):
"""
        Validate the model on the validation data and return the evaluation
        metrics.
:param val_data: data generator for the validation
:type val_data: iterator
:param batch_size_test: Number of samples to process in testing
:type batch_size_test: int
:param verbose_log: whether or not to print out scores for training,
etc.
:type verbose_log: bool
:param verbose_keras: whether or not to print out scores for training,
from keras.
:type verbose_keras: bool
        :return: (f1-score, f1 report).
"""
f1 = None
f1_report = None
if val_data is None:
return f1, f1_report
# Predict on the test set
batch_id = 0
y_val_pred = []
y_val_test = []
for x_val, y_val in val_data:
y_val_pred.append(self._model.predict(
x_val, batch_size=batch_size_test, verbose=verbose_keras)[1])
y_val_test.append(np.argmax(y_val, axis=-1))
batch_id += 1
sys.stdout.flush()
if verbose_log:
sys.stdout.write("\rEPOCH %g, validation_batch_id %d" %
(self._epoch_id, batch_id))
tf.keras.backend.set_floatx('float32')
# Clean the predicted entities and the actual entities
f1, f1_report = labeler_utils.evaluate_accuracy(
np.concatenate(y_val_pred, axis=0),
np.concatenate(y_val_test, axis=0),
self.num_labels,
self.reverse_label_mapping,
verbose=verbose_keras)
return f1, f1_report
def predict(self, data, batch_size=32, show_confidences=False,
verbose=True):
"""
Run model and get predictions
:param data: text input
:type data: Union[list, numpy.ndarray]
:param batch_size: number of samples in the batch of data
:type batch_size: int
:param show_confidences: whether user wants prediction confidences
        :type show_confidences: bool
:param verbose: Flag to determine whether to print status or not
:type verbose: bool
:return: char level predictions and confidences
:rtype: dict
"""
if not self._model:
raise ValueError("You are trying to predict without a model. "
"Construct/Load a model before predicting.")
elif self._need_to_reconstruct_model():
raise RuntimeError("The model label mapping definitions have been "
"altered without additional training. Please "
"train the model or reset the label mapping to "
"predict.")
# Pre-allocate space for predictions
confidences = []
sentence_lengths = np.zeros((batch_size,), dtype=int)
predictions = np.zeros((batch_size, self._parameters['max_length']))
if show_confidences:
confidences = np.zeros((batch_size,
self._parameters['max_length'],
self.num_labels))
# Run model with batching
allocation_index = 0
for batch_id, batch_data in enumerate(data):
model_output = self._model(
tf.convert_to_tensor(batch_data)
)
# Count number of samples in batch to prevent array mismatch
num_samples_in_batch = len(batch_data)
allocation_index = batch_id * batch_size
# Double array size
if len(predictions) <= allocation_index:
predictions = np.pad(predictions, ((0, len(predictions)),
(0, 0)), mode='constant')
sentence_lengths = np.pad(
sentence_lengths, pad_width=((0, len(sentence_lengths)),),
mode='constant')
if show_confidences:
confidences = np.pad(confidences,
((0, len(predictions)),
(0, 0), (0, 0)), mode='constant')
if show_confidences:
confidences[allocation_index:allocation_index + num_samples_in_batch] = model_output[0].numpy()
predictions[allocation_index:allocation_index + num_samples_in_batch] = model_output[1].numpy()
sentence_lengths[allocation_index:allocation_index + num_samples_in_batch] = list(map(lambda x: len(x[0]), batch_data))
allocation_index += num_samples_in_batch
        # Pre-allocate Python lists for the per-sample outputs; the numpy rows
        # are converted to lists below
predictions_list = [i for i in range(0, allocation_index)]
confidences_list = None
if show_confidences:
confidences_list = [i for i in range(0, allocation_index)]
        # Trim each sample to its true sentence length and convert the numpy
        # rows to Python lists
for index, sentence_length \
in enumerate(sentence_lengths[:allocation_index]):
predictions_list[index] = list(predictions[index][:sentence_length])
if show_confidences:
confidences_list[index] = list(confidences[index][:sentence_length])
if show_confidences:
return {'pred': predictions_list, 'conf': confidences_list}
return {'pred': predictions_list}
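    # Illustrative call (the batching is an assumption): `data` should iterate
    # over batches, each batch a list of single-element string rows,
    # e.g. [["some text"], ["more text"]].
    #
    #   out = model.predict(data, batch_size=32, show_confidences=True)
    #   out['pred']  # per-sample list of per-character label indices
    #   out['conf']  # per-sample list of per-character confidence vectors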
def details(self):
"""
Prints the relevant details of the model (summary, parameters, label
mapping)
"""
print("\n###### Model Details ######\n")
self._model.summary()
print("\nModel Parameters:")
for key, value in self._parameters.items():
print("{}: {}".format(key, value))
print("\nModel Label Mapping:")
for key, value in self.label_mapping.items():
print("{}: {}".format(key, value))
| en | 0.693262 | Removes TF2 warning for using TF1 model which has resources. Computes F-Beta score. Adapted and slightly modified from https://github.com/tensorflow/addons/blob/v0.12.0/tensorflow_addons/metrics/f_scores.py#L211-L283 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== It is the weighted harmonic mean of precision and recall. Output range is `[0, 1]`. Works for both multi-class and multi-label classification. $$ F_{\beta} = (1 + \beta^2) * \frac{\textrm{precision} * \textrm{precision}}{(\beta^2 \cdot \textrm{precision}) + \textrm{recall}} $$ Args: num_classes: Number of unique classes in the dataset. average: Type of averaging to be performed on data. Acceptable values are `None`, `micro`, `macro` and `weighted`. Default value is None. beta: Determines the weight of precision and recall in harmonic mean. Determines the weight given to the precision and recall. Default value is 1. threshold: Elements of `y_pred` greater than threshold are converted to be 1, and the rest 0. If threshold is None, the argmax is converted to 1, and the rest 0. name: (Optional) String name of the metric instance. dtype: (Optional) Data type of the metric result. Returns: F-Beta Score: float. # Modification: remove the run-time type checking for functions # make sure [0, 0, 0] doesn't become [1, 1, 1] # Use abs(x) > eps, instead of x != 0 to check for zero # [micro, macro] Returns the serializable config of the metric. Computes F-1 Score. # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== It is the harmonic mean of precision and recall. Output range is `[0, 1]`. Works for both multi-class and multi-label classification. $$ F_1 = 2 \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{\textrm{precision} + \textrm{recall}} $$ Args: num_classes: Number of unique classes in the dataset. average: Type of averaging to be performed on data. Acceptable values are `None`, `micro`, `macro` and `weighted`. Default value is None. threshold: Elements of `y_pred` above threshold are considered to be 1, and the rest 0. If threshold is None, the argmax is converted to 1, and the rest 0. name: (Optional) String name of the metric instance. 
dtype: (Optional) Data type of the metric result. Returns: F-1 Score: float. # Modification: remove the run-time type checking for functions Returns a numpy embedding dictionary from embed file with GloVe-like format :param filename: Path to the embed file for loading :type filename: str Embeds GloVe chars embeddings from source file to n_dims principal components in a new file :param n_dims: Final number of principal component dims of the embeddings :type n_dims: int :param source_file: Location of original embeddings to factor down :type source_file: str # get embedding table first and vectors as array # get PCA embedder # write to file # boolean if the label mapping requires the mapping for index 0 reserved CNN Model Initializer. initialize epoch_id :param label_mapping: maps labels to their encoded integers :type label_mapping: dict :param parameters: Contains all the appropriate parameters for the model. Must contain num_labels. Other possible parameters are: max_length, max_char_encoding_id, dim_embed, size_fc dropout, size_conv, num_fil, optimizer, default_label :type parameters: dict :return: None # parameter initialization # reconstruct flags for model Checks if two models are equal with one another, may only check important variables, i.e. may not check model itself. :param self: a model :param other: a model :type self: BaseModel :type other: BaseModel :return: Whether or not self and other are equal :rtype: bool Validate the parameters sent in. Raise error if invalid parameters are present. :param parameters: parameter dict containing the following parameters: max_length: Maximum char length in a sample max_char_encoding_id: Maximum integer value for encoding the input dim_embed: Number of embedded dimensions size_fc: Size of each fully connected layers dropout: Ratio of dropout in the model size_conv: Convolution kernel size default_label: Key for label_mapping that is the default label pad_label: Key for entities_dict that is the pad label num_fil: Number of filters in each convolution layer :type parameters: dict :return: None # Make sure the necessary parameters are present and valid. # Error if there are extra parameters thrown in Sets the labels for the model :param label_mapping: label mapping of the model :type label_mapping: dict :return: None # if list missing PAD # if dict missing PAD and 0 # dict with bad PAD Determines whether or not the model needs to be reconstructed. :return: bool of whether or not the model needs to reconstruct. Saves whole model to disk with weights :param dirpath: directory path where you want to save the model to :type dirpath: str :return: None Loads whole model from disk with weights :param dirpath: directory path where you want to load the model from :type dirpath: str :return: None # load parameters # load label_mapping # use f1 score metric # Tensorflow v1 Model weights need to be transferred. # load self Character encoding for the list of sentences :param input_str_tensor: input list of sentences converted to tensor :type input_str_tensor: tf.tensor :param max_char_encoding_id: Maximum integer value for encoding the input :type max_char_encoding_id: int :param max_len: Maximum char length in a sample :type max_len: int :return : tensor containing encoded list of input sentences :rtype: tf.Tensor # convert characters to indices # padding Adds an argmax threshold layer to the model. 
This layer's output will be the argmax value if the confidence for that argmax meets the threshold for its label, otherwise it will be the default label index. :param num_labels: number of entities :type num_labels: int :param threshold: default set to 0 so all confidences pass. :type threshold: float :param default_ind: default index :type default_ind: int :return: final argmax threshold layer for the model # Initialize the thresholds vector variable and create the threshold # matrix. # Check if the confidences meet the threshold minimum. # Create a vector the same size as the batch_size which # represents the background label # Generate the final predicted output using the function: Model constructor for the data labeler. This also serves as a weight reset. :return: None # Reset model # generate glove embedding # generate model # default parameters # Encoding layer # Create a pre-trained weight matrix # character encoding indices range from 0 to max_char_encoding_id, # we add one extra index for out-of-vocabulary character # Fill in the weight matrix: let pad and space be 0s # Add the convolutional layers # Add batch normalization, set fused = True for compactness # Add the fully connected layers # Add the final Softmax layer # Output the model into a .pb file for TensorFlow # Create confidence layers # Compile the model # use f1 score metric Reset the weights of the model. :return: None Reconstruct the appropriate layers if the number of number of labels is altered :return: None # Reset model # Remove the 3 output layers (dense_2', 'tf_op_layer_ArgMax', # 'thresh_arg_max_layer') # Add the final Softmax layer to the previous spot # Output the model into a .pb file for TensorFlow # Create confidence layers # Compile the model # use f1 score metric Train the current model with the training data and validation data :param train_data: Training data used to train model :type train_data: Union[list, np.ndarray] :param val_data: Validation data used to validate the training :type val_data: Union[list, np.ndarray] :param batch_size: Used to determine number of samples in each batch :type batch_size: int :param label_mapping: maps labels to their encoded integers :type label_mapping: Union[dict, None] :param reset_weights: Flag to determine whether to reset the weights or not :type reset_weights: bool :param verbose: Flag to determine whether to print status or not :type verbose: bool :return: None Validate the model on the test set and return the evaluation metrics. :param val_data: data generator for the validation :type val_data: iterator :param batch_size_test: Number of samples to process in testing :type batch_size_test: int :param verbose_log: whether or not to print out scores for training, etc. :type verbose_log: bool :param verbose_keras: whether or not to print out scores for training, from keras. :type verbose_keras: bool return (f1-score, f1 report). 
# Predict on the test set # Clean the predicted entities and the actual entities Run model and get predictions :param data: text input :type data: Union[list, numpy.ndarray] :param batch_size: number of samples in the batch of data :type batch_size: int :param show_confidences: whether user wants prediction confidences :type show_confidences: :param verbose: Flag to determine whether to print status or not :type verbose: bool :return: char level predictions and confidences :rtype: dict # Pre-allocate space for predictions # Run model with batching # Count number of samples in batch to prevent array mismatch # Double array size # Convert predictions, confidences to lists from numpy # Append slices of predictions to return prediction & confidence matrices Prints the relevant details of the model (summary, parameters, label mapping) ###### Model Details ######\n") | 1.786601 | 2 |
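The F-Beta docstrings quoted in the record above reduce to a one-line formula; the following standalone sketch (an editorial addition, not part of the dataset entry, with made-up precision/recall values) illustrates it.
# Editorial sketch of the F-Beta formula described above; not part of the dataset record.
def f_beta(precision, recall, beta=1.0):
    # Weighted harmonic mean of precision and recall; beta=1 gives the F-1 score.
    if precision == 0.0 and recall == 0.0:
        return 0.0
    b2 = beta ** 2
    return (1 + b2) * precision * recall / (b2 * precision + recall)
print(f_beta(0.8, 0.6))       # ~0.686 (F-1, hypothetical values)
print(f_beta(0.8, 0.6, 2.0))  # ~0.632 (recall-weighted F-2)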
airflow/contrib/plugins/metastore_browser/main.py | Nipica/airflow | 0 | 5939 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
from airflow.www.decorators import gzipped
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
| # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
from airflow.www.decorators import gzipped
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
| en | 0.730421 | # -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Keeping pandas from truncating long strings # Creating a flask admin BaseView SELECT a.name as db, db_location_uri as location, count(1) as object_count, a.desc as description FROM DBS a JOIN TBLS b ON a.DB_ID = b.DB_ID GROUP BY a.name, db_location_uri, a.desc SELECT a.PART_NAME, a.CREATE_TIME, c.LOCATION, c.IS_COMPRESSED, c.INPUT_FORMAT, c.OUTPUT_FORMAT FROM PARTITIONS a JOIN TBLS b ON a.TBL_ID = b.TBL_ID JOIN DBS d ON b.DB_ID = d.DB_ID JOIN SDS c ON a.SD_ID = c.SD_ID WHERE b.TBL_NAME like '{table}' AND d.NAME like '{schema}' ORDER BY PART_NAME DESC SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE FROM TBLS a JOIN DBS b ON a.DB_ID = b.DB_ID WHERE a.TBL_NAME NOT LIKE '%tmp%' AND a.TBL_NAME NOT LIKE '%temp%' AND b.NAME NOT LIKE '%tmp%' AND b.NAME NOT LIKE '%temp%' {where_clause} LIMIT {LIMIT}; # Creating a flask blueprint to intergrate the templates and static folder # Defining the plugin class | 1.500088 | 2 |
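A side note on the objects() endpoint above: the whitelist and blacklist checks run one after the other, so a non-empty DB_BLACKLIST replaces (rather than combines with) any whitelist clause. A standalone editorial sketch with hypothetical values shows the resulting SQL fragment.
# Editorial sketch of the where_clause construction used in objects(); values are hypothetical.
DB_WHITELIST = ['analytics']
DB_BLACKLIST = ['tmp']
where_clause = ''
if DB_WHITELIST:
    where_clause = "AND b.name IN ({})".format(",".join(["'" + db + "'" for db in DB_WHITELIST]))
if DB_BLACKLIST:
    where_clause = "AND b.name NOT IN ({})".format(",".join(["'" + db + "'" for db in DB_BLACKLIST]))
print(where_clause)  # AND b.name NOT IN ('tmp') -- the blacklist overwrote the whitelist clause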
app/lib/manage.py | AaronDewes/compose-nonfree | 5 | 5940 | #!/usr/bin/env python3
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
import stat
import tempfile
import threading
from typing import List
from sys import argv
import os
import requests
import shutil
import json
import yaml
import subprocess
from lib.composegenerator.v0.generate import createComposeConfigFromV0
from lib.composegenerator.v1.generate import createComposeConfigFromV1
from lib.appymlgenerator import convertComposeYMLToAppYML
from lib.validate import findAndValidateApps
from lib.metadata import getAppRegistry, getSimpleAppRegistry
from lib.entropy import deriveEntropy
# For an array of threads, join them and wait for them to finish
def joinThreads(threads: List[threading.Thread]):
for thread in threads:
thread.join()
# The directory with this script
scriptDir = os.path.dirname(os.path.realpath(__file__))
nodeRoot = os.path.join(scriptDir, "..", "..")
appsDir = os.path.join(nodeRoot, "apps")
appSystemDir = os.path.join(nodeRoot, "app-system")
sourcesList = os.path.join(appSystemDir, "sources.list")
appDataDir = os.path.join(nodeRoot, "app-data")
userFile = os.path.join(nodeRoot, "db", "user.json")
legacyScript = os.path.join(nodeRoot, "scripts", "app")
def runCompose(app: str, args: str):
compose(app, args)
# Returns a list of every argument after the second one in sys.argv joined into a string by spaces
def getArguments():
arguments = ""
for i in range(3, len(argv)):
arguments += argv[i] + " "
return arguments
def getAppYml(name):
url = 'https://raw.githubusercontent.com/runcitadel/compose-nonfree/main/apps/' + \
name + '/' + 'app.yml'
response = requests.get(url)
if response.status_code == 200:
return response.text
else:
return False
def getAppYmlPath(app):
return os.path.join(appsDir, app, 'app.yml')
def composeToAppYml(app):
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
appYml = os.path.join(appsDir, app, "app.yml")
# Read the compose file and parse it
with open(composeFile, "r") as f:
compose = yaml.safe_load(f)
registry = os.path.join(appsDir, "registry.json")
# Load the registry
with open(registry, "r") as f:
registryData = json.load(f)
converted = convertComposeYMLToAppYML(compose, app, registryData)
# Put converted into the app.yml after encoding it as YAML
with open(appYml, "w") as f:
f.write(yaml.dump(converted, sort_keys=False))
def update(verbose: bool = False):
apps = findAndValidateApps(appsDir)
# The compose generation process updates the registry, so we need to get it set up with the basics before that
registry = getAppRegistry(apps, appsDir)
with open(os.path.join(appsDir, "registry.json"), "w") as f:
json.dump(registry, f, indent=4, sort_keys=True)
print("Wrote registry to registry.json")
simpleRegistry = getSimpleAppRegistry(apps, appsDir)
with open(os.path.join(appSystemDir, "apps.json"), "w") as f:
json.dump(simpleRegistry, f, indent=4, sort_keys=True)
print("Wrote version information to apps.json")
# Loop through the apps and generate valid compose files from them, then put these into the app dir
for app in apps:
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
appYml = os.path.join(appsDir, app, "app.yml")
with open(composeFile, "w") as f:
appCompose = getApp(appYml, app)
if(appCompose):
f.write(yaml.dump(appCompose, sort_keys=False))
if verbose:
print("Wrote " + app + " to " + composeFile)
print("Generated configuration successfully")
def download(app: str = None):
if(app is None):
apps = findAndValidateApps(appsDir)
for app in apps:
data = getAppYml(app)
if data:
with open(getAppYmlPath(app), 'w') as f:
f.write(data)
else:
print("Warning: Could not download " + app)
else:
data = getAppYml(app)
if data:
with open(getAppYmlPath(app), 'w') as f:
f.write(data)
else:
print("Warning: Could not download " + app)
def getUserData():
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
return userData
def startInstalled():
    # If userfile doesn't exist, just do nothing
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
threads = []
for app in userData["installedApps"]:
print("Starting app {}...".format(app))
        # Run runCompose(app, "up --detach") asynchronously for all apps, then exit(0) when all are finished
thread = threading.Thread(target=runCompose, args=(app, "up --detach"))
thread.start()
threads.append(thread)
joinThreads(threads)
def stopInstalled():
    # If userfile doesn't exist, just do nothing
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
threads = []
for app in userData["installedApps"]:
print("Stopping app {}...".format(app))
        # Run runCompose(app, "rm --force --stop") asynchronously for all apps, then exit(0) when all are finished
thread = threading.Thread(
target=runCompose, args=(app, "rm --force --stop"))
thread.start()
threads.append(thread)
joinThreads(threads)
# Loads an app.yml and converts it to a docker-compose.yml
def getApp(appFile: str, appId: str):
with open(appFile, 'r') as f:
app = yaml.safe_load(f)
if not "metadata" in app:
raise Exception("Error: Could not find metadata in " + appFile)
app["metadata"]["id"] = appId
if('version' in app and str(app['version']) == "1"):
return createComposeConfigFromV1(app, nodeRoot)
else:
return createComposeConfigFromV0(app)
def compose(app, arguments):
# Runs a compose command in the app dir
# Before that, check if a docker-compose.yml exists in the app dir
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
commonComposeFile = os.path.join(appSystemDir, "docker-compose.common.yml")
os.environ["APP_DOMAIN"] = subprocess.check_output(
"hostname -s 2>/dev/null || echo 'umbrel'", shell=True).decode("utf-8") + ".local"
os.environ["APP_HIDDEN_SERVICE"] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
os.path.join(nodeRoot, "tor", "data", "app-{}/hostname".format(app))), shell=True).decode("utf-8")
os.environ["APP_SEED"] = deriveEntropy("app-{}-seed".format(app))
# Allow more app seeds, with random numbers from 1-5 assigned in a loop
for i in range(1, 6):
os.environ["APP_SEED_{}".format(i)] = deriveEntropy("app-{}-seed{}".format(app, i))
os.environ["APP_DATA_DIR"] = os.path.join(appDataDir, app)
os.environ["BITCOIN_DATA_DIR"] = os.path.join(nodeRoot, "bitcoin")
os.environ["LND_DATA_DIR"] = os.path.join(nodeRoot, "lnd")
# List all hidden services for an app and put their hostname in the environment
hiddenServices: List[str] = getAppHiddenServices(app)
for service in hiddenServices:
appHiddenServiceFile = os.path.join(
nodeRoot, "tor", "data", "app-{}-{}/hostname".format(app, service))
os.environ["APP_HIDDEN_SERVICE_{}".format(service.upper().replace("-", "_"))] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
appHiddenServiceFile), shell=True).decode("utf-8")
if not os.path.isfile(composeFile):
print("Error: Could not find docker-compose.yml in " + app)
exit(1)
os.system(
"docker compose --env-file '{}' --project-name '{}' --file '{}' --file '{}' {}".format(
os.path.join(nodeRoot, ".env"), app, commonComposeFile, composeFile, arguments))
def remove_readonly(func, path, _):
os.chmod(path, stat.S_IWRITE)
func(path)
def deleteData(app: str):
dataDir = os.path.join(appDataDir, app)
try:
shutil.rmtree(dataDir, onerror=remove_readonly)
except FileNotFoundError:
pass
def createDataDir(app: str):
dataDir = os.path.join(appDataDir, app)
appDir = os.path.join(appsDir, app)
if os.path.isdir(dataDir):
deleteData(app)
# Recursively copy everything from appDir to dataDir while excluding .gitignore
shutil.copytree(appDir, dataDir, symlinks=False,
ignore=shutil.ignore_patterns(".gitignore"))
# Chown and chmod dataDir to have the same owner and permissions as appDir
os.chown(dataDir, os.stat(appDir).st_uid, os.stat(appDir).st_gid)
os.chmod(dataDir, os.stat(appDir).st_mode)
def setInstalled(app: str):
userData = getUserData()
if not "installedApps" in userData:
userData["installedApps"] = []
userData["installedApps"].append(app)
userData["installedApps"] = list(set(userData["installedApps"]))
with open(userFile, "w") as f:
json.dump(userData, f)
def setRemoved(app: str):
userData = getUserData()
if not "installedApps" in userData:
return
userData["installedApps"] = list(set(userData["installedApps"]))
userData["installedApps"].remove(app)
with open(userFile, "w") as f:
json.dump(userData, f)
def getAppHiddenServices(app: str):
torDir = os.path.join(nodeRoot, "tor", "data")
# List all subdirectories of torDir which start with app-${APP}-
# but return them without the app-${APP}- prefix
results = []
for subdir in os.listdir(torDir):
if subdir.startswith("app-{}-".format(app)):
results.append(subdir[len("app-{}-".format(app)):])
return results
# Parse the sources.list repo file, which contains a list of sources in the format
# <git-url> <branch>
# For every line, clone the repo to a temporary dir and checkout the branch
# Then, take that repo's apps from the temporary dir/apps and, for every app,
# overwrite the current app dir with the contents of the temporary dir/apps/app
# Also, keep a list of apps from every repo, a repo later in the file may not overwrite an app from a repo earlier in the file
def updateRepos():
# Get the list of repos
repos = []
with open(sourcesList) as f:
repos = f.readlines()
# For each repo, clone the repo to a temporary dir, checkout the branch,
# and overwrite the current app dir with the contents of the temporary dir/apps/app
alreadyInstalled = []
for repo in repos:
repo = repo.strip()
if repo == "":
continue
# Split the repo into the git url and the branch
repo = repo.split(" ")
if len(repo) != 2:
print("Error: Invalid repo format in " + sourcesList)
exit(1)
gitUrl = repo[0]
branch = repo[1]
# Clone the repo to a temporary dir
tempDir = tempfile.mkdtemp()
print("Cloning the repository")
# Git clone with a depth of 1 to avoid cloning the entire repo
        # Don't print anything to stdout, as we don't want to see the git clone output
        subprocess.run("git clone --depth 1 --branch {} {} {}".format(gitUrl, branch, tempDir), shell=True, stdout=subprocess.DEVNULL)
# Overwrite the current app dir with the contents of the temporary dir/apps/app
for app in os.listdir(os.path.join(tempDir, "apps")):
# if the app is already installed, don't overwrite it
if app in alreadyInstalled:
continue
if os.path.isdir(os.path.join(appsDir, app)):
shutil.rmtree(os.path.join(appsDir, app), onerror=remove_readonly)
if os.path.isdir(os.path.join(tempDir, "apps", app)):
shutil.copytree(os.path.join(tempDir, "apps", app), os.path.join(appsDir, app),
symlinks=False, ignore=shutil.ignore_patterns(".gitignore"))
alreadyInstalled.append(app)
# Remove the temporary dir
shutil.rmtree(tempDir)
| #!/usr/bin/env python3
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
import stat
import tempfile
import threading
from typing import List
from sys import argv
import os
import requests
import shutil
import json
import yaml
import subprocess
from lib.composegenerator.v0.generate import createComposeConfigFromV0
from lib.composegenerator.v1.generate import createComposeConfigFromV1
from lib.appymlgenerator import convertComposeYMLToAppYML
from lib.validate import findAndValidateApps
from lib.metadata import getAppRegistry, getSimpleAppRegistry
from lib.entropy import deriveEntropy
# For an array of threads, join them and wait for them to finish
def joinThreads(threads: List[threading.Thread]):
for thread in threads:
thread.join()
# The directory with this script
scriptDir = os.path.dirname(os.path.realpath(__file__))
nodeRoot = os.path.join(scriptDir, "..", "..")
appsDir = os.path.join(nodeRoot, "apps")
appSystemDir = os.path.join(nodeRoot, "app-system")
sourcesList = os.path.join(appSystemDir, "sources.list")
appDataDir = os.path.join(nodeRoot, "app-data")
userFile = os.path.join(nodeRoot, "db", "user.json")
legacyScript = os.path.join(nodeRoot, "scripts", "app")
def runCompose(app: str, args: str):
compose(app, args)
# Returns a list of every argument after the second one in sys.argv joined into a string by spaces
def getArguments():
arguments = ""
for i in range(3, len(argv)):
arguments += argv[i] + " "
return arguments
def getAppYml(name):
url = 'https://raw.githubusercontent.com/runcitadel/compose-nonfree/main/apps/' + \
name + '/' + 'app.yml'
response = requests.get(url)
if response.status_code == 200:
return response.text
else:
return False
def getAppYmlPath(app):
return os.path.join(appsDir, app, 'app.yml')
def composeToAppYml(app):
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
appYml = os.path.join(appsDir, app, "app.yml")
# Read the compose file and parse it
with open(composeFile, "r") as f:
compose = yaml.safe_load(f)
registry = os.path.join(appsDir, "registry.json")
# Load the registry
with open(registry, "r") as f:
registryData = json.load(f)
converted = convertComposeYMLToAppYML(compose, app, registryData)
# Put converted into the app.yml after encoding it as YAML
with open(appYml, "w") as f:
f.write(yaml.dump(converted, sort_keys=False))
def update(verbose: bool = False):
apps = findAndValidateApps(appsDir)
# The compose generation process updates the registry, so we need to get it set up with the basics before that
registry = getAppRegistry(apps, appsDir)
with open(os.path.join(appsDir, "registry.json"), "w") as f:
json.dump(registry, f, indent=4, sort_keys=True)
print("Wrote registry to registry.json")
simpleRegistry = getSimpleAppRegistry(apps, appsDir)
with open(os.path.join(appSystemDir, "apps.json"), "w") as f:
json.dump(simpleRegistry, f, indent=4, sort_keys=True)
print("Wrote version information to apps.json")
# Loop through the apps and generate valid compose files from them, then put these into the app dir
for app in apps:
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
appYml = os.path.join(appsDir, app, "app.yml")
with open(composeFile, "w") as f:
appCompose = getApp(appYml, app)
if(appCompose):
f.write(yaml.dump(appCompose, sort_keys=False))
if verbose:
print("Wrote " + app + " to " + composeFile)
print("Generated configuration successfully")
def download(app: str = None):
if(app is None):
apps = findAndValidateApps(appsDir)
for app in apps:
data = getAppYml(app)
if data:
with open(getAppYmlPath(app), 'w') as f:
f.write(data)
else:
print("Warning: Could not download " + app)
else:
data = getAppYml(app)
if data:
with open(getAppYmlPath(app), 'w') as f:
f.write(data)
else:
print("Warning: Could not download " + app)
def getUserData():
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
return userData
def startInstalled():
    # If userfile doesn't exist, just do nothing
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
threads = []
for app in userData["installedApps"]:
print("Starting app {}...".format(app))
        # Run runCompose(app, "up --detach") asynchronously for all apps, then exit(0) when all are finished
thread = threading.Thread(target=runCompose, args=(app, "up --detach"))
thread.start()
threads.append(thread)
joinThreads(threads)
def stopInstalled():
    # If userfile doesn't exist, just do nothing
userData = {}
if os.path.isfile(userFile):
with open(userFile, "r") as f:
userData = json.load(f)
threads = []
for app in userData["installedApps"]:
print("Stopping app {}...".format(app))
        # Run runCompose(app, "rm --force --stop") asynchronously for all apps, then exit(0) when all are finished
thread = threading.Thread(
target=runCompose, args=(app, "rm --force --stop"))
thread.start()
threads.append(thread)
joinThreads(threads)
# Loads an app.yml and converts it to a docker-compose.yml
def getApp(appFile: str, appId: str):
with open(appFile, 'r') as f:
app = yaml.safe_load(f)
if not "metadata" in app:
raise Exception("Error: Could not find metadata in " + appFile)
app["metadata"]["id"] = appId
if('version' in app and str(app['version']) == "1"):
return createComposeConfigFromV1(app, nodeRoot)
else:
return createComposeConfigFromV0(app)
def compose(app, arguments):
# Runs a compose command in the app dir
# Before that, check if a docker-compose.yml exists in the app dir
composeFile = os.path.join(appsDir, app, "docker-compose.yml")
commonComposeFile = os.path.join(appSystemDir, "docker-compose.common.yml")
os.environ["APP_DOMAIN"] = subprocess.check_output(
"hostname -s 2>/dev/null || echo 'umbrel'", shell=True).decode("utf-8") + ".local"
os.environ["APP_HIDDEN_SERVICE"] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
os.path.join(nodeRoot, "tor", "data", "app-{}/hostname".format(app))), shell=True).decode("utf-8")
os.environ["APP_SEED"] = deriveEntropy("app-{}-seed".format(app))
# Allow more app seeds, with random numbers from 1-5 assigned in a loop
for i in range(1, 6):
os.environ["APP_SEED_{}".format(i)] = deriveEntropy("app-{}-seed{}".format(app, i))
os.environ["APP_DATA_DIR"] = os.path.join(appDataDir, app)
os.environ["BITCOIN_DATA_DIR"] = os.path.join(nodeRoot, "bitcoin")
os.environ["LND_DATA_DIR"] = os.path.join(nodeRoot, "lnd")
# List all hidden services for an app and put their hostname in the environment
hiddenServices: List[str] = getAppHiddenServices(app)
for service in hiddenServices:
appHiddenServiceFile = os.path.join(
nodeRoot, "tor", "data", "app-{}-{}/hostname".format(app, service))
os.environ["APP_HIDDEN_SERVICE_{}".format(service.upper().replace("-", "_"))] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
appHiddenServiceFile), shell=True).decode("utf-8")
if not os.path.isfile(composeFile):
print("Error: Could not find docker-compose.yml in " + app)
exit(1)
os.system(
"docker compose --env-file '{}' --project-name '{}' --file '{}' --file '{}' {}".format(
os.path.join(nodeRoot, ".env"), app, commonComposeFile, composeFile, arguments))
def remove_readonly(func, path, _):
os.chmod(path, stat.S_IWRITE)
func(path)
def deleteData(app: str):
dataDir = os.path.join(appDataDir, app)
try:
shutil.rmtree(dataDir, onerror=remove_readonly)
except FileNotFoundError:
pass
def createDataDir(app: str):
dataDir = os.path.join(appDataDir, app)
appDir = os.path.join(appsDir, app)
if os.path.isdir(dataDir):
deleteData(app)
# Recursively copy everything from appDir to dataDir while excluding .gitignore
shutil.copytree(appDir, dataDir, symlinks=False,
ignore=shutil.ignore_patterns(".gitignore"))
# Chown and chmod dataDir to have the same owner and permissions as appDir
os.chown(dataDir, os.stat(appDir).st_uid, os.stat(appDir).st_gid)
os.chmod(dataDir, os.stat(appDir).st_mode)
def setInstalled(app: str):
userData = getUserData()
if not "installedApps" in userData:
userData["installedApps"] = []
userData["installedApps"].append(app)
userData["installedApps"] = list(set(userData["installedApps"]))
with open(userFile, "w") as f:
json.dump(userData, f)
def setRemoved(app: str):
userData = getUserData()
if not "installedApps" in userData:
return
userData["installedApps"] = list(set(userData["installedApps"]))
userData["installedApps"].remove(app)
with open(userFile, "w") as f:
json.dump(userData, f)
def getAppHiddenServices(app: str):
torDir = os.path.join(nodeRoot, "tor", "data")
# List all subdirectories of torDir which start with app-${APP}-
# but return them without the app-${APP}- prefix
results = []
for subdir in os.listdir(torDir):
if subdir.startswith("app-{}-".format(app)):
results.append(subdir[len("app-{}-".format(app)):])
return results
# Parse the sources.list repo file, which contains a list of sources in the format
# <git-url> <branch>
# For every line, clone the repo to a temporary dir and checkout the branch
# Then, take that repo's apps from the temporary dir/apps and, for every app,
# overwrite the current app dir with the contents of the temporary dir/apps/app
# Also, keep a list of apps from every repo, a repo later in the file may not overwrite an app from a repo earlier in the file
def updateRepos():
# Get the list of repos
repos = []
with open(sourcesList) as f:
repos = f.readlines()
# For each repo, clone the repo to a temporary dir, checkout the branch,
# and overwrite the current app dir with the contents of the temporary dir/apps/app
alreadyInstalled = []
for repo in repos:
repo = repo.strip()
if repo == "":
continue
# Split the repo into the git url and the branch
repo = repo.split(" ")
if len(repo) != 2:
print("Error: Invalid repo format in " + sourcesList)
exit(1)
gitUrl = repo[0]
branch = repo[1]
# Clone the repo to a temporary dir
tempDir = tempfile.mkdtemp()
print("Cloning the repository")
# Git clone with a depth of 1 to avoid cloning the entire repo
        # Don't print anything to stdout, as we don't want to see the git clone output
        subprocess.run("git clone --depth 1 --branch {} {} {}".format(gitUrl, branch, tempDir), shell=True, stdout=subprocess.DEVNULL)
# Overwrite the current app dir with the contents of the temporary dir/apps/app
for app in os.listdir(os.path.join(tempDir, "apps")):
# if the app is already installed, don't overwrite it
if app in alreadyInstalled:
continue
if os.path.isdir(os.path.join(appsDir, app)):
shutil.rmtree(os.path.join(appsDir, app), onerror=remove_readonly)
if os.path.isdir(os.path.join(tempDir, "apps", app)):
shutil.copytree(os.path.join(tempDir, "apps", app), os.path.join(appsDir, app),
symlinks=False, ignore=shutil.ignore_patterns(".gitignore"))
alreadyInstalled.append(app)
# Remove the temporary dir
shutil.rmtree(tempDir)
| en | 0.872908 | #!/usr/bin/env python3 # SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>> # # SPDX-License-Identifier: MIT # For an array of threads, join them and wait for them to finish # The directory with this script # Returns a list of every argument after the second one in sys.argv joined into a string by spaces # Read the compose file and parse it # Load the registry # Put converted into the app.yml after encoding it as YAML # The compose generation process updates the registry, so we need to get it set up with the basics before that # Loop through the apps and generate valid compose files from them, then put these into the app dir # If userfile doen't exist, just do nothing # Run runCompose(args.app, "up --detach") asynchrounously for all apps, then exit(0) when all are finished # If userfile doen't exist, just do nothing # Run runCompose(args.app, "up --detach") asynchrounously for all apps, then exit(0) when all are finished # Loads an app.yml and converts it to a docker-compose.yml # Runs a compose command in the app dir # Before that, check if a docker-compose.yml exists in the app dir # Allow more app seeds, with random numbers from 1-5 assigned in a loop # List all hidden services for an app and put their hostname in the environment # Recursively copy everything from appDir to dataDir while excluding .gitignore # Chown and chmod dataDir to have the same owner and permissions as appDir # List all subdirectories of torDir which start with app-${APP}- # but return them without the app-${APP}- prefix # Parse the sources.list repo file, which contains a list of sources in the format # <git-url> <branch> # For every line, clone the repo to a temporary dir and checkout the branch # Then, check that repos apps in the temporary dir/apps and for every app, # overwrite the current app dir with the contents of the temporary dir/apps/app # Also, keep a list of apps from every repo, a repo later in the file may not overwrite an app from a repo earlier in the file # Get the list of repos # For each repo, clone the repo to a temporary dir, checkout the branch, # and overwrite the current app dir with the contents of the temporary dir/apps/app # Split the repo into the git url and the branch # Clone the repo to a temporary dir # Git clone with a depth of 1 to avoid cloning the entire repo # Dont print anything to stdout, as we don't want to see the git clone output # Overwrite the current app dir with the contents of the temporary dir/apps/app # if the app is already installed, don't overwrite it # Remove the temporary dir | 2.299607 | 2 |
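updateRepos() above expects app-system/sources.list to contain one "<git-url> <branch>" pair per line, with repos earlier in the file taking precedence on conflicts; a hypothetical example file (URLs invented purely for illustration) would look like this.
# Hypothetical sources.list: one "<git-url> <branch>" pair per line; earlier repos win on conflicts.
https://github.com/runcitadel/compose.git main
https://github.com/example-org/extra-citadel-apps.git v1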
features/jit-features/query/query.py | YuanruiZJU/SZZ-TSE | 13 | 5941 | <gh_stars>10-100
from query.base import BaseQuery
class CommitMetaQuery(BaseQuery):
table_name = 'commit_meta'
class DiffusionFeaturesQuery(BaseQuery):
table_name = 'diffusion_features'
class SizeFeaturesQuery(BaseQuery):
table_name = 'size_features'
class PurposeFeaturesQuery(BaseQuery):
table_name = 'purpose_features'
class HistoryFeaturesQuery(BaseQuery):
table_name = 'history_features'
class ExperienceFeaturesQuery(BaseQuery):
table_name = 'experience_features'
class ProjectQuery:
def __init__(self, project):
self.project = project
self.cms = CommitMetaQuery(project).do_query()
self.diffusion_features = DiffusionFeaturesQuery(project).do_query()
self.size_features = SizeFeaturesQuery(project).do_query()
self.purpose_features = PurposeFeaturesQuery(project).do_query()
self.history_features = HistoryFeaturesQuery(project).do_query()
self.exp_features = ExperienceFeaturesQuery(project).do_query()
self.__cache_end_commit_id = None
@property
def end_commit_id(self):
if self.__cache_end_commit_id is not None:
return self.__cache_end_commit_id
commit_id = None
for pf in self.purpose_features:
if pf.fix:
commit_id = pf.commit_id
self.__cache_end_commit_id = commit_id
return self.__cache_end_commit_id
def combine(self):
features_dict = dict()
for sf in self.size_features:
features_dict[sf.commit_id] = dict()
features_dict[sf.commit_id]['la'] = sf.la
features_dict[sf.commit_id]['ld'] = sf.ld
features_dict[sf.commit_id]['lt'] = sf.lt
for df in self.diffusion_features:
features_dict[df.commit_id]['ns'] = df.ns
features_dict[df.commit_id]['nd'] = df.nd
features_dict[df.commit_id]['nf'] = df.nf
features_dict[df.commit_id]['entropy'] = df.entropy
for pf in self.purpose_features:
features_dict[pf.commit_id]['fix'] = pf.fix
for hf in self.history_features:
features_dict[hf.commit_id]['ndev'] = hf.ndev
features_dict[hf.commit_id]['age'] = hf.age
features_dict[hf.commit_id]['nuc'] = hf.nuc
for ef in self.exp_features:
features_dict[ef.commit_id]['exp'] = ef.exp
features_dict[ef.commit_id]['rexp'] = ef.rexp
features_dict[ef.commit_id]['sexp'] = ef.sexp
ret_list = list()
for cm in self.cms:
cm_dict = features_dict[cm.commit_id]
if len(cm_dict) == 14:
cm_dict['commit_id'] = cm.commit_id
ret_list.append(cm_dict)
if cm.commit_id == self.end_commit_id:
break
return ret_list
| from query.base import BaseQuery
class CommitMetaQuery(BaseQuery):
table_name = 'commit_meta'
class DiffusionFeaturesQuery(BaseQuery):
table_name = 'diffusion_features'
class SizeFeaturesQuery(BaseQuery):
table_name = 'size_features'
class PurposeFeaturesQuery(BaseQuery):
table_name = 'purpose_features'
class HistoryFeaturesQuery(BaseQuery):
table_name = 'history_features'
class ExperienceFeaturesQuery(BaseQuery):
table_name = 'experience_features'
class ProjectQuery:
def __init__(self, project):
self.project = project
self.cms = CommitMetaQuery(project).do_query()
self.diffusion_features = DiffusionFeaturesQuery(project).do_query()
self.size_features = SizeFeaturesQuery(project).do_query()
self.purpose_features = PurposeFeaturesQuery(project).do_query()
self.history_features = HistoryFeaturesQuery(project).do_query()
self.exp_features = ExperienceFeaturesQuery(project).do_query()
self.__cache_end_commit_id = None
@property
def end_commit_id(self):
if self.__cache_end_commit_id is not None:
return self.__cache_end_commit_id
commit_id = None
for pf in self.purpose_features:
if pf.fix:
commit_id = pf.commit_id
self.__cache_end_commit_id = commit_id
return self.__cache_end_commit_id
def combine(self):
features_dict = dict()
for sf in self.size_features:
features_dict[sf.commit_id] = dict()
features_dict[sf.commit_id]['la'] = sf.la
features_dict[sf.commit_id]['ld'] = sf.ld
features_dict[sf.commit_id]['lt'] = sf.lt
for df in self.diffusion_features:
features_dict[df.commit_id]['ns'] = df.ns
features_dict[df.commit_id]['nd'] = df.nd
features_dict[df.commit_id]['nf'] = df.nf
features_dict[df.commit_id]['entropy'] = df.entropy
for pf in self.purpose_features:
features_dict[pf.commit_id]['fix'] = pf.fix
for hf in self.history_features:
features_dict[hf.commit_id]['ndev'] = hf.ndev
features_dict[hf.commit_id]['age'] = hf.age
features_dict[hf.commit_id]['nuc'] = hf.nuc
for ef in self.exp_features:
features_dict[ef.commit_id]['exp'] = ef.exp
features_dict[ef.commit_id]['rexp'] = ef.rexp
features_dict[ef.commit_id]['sexp'] = ef.sexp
ret_list = list()
for cm in self.cms:
cm_dict = features_dict[cm.commit_id]
if len(cm_dict) == 14:
cm_dict['commit_id'] = cm.commit_id
ret_list.append(cm_dict)
if cm.commit_id == self.end_commit_id:
break
return ret_list | none | 1 | 1.976943 | 2 |
|
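A minimal usage sketch for the ProjectQuery class above (editorial addition; the project name 'camel' is an assumption for illustration, any key accepted by BaseQuery works):
# Editorial usage sketch for ProjectQuery; the project name is hypothetical.
pq = ProjectQuery('camel')
rows = pq.combine()                # one dict per commit: 14 change metrics plus 'commit_id'
print(len(rows), sorted(rows[0]))  # number of commits covered and the feature names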
example/mappers.py | mikeywaites/flask-arrested | 46 | 5942 | <reponame>mikeywaites/flask-arrested
from kim import Mapper, field
from example.models import Planet, Character
class PlanetMapper(Mapper):
__type__ = Planet
id = field.Integer(read_only=True)
name = field.String()
description = field.String()
created_at = field.DateTime(read_only=True)
class CharacterMapper(Mapper):
__type__ = Character
id = field.Integer(read_only=True)
name = field.String()
created_at = field.DateTime(read_only=True)
| from kim import Mapper, field
from example.models import Planet, Character
class PlanetMapper(Mapper):
__type__ = Planet
id = field.Integer(read_only=True)
name = field.String()
description = field.String()
created_at = field.DateTime(read_only=True)
class CharacterMapper(Mapper):
__type__ = Character
id = field.Integer(read_only=True)
name = field.String()
created_at = field.DateTime(read_only=True) | none | 1 | 2.666846 | 3 |
|
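A usage sketch for these mappers; the serialize/marshal calls follow kim's documented Mapper API as I understand it, so treat the exact method names as an assumption rather than something taken from this repo.
# Editorial sketch of serialising with PlanetMapper (kim API usage is assumed).
planet = Planet(name='Tatooine', description='Desert world')
data = PlanetMapper(obj=planet).serialize()    # -> dict with id, name, description, created_at
new_planet = PlanetMapper(data={'name': 'Hoth', 'description': 'Ice world'}).marshal()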
collections/ansible_collections/community/general/plugins/connection/saltstack.py | escalate/ansible-gitops-example-repository | 1 | 5943 | # Based on local.py (c) 2012, <NAME> <<EMAIL>>
# Based on chroot.py (c) 2013, <NAME> <<EMAIL>>
# Based on func.py
# (c) 2014, <NAME> <<EMAIL>>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: <NAME> (@mscherer) <<EMAIL>>
name: saltstack
short_description: Allow ansible to piggyback on salt minions
description:
- This allows you to use existing Saltstack infrastructure to connect to targets.
'''
import os
import base64
from ansible import errors
from ansible.plugins.connection import ConnectionBase
HAVE_SALTSTACK = False
try:
import salt.client as sc
HAVE_SALTSTACK = True
except ImportError:
pass
class Connection(ConnectionBase):
""" Salt-based connections """
has_pipelining = False
    # while the name of the product is salt, naming that module salt causes
# trouble with module import
transport = 'community.general.saltstack'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.host = self._play_context.remote_addr
def _connect(self):
if not HAVE_SALTSTACK:
raise errors.AnsibleError("saltstack is not installed")
self.client = sc.LocalClient()
self._connected = True
return self
def exec_command(self, cmd, sudoable=False, in_data=None):
""" run a command on the remote minion """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
self._display.vvv("EXEC %s" % cmd, host=self.host)
# need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
if self.host not in res:
raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
p = res[self.host]
return p['retcode'], p['stdout'], p['stderr']
@staticmethod
def _normalize_path(path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
""" transfer a file from local to remote """
super(Connection, self).put_file(in_path, out_path)
out_path = self._normalize_path(out_path, '/')
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
with open(in_path, 'rb') as in_fh:
content = in_fh.read()
self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])
# TODO test it
def fetch_file(self, in_path, out_path):
""" fetch a file from remote to local """
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._normalize_path(in_path, '/')
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
open(out_path, 'wb').write(content)
def close(self):
""" terminate the connection; nothing to do here """
pass
| # Based on local.py (c) 2012, <NAME> <<EMAIL>>
# Based on chroot.py (c) 2013, <NAME> <<EMAIL>>
# Based on func.py
# (c) 2014, <NAME> <<EMAIL>>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: <NAME> (@mscherer) <<EMAIL>>
name: saltstack
short_description: Allow ansible to piggyback on salt minions
description:
- This allows you to use existing Saltstack infrastructure to connect to targets.
'''
import os
import base64
from ansible import errors
from ansible.plugins.connection import ConnectionBase
HAVE_SALTSTACK = False
try:
import salt.client as sc
HAVE_SALTSTACK = True
except ImportError:
pass
class Connection(ConnectionBase):
""" Salt-based connections """
has_pipelining = False
    # while the name of the product is salt, naming that module salt causes
# trouble with module import
transport = 'community.general.saltstack'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.host = self._play_context.remote_addr
def _connect(self):
if not HAVE_SALTSTACK:
raise errors.AnsibleError("saltstack is not installed")
self.client = sc.LocalClient()
self._connected = True
return self
def exec_command(self, cmd, sudoable=False, in_data=None):
""" run a command on the remote minion """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
self._display.vvv("EXEC %s" % cmd, host=self.host)
# need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
if self.host not in res:
raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
p = res[self.host]
return p['retcode'], p['stdout'], p['stderr']
@staticmethod
def _normalize_path(path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
""" transfer a file from local to remote """
super(Connection, self).put_file(in_path, out_path)
out_path = self._normalize_path(out_path, '/')
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
with open(in_path, 'rb') as in_fh:
content = in_fh.read()
self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])
# TODO test it
def fetch_file(self, in_path, out_path):
""" fetch a file from remote to local """
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._normalize_path(in_path, '/')
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
open(out_path, 'wb').write(content)
def close(self):
""" terminate the connection; nothing to do here """
pass
| en | 0.697819 | # Based on local.py (c) 2012, <NAME> <<EMAIL>> # Based on chroot.py (c) 2013, <NAME> <<EMAIL>> # Based on func.py # (c) 2014, <NAME> <<EMAIL>> # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) author: <NAME> (@mscherer) <<EMAIL>> name: saltstack short_description: Allow ansible to piggyback on salt minions description: - This allows you to use existing Saltstack infrastructure to connect to targets. Salt-based connections # while the name of the product is salt, naming that module salt cause # trouble with module import run a command on the remote minion # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 transfer a file from local to remote # TODO test it fetch a file from remote to local terminate the connection; nothing to do here | 2.226166 | 2 |
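To route plays through this plugin, an inventory host simply selects the transport name declared above; a minimal hypothetical inventory snippet (host name invented) could look like this.
# Hypothetical Ansible inventory snippet for the saltstack connection plugin.
[minions]
web01 ansible_connection=community.general.saltstack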
create/views.py | normaldotcom/webvirtmgr | 1 | 5944 | from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from servers.models import Compute
from create.models import Flavor
from instance.models import Instance
from libvirt import libvirtError
from vrtManager.create import wvmCreate
from vrtManager import util
from create.forms import FlavorAddForm, NewVMForm
def create(request, host_id):
"""
Create new instance.
"""
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
errors = []
compute = Compute.objects.get(id=host_id)
flavors = Flavor.objects.filter().order_by('id')
try:
conn = wvmCreate(compute.hostname,
compute.login,
compute.password,
compute.type)
storages = sorted(conn.get_storages())
networks = sorted(conn.get_networks())
instances = conn.get_instances()
get_images = sorted(conn.get_storages_images())
mac_auto = util.randomMAC()
except libvirtError as err:
errors.append(err.message)
if not storages:
msg = _("You haven't defined have any storage pools")
errors.append(msg)
if not networks:
msg = _("You haven't defined have any network pools")
errors.append(msg)
if request.method == 'POST':
if 'create_flavor' in request.POST:
form = FlavorAddForm(request.POST)
if form.is_valid():
data = form.cleaned_data
create_flavor = Flavor(label=data['label'],
vcpu=data['vcpu'],
memory=data['memory'],
disk=data['disk'])
create_flavor.save()
return HttpResponseRedirect(request.get_full_path())
if 'delete_flavor' in request.POST:
flavor_id = request.POST.get('flavor', '')
delete_flavor = Flavor.objects.get(id=flavor_id)
delete_flavor.delete()
return HttpResponseRedirect(request.get_full_path())
if 'create' in request.POST:
volumes = {}
form = NewVMForm(request.POST)
if form.is_valid():
data = form.cleaned_data
if instances:
if data['name'] in instances:
msg = _("A virtual machine with this name already exists")
errors.append(msg)
if not errors:
if data['hdd_size']:
if not data['mac']:
msg = _("No Virtual Machine MAC has been entered")
errors.append(msg)
else:
try:
path = conn.create_volume(data['storage'], data['name'], data['hdd_size'])
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
elif data['template']:
templ_path = conn.get_volume_path(data['template'])
clone_path = conn.clone_from_template(data['name'], templ_path)
volumes[clone_path] = conn.get_volume_type(clone_path)
else:
if not data['images']:
msg = _("First you need to create or select an image")
errors.append(msg)
else:
for vol in data['images'].split(','):
try:
path = conn.get_volume_path(vol)
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
if not errors:
uuid = util.randomUUID()
try:
conn.create_instance(data['name'], data['memory'], data['vcpu'], data['host_model'],
uuid, volumes, data['networks'], data['virtio'], data['mac'])
create_instance = Instance(compute_id=host_id, name=data['name'], uuid=uuid)
create_instance.save()
return HttpResponseRedirect('/instance/%s/%s/' % (host_id, data['name']))
except libvirtError as msg_error:
if data['hdd_size']:
conn.delete_volume(volumes.keys()[0])
errors.append(msg_error.message)
conn.close()
return render_to_response('create.html', locals(), context_instance=RequestContext(request))
| from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from servers.models import Compute
from create.models import Flavor
from instance.models import Instance
from libvirt import libvirtError
from vrtManager.create import wvmCreate
from vrtManager import util
from create.forms import FlavorAddForm, NewVMForm
def create(request, host_id):
"""
Create new instance.
"""
if not request.user.is_authenticated():
return HttpResponseRedirect('/login')
errors = []
compute = Compute.objects.get(id=host_id)
flavors = Flavor.objects.filter().order_by('id')
try:
conn = wvmCreate(compute.hostname,
compute.login,
compute.password,
compute.type)
storages = sorted(conn.get_storages())
networks = sorted(conn.get_networks())
instances = conn.get_instances()
get_images = sorted(conn.get_storages_images())
mac_auto = util.randomMAC()
except libvirtError as err:
errors.append(err.message)
if not storages:
msg = _("You haven't defined have any storage pools")
errors.append(msg)
if not networks:
msg = _("You haven't defined have any network pools")
errors.append(msg)
if request.method == 'POST':
if 'create_flavor' in request.POST:
form = FlavorAddForm(request.POST)
if form.is_valid():
data = form.cleaned_data
create_flavor = Flavor(label=data['label'],
vcpu=data['vcpu'],
memory=data['memory'],
disk=data['disk'])
create_flavor.save()
return HttpResponseRedirect(request.get_full_path())
if 'delete_flavor' in request.POST:
flavor_id = request.POST.get('flavor', '')
delete_flavor = Flavor.objects.get(id=flavor_id)
delete_flavor.delete()
return HttpResponseRedirect(request.get_full_path())
if 'create' in request.POST:
volumes = {}
form = NewVMForm(request.POST)
if form.is_valid():
data = form.cleaned_data
if instances:
if data['name'] in instances:
msg = _("A virtual machine with this name already exists")
errors.append(msg)
if not errors:
if data['hdd_size']:
if not data['mac']:
msg = _("No Virtual Machine MAC has been entered")
errors.append(msg)
else:
try:
path = conn.create_volume(data['storage'], data['name'], data['hdd_size'])
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
elif data['template']:
templ_path = conn.get_volume_path(data['template'])
clone_path = conn.clone_from_template(data['name'], templ_path)
volumes[clone_path] = conn.get_volume_type(clone_path)
else:
if not data['images']:
msg = _("First you need to create or select an image")
errors.append(msg)
else:
for vol in data['images'].split(','):
try:
path = conn.get_volume_path(vol)
volumes[path] = conn.get_volume_type(path)
except libvirtError as msg_error:
errors.append(msg_error.message)
if not errors:
uuid = util.randomUUID()
try:
conn.create_instance(data['name'], data['memory'], data['vcpu'], data['host_model'],
uuid, volumes, data['networks'], data['virtio'], data['mac'])
create_instance = Instance(compute_id=host_id, name=data['name'], uuid=uuid)
create_instance.save()
return HttpResponseRedirect('/instance/%s/%s/' % (host_id, data['name']))
except libvirtError as msg_error:
if data['hdd_size']:
conn.delete_volume(volumes.keys()[0])
errors.append(msg_error.message)
conn.close()
return render_to_response('create.html', locals(), context_instance=RequestContext(request))
| en | 0.806663 | Create new instance. | 2.076478 | 2 |
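For orientation, a view with this signature is normally reached through a URLconf entry that captures host_id; a minimal sketch follows. The import path create.views and the URL pattern are assumptions based on the app imports above (create.models, create.forms), not code taken from the repository, and the exact URLconf style depends on the Django 1.x version in use.

# Hypothetical urls.py entry routing to the create(request, host_id) view above.
from django.conf.urls import url

from create.views import create

urlpatterns = [
    # host_id is captured from the URL and handed to the view unchanged.
    url(r'^create/(?P<host_id>[0-9]+)/$', create, name='create'),
]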
utils/wassersteinGradientPenalty.py | andimarafioti/GACELA | 15 | 5945 | import torch
__author__ = 'Andres'
def calc_gradient_penalty_bayes(discriminator, real_data, fake_data, gamma):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = real_data.size()[0]
alpha = torch.rand(batch_size, 1, 1, 1)
alpha = alpha.expand(real_data.size()).to(device)
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True).to(device)
disc_interpolates = discriminator(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2) - 1) ** 2) * gamma
return gradient_penalty | import torch
__author__ = 'Andres'
def calc_gradient_penalty_bayes(discriminator, real_data, fake_data, gamma):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = real_data.size()[0]
alpha = torch.rand(batch_size, 1, 1, 1)
alpha = alpha.expand(real_data.size()).to(device)
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True).to(device)
disc_interpolates = discriminator(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2) - 1) ** 2) * gamma
return gradient_penalty | none | 1 | 2.311322 | 2 |
|
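A minimal sketch of how a penalty with this signature is typically used inside a WGAN-GP critic update. netD, optD and the data batches are placeholders, gamma=10.0 is the value commonly used for WGAN-GP rather than anything taken from this repository, and the import assumes the file is importable as utils.wassersteinGradientPenalty.

from utils.wassersteinGradientPenalty import calc_gradient_penalty_bayes

def critic_step(netD, optD, real_batch, fake_batch, gamma=10.0):
    # Standard WGAN-GP critic loss: D(fake) - D(real) + gradient penalty.
    optD.zero_grad()
    loss = netD(fake_batch.detach()).mean() - netD(real_batch).mean()
    gp = calc_gradient_penalty_bayes(netD, real_batch, fake_batch.detach(), gamma)
    loss = loss + gp
    loss.backward()
    optD.step()
    return loss.item()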
pytest_capture_log_error/test_file.py | butla/experiments | 1 | 5946 | <reponame>butla/experiments
import a_file
def test_a(capsys):
assert a_file.bla() == 5
assert a_file.LOG_MESSAGE in capsys.readouterr().err
| import a_file
def test_a(capsys):
assert a_file.bla() == 5
assert a_file.LOG_MESSAGE in capsys.readouterr().err | none | 1 | 2.225779 | 2 |
|
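The a_file module under test is not part of this row; a hypothetical implementation that would make both assertions pass (bla() returning 5 and LOG_MESSAGE appearing on stderr, which capsys captures) could look like this.

# Hypothetical a_file.py -- illustrative only, not the real module.
import sys

LOG_MESSAGE = "bla() was called"

def bla():
    # Written to stderr so the test's capsys.readouterr().err sees it.
    print(LOG_MESSAGE, file=sys.stderr)
    return 5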
src_py/ui/identify_page.py | Magier/Aetia | 0 | 5947 | <reponame>Magier/Aetia
import streamlit as st
from ui.session_state import SessionState, get_state
from infer import ModelStage
def show(state: SessionState):
st.header("identify")
state = get_state()
if state.model.stage < ModelStage.DEFINED:
st.error("Please create the model first!")
| import streamlit as st
from ui.session_state import SessionState, get_state
from infer import ModelStage
def show(state: SessionState):
st.header("identify")
state = get_state()
if state.model.stage < ModelStage.DEFINED:
st.error("Please create the model first!") | none | 1 | 2.440287 | 2 |
|
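A sketch of how a page module like this is typically dispatched from the application's entry point. The page registry and module layout below are assumptions about the surrounding Streamlit app; only ui.identify_page.show and ui.session_state.get_state come from the code above.

import streamlit as st

from ui import identify_page
from ui.session_state import get_state

PAGES = {"identify": identify_page.show}  # assumed page registry

def main():
    state = get_state()
    choice = st.sidebar.selectbox("Page", list(PAGES))
    PAGES[choice](state)

if __name__ == "__main__":
    main()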
openke/data/UniverseTrainDataLoader.py | luofeisg/OpenKE-PuTransE | 0 | 5948 | <reponame>luofeisg/OpenKE-PuTransE
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import ctypes
import numpy as np
from .TrainDataLoader import TrainDataLoader
class UniverseTrainDataLoader(TrainDataLoader):
def __init__(self, in_path="./", batch_size=None, nbatches=None, threads=8, sampling_mode="normal", bern_flag=0,
filter_flag=1, neg_ent=1, neg_rel=0, initial_random_seed=2):
super(UniverseTrainDataLoader, self).__init__(in_path=in_path, batch_size=batch_size, nbatches=nbatches,
threads=threads, sampling_mode=sampling_mode, bern_flag=bern_flag,
filter_flag=filter_flag, neg_ent=neg_ent, neg_rel=neg_rel,
initial_random_seed=initial_random_seed)
self.entity_total_universe = 0
self.relation_total_universe = 0
self.train_total_universe = 0
"""argtypes"""
self.lib.sampling.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64
]
self.lib.getParallelUniverse.argtypes = [
ctypes.c_int64,
ctypes.c_float,
ctypes.c_int64
]
self.lib.getEntityRemapping.argtypes = [
ctypes.c_void_p
]
self.lib.getRelationRemapping.argtypes = [
ctypes.c_void_p
]
self.lib.getEntityTotalUniverse.restype = ctypes.c_int64
self.lib.getRelationTotalUniverse.restype = ctypes.c_int64
self.lib.getTrainTotalUniverse.restype = ctypes.c_int64
def swap_helpers(self):
self.lib.swapHelpers()
def reset_universe(self):
self.lib.resetUniverse()
self.set_nbatches(self.lib.getTrainTotal, self.nbatches)
def get_universe_mappings(self):
entity_remapping = np.zeros(self.entity_total_universe, dtype=np.int64)
relation_remapping = np.zeros(self.relation_total_universe, dtype=np.int64)
entity_remapping_addr = entity_remapping.__array_interface__["data"][0]
relation_remapping_addr = relation_remapping.__array_interface__["data"][0]
self.lib.getEntityRemapping(entity_remapping_addr)
self.lib.getRelationRemapping(relation_remapping_addr)
return entity_remapping, relation_remapping
def compile_universe_dataset(self, triple_constraint, balance_param, relation_in_focus):
self.lib.getParallelUniverse(triple_constraint, balance_param, relation_in_focus)
self.entity_total_universe = self.lib.getEntityTotalUniverse()
self.relation_total_universe = self.lib.getRelationTotalUniverse()
self.train_total_universe = self.lib.getTrainTotalUniverse()
self.set_nbatches(self.train_total_universe, self.nbatches)
| '''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import ctypes
import numpy as np
from .TrainDataLoader import TrainDataLoader
class UniverseTrainDataLoader(TrainDataLoader):
def __init__(self, in_path="./", batch_size=None, nbatches=None, threads=8, sampling_mode="normal", bern_flag=0,
filter_flag=1, neg_ent=1, neg_rel=0, initial_random_seed=2):
super(UniverseTrainDataLoader, self).__init__(in_path=in_path, batch_size=batch_size, nbatches=nbatches,
threads=threads, sampling_mode=sampling_mode, bern_flag=bern_flag,
filter_flag=filter_flag, neg_ent=neg_ent, neg_rel=neg_rel,
initial_random_seed=initial_random_seed)
self.entity_total_universe = 0
self.relation_total_universe = 0
self.train_total_universe = 0
"""argtypes"""
self.lib.sampling.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64,
ctypes.c_int64
]
self.lib.getParallelUniverse.argtypes = [
ctypes.c_int64,
ctypes.c_float,
ctypes.c_int64
]
self.lib.getEntityRemapping.argtypes = [
ctypes.c_void_p
]
self.lib.getRelationRemapping.argtypes = [
ctypes.c_void_p
]
self.lib.getEntityTotalUniverse.restype = ctypes.c_int64
self.lib.getRelationTotalUniverse.restype = ctypes.c_int64
self.lib.getTrainTotalUniverse.restype = ctypes.c_int64
def swap_helpers(self):
self.lib.swapHelpers()
def reset_universe(self):
self.lib.resetUniverse()
self.set_nbatches(self.lib.getTrainTotal, self.nbatches)
def get_universe_mappings(self):
entity_remapping = np.zeros(self.entity_total_universe, dtype=np.int64)
relation_remapping = np.zeros(self.relation_total_universe, dtype=np.int64)
entity_remapping_addr = entity_remapping.__array_interface__["data"][0]
relation_remapping_addr = relation_remapping.__array_interface__["data"][0]
self.lib.getEntityRemapping(entity_remapping_addr)
self.lib.getRelationRemapping(relation_remapping_addr)
return entity_remapping, relation_remapping
def compile_universe_dataset(self, triple_constraint, balance_param, relation_in_focus):
self.lib.getParallelUniverse(triple_constraint, balance_param, relation_in_focus)
self.entity_total_universe = self.lib.getEntityTotalUniverse()
self.relation_total_universe = self.lib.getRelationTotalUniverse()
self.train_total_universe = self.lib.getTrainTotalUniverse()
self.set_nbatches(self.train_total_universe, self.nbatches) | en | 0.764591 | MIT License Copyright (c) 2020 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. argtypes | 1.4753 | 1 |
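A rough usage sketch for the loader above, following the usual OpenKE pattern: build a parallel universe around one relation, read back the local-to-global ID mappings, then reset. The benchmark path and all numeric parameters are placeholders, and OpenKE's compiled C library must be available for the underlying TrainDataLoader to work.

from openke.data.UniverseTrainDataLoader import UniverseTrainDataLoader

train_dataloader = UniverseTrainDataLoader(
    in_path="./benchmarks/FB15K237/",  # placeholder benchmark directory
    nbatches=100,
    threads=8,
    sampling_mode="normal",
    bern_flag=1,
    neg_ent=25,
    neg_rel=0,
)

# Carve out one universe and inspect its entity/relation remappings.
train_dataloader.compile_universe_dataset(
    triple_constraint=1000,  # placeholder universe size
    balance_param=0.5,       # placeholder sampling balance
    relation_in_focus=0,     # placeholder relation id
)
entity_map, relation_map = train_dataloader.get_universe_mappings()
print(entity_map.shape, relation_map.shape)

# Return to the full training graph afterwards.
train_dataloader.reset_universe()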
test/jit/test_backend_nnapi.py | Hacky-DH/pytorch | 60,067 | 5949 | import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
@unittest.skipIf(not os.path.exists(lib_path),
"Skipping the test as libnnapi_backend.so was not found")
@unittest.skipIf(TEST_WITH_ASAN, "Unresolved bug with ASAN")
class TestNnapiBackend(TestNNAPI):
def setUp(self):
super().setUp()
# Save default dtype
module = torch.nn.PReLU()
self.default_dtype = module.weight.dtype
# Change dtype to float32 (since a different unit test changed dtype to float64,
# which is not supported by the Android NNAPI delegate)
# Float32 should typically be the default in other files.
torch.set_default_dtype(torch.float32)
# Load nnapi delegate library
torch.ops.load_library(str(lib_path))
# Override
def call_lowering_to_nnapi(self, traced_module, args):
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
# Argument input is a single Tensor
self.call_lowering_to_nnapi(traced, args)
# Argument input is a Tensor in a list
self.call_lowering_to_nnapi(traced, [args])
# Test exceptions for incorrect compile specs
    def test_compile_spec_sanity(self):
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No Tensor or TensorList under the inputs key
compile_spec = {"forward": {"inputs": 1}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
compile_spec = {"forward": {"inputs": [1]}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
# Change dtype back to default (Otherwise, other unit tests will complain)
torch.set_default_dtype(self.default_dtype)
| import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
@unittest.skipIf(not os.path.exists(lib_path),
"Skipping the test as libnnapi_backend.so was not found")
@unittest.skipIf(TEST_WITH_ASAN, "Unresolved bug with ASAN")
class TestNnapiBackend(TestNNAPI):
def setUp(self):
super().setUp()
# Save default dtype
module = torch.nn.PReLU()
self.default_dtype = module.weight.dtype
# Change dtype to float32 (since a different unit test changed dtype to float64,
# which is not supported by the Android NNAPI delegate)
# Float32 should typically be the default in other files.
torch.set_default_dtype(torch.float32)
# Load nnapi delegate library
torch.ops.load_library(str(lib_path))
# Override
def call_lowering_to_nnapi(self, traced_module, args):
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
# Argument input is a single Tensor
self.call_lowering_to_nnapi(traced, args)
# Argument input is a Tensor in a list
self.call_lowering_to_nnapi(traced, [args])
# Test exceptions for incorrect compile specs
    def test_compile_spec_sanity(self):
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No Tensor or TensorList under the inputs key
compile_spec = {"forward": {"inputs": 1}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
compile_spec = {"forward": {"inputs": [1]}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
# Change dtype back to default (Otherwise, other unit tests will complain)
torch.set_default_dtype(self.default_dtype)
| en | 0.743254 | # Make the helper files in test/ importable Unit Tests for Nnapi backend with delegate Inherits most tests from TestNNAPI, which loads Android NNAPI models without the delegate API. # First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests. # Second skip is because ASAN is currently causing an error. # It is still unclear how to resolve this. T95764916 # Save default dtype # Change dtype to float32 (since a different unit test changed dtype to float64, # which is not supported by the Android NNAPI delegate) # Float32 should typically be the default in other files. # Load nnapi delegate library # Override # Lower a simple module # Argument input is a single Tensor # Argument input is a Tensor in a list # Test exceptions for incorrect compile specs method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder. For input shapes, use 0 for run/load time flexible input. method_compile_spec must use the following format: {"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}} # No forward key # No dictionary under the forward key # No inputs key (in the dictionary under the forward key) # No Tensor or TensorList under the inputs key # Change dtype back to default (Otherwise, other unit tests will complain) | 2.294284 | 2 |
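The compile-spec convention these tests exercise can be shown in a standalone sketch; it assumes libnnapi_backend.so has already been loaded with torch.ops.load_library, exactly as the setUp above does.

import torch

module = torch.nn.PReLU()
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
traced = torch.jit.trace(module, args)

# method_compile_spec format: {"forward": {"inputs": Tensor or list of Tensors}}
compile_spec = {"forward": {"inputs": args}}
lowered = torch._C._jit_to_backend("nnapi", traced, compile_spec)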
main/configure.py | syxu828/Graph2Seq-0.1 | 24 | 5950 | <gh_stars>10-100
train_data_path = "../data/no_cycle/train.data"
dev_data_path = "../data/no_cycle/dev.data"
test_data_path = "../data/no_cycle/test.data"
word_idx_file_path = "../data/word.idx"
word_embedding_dim = 100
train_batch_size = 32
dev_batch_size = 500
test_batch_size = 500
l2_lambda = 0.000001
learning_rate = 0.001
epochs = 100
encoder_hidden_dim = 200
num_layers_decode = 1
word_size_max = 1
dropout = 0.0
path_embed_method = "lstm" # cnn or lstm or bi-lstm
unknown_word = "<unk>"
PAD = "<PAD>"
GO = "<GO>"
EOS = "<EOS>"
deal_unknown_words = True
seq_max_len = 11
decoder_type = "greedy" # greedy, beam
beam_width = 4
attention = True
num_layers = 1 # 1 or 2
# the following are for the graph encoding method
weight_decay = 0.0000
sample_size_per_layer = 4
sample_layer_size = 4
hidden_layer_dim = 100
feature_max_len = 1
feature_encode_type = "uni"
# graph_encode_method = "max-pooling" # "lstm" or "max-pooling"
graph_encode_direction = "bi" # "single" or "bi"
concat = True
encoder = "gated_gcn" # "gated_gcn" "gcn" "seq"
lstm_in_gcn = "none" # before, after, none
| train_data_path = "../data/no_cycle/train.data"
dev_data_path = "../data/no_cycle/dev.data"
test_data_path = "../data/no_cycle/test.data"
word_idx_file_path = "../data/word.idx"
word_embedding_dim = 100
train_batch_size = 32
dev_batch_size = 500
test_batch_size = 500
l2_lambda = 0.000001
learning_rate = 0.001
epochs = 100
encoder_hidden_dim = 200
num_layers_decode = 1
word_size_max = 1
dropout = 0.0
path_embed_method = "lstm" # cnn or lstm or bi-lstm
unknown_word = "<unk>"
PAD = "<PAD>"
GO = "<GO>"
EOS = "<EOS>"
deal_unknown_words = True
seq_max_len = 11
decoder_type = "greedy" # greedy, beam
beam_width = 4
attention = True
num_layers = 1 # 1 or 2
# the following are for the graph encoding method
weight_decay = 0.0000
sample_size_per_layer = 4
sample_layer_size = 4
hidden_layer_dim = 100
feature_max_len = 1
feature_encode_type = "uni"
# graph_encode_method = "max-pooling" # "lstm" or "max-pooling"
graph_encode_direction = "bi" # "single" or "bi"
concat = True
encoder = "gated_gcn" # "gated_gcn" "gcn" "seq"
lstm_in_gcn = "none" # before, after, none | en | 0.824399 | # cnn or lstm or bi-lstm # greedy, beam # 1 or 2 # the following are for the graph encoding method # graph_encode_method = "max-pooling" # "lstm" or "max-pooling" # "single" or "bi" # "gated_gcn" "gcn" "seq" # before, after, none | 2.281579 | 2 |
dataControlWidget.py | andreasbayer/AEGUIFit | 0 | 5951 | from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox
from InftyDoubleSpinBox import InftyDoubleSpinBox
from PyQt5.QtCore import pyqtSignal, Qt
import helplib as hl
import numpy as np
class dataControlWidget(QGroupBox):
showErrorBars_changed = pyqtSignal(bool)
ignoreFirstPoint_changed = pyqtSignal(bool)
data_changed = pyqtSignal(bool, bool)
data_shift = pyqtSignal(np.float64)
load_fits = pyqtSignal(list)
load_view = pyqtSignal(str)
load_meta = pyqtSignal(str)
fit_on_startup = pyqtSignal()
SHOW_ERROR_BARS = "Show error bars"
SHOW_ERROR_BARS_NOT_LOADED = "Show error bars (could not be calculated)"
def __init__(self):
QWidget.__init__(self)
self.setTitle('Data Settings')
self.__lblEnergyShift = QLabel("Energy Shift:")
self.__dsbEnergyShift = InftyDoubleSpinBox()
self.__dsbEnergyShift.editingFinished.connect(self.__energyShiftChanged)
self.__dsbEnergyShift.setSingleStep(0.01)
self.__chkShowErrorBars = QCheckBox(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.stateChanged.connect(self.__chkShowErrorBars_changed)
self.__chkIgnoreFirstPoint = QCheckBox('Ignore first data point.')
self.__chkIgnoreFirstPoint.stateChanged.connect(self.__chkIgnoreFirstPoint_changed)
self.__mainLayout = QGridLayout()
self.setLayout(self.__mainLayout)
self.__mainLayout.setAlignment(Qt.AlignTop)
self.__mainLayout.addWidget(self.__lblEnergyShift, 0, 0)
self.__mainLayout.addWidget(self.__dsbEnergyShift, 0, 1)
self.__mainLayout.addWidget(self.__chkShowErrorBars, 1, 0, 1, 2)
self.__mainLayout.addWidget(self.__chkIgnoreFirstPoint, 2, 0, 1, 2)
self.__chkIgnoreFirstPoint.setVisible(False)
self.reset(False)
def reset(self, enable):
self.__data = None
self.__all_data = None
self.__stdErrors = None
self.__chkShowErrorBars.setCheckable(True)
self.__chkShowErrorBars.setChecked(False)
self.__chkShowErrorBars.setEnabled(False)
self.__chkIgnoreFirstPoint.setCheckable(True)
self.__chkIgnoreFirstPoint.setChecked(False)
self.__chkIgnoreFirstPoint.setEnabled(False)
self.setEnergyShift(0.0)
self.__prevShift = 0.0
self.setEnabled(enable)
def __chkShowErrorBars_changed(self, state):
self.__chkShowErrorBars.setCheckState(state)
self.showErrorBars_changed.emit(self.getShowErrorBars())
def __chkIgnoreFirstPoint_changed(self, state):
self.__chkIgnoreFirstPoint.setCheckState(state)
self.ignoreFirstPoint_changed.emit(self.getIgnoreFirstPoint())
def __energyShiftChanged(self):
self.cause_shift()
def cause_shift(self):
energyShift = self.__dsbEnergyShift.value()
increment = energyShift - self.__prevShift
self.__prevShift = energyShift
self.data_shift.emit(increment)
self.data_changed.emit(self.getShowErrorBars(), self.getIgnoreFirstPoint())
# def setData(self, data):
# self.__data = data
def getData(self):
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__data[first_point:,]
def getEnergyShift(self):
return (self.__dsbEnergyShift.value())
def setEnergyShift(self, value):
#increment = self.__dsbEnergyShift.value() - value
increment = value - self.__dsbEnergyShift.value()
self.__dsbEnergyShift.setValue(value)
#self.__shiftData(increment)
#self.data_shift.emit(increment)
def __shiftData(self, increment):
try:
if self.__data is not None:
for set in self.__data:
set[0] += increment
except Exception as e:
print(e)
def getStdErrors(self):
if self.__stdErrors is not None:
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__stdErrors[first_point:]
else:
return None
def getMax_Energy(self):
if self.getData() is not None:
return self.getData()[-1][0]
else:
return None
def getMin_Energy(self):
if self.getData() is not None:
return self.getData()[0][0]
else:
return None
def getShowErrorBars(self):
return self.__chkShowErrorBars.isChecked()
def setShowErrorBars(self, value):
self.__chkShowErrorBars.setChecked(value)
def getIgnoreFirstPoint(self):
return self.__chkIgnoreFirstPoint.isChecked()
def setIgnoreFirstPoint(self, value):
self.__chkIgnoreFirstPoint.setChecked(value)
def hasStdErrors(self):
return self.__stdErrors is not None
def loadFile(self, fileName, id_string):
self.__all_data, self.__stdErrors, (fit_strings, view_string, data_string, meta_string), id_found =\
hl.readFileForFitsDataAndStdErrorAndMetaData(fileName, id_string)
#we need a copy to not save any altered data!
self.__data = (self.__all_data[:, 0:2]).copy()
if len(self.__data) <= 1:
raise Exception("Not enough data in file!")
if self.hasStdErrors():
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS)
else:
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.setEnabled(self.hasStdErrors())
self.__chkShowErrorBars.setChecked(self.hasStdErrors())
self.__chkIgnoreFirstPoint.setEnabled(True)
self.data_changed.emit(self.hasStdErrors(), self.getIgnoreFirstPoint())
self.load_fits.emit(fit_strings)
self.load_view.emit(view_string)
self.load_meta.emit(meta_string)
self.load_from_data_string(data_string)
self.cause_shift()
self.fit_on_startup.emit()
return id_found
def load_from_data_string(self, data_string):
if data_string is not None:
split_string = data_string.split('\v')
for i in range(0, len(split_string)):
item = split_string[i].split('=')
if len(item) == 2:
if (item[0] == 'egs'):
self.setEnergyShift(np.float64(item[1]))
elif item[0] == 'seb':
if item[1] == '1' or item[1] == 'True':
self.setShowErrorBars(True)
elif item[1] == '0' or item[1] == 'False':
self.setShowErrorBars(False)
elif item[0] == 'ifd':
if item[1] == '1' or item[1] == 'True':
self.setIgnoreFirstPoint(True)
elif item[1] == '0' or item[1] == 'False':
self.setIgnoreFirstPoint(False)
def get_data_string(self):
return 'egs=' + str(self.getEnergyShift()) + '\vseb=' + str(self.getShowErrorBars()) +\
'\vifd=' + str(self.getIgnoreFirstPoint())
def saveFile(self, fileName, id_string, fit_strings, view_string, data_string, meta_string):
hl.saveFilewithMetaData(id_string, fileName, self.__all_data, (fit_strings, view_string, data_string, meta_string))
| from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox
from InftyDoubleSpinBox import InftyDoubleSpinBox
from PyQt5.QtCore import pyqtSignal, Qt
import helplib as hl
import numpy as np
class dataControlWidget(QGroupBox):
showErrorBars_changed = pyqtSignal(bool)
ignoreFirstPoint_changed = pyqtSignal(bool)
data_changed = pyqtSignal(bool, bool)
data_shift = pyqtSignal(np.float64)
load_fits = pyqtSignal(list)
load_view = pyqtSignal(str)
load_meta = pyqtSignal(str)
fit_on_startup = pyqtSignal()
SHOW_ERROR_BARS = "Show error bars"
SHOW_ERROR_BARS_NOT_LOADED = "Show error bars (could not be calculated)"
def __init__(self):
QWidget.__init__(self)
self.setTitle('Data Settings')
self.__lblEnergyShift = QLabel("Energy Shift:")
self.__dsbEnergyShift = InftyDoubleSpinBox()
self.__dsbEnergyShift.editingFinished.connect(self.__energyShiftChanged)
self.__dsbEnergyShift.setSingleStep(0.01)
self.__chkShowErrorBars = QCheckBox(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.stateChanged.connect(self.__chkShowErrorBars_changed)
self.__chkIgnoreFirstPoint = QCheckBox('Ignore first data point.')
self.__chkIgnoreFirstPoint.stateChanged.connect(self.__chkIgnoreFirstPoint_changed)
self.__mainLayout = QGridLayout()
self.setLayout(self.__mainLayout)
self.__mainLayout.setAlignment(Qt.AlignTop)
self.__mainLayout.addWidget(self.__lblEnergyShift, 0, 0)
self.__mainLayout.addWidget(self.__dsbEnergyShift, 0, 1)
self.__mainLayout.addWidget(self.__chkShowErrorBars, 1, 0, 1, 2)
self.__mainLayout.addWidget(self.__chkIgnoreFirstPoint, 2, 0, 1, 2)
self.__chkIgnoreFirstPoint.setVisible(False)
self.reset(False)
def reset(self, enable):
self.__data = None
self.__all_data = None
self.__stdErrors = None
self.__chkShowErrorBars.setCheckable(True)
self.__chkShowErrorBars.setChecked(False)
self.__chkShowErrorBars.setEnabled(False)
self.__chkIgnoreFirstPoint.setCheckable(True)
self.__chkIgnoreFirstPoint.setChecked(False)
self.__chkIgnoreFirstPoint.setEnabled(False)
self.setEnergyShift(0.0)
self.__prevShift = 0.0
self.setEnabled(enable)
def __chkShowErrorBars_changed(self, state):
self.__chkShowErrorBars.setCheckState(state)
self.showErrorBars_changed.emit(self.getShowErrorBars())
def __chkIgnoreFirstPoint_changed(self, state):
self.__chkIgnoreFirstPoint.setCheckState(state)
self.ignoreFirstPoint_changed.emit(self.getIgnoreFirstPoint())
def __energyShiftChanged(self):
self.cause_shift()
def cause_shift(self):
energyShift = self.__dsbEnergyShift.value()
increment = energyShift - self.__prevShift
self.__prevShift = energyShift
self.data_shift.emit(increment)
self.data_changed.emit(self.getShowErrorBars(), self.getIgnoreFirstPoint())
# def setData(self, data):
# self.__data = data
def getData(self):
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__data[first_point:,]
def getEnergyShift(self):
return (self.__dsbEnergyShift.value())
def setEnergyShift(self, value):
#increment = self.__dsbEnergyShift.value() - value
increment = value - self.__dsbEnergyShift.value()
self.__dsbEnergyShift.setValue(value)
#self.__shiftData(increment)
#self.data_shift.emit(increment)
def __shiftData(self, increment):
try:
if self.__data is not None:
for set in self.__data:
set[0] += increment
except Exception as e:
print(e)
def getStdErrors(self):
if self.__stdErrors is not None:
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__stdErrors[first_point:]
else:
return None
def getMax_Energy(self):
if self.getData() is not None:
return self.getData()[-1][0]
else:
return None
def getMin_Energy(self):
if self.getData() is not None:
return self.getData()[0][0]
else:
return None
def getShowErrorBars(self):
return self.__chkShowErrorBars.isChecked()
def setShowErrorBars(self, value):
self.__chkShowErrorBars.setChecked(value)
def getIgnoreFirstPoint(self):
return self.__chkIgnoreFirstPoint.isChecked()
def setIgnoreFirstPoint(self, value):
self.__chkIgnoreFirstPoint.setChecked(value)
def hasStdErrors(self):
return self.__stdErrors is not None
def loadFile(self, fileName, id_string):
self.__all_data, self.__stdErrors, (fit_strings, view_string, data_string, meta_string), id_found =\
hl.readFileForFitsDataAndStdErrorAndMetaData(fileName, id_string)
#we need a copy to not save any altered data!
self.__data = (self.__all_data[:, 0:2]).copy()
if len(self.__data) <= 1:
raise Exception("Not enough data in file!")
if self.hasStdErrors():
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS)
else:
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.setEnabled(self.hasStdErrors())
self.__chkShowErrorBars.setChecked(self.hasStdErrors())
self.__chkIgnoreFirstPoint.setEnabled(True)
self.data_changed.emit(self.hasStdErrors(), self.getIgnoreFirstPoint())
self.load_fits.emit(fit_strings)
self.load_view.emit(view_string)
self.load_meta.emit(meta_string)
self.load_from_data_string(data_string)
self.cause_shift()
self.fit_on_startup.emit()
return id_found
def load_from_data_string(self, data_string):
if data_string is not None:
split_string = data_string.split('\v')
for i in range(0, len(split_string)):
item = split_string[i].split('=')
if len(item) == 2:
if (item[0] == 'egs'):
self.setEnergyShift(np.float64(item[1]))
elif item[0] == 'seb':
if item[1] == '1' or item[1] == 'True':
self.setShowErrorBars(True)
elif item[1] == '0' or item[1] == 'False':
self.setShowErrorBars(False)
elif item[0] == 'ifd':
if item[1] == '1' or item[1] == 'True':
self.setIgnoreFirstPoint(True)
elif item[1] == '0' or item[1] == 'False':
self.setIgnoreFirstPoint(False)
def get_data_string(self):
return 'egs=' + str(self.getEnergyShift()) + '\vseb=' + str(self.getShowErrorBars()) +\
'\vifd=' + str(self.getIgnoreFirstPoint())
def saveFile(self, fileName, id_string, fit_strings, view_string, data_string, meta_string):
hl.saveFilewithMetaData(id_string, fileName, self.__all_data, (fit_strings, view_string, data_string, meta_string))
| en | 0.198907 | # def setData(self, data): # self.__data = data #increment = self.__dsbEnergyShift.value() - value #self.__shiftData(increment) #self.data_shift.emit(increment) #we need a copy to not save any altered data! | 2.320407 | 2 |
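A minimal sketch of embedding this widget in a Qt application and reacting to its data_changed signal. The data file name and ID string passed to loadFile are placeholders, and loadFile expects the file format understood by helplib.

import sys
from PyQt5.QtWidgets import QApplication

from dataControlWidget import dataControlWidget

def on_data_changed(has_std_errors, ignore_first_point):
    print("data changed:", has_std_errors, ignore_first_point)

app = QApplication(sys.argv)
widget = dataControlWidget()
widget.data_changed.connect(on_data_changed)
widget.reset(True)  # enable the controls
# widget.loadFile("measurement.dat", "AEGUIFit")  # placeholder file name / id string
widget.show()
sys.exit(app.exec_())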
src/freemovr_engine/calib/acquire.py | strawlab/flyvr | 3 | 5952 | <filename>src/freemovr_engine/calib/acquire.py
import roslib
roslib.load_manifest('sensor_msgs')
roslib.load_manifest('dynamic_reconfigure')
import rospy
import sensor_msgs.msg
import dynamic_reconfigure.srv
import dynamic_reconfigure.encoding
import numpy as np
import time
import os.path
import queue
class CameraHandler(object):
def __init__(self,topic_prefix='',debug=False,enable_dynamic_reconfigure=False):
self.topic_prefix=topic_prefix
self.debug = debug
rospy.Subscriber( '%s/image_raw'%self.topic_prefix, sensor_msgs.msg.Image,
self.get_image_callback)
self.pipeline_max_latency = 0.2
self.last_image = None
self.im_queue = None
self.recon = None
if enable_dynamic_reconfigure:
self.recon = rospy.ServiceProxy('%s/set_parameters'%self.topic_prefix, dynamic_reconfigure.srv.Reconfigure)
self.recon_cache = {}
def reconfigure(self, **params):
if self.recon is not None:
changed = {}
for k,v in list(params.items()):
if k in self.recon_cache:
if self.recon_cache[k] != v:
changed[k] = v
else:
changed[k] = v
if changed:
msg = dynamic_reconfigure.encoding.encode_config(params)
self.recon_cache.update(changed)
self.recon(msg)
if self.im_queue is not None:
#clear the queue so we get a new image with the new settings
while True:
try:
self.im_queue.get_nowait()
except queue.Empty:
break
def set_im_queue(self,q):
self.im_queue = q
def get_image_callback(self,msg):
if self.im_queue is None:
return
try:
if self.debug:
print("%s got image: %f" % (self.topic_prefix, msg.header.stamp.to_sec()))
self.im_queue.put_nowait((self.topic_prefix,msg))
except queue.Full:
if self.debug:
print(self.topic_prefix,"full")
class _Runner(object):
def __init__(self,cam_handlers,ros_latency=0.2,queue_depth=20):
self.cam_handlers = cam_handlers
self.im_queue = queue.Queue(len(cam_handlers)*queue_depth)
for ch in self.cam_handlers:
ch.set_im_queue(self.im_queue)
self.ros_latency = ros_latency
self.max_cam_latency = max( [ch.pipeline_max_latency for ch in self.cam_handlers ])
self._result = {}
@property
def result(self):
return self._result
@property
def result_as_nparray(self):
res = {}
for cam in self._result:
nimgs = len(self._result[cam])
tmpres = [0]*nimgs
for i in range(nimgs):
msg = self._result[cam][i]
shape = (msg.height, msg.width)
imarr = np.fromstring(msg.data,dtype=np.uint8)
imarr.shape = (msg.height, msg.width)
tmpres[i] = imarr
#sad to use dstack here, IMO res[cam][:,:,i] = imarr
#should have worked.
res[cam] = np.dstack(tmpres)
return res
def cycle_duration( self, dur ):
tstart = time.time()
while (time.time() - tstart) < dur:
time.sleep(0.05) # wait 50 msec
def clear_queue(self):
q = self.im_queue
while 1:
try:
q.get_nowait()
except queue.Empty:
break
def _is_done(self,rdict,n_per_camera,verbose=False):
done=True
for topic_prefix in list(rdict.keys()):
if verbose:
rospy.loginfo(' _is_done() has %d frames for %r'%(len(rdict[topic_prefix]), topic_prefix))
if len(rdict[topic_prefix]) < n_per_camera:
done=False
return done
class SimultaneousCameraRunner(_Runner):
def __init__(self,cam_handlers,**kwargs):
_Runner.__init__(self, cam_handlers,**kwargs)
def get_images(self,n_per_camera, pre_func=None, pre_func_args=[], post_func=None, post_func_args=[], verbose=False):
self._result.clear()
for ch in self.cam_handlers:
self._result[ch.topic_prefix] = []
#clear the queue
self.clear_queue()
if pre_func: pre_func(*pre_func_args)
t_latest = time.time() + (self.ros_latency + self.max_cam_latency)*n_per_camera
#wait for the images to arrive
while not self._is_done(self._result,n_per_camera,verbose=verbose):
try:
topic_prefix, msg = self.im_queue.get(1,10.0) # block, 10 second timeout
except queue.Empty:
continue
t_image = msg.header.stamp.to_sec()
if t_image > t_latest:
rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
self._result[topic_prefix].append( msg )
if post_func: post_func(*post_func_args)
class SequentialCameraRunner(_Runner):
def __init__(self,cam_handlers,**kwargs):
_Runner.__init__(self, cam_handlers,**kwargs)
self.wait_duration = kwargs.get("wait_duration", 0.1)
self.check_earliest = False
self.check_latest = False
def get_images(self,n_per_camera,verbose=False):
self._result.clear()
for ch in self.cam_handlers:
self._result[ch.topic_prefix] = []
t_earliest = time.time()
self.clear_queue()
t_latest = t_earliest + (self.ros_latency + self.max_cam_latency)
while not self._is_done(self._result,n_per_camera,verbose=verbose):
try:
topic_prefix, msg = self.im_queue.get(1,10.0) # block, 10 second timeout
except queue.Empty:
continue
t_image = msg.header.stamp.to_sec()
if self.check_latest and t_image > t_latest:
rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
if self.check_earliest and t_image < t_earliest:
rospy.logwarn("image from %s at t=%f was too early (by %f)" % (topic_prefix, t_image, t_earliest - t_image))
continue
self._result[topic_prefix].append( msg )
| <filename>src/freemovr_engine/calib/acquire.py
import roslib
roslib.load_manifest('sensor_msgs')
roslib.load_manifest('dynamic_reconfigure')
import rospy
import sensor_msgs.msg
import dynamic_reconfigure.srv
import dynamic_reconfigure.encoding
import numpy as np
import time
import os.path
import queue
class CameraHandler(object):
def __init__(self,topic_prefix='',debug=False,enable_dynamic_reconfigure=False):
self.topic_prefix=topic_prefix
self.debug = debug
rospy.Subscriber( '%s/image_raw'%self.topic_prefix, sensor_msgs.msg.Image,
self.get_image_callback)
self.pipeline_max_latency = 0.2
self.last_image = None
self.im_queue = None
self.recon = None
if enable_dynamic_reconfigure:
self.recon = rospy.ServiceProxy('%s/set_parameters'%self.topic_prefix, dynamic_reconfigure.srv.Reconfigure)
self.recon_cache = {}
def reconfigure(self, **params):
if self.recon is not None:
changed = {}
for k,v in list(params.items()):
if k in self.recon_cache:
if self.recon_cache[k] != v:
changed[k] = v
else:
changed[k] = v
if changed:
msg = dynamic_reconfigure.encoding.encode_config(params)
self.recon_cache.update(changed)
self.recon(msg)
if self.im_queue is not None:
#clear the queue so we get a new image with the new settings
while True:
try:
self.im_queue.get_nowait()
except queue.Empty:
break
def set_im_queue(self,q):
self.im_queue = q
def get_image_callback(self,msg):
if self.im_queue is None:
return
try:
if self.debug:
print("%s got image: %f" % (self.topic_prefix, msg.header.stamp.to_sec()))
self.im_queue.put_nowait((self.topic_prefix,msg))
except queue.Full:
if self.debug:
print(self.topic_prefix,"full")
class _Runner(object):
def __init__(self,cam_handlers,ros_latency=0.2,queue_depth=20):
self.cam_handlers = cam_handlers
self.im_queue = queue.Queue(len(cam_handlers)*queue_depth)
for ch in self.cam_handlers:
ch.set_im_queue(self.im_queue)
self.ros_latency = ros_latency
self.max_cam_latency = max( [ch.pipeline_max_latency for ch in self.cam_handlers ])
self._result = {}
@property
def result(self):
return self._result
@property
def result_as_nparray(self):
res = {}
for cam in self._result:
nimgs = len(self._result[cam])
tmpres = [0]*nimgs
for i in range(nimgs):
msg = self._result[cam][i]
shape = (msg.height, msg.width)
imarr = np.fromstring(msg.data,dtype=np.uint8)
imarr.shape = (msg.height, msg.width)
tmpres[i] = imarr
#sad to use dstack here, IMO res[cam][:,:,i] = imarr
#should have worked.
res[cam] = np.dstack(tmpres)
return res
def cycle_duration( self, dur ):
tstart = time.time()
while (time.time() - tstart) < dur:
time.sleep(0.05) # wait 50 msec
def clear_queue(self):
q = self.im_queue
while 1:
try:
q.get_nowait()
except queue.Empty:
break
def _is_done(self,rdict,n_per_camera,verbose=False):
done=True
for topic_prefix in list(rdict.keys()):
if verbose:
rospy.loginfo(' _is_done() has %d frames for %r'%(len(rdict[topic_prefix]), topic_prefix))
if len(rdict[topic_prefix]) < n_per_camera:
done=False
return done
class SimultaneousCameraRunner(_Runner):
def __init__(self,cam_handlers,**kwargs):
_Runner.__init__(self, cam_handlers,**kwargs)
def get_images(self,n_per_camera, pre_func=None, pre_func_args=[], post_func=None, post_func_args=[], verbose=False):
self._result.clear()
for ch in self.cam_handlers:
self._result[ch.topic_prefix] = []
#clear the queue
self.clear_queue()
if pre_func: pre_func(*pre_func_args)
t_latest = time.time() + (self.ros_latency + self.max_cam_latency)*n_per_camera
#wait for the images to arrive
while not self._is_done(self._result,n_per_camera,verbose=verbose):
try:
topic_prefix, msg = self.im_queue.get(1,10.0) # block, 10 second timeout
except queue.Empty:
continue
t_image = msg.header.stamp.to_sec()
if t_image > t_latest:
rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
self._result[topic_prefix].append( msg )
if post_func: post_func(*post_func_args)
class SequentialCameraRunner(_Runner):
def __init__(self,cam_handlers,**kwargs):
_Runner.__init__(self, cam_handlers,**kwargs)
self.wait_duration = kwargs.get("wait_duration", 0.1)
self.check_earliest = False
self.check_latest = False
def get_images(self,n_per_camera,verbose=False):
self._result.clear()
for ch in self.cam_handlers:
self._result[ch.topic_prefix] = []
t_earliest = time.time()
self.clear_queue()
t_latest = t_earliest + (self.ros_latency + self.max_cam_latency)
while not self._is_done(self._result,n_per_camera,verbose=verbose):
try:
topic_prefix, msg = self.im_queue.get(1,10.0) # block, 10 second timeout
except queue.Empty:
continue
t_image = msg.header.stamp.to_sec()
if self.check_latest and t_image > t_latest:
rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
if self.check_earliest and t_image < t_earliest:
rospy.logwarn("image from %s at t=%f was too early (by %f)" % (topic_prefix, t_image, t_earliest - t_image))
continue
self._result[topic_prefix].append( msg )
| en | 0.825659 | #clear the queue so we get a new image with the new settings #sad to use dstack here, IMO res[cam][:,:,i] = imarr #should have worked. # wait 50 msec #clear the queue #wait for the images to arrive # block, 10 second timeout # block, 10 second timeout | 2.062517 | 2 |
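A usage sketch for the runner classes above; the camera topic prefixes are placeholders and a running ROS master publishing the matching image_raw topics is assumed.

import rospy
from freemovr_engine.calib.acquire import CameraHandler, SimultaneousCameraRunner

rospy.init_node("acquire_example", anonymous=True)

handlers = [CameraHandler(topic_prefix=t) for t in ("/camera_a", "/camera_b")]
runner = SimultaneousCameraRunner(handlers)

# Grab one synchronised frame per camera and convert the results to numpy.
runner.get_images(n_per_camera=1)
images = runner.result_as_nparray
for topic, stack in images.items():
    print(topic, stack.shape)  # (height, width, n_per_camera)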
examples/hfht/pointnet_classification.py | nixli/hfta | 24 | 5953 | <gh_stars>10-100
import argparse
import logging
import numpy as np
import os
import pandas as pd
import random
import subprocess
from pathlib import Path
from hyperopt import hp
from hyperopt.pyll.stochastic import sample
from hfta.hfht import (tune_hyperparameters, attach_common_args,
rearrange_algorithm_kwargs, handle_integers,
generate_fusible_param_flags, generate_nonfusible_param)
from hfta.workflow import extract_logging_level
from hfta.hfht.utils import fuse_dicts
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
rng_state = np.random.RandomState(seed=args.seed)
fusibles = {
'lr': hp.uniform('lr', 0.0001, 0.01),
'beta1': hp.uniform('beta1', 0.001, 0.999),
'beta2': hp.uniform('beta2', 0.001, 0.999),
'weight_decay': hp.uniform('weight_decay', 0.0, 0.5),
'gamma': hp.uniform('gamma', 0.1, 0.9),
'step_size': hp.choice('step_size', (5, 10, 20, 40)),
}
nonfusibles = {
'batch_size': hp.choice('batch_size', (8, 16, 32)),
'feature_transform': hp.choice('feature_transform', (True, False)),
}
def _run(results_dir, epochs, iters_per_epoch, params, env_vars=None):
# Build the cmd.
cmd = [
'python',
'train_classification.py',
'--epochs',
str(epochs),
'--iters-per-epoch',
str(iters_per_epoch),
'--dataset',
args.dataset,
'--dataset_type',
args.dataset_type,
'--num_points',
str(args.num_points),
'--device',
args.device,
'--eval',
'--seed',
str(args.seed),
'--batch_size',
str(generate_nonfusible_param(params, 'batch_size')),
]
if results_dir is not None:
cmd.extend(['--outf', results_dir])
if generate_nonfusible_param(params, 'feature_transform'):
cmd.append('--feature_transform')
cmd.extend(
generate_fusible_param_flags(
params,
['lr', 'beta1', 'beta2', 'weight_decay', 'gamma', 'step_size'],
))
if args.mode == 'hfta':
cmd.append('--hfta')
if args.amp:
cmd.append('--amp')
# Launch the training process.
succeeded = True
try:
logging.info('--> Running cmd = {}'.format(cmd))
subprocess.run(
cmd,
stdout=subprocess.DEVNULL if results_dir is None else open(
os.path.join(results_dir, 'stdout.txt'),
'w',
),
stderr=subprocess.DEVNULL if results_dir is None else open(
os.path.join(results_dir, 'stderr.txt'),
'w',
),
check=True,
cwd=os.path.join(
os.path.abspath(os.path.expanduser(os.path.dirname(__file__))),
'../pointnet/'),
env=env_vars,
)
except subprocess.CalledProcessError as e:
logging.error(e)
succeeded = False
return succeeded
def try_params(ids, epochs, params, env_vars=None):
""" Running the training process for pointnet classification task.
Args:
ids: Either a single int ID (for serial), or a list of IDs (for HFTA).
epochs: number of epochs to run.
params: maps hyperparameter name to its value(s). For HFTA, the values are
provided as a list.
env_vars: optional, dict(str, str) that includes extra environment that
needs to be forwarded to the subprocess call
Returns:
result(s): A single result dict for serial or a list of result dicts for
HFTA in the same order as ids.
early_stop(s): Whether the training process early stopped. A single bool
for serial or a list of bools for HFTA in the same order as ids.
"""
epochs = int(round(epochs))
ids_str = (','.join([str(i) for i in ids]) if isinstance(
ids,
(list, tuple),
) else str(ids))
# Allocate result dir.
results_dir = os.path.join(args.outdir, ids_str)
Path(results_dir).mkdir(parents=True, exist_ok=True)
# Run training.
succeeded = _run(
results_dir,
epochs,
args.iters_per_epoch,
params,
env_vars=env_vars,
)
if not succeeded:
raise RuntimeError('_run failed!')
# Gather the results.
results_frame = pd.read_csv(os.path.join(results_dir, 'eval.csv'))
if isinstance(ids, (list, tuple)):
results = [{'acc': acc} for acc in results_frame['acc'].tolist()]
assert len(results) == len(ids)
return results, [False] * len(ids)
else:
return {'acc': results_frame['acc'][0]}, False
def dry_run(
B=None,
nonfusibles_kvs=None,
epochs=None,
iters_per_epoch=None,
env_vars=None,
):
params = [{
**handle_integers(sample(fusibles, rng=rng_state)),
**nonfusibles_kvs
} for _ in range(max(B, 1))]
if B > 0:
params = fuse_dicts(params)
else:
params = params[0]
return _run(None, epochs, iters_per_epoch, params, env_vars=env_vars)
tune_hyperparameters(
space={
**fusibles,
**nonfusibles
},
try_params_callback=try_params,
dry_run_callback=dry_run,
mode=args.mode,
algorithm=args.algorithm,
nonfusibles=nonfusibles.keys(),
dry_run_repeats=args.dry_run_repeats,
dry_run_epochs=args.dry_run_epochs,
dry_run_iters_per_epoch=args.dry_run_iters_per_epoch,
metric='acc',
goal='max',
algorithm_configs={
'hyperband': args.hyperband_kwargs,
'random': args.random_kwargs,
},
seed=args.seed,
outdir=args.outdir,
)
def attach_args(parser=argparse.ArgumentParser()):
parser.add_argument(
'--workers',
type=int,
help='number of data loading workers',
default=4,
)
parser.add_argument(
'--iters-per-epoch',
type=int,
default=int(1e9),
help='number of epochs to train for',
)
parser.add_argument('--dataset', type=str, required=True, help="dataset path")
parser.add_argument(
'--dataset-type',
type=str,
default='shapenet',
help="dataset type shapenet|modelnet40",
)
parser.add_argument(
'--num-points',
type=int,
default=2500,
help='num of points for dataset',
)
parser.add_argument(
'--device',
type=str,
default='cuda',
choices=['cpu', 'cuda', 'xla'],
help="the device where this test is running",
)
parser.add_argument(
'--amp',
default=False,
action='store_true',
help='Enable AMP; only used when --device is cuda',
)
parser = attach_common_args(parser)
return parser
if __name__ == '__main__':
args = attach_args().parse_args()
rearrange_algorithm_kwargs(args)
logging.basicConfig(level=extract_logging_level(args))
args.outdir = os.path.abspath(os.path.expanduser(args.outdir))
args.dataset = os.path.abspath(os.path.expanduser(args.dataset))
main(args)
| import argparse
import logging
import numpy as np
import os
import pandas as pd
import random
import subprocess
from pathlib import Path
from hyperopt import hp
from hyperopt.pyll.stochastic import sample
from hfta.hfht import (tune_hyperparameters, attach_common_args,
rearrange_algorithm_kwargs, handle_integers,
generate_fusible_param_flags, generate_nonfusible_param)
from hfta.workflow import extract_logging_level
from hfta.hfht.utils import fuse_dicts
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
rng_state = np.random.RandomState(seed=args.seed)
fusibles = {
'lr': hp.uniform('lr', 0.0001, 0.01),
'beta1': hp.uniform('beta1', 0.001, 0.999),
'beta2': hp.uniform('beta2', 0.001, 0.999),
'weight_decay': hp.uniform('weight_decay', 0.0, 0.5),
'gamma': hp.uniform('gamma', 0.1, 0.9),
'step_size': hp.choice('step_size', (5, 10, 20, 40)),
}
nonfusibles = {
'batch_size': hp.choice('batch_size', (8, 16, 32)),
'feature_transform': hp.choice('feature_transform', (True, False)),
}
def _run(results_dir, epochs, iters_per_epoch, params, env_vars=None):
# Build the cmd.
cmd = [
'python',
'train_classification.py',
'--epochs',
str(epochs),
'--iters-per-epoch',
str(iters_per_epoch),
'--dataset',
args.dataset,
'--dataset_type',
args.dataset_type,
'--num_points',
str(args.num_points),
'--device',
args.device,
'--eval',
'--seed',
str(args.seed),
'--batch_size',
str(generate_nonfusible_param(params, 'batch_size')),
]
if results_dir is not None:
cmd.extend(['--outf', results_dir])
if generate_nonfusible_param(params, 'feature_transform'):
cmd.append('--feature_transform')
cmd.extend(
generate_fusible_param_flags(
params,
['lr', 'beta1', 'beta2', 'weight_decay', 'gamma', 'step_size'],
))
if args.mode == 'hfta':
cmd.append('--hfta')
if args.amp:
cmd.append('--amp')
# Launch the training process.
succeeded = True
try:
logging.info('--> Running cmd = {}'.format(cmd))
subprocess.run(
cmd,
stdout=subprocess.DEVNULL if results_dir is None else open(
os.path.join(results_dir, 'stdout.txt'),
'w',
),
stderr=subprocess.DEVNULL if results_dir is None else open(
os.path.join(results_dir, 'stderr.txt'),
'w',
),
check=True,
cwd=os.path.join(
os.path.abspath(os.path.expanduser(os.path.dirname(__file__))),
'../pointnet/'),
env=env_vars,
)
except subprocess.CalledProcessError as e:
logging.error(e)
succeeded = False
return succeeded
def try_params(ids, epochs, params, env_vars=None):
""" Running the training process for pointnet classification task.
Args:
ids: Either a single int ID (for serial), or a list of IDs (for HFTA).
epochs: number of epochs to run.
params: maps hyperparameter name to its value(s). For HFTA, the values are
provided as a list.
env_vars: optional, dict(str, str) that includes extra environment that
needs to be forwarded to the subprocess call
Returns:
result(s): A single result dict for serial or a list of result dicts for
HFTA in the same order as ids.
early_stop(s): Whether the training process early stopped. A single bool
for serial or a list of bools for HFTA in the same order as ids.
"""
epochs = int(round(epochs))
ids_str = (','.join([str(i) for i in ids]) if isinstance(
ids,
(list, tuple),
) else str(ids))
# Allocate result dir.
results_dir = os.path.join(args.outdir, ids_str)
Path(results_dir).mkdir(parents=True, exist_ok=True)
# Run training.
succeeded = _run(
results_dir,
epochs,
args.iters_per_epoch,
params,
env_vars=env_vars,
)
if not succeeded:
raise RuntimeError('_run failed!')
# Gather the results.
results_frame = pd.read_csv(os.path.join(results_dir, 'eval.csv'))
if isinstance(ids, (list, tuple)):
results = [{'acc': acc} for acc in results_frame['acc'].tolist()]
assert len(results) == len(ids)
return results, [False] * len(ids)
else:
return {'acc': results_frame['acc'][0]}, False
def dry_run(
B=None,
nonfusibles_kvs=None,
epochs=None,
iters_per_epoch=None,
env_vars=None,
):
params = [{
**handle_integers(sample(fusibles, rng=rng_state)),
**nonfusibles_kvs
} for _ in range(max(B, 1))]
if B > 0:
params = fuse_dicts(params)
else:
params = params[0]
return _run(None, epochs, iters_per_epoch, params, env_vars=env_vars)
tune_hyperparameters(
space={
**fusibles,
**nonfusibles
},
try_params_callback=try_params,
dry_run_callback=dry_run,
mode=args.mode,
algorithm=args.algorithm,
nonfusibles=nonfusibles.keys(),
dry_run_repeats=args.dry_run_repeats,
dry_run_epochs=args.dry_run_epochs,
dry_run_iters_per_epoch=args.dry_run_iters_per_epoch,
metric='acc',
goal='max',
algorithm_configs={
'hyperband': args.hyperband_kwargs,
'random': args.random_kwargs,
},
seed=args.seed,
outdir=args.outdir,
)
def attach_args(parser=argparse.ArgumentParser()):
parser.add_argument(
'--workers',
type=int,
help='number of data loading workers',
default=4,
)
parser.add_argument(
'--iters-per-epoch',
type=int,
default=int(1e9),
      help='maximum number of training iterations per epoch',
)
parser.add_argument('--dataset', type=str, required=True, help="dataset path")
parser.add_argument(
'--dataset-type',
type=str,
default='shapenet',
help="dataset type shapenet|modelnet40",
)
parser.add_argument(
'--num-points',
type=int,
default=2500,
help='num of points for dataset',
)
parser.add_argument(
'--device',
type=str,
default='cuda',
choices=['cpu', 'cuda', 'xla'],
help="the device where this test is running",
)
parser.add_argument(
'--amp',
default=False,
action='store_true',
help='Enable AMP; only used when --device is cuda',
)
parser = attach_common_args(parser)
return parser
if __name__ == '__main__':
args = attach_args().parse_args()
rearrange_algorithm_kwargs(args)
logging.basicConfig(level=extract_logging_level(args))
args.outdir = os.path.abspath(os.path.expanduser(args.outdir))
args.dataset = os.path.abspath(os.path.expanduser(args.dataset))
main(args) | en | 0.896387 | # Build the cmd. # Launch the training process. Running the training process for pointnet classification task. Args: ids: Either a single int ID (for serial), or a list of IDs (for HFTA). epochs: number of epochs to run. params: maps hyperparameter name to its value(s). For HFTA, the values are provided as a list. env_vars: optional, dict(str, str) that includes extra environment that needs to be forwarded to the subprocess call Returns: result(s): A single result dict for serial or a list of result dicts for HFTA in the same order as ids. early_stop(s): Whether the training process early stopped. A single bool for serial or a list of bools for HFTA in the same order as ids. # Allocate result dir. # Run training. # Gather the results. | 2.003975 | 2 |
cpdb/trr/migrations/0002_alter_trr_subject_id_type.py | invinst/CPDBv2_backend | 25 | 5954 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-06 04:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trr', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='trr',
name='subject_id',
field=models.PositiveIntegerField(null=True),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-06 04:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trr', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='trr',
name='subject_id',
field=models.PositiveIntegerField(null=True),
),
]
| en | 0.640072 | # -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2018-03-06 04:00 | 1.406311 | 1 |
tests/utils/dut.py | Ostrokrzew/standalone-linux-io-tracer | 24 | 5955 | #
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from core.test_run_utils import TestRun
from utils.installer import install_iotrace, check_if_installed
from utils.iotrace import IotracePlugin
from utils.misc import kill_all_io
from test_tools.fio.fio import Fio
def dut_prepare(reinstall: bool):
if not check_if_installed() or reinstall:
TestRun.LOGGER.info("Installing iotrace:")
install_iotrace()
else:
TestRun.LOGGER.info("iotrace is already installed by previous test")
# Call it after installing iotrace because we need iotrace
# to get valid paths
dut_cleanup()
fio = Fio()
if not fio.is_installed():
TestRun.LOGGER.info("Installing fio")
fio.install()
TestRun.LOGGER.info("Killing all IO")
kill_all_io()
def dut_cleanup():
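    # Stop any fuzzing session, kill leftover iotrace processes and wipe stored
    # traces so the next test starts from a clean state.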
iotrace: IotracePlugin = TestRun.plugins['iotrace']
TestRun.LOGGER.info("Stopping fuzzing")
TestRun.executor.run(f'{iotrace.working_dir}/standalone-linux-io-tracer/tests/security/fuzzy/fuzz.sh clean')
output = TestRun.executor.run('pgrep iotrace')
if output.stdout != "":
TestRun.executor.run(f'kill -9 {output.stdout}')
TestRun.LOGGER.info("Removing existing traces")
trace_repository_path: str = iotrace.get_trace_repository_path()
TestRun.executor.run_expect_success(f'rm -rf {trace_repository_path}/kernel')
| #
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from core.test_run_utils import TestRun
from utils.installer import install_iotrace, check_if_installed
from utils.iotrace import IotracePlugin
from utils.misc import kill_all_io
from test_tools.fio.fio import Fio
def dut_prepare(reinstall: bool):
if not check_if_installed() or reinstall:
TestRun.LOGGER.info("Installing iotrace:")
install_iotrace()
else:
TestRun.LOGGER.info("iotrace is already installed by previous test")
# Call it after installing iotrace because we need iotrace
# to get valid paths
dut_cleanup()
fio = Fio()
if not fio.is_installed():
TestRun.LOGGER.info("Installing fio")
fio.install()
TestRun.LOGGER.info("Killing all IO")
kill_all_io()
def dut_cleanup():
iotrace: IotracePlugin = TestRun.plugins['iotrace']
TestRun.LOGGER.info("Stopping fuzzing")
TestRun.executor.run(f'{iotrace.working_dir}/standalone-linux-io-tracer/tests/security/fuzzy/fuzz.sh clean')
output = TestRun.executor.run('pgrep iotrace')
if output.stdout != "":
TestRun.executor.run(f'kill -9 {output.stdout}')
TestRun.LOGGER.info("Removing existing traces")
trace_repository_path: str = iotrace.get_trace_repository_path()
TestRun.executor.run_expect_success(f'rm -rf {trace_repository_path}/kernel')
| en | 0.618712 | # # Copyright(c) 2020 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause-Clear # # Call it after installing iotrace because we need iotrace # to get valid paths | 1.810269 | 2 |
game_service.py | Drew8521/MusiQ | 0 | 5956 | from models import Song
from random import choice
def random_song(genre):
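    # Fetch every song stored under the requested genre and return one at random.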
results = Song.query().filter(Song.genre==genre).fetch()
print(results)
songs = choice(results)
random_song = {
"title": songs.song,
"album": songs.album,
"artist": songs.artist.lower(),
"genre": genre,
}
return random_song
| from models import Song
from random import choice
def random_song(genre):
results = Song.query().filter(Song.genre==genre).fetch()
print(results)
songs = choice(results)
random_song = {
"title": songs.song,
"album": songs.album,
"artist": songs.artist.lower(),
"genre": genre,
}
return random_song
| none | 1 | 3.144495 | 3 |
|
stdlib/csv/custom_dialect.py | janbodnar/Python-Course | 13 | 5957 | #!/usr/bin/python
# custom_dialect.py
import csv
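# Register a custom dialect named "hashes" that uses '#' as the field delimiter.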
csv.register_dialect("hashes", delimiter="#")
f = open('items3.csv', 'w')
with f:
writer = csv.writer(f, dialect="hashes")
writer.writerow(("pencils", 2))
writer.writerow(("plates", 1))
writer.writerow(("books", 4))
| #!/usr/bin/python
# custom_dialect.py
import csv
csv.register_dialect("hashes", delimiter="#")
f = open('items3.csv', 'w')
with f:
writer = csv.writer(f, dialect="hashes")
writer.writerow(("pencils", 2))
writer.writerow(("plates", 1))
writer.writerow(("books", 4))
| fr | 0.125269 | #!/usr/bin/python # custom_dialect.py | 3.288357 | 3 |
servicex/web/forms.py | zorache/ServiceX_App | 3 | 5958 | from typing import Optional
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired, Length, Email
from servicex.models import UserModel
class ProfileForm(FlaskForm):
name = StringField('Full Name', validators=[DataRequired(), Length(0, 120)])
email = StringField('Email', validators=[DataRequired(), Email()])
institution = StringField('Institution', validators=[DataRequired()])
experiment = SelectField('Experiment', validators=[DataRequired()],
choices=[("ATLAS", "ATLAS"), ("CMS", "CMS")],
default="ATLAS")
submit = SubmitField('Save Profile')
def __init__(self, user: Optional[UserModel] = None):
super().__init__()
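        # Pre-fill the form fields from an existing user profile when one is given.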
if user:
self.name.data = user.name
self.email.data = user.email
self.institution.data = user.institution
self.experiment.data = user.experiment
| from typing import Optional
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired, Length, Email
from servicex.models import UserModel
class ProfileForm(FlaskForm):
name = StringField('Full Name', validators=[DataRequired(), Length(0, 120)])
email = StringField('Email', validators=[DataRequired(), Email()])
institution = StringField('Institution', validators=[DataRequired()])
experiment = SelectField('Experiment', validators=[DataRequired()],
choices=[("ATLAS", "ATLAS"), ("CMS", "CMS")],
default="ATLAS")
submit = SubmitField('Save Profile')
def __init__(self, user: Optional[UserModel] = None):
super().__init__()
if user:
self.name.data = user.name
self.email.data = user.email
self.institution.data = user.institution
self.experiment.data = user.experiment
| none | 1 | 2.713077 | 3 |
|
data/studio21_generated/interview/1657/starter_code.py | vijaykumawat256/Prompt-Summarization | 0 | 5959 | <reponame>vijaykumawat256/Prompt-Summarization
def string_func(s, n):
| def string_func(s, n): | none | 1 | 1.370885 | 1 |
|
libs/export_pbs/exportPb.py | linye931025/FPN_Tensorflow-master | 0 | 5960 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os, sys
import tensorflow as tf
import tf_slim as slim
from tensorflow.python.tools import freeze_graph
sys.path.append('../../')
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.configs import cfgs
from libs.networks import build_whole_network
CKPT_PATH = '/home/yjr/PycharmProjects/Faster-RCNN_Tensorflow/output/trained_weights/FasterRCNN_20180517/voc_200000model.ckpt'
OUT_DIR = '../../output/Pbs'
PB_NAME = 'FasterRCNN_Res101_Pascal.pb'
def build_detection_graph():
# 1. preprocess img
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3],
name='input_img') # is RGB. not GBR
raw_shape = tf.shape(img_plac)
raw_h, raw_w = tf.to_float(raw_shape[0]), tf.to_float(raw_shape[1])
img_batch = tf.cast(img_plac, tf.float32)
img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
length_limitation=cfgs.IMG_MAX_LENGTH)
img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0) # [1, None, None, 3]
det_net = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
detected_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch=None)
xmin, ymin, xmax, ymax = detected_boxes[:, 0], detected_boxes[:, 1], \
detected_boxes[:, 2], detected_boxes[:, 3]
resized_shape = tf.shape(img_batch)
resized_h, resized_w = tf.to_float(resized_shape[1]), tf.to_float(resized_shape[2])
xmin = xmin * raw_w / resized_w
xmax = xmax * raw_w / resized_w
ymin = ymin * raw_h / resized_h
ymax = ymax * raw_h / resized_h
boxes = tf.transpose(tf.stack([xmin, ymin, xmax, ymax]))
dets = tf.concat([tf.reshape(detection_category, [-1, 1]),
tf.reshape(detection_scores, [-1, 1]),
boxes], axis=1, name='DetResults')
return dets
def export_frozenPB():
tf.reset_default_graph()
dets = build_detection_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
print("we have restred the weights from =====>>\n", CKPT_PATH)
saver.restore(sess, CKPT_PATH)
tf.train.write_graph(sess.graph_def, OUT_DIR, PB_NAME)
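        # Freeze the graph: fold the checkpoint weights into the GraphDef and write
        # a standalone *_Frozen.pb that can be loaded for inference.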
freeze_graph.freeze_graph(input_graph=os.path.join(OUT_DIR, PB_NAME),
input_saver='',
input_binary=False,
input_checkpoint=CKPT_PATH,
output_node_names="DetResults",
restore_op_name="save/restore_all",
filename_tensor_name='save/Const:0',
output_graph=os.path.join(OUT_DIR, PB_NAME.replace('.pb', '_Frozen.pb')),
clear_devices=False,
initializer_nodes='')
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = ''
export_frozenPB()
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os, sys
import tensorflow as tf
import tf_slim as slim
from tensorflow.python.tools import freeze_graph
sys.path.append('../../')
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.configs import cfgs
from libs.networks import build_whole_network
CKPT_PATH = '/home/yjr/PycharmProjects/Faster-RCNN_Tensorflow/output/trained_weights/FasterRCNN_20180517/voc_200000model.ckpt'
OUT_DIR = '../../output/Pbs'
PB_NAME = 'FasterRCNN_Res101_Pascal.pb'
def build_detection_graph():
# 1. preprocess img
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3],
name='input_img') # is RGB. not GBR
raw_shape = tf.shape(img_plac)
raw_h, raw_w = tf.to_float(raw_shape[0]), tf.to_float(raw_shape[1])
img_batch = tf.cast(img_plac, tf.float32)
img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
length_limitation=cfgs.IMG_MAX_LENGTH)
img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0) # [1, None, None, 3]
det_net = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
detected_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch=None)
xmin, ymin, xmax, ymax = detected_boxes[:, 0], detected_boxes[:, 1], \
detected_boxes[:, 2], detected_boxes[:, 3]
resized_shape = tf.shape(img_batch)
resized_h, resized_w = tf.to_float(resized_shape[1]), tf.to_float(resized_shape[2])
xmin = xmin * raw_w / resized_w
xmax = xmax * raw_w / resized_w
ymin = ymin * raw_h / resized_h
ymax = ymax * raw_h / resized_h
boxes = tf.transpose(tf.stack([xmin, ymin, xmax, ymax]))
dets = tf.concat([tf.reshape(detection_category, [-1, 1]),
tf.reshape(detection_scores, [-1, 1]),
boxes], axis=1, name='DetResults')
return dets
def export_frozenPB():
tf.reset_default_graph()
dets = build_detection_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
print("we have restred the weights from =====>>\n", CKPT_PATH)
saver.restore(sess, CKPT_PATH)
tf.train.write_graph(sess.graph_def, OUT_DIR, PB_NAME)
freeze_graph.freeze_graph(input_graph=os.path.join(OUT_DIR, PB_NAME),
input_saver='',
input_binary=False,
input_checkpoint=CKPT_PATH,
output_node_names="DetResults",
restore_op_name="save/restore_all",
filename_tensor_name='save/Const:0',
output_graph=os.path.join(OUT_DIR, PB_NAME.replace('.pb', '_Frozen.pb')),
clear_devices=False,
initializer_nodes='')
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = ''
export_frozenPB()
| en | 0.788513 | # -*- coding: utf-8 -*- # 1. preprocess img # is RGB. not GBR # [1, None, None, 3] | 2.163872 | 2 |
ngadnap/command_templates/adapter_removal.py | smilefreak/NaDNAP | 0 | 5961 | <gh_stars>0
"""
Adapter Removal templates
"""
# AdapterRemoval
#
# {0}: executable
# {1}: fastq1 abs
# {2}: fastq2 abs
# {3}: fastq1
# {4}: fastq2
# {5}: minimum length
# {6}: mismatch_rate
# {7}: min base quality
# {8}: min merge_length
__ADAPTER_REMOVAL__="""
{0} --collapse --file1 {1} --file2 {2} --outputstats {3}.stats --trimns --outputcollapsed {3}.collapsed --minlength {5} --output1 {3}.p1 --output2 {4}.p2 --mm {6} --minquality {7} --minalignmentlength {8} --trimqualities
"""
import os
from ngadnap.dependency_graph.graph import CommandNode
def adapter_removal(config, args, fq1 ,fq2):
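    # Build the AdapterRemoval command line for this read pair and wrap it in a
    # CommandNode so it can be scheduled through the dependency graph.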
fq1o = os.path.abspath(fq1)
fq2o = os.path.abspath(fq2)
cmd = __ADAPTER_REMOVAL__.format(config['adapter_removal']['executable'], fq1o, fq2o, fq1, fq2, args.adapt_min_length, args.adapt_mismatch_rate ,args.adapt_min_qual, args.adapt_alignment_length)
job_id = fq1 + ".adapter_removal"
return CommandNode(cmd, job_id, None, args.temp_directory)
| """
Adapter Removal templates
"""
# AdapterRemoval
#
# {0}: executable
# {1}: fastq1 abs
# {2}: fastq2 abs
# {3}: fastq1
# {4}: fastq2
# {5}: minimum length
# {6}: mismatch_rate
# {7}: min base quality
# {8}: min merge_length
__ADAPTER_REMOVAL__="""
{0} --collapse --file1 {1} --file2 {2} --outputstats {3}.stats --trimns --outputcollapsed {3}.collapsed --minlength {5} --output1 {3}.p1 --output2 {4}.p2 --mm {6} --minquality {7} --minalignmentlength {8} --trimqualities
"""
import os
from ngadnap.dependency_graph.graph import CommandNode
def adapter_removal(config, args, fq1 ,fq2):
fq1o = os.path.abspath(fq1)
fq2o = os.path.abspath(fq2)
cmd = __ADAPTER_REMOVAL__.format(config['adapter_removal']['executable'], fq1o, fq2o, fq1, fq2, args.adapt_min_length, args.adapt_mismatch_rate ,args.adapt_min_qual, args.adapt_alignment_length)
job_id = fq1 + ".adapter_removal"
return CommandNode(cmd, job_id, None, args.temp_directory) | en | 0.20981 | Adapter Removal templates # AdapterRemoval # # {0}: executable # {1}: fastq1 abs # {2}: fastq2 abs # {3}: fastq1 # {4}: fastq2 # {5}: minimum length # {6}: mismatch_rate # {7}: min base uality # {8}: min merge_length {0} --collapse --file1 {1} --file2 {2} --outputstats {3}.stats --trimns --outputcollapsed {3}.collapsed --minlength {5} --output1 {3}.p1 --output2 {4}.p2 --mm {6} --minquality {7} --minalignmentlength {8} --trimqualities | 2.241746 | 2 |
undercloud_heat_plugins/immutable_resources.py | AllenJSebastian/tripleo-common | 52 | 5962 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from heat.engine.resources.openstack.neutron import net
from heat.engine.resources.openstack.neutron import port
from heat.engine.resources.openstack.neutron import subnet
def _copy_schema_immutable(schema):
new_schema = copy.deepcopy(schema)
if not schema.update_allowed:
new_schema.immutable = True
return new_schema
class ImmutableNet(net.Net):
'''Ensure an existing net doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in net.Net.properties_schema.items()
}
class ImmutablePort(port.Port):
'''Ensure an existing port doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in port.Port.properties_schema.items()
}
class ImmutableSubnet(subnet.Subnet):
'''Ensure an existing subnet doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in subnet.Subnet.properties_schema.items()
}
def resource_mapping():
return {
'OS::Neutron::Net': ImmutableNet,
'OS::Neutron::Port': ImmutablePort,
'OS::Neutron::Subnet': ImmutableSubnet,
}
| #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from heat.engine.resources.openstack.neutron import net
from heat.engine.resources.openstack.neutron import port
from heat.engine.resources.openstack.neutron import subnet
def _copy_schema_immutable(schema):
new_schema = copy.deepcopy(schema)
if not schema.update_allowed:
new_schema.immutable = True
return new_schema
class ImmutableNet(net.Net):
'''Ensure an existing net doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in net.Net.properties_schema.items()
}
class ImmutablePort(port.Port):
'''Ensure an existing port doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in port.Port.properties_schema.items()
}
class ImmutableSubnet(subnet.Subnet):
'''Ensure an existing subnet doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in subnet.Subnet.properties_schema.items()
}
def resource_mapping():
return {
'OS::Neutron::Net': ImmutableNet,
'OS::Neutron::Port': ImmutablePort,
'OS::Neutron::Subnet': ImmutableSubnet,
}
| en | 0.880129 | # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Ensure an existing net doesn't change. Ensure an existing port doesn't change. Ensure an existing subnet doesn't change. | 1.809633 | 2 |
lm5/input.py | jmcph4/lm5 | 4 | 5963 | from copy import deepcopy
class Input(object):
def __init__(self, type, data):
self.__type = type
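        # Keep a deep copy so later mutations of the caller's data do not leak in.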
self.__data = deepcopy(data)
def __repr__(self):
return repr(self.__data)
def __str__(self):
return str(self.__type) + str(self.__data)
 | from copy import deepcopy
class Input(object):
def __init__(self, type, data):
self.__type = type
self.__data = deepcopy(data)
def __repr__(self):
return repr(self.__data)
def __str__(self):
return str(self.__type) + str(self.__data)
| none | 1 | 3.397468 | 3 |
|
slybot/setup.py | DataKnower/dk-portia | 0 | 5964 | <filename>slybot/setup.py
from os.path import join, abspath, dirname, exists
from slybot import __version__
from setuptools import setup, find_packages
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist
def build_js():
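    # Concatenate the Splash helper scripts into a single immediately-invoked
    # JavaScript function and bundle it inside the slybot package.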
root = abspath(dirname(__file__))
base_path = abspath(join(root, '..', 'splash_utils'))
if not exists(base_path):
base_path = abspath(join(root, '..', 'slyd', 'splash_utils'))
files = ('waitAsync.js', 'perform_actions.js')
fdata = []
for fname in files:
with open(join(base_path, fname)) as f:
fdata.append(f.read())
js_file = abspath(join(root, 'slybot', 'splash-script-combined.js'))
with open(js_file, 'w') as f:
f.write(';(function(){\n%s\n})();' % '\n'.join(fdata))
class bdist_egg_command(bdist_egg):
def run(self):
build_js()
bdist_egg.run(self)
class sdist_command(sdist):
def run(self):
build_js()
sdist.run(self)
install_requires = ['Scrapy', 'scrapely', 'loginform', 'lxml', 'jsonschema',
'dateparser', 'scrapyjs', 'page_finder', 'six']
extras = {
'tests': ['nose', 'nose-timer'],
'clustering': ['page_clustering']
}
setup(name='slybot',
version=__version__,
license='BSD',
description='Slybot crawler',
author='Scrapy project',
author_email='<EMAIL>',
url='http://github.com/scrapinghub/portia',
packages=find_packages(exclude=('tests', 'tests.*')),
platforms=['Any'],
scripts=['bin/slybot', 'bin/portiacrawl'],
install_requires=install_requires,
extras_require=extras,
package_data={'': ['slybot/splash-script-combined.js']},
include_package_data=True,
cmdclass={
'bdist_egg': bdist_egg_command,
'sdist': sdist_command
},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
])
| <filename>slybot/setup.py
from os.path import join, abspath, dirname, exists
from slybot import __version__
from setuptools import setup, find_packages
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist
def build_js():
root = abspath(dirname(__file__))
base_path = abspath(join(root, '..', 'splash_utils'))
if not exists(base_path):
base_path = abspath(join(root, '..', 'slyd', 'splash_utils'))
files = ('waitAsync.js', 'perform_actions.js')
fdata = []
for fname in files:
with open(join(base_path, fname)) as f:
fdata.append(f.read())
js_file = abspath(join(root, 'slybot', 'splash-script-combined.js'))
with open(js_file, 'w') as f:
f.write(';(function(){\n%s\n})();' % '\n'.join(fdata))
class bdist_egg_command(bdist_egg):
def run(self):
build_js()
bdist_egg.run(self)
class sdist_command(sdist):
def run(self):
build_js()
sdist.run(self)
install_requires = ['Scrapy', 'scrapely', 'loginform', 'lxml', 'jsonschema',
'dateparser', 'scrapyjs', 'page_finder', 'six']
extras = {
'tests': ['nose', 'nose-timer'],
'clustering': ['page_clustering']
}
setup(name='slybot',
version=__version__,
license='BSD',
description='Slybot crawler',
author='Scrapy project',
author_email='<EMAIL>',
url='http://github.com/scrapinghub/portia',
packages=find_packages(exclude=('tests', 'tests.*')),
platforms=['Any'],
scripts=['bin/slybot', 'bin/portiacrawl'],
install_requires=install_requires,
extras_require=extras,
package_data={'': ['slybot/splash-script-combined.js']},
include_package_data=True,
cmdclass={
'bdist_egg': bdist_egg_command,
'sdist': sdist_command
},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
])
| none | 1 | 2.039829 | 2 |
|
yolov3.py | huhuhang/yolov3 | 35 | 5965 | <filename>yolov3.py
import torch
import torch.nn as nn
from .yolo_layer import *
from .yolov3_base import *
class Yolov3(Yolov3Base):
def __init__(self, num_classes=80):
super().__init__()
self.backbone = Darknet([1,2,8,8,4])
anchors_per_region = 3
self.yolo_0_pre = Yolov3UpsamplePrep([512, 1024], 1024, anchors_per_region*(5+num_classes))
self.yolo_0 = YoloLayer(anchors=[(116., 90.), (156., 198.), (373., 326.)], stride=32, num_classes=num_classes)
self.yolo_1_c = ConvBN(512, 256, 1)
self.yolo_1_prep = Yolov3UpsamplePrep([256, 512], 512+256, anchors_per_region*(5+num_classes))
self.yolo_1 = YoloLayer(anchors=[(30., 61.), (62., 45.), (59., 119.)], stride=16, num_classes=num_classes)
self.yolo_2_c = ConvBN(256, 128, 1)
self.yolo_2_prep = Yolov3UpsamplePrep([128, 256], 256+128, anchors_per_region*(5+num_classes))
self.yolo_2 = YoloLayer(anchors=[(10., 13.), (16., 30.), (33., 23.)], stride=8, num_classes=num_classes)
def get_loss_layers(self):
return [self.yolo_0, self.yolo_1, self.yolo_2]
def forward_yolo(self, xb):
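        # xb holds backbone feature maps from shallow to deep: detect at the
        # coarsest scale first, then upsample and concatenate with shallower maps
        # (FPN-style) for the two finer-scale YOLO heads.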
x, y0 = self.yolo_0_pre(xb[-1])
x = self.yolo_1_c(x)
x = nn.Upsample(scale_factor=2, mode='nearest')(x)
x = torch.cat([x, xb[-2]], 1)
x, y1 = self.yolo_1_prep(x)
x = self.yolo_2_c(x)
x = nn.Upsample(scale_factor=2, mode='nearest')(x)
x = torch.cat([x, xb[-3]], 1)
x, y2 = self.yolo_2_prep(x)
return [y0, y1, y2]
###################################################################
## Backbone and helper modules
class DarknetBlock(nn.Module):
def __init__(self, ch_in):
super().__init__()
ch_hid = ch_in//2
self.conv1 = ConvBN(ch_in, ch_hid, kernel_size=1, stride=1, padding=0)
self.conv2 = ConvBN(ch_hid, ch_in, kernel_size=3, stride=1, padding=1)
def forward(self, x): return self.conv2(self.conv1(x)) + x
class Darknet(nn.Module):
def __init__(self, num_blocks, start_nf=32):
super().__init__()
nf = start_nf
self.base = ConvBN(3, nf, kernel_size=3, stride=1) #, padding=1)
self.layers = []
for i, nb in enumerate(num_blocks):
# dn_layer = make_group_layer(nf, nb, stride=(1 if i==-1 else 2))
dn_layer = self.make_group_layer(nf, nb, stride=2)
self.add_module(f"darknet_{i}", dn_layer)
self.layers.append(dn_layer)
nf *= 2
def make_group_layer(self, ch_in, num_blocks, stride=2):
layers = [ConvBN(ch_in, ch_in*2, stride=stride)]
for i in range(num_blocks): layers.append(DarknetBlock(ch_in*2))
return nn.Sequential(*layers)
def forward(self, x):
y = [self.base(x)]
for l in self.layers:
y.append(l(y[-1]))
return y
class Yolov3UpsamplePrep(nn.Module):
def __init__(self, filters_list, in_filters, out_filters):
super().__init__()
self.branch = nn.ModuleList([
ConvBN(in_filters, filters_list[0], 1),
ConvBN(filters_list[0], filters_list[1], kernel_size=3),
ConvBN(filters_list[1], filters_list[0], kernel_size=1),
ConvBN(filters_list[0], filters_list[1], kernel_size=3),
ConvBN(filters_list[1], filters_list[0], kernel_size=1),])
self.for_yolo = nn.ModuleList([
ConvBN(filters_list[0], filters_list[1], kernel_size=3),
nn.Conv2d(filters_list[1], out_filters, kernel_size=1, stride=1,
padding=0, bias=True)])
def forward(self, x):
for m in self.branch: x = m(x)
branch_out = x
for m in self.for_yolo: x = m(x)
return branch_out, x
| <filename>yolov3.py
import torch
import torch.nn as nn
from .yolo_layer import *
from .yolov3_base import *
class Yolov3(Yolov3Base):
def __init__(self, num_classes=80):
super().__init__()
self.backbone = Darknet([1,2,8,8,4])
anchors_per_region = 3
self.yolo_0_pre = Yolov3UpsamplePrep([512, 1024], 1024, anchors_per_region*(5+num_classes))
self.yolo_0 = YoloLayer(anchors=[(116., 90.), (156., 198.), (373., 326.)], stride=32, num_classes=num_classes)
self.yolo_1_c = ConvBN(512, 256, 1)
self.yolo_1_prep = Yolov3UpsamplePrep([256, 512], 512+256, anchors_per_region*(5+num_classes))
self.yolo_1 = YoloLayer(anchors=[(30., 61.), (62., 45.), (59., 119.)], stride=16, num_classes=num_classes)
self.yolo_2_c = ConvBN(256, 128, 1)
self.yolo_2_prep = Yolov3UpsamplePrep([128, 256], 256+128, anchors_per_region*(5+num_classes))
self.yolo_2 = YoloLayer(anchors=[(10., 13.), (16., 30.), (33., 23.)], stride=8, num_classes=num_classes)
def get_loss_layers(self):
return [self.yolo_0, self.yolo_1, self.yolo_2]
def forward_yolo(self, xb):
x, y0 = self.yolo_0_pre(xb[-1])
x = self.yolo_1_c(x)
x = nn.Upsample(scale_factor=2, mode='nearest')(x)
x = torch.cat([x, xb[-2]], 1)
x, y1 = self.yolo_1_prep(x)
x = self.yolo_2_c(x)
x = nn.Upsample(scale_factor=2, mode='nearest')(x)
x = torch.cat([x, xb[-3]], 1)
x, y2 = self.yolo_2_prep(x)
return [y0, y1, y2]
###################################################################
## Backbone and helper modules
class DarknetBlock(nn.Module):
def __init__(self, ch_in):
super().__init__()
ch_hid = ch_in//2
self.conv1 = ConvBN(ch_in, ch_hid, kernel_size=1, stride=1, padding=0)
self.conv2 = ConvBN(ch_hid, ch_in, kernel_size=3, stride=1, padding=1)
def forward(self, x): return self.conv2(self.conv1(x)) + x
class Darknet(nn.Module):
def __init__(self, num_blocks, start_nf=32):
super().__init__()
nf = start_nf
self.base = ConvBN(3, nf, kernel_size=3, stride=1) #, padding=1)
self.layers = []
for i, nb in enumerate(num_blocks):
# dn_layer = make_group_layer(nf, nb, stride=(1 if i==-1 else 2))
dn_layer = self.make_group_layer(nf, nb, stride=2)
self.add_module(f"darknet_{i}", dn_layer)
self.layers.append(dn_layer)
nf *= 2
def make_group_layer(self, ch_in, num_blocks, stride=2):
layers = [ConvBN(ch_in, ch_in*2, stride=stride)]
for i in range(num_blocks): layers.append(DarknetBlock(ch_in*2))
return nn.Sequential(*layers)
def forward(self, x):
y = [self.base(x)]
for l in self.layers:
y.append(l(y[-1]))
return y
class Yolov3UpsamplePrep(nn.Module):
def __init__(self, filters_list, in_filters, out_filters):
super().__init__()
self.branch = nn.ModuleList([
ConvBN(in_filters, filters_list[0], 1),
ConvBN(filters_list[0], filters_list[1], kernel_size=3),
ConvBN(filters_list[1], filters_list[0], kernel_size=1),
ConvBN(filters_list[0], filters_list[1], kernel_size=3),
ConvBN(filters_list[1], filters_list[0], kernel_size=1),])
self.for_yolo = nn.ModuleList([
ConvBN(filters_list[0], filters_list[1], kernel_size=3),
nn.Conv2d(filters_list[1], out_filters, kernel_size=1, stride=1,
padding=0, bias=True)])
def forward(self, x):
for m in self.branch: x = m(x)
branch_out = x
for m in self.for_yolo: x = m(x)
return branch_out, x
| de | 0.444692 | ################################################################### ## Backbone and helper modules #, padding=1) # dn_layer = make_group_layer(nf, nb, stride=(1 if i==-1 else 2)) | 2.322464 | 2 |
tests/assets/test_driver_errors.py | CyrilLeMat/modelkit | 0 | 5966 | import os
import pytest
from modelkit.assets import errors
from tests.conftest import skip_unless
def _perform_driver_error_object_not_found(driver):
with pytest.raises(errors.ObjectDoesNotExistError):
driver.download_object("someasset", "somedestination")
assert not os.path.isfile("somedestination")
def test_local_driver(local_assetsmanager):
local_driver = local_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(local_driver)
@skip_unless("ENABLE_GCS_TEST", "True")
def test_gcs_driver(gcs_assetsmanager):
gcs_driver = gcs_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(gcs_driver)
@skip_unless("ENABLE_S3_TEST", "True")
def test_s3_driver(s3_assetsmanager):
s3_driver = s3_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(s3_driver)
| import os
import pytest
from modelkit.assets import errors
from tests.conftest import skip_unless
def _perform_driver_error_object_not_found(driver):
with pytest.raises(errors.ObjectDoesNotExistError):
driver.download_object("someasset", "somedestination")
assert not os.path.isfile("somedestination")
def test_local_driver(local_assetsmanager):
local_driver = local_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(local_driver)
@skip_unless("ENABLE_GCS_TEST", "True")
def test_gcs_driver(gcs_assetsmanager):
gcs_driver = gcs_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(gcs_driver)
@skip_unless("ENABLE_S3_TEST", "True")
def test_s3_driver(s3_assetsmanager):
s3_driver = s3_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(s3_driver)
| none | 1 | 2.350005 | 2 |
|
wiki/tests.py | Jarquevious/makewiki | 0 | 5967 | from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page
# Create your tests here.
class PageTestCase(TestCase):
    def test_detail_page(self):
        """ Test to see if slug generated when saving a Page."""
        # Create a user and save to the database
        user = User.objects.create()
        user.save()
        # Create a page and save to the database
        page = Page(title="My Detail Test Page", content="details_test", author=user)
        page.save()
        # Slug is generated matches with what we expect
        slug = page.slug
        response = self.client.get(f'/{slug}/')
        self.assertEqual(response.status_code, 200)
        info = self.client.get('/')
        self.assertContains(info, 'makewiki', html=True)
    def test_edit_page(self):
        """Test edit page."""
        # Test data that will be displayed on the screen
        user = User.objects.create()
        user.save()
        page = Page.objects.create(title="My Test Page", content="edit_test", author=user)
        page.save()
        # Make a GET request to the MakeWiki homepage that will get a response back
        post_data = {
            'title': 'Who',
            'content': 'Are you?',
            'author': user.id,
        }
        response = self.client.post('/form/', data=post_data)
        # Check if response is 200
        self.assertEqual(response.status_code, 200)
        # Check the number of pages passed to the template matches the number of pages in the database
        end = self.client.get('/')
        result = end.context['pages']
        self.assertQuerysetEqual(result, ['<Page: My Test Page>', '<Page: Test>'], ordered=False)
    def test_page_creation(self):
        # Create user object and save it
        user = User.objects.create()
        user.save()
        # Create a page
        page = Page.objects.create(title="The Test Page", content="edit_test", author=user)
        page.save()
        post_data = {
            'title': 'COVID19',
            'content': 'Mass Testing is Underway',
            'author': user.id
        }
        response = self.client.post('/form/', data = post_data)
        self.assertEqual(response.status_code, 302)
        page_object = Page.objects.get(title='COVID19')
        self.assertEqual(page_object.content, 'Mass Testing is Underway') | from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page
# Create your tests here.
def test_detail_page(self):
""" Test to see if slug generated when saving a Page."""
# Create a user and save to the database
user = User.objects.create()
user.save()
# Create a page and save to the database
page = Page(title="My Detail Test Page", content="details_test", author=user)
page.save()
# Slug is generated matches with what we expect
slug = page.slug
response = self.client.get(f'/{slug}/')
self.assertEqual(response.status_code, 200)
info = self.client.get('/')
self.assertContains(info, 'makewiki', html=True)
def test_edit_page(self):
"""Test edit page."""
# Test data that will be displayed on the screen
user = User.objects.create()
user.save()
page = Page.objects.create(title="My Test Page", content="edit_test", author=user)
page.save()
# Make a GET request to the MakeWiki homepage that will get a response back
post_data = {
'title': 'Who',
'content': 'Are you?',
'author': user.id,
}
response = self.client.post('/form/', data=post_data)
# Check if response is 200
self.assertEqual(response.status_code, 200)
# Check the number of pages passed to the template matches the number of pages in the database
end = self.client.get('/')
result = end.context['pages']
self.assertQuerysetEqual(result, ['<Page: My Test Page>', '<Page: Test>'], ordered=False)
def test_page_creation(self):
# Create user object and save it
user = User.objects.create()
user.save()
# Create a page
page = Page.objects.create(title="The Test Page", content="edit_test", author=user)
page.save()
post_data = {
'title': 'COVID19',
'content': 'Mass Testing is Underway',
'author': user.id
}
response = self.client.post('/form/', data = post_data)
self.assertEqual(response.status_code, 302)
page_object = Page.objects.get(title='COVID19')
self.assertEqual(page_object.content, 'Mass Testing is Underway') | en | 0.795874 | # Create your tests here. Test to see if slug generated when saving a Page. # Create a user and save to the database # Create a page and save to the database # Slug is generated matches with what we expect Test edit page. # Test data that will be displayed on the screen # Make a GET request to the MakeWiki homepage that will get a response back # Check if response is 200 # Check the number of pages passed to the template matches the number of pages in the database # Create user object and save it # Create a page | 2.711636 | 3 |
BanditSim/__init__.py | AJB0211/BanditSim | 0 | 5968 | <reponame>AJB0211/BanditSim
from .multiarmedbandit import MultiArmedBandit
from .eps_greedy_constant_stepsize import EpsilonGreedyConstantStepsize
from .greedy_constant_stepsize import GreedyConstantStepsize
from .epsilon_greedy_average_step import EpsilonGreedyAverageStep
from .greedy_average_step import GreedyAverageStep
from .greedy_bayes_update import GreedyBayesianUpdate
from .eps_greedy_bayes_update import EpsilonGreedyBayesianUpdate
| from .multiarmedbandit import MultiArmedBandit
from .eps_greedy_constant_stepsize import EpsilonGreedyConstantStepsize
from .greedy_constant_stepsize import GreedyConstantStepsize
from .epsilon_greedy_average_step import EpsilonGreedyAverageStep
from .greedy_average_step import GreedyAverageStep
from .greedy_bayes_update import GreedyBayesianUpdate
from .eps_greedy_bayes_update import EpsilonGreedyBayesianUpdate | none | 1 | 1.028919 | 1 |
|
tests/queries/test_query.py | txf626/django | 2 | 5969 | from datetime import datetime
from django.core.exceptions import FieldError
from django.db.models import CharField, F, Q
from django.db.models.expressions import SimpleCol
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.query import Query
from django.db.models.sql.where import OR
from django.test import TestCase
from django.test.utils import register_lookup
from .models import Author, Item, ObjectC, Ranking
class TestQuery(TestCase):
def test_simple_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
def test_complex_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
self.assertEqual(where.connector, OR)
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
lookup = where.children[1]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.rhs, 0)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
def test_multiple_fields(self):
query = Query(Item)
where = query.build_where(Q(modified__gt=F('created')))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertIsInstance(lookup.rhs, SimpleCol)
self.assertIsInstance(lookup.lhs, SimpleCol)
self.assertEqual(lookup.rhs.target, Item._meta.get_field('created'))
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
def test_transform(self):
query = Query(Author)
with register_lookup(CharField, Lower):
where = query.build_where(~Q(name__lower='foo'))
lookup = where.children[0]
self.assertIsInstance(lookup, Exact)
self.assertIsInstance(lookup.lhs, Lower)
self.assertIsInstance(lookup.lhs.lhs, SimpleCol)
self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field('name'))
def test_negated_nullable(self):
query = Query(Item)
where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))
self.assertTrue(where.negated)
lookup = where.children[0]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
lookup = where.children[1]
self.assertIsInstance(lookup, IsNull)
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
def test_foreign_key(self):
query = Query(Item)
msg = 'Joined field references are not permitted in this query'
with self.assertRaisesMessage(FieldError, msg):
query.build_where(Q(creator__num__gt=2))
def test_foreign_key_f(self):
query = Query(Ranking)
with self.assertRaises(FieldError):
query.build_where(Q(rank__gt=F('author__num')))
def test_foreign_key_exclusive(self):
query = Query(ObjectC)
where = query.build_where(Q(objecta=None) | Q(objectb=None))
a_isnull = where.children[0]
self.assertIsInstance(a_isnull, RelatedIsNull)
self.assertIsInstance(a_isnull.lhs, SimpleCol)
self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field('objecta'))
b_isnull = where.children[1]
self.assertIsInstance(b_isnull, RelatedIsNull)
self.assertIsInstance(b_isnull.lhs, SimpleCol)
self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field('objectb'))
| from datetime import datetime
from django.core.exceptions import FieldError
from django.db.models import CharField, F, Q
from django.db.models.expressions import SimpleCol
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.query import Query
from django.db.models.sql.where import OR
from django.test import TestCase
from django.test.utils import register_lookup
from .models import Author, Item, ObjectC, Ranking
class TestQuery(TestCase):
def test_simple_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
def test_complex_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
self.assertEqual(where.connector, OR)
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
lookup = where.children[1]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.rhs, 0)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
def test_multiple_fields(self):
query = Query(Item)
where = query.build_where(Q(modified__gt=F('created')))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertIsInstance(lookup.rhs, SimpleCol)
self.assertIsInstance(lookup.lhs, SimpleCol)
self.assertEqual(lookup.rhs.target, Item._meta.get_field('created'))
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
def test_transform(self):
query = Query(Author)
with register_lookup(CharField, Lower):
where = query.build_where(~Q(name__lower='foo'))
lookup = where.children[0]
self.assertIsInstance(lookup, Exact)
self.assertIsInstance(lookup.lhs, Lower)
self.assertIsInstance(lookup.lhs.lhs, SimpleCol)
self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field('name'))
def test_negated_nullable(self):
query = Query(Item)
where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))
self.assertTrue(where.negated)
lookup = where.children[0]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
lookup = where.children[1]
self.assertIsInstance(lookup, IsNull)
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
def test_foreign_key(self):
query = Query(Item)
msg = 'Joined field references are not permitted in this query'
with self.assertRaisesMessage(FieldError, msg):
query.build_where(Q(creator__num__gt=2))
def test_foreign_key_f(self):
query = Query(Ranking)
with self.assertRaises(FieldError):
query.build_where(Q(rank__gt=F('author__num')))
def test_foreign_key_exclusive(self):
query = Query(ObjectC)
where = query.build_where(Q(objecta=None) | Q(objectb=None))
a_isnull = where.children[0]
self.assertIsInstance(a_isnull, RelatedIsNull)
self.assertIsInstance(a_isnull.lhs, SimpleCol)
self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field('objecta'))
b_isnull = where.children[1]
self.assertIsInstance(b_isnull, RelatedIsNull)
self.assertIsInstance(b_isnull.lhs, SimpleCol)
self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field('objectb'))
| none | 1 | 2.172581 | 2 |
|
src/matrix_game/matrix_game.py | ewanlee/mackrl | 26 | 5970 | # This notebook implements a proof-of-principle for
# Multi-Agent Common Knowledge Reinforcement Learning (MACKRL)
# The entire notebook can be executed online, no need to download anything
# http://pytorch.org/
from itertools import chain
import torch
import torch.nn.functional as F
from torch.multiprocessing import Pool, set_start_method, freeze_support
try:
set_start_method('spawn')
except RuntimeError:
pass
from torch.nn import init
from torch.optim import Adam, SGD
import numpy as np
import matplotlib.pyplot as plt
use_cuda = False
payoff_values = []
payoff_values.append(torch.tensor([ # payoff values
[5, 0, 0, 2, 0],
[0, 1, 2, 4, 2],
[0, 0, 0, 2, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
], dtype=torch.float32) * 0.2)
payoff_values.append(
torch.tensor([ # payoff values
[0, 0, 1, 0, 5],
[0, 0, 2, 0, 0],
[1, 2, 4, 2, 1],
[0, 0, 2, 0, 0],
[0, 0, 1, 0, 0],
], dtype=torch.float32) * 0.2)
n_agents = 2
n_actions = len(payoff_values[0])
n_states_dec = 5
n_states_joint = 3
n_mix_hidden = 3
p_observation = 0.5
p_ck_noise = [0.0]
# Number of gradient steps
t_max = 202
# We'll be using a high learning rate, since we have exact gradients
lr = 0.05 # DEBUG: 0.05 if exact gradients!
optim = 'adam'
# You can reduce this number if you are short on time. (Eg. n_trials = 20)
#n_trials = 100 # 30
n_trials = 20 #15 #100
std_val = 1.0
# These are the settings we can run: MACKRL, Joint-action-learner (always uses CK),
# Independent Actor-Critic (always uses decentralised action selection).
# Only IAC and JAL are enabled in the `labels` list below.
labels = ["IAC", "JAL"]
p_vec = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
final_res = []
# # Pair-Controller with 3 input state (no CK, CK & Matrix ID = 0, CK & Matrix ID = 1), n_actions^2 actions for
# # joint action + 1 action for delegation to the independent agents.
# theta_joint = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True), std=0.1)
# Produce marginalised policy: pi_pc[0] * pi^a * pi^b + p(u^ab)
def p_joint_all(pi_pc, pi_dec):
p_joint = pi_pc[1:].view(n_actions, n_actions).clone()
pi_a_pi_b = torch.ger(pi_dec[0], pi_dec[1])
p_joint = pi_pc[0] * pi_a_pi_b + p_joint
return p_joint
def p_joint_all_noise_alt(pi_pcs, pi_dec, p_ck_noise, ck_state):
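    # Each agent's common-knowledge signal is corrupted independently with
    # probability p_ck_noise, so the joint action distribution mixes the cases in
    # which neither, exactly one, or both agents perceived the wrong CK state.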
p_none = (1-p_ck_noise) ** 2 # both unnoised
p_both = (p_ck_noise) ** 2 # both noised
p_one = (1-p_ck_noise) * p_ck_noise # exactly one noised
p_marg_ag0_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag0_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag1_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_marg_ag1_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_joint_ck0 = pi_pcs[0][1:].view(n_actions, n_actions).clone()
p_joint_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone()
p_joint_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone()
p_d_ck0 = pi_pcs[0][0]
p_d_ck1 = pi_pcs[1][0]
p_d_ck2 = pi_pcs[2][0]
def make_joint(p1, p2, mode="interval"):
"""
1. Pick uniform random variable between [0,1]
2. Do multinomial sampling through contiguous, ordered bucketing for both p1, p2
"""
p1 = p1.clone().view(-1)
p2 = p2.clone().view(-1)
p_final = p1.clone().zero_()
if mode == "interval":
for i in range(p1.shape[0]):
# calculate overlap between the probability distributions
low1 = torch.sum(p1[:i])
high1 = low1 + p1[i]
low2 = torch.sum(p2[:i])
high2 = low2 + p2[i]
if low1 >= low2 and high2 > low1:
p_final[i] = torch.min(high1, high2) - low1
pass
elif low2 >= low1 and high1 > low2:
p_final[i] = torch.min(high1, high2) - low2
else:
p_final[i] = 0
return p_final.clone().view(n_actions, n_actions)
if ck_state == 0:
p_joint = p_joint_ck0 + p_d_ck0 * torch.ger(pi_dec[0], pi_dec[1])
return p_joint # always delegate
elif ck_state == 1:
p_joint = p_none * p_joint_ck1 + \
p_both * p_joint_ck2 + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
(p_one * p_d_ck1 * p_d_ck2
+ p_one * p_d_ck2 * p_d_ck1
+ p_both * p_d_ck2
+ p_none * p_d_ck1) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1])
return p_joint
elif ck_state == 2:
p_joint = p_none * p_joint_ck2 + \
p_both * p_joint_ck1 + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
(p_one * p_d_ck2 * p_d_ck1
+ p_one * p_d_ck1 * p_d_ck2
+ p_both * p_d_ck1
+ p_none * p_d_ck2) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1])
return p_joint
pass
def get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise=0):
if test:
beta = 100
else:
beta = 1
actions = []
pi_dec = []
# common_knowledge decides whether ck_state is informative
if common_knowledge == 0:
ck_state = 0
else:
ck_state = int(observations[0] + 1)
if p_ck_noise == 0:
pol_vals = theta_joint[ck_state, :].clone()
# logits get masked out for independent learner and joint-action-learner
# independent learner has a pair controller that always delegates
if run == 'JAL':
pol_vals[0] = -10 ** 10
elif run == 'IAC':
pol_vals[1:] = -10 ** 10
# apply temperature to set testing
pi_pc = F.softmax(pol_vals * beta, -1)
        # calculate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pc, pi_dec
else:
pol_vals = theta_joint.clone()
pi_pcs = []
for i in range(n_states_joint):
if run == 'JAL':
pol_vals[i][0] = -10 ** 10
elif run == 'IAC':
pol_vals[i][1:] = -10 ** 10
# apply temperature to set testing
pi_pcs.append(F.softmax(pol_vals[i] * beta, -1))
        # calculate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pcs, pi_dec, ck_state
def get_state(common_knowledge, obs_0, obs_1, matrix_id):
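    # Observation encoding: with common knowledge both agents observe matrix_id
    # directly (states 0/1); otherwise state 2 means "no private observation" and
    # states 3/4 encode a private observation of matrix 0 or 1 respectively.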
receives_obs = [obs_0, obs_1]
if common_knowledge == 1:
observations = np.repeat(matrix_id, 2)
else:
observations = np.ones((n_agents)) * 2 #
for ag in range(n_agents):
if receives_obs[ag]:
observations[ag] += matrix_id + 1
return common_knowledge, observations, matrix_id
# Calculate the expected return: sum_{\tau} P(\tau | pi) R(\tau)
def expected_return(p_common, p_observation, thetas, run, test, p_ck_noise=0):
thetas_dec = thetas["dec"]
theta_joint = thetas["joint"]
# Probability of CK
p_common_val = [1 - p_common, p_common]
# Probability of observation given no CK)
p_obs_val = [1 - p_observation, p_observation]
# Matrices are chosen 50 / 50
p_matrix = [0.5, 0.5]
# p_matrix = [1.0, 0.0] # DEBUG!
# Initialise expected return
ret_val = 0
for ck in [0, 1]:
for matrix_id in [0, 1]:
for obs_0 in [0, 1]:
for obs_1 in [0, 1]:
p_state = p_common_val[ck] * p_obs_val[obs_0] * p_obs_val[obs_1] * p_matrix[matrix_id]
common_knowledge, observations, matrix_id = get_state(ck, obs_0, obs_1, matrix_id)
# Get final probabilities for joint actions
if p_ck_noise==0:
pi_pc, pi_dec = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint)
p_joint_val = p_joint_all(pi_pc, pi_dec)
else:
pol_vals, pi_dec, ck_state = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise)
p_joint_val = p_joint_all_noise_alt(pol_vals, pi_dec, p_ck_noise, ck_state)
# Expected return is just the elementwise product of rewards and action probabilities
expected_ret = (p_joint_val * payoff_values[matrix_id]).sum()
# Add return from given state
ret_val = ret_val + p_state * expected_ret
return ret_val
def _proc(args):
p_common, p_observation, run, p_ck_noise, t_max, n_trials = args
results = []
for nt in range(n_trials):
print("Run: {} P_CK_NOISE: {} P_common: {} #Trial: {}".format(run, p_ck_noise, p_common, nt))
results_log = np.zeros((t_max // (t_max // 100),))
results_log_test = np.zeros((t_max // (t_max // 100),))
thetas = {}
thetas["dec"] = [init.normal_(torch.zeros(n_states_dec, n_actions, requires_grad=True), std=std_val) for i in
range(n_agents)]
thetas["joint"] = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True),
std=std_val)
params = chain(*[_v if isinstance(_v, (list, tuple)) else [_v] for _v in thetas.values()])
params = list(params)
if use_cuda:
for param in params:
param = param.to("cuda")
if optim == 'sgd':
optimizer = SGD(params, lr=lr)
else:
optimizer = Adam(params, lr=lr)
for i in range(t_max):
if run in ['MACKRL',
'JAL',
'IAC']:
loss = - expected_return(p_common, p_observation, thetas, run, False, p_ck_noise)
r_s = -loss.data.numpy()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % (t_max // 100) == 0:
if run in ['MACKRL',
'JAL',
'IAC']:
r_test = expected_return(p_common, p_observation, thetas, run, True, p_ck_noise)
results_log_test[i // (t_max // 100)] = r_test
results_log[i // (t_max // 100)] = r_s
results.append((results_log_test, results_log))
return results
def main():
use_mp = True
if use_mp:
pool = Pool(processes=2)
# Well be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = pool.map(_proc, [ (pc, p_observation, run, pnoise, t_max, n_trials) for pc in p_vec ])
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
pool.close()
pool.join()
else:
# Well be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = [_proc((pc, p_observation, run, pnoise, t_max, n_trials)) for pc in p_vec ]
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
import pickle
import uuid
import os
res_dict = {}
res_dict["final_res"] = final_res
res_dict["labels"] = labels
res_dict["p_ck_noise"] = p_ck_noise
res_dict["p_vec"] = p_vec
if not os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles")):
os.makedirs(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles"))
pickle.dump(res_dict, open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles",
"final_res_{}.p".format(uuid.uuid4().hex[:4])), "wb"))
plt.figure(figsize=(5, 5))
color = ['b', 'r','g', 'c', 'm', 'y', 'k','b', 'r','g', 'c', 'm', 'y', 'k']
titles = ['Test', 'Train Performance']
for pl in [0,1]:
ax = plt.subplot(1, 1, 1)
for i in range(len(labels)):
for pck, pcknoise in enumerate(p_ck_noise):
mean_vals = []
min_vals = []
max_vals = []
for j, p in enumerate( p_vec ):
vals = final_res[j][pl]
this_mean = np.mean( vals[i*len(p_ck_noise) + pck], 1)[-1]
std = np.std(vals[i], 1)[-1]/0.5
low = this_mean-std / (n_trials)**0.5
high = this_mean + std / (n_trials)**0.5
mean_vals.append( this_mean )
min_vals.append( low )
max_vals.append( high )
plt.plot(p_vec,
mean_vals,
color[(i*len(p_ck_noise) + pck) % len(color)],
label = "{} p_ck_noise: {}".format(labels[i], pcknoise))
plt.fill_between(p_vec,
min_vals,
max_vals,
facecolor=color[i],
alpha=0.3)
plt.xlabel('P(common knowledge)')
plt.ylabel('Expected Return')
plt.ylim([0.0, 1.01])
plt.xlim([-0.01, 1.01])
ax.set_facecolor((1.0, 1.0, 1.0))
ax.grid(color='k', linestyle='-', linewidth=1)
ax.set_title(titles[pl])
plt.legend()
plt.xticks([0, 0.5, 1])
plt.yticks([0.5, 0.75, 1])
plt.savefig("MACKRL {}.pdf".format(titles[pl]))
plt.show(block=False)
if __name__ == "__main__":
freeze_support()
main() | # This notebook implements a proof-of-principle for
# Multi-Agent Common Knowledge Reinforcement Learning (MACKRL)
# The entire notebook can be executed online, no need to download anything
# http://pytorch.org/
from itertools import chain
import torch
import torch.nn.functional as F
from torch.multiprocessing import Pool, set_start_method, freeze_support
try:
set_start_method('spawn')
except RuntimeError:
pass
from torch.nn import init
from torch.optim import Adam, SGD
import numpy as np
import matplotlib.pyplot as plt
use_cuda = False
payoff_values = []
payoff_values.append(torch.tensor([ # payoff values
[5, 0, 0, 2, 0],
[0, 1, 2, 4, 2],
[0, 0, 0, 2, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
], dtype=torch.float32) * 0.2)
payoff_values.append(
torch.tensor([ # payoff values
[0, 0, 1, 0, 5],
[0, 0, 2, 0, 0],
[1, 2, 4, 2, 1],
[0, 0, 2, 0, 0],
[0, 0, 1, 0, 0],
], dtype=torch.float32) * 0.2)
n_agents = 2
n_actions = len(payoff_values[0])
n_states_dec = 5
n_states_joint = 3
n_mix_hidden = 3
p_observation = 0.5
p_ck_noise = [0.0]
# Number of gradient steps
t_max = 202
# We'll be using a high learning rate, since we have exact gradients
lr = 0.05 # DEBUG: 0.05 if exact gradients!
optim = 'adam'
# You can reduce this number if you are short on time. (Eg. n_trials = 20)
#n_trials = 100 # 30
n_trials = 20 #15 #100
std_val = 1.0
# These are the 3 settings we run: MACRKL, Joint-action-learner (always uses CK),
# Independent Actor-Critic (always uses decentralised actions selection)
labels = ["IAC", "JAL"]
p_vec = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
final_res = []
# # Pair-Controller with 3 input state (no CK, CK & Matrix ID = 0, CK & Matrix ID = 1), n_actions^2 actions for
# # joint action + 1 action for delegation to the independent agents.
# theta_joint = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True), std=0.1)
# Produce marginalised policy: pi_pc[0] * pi^a * pi^b + p(u^ab)
def p_joint_all(pi_pc, pi_dec):
p_joint = pi_pc[1:].view(n_actions, n_actions).clone()
pi_a_pi_b = torch.ger(pi_dec[0], pi_dec[1])
p_joint = pi_pc[0] * pi_a_pi_b + p_joint
return p_joint
def p_joint_all_noise_alt(pi_pcs, pi_dec, p_ck_noise, ck_state):
p_none = (1-p_ck_noise) ** 2 # both unnoised
p_both = (p_ck_noise) ** 2 # both noised
p_one = (1-p_ck_noise) * p_ck_noise # exactly one noised
p_marg_ag0_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag0_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag1_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_marg_ag1_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_joint_ck0 = pi_pcs[0][1:].view(n_actions, n_actions).clone()
p_joint_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone()
p_joint_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone()
p_d_ck0 = pi_pcs[0][0]
p_d_ck1 = pi_pcs[1][0]
p_d_ck2 = pi_pcs[2][0]
def make_joint(p1, p2, mode="interval"):
"""
1. Pick uniform random variable between [0,1]
2. Do multinomial sampling through contiguous, ordered bucketing for both p1, p2
"""
p1 = p1.clone().view(-1)
p2 = p2.clone().view(-1)
p_final = p1.clone().zero_()
if mode == "interval":
for i in range(p1.shape[0]):
# calculate overlap between the probability distributions
low1 = torch.sum(p1[:i])
high1 = low1 + p1[i]
low2 = torch.sum(p2[:i])
high2 = low2 + p2[i]
if low1 >= low2 and high2 > low1:
p_final[i] = torch.min(high1, high2) - low1
pass
elif low2 >= low1 and high1 > low2:
p_final[i] = torch.min(high1, high2) - low2
else:
p_final[i] = 0
return p_final.clone().view(n_actions, n_actions)
if ck_state == 0:
p_joint = p_joint_ck0 + p_d_ck0 * torch.ger(pi_dec[0], pi_dec[1])
return p_joint # always delegate
elif ck_state == 1:
p_joint = p_none * p_joint_ck1 + \
p_both * p_joint_ck2 + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
(p_one * p_d_ck1 * p_d_ck2
+ p_one * p_d_ck2 * p_d_ck1
+ p_both * p_d_ck2
+ p_none * p_d_ck1) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1])
return p_joint
elif ck_state == 2:
p_joint = p_none * p_joint_ck2 + \
p_both * p_joint_ck1 + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
(p_one * p_d_ck2 * p_d_ck1
+ p_one * p_d_ck1 * p_d_ck2
+ p_both * p_d_ck1
+ p_none * p_d_ck2) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1])
return p_joint
pass
def get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise=0):
if test:
beta = 100
else:
beta = 1
actions = []
pi_dec = []
# common_knowledge decides whether ck_state is informative
if common_knowledge == 0:
ck_state = 0
else:
ck_state = int(observations[0] + 1)
if p_ck_noise == 0:
pol_vals = theta_joint[ck_state, :].clone()
# logits get masked out for independent learner and joint-action-learner
# independent learner has a pair controller that always delegates
if run == 'JAL':
pol_vals[0] = -10 ** 10
elif run == 'IAC':
pol_vals[1:] = -10 ** 10
# apply temperature to set testing
pi_pc = F.softmax(pol_vals * beta, -1)
# calcuate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pc, pi_dec
else:
pol_vals = theta_joint.clone()
pi_pcs = []
for i in range(n_states_joint):
if run == 'JAL':
pol_vals[i][0] = -10 ** 10
elif run == 'IAC':
pol_vals[i][1:] = -10 ** 10
# apply temperature to set testing
pi_pcs.append(F.softmax(pol_vals[i] * beta, -1))
# calcuate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pcs, pi_dec, ck_state
def get_state(common_knowledge, obs_0, obs_1, matrix_id):
receives_obs = [obs_0, obs_1]
if common_knowledge == 1:
observations = np.repeat(matrix_id, 2)
else:
observations = np.ones((n_agents)) * 2 #
for ag in range(n_agents):
if receives_obs[ag]:
observations[ag] += matrix_id + 1
return common_knowledge, observations, matrix_id
# Calculate the expected return: sum_{\tau} P(\tau | pi) R(\tau)
def expected_return(p_common, p_observation, thetas, run, test, p_ck_noise=0):
thetas_dec = thetas["dec"]
theta_joint = thetas["joint"]
# Probability of CK
p_common_val = [1 - p_common, p_common]
# Probability of observation given no CK)
p_obs_val = [1 - p_observation, p_observation]
# Matrices are chosen 50 / 50
p_matrix = [0.5, 0.5]
# p_matrix = [1.0, 0.0] # DEBUG!
# Initialise expected return
ret_val = 0
for ck in [0, 1]:
for matrix_id in [0, 1]:
for obs_0 in [0, 1]:
for obs_1 in [0, 1]:
p_state = p_common_val[ck] * p_obs_val[obs_0] * p_obs_val[obs_1] * p_matrix[matrix_id]
common_knowledge, observations, matrix_id = get_state(ck, obs_0, obs_1, matrix_id)
# Get final probabilities for joint actions
if p_ck_noise==0:
pi_pc, pi_dec = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint)
p_joint_val = p_joint_all(pi_pc, pi_dec)
else:
pol_vals, pi_dec, ck_state = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise)
p_joint_val = p_joint_all_noise_alt(pol_vals, pi_dec, p_ck_noise, ck_state)
# Expected return is just the elementwise product of rewards and action probabilities
expected_ret = (p_joint_val * payoff_values[matrix_id]).sum()
# Add return from given state
ret_val = ret_val + p_state * expected_ret
return ret_val
def _proc(args):
p_common, p_observation, run, p_ck_noise, t_max, n_trials = args
results = []
for nt in range(n_trials):
print("Run: {} P_CK_NOISE: {} P_common: {} #Trial: {}".format(run, p_ck_noise, p_common, nt))
results_log = np.zeros((t_max // (t_max // 100),))
results_log_test = np.zeros((t_max // (t_max // 100),))
thetas = {}
thetas["dec"] = [init.normal_(torch.zeros(n_states_dec, n_actions, requires_grad=True), std=std_val) for i in
range(n_agents)]
thetas["joint"] = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True),
std=std_val)
params = chain(*[_v if isinstance(_v, (list, tuple)) else [_v] for _v in thetas.values()])
params = list(params)
if use_cuda:
for param in params:
param = param.to("cuda")
if optim == 'sgd':
optimizer = SGD(params, lr=lr)
else:
optimizer = Adam(params, lr=lr)
for i in range(t_max):
if run in ['MACKRL',
'JAL',
'IAC']:
loss = - expected_return(p_common, p_observation, thetas, run, False, p_ck_noise)
r_s = -loss.data.numpy()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % (t_max // 100) == 0:
if run in ['MACKRL',
'JAL',
'IAC']:
r_test = expected_return(p_common, p_observation, thetas, run, True, p_ck_noise)
results_log_test[i // (t_max // 100)] = r_test
results_log[i // (t_max // 100)] = r_s
results.append((results_log_test, results_log))
return results
def main():
use_mp = True
if use_mp:
pool = Pool(processes=2)
# Well be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = pool.map(_proc, [ (pc, p_observation, run, pnoise, t_max, n_trials) for pc in p_vec ])
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
pool.close()
pool.join()
else:
# Well be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = [_proc((pc, p_observation, run, pnoise, t_max, n_trials)) for pc in p_vec ]
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
import pickle
import uuid
import os
res_dict = {}
res_dict["final_res"] = final_res
res_dict["labels"] = labels
res_dict["p_ck_noise"] = p_ck_noise
res_dict["p_vec"] = p_vec
if not os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles")):
os.makedirs(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles"))
pickle.dump(res_dict, open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles",
"final_res_{}.p".format(uuid.uuid4().hex[:4])), "wb"))
plt.figure(figsize=(5, 5))
color = ['b', 'r','g', 'c', 'm', 'y', 'k','b', 'r','g', 'c', 'm', 'y', 'k']
titles = ['Test', 'Train Performance']
for pl in [0,1]:
ax = plt.subplot(1, 1, 1)
for i in range(len(labels)):
for pck, pcknoise in enumerate(p_ck_noise):
mean_vals = []
min_vals = []
max_vals = []
for j, p in enumerate( p_vec ):
vals = final_res[j][pl]
this_mean = np.mean( vals[i*len(p_ck_noise) + pck], 1)[-1]
std = np.std(vals[i], 1)[-1]/0.5
low = this_mean-std / (n_trials)**0.5
high = this_mean + std / (n_trials)**0.5
mean_vals.append( this_mean )
min_vals.append( low )
max_vals.append( high )
plt.plot(p_vec,
mean_vals,
color[(i*len(p_ck_noise) + pck) % len(color)],
label = "{} p_ck_noise: {}".format(labels[i], pcknoise))
plt.fill_between(p_vec,
min_vals,
max_vals,
facecolor=color[i],
alpha=0.3)
plt.xlabel('P(common knowledge)')
plt.ylabel('Expected Return')
plt.ylim([0.0, 1.01])
plt.xlim([-0.01, 1.01])
ax.set_facecolor((1.0, 1.0, 1.0))
ax.grid(color='k', linestyle='-', linewidth=1)
ax.set_title(titles[pl])
plt.legend()
plt.xticks([0, 0.5, 1])
plt.yticks([0.5, 0.75, 1])
plt.savefig("MACKRL {}.pdf".format(titles[pl]))
plt.show(block=False)
if __name__ == "__main__":
freeze_support()
main() | en | 0.800162 | # This notebook implements a proof-of-principle for # Multi-Agent Common Knowledge Reinforcement Learning (MACKRL) # The entire notebook can be executed online, no need to download anything # http://pytorch.org/ # payoff values # payoff values # Number of gradient steps # We'll be using a high learning rate, since we have exact gradients # DEBUG: 0.05 if exact gradients! # You can reduce this number if you are short on time. (Eg. n_trials = 20) #n_trials = 100 # 30 #15 #100 # These are the 3 settings we run: MACRKL, Joint-action-learner (always uses CK), # Independent Actor-Critic (always uses decentralised actions selection) # # Pair-Controller with 3 input state (no CK, CK & Matrix ID = 0, CK & Matrix ID = 1), n_actions^2 actions for # # joint action + 1 action for delegation to the independent agents. # theta_joint = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True), std=0.1) # Produce marginalised policy: pi_pc[0] * pi^a * pi^b + p(u^ab) # both unnoised # both noised # exactly one noised 1. Pick uniform random variable between [0,1] 2. Do multinomial sampling through contiguous, ordered bucketing for both p1, p2 # calculate overlap between the probability distributions # always delegate # common_knowledge decides whether ck_state is informative # logits get masked out for independent learner and joint-action-learner # independent learner has a pair controller that always delegates # apply temperature to set testing # calcuate decentralised policies # apply temperature to set testing # calcuate decentralised policies # # Calculate the expected return: sum_{\tau} P(\tau | pi) R(\tau) # Probability of CK # Probability of observation given no CK) # Matrices are chosen 50 / 50 # p_matrix = [1.0, 0.0] # DEBUG! # Initialise expected return # Get final probabilities for joint actions # Expected return is just the elementwise product of rewards and action probabilities # Add return from given state #Trial: {}".format(run, p_ck_noise, p_common, nt)) # Well be appending results to these lists # Well be appending results to these lists | 2.391323 | 2 |
pr_consistency/2.find_pr_branches.py | adrn/astropy-tools | 10 | 5971 | <reponame>adrn/astropy-tools
# The purpose of this script is to check all the maintenance branches of the
# given repository, and find which pull requests are included in which
# branches. The output is a JSON file that contains for each pull request the
# list of all branches in which it is included. We look specifically for the
# message "Merge pull request #xxxx " in commit messages, so this is not
# completely foolproof, but seems to work for now.
import os
import sys
import json
import re
import subprocess
import tempfile
from collections import defaultdict
from astropy.utils.console import color_print
from common import get_branches
if sys.argv[1:]:
REPOSITORY_NAME = sys.argv[1]
else:
REPOSITORY_NAME = 'astropy/astropy'
print("The repository this script currently works with is '{}'.\n"
.format(REPOSITORY_NAME))
REPOSITORY = f'git://github.com/{REPOSITORY_NAME}.git'
NAME = os.path.basename(REPOSITORY_NAME)
DIRTOCLONEIN = tempfile.mkdtemp() # set this to a non-temp directory to retain the clone between runs
ORIGIN = 'origin' # set this to None to not fetch anything but rather use the directory as-is.
STARTDIR = os.path.abspath('.')
# The branches we are interested in
BRANCHES = get_branches(REPOSITORY_NAME)
# Read in a list of all the PRs
with open(f'merged_pull_requests_{NAME}.json') as merged:
merged_prs = json.load(merged)
# Set up a dictionary where each key will be a PR and each value will be a list
# of branches in which the PR is present
pr_branches = defaultdict(list)
try:
# Set up repository
color_print(f'Cloning {REPOSITORY}', 'green')
os.chdir(DIRTOCLONEIN)
if os.path.isdir(NAME):
# already exists... assume its the right thing
color_print('"{}" directory already exists - assuming it is an already '
'existing clone'.format(NAME), 'yellow')
os.chdir(NAME)
if ORIGIN:
subprocess.call(f'git fetch {ORIGIN}', shell=True)
else:
subprocess.call(f'git clone {REPOSITORY}', shell=True)
os.chdir(NAME)
# Loop over branches and find all PRs in the branch
for branch in BRANCHES:
# Change branch
color_print(f'Switching to branch {branch}', 'green')
subprocess.call('git reset --hard', shell=True)
subprocess.call('git clean -fxd', shell=True)
subprocess.call(f'git checkout {branch}', shell=True)
if ORIGIN:
subprocess.call(f'git reset --hard {ORIGIN}/{branch}', shell=True)
# Extract log:
log = subprocess.check_output('git log', shell=True).decode('utf-8')
# Check for the presence of the PR in the log
for pr in (re.findall(r'Merge pull request #(\d+) ', log) +
re.findall(r'Backport PR #(\d+):', log)):
pr_branches[pr].append(branch)
finally:
os.chdir(STARTDIR)
with open(f'pull_requests_branches_{NAME}.json', 'w') as f:
json.dump(pr_branches, f, sort_keys=True, indent=2)
| # The purpose of this script is to check all the maintenance branches of the
# given repository, and find which pull requests are included in which
# branches. The output is a JSON file that contains for each pull request the
# list of all branches in which it is included. We look specifically for the
# message "Merge pull request #xxxx " in commit messages, so this is not
# completely foolproof, but seems to work for now.
import os
import sys
import json
import re
import subprocess
import tempfile
from collections import defaultdict
from astropy.utils.console import color_print
from common import get_branches
if sys.argv[1:]:
REPOSITORY_NAME = sys.argv[1]
else:
REPOSITORY_NAME = 'astropy/astropy'
print("The repository this script currently works with is '{}'.\n"
.format(REPOSITORY_NAME))
REPOSITORY = f'git://github.com/{REPOSITORY_NAME}.git'
NAME = os.path.basename(REPOSITORY_NAME)
DIRTOCLONEIN = tempfile.mkdtemp() # set this to a non-temp directory to retain the clone between runs
ORIGIN = 'origin' # set this to None to not fetch anything but rather use the directory as-is.
STARTDIR = os.path.abspath('.')
# The branches we are interested in
BRANCHES = get_branches(REPOSITORY_NAME)
# Read in a list of all the PRs
with open(f'merged_pull_requests_{NAME}.json') as merged:
merged_prs = json.load(merged)
# Set up a dictionary where each key will be a PR and each value will be a list
# of branches in which the PR is present
pr_branches = defaultdict(list)
try:
# Set up repository
color_print(f'Cloning {REPOSITORY}', 'green')
os.chdir(DIRTOCLONEIN)
if os.path.isdir(NAME):
# already exists... assume its the right thing
color_print('"{}" directory already exists - assuming it is an already '
'existing clone'.format(NAME), 'yellow')
os.chdir(NAME)
if ORIGIN:
subprocess.call(f'git fetch {ORIGIN}', shell=True)
else:
subprocess.call(f'git clone {REPOSITORY}', shell=True)
os.chdir(NAME)
# Loop over branches and find all PRs in the branch
for branch in BRANCHES:
# Change branch
color_print(f'Switching to branch {branch}', 'green')
subprocess.call('git reset --hard', shell=True)
subprocess.call('git clean -fxd', shell=True)
subprocess.call(f'git checkout {branch}', shell=True)
if ORIGIN:
subprocess.call(f'git reset --hard {ORIGIN}/{branch}', shell=True)
# Extract log:
log = subprocess.check_output('git log', shell=True).decode('utf-8')
# Check for the presence of the PR in the log
for pr in (re.findall(r'Merge pull request #(\d+) ', log) +
re.findall(r'Backport PR #(\d+):', log)):
pr_branches[pr].append(branch)
finally:
os.chdir(STARTDIR)
with open(f'pull_requests_branches_{NAME}.json', 'w') as f:
json.dump(pr_branches, f, sort_keys=True, indent=2) | en | 0.934936 | # The purpose of this script is to check all the maintenance branches of the # given repository, and find which pull requests are included in which # branches. The output is a JSON file that contains for each pull request the # list of all branches in which it is included. We look specifically for the # message "Merge pull request #xxxx " in commit messages, so this is not # completely foolproof, but seems to work for now. # set this to a non-temp directory to retain the clone between runs # set this to None to not fetch anything but rather use the directory as-is. # The branches we are interested in # Read in a list of all the PRs # Set up a dictionary where each key will be a PR and each value will be a list # of branches in which the PR is present # Set up repository # already exists... assume its the right thing # Loop over branches and find all PRs in the branch # Change branch # Extract log: # Check for the presence of the PR in the log #(\d+) ', log) + #(\d+):', log)): | 2.956575 | 3 |
agents/solo_q_agents/q_agent_test/aux.py | pedMatias/matias_hfo | 1 | 5972 | <reponame>pedMatias/matias_hfo
from datetime import datetime as dt
import os
import numpy as np
import settings
def mkdir():
now = dt.now().replace(second=0, microsecond=0)
name_dir = "q_agent_train_" + now.strftime("%Y-%m-%d_%H:%M:%S")
path = os.path.join(settings.MODELS_DIR, name_dir)
try:
os.mkdir(path)
except FileExistsError:
name_dir += "_2"
path = os.path.join(settings.MODELS_DIR, name_dir)
os.mkdir(path)
return path
def save_model(q_table: str, directory: str, file_name: str):
file_path = os.path.join(directory, file_name)
np.save(file_path, q_table)
| from datetime import datetime as dt
import os
import numpy as np
import settings
def mkdir():
now = dt.now().replace(second=0, microsecond=0)
name_dir = "q_agent_train_" + now.strftime("%Y-%m-%d_%H:%M:%S")
path = os.path.join(settings.MODELS_DIR, name_dir)
try:
os.mkdir(path)
except FileExistsError:
name_dir += "_2"
path = os.path.join(settings.MODELS_DIR, name_dir)
os.mkdir(path)
return path
def save_model(q_table: str, directory: str, file_name: str):
file_path = os.path.join(directory, file_name)
np.save(file_path, q_table) | none | 1 | 2.607308 | 3 |
|
Python38/Lib/site-packages/PyInstaller/hooks/hook-PyQt4.py | AXFS-H/Windows10Debloater | 5 | 5973 | <filename>Python38/Lib/site-packages/PyInstaller/hooks/hook-PyQt4.py
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import qt_menu_nib_dir
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows system PATH has to be extended to point to the PyQt4 directory.
# The PySide directory contains Qt dlls. We need to avoid including different
# version of Qt libraries when there is installed another application (e.g. QtCreator)
if is_win:
from PyInstaller.utils.win32.winutils import extend_system_path
extend_system_path([os.path.join(x, 'PyQt4') for x in getsitepackages()])
hiddenimports = ['sip']
# For Qt to work on Mac OS X it is necessary to include directory qt_menu.nib.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
datas = [
(qt_menu_nib_dir('PyQt4'), 'qt_menu.nib'),
]
| <filename>Python38/Lib/site-packages/PyInstaller/hooks/hook-PyQt4.py
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import qt_menu_nib_dir
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows system PATH has to be extended to point to the PyQt4 directory.
# The PySide directory contains Qt dlls. We need to avoid including different
# version of Qt libraries when there is installed another application (e.g. QtCreator)
if is_win:
from PyInstaller.utils.win32.winutils import extend_system_path
extend_system_path([os.path.join(x, 'PyQt4') for x in getsitepackages()])
hiddenimports = ['sip']
# For Qt to work on Mac OS X it is necessary to include directory qt_menu.nib.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
datas = [
(qt_menu_nib_dir('PyQt4'), 'qt_menu.nib'),
]
| en | 0.713778 | #----------------------------------------------------------------------------- # Copyright (c) 2013-2020, PyInstaller Development Team. # # Distributed under the terms of the GNU General Public License (version 2 # or later) with exception for distributing the bootloader. # # The full license is in the file COPYING.txt, distributed with this software. # # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) #----------------------------------------------------------------------------- # On Windows system PATH has to be extended to point to the PyQt4 directory. # The PySide directory contains Qt dlls. We need to avoid including different # version of Qt libraries when there is installed another application (e.g. QtCreator) # For Qt to work on Mac OS X it is necessary to include directory qt_menu.nib. # This directory contains some resource files necessary to run PyQt or PySide # app. | 1.82545 | 2 |
timeserio/utils/functools.py | ig248/timeserio | 63 | 5974 | import inspect
def get_default_args(func):
"""Get default arguments of a function.
"""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
| import inspect
def get_default_args(func):
"""Get default arguments of a function.
"""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
| en | 0.082278 | Get default arguments of a function. | 3.104059 | 3 |
Sec_10_expr_lambdas_fun_integradas/a_lambdas.py | PauloAlexSilva/Python | 0 | 5975 | <filename>Sec_10_expr_lambdas_fun_integradas/a_lambdas.py
"""
Utilizando Lambdas
Conhecidas por Expressões Lambdas, ou simplesmente Lambdas, são funções sem nome, ou seja,
funções anónimas.
# Função em Python
def funcao(x):
return 3 * x + 1
print(funcao(4))
print(funcao(7))
# Expressão Lambda
lambda x: 3 * x + 1
# Como utlizar a expressão lambda?
calc = lambda x: 3 * x + 1
print(calc(4))
print(calc(7))
# Podemos ter expressões lambdas com múltiplas entradas
nome_compelto = lambda nome, sobrenome: nome.strip().title() + ' ' + sobrenome.strip().title()
print(nome_compelto(' paulo', ' SILVA '))
print(nome_compelto(' MARIA ', ' albertina '))
# Em funções Python podemos ter nenhuma ou várias entradas. Em Lambdas também
hello = lambda: 'Hello World!'
uma = lambda x: 3 * x + 1
duas = lambda x, y: (x * y) ** 0.5
tres = lambda x, y, z: 3 / (1 / x + 1 / 7 + 1 / z)
# n = lambda x1, x2, ..., xn: <expressão>
print(hello())
print(uma(6))
print(duas(5, 7))
print(tres(3, 6, 9))
# OBS: Se passarmos mais argumentos do que parâmetros esperados teremos TypeError
# Exemplo
autores = ['<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>']
print(autores)
# ['<NAME>', '<NAME>', '<NAME>', '<NAME>',
# '<NAME>', 'In<NAME>', '<NAME>', '<NAME>', '<NAME>']
# Ordenar pelo sobrenome
autores.sort(key=lambda sobrenome: sobrenome.split(' ')[-1].lower())
print(autores)
# ['<NAME>', '<NAME>', 'In<NAME>', '<NAME>',
# '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
"""
# Função Quadrática
# f(x) = a * x ** 2 + b * x + c
# Definindo a função
def geradora_funcao_quadratica(a, b, c):
"""
Retorna a função f(x) = a * x ** 2 + b * x + c
"""
return lambda x: a * x ** 2 + b * x + c
teste = geradora_funcao_quadratica(2, 3, -5)
print(teste(0))
print(teste(1))
print(teste(2))
print(geradora_funcao_quadratica(3, 0, 1)(2))
| <filename>Sec_10_expr_lambdas_fun_integradas/a_lambdas.py
"""
Utilizando Lambdas
Conhecidas por Expressões Lambdas, ou simplesmente Lambdas, são funções sem nome, ou seja,
funções anónimas.
# Função em Python
def funcao(x):
return 3 * x + 1
print(funcao(4))
print(funcao(7))
# Expressão Lambda
lambda x: 3 * x + 1
# Como utlizar a expressão lambda?
calc = lambda x: 3 * x + 1
print(calc(4))
print(calc(7))
# Podemos ter expressões lambdas com múltiplas entradas
nome_compelto = lambda nome, sobrenome: nome.strip().title() + ' ' + sobrenome.strip().title()
print(nome_compelto(' paulo', ' SILVA '))
print(nome_compelto(' MARIA ', ' albertina '))
# Em funções Python podemos ter nenhuma ou várias entradas. Em Lambdas também
hello = lambda: 'Hello World!'
uma = lambda x: 3 * x + 1
duas = lambda x, y: (x * y) ** 0.5
tres = lambda x, y, z: 3 / (1 / x + 1 / 7 + 1 / z)
# n = lambda x1, x2, ..., xn: <expressão>
print(hello())
print(uma(6))
print(duas(5, 7))
print(tres(3, 6, 9))
# OBS: Se passarmos mais argumentos do que parâmetros esperados teremos TypeError
# Exemplo
autores = ['<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>', '<NAME>',
'<NAME>']
print(autores)
# ['<NAME>', '<NAME>', '<NAME>', '<NAME>',
# '<NAME>', 'In<NAME>', '<NAME>', '<NAME>', '<NAME>']
# Ordenar pelo sobrenome
autores.sort(key=lambda sobrenome: sobrenome.split(' ')[-1].lower())
print(autores)
# ['<NAME>', '<NAME>', 'In<NAME>', '<NAME>',
# '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
"""
# Função Quadrática
# f(x) = a * x ** 2 + b * x + c
# Definindo a função
def geradora_funcao_quadratica(a, b, c):
"""
Retorna a função f(x) = a * x ** 2 + b * x + c
"""
return lambda x: a * x ** 2 + b * x + c
teste = geradora_funcao_quadratica(2, 3, -5)
print(teste(0))
print(teste(1))
print(teste(2))
print(geradora_funcao_quadratica(3, 0, 1)(2))
| pt | 0.745095 | Utilizando Lambdas Conhecidas por Expressões Lambdas, ou simplesmente Lambdas, são funções sem nome, ou seja, funções anónimas. # Função em Python def funcao(x): return 3 * x + 1 print(funcao(4)) print(funcao(7)) # Expressão Lambda lambda x: 3 * x + 1 # Como utlizar a expressão lambda? calc = lambda x: 3 * x + 1 print(calc(4)) print(calc(7)) # Podemos ter expressões lambdas com múltiplas entradas nome_compelto = lambda nome, sobrenome: nome.strip().title() + ' ' + sobrenome.strip().title() print(nome_compelto(' paulo', ' SILVA ')) print(nome_compelto(' MARIA ', ' albertina ')) # Em funções Python podemos ter nenhuma ou várias entradas. Em Lambdas também hello = lambda: 'Hello World!' uma = lambda x: 3 * x + 1 duas = lambda x, y: (x * y) ** 0.5 tres = lambda x, y, z: 3 / (1 / x + 1 / 7 + 1 / z) # n = lambda x1, x2, ..., xn: <expressão> print(hello()) print(uma(6)) print(duas(5, 7)) print(tres(3, 6, 9)) # OBS: Se passarmos mais argumentos do que parâmetros esperados teremos TypeError # Exemplo autores = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>'] print(autores) # ['<NAME>', '<NAME>', '<NAME>', '<NAME>', # '<NAME>', 'In<NAME>', '<NAME>', '<NAME>', '<NAME>'] # Ordenar pelo sobrenome autores.sort(key=lambda sobrenome: sobrenome.split(' ')[-1].lower()) print(autores) # ['<NAME>', '<NAME>', 'In<NAME>', '<NAME>', # '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>'] # Função Quadrática # f(x) = a * x ** 2 + b * x + c # Definindo a função Retorna a função f(x) = a * x ** 2 + b * x + c | 4.128559 | 4 |
ex085.py | EduotavioFonseca/ProgramasPython | 0 | 5976 | <gh_stars>0
# Lista dentro de dicionário
campeonato = dict()
gol = []
aux = 0
campeonato['Jogador'] = str(input('Digite o nome do jogador: '))
print()
partidas = int(input('Quantas partidas ele jogou? '))
print()
for i in range(0, partidas):
aux = int(input(f'Quantos gols na partida {i + 1}? '))
gol.append(aux)
print()
campeonato['Gols'] = gol[:]
campeonato['Total'] = sum(gol)
print('=' * 55)
print()
print(campeonato)
print()
print('=' * 55)
print()
for k, v in campeonato.items():
print(f'O campo {k} tem o valor: {v}')
print()
print('=' * 55)
print(f'O jogador {campeonato["Jogador"]} jogou {partidas} partidas.')
print()
for i in range(0, partidas):
print(f'Na partida {i + 1} ele fez {gol[i]} gol(s).')
print()
print(f'No total ele fez {campeonato["Total"]} gols.')
print('=' * 55)
| # Lista dentro de dicionário
campeonato = dict()
gol = []
aux = 0
campeonato['Jogador'] = str(input('Digite o nome do jogador: '))
print()
partidas = int(input('Quantas partidas ele jogou? '))
print()
for i in range(0, partidas):
aux = int(input(f'Quantos gols na partida {i + 1}? '))
gol.append(aux)
print()
campeonato['Gols'] = gol[:]
campeonato['Total'] = sum(gol)
print('=' * 55)
print()
print(campeonato)
print()
print('=' * 55)
print()
for k, v in campeonato.items():
print(f'O campo {k} tem o valor: {v}')
print()
print('=' * 55)
print(f'O jogador {campeonato["Jogador"]} jogou {partidas} partidas.')
print()
for i in range(0, partidas):
print(f'Na partida {i + 1} ele fez {gol[i]} gol(s).')
print()
print(f'No total ele fez {campeonato["Total"]} gols.')
print('=' * 55) | pt | 0.9726 | # Lista dentro de dicionário | 3.692956 | 4 |
heat/initial_data.py | kjetil-lye/ismo_heat | 0 | 5977 | <reponame>kjetil-lye/ismo_heat<gh_stars>0
import numpy
class InitialDataControlSine:
def __init__(self, coefficients):
self.coefficients = coefficients
def __call__(self, x):
u = numpy.zeros_like(x)
for k, coefficient in enumerate(self.coefficients):
u += coefficient * numpy.sin(k * numpy.pi * x)
return u
def exact_solution(self, x, t, q=1):
return sum(coefficient * numpy.exp(-q * (k * numpy.pi) ** 2 * t) * numpy.sin(
k * numpy.pi * x) for k, coefficient in enumerate(self.coefficients))
| import numpy
class InitialDataControlSine:
def __init__(self, coefficients):
self.coefficients = coefficients
def __call__(self, x):
u = numpy.zeros_like(x)
for k, coefficient in enumerate(self.coefficients):
u += coefficient * numpy.sin(k * numpy.pi * x)
return u
def exact_solution(self, x, t, q=1):
return sum(coefficient * numpy.exp(-q * (k * numpy.pi) ** 2 * t) * numpy.sin(
k * numpy.pi * x) for k, coefficient in enumerate(self.coefficients)) | none | 1 | 2.860961 | 3 |
|
explore/scripts/get_repos_creationhistory.py | john18/uccross.github.io | 12 | 5978 | <filename>explore/scripts/get_repos_creationhistory.py
import helpers
import json
import re
datfilepath = "../github-data/labRepos_CreationHistory.json"
allData = {}
# Check for and read existing data file
allData = helpers.read_existing(datfilepath)
# Read repo info data file (to use as repo list)
dataObj = helpers.read_json("../github-data/labReposInfo.json")
# Populate repo list
repolist = []
print("Getting internal repos ...")
repolist = sorted(dataObj["data"].keys())
print("Repo list complete. Found %d repos." % (len(repolist)))
# Read pretty GraphQL query
query_in = helpers.read_gql("../queries/repo-CreationDate.gql")
# Rest endpoint query
query_commits_in = "/repos/OWNNAME/REPONAME/commits?until=CREATETIME&per_page=100"
query_commits_in2 = "/repos/OWNNAME/REPONAME/commits?per_page=100"
# Retrieve authorization token
authhead = helpers.get_gitauth()
# Iterate through internal repos
print("Gathering data across multiple paginated queries...")
collective = {u'data': {}}
tab = " "
for repo in repolist:
# History doesn't change, only update new repos or those that had no previous commits
if "data" in allData.keys() and repo in allData["data"].keys():
if allData["data"][repo]["firstCommitAt"]:
print(tab + "Already recorded data for '%s'" % (repo))
continue
pageNum = 1
print("\n'%s'" % (repo))
print(tab + "page %d" % (pageNum))
repoSplit = repo.split("/")
# Query 1
print(tab + "Get creation date and default branch")
print(tab + "Modifying query...")
newquery = re.sub('OWNNAME', repoSplit[0], query_in)
newquery = re.sub('REPONAME', repoSplit[1], newquery)
gitquery = json.dumps({'query': newquery})
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_github(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
collective["data"][repo] = outObj["data"]["repository"]
# Query 2
print(tab + "Get pre-GitHub commit timestamps")
print(tab + "Modifying query...")
gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in)
gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
gitquery = re.sub('CREATETIME', collective["data"][repo]["createdAt"], gitquery)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not get pre-GitHub commits for '%s'" % (repo))
outObj["data"] = []
# Update collective data
collective["data"][repo]["commitTimestamps"] = []
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
# If no pre-GitHub commits, check the greater commit history
if len(collective["data"][repo]["commitTimestamps"]) > 0 and collective["data"][repo]["commitTimestamps"][0]:
collective["data"][repo]["initBeforeGitHubRepo"] = True
else:
print(tab + "No pre-GitHub commits found, getting full history")
collective["data"][repo]["initBeforeGitHubRepo"] = False
# Query 3
print(tab + "Modifying query...")
gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in2)
gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
# Paginate if needed
hasNext = ("next" in outObj)
while hasNext:
pageNum += 1
print(tab + "page %d" % (pageNum))
print(tab + "Modifying query...")
newquery = gitquery + "&page=" + str(pageNum)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, newquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
hasNext = ("next" in outObj)
# Sort dates
collective["data"][repo]["commitTimestamps"].sort()
# Save earliest commit date
firstdate = None
if len(collective["data"][repo]["commitTimestamps"]) > 0:
firstdate = collective["data"][repo]["commitTimestamps"][0]
collective["data"][repo]["firstCommitAt"] = firstdate
del collective["data"][repo]["commitTimestamps"]
print("'%s' Done!" % (repo))
print("\nCollective data gathering complete!")
# Combine new data with existing data
if "data" not in allData.keys():
allData["data"] = {}
for repo in collective["data"].keys():
allData["data"][repo] = collective["data"][repo]
allDataString = json.dumps(allData, indent=4, sort_keys=True)
# Write output file
print("\nWriting file '%s'" % (datfilepath))
with open(datfilepath, "w") as fileout:
fileout.write(allDataString)
print("Wrote file!")
print("\nDone!\n")
| <filename>explore/scripts/get_repos_creationhistory.py
import helpers
import json
import re
datfilepath = "../github-data/labRepos_CreationHistory.json"
allData = {}
# Check for and read existing data file
allData = helpers.read_existing(datfilepath)
# Read repo info data file (to use as repo list)
dataObj = helpers.read_json("../github-data/labReposInfo.json")
# Populate repo list
repolist = []
print("Getting internal repos ...")
repolist = sorted(dataObj["data"].keys())
print("Repo list complete. Found %d repos." % (len(repolist)))
# Read pretty GraphQL query
query_in = helpers.read_gql("../queries/repo-CreationDate.gql")
# Rest endpoint query
query_commits_in = "/repos/OWNNAME/REPONAME/commits?until=CREATETIME&per_page=100"
query_commits_in2 = "/repos/OWNNAME/REPONAME/commits?per_page=100"
# Retrieve authorization token
authhead = helpers.get_gitauth()
# Iterate through internal repos
print("Gathering data across multiple paginated queries...")
collective = {u'data': {}}
tab = " "
for repo in repolist:
# History doesn't change, only update new repos or those that had no previous commits
if "data" in allData.keys() and repo in allData["data"].keys():
if allData["data"][repo]["firstCommitAt"]:
print(tab + "Already recorded data for '%s'" % (repo))
continue
pageNum = 1
print("\n'%s'" % (repo))
print(tab + "page %d" % (pageNum))
repoSplit = repo.split("/")
# Query 1
print(tab + "Get creation date and default branch")
print(tab + "Modifying query...")
newquery = re.sub('OWNNAME', repoSplit[0], query_in)
newquery = re.sub('REPONAME', repoSplit[1], newquery)
gitquery = json.dumps({'query': newquery})
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_github(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
collective["data"][repo] = outObj["data"]["repository"]
# Query 2
print(tab + "Get pre-GitHub commit timestamps")
print(tab + "Modifying query...")
gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in)
gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
gitquery = re.sub('CREATETIME', collective["data"][repo]["createdAt"], gitquery)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not get pre-GitHub commits for '%s'" % (repo))
outObj["data"] = []
# Update collective data
collective["data"][repo]["commitTimestamps"] = []
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
# If no pre-GitHub commits, check the greater commit history
if len(collective["data"][repo]["commitTimestamps"]) > 0 and collective["data"][repo]["commitTimestamps"][0]:
collective["data"][repo]["initBeforeGitHubRepo"] = True
else:
print(tab + "No pre-GitHub commits found, getting full history")
collective["data"][repo]["initBeforeGitHubRepo"] = False
# Query 3
print(tab + "Modifying query...")
gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in2)
gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
# Paginate if needed
hasNext = ("next" in outObj)
while hasNext:
pageNum += 1
print(tab + "page %d" % (pageNum))
print(tab + "Modifying query...")
newquery = gitquery + "&page=" + str(pageNum)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, newquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
hasNext = ("next" in outObj)
# Sort dates
collective["data"][repo]["commitTimestamps"].sort()
# Save earliest commit date
firstdate = None
if len(collective["data"][repo]["commitTimestamps"]) > 0:
firstdate = collective["data"][repo]["commitTimestamps"][0]
collective["data"][repo]["firstCommitAt"] = firstdate
del collective["data"][repo]["commitTimestamps"]
print("'%s' Done!" % (repo))
print("\nCollective data gathering complete!")
# Combine new data with existing data
if "data" not in allData.keys():
allData["data"] = {}
for repo in collective["data"].keys():
allData["data"][repo] = collective["data"][repo]
allDataString = json.dumps(allData, indent=4, sort_keys=True)
# Write output file
print("\nWriting file '%s'" % (datfilepath))
with open(datfilepath, "w") as fileout:
fileout.write(allDataString)
print("Wrote file!")
print("\nDone!\n")
| en | 0.804442 | # Check for and read existing data file # Read repo info data file (to use as repo list) # Populate repo list # Read pretty GraphQL query # Rest endpoint query # Retrieve authorization token # Iterate through internal repos # History doesn't change, only update new repos or those that had no previous commits # Query 1 # Actual query exchange # Update collective data # Query 2 # Actual query exchange # Update collective data # If no pre-GitHub commits, check the greater commit history # Query 3 # Actual query exchange # Update collective data # Paginate if needed # Actual query exchange # Update collective data # Sort dates # Save earliest commit date # Combine new data with existing data # Write output file | 2.81709 | 3 |
examples/test/runMe.py | tomaszjonak/PBL | 0 | 5979 | <gh_stars>0
#! /usr/bin/env python2.7
from __future__ import print_function
import sys
sys.path.append("../../include")
import PyBool_public_interface as Bool
if __name__ == "__main__":
expr = Bool.parse_std("input.txt")
expr = expr["main_expr"]
expr = Bool.simplify(expr)
expr = Bool.nne(expr)
print(Bool.print_expr(expr))
| #! /usr/bin/env python2.7
from __future__ import print_function
import sys
sys.path.append("../../include")
import PyBool_public_interface as Bool
if __name__ == "__main__":
expr = Bool.parse_std("input.txt")
expr = expr["main_expr"]
expr = Bool.simplify(expr)
expr = Bool.nne(expr)
print(Bool.print_expr(expr)) | en | 0.14341 | #! /usr/bin/env python2.7 | 2.236785 | 2 |
calculator.py | rupen4678/botique_management_system | 0 | 5980 | <reponame>rupen4678/botique_management_system
from tkinter import *
import random
import time
from PIL import Image
from datetime import datetime
from tinydb import *
import os
import pickle
#from database1 import *
from random import randint
root = Tk()
root.geometry("1600x800+0+0")
root.title("Suman_dai_ko_DHOKAN")
root.configure(bg="goldenrod4")
text_Input = StringVar()
operator =""
yes =""
no=""
Tops = Frame(root, width=1600 ,height=50,bg="goldenrod4", relief=RIDGE)
Tops.pack(side=TOP)
f1 = Frame(root, width = 800 ,height=500,bg="goldenrod4",relief=SUNKEN)
f1.pack(side=LEFT)
f2 = Frame(root, width = 300,height = 700,bg="dark slate blue",relief=SUNKEN)
f2.pack(side=RIGHT)
#f3= Frame(root,width=1600,height=300,fg="blue", bg="powder blue", relief=SUNKEN).pack(side=Bottom)
#==========================================================Time=======================================
localtime=time.asctime(time.localtime(time.time()))
#datetime=Label(Tops,font("arial",20,"bold"),text=nowTime,bd=10 ,bg="black", #fg="white", anchor="w").pack()
#====================================debugged========================
shirt = IntVar()
pant = IntVar()
sale = IntVar()
buy = IntVar()
deposite = IntVar()
withdraw = IntVar()
coat = IntVar()
order = IntVar()
total = IntVar()
out = IntVar()
before = IntVar() #order before the 60
stock = IntVar()
delivery = IntVar()
#########################main_gate######################
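# _calculation() reads the current value of every IntVar bound to the UI,
# sums them into a daily total and writes the figures to a numbered
# plain-text report file so older reports are not overwritten.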
def _calculation():
    shirt_mm = shirt.get()
    pant_mm = pant.get()
    sale_mm = sale.get()
    buy_mm = buy.get()
    deposite_mm = deposite.get()
    withdraw_mm = withdraw.get()
    coat_mm = coat.get()
    order_mm = order.get()
    total_mm = total.get()

    # timestamp of this calculation (named `now` so the `time` module is not shadowed)
    now = datetime.now()
    day = now.day
    month = now.month
    hour = now.hour
    second = now.second
    year = now.year
    minute = now.minute

    #setting the filename using the loop
    #file = open("1{}".format())
    '''for i in range(5):
        if os.path.isfile(i):
            pass
        else:
            file = open("{}.txt".format(i+1), "w+")
            print("file created with name {}".format(file))'''

    #creating the filenames with append =1 if the name already existed
    #report files are numbered r.txt, r2.txt, r3.txt, ... so an old report is never overwritten
    file_name = "r.txt"
    if os.path.isfile(file_name):
        expand = 1
        while True:
            expand += 1
            new_file_name = file_name.split(".txt")[0] + str(expand) + ".txt"
            if os.path.isfile(new_file_name): #this numbered file already exists, try the next one
                print("file {} already exists".format(new_file_name))
                continue
            else:
                file_name = new_file_name
                print("creating the file {}".format(file_name))
                break

    file = open(file_name, "w+")
    totalx = shirt_mm+pant_mm+sale_mm+buy_mm+deposite_mm+withdraw_mm+coat_mm+order_mm
    file.write("Total:-{}\n".format(totalx))
    file.write("shirt:-{}\n".format(shirt_mm))
    file.write("pant_mm:-{}\n".format(pant_mm))
    file.write("sale_mm:-{}\n".format(sale_mm))
    file.write("buy_mm:-{}\n".format(buy_mm))
    file.write("deposite_mm:-{}\n".format(deposite_mm))
    file.write("withdraw_mm:-{}\n".format(withdraw_mm))
    file.write("coat:-{}\n".format(coat_mm))
    file.write("order:-{}\n".format(order_mm))
    file.seek(0)                 #rewind so the freshly written report can be read back
    reading = file.readlines()
    file.close()
    #afterwards set the total from here
    total.set(totalx)
#++++++++++++++++++++++++++++++Variables_inset+++++++++++++++++++++++++++++++++
order_bef = IntVar()
stock_full = IntVar()
shrting = IntVar()
pant = IntVar()
sari = IntVar()
order_info = IntVar()
delivery_report = IntVar()
daily_info = IntVar()
sales = IntVar()
buy = IntVar()
total_bank = IntVar()
bank_deposite = IntVar()
bank_withdraw = IntVar()
due_amount = IntVar()
order_info = IntVar()
daily_cash = IntVar()
cus_name = IntVar()
cus_no = IntVar()
employee = IntVar()
###############################class of algorithms#########################
class __main():
    def __init__(self):
        self.order = order

    def __order_info(self, record_time):
        #record_time is the delivery hour for this order (supplied by the caller)
        now = datetime.now()
        self.hour = now.hour
        self.minute = now.minute
        self.second = now.second
        self.year = now.year
        self.month = now.month
        self.day = now.day
        self.record_time = record_time
        if self.hour == self.record_time:
            print("the time for the product is actually %s left" % (self.hour - self.record_time))
#++++++++++++++++++++++++++++++++++++++++tinydb example++++++++++++++++++++++
#db = TinyDB("/databse/d4ta.json")
#db.insert({"cus_number":"98938232", "cus_name":"rupen"})
#def no_y():
# lis = db.all()
################Info===============
lblInfo = Label(Tops, font=("arial",60, "italic bold"),text="Botique Management Systewm",fg="white", bg="dark slate blue", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
lblInfo = Label(Tops, font=("arial",30, "bold"),text=localtime,fg="white",bg="black", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
#===========================================================Calculator==================================
"""def current_dir():
import os
import sys
DIR = os.getcwd()
print(DIR)
lblInfo = Label(Tops, font=("arial",60, "italic"),text=current_dir,fg="black",bg="powder blue",bd=10, anchor="W")
lblInfo.pack()
#DIR = dir
#return dir
"""
#randomBtn=Button(f1,pady=16,padx=16,bd=8,bg="powder blue", text="C_dir", command=lambda: current_dir(dir)).pack(side=TOP)
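# calculator callbacks: btnClick appends the pressed digit/operator to the
# expression string, btnClearDisplay wipes it, and btnEqualsInput evaluates
# the accumulated expression with eval() and shows the result.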
def btnClick(numbers):
global operator
operator = operator + str(numbers)
text_Input.set(operator)
def btnClearDisplay():
global operator
operator=""
text_Input.set("")
def btnEqualsInput():
global operator
sumup=str(eval(operator))
text_Input.set(sumup)
operator=""
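# bill_entry() simply resets the global bill buffers to empty strings;
# they are meant to be filled in elsewhere.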
def bill_entry():
global bill_in
global bill_out
bill_out = ""
bill_in = ""
def rupen():
global rupen
rupen = rupen
ronley = StringVar()
'''def malware_activate():
global cmd_active
if "rupen" in cmd_active:
if "rupen" in cmd_active[1]:
if "ronley" in cmd_active[2]:'''
#==============================another window: about me=====================
def ano_win1():
    #owner/about window: shows the owner's details and lets the user
    #point the app at an image file path
    win1 = Toplevel()
    win1.title("this is the owner window:")
    win1.geometry("1600x800+0+0")
    #win1.configure(bg="silver")
    my_info = Frame(win1, width=600, height=700,bg="RoyalBlue4",relief=GROOVE)
    my_info.pack(side=LEFT)
    customer_info = Frame(win1, width=600, height=500,bg="RoyalBlue4", relief=GROOVE)
    customer_info.pack(side=RIGHT)
    others_info = Frame(win1, width=100, height=100,bg="RoyalBlue4",relief=GROOVE)
    others_info.pack(side=BOTTOM)
    all_info = Frame(win1, width=50, height=50,bg="RoyalBlue4",relief=RAISED)
    all_info.pack()
    lblname=Label(my_info,font=("arial",20,"italic"),text="<NAME>",bg="powder blue", fg="green", bd=10, relief=SUNKEN).pack(side=TOP)
    lblpro=Label(my_info,font=("arial", 20,"bold"),text="Software Engineer",bg="powder blue", fg="green",bd=10, relief=RAISED).pack()
    ima = StringVar()
    imageloc=Entry(win1,font=("arial",16,"italic"),bg="black",fg="white",bd=5,insertwidth=1,relief=GROOVE,textvariable=ima)
    imageloc.pack()

    def _image():
        #show the path typed into the entry; rendering the picture itself
        #would need PIL.ImageTk, so only the chosen path is displayed here
        path = ima.get()
        Label(win1, font=("arial", 20, "italic"), bg="black", fg="white", text=path, relief=FLAT).pack()

    #image = Image.open("/root/Desktop/Desktop/anonymous/5.png")
    imageButt=Button(win1,font=("arial",20, "bold"),bd=5,bg="white",fg="black",text="show image path",command=_image)
    imageButt.pack()
#=============================getting all the infos ========================
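# _price_inputs() opens a separate window where the shop's item prices can be typed in.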
def _price_inputs():
    win2 = Toplevel()
    win2.title("This is going to be the section for the price inputs")
    win2.geometry("1600x800")
    #create the frames first and pack them afterwards, otherwise the variables
    #hold None (pack() returns None) and the title label ends up on the wrong window
    framex = Frame(win2, width=1600, bg="RoyalBlue4", height=100, relief=GROOVE)
    framex.pack(side=TOP)
    frame1 = Frame(win2, width=775, height=750, bg="white", relief=SUNKEN)
    frame1.pack()
    frame2 = Frame(win2, width=775, height=750, bg="black", relief=FLAT)
    frame2.pack()
    #==================================title=============================
    llb1 = Label(framex, font=("arial", 20, "italic"), bg="powder blue", fg="green", text="INPUT THE PRICES", relief=GROOVE)
    llb1.pack()
    win2.mainloop()
###########################sending emails############################
def __send_email():
'''import smtplib
gmail = smtplib.SMTP("smtp.gmail.com", 587)
gmail.starttls()
_file = open("/root/Desktop/Desktop/python/")
gmail.login("username", "password")
msg = "YOUR MESSAGE"
gmail.sendmail("your email adress", "the")
gmail.quit()'''
dialog = Tk()
dialog.title("Send emails")
dialog.geometry("800x800")
dframe = Frame(dialog,width=800,height=800,bg="white",relief=SUNKEN).pack()
email = StringVar()
password = StringVar()
semail = StringVar()
spassword = StringVar()
label = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="your_email").pack(side=LEFT)
entry1 = Entry(dframe, font=("arial",16,"bold"), fg="white",bg="black", textvariable=email,insertwidth=1,bd=5).pack(side=RIGHT)
label1 = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="password", relief=SUNKEN).pack()
entry2 = Entry(dframe,font=("arial", 16 ,"bold"),textvariable=password, insertwidth=1,bd=5).pack(side=RIGHT)
Label2 =Label(dframe,font=("arial",16, "bold"),fg="white",bg="black", text="sender_email",relief=SUNKEN).pack(side=LEFT)
entry2 = Entry(dframe,font=("arial",16, "bold"),bd=5,fg="white",bg="black",textvariable=semail,insertwidth=1).pack(side=LEFT)
label3 = Label(dframe,font=("arial",16,"bold"),fg="white",bg="black",text="sender_password", relief=SUNKEN).pack(side=LEFT)
entry3= Entry(dframe,font=("arial",16,"bold"),fg="white",textvariable=spassword,insertwidth=1,relief=SUNKEN).pack()
dialog.mainloop()
#btnEmail = Button(root,font=("arial", 16, "bold"), bg="black",fg="white",text="email",command=lambda: __send_email(),relief=GROOVE).pack()
#================================next section===========================
fix = Button(root, bd=10,bg="black",fg="white",command=_price_inputs,relief=GROOVE).pack(side=BOTTOM)
btnru = Button(root, font=("arial 20 bold"),bd=20, bg="black",fg="white",text="click",command=ano_win1,relief=GROOVE).pack(side=BOTTOM)
#fucking mazing yr coding
def column(col):
    call = 0
    for coll in col:
        call = call + 1
    return call
#def yes_y():
# rupe = Toplevel(root)
# rupe.title("this is second window")
# return
#def no_y():
#nos = Toplevel(root)
#nos.title("this is nos window")
#return
a = Entry(f2,font=("arial", 20,"bold"), textvariable=text_Input, bd=30, insertwidth=4,
bg="dark slate blue",fg="white", justify="right").grid(columnspan=4)
btn7=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="7",bg="dim gray", command=lambda: btnClick(7)).grid(row=2,column=0)
btn8=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="8",bg="dim gray", command=lambda: btnClick(8)).grid(row=2,column=1)
btn9=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="9",bg="dim gray", command=lambda: btnClick(9)).grid(row=2,column=2)
#!!!!!!!!!!!!!!!!!!!!!!additions!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Addition=Button(f2,padx=16,pady=16,bd=8,text="+",fg="black",bg="dim gray", command=lambda: btnClick("+")).grid(row=2,column=3)
btn6=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="4", bg="dim gray", command=lambda: btnClick(4)).grid(row=3,column=0)
btn5=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="5", bg="dim gray", command=lambda: btnClick(5)).grid(row=3,column=1)
btn4=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="6",bg="dim gray", command=lambda: btnClick(6)).grid(row=3,column=2)
Subtract=Button(f2,padx=16,pady=16,bd=8,text="-", bg="dim gray", command=lambda: btnClick("-")).grid(row=3,column=3)
btn3=Button(f2,padx=16,pady=16,bd=8,text="3",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(3)).grid(row=4,column=0)
btn2=Button(f2,padx=16,pady=16,bd=8,text="2",font=("arial", 20, "bold"), bg="dim gray", command=lambda: btnClick(2)).grid(row=4,column=1)
btn1=Button(f2,padx=16,pady=16,bd=8,text="1",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(1)).grid(row=4,column=2)
Multiply=Button(f2,padx=16,pady=16,bd=8,text="*", bg="dim gray", command=lambda: btnClick("*")).grid(row=4,column=3)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
btn0=Button(f2,padx=16,pady=16,bd=8,bg="dim gray",text="0",fg="black",font=("arial", 20, "bold"), command=lambda: btnClick(0)).grid(row=5,column=0)
btnClear=Button(f2,pady=16,padx=16,bd=8, fg="black",font=("arial", 20, "bold"),text="C",bg="dim gray", command=btnClearDisplay).grid(row=5,column=1)
btnEquals=Button(f2,padx=16,pady=16,fg="black",bd=8,text="=",bg="dim gray", font=("arial", 20,"bold"), command=btnEqualsInput).grid(row=5,column=2)
#btn2=Button(f2,padx=16,pady=16,bd=8,fg="black",text="2",bg="dim gray", command=lambda: btnClick(2)).grid(row=5,column=3)
division=Button(f2,padx=16,pady=16,bd=8,fg="black", text="/", bg="dim gray", command=lambda: btnClick("/")).grid(row=5,column=3)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
rand = StringVar()
#lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="red",bg="red",anchor="w",relief=RIDGE).grid(row=0,column=0)
#txtReference=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,bg="red",fg="white", justify = "right").grid(row=0,column=1)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="white",bg="green",anchor="w", relief=RIDGE)
lblReference.grid(row=0,column=0)
b=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,fg="white",bg="black", justify = "left")
b.grid(row=0,column=1)
#img = "/root/Desktop/Desktop/python/projects/prj1_Botik/1.jpg"
#root.ima = Image.open(img)
#Label (root,bg="white",width=120,height=120, image=ima).pack()
bill_in = StringVar()
bill_out = StringVar()
shrting=Label(f1,font=("arial", 20, "bold"), text="Shirting:",bg="powder blue", fg="black",anchor="w",relief=GROOVE).grid(row=1,column=0)
shirts=Entry(f1,font=("arial", 16, "italic"), bd=10, textvariable=shirt, insertwidth=1,bg="black",fg="white", justify="left").grid(row=2,column=0)
owner=Button(root,padx=16,pady=16, font=("arial",12, "bold"),text="info", bd=8,bg="black",command=ano_win1,fg="white",relief=RAISED).pack(side=LEFT)
yes=Button(root,padx=16,pady=16,font=("arial",12, "bold"),text="Done",bd=8,bg="black", fg="white", command=_calculation,relief=RAISED).pack(side=RIGHT)
panting=Label(f1,font=("arial",20, "bold"), text="pant_mm:", bg="powder blue",fg="black",anchor="w",relief=GROOVE).grid(row=1,column=1)
pantx=Entry(f1,font=("arial",16, "bold"), textvariable=pant, insertwidth=1, bd=10,bg="black",fg="white", justify="left").grid(row=2,column=1)
sales=Label(f1,font=("arial",16, "bold"), text="sales_total:",bg="powder blue",fg="black",anchor="w",bd=8,relief=GROOVE).grid(row=1,column=2)
salex=Entry(f1,font=("arial",16, "bold"),bg="black",fg="white",textvariable=sale,insertwidth=1,bd=10,justify="left").grid(row=2,column=2)
buying=Label(f1,font=("arial",16, "bold"), text="buying_something: ",bg="powder blue",fg="black", anchor="e", relief=GROOVE).grid(row=3,column=0)
buyx=Entry(f1,font=("arial", 16, "bold"), textvariable=buy, insertwidth=1, bd=10,bg="black", fg="white", justify="left").grid(row=4,column=0)
Bank_Total=Label(f1,font=("arial",16,"bold"),text="Bank_Deposite: ", bg="powder blue", fg="black", anchor="e",relief=GROOVE).grid(row=3, column=1)
depositex=Entry(f1,font=("arial",16,"bold"),bd=10, textvariable=deposite, bg="black", fg="white", justify="left").grid(row=4, column=1)
lblBankwith=Label(f1, font=("arial", 16, "bold"),fg="black",bg="powder blue",text="Bank_Withdraw", anchor="e",relief=GROOVE).grid(row=3,column=2)
withdrawx=Entry(f1,font=("arial",16, "bold"),bd=10, fg="white",bg="black", textvariable=withdraw, insertwidth=1).grid(row=4,column=2)
coating=Label(f1, font=("arial", 16, "bold"),text="coat_mm:", bg="powder blue",fg="black",anchor="e").grid(row=5,column=0)
coatx=Entry(f1, font=("arial", 16, "bold"), bg="black", fg="white",
textvariable=coat, insertwidth=1, justify="left",bd=10).grid(row=6,column=0)
lablsari=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="sari mm:", fg="black",anchor="e",relief=GROOVE).grid(row=5,column=1)
sarix=Entry(f1, font=("arial", 16, "bold"), bg="black",bd=10, fg="white",textvariable=sari, insertwidth=1).grid(row=6,column=1)
buying=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="buy_info:",fg="black",anchor="e",relief=GROOVE).grid(row=7,column=0)
buyx=Entry(f1,font=("arial",16, "bold"),bd=8, fg="white",bg="black",textvariable=buy,insertwidth=1).grid(row=8,column=0)
outgoing =Label(f1, font=("arial", 16, "bold"), bg="powder blue", text="outgoing:", fg="black",anchor="e",relief=GROOVE).grid(row=7,column=1)
outx=Entry(f1,font=("arial", 16, "bold"),textvariable=out, bd=8,fg="white",bg="black",insertwidth=1).grid(row=8,column=1)
ordering=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="order_info:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=0)
orderx=Entry(f1,font=("arial",16,"bold"),insertwidth=1, textvariable=order,bd=8,fg="white",bg="black").grid(row=10,column=0)
lblcustomer=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="cus_name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=1)
no=Entry(f1,font=("arial",16, "bold"),bd=8,bg="black",fg="white",insertwidth=1, textvariable=cus_name).grid(row=10,column=1)
lblmonthly=Label(f1, font=("arial",16,"bold"),bg="powder blue",text="monthly:",fg="black",anchor="e",relief=GROOVE).grid(row=5,column=2)
monthly=StringVar()
monthx=Entry(f1,font=("arial",16,"bold"),show="blank",bg="black",textvariable=monthly,insertwidth=1,fg="white",bd=10).grid(row=6,column=2)
lbltotal=Label(f1, font=("arial", 16, "bold"),bg="powder blue",text="Total:",fg="black").grid(row=7,column=2)
totalx=Entry(f1, font=("arial", 16, "bold"),bg="black",textvariable=total,fg="white",insertwidth=1,bd=10).grid(row=8,column=2)
lblemployee = Label(f1,font=("arial", 16, "bold"),bg="powder blue",text="employee name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=2)
employx= Entry(f1,font=("arial", 16,"bold"),textvariable=employee,insertwidth=1,bg="black",fg="white",bd=10).grid(row=10,column=2)
###############################database for the project######################
'''def __database():
db = TinyDB("/records.json")
#print(monthly)
#print(b)
#fuck = c.get()
a = order_bef.get()
b = stock_full.get()
c = shrting.get()
d = pant.get()
e = sari.get()
f = order_info.get()
g = delivery_report.get()
h = daily_info.get()
i = sales.get()
j = buy.get()
k = total_bank.get()
l = bank_deposite.get()
m = bank_withdraw.get()
n = due_amount.get()
o = order_info.get()
p = daily_cash.get()
q = cus_name.get()
r = cus_no.get()
s = employee.get()
files = {"a": "", "b": "", "c": "", "d": "", "e": "", "f": "", "g": "", "h": "", "i": "", "j": ""
, "k": "", "l": "", "m": "", "n": "", "o": "", "p": "", "q": "", "r": "", "s": ""}
db.insert({"total": a }),
db.insert({"regrds":"reference"}),
db.insert({"day_income":"billion"}),
db.insert({"day_outgoing":"billout"}),
db.insert({"bankdeposit":"bankdepo"}),
db.insert({"full_stock":"stock"}),
db.insert({"shirt_mm":"shirt"}),
db.insert({"bankwithdraw":"bankwith"}),
db.insert({"pantmm":"pant"}),
db.insert({"sarimm":"sari"}),
db.insert({"orderday":"orderinfo"}),
db.insert({"salling":"sales"}),
db.insert({"buying":"buy"}),
db.insert({"customern":"customer"}),
db.insert({"monthly_info":"monthly"}),
db.insert({"totaldy":"total"}),
db.insert({"employeid":"employee"})
for db in range(1):
print(db)
files = list(files)
file = open("/file.txt", "wb")
da = ""
for data in files:
if len(data) != 0:
print("this is are the files written in python\\n check the file.txt for debug ")
da += data
print(data)
da = int(da)
file.write(da)
try:
file = open("/records.txt", "r")
except:
print("creating the file from script {}".format(__file__))
file = open("/records.txt","w")
finally:
pass
check = os.path.isfile("/records.txt")
if check:
for item in db:
data = open("/records.txt","wb")
#with open("/records.txt","wb") as file:
#pickle.dump(item, data)
#file.close()
#file1 = pickle.load(file)
if len(item) == len(file1):
break
if item != file:
#item = str(item)
file.write("%s" %(item))
time.sleep(1)
print("done writing to the file")
#for item in db:
with open("/records.txt", "rb") as file:
reading = file1
if len(reading) != None:
print("its printed")
print(reading)
file.close()
#db.insert({"name":"<NAME>"})
name = Query()
#db(name.type == "changed")
d = datetime.now()
month = str(d.month)
day = str(d.day)
year = str(d.year)
hour = str(d.hour)
minute = str(d.minute)
second = str(d.second)
between = str(":")'''
'''def __time(infos):
time = datetime.now()
day = str(time.day)
month = str(time.month)
hour = str(time.hour)
second = str(time.second)
year = str(time.year)
minute = str(time.minute)
#assuming the infos as the order taken that will be notified before the
#60 hours
#changing all the formats to the seconds that will be easy for the #calculation
#first calculating seconds in one day that will ease all the further operations
daysec = (24*60) * 60 * 60
###
##this is will be easy now
yearSec = daysec * 365
month = daysec * 30
daySec = daysec
hourSec = 60 * 60 * 60
minuteSec = 60 * 60
files = {"a":"", "b":"","c":"","d":"","e":"","f":"","g":"","h":"","i":"","j":""
,"k":"","l":"","m":"","n":"","o":"","p":"","q":"","r":"","s":""}'''
#files = list(files)
'''for data in files:
if len(data) != 0:
print(data)'''
#lenght = len(db)
##this will show the recorded bill numbers
def show_bills():
    ##assuming the variable as bill number .get var
    bill = bill_in.get()
    billo = bill_out.get()
    bills = TinyDB("/bills.json")
    if bill or billo:
        bills.insert({"billInput": bill, "billOutput": billo})
win = Toplevel()
win.title("bills")
winF = Frame(win, bg="black",relief=SUNKEN).pack()
winE = Entry(winF, insertwidth=10,insertheight=10,fg="white",bg="black",textvariable=bills).pack()
win.mainloop()
#l
# command=bill_in).pack(anchor=NE)
root.mainloop()
#__database()
#add1=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
#text="+",bg="powder blue", command=lambda: btnClick("+")).grid(row=3,column=6)
#btn10=Button(f2,padx=16,padx=16, fg="blue", font("arial",5,"bold"),
# text="rupen",bg="powder blue", command=rupen).grid(row=3,column=5)
#def function():
# pass():
# pass main():
# root.mainloop()
#for the revies of the follow in the sorry of the same of the tkinter in the main function of the sollow
#main()
| from tkinter import *
import random
import time
from PIL import Image
from datetime import datetime
from tinydb import *
import os
import pickle
#from database1 import *
from random import randint
root = Tk()
root.geometry("1600x800+0+0")
root.title("Suman_dai_ko_DHOKAN")
root.configure(bg="goldenrod4")
text_Input = StringVar()
operator =""
yes =""
no=""
Tops = Frame(root, width=1600 ,height=50,bg="goldenrod4", relief=RIDGE)
Tops.pack(side=TOP)
f1 = Frame(root, width = 800 ,height=500,bg="goldenrod4",relief=SUNKEN)
f1.pack(side=LEFT)
f2 = Frame(root, width = 300,height = 700,bg="dark slate blue",relief=SUNKEN)
f2.pack(side=RIGHT)
#f3= Frame(root,width=1600,height=300,fg="blue", bg="powder blue", relief=SUNKEN).pack(side=Bottom)
#==========================================================Time=======================================
localtime=time.asctime(time.localtime(time.time()))
#datetime=Label(Tops,font("arial",20,"bold"),text=nowTime,bd=10 ,bg="black", #fg="white", anchor="w").pack()
#====================================debugged========================
shirt = IntVar()
pant = IntVar()
sale = IntVar()
buy = IntVar()
deposite = IntVar()
withdraw = IntVar()
coat = IntVar()
order = IntVar()
total = IntVar()
out = IntVar()
before = IntVar() #order before the 60
stock = IntVar()
delivery = IntVar()
#########################main_gate######################
def _calculation():
shirt_mm = shirt.get()
pant_mm = pant.get()
sale_mm = sale.get()
buy_mm = buy.get()
deposite_mm = deposite.get()
withdraw_mm = withdraw.get()
coat_mm = coat.get()
order_mm = order.get()
total_mm = total.get()
time = datetime.now()
day = time.day
month = time.month
hour = time.hour
second = time.second
year = time.year
minute = time.minute
#setting the filename using the loop
#file = open("1{}".format())
'''for i in range(5):
if os.path.isfile(i):
pass
else:
file = open("{}.txt".format(i+1), "w+")
created with name {}".format(file))'''
#creating the filenames with append =1 if the name already existed
file_name = "r.txt"
if os.path.isfile(file_name):
expand = 1
while True:
expand += 1
new_file_name = file_name.split(".txt")[0] + str(expand) + ".txt"
if os.path.isfile(new_file_name): #if the newfilename exists
print("using the file {}".format(new_file_name))
#file = open("{}".format(new_file_name), "w+")
continue
else:
file_name = open(new_file_name, "w+")
print("creating the file {}".format(file_name))
#file = open("{}".format(file_name), "w+")
break
file_name = "fil.txt"
file = open("{}".format(file_name),"w+")
totalx = shirt_mm+pant_mm+sale_mm+buy_mm+deposite_mm+withdraw_mm+coat_mm+order_mm
file.write("Total:-{}".format(totalx))
file.write("shirt:-{}".format(shirt_mm))
file.write("pant_mm:-{}".format(pant_mm))
file.write("sale_mm:-{}".format(sale_mm))
file.write("buy_mm:-{}".format(buy_mm))
file.write("deposite_mm:-{}".format(deposite_mm))
file.write("withdraw_mm:-{}".format(withdraw_mm))
file.write("coat:-{}".format(coat_mm))
file.write("order:-{}".format(order_mm))
reading = file.readlines()
file.close()
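    # Illustrative sketch (not part of the original): the name-collision loop above
    # produces the sequence r.txt -> r2.txt -> r3.txt ..., stopping at the first name
    # that does not exist yet. Assuming r.txt and r2.txt already exist:
    #
    #   expand = 2 -> "r2.txt" exists, continue
    #   expand = 3 -> "r3.txt" does not exist -> open("r3.txt", "w+")
    #
    # Note that the chosen name is then discarded, because file_name is re-assigned
    # to "fil.txt" before the totals are written out.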
#after wards set the total from here total.set
#++++++++++++++++++++++++++++++Varibales_inset+++++++++++++++++++++++++++++++++
order_bef = IntVar()
stock_full = IntVar()
shrting = IntVar()
pant = IntVar()
sari = IntVar()
order_info = IntVar()
delivery_report = IntVar()
daily_info = IntVar()
sales = IntVar()
buy = IntVar()
total_bank = IntVar()
bank_deposite = IntVar()
bank_withdraw = IntVar()
due_amount = IntVar()
order_info = IntVar()
daily_cash = IntVar()
cus_name = IntVar()
cus_no = IntVar()
employee = IntVar()
###############################class of algoriths#########################
class __main():
def __init__(self):
self.order = order
    def __order_info(self, record_time=0):
        self.now = datetime.now()
        self.hour = self.now.hour
        self.minute = self.now.minute
        self.second = self.now.second
        self.year = self.now.year
        self.month = self.now.month
        self.day = self.now.day
        self.record_time = record_time
        if self.hour == self.record_time:
            print("the time for the product is actually %s left" % (self.hour - self.record_time))
#++++++++++++++++++++++++++++++++++++++++tinydb example++++++++++++++++++++++
#db = TinyDB("/databse/d4ta.json")
#db.insert({"cus_number":"98938232", "cus_name":"rupen"})
#def no_y():
# lis = db.all()
################Info===============
lblInfo = Label(Tops, font=("arial",60, "italic bold"),text="Botique Management Systewm",fg="white", bg="dark slate blue", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
lblInfo = Label(Tops, font=("arial",30, "bold"),text=localtime,fg="white",bg="black", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
#===========================================================Calculator==================================
"""def current_dir():
import os
import sys
DIR = os.getcwd()
print(DIR)
lblInfo = Label(Tops, font=("arial",60, "italic"),text=current_dir,fg="black",bg="powder blue",bd=10, anchor="W")
lblInfo.pack()
#DIR = dir
#return dir
"""
#randomBtn=Button(f1,pady=16,padx=16,bd=8,bg="powder blue", text="C_dir", command=lambda: current_dir(dir)).pack(side=TOP)
def btnClick(numbers):
global operator
operator = operator + str(numbers)
text_Input.set(operator)
def btnClearDisplay():
global operator
operator=""
text_Input.set("")
def btnEqualsInput():
global operator
sumup=str(eval(operator))
text_Input.set(sumup)
operator=""
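# Illustrative sketch (not part of the original script): the keypad handlers above
# build up a string and evaluate it, so pressing 7, "+", 8 and "=" does roughly:
#
#   btnClick(7)        # operator == "7"
#   btnClick("+")      # operator == "7+"
#   btnClick(8)        # operator == "7+8"
#   btnEqualsInput()   # eval("7+8") -> 15, shown in the Entry via text_Input
#
# eval() on a user-assembled string is only tolerable here because the string can
# only contain what the fixed keypad buttons append.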
def bill_entry():
global bill_in
global bill_out
bill_out = ""
bill_in = ""
def rupen():
global rupen
rupen = rupen
ronley = StringVar()
'''def malware_activate():
global cmd_active
if "rupen" in cmd_active:
if "rupen" in cmd_active[1]:
if "ronley" in cmd_active[2]:'''
#==============================another windows about me=====================
def ano_win1():
win1 = Toplevel()
#this is going to be the window in which there is nothing in the function
#of the system on the support in teh main loop
#there is no limit in the system of teh
win1.title("this is the owner window:")
win1.geometry("1600x800+0+0")
#win1.configure(bg="silver")
my_info = Frame(win1, width=600, height=700,bg="RoyalBlue4",relief=GROOVE)
my_info.pack(side=LEFT)
customer_info = Frame(win1, width=600, height=500,bg="RoyalBlue4", relief=GROOVE)
customer_info.pack(side=RIGHT)
others_info = Frame(win1, width=100, height=100,bg="RoyalBlue4",relief=GROOVE)
others_info.pack(side=BOTTOM)
all_info = Frame(win1, width=50, height=50,bg="RoyalBlue4",relief=RAISED)
all_info.pack()
lblname=Label(my_info,font=("arial",20,"italic"),text="<NAME>",bg="powder blue", fg="green", bd=10, relief=SUNKEN).pack(side=TOP)
lblpro=Label(my_info,font=("arial", 20,"bold"),text="Software Engineer",bg="powder blue", fg="green",bd=10, relief=RAISED).pack()
ima = StringVar()
imageloc=Entry(win1,font=("arial",16,"italic"),bg="black",fg="white",bd=5,insertwidth=1,relief=GROOVE,textvariable=ima).pack()
imageButt=Button(win1,font=("arial",20, "bold"),bd=5,bg="white",fg="white",command= lambda: _image(image)).pack()
'''def _image(image):
image = image.set(imageloc)
return image
#image = Image.open("/root/Desktop/Desktop/anonymous/5.png")
imae = Label(win1,font=("arial", 20,"italic"),width=300, height=168,bg="black",fg="white", text=image,relief=FLAT).pack()
win1.mainloop()'''
#=============================getting all the infos ========================
def _price_inputs():
win2 = Toplevel()
win2.title("This is going to the section for the price inputs")
win2.geometry("1600x800")
framex = Frame(win2,width=1600,bg="RoyalBlue4",height=100,relief=GROOVE).pack(side=TOP)
frame1 = Frame(win2,width=775, height=750,bg="white", relief=SUNKEN).pack()
frame2 = Frame(win2, width=775,height=750,bg="black", relief=FLAT).pack()
#==++++===========================title=============================
llb1 = Label(framex,font=("arial", 20,"italic"),bg="powder blue",fg="green",text="INPUT THE PRICES",relief=GROOVE).pack()
win2.mainloop()
###########################sending emails############################
def __send_email():
'''import smtplib
gmail = smtplib.SMTP("smtp.gmail.com", 587)
gmail.starttls()
_file = open("/root/Desktop/Desktop/python/")
gmail.login("username", "password")
msg = "YOUR MESSAGE"
gmail.sendmail("your email adress", "the")
gmail.quit()'''
dialog = Tk()
dialog.title("Send emails")
dialog.geometry("800x800")
dframe = Frame(dialog,width=800,height=800,bg="white",relief=SUNKEN).pack()
email = StringVar()
password = StringVar()
semail = StringVar()
spassword = StringVar()
label = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="your_email").pack(side=LEFT)
entry1 = Entry(dframe, font=("arial",16,"bold"), fg="white",bg="black", textvariable=email,insertwidth=1,bd=5).pack(side=RIGHT)
label1 = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="password", relief=SUNKEN).pack()
entry2 = Entry(dframe,font=("arial", 16 ,"bold"),textvariable=password, insertwidth=1,bd=5).pack(side=RIGHT)
Label2 =Label(dframe,font=("arial",16, "bold"),fg="white",bg="black", text="sender_email",relief=SUNKEN).pack(side=LEFT)
entry2 = Entry(dframe,font=("arial",16, "bold"),bd=5,fg="white",bg="black",textvariable=semail,insertwidth=1).pack(side=LEFT)
label3 = Label(dframe,font=("arial",16,"bold"),fg="white",bg="black",text="sender_password", relief=SUNKEN).pack(side=LEFT)
entry3= Entry(dframe,font=("arial",16,"bold"),fg="white",textvariable=spassword,insertwidth=1,relief=SUNKEN).pack()
dialog.mainloop()
#btnEmail = Button(root,font=("arial", 16, "bold"), bg="black",fg="white",text="email",command=lambda: __send_email(),relief=GROOVE).pack()
#================================next section===========================
fix = Button(root, bd=10,bg="black",fg="white",command=_price_inputs,relief=GROOVE).pack(side=BOTTOM)
btnru = Button(root, font=("arial 20 bold"),bd=20, bg="black",fg="white",text="click",command=ano_win1,relief=GROOVE).pack(side=BOTTOM)
#fucking mazing yr coding
def column(col):
    call = 0
    for coll in col:
        call = call + 1
    return call
#def yes_y():
# rupe = Toplevel(root)
# rupe.title("this is second window")
# return
#def no_y():
#nos = Toplevel(root)
#nos.title("this is nos window")
#return
a = Entry(f2,font=("arial", 20,"bold"), textvariable=text_Input, bd=30, insertwidth=4,
bg="dark slate blue",fg="white", justify="right").grid(columnspan=4)
btn7=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="7",bg="dim gray", command=lambda: btnClick(7)).grid(row=2,column=0)
btn8=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="8",bg="dim gray", command=lambda: btnClick(8)).grid(row=2,column=1)
btn9=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="9",bg="dim gray", command=lambda: btnClick(9)).grid(row=2,column=2)
#!!!!!!!!!!!!!!!!!!!!!!additions!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Addition=Button(f2,padx=16,pady=16,bd=8,text="+",fg="black",bg="dim gray", command=lambda: btnClick("+")).grid(row=2,column=3)
btn6=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="4", bg="dim gray", command=lambda: btnClick(4)).grid(row=3,column=0)
btn5=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="5", bg="dim gray", command=lambda: btnClick(5)).grid(row=3,column=1)
btn4=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="6",bg="dim gray", command=lambda: btnClick(6)).grid(row=3,column=2)
Subtract=Button(f2,padx=16,pady=16,bd=8,text="-", bg="dim gray", command=lambda: btnClick("-")).grid(row=3,column=3)
btn3=Button(f2,padx=16,pady=16,bd=8,text="3",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(3)).grid(row=4,column=0)
btn2=Button(f2,padx=16,pady=16,bd=8,text="2",font=("arial", 20, "bold"), bg="dim gray", command=lambda: btnClick(2)).grid(row=4,column=1)
btn1=Button(f2,padx=16,pady=16,bd=8,text="1",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(1)).grid(row=4,column=2)
Multiply=Button(f2,padx=16,pady=16,bd=8,text="*", bg="dim gray", command=lambda: btnClick("*")).grid(row=4,column=3)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
btn0=Button(f2,padx=16,pady=16,bd=8,bg="dim gray",text="0",fg="black",font=("arial", 20, "bold"), command=lambda: btnClick(0)).grid(row=5,column=0)
btnClear=Button(f2,pady=16,padx=16,bd=8, fg="black",font=("arial", 20, "bold"),text="C",bg="dim gray", command=btnClearDisplay).grid(row=5,column=1)
btnEquals=Button(f2,padx=16,pady=16,fg="black",bd=8,text="=",bg="dim gray", font=("arial", 20,"bold"), command=btnEqualsInput).grid(row=5,column=2)
#btn2=Button(f2,padx=16,pady=16,bd=8,fg="black",text="2",bg="dim gray", command=lambda: btnClick(2)).grid(row=5,column=3)
division=Button(f2,padx=16,pady=16,bd=8,fg="black", text="/", bg="dim gray", command=lambda: btnClick("/")).grid(row=5,column=3)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
rand = StringVar()
#lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="red",bg="red",anchor="w",relief=RIDGE).grid(row=0,column=0)
#txtReference=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,bg="red",fg="white", justify = "right").grid(row=0,column=1)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="white",bg="green",anchor="w", relief=RIDGE)
lblReference.grid(row=0,column=0)
b=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,fg="white",bg="black", justify = "left")
b.grid(row=0,column=1)
#img = "/root/Desktop/Desktop/python/projects/prj1_Botik/1.jpg"
#root.ima = Image.open(img)
#Label (root,bg="white",width=120,height=120, image=ima).pack()
bill_in = StringVar()
bill_out = StringVar()
shrting=Label(f1,font=("arial", 20, "bold"), text="Shirting:",bg="powder blue", fg="black",anchor="w",relief=GROOVE).grid(row=1,column=0)
shirts=Entry(f1,font=("arial", 16, "italic"), bd=10, textvariable=shirt, insertwidth=1,bg="black",fg="white", justify="left").grid(row=2,column=0)
owner=Button(root,padx=16,pady=16, font=("arial",12, "bold"),text="info", bd=8,bg="black",command=ano_win1,fg="white",relief=RAISED).pack(side=LEFT)
yes=Button(root,padx=16,pady=16,font=("arial",12, "bold"),text="Done",bd=8,bg="black", fg="white", command=_calculation,relief=RAISED).pack(side=RIGHT)
panting=Label(f1,font=("arial",20, "bold"), text="pant_mm:", bg="powder blue",fg="black",anchor="w",relief=GROOVE).grid(row=1,column=1)
pantx=Entry(f1,font=("arial",16, "bold"), textvariable=pant, insertwidth=1, bd=10,bg="black",fg="white", justify="left").grid(row=2,column=1)
sales=Label(f1,font=("arial",16, "bold"), text="sales_total:",bg="powder blue",fg="black",anchor="w",bd=8,relief=GROOVE).grid(row=1,column=2)
salex=Entry(f1,font=("arial",16, "bold"),bg="black",fg="white",textvariable=sale,insertwidth=1,bd=10,justify="left").grid(row=2,column=2)
buying=Label(f1,font=("arial",16, "bold"), text="buying_something: ",bg="powder blue",fg="black", anchor="e", relief=GROOVE).grid(row=3,column=0)
buyx=Entry(f1,font=("arial", 16, "bold"), textvariable=buy, insertwidth=1, bd=10,bg="black", fg="white", justify="left").grid(row=4,column=0)
Bank_Total=Label(f1,font=("arial",16,"bold"),text="Bank_Deposite: ", bg="powder blue", fg="black", anchor="e",relief=GROOVE).grid(row=3, column=1)
depositex=Entry(f1,font=("arial",16,"bold"),bd=10, textvariable=deposite, bg="black", fg="white", justify="left").grid(row=4, column=1)
lblBankwith=Label(f1, font=("arial", 16, "bold"),fg="black",bg="powder blue",text="Bank_Withdraw", anchor="e",relief=GROOVE).grid(row=3,column=2)
withdrawx=Entry(f1,font=("arial",16, "bold"),bd=10, fg="white",bg="black", textvariable=withdraw, insertwidth=1).grid(row=4,column=2)
coating=Label(f1, font=("arial", 16, "bold"),text="coat_mm:", bg="powder blue",fg="black",anchor="e").grid(row=5,column=0)
coatx=Entry(f1, font=("arial", 16, "bold"), bg="black", fg="white",
textvariable=coat, insertwidth=1, justify="left",bd=10).grid(row=6,column=0)
lablsari=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="sari mm:", fg="black",anchor="e",relief=GROOVE).grid(row=5,column=1)
sarix=Entry(f1, font=("arial", 16, "bold"), bg="black",bd=10, fg="white",textvariable=sari, insertwidth=1).grid(row=6,column=1)
buying=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="buy_info:",fg="black",anchor="e",relief=GROOVE).grid(row=7,column=0)
buyx=Entry(f1,font=("arial",16, "bold"),bd=8, fg="white",bg="black",textvariable=buy,insertwidth=1).grid(row=8,column=0)
outgoing =Label(f1, font=("arial", 16, "bold"), bg="powder blue", text="outgoing:", fg="black",anchor="e",relief=GROOVE).grid(row=7,column=1)
outx=Entry(f1,font=("arial", 16, "bold"),textvariable=out, bd=8,fg="white",bg="black",insertwidth=1).grid(row=8,column=1)
ordering=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="order_info:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=0)
orderx=Entry(f1,font=("arial",16,"bold"),insertwidth=1, textvariable=order,bd=8,fg="white",bg="black").grid(row=10,column=0)
lblcustomer=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="cus_name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=1)
no=Entry(f1,font=("arial",16, "bold"),bd=8,bg="black",fg="white",insertwidth=1, textvariable=cus_name).grid(row=10,column=1)
lblmonthly=Label(f1, font=("arial",16,"bold"),bg="powder blue",text="monthly:",fg="black",anchor="e",relief=GROOVE).grid(row=5,column=2)
monthly=StringVar()
monthx=Entry(f1,font=("arial",16,"bold"),show="blank",bg="black",textvariable=monthly,insertwidth=1,fg="white",bd=10).grid(row=6,column=2)
lbltotal=Label(f1, font=("arial", 16, "bold"),bg="powder blue",text="Total:",fg="black").grid(row=7,column=2)
totalx=Entry(f1, font=("arial", 16, "bold"),bg="black",textvariable=total,fg="white",insertwidth=1,bd=10).grid(row=8,column=2)
lblemployee = Label(f1,font=("arial", 16, "bold"),bg="powder blue",text="employee name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=2)
employx= Entry(f1,font=("arial", 16,"bold"),textvariable=employee,insertwidth=1,bg="black",fg="white",bd=10).grid(row=10,column=2)
###############################database for the project######################
'''def __database():
db = TinyDB("/records.json")
#print(monthly)
#print(b)
#fuck = c.get()
a = order_bef.get()
b = stock_full.get()
c = shrting.get()
d = pant.get()
e = sari.get()
f = order_info.get()
g = delivery_report.get()
h = daily_info.get()
i = sales.get()
j = buy.get()
k = total_bank.get()
l = bank_deposite.get()
m = bank_withdraw.get()
n = due_amount.get()
o = order_info.get()
p = daily_cash.get()
q = cus_name.get()
r = cus_no.get()
s = employee.get()
files = {"a": "", "b": "", "c": "", "d": "", "e": "", "f": "", "g": "", "h": "", "i": "", "j": ""
, "k": "", "l": "", "m": "", "n": "", "o": "", "p": "", "q": "", "r": "", "s": ""}
db.insert({"total": a }),
db.insert({"regrds":"reference"}),
db.insert({"day_income":"billion"}),
db.insert({"day_outgoing":"billout"}),
db.insert({"bankdeposit":"bankdepo"}),
db.insert({"full_stock":"stock"}),
db.insert({"shirt_mm":"shirt"}),
db.insert({"bankwithdraw":"bankwith"}),
db.insert({"pantmm":"pant"}),
db.insert({"sarimm":"sari"}),
db.insert({"orderday":"orderinfo"}),
db.insert({"salling":"sales"}),
db.insert({"buying":"buy"}),
db.insert({"customern":"customer"}),
db.insert({"monthly_info":"monthly"}),
db.insert({"totaldy":"total"}),
db.insert({"employeid":"employee"})
for db in range(1):
print(db)
files = list(files)
file = open("/file.txt", "wb")
da = ""
for data in files:
if len(data) != 0:
print("this is are the files written in python\\n check the file.txt for debug ")
da += data
print(data)
da = int(da)
file.write(da)
try:
file = open("/records.txt", "r")
except:
print("creating the file from script {}".format(__file__))
file = open("/records.txt","w")
finally:
pass
check = os.path.isfile("/records.txt")
if check:
for item in db:
data = open("/records.txt","wb")
#with open("/records.txt","wb") as file:
#pickle.dump(item, data)
#file.close()
#file1 = pickle.load(file)
if len(item) == len(file1):
break
if item != file:
#item = str(item)
file.write("%s" %(item))
time.sleep(1)
print("done writing to the file")
#for item in db:
with open("/records.txt", "rb") as file:
reading = file1
if len(reading) != None:
print("its printed")
print(reading)
file.close()
#db.insert({"name":"<NAME>"})
name = Query()
#db(name.type == "changed")
d = datetime.now()
month = str(d.month)
day = str(d.day)
year = str(d.year)
hour = str(d.hour)
minute = str(d.minute)
second = str(d.second)
between = str(":")'''
'''def __time(infos):
time = datetime.now()
day = str(time.day)
month = str(time.month)
hour = str(time.hour)
second = str(time.second)
year = str(time.year)
minute = str(time.minute)
#assuming the infos as the order taken that will be notified before the
#60 hours
#changing all the formats to the seconds that will be easy for the #calculation
#first calculating seconds in one day that will ease all the further operations
daysec = (24*60) * 60 * 60
###
##this is will be easy now
yearSec = daysec * 365
month = daysec * 30
daySec = daysec
hourSec = 60 * 60 * 60
minuteSec = 60 * 60
files = {"a":"", "b":"","c":"","d":"","e":"","f":"","g":"","h":"","i":"","j":""
,"k":"","l":"","m":"","n":"","o":"","p":"","q":"","r":"","s":""}'''
#files = list(files)
'''for data in files:
if len(data) != 0:
print(data)'''
#lenght = len(db)
##this will show the recorded bill numbers
def show_bills():
    ##assuming the variable as bill number .get var
    bill = bill_in.get()
    billo = bill_out.get()
    bills = TinyDB("/bills.json")
    if bill or billo:
        bills.insert({"billInput": bill, "billOutput": billo})
win = Toplevel()
win.title("bills")
winF = Frame(win, bg="black",relief=SUNKEN).pack()
winE = Entry(winF, insertwidth=10,insertheight=10,fg="white",bg="black",textvariable=bills).pack()
win.mainloop()
#l
# command=bill_in).pack(anchor=NE)
root.mainloop()
#__database()
#add1=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
#text="+",bg="powder blue", command=lambda: btnClick("+")).grid(row=3,column=6)
#btn10=Button(f2,padx=16,padx=16, fg="blue", font("arial",5,"bold"),
# text="rupen",bg="powder blue", command=rupen).grid(row=3,column=5)
#def function():
# pass():
# pass main():
# root.mainloop()
#for the revies of the follow in the sorry of the same of the tkinter in the main function of the sollow
#main() | en | 0.488896 | #from database1 import * #f3= Frame(root,width=1600,height=300,fg="blue", bg="powder blue", relief=SUNKEN).pack(side=Bottom) #==========================================================Time======================================= #datetime=Label(Tops,font("arial",20,"bold"),text=nowTime,bd=10 ,bg="black", #fg="white", anchor="w").pack() #====================================debugged======================== #order before the 60 #########################main_gate###################### #setting the filename using the loop #file = open("1{}".format()) for i in range(5): if os.path.isfile(i): pass else: file = open("{}.txt".format(i+1), "w+") created with name {}".format(file)) #creating the filenames with append =1 if the name already existed #if the newfilename exists #file = open("{}".format(new_file_name), "w+") #file = open("{}".format(file_name), "w+") #after wards set the total from here total.set #++++++++++++++++++++++++++++++Varibales_inset+++++++++++++++++++++++++++++++++ ###############################class of algoriths######################### #++++++++++++++++++++++++++++++++++++++++tinydb example++++++++++++++++++++++ #db = TinyDB("/databse/d4ta.json") #db.insert({"cus_number":"98938232", "cus_name":"rupen"}) #def no_y(): # lis = db.all() ################Info=============== #===========================================================Calculator================================== def current_dir(): import os import sys DIR = os.getcwd() print(DIR) lblInfo = Label(Tops, font=("arial",60, "italic"),text=current_dir,fg="black",bg="powder blue",bd=10, anchor="W") lblInfo.pack() #DIR = dir #return dir #randomBtn=Button(f1,pady=16,padx=16,bd=8,bg="powder blue", text="C_dir", command=lambda: current_dir(dir)).pack(side=TOP) def malware_activate(): global cmd_active if "rupen" in cmd_active: if "rupen" in cmd_active[1]: if "ronley" in cmd_active[2]: #==============================another windows about me===================== #this is going to be the window in which there is nothing in the function #of the system on the support in teh main loop #there is no limit in the system of teh #win1.configure(bg="silver") def _image(image): image = image.set(imageloc) return image #image = Image.open("/root/Desktop/Desktop/anonymous/5.png") imae = Label(win1,font=("arial", 20,"italic"),width=300, height=168,bg="black",fg="white", text=image,relief=FLAT).pack() win1.mainloop() #=============================getting all the infos ======================== #==++++===========================title============================= ###########################sending emails############################ import smtplib gmail = smtplib.SMTP("smtp.gmail.com", 587) gmail.starttls() _file = open("/root/Desktop/Desktop/python/") gmail.login("username", "password") msg = "YOUR MESSAGE" gmail.sendmail("your email adress", "the") gmail.quit() #btnEmail = Button(root,font=("arial", 16, "bold"), bg="black",fg="white",text="email",command=lambda: __send_email(),relief=GROOVE).pack() #================================next section=========================== #fucking mazing yr coding #def yes_y(): # rupe = Toplevel(root) # rupe.title("this is second window") # return #def no_y(): #nos = Toplevel(root) #nos.title("this is nos window") #return #!!!!!!!!!!!!!!!!!!!!!!additions!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #btn2=Button(f2,padx=16,pady=16,bd=8,fg="black",text="2",bg="dim gray", command=lambda: btnClick(2)).grid(row=5,column=3) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="red",bg="red",anchor="w",relief=RIDGE).grid(row=0,column=0) #txtReference=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,bg="red",fg="white", justify = "right").grid(row=0,column=1) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #img = "/root/Desktop/Desktop/python/projects/prj1_Botik/1.jpg" #root.ima = Image.open(img) #Label (root,bg="white",width=120,height=120, image=ima).pack() ###############################database for the project###################### def __database(): db = TinyDB("/records.json") #print(monthly) #print(b) #fuck = c.get() a = order_bef.get() b = stock_full.get() c = shrting.get() d = pant.get() e = sari.get() f = order_info.get() g = delivery_report.get() h = daily_info.get() i = sales.get() j = buy.get() k = total_bank.get() l = bank_deposite.get() m = bank_withdraw.get() n = due_amount.get() o = order_info.get() p = daily_cash.get() q = cus_name.get() r = cus_no.get() s = employee.get() files = {"a": "", "b": "", "c": "", "d": "", "e": "", "f": "", "g": "", "h": "", "i": "", "j": "" , "k": "", "l": "", "m": "", "n": "", "o": "", "p": "", "q": "", "r": "", "s": ""} db.insert({"total": a }), db.insert({"regrds":"reference"}), db.insert({"day_income":"billion"}), db.insert({"day_outgoing":"billout"}), db.insert({"bankdeposit":"bankdepo"}), db.insert({"full_stock":"stock"}), db.insert({"shirt_mm":"shirt"}), db.insert({"bankwithdraw":"bankwith"}), db.insert({"pantmm":"pant"}), db.insert({"sarimm":"sari"}), db.insert({"orderday":"orderinfo"}), db.insert({"salling":"sales"}), db.insert({"buying":"buy"}), db.insert({"customern":"customer"}), db.insert({"monthly_info":"monthly"}), db.insert({"totaldy":"total"}), db.insert({"employeid":"employee"}) for db in range(1): print(db) files = list(files) file = open("/file.txt", "wb") da = "" for data in files: if len(data) != 0: print("this is are the files written in python\\n check the file.txt for debug ") da += data print(data) da = int(da) file.write(da) try: file = open("/records.txt", "r") except: print("creating the file from script {}".format(__file__)) file = open("/records.txt","w") finally: pass check = os.path.isfile("/records.txt") if check: for item in db: data = open("/records.txt","wb") #with open("/records.txt","wb") as file: #pickle.dump(item, data) #file.close() #file1 = pickle.load(file) if len(item) == len(file1): break if item != file: #item = str(item) file.write("%s" %(item)) time.sleep(1) print("done writing to the file") #for item in db: with open("/records.txt", "rb") as file: reading = file1 if len(reading) != None: print("its printed") print(reading) file.close() #db.insert({"name":"<NAME>"}) name = Query() #db(name.type == "changed") d = datetime.now() month = str(d.month) day = str(d.day) year = str(d.year) hour = str(d.hour) minute = str(d.minute) second = str(d.second) between = str(":") def __time(infos): time = datetime.now() day = str(time.day) month = str(time.month) hour = str(time.hour) second = str(time.second) year = str(time.year) minute = str(time.minute) #assuming the infos as the order taken that will be notified before the #60 hours #changing all the formats to the seconds that will 
be easy for the #calculation #first calculating seconds in one day that will ease all the further operations daysec = (24*60) * 60 * 60 ### ##this is will be easy now yearSec = daysec * 365 month = daysec * 30 daySec = daysec hourSec = 60 * 60 * 60 minuteSec = 60 * 60 files = {"a":"", "b":"","c":"","d":"","e":"","f":"","g":"","h":"","i":"","j":"" ,"k":"","l":"","m":"","n":"","o":"","p":"","q":"","r":"","s":""} #files = list(files) for data in files: if len(data) != 0: print(data) #lenght = len(db) ##this will show the recorded bill numbers ##assuming the variable as bill number .get var #l # command=bill_in).pack(anchor=NE) #__database() #add1=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"), #text="+",bg="powder blue", command=lambda: btnClick("+")).grid(row=3,column=6) #btn10=Button(f2,padx=16,padx=16, fg="blue", font("arial",5,"bold"), # text="rupen",bg="powder blue", command=rupen).grid(row=3,column=5) #def function(): # pass(): # pass main(): # root.mainloop() #for the revies of the follow in the sorry of the same of the tkinter in the main function of the sollow #main() | 2.681939 | 3 |
mmdet/models/anchor_heads/embedding_nnms_head_v2_limited.py | Lanselott/mmdetection | 0 | 5981 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob
from IPython import embed
INF = 1e8
@HEADS.register_module
class EmbeddingNNmsHeadV2limited(nn.Module):
"""
    Embedding-NMS variant of the Fully Convolutional One-Stage Object
    Detection (FCOS) head from [1]_.
    Bounding boxes are predicted at each pixel without anchor boxes. On top
    of the FCOS classification and regression branches, this head predicts a
    per-location embedding trained with pull/push losses, so that locations
    belonging to the same object share similar embeddings and duplicate
    detections can be suppressed.
    References:
        .. [1] https://arxiv.org/abs/1904.01355
    Example:
        >>> self = EmbeddingNNmsHeadV2limited(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, embedding_pred = self.forward(feats)
        >>> assert len(cls_score) == len(self.scales)
"""
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
embedding_convs_num=2,
strides=(4, 8, 16, 32, 64),
delta=2.0,
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(EmbeddingNNmsHeadV2limited, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.embedding_convs_num = embedding_convs_num
self.strides = strides
self.delta = delta
self.regress_ranges = regress_ranges
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.embedding_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.embedding_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fcos_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.embedding_cls = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
# Pull and Push loss
self.pull_loss = nn.MSELoss()
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
normal_init(self.fcos_reg, std=0.01)
normal_init(self.embedding_cls, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
reg_feat = x
embedding_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.fcos_cls(cls_feat)
for embedding_layer in self.embedding_convs:
embedding_feat = embedding_layer(embedding_feat)
embedding_pred = self.embedding_cls(embedding_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
return cls_score, bbox_pred, embedding_pred
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
embedding_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(embedding_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,
gt_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores and bbox_preds
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_embedding_preds = [
embedding_feat.permute(0, 2, 3, 1).reshape(-1, 1)
for embedding_feat in embedding_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_embedding_preds = torch.cat(flatten_embedding_preds)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
avg_factor=num_pos + num_imgs) # avoid num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
pos_iou_scores = bbox_overlaps(pos_decoded_bbox_preds, pos_decoded_target_preds, is_aligned=True).clamp(min=1e-6)
max_scores, max_inds = flatten_cls_scores.sigmoid().max(1)
pos_embedding_preds = flatten_embedding_preds[pos_inds]
# Instance level op
dist_conf_mask_list = []
# generate instance levels index
instance_counter = torch.zeros(num_pos, device=pos_points.device)
remove = torch.zeros(num_pos, device=pos_points.device)
obj_id = 0
# NOTE: get mask for each obj
for i in range(len(pos_decoded_target_preds)):
if remove[i] == 0:
current_bbox = pos_decoded_target_preds[i]
mask = ((pos_decoded_target_preds == current_bbox).sum(1)==4).nonzero()
instance_counter[mask] = obj_id
remove[mask] = 1
obj_id += 1
instance_counter = instance_counter.int()
obj_ids = torch.bincount(instance_counter).nonzero().int()
for obj_id in obj_ids:
dist_conf_mask_list.append((instance_counter==obj_id).float())
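            # Illustrative sketch (not part of the original code): with three positive
            # locations whose decoded targets are
            #   pos_decoded_target_preds = [[0, 0, 10, 10],
            #                               [0, 0, 10, 10],
            #                               [5, 5, 20, 20]]
            # the grouping above gives instance_counter == [0, 0, 1] and
            # dist_conf_mask_list == [tensor([1., 1., 0.]), tensor([0., 0., 1.])],
            # i.e. one binary mask per ground-truth instance.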
# Opt for each obj
objs_embedding_list = []
obj_embedding_means_list = []
obj_embedding_means_expand_list = []
for dist_conf_mask in dist_conf_mask_list:
obj_mask_inds = dist_conf_mask.nonzero().reshape(-1)
obj_embedding_preds = pos_embedding_preds[obj_mask_inds]
objs_embedding_list.append(obj_embedding_preds)
# mean value
embedding_mean = obj_embedding_preds.sum() / obj_embedding_preds.shape[0]
obj_embedding_means_list.append(embedding_mean)
obj_embedding_means_expand_list.append(torch.zeros_like(obj_embedding_preds).fill_(embedding_mean))
# pull loss
theta = 1
embedding_expand_means = torch.cat(obj_embedding_means_expand_list)
pull_embedding = torch.cat(objs_embedding_list)
pull_loss = theta * self.pull_loss(pull_embedding, embedding_expand_means)
# push loss
N_samples = len(dist_conf_mask_list)
push_loss = 0
for obj_j_embedding_mean in obj_embedding_means_list:
for obj_k_embedding_mean in obj_embedding_means_list:
if torch.equal(obj_j_embedding_mean, obj_k_embedding_mean):
continue
else:
push_dist = self.delta - torch.abs(obj_k_embedding_mean - obj_j_embedding_mean)
push_loss += torch.max(push_dist, torch.zeros(1, device=push_dist.device))
push_loss = push_loss / N_samples**2
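            # Worked example (illustrative only, not from the original code): with two
            # objects whose positive-location embeddings are [1.0, 1.2] and [3.0], the
            # per-object means are 1.1 and 3.0, so
            #   pull loss ~ MSE([1.0, 1.2, 3.0], [1.1, 1.1, 3.0]) = (0.01 + 0.01 + 0) / 3
            #   push loss ~ sum over ordered pairs of max(delta - |mean_k - mean_j|, 0) / N**2
            #             = 2 * max(2.0 - 1.9, 0) / 4 = 0.05   (with the default delta=2.0)
            # so embeddings of the same object are pulled together while the means of
            # different objects are pushed at least `delta` apart.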
# iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds)
else:
loss_bbox = pos_bbox_preds.sum()
push_loss = pos_bbox_preds.sum()
pull_loss = pos_bbox_preds.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
push_loss=push_loss,
pull_loss=pull_loss)
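    # Note (assumption, not stated in this file): with mmdet's standard trainer every
    # returned key containing "loss" is reduced and summed, so the optimized objective
    # is roughly
    #   total = loss_cls + loss_bbox + push_loss + pull_loss
    # Any extra weighting of the embedding terms (beyond `theta` for the pull term and
    # `delta` for the push margin) would have to be applied before this dict is returned.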
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, points in zip(
cls_scores, bbox_preds, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
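    # Illustrative sketch (not part of the original code): for a 2x3 feature map with
    # stride 8, get_points_single returns the centres of the stride-sized cells:
    #   points = [[ 4,  4], [12,  4], [20,  4],
    #             [ 4, 12], [12, 12], [20, 12]]
    # i.e. x varies fastest and each point is offset by stride // 2 from the cell origin.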
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# get labels and bbox_targets of each image
labels_list, bbox_targets_list = multi_apply(
self.fcos_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges)
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
# if there are still more than one objects for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, bbox_targets
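# -----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): a toy check of
# the (left, top, right, bottom) encoding that fcos_target_single builds above.
# All tensor values below are made-up assumptions chosen for easy arithmetic.
if __name__ == '__main__':
    import torch
    _pt = torch.tensor([[50., 60.]])              # one location (x, y)
    _gt = torch.tensor([[20., 30., 120., 140.]])  # one gt box (x1, y1, x2, y2)
    _l = _pt[:, 0] - _gt[:, 0]                    # 30: distance to left edge
    _t = _pt[:, 1] - _gt[:, 1]                    # 30: distance to top edge
    _r = _gt[:, 2] - _pt[:, 0]                    # 70: distance to right edge
    _b = _gt[:, 3] - _pt[:, 1]                    # 80: distance to bottom edge
    # same stacking order as in fcos_target_single
    print(torch.stack((_l, _t, _r, _b), -1))      # tensor([[30., 30., 70., 80.]])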
| import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob
from IPython import embed
INF = 1e8
@HEADS.register_module
class EmbeddingNNmsHeadV2limited(nn.Module):
"""
Fully Convolutional One-Stage Object Detection head from [1]_.
    The FCOS head does not use anchor boxes. Instead, bounding boxes are
    predicted at each pixel and a centerness measure is used to suppress
low-quality predictions.
References:
.. [1] https://arxiv.org/abs/1904.01355
Example:
>>> self = FCOSHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_score, bbox_pred, centerness = self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
"""
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
embedding_convs_num=2,
strides=(4, 8, 16, 32, 64),
delta=2.0,
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(EmbeddingNNmsHeadV2limited, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.embedding_convs_num = embedding_convs_num
self.strides = strides
self.delta = delta
self.regress_ranges = regress_ranges
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.embedding_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.embedding_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fcos_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.embedding_cls = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
# Pull and Push loss
self.pull_loss = nn.MSELoss()
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
normal_init(self.fcos_reg, std=0.01)
normal_init(self.embedding_cls, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
reg_feat = x
embedding_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.fcos_cls(cls_feat)
for embedding_layer in self.embedding_convs:
embedding_feat = embedding_layer(embedding_feat)
embedding_pred = self.embedding_cls(embedding_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
return cls_score, bbox_pred, embedding_pred
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
embedding_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(embedding_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,
gt_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores and bbox_preds
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_embedding_preds = [
embedding_feat.permute(0, 2, 3, 1).reshape(-1, 1)
for embedding_feat in embedding_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_embedding_preds = torch.cat(flatten_embedding_preds)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
            avg_factor=num_pos + num_imgs)  # add num_imgs so avg_factor is never 0 when there are no positives
pos_bbox_preds = flatten_bbox_preds[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
pos_iou_scores = bbox_overlaps(pos_decoded_bbox_preds, pos_decoded_target_preds, is_aligned=True).clamp(min=1e-6)
max_scores, max_inds = flatten_cls_scores.sigmoid().max(1)
pos_embedding_preds = flatten_embedding_preds[pos_inds]
# Instance level op
dist_conf_mask_list = []
# generate instance levels index
instance_counter = torch.zeros(num_pos, device=pos_points.device)
remove = torch.zeros(num_pos, device=pos_points.device)
obj_id = 0
# NOTE: get mask for each obj
for i in range(len(pos_decoded_target_preds)):
if remove[i] == 0:
current_bbox = pos_decoded_target_preds[i]
mask = ((pos_decoded_target_preds == current_bbox).sum(1)==4).nonzero()
instance_counter[mask] = obj_id
remove[mask] = 1
obj_id += 1
instance_counter = instance_counter.int()
obj_ids = torch.bincount(instance_counter).nonzero().int()
for obj_id in obj_ids:
dist_conf_mask_list.append((instance_counter==obj_id).float())
# Opt for each obj
objs_embedding_list = []
obj_embedding_means_list = []
obj_embedding_means_expand_list = []
for dist_conf_mask in dist_conf_mask_list:
obj_mask_inds = dist_conf_mask.nonzero().reshape(-1)
obj_embedding_preds = pos_embedding_preds[obj_mask_inds]
objs_embedding_list.append(obj_embedding_preds)
# mean value
embedding_mean = obj_embedding_preds.sum() / obj_embedding_preds.shape[0]
obj_embedding_means_list.append(embedding_mean)
obj_embedding_means_expand_list.append(torch.zeros_like(obj_embedding_preds).fill_(embedding_mean))
            # embed()  # NOTE: leftover IPython debugging hook; disabled so loss() runs without stopping
# pull loss
theta = 1
embedding_expand_means = torch.cat(obj_embedding_means_expand_list)
pull_embedding = torch.cat(objs_embedding_list)
pull_loss = theta * self.pull_loss(pull_embedding, embedding_expand_means)
# push loss
N_samples = len(dist_conf_mask_list)
push_loss = 0
for obj_j_embedding_mean in obj_embedding_means_list:
for obj_k_embedding_mean in obj_embedding_means_list:
if torch.equal(obj_j_embedding_mean, obj_k_embedding_mean):
continue
else:
push_dist = self.delta - torch.abs(obj_k_embedding_mean - obj_j_embedding_mean)
push_loss += torch.max(push_dist, torch.zeros(1, device=push_dist.device))
push_loss = push_loss / N_samples**2
# iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds)
else:
loss_bbox = pos_bbox_preds.sum()
push_loss = pos_bbox_preds.sum()
pull_loss = pos_bbox_preds.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
push_loss=push_loss,
pull_loss=pull_loss)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, points in zip(
cls_scores, bbox_preds, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
            list[Tensor]: points (x, y) of each feature-map level.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# get labels and bbox_targets of each image
labels_list, bbox_targets_list = multi_apply(
self.fcos_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges)
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
# if there are still more than one objects for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, bbox_targets
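# -----------------------------------------------------------------------------
# Editor's illustrative sketch (not from the original repo): the pull/push
# embedding terms computed in loss() above, reproduced on made-up 1-D
# embeddings for two objects so the arithmetic is easy to follow.
if __name__ == '__main__':
    import torch
    _delta = 2.0
    _obj_embs = [torch.tensor([0.9, 1.1, 1.0]), torch.tensor([3.2, 2.8])]
    _means = [e.mean() for e in _obj_embs]
    # pull: every positive embedding is drawn toward its own object's mean (MSE)
    _pull = torch.cat([(e - m) ** 2 for e, m in zip(_obj_embs, _means)]).mean()
    # push: object means are pushed at least `delta` apart, averaged over N^2 pairs
    _push = torch.zeros(1)
    for _j, _mj in enumerate(_means):
        for _k, _mk in enumerate(_means):
            if _j != _k:
                _push = _push + torch.clamp(_delta - torch.abs(_mk - _mj), min=0)
    _push = _push / len(_means) ** 2
    print(float(_pull), float(_push))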
| en | 0.764753 | Fully Convolutional One-Stage Object Detection head from [1]_. The FCOS head does not use anchor boxes. Instead bounding boxes are predicted at each pixel and a centerness measure is used to supress low-quality predictions. References: .. [1] https://arxiv.org/abs/1904.01355 Example: >>> self = FCOSHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred, centerness = self.forward(feats) >>> assert len(cls_score) == len(self.scales) # Pull and Push loss # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 # flatten cls_scores and bbox_preds # repeat points to align with bbox_preds # avoid num_pos is 0 # Instance level op # generate instance levels index # NOTE: get mask for each obj # Opt for each obj # mean value # pull loss # push loss # iou loss Get points according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. dtype (torch.dtype): Type of points. device (torch.device): Device of points. Returns: tuple: points of each image. # expand regress ranges to align with points # concat all levels points and regress ranges # get labels and bbox_targets of each image # split to per img, per level # concat per level image # TODO: figure out why these two are different # areas = areas[None].expand(num_points, num_gts) # condition1: inside a gt bbox # condition2: limit the regression range for each location # if there are still more than one objects for a location, # we choose the one with minimal area | 2.055353 | 2 |
firefly_flask/app/models.py | Haehnchen/trivago-firefly | 0 | 5982 | from . import db
from sqlalchemy.dialects.mysql import LONGTEXT
class Search(db.Model):
__tablename__ = 'spots'
id = db.Column(db.Integer, primary_key=True)
search_string = db.Column(db.Text)
lat = db.Column(db.Float)
lon = db.Column(db.Float)
location_name = db.Column(db.Text)
json_result = db.Column(LONGTEXT)
class Photo(db.Model):
__tablename__ = 'photos'
id = db.Column(db.Integer, primary_key=True)
spotname = db.Column(db.Text)
source_id = db.Column(db.Text)
latitude = db.Column(db.Float)
longitude = db.Column(db.Float)
tags = db.Column(db.Text)
views = db.Column(db.Integer)
favourites = db.Column(db.Integer)
comments = db.Column(db.Integer)
username = db.Column(db.Text)
photo_url = db.Column(db.Text)
search_id = db.Column(db.ForeignKey(Search.id),nullable=False)
| from . import db
from sqlalchemy.dialects.mysql import LONGTEXT
class Search(db.Model):
__tablename__ = 'spots'
id = db.Column(db.Integer, primary_key=True)
search_string = db.Column(db.Text)
lat = db.Column(db.Float)
lon = db.Column(db.Float)
location_name = db.Column(db.Text)
json_result = db.Column(LONGTEXT)
class Photo(db.Model):
__tablename__ = 'photos'
id = db.Column(db.Integer, primary_key=True)
spotname = db.Column(db.Text)
source_id = db.Column(db.Text)
latitude = db.Column(db.Float)
longitude = db.Column(db.Float)
tags = db.Column(db.Text)
views = db.Column(db.Integer)
favourites = db.Column(db.Integer)
comments = db.Column(db.Integer)
username = db.Column(db.Text)
photo_url = db.Column(db.Text)
search_id = db.Column(db.ForeignKey(Search.id),nullable=False)
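# -----------------------------------------------------------------------------
# Editor's sketch of how these models might be used (hypothetical values; it
# assumes a Flask app context with this `db` bound and the tables created).
def _demo_insert_search_with_photo():
    s = Search(search_string="beach bars", lat=28.46, lon=-16.25,
               location_name="Santa Cruz", json_result="{}")
    db.session.add(s)
    db.session.flush()  # assigns s.id so the photo row can reference it
    db.session.add(Photo(spotname="Playa", source_id="42", latitude=28.46,
                         longitude=-16.25, tags="beach,sunset", views=10,
                         favourites=2, comments=1, username="demo_user",
                         photo_url="http://example.com/photo.jpg",
                         search_id=s.id))
    db.session.commit()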
| none | 1 | 2.58183 | 3 |
|
plotly_basic_plots/line_chart2.py | HarishOsthe/Plotly_Dash_Practice_Codes | 0 | 5983 |
import pandas as pd
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go
df= pd.read_csv("Data/nst-est2017-alldata.csv")
df2=df[df["DIVISION"] == '1']
df2.set_index("NAME",inplace=True)
list_of_pop_col=[col for col in df2.columns if col.startswith('POP')]
df2=df2[list_of_pop_col]
data=[go.Scatter(x=df2.columns,
y=df2.loc[name],
mode='lines',
name=name) for name in df2.index]
pyo.plot(data) | import pandas as pd
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go
df= pd.read_csv("Data/nst-est2017-alldata.csv")
df2=df[df["DIVISION"] == '1']
df2.set_index("NAME",inplace=True)
list_of_pop_col=[col for col in df2.columns if col.startswith('POP')]
df2=df2[list_of_pop_col]
data=[go.Scatter(x=df2.columns,
y=df2.loc[name],
mode='lines',
name=name) for name in df2.index]
pyo.plot(data) | none | 1 | 3.088481 | 3 |
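# -----------------------------------------------------------------------------
# Editor's illustrative variant of the chart above using a tiny inline frame
# instead of the census CSV (column names and values are made-up assumptions).
import pandas as pd
import plotly.offline as pyo
import plotly.graph_objs as go
_demo = pd.DataFrame({"POPESTIMATE2015": [1000, 2000],
                      "POPESTIMATE2016": [1100, 2150]},
                     index=["StateA", "StateB"])
_traces = [go.Scatter(x=_demo.columns, y=_demo.loc[_name],
                      mode="lines", name=_name) for _name in _demo.index]
# pyo.plot(_traces)  # commented out: opens a browser window when run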
|
tests/test_markup.py | samdoran/sphinx | 4,973 | 5984 |
"""
test_markup
~~~~~~~~~~~
Test various Sphinx-specific markup extensions.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import pytest
from docutils import frontend, nodes, utils
from docutils.parsers.rst import Parser as RstParser
from sphinx import addnodes
from sphinx.builders.html.transforms import KeyboardTransform
from sphinx.builders.latex import LaTeXBuilder
from sphinx.roles import XRefRole
from sphinx.testing.util import Struct, assert_node
from sphinx.transforms import SphinxSmartQuotes
from sphinx.util import docutils, texescape
from sphinx.util.docutils import sphinx_domains
from sphinx.writers.html import HTMLTranslator, HTMLWriter
from sphinx.writers.latex import LaTeXTranslator, LaTeXWriter
@pytest.fixture
def settings(app):
texescape.init() # otherwise done by the latex builder
optparser = frontend.OptionParser(
components=(RstParser, HTMLWriter, LaTeXWriter))
settings = optparser.get_default_values()
settings.smart_quotes = True
settings.env = app.builder.env
settings.env.temp_data['docname'] = 'dummy'
settings.contentsname = 'dummy'
settings.rfc_base_url = 'http://tools.ietf.org/html/'
domain_context = sphinx_domains(settings.env)
domain_context.enable()
yield settings
domain_context.disable()
@pytest.fixture
def new_document(settings):
def create():
document = utils.new_document('test data', settings)
document['file'] = 'dummy'
return document
return create
@pytest.fixture
def inliner(new_document):
document = new_document()
document.reporter.get_source_and_line = lambda line=1: ('dummy.rst', line)
return Struct(document=document, reporter=document.reporter)
@pytest.fixture
def parse(new_document):
def parse_(rst):
document = new_document()
parser = RstParser()
parser.parse(rst, document)
SphinxSmartQuotes(document, startnode=None).apply()
for msg in document.traverse(nodes.system_message):
if msg['level'] == 1:
msg.replace_self([])
return document
return parse_
# since we're not resolving the markup afterwards, these nodes may remain
class ForgivingTranslator:
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
class ForgivingHTMLTranslator(HTMLTranslator, ForgivingTranslator):
pass
class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator):
pass
@pytest.fixture
def verify_re_html(app, parse):
def verify(rst, html_expected):
document = parse(rst)
KeyboardTransform(document).apply()
html_translator = ForgivingHTMLTranslator(document, app.builder)
document.walkabout(html_translator)
html_translated = ''.join(html_translator.fragment).strip()
assert re.match(html_expected, html_translated), 'from ' + rst
return verify
@pytest.fixture
def verify_re_latex(app, parse):
def verify(rst, latex_expected):
document = parse(rst)
app.builder = LaTeXBuilder(app)
app.builder.set_environment(app.env)
app.builder.init()
theme = app.builder.themes.get('manual')
latex_translator = ForgivingLaTeXTranslator(document, app.builder, theme)
latex_translator.first_document = -1 # don't write \begin{document}
document.walkabout(latex_translator)
latex_translated = ''.join(latex_translator.body).strip()
assert re.match(latex_expected, latex_translated), 'from ' + repr(rst)
return verify
@pytest.fixture
def verify_re(verify_re_html, verify_re_latex):
def verify_re_(rst, html_expected, latex_expected):
if html_expected:
verify_re_html(rst, html_expected)
if latex_expected:
verify_re_latex(rst, latex_expected)
return verify_re_
@pytest.fixture
def verify(verify_re_html, verify_re_latex):
def verify_(rst, html_expected, latex_expected):
if html_expected:
verify_re_html(rst, re.escape(html_expected) + '$')
if latex_expected:
verify_re_latex(rst, re.escape(latex_expected) + '$')
return verify_
@pytest.fixture
def get_verifier(verify, verify_re):
v = {
'verify': verify,
'verify_re': verify_re,
}
def get(name):
return v[name]
return get
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
# pep role
'verify',
':pep:`8`',
('<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="http://www.python.org/dev/peps/pep-0008"><strong>PEP 8</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8@\\spxentry{PEP 8}}\\sphinxhref{http://www.python.org/dev/peps/pep-0008}'
'{\\sphinxstylestrong{PEP 8}}')
),
(
# pep role with anchor
'verify',
':pep:`8#id1`',
('<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="http://www.python.org/dev/peps/pep-0008#id1">'
'<strong>PEP 8#id1</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8\\#id1@\\spxentry{PEP 8\\#id1}}\\sphinxhref'
'{http://www.python.org/dev/peps/pep-0008\\#id1}'
'{\\sphinxstylestrong{PEP 8\\#id1}}')
),
(
# rfc role
'verify',
':rfc:`2324`',
('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="http://tools.ietf.org/html/rfc2324.html"><strong>RFC 2324</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324@\\spxentry{RFC 2324}}'
'\\sphinxhref{http://tools.ietf.org/html/rfc2324.html}'
'{\\sphinxstylestrong{RFC 2324}}')
),
(
# rfc role with anchor
'verify',
':rfc:`2324#id1`',
('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="http://tools.ietf.org/html/rfc2324.html#id1">'
'<strong>RFC 2324#id1</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324\\#id1@\\spxentry{RFC 2324\\#id1}}'
'\\sphinxhref{http://tools.ietf.org/html/rfc2324.html\\#id1}'
'{\\sphinxstylestrong{RFC 2324\\#id1}}')
),
(
# correct interpretation of code with whitespace
'verify_re',
'``code sample``',
('<p><code class="(samp )?docutils literal notranslate"><span class="pre">'
'code</span>   <span class="pre">sample</span></code></p>'),
r'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{code sample}}',
),
(
# interpolation of arrows in menuselection
'verify',
':menuselection:`a --> b`',
('<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>'),
'\\sphinxAtStartPar\n\\sphinxmenuselection{a \\(\\rightarrow\\) b}',
),
(
# interpolation of ampersands in menuselection
'verify',
':menuselection:`&Foo -&&- &Bar`',
('<p><span class="menuselection"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'),
('\\sphinxAtStartPar\n'
r'\sphinxmenuselection{\sphinxaccelerator{F}oo \sphinxhyphen{}'
r'\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
),
(
# interpolation of ampersands in guilabel
'verify',
':guilabel:`&Foo -&&- &Bar`',
('<p><span class="guilabel"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'),
('\\sphinxAtStartPar\n'
r'\sphinxguilabel{\sphinxaccelerator{F}oo \sphinxhyphen{}\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
),
(
# no ampersands in guilabel
'verify',
':guilabel:`Foo`',
'<p><span class="guilabel">Foo</span></p>',
'\\sphinxAtStartPar\n\\sphinxguilabel{Foo}',
),
(
# kbd role
'verify',
':kbd:`space`',
'<p><kbd class="kbd docutils literal notranslate">space</kbd></p>',
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{space}}',
),
(
# kbd role
'verify',
':kbd:`Control+X`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">Control</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">X</kbd>'
'</kbd></p>'),
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{Control+X}}',
),
(
# kbd role
'verify',
':kbd:`Alt+^`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">Alt</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">^</kbd>'
'</kbd></p>'),
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Alt+\\textasciicircum{}}}'),
),
(
# kbd role
'verify',
':kbd:`M-x M-s`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">x</kbd>'
' '
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">s</kbd>'
'</kbd></p>'),
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{M\\sphinxhyphen{}x M\\sphinxhyphen{}s}}'),
),
(
# kbd role
'verify',
':kbd:`-`',
'<p><kbd class="kbd docutils literal notranslate">-</kbd></p>',
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{\\sphinxhyphen{}}}'),
),
(
# kbd role
'verify',
':kbd:`Caps Lock`',
'<p><kbd class="kbd docutils literal notranslate">Caps Lock</kbd></p>',
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Caps Lock}}'),
),
(
# non-interpolation of dashes in option role
'verify_re',
':option:`--with-option`',
('<p><code( class="xref std std-option docutils literal notranslate")?>'
'<span class="pre">--with-option</span></code></p>$'),
(r'\\sphinxAtStartPar\n'
r'\\sphinxcode{\\sphinxupquote{\\sphinxhyphen{}\\sphinxhyphen{}with\\sphinxhyphen{}option}}$'),
),
(
# verify smarty-pants quotes
'verify',
'"John"',
'<p>“John”</p>',
"\\sphinxAtStartPar\n“John”",
),
(
# ... but not in literal text
'verify',
'``"John"``',
('<p><code class="docutils literal notranslate"><span class="pre">'
'"John"</span></code></p>'),
'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{"John"}}',
),
(
# verify classes for inline roles
'verify',
':manpage:`mp(1)`',
'<p><em class="manpage">mp(1)</em></p>',
'\\sphinxAtStartPar\n\\sphinxstyleliteralemphasis{\\sphinxupquote{mp(1)}}',
),
(
# correct escaping in normal mode
'verify',
'Γ\\\\∞$',
None,
'\\sphinxAtStartPar\nΓ\\textbackslash{}\\(\\infty\\)\\$',
),
(
# in verbatim code fragments
'verify',
'::\n\n @Γ\\∞${}',
None,
('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'),
),
(
# in URIs
'verify_re',
'`test <https://www.google.com/~me/>`_',
None,
r'\\sphinxAtStartPar\n\\sphinxhref{https://www.google.com/~me/}{test}.*',
),
(
# description list: simple
'verify',
'term\n description',
'<dl class="docutils">\n<dt>term</dt><dd>description</dd>\n</dl>',
None,
),
(
# description list: with classifiers
'verify',
'term : class1 : class2\n description',
('<dl class="docutils">\n<dt>term<span class="classifier">class1</span>'
'<span class="classifier">class2</span></dt><dd>description</dd>\n</dl>'),
None,
),
(
# glossary (description list): multiple terms
'verify',
'.. glossary::\n\n term1\n term2\n description',
('<dl class="glossary docutils">\n'
'<dt id="term-term1">term1<a class="headerlink" href="#term-term1"'
' title="Permalink to this term">¶</a></dt>'
'<dt id="term-term2">term2<a class="headerlink" href="#term-term2"'
' title="Permalink to this term">¶</a></dt>'
'<dd>description</dd>\n</dl>'),
None,
),
])
def test_inline(get_verifier, type, rst, html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
'verify',
r'4 backslashes \\\\',
r'<p>4 backslashes \\</p>',
None,
),
])
@pytest.mark.skipif(docutils.__version_info__ < (0, 16),
reason='docutils-0.16 or above is required')
def test_inline_docutils16(get_verifier, type, rst, html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
@pytest.mark.sphinx(confoverrides={'latex_engine': 'xelatex'})
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
# in verbatim code fragments
'verify',
'::\n\n @Γ\\∞${}',
None,
('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}∞\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'),
),
])
def test_inline_for_unicode_latex_engine(get_verifier, type, rst,
html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
def test_samp_role(parse):
# no braces
text = ':samp:`a{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "b"],
"c")])
# nested braces
text = ':samp:`a{{b}}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "{b"],
"}c")])
# half-opened braces
text = ':samp:`a{bc`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{bc"])
# escaped braces
text = ':samp:`a\\\\{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{b}c"])
    # no braces (whitespace is kept as is)
text = ':samp:`code sample`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "code sample"])
def test_download_role(parse):
# implicit
text = ':download:`sphinx.rst`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "sphinx.rst"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=False, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
# explicit
text = ':download:`reftitle <sphinx.rst>`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "reftitle"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=True, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
def test_XRefRole(inliner):
role = XRefRole()
# implicit
doctrees, errors = role('ref', 'rawtext', 'text', 5, inliner, {}, [])
assert len(doctrees) == 1
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
assert errors == []
# explicit
doctrees, errors = role('ref', 'rawtext', 'title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'title'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='target',
refexplicit=True, refwarn=False)
# bang
doctrees, errors = role('ref', 'rawtext', '!title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [nodes.literal, 'title <target>'])
# refdomain
doctrees, errors = role('test:doc', 'rawtext', 'text', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='test', reftype='doc', reftarget='text',
refexplicit=False, refwarn=False)
# fix_parens
role = XRefRole(fix_parens=True)
doctrees, errors = role('ref', 'rawtext', 'text()', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text()'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
# lowercase
role = XRefRole(lowercase=True)
doctrees, errors = role('ref', 'rawtext', 'TEXT', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'TEXT'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
@pytest.mark.sphinx('dummy', testroot='prolog')
def test_rst_prolog(app, status, warning):
app.builder.build_all()
rst = app.env.get_doctree('restructuredtext')
md = app.env.get_doctree('markdown')
# rst_prolog
assert_node(rst[0], nodes.paragraph)
assert_node(rst[0][0], nodes.emphasis)
assert_node(rst[0][0][0], nodes.Text)
assert rst[0][0][0] == 'Hello world'
# rst_epilog
assert_node(rst[-1], nodes.section)
assert_node(rst[-1][-1], nodes.paragraph)
assert_node(rst[-1][-1][0], nodes.emphasis)
assert_node(rst[-1][-1][0][0], nodes.Text)
assert rst[-1][-1][0][0] == 'Good-bye world'
    # rst_prolog & rst_epilog are skipped for non-reST (markdown) sources
assert not md.rawsource.startswith('*Hello world*.')
assert not md.rawsource.endswith('*Good-bye world*.\n')
@pytest.mark.sphinx('dummy', testroot='keep_warnings')
def test_keep_warnings_is_True(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 2
assert_node(doctree[0][1], nodes.system_message)
@pytest.mark.sphinx('dummy', testroot='keep_warnings',
confoverrides={'keep_warnings': False})
def test_keep_warnings_is_False(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 1
@pytest.mark.sphinx('dummy', testroot='refonly_bullet_list')
def test_compact_refonly_bullet_list(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 5
assert doctree[0][1].astext() == 'List A:'
assert_node(doctree[0][2], nodes.bullet_list)
assert_node(doctree[0][2][0][0], addnodes.compact_paragraph)
assert doctree[0][2][0][0].astext() == 'genindex'
assert doctree[0][3].astext() == 'List B:'
assert_node(doctree[0][4], nodes.bullet_list)
assert_node(doctree[0][4][0][0], nodes.paragraph)
assert doctree[0][4][0][0].astext() == 'Hello'
@pytest.mark.sphinx('dummy', testroot='default_role')
def test_default_role1(app, status, warning):
app.builder.build_all()
# default-role: pep
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], addnodes.index)
assert_node(doctree[0][1][1], nodes.target)
assert_node(doctree[0][1][2], nodes.reference, classes=["pep"])
# no default-role
doctree = app.env.get_doctree('foo')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], nodes.title_reference)
assert_node(doctree[0][1][1], nodes.Text)
@pytest.mark.sphinx('dummy', testroot='default_role',
confoverrides={'default_role': 'guilabel'})
def test_default_role2(app, status, warning):
app.builder.build_all()
    # the default-role directive takes precedence over the configuration value
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], addnodes.index)
assert_node(doctree[0][1][1], nodes.target)
assert_node(doctree[0][1][2], nodes.reference, classes=["pep"])
# default_role changes the default behavior
doctree = app.env.get_doctree('foo')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], nodes.inline, classes=["guilabel"])
assert_node(doctree[0][1][1], nodes.Text)
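# -----------------------------------------------------------------------------
# Editor's standalone illustration of the parse step the fixtures above wire
# together (docutils only, no Sphinx builder); the rst snippet is an example.
def _demo_parse_rst():
    from docutils import frontend, utils
    from docutils.parsers.rst import Parser as RstParser
    settings = frontend.OptionParser(
        components=(RstParser,)).get_default_values()
    document = utils.new_document('<demo>', settings)
    RstParser().parse('**bold** text', document)
    return document.pformat()  # the node tree that assert_node-style checks walk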
| """
test_markup
~~~~~~~~~~~
Test various Sphinx-specific markup extensions.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import pytest
from docutils import frontend, nodes, utils
from docutils.parsers.rst import Parser as RstParser
from sphinx import addnodes
from sphinx.builders.html.transforms import KeyboardTransform
from sphinx.builders.latex import LaTeXBuilder
from sphinx.roles import XRefRole
from sphinx.testing.util import Struct, assert_node
from sphinx.transforms import SphinxSmartQuotes
from sphinx.util import docutils, texescape
from sphinx.util.docutils import sphinx_domains
from sphinx.writers.html import HTMLTranslator, HTMLWriter
from sphinx.writers.latex import LaTeXTranslator, LaTeXWriter
@pytest.fixture
def settings(app):
texescape.init() # otherwise done by the latex builder
optparser = frontend.OptionParser(
components=(RstParser, HTMLWriter, LaTeXWriter))
settings = optparser.get_default_values()
settings.smart_quotes = True
settings.env = app.builder.env
settings.env.temp_data['docname'] = 'dummy'
settings.contentsname = 'dummy'
settings.rfc_base_url = 'http://tools.ietf.org/html/'
domain_context = sphinx_domains(settings.env)
domain_context.enable()
yield settings
domain_context.disable()
@pytest.fixture
def new_document(settings):
def create():
document = utils.new_document('test data', settings)
document['file'] = 'dummy'
return document
return create
@pytest.fixture
def inliner(new_document):
document = new_document()
document.reporter.get_source_and_line = lambda line=1: ('dummy.rst', line)
return Struct(document=document, reporter=document.reporter)
@pytest.fixture
def parse(new_document):
def parse_(rst):
document = new_document()
parser = RstParser()
parser.parse(rst, document)
SphinxSmartQuotes(document, startnode=None).apply()
for msg in document.traverse(nodes.system_message):
if msg['level'] == 1:
msg.replace_self([])
return document
return parse_
# since we're not resolving the markup afterwards, these nodes may remain
class ForgivingTranslator:
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
class ForgivingHTMLTranslator(HTMLTranslator, ForgivingTranslator):
pass
class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator):
pass
@pytest.fixture
def verify_re_html(app, parse):
def verify(rst, html_expected):
document = parse(rst)
KeyboardTransform(document).apply()
html_translator = ForgivingHTMLTranslator(document, app.builder)
document.walkabout(html_translator)
html_translated = ''.join(html_translator.fragment).strip()
assert re.match(html_expected, html_translated), 'from ' + rst
return verify
@pytest.fixture
def verify_re_latex(app, parse):
def verify(rst, latex_expected):
document = parse(rst)
app.builder = LaTeXBuilder(app)
app.builder.set_environment(app.env)
app.builder.init()
theme = app.builder.themes.get('manual')
latex_translator = ForgivingLaTeXTranslator(document, app.builder, theme)
latex_translator.first_document = -1 # don't write \begin{document}
document.walkabout(latex_translator)
latex_translated = ''.join(latex_translator.body).strip()
assert re.match(latex_expected, latex_translated), 'from ' + repr(rst)
return verify
@pytest.fixture
def verify_re(verify_re_html, verify_re_latex):
def verify_re_(rst, html_expected, latex_expected):
if html_expected:
verify_re_html(rst, html_expected)
if latex_expected:
verify_re_latex(rst, latex_expected)
return verify_re_
@pytest.fixture
def verify(verify_re_html, verify_re_latex):
def verify_(rst, html_expected, latex_expected):
if html_expected:
verify_re_html(rst, re.escape(html_expected) + '$')
if latex_expected:
verify_re_latex(rst, re.escape(latex_expected) + '$')
return verify_
@pytest.fixture
def get_verifier(verify, verify_re):
v = {
'verify': verify,
'verify_re': verify_re,
}
def get(name):
return v[name]
return get
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
# pep role
'verify',
':pep:`8`',
('<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="http://www.python.org/dev/peps/pep-0008"><strong>PEP 8</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8@\\spxentry{PEP 8}}\\sphinxhref{http://www.python.org/dev/peps/pep-0008}'
'{\\sphinxstylestrong{PEP 8}}')
),
(
# pep role with anchor
'verify',
':pep:`8#id1`',
('<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="http://www.python.org/dev/peps/pep-0008#id1">'
'<strong>PEP 8#id1</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8\\#id1@\\spxentry{PEP 8\\#id1}}\\sphinxhref'
'{http://www.python.org/dev/peps/pep-0008\\#id1}'
'{\\sphinxstylestrong{PEP 8\\#id1}}')
),
(
# rfc role
'verify',
':rfc:`2324`',
('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="http://tools.ietf.org/html/rfc2324.html"><strong>RFC 2324</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324@\\spxentry{RFC 2324}}'
'\\sphinxhref{http://tools.ietf.org/html/rfc2324.html}'
'{\\sphinxstylestrong{RFC 2324}}')
),
(
# rfc role with anchor
'verify',
':rfc:`2324#id1`',
('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="http://tools.ietf.org/html/rfc2324.html#id1">'
'<strong>RFC 2324#id1</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324\\#id1@\\spxentry{RFC 2324\\#id1}}'
'\\sphinxhref{http://tools.ietf.org/html/rfc2324.html\\#id1}'
'{\\sphinxstylestrong{RFC 2324\\#id1}}')
),
(
# correct interpretation of code with whitespace
'verify_re',
'``code sample``',
('<p><code class="(samp )?docutils literal notranslate"><span class="pre">'
'code</span>   <span class="pre">sample</span></code></p>'),
r'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{code sample}}',
),
(
# interpolation of arrows in menuselection
'verify',
':menuselection:`a --> b`',
('<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>'),
'\\sphinxAtStartPar\n\\sphinxmenuselection{a \\(\\rightarrow\\) b}',
),
(
# interpolation of ampersands in menuselection
'verify',
':menuselection:`&Foo -&&- &Bar`',
('<p><span class="menuselection"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'),
('\\sphinxAtStartPar\n'
r'\sphinxmenuselection{\sphinxaccelerator{F}oo \sphinxhyphen{}'
r'\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
),
(
# interpolation of ampersands in guilabel
'verify',
':guilabel:`&Foo -&&- &Bar`',
('<p><span class="guilabel"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'),
('\\sphinxAtStartPar\n'
r'\sphinxguilabel{\sphinxaccelerator{F}oo \sphinxhyphen{}\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
),
(
# no ampersands in guilabel
'verify',
':guilabel:`Foo`',
'<p><span class="guilabel">Foo</span></p>',
'\\sphinxAtStartPar\n\\sphinxguilabel{Foo}',
),
(
# kbd role
'verify',
':kbd:`space`',
'<p><kbd class="kbd docutils literal notranslate">space</kbd></p>',
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{space}}',
),
(
# kbd role
'verify',
':kbd:`Control+X`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">Control</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">X</kbd>'
'</kbd></p>'),
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{Control+X}}',
),
(
# kbd role
'verify',
':kbd:`Alt+^`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">Alt</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">^</kbd>'
'</kbd></p>'),
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Alt+\\textasciicircum{}}}'),
),
(
# kbd role
'verify',
':kbd:`M-x M-s`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">x</kbd>'
' '
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">s</kbd>'
'</kbd></p>'),
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{M\\sphinxhyphen{}x M\\sphinxhyphen{}s}}'),
),
(
# kbd role
'verify',
':kbd:`-`',
'<p><kbd class="kbd docutils literal notranslate">-</kbd></p>',
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{\\sphinxhyphen{}}}'),
),
(
# kbd role
'verify',
':kbd:`Caps Lock`',
'<p><kbd class="kbd docutils literal notranslate">Caps Lock</kbd></p>',
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Caps Lock}}'),
),
(
# non-interpolation of dashes in option role
'verify_re',
':option:`--with-option`',
('<p><code( class="xref std std-option docutils literal notranslate")?>'
'<span class="pre">--with-option</span></code></p>$'),
(r'\\sphinxAtStartPar\n'
r'\\sphinxcode{\\sphinxupquote{\\sphinxhyphen{}\\sphinxhyphen{}with\\sphinxhyphen{}option}}$'),
),
(
# verify smarty-pants quotes
'verify',
'"John"',
'<p>“John”</p>',
"\\sphinxAtStartPar\n“John”",
),
(
# ... but not in literal text
'verify',
'``"John"``',
('<p><code class="docutils literal notranslate"><span class="pre">'
'"John"</span></code></p>'),
'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{"John"}}',
),
(
# verify classes for inline roles
'verify',
':manpage:`mp(1)`',
'<p><em class="manpage">mp(1)</em></p>',
'\\sphinxAtStartPar\n\\sphinxstyleliteralemphasis{\\sphinxupquote{mp(1)}}',
),
(
# correct escaping in normal mode
'verify',
'Γ\\\\∞$',
None,
'\\sphinxAtStartPar\nΓ\\textbackslash{}\\(\\infty\\)\\$',
),
(
# in verbatim code fragments
'verify',
'::\n\n @Γ\\∞${}',
None,
('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'),
),
(
# in URIs
'verify_re',
'`test <https://www.google.com/~me/>`_',
None,
r'\\sphinxAtStartPar\n\\sphinxhref{https://www.google.com/~me/}{test}.*',
),
(
# description list: simple
'verify',
'term\n description',
'<dl class="docutils">\n<dt>term</dt><dd>description</dd>\n</dl>',
None,
),
(
# description list: with classifiers
'verify',
'term : class1 : class2\n description',
('<dl class="docutils">\n<dt>term<span class="classifier">class1</span>'
'<span class="classifier">class2</span></dt><dd>description</dd>\n</dl>'),
None,
),
(
# glossary (description list): multiple terms
'verify',
'.. glossary::\n\n term1\n term2\n description',
('<dl class="glossary docutils">\n'
'<dt id="term-term1">term1<a class="headerlink" href="#term-term1"'
' title="Permalink to this term">¶</a></dt>'
'<dt id="term-term2">term2<a class="headerlink" href="#term-term2"'
' title="Permalink to this term">¶</a></dt>'
'<dd>description</dd>\n</dl>'),
None,
),
])
def test_inline(get_verifier, type, rst, html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
'verify',
r'4 backslashes \\\\',
r'<p>4 backslashes \\</p>',
None,
),
])
@pytest.mark.skipif(docutils.__version_info__ < (0, 16),
reason='docutils-0.16 or above is required')
def test_inline_docutils16(get_verifier, type, rst, html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
@pytest.mark.sphinx(confoverrides={'latex_engine': 'xelatex'})
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
# in verbatim code fragments
'verify',
'::\n\n @Γ\\∞${}',
None,
('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}∞\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'),
),
])
def test_inline_for_unicode_latex_engine(get_verifier, type, rst,
html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
def test_samp_role(parse):
# no braces
text = ':samp:`a{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "b"],
"c")])
# nested braces
text = ':samp:`a{{b}}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "{b"],
"}c")])
# half-opened braces
text = ':samp:`a{bc`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{bc"])
# escaped braces
text = ':samp:`a\\\\{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{b}c"])
    # no braces (whitespace is kept as is)
text = ':samp:`code sample`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "code sample"])
def test_download_role(parse):
# implicit
text = ':download:`sphinx.rst`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "sphinx.rst"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=False, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
# explicit
text = ':download:`reftitle <sphinx.rst>`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "reftitle"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=True, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
def test_XRefRole(inliner):
role = XRefRole()
# implicit
doctrees, errors = role('ref', 'rawtext', 'text', 5, inliner, {}, [])
assert len(doctrees) == 1
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
assert errors == []
# explicit
doctrees, errors = role('ref', 'rawtext', 'title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'title'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='target',
refexplicit=True, refwarn=False)
# bang
doctrees, errors = role('ref', 'rawtext', '!title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [nodes.literal, 'title <target>'])
# refdomain
doctrees, errors = role('test:doc', 'rawtext', 'text', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='test', reftype='doc', reftarget='text',
refexplicit=False, refwarn=False)
# fix_parens
role = XRefRole(fix_parens=True)
doctrees, errors = role('ref', 'rawtext', 'text()', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text()'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
# lowercase
role = XRefRole(lowercase=True)
doctrees, errors = role('ref', 'rawtext', 'TEXT', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'TEXT'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
@pytest.mark.sphinx('dummy', testroot='prolog')
def test_rst_prolog(app, status, warning):
app.builder.build_all()
rst = app.env.get_doctree('restructuredtext')
md = app.env.get_doctree('markdown')
# rst_prolog
assert_node(rst[0], nodes.paragraph)
assert_node(rst[0][0], nodes.emphasis)
assert_node(rst[0][0][0], nodes.Text)
assert rst[0][0][0] == 'Hello world'
# rst_epilog
assert_node(rst[-1], nodes.section)
assert_node(rst[-1][-1], nodes.paragraph)
assert_node(rst[-1][-1][0], nodes.emphasis)
assert_node(rst[-1][-1][0][0], nodes.Text)
assert rst[-1][-1][0][0] == 'Good-bye world'
    # rst_prolog & rst_epilog are skipped for non-reST (markdown) sources
assert not md.rawsource.startswith('*Hello world*.')
assert not md.rawsource.endswith('*Good-bye world*.\n')
@pytest.mark.sphinx('dummy', testroot='keep_warnings')
def test_keep_warnings_is_True(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 2
assert_node(doctree[0][1], nodes.system_message)
@pytest.mark.sphinx('dummy', testroot='keep_warnings',
confoverrides={'keep_warnings': False})
def test_keep_warnings_is_False(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 1
@pytest.mark.sphinx('dummy', testroot='refonly_bullet_list')
def test_compact_refonly_bullet_list(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 5
assert doctree[0][1].astext() == 'List A:'
assert_node(doctree[0][2], nodes.bullet_list)
assert_node(doctree[0][2][0][0], addnodes.compact_paragraph)
assert doctree[0][2][0][0].astext() == 'genindex'
assert doctree[0][3].astext() == 'List B:'
assert_node(doctree[0][4], nodes.bullet_list)
assert_node(doctree[0][4][0][0], nodes.paragraph)
assert doctree[0][4][0][0].astext() == 'Hello'
@pytest.mark.sphinx('dummy', testroot='default_role')
def test_default_role1(app, status, warning):
app.builder.build_all()
# default-role: pep
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], addnodes.index)
assert_node(doctree[0][1][1], nodes.target)
assert_node(doctree[0][1][2], nodes.reference, classes=["pep"])
# no default-role
doctree = app.env.get_doctree('foo')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], nodes.title_reference)
assert_node(doctree[0][1][1], nodes.Text)
@pytest.mark.sphinx('dummy', testroot='default_role',
confoverrides={'default_role': 'guilabel'})
def test_default_role2(app, status, warning):
app.builder.build_all()
    # the default-role directive takes precedence over the configuration value
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], addnodes.index)
assert_node(doctree[0][1][1], nodes.target)
assert_node(doctree[0][1][2], nodes.reference, classes=["pep"])
# default_role changes the default behavior
doctree = app.env.get_doctree('foo')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], nodes.inline, classes=["guilabel"])
assert_node(doctree[0][1][1], nodes.Text) | en | 0.52784 | test_markup ~~~~~~~~~~~ Test various Sphinx-specific markup extensions. :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. # otherwise done by the latex builder # since we're not resolving the markup afterwards, these nodes may remain # don't write \begin{document} # pep role # pep role with anchor #id1`', #id1">' #id1</strong></a></p>'), #id1@\\spxentry{PEP 8\\#id1}}\\sphinxhref' #id1}' #id1}}') # rfc role # rfc role with anchor #id1`', #id1">' #id1</strong></a></p>'), #id1@\\spxentry{RFC 2324\\#id1}}' #id1}' #id1}}') # correct interpretation of code with whitespace #160;  <span class="pre">sample</span></code></p>'), # interpolation of arrows in menuselection # interpolation of ampersands in menuselection # interpolation of ampersands in guilabel # no ampersands in guilabel # kbd role # kbd role # kbd role # kbd role # kbd role # kbd role # non-interpolation of dashes in option role # verify smarty-pants quotes # ... but not in literal text # verify classes for inline roles # correct escaping in normal mode # in verbatim code fragments # in URIs # description list: simple # description list: with classifiers # glossary (description list): multiple terms # in verbatim code fragments # no braces # nested braces # half-opened braces # escaped braces # no braces (whitespaces are keeped as is) # implicit # explicit # implicit # explicit # bang # refdomain # fix_parens # lowercase # rst_prolog # rst_epilog # rst_prolog & rst_epilog on exlucding reST parser # default-role: pep # no default-role # default-role directive is stronger than configratuion # default_role changes the default behavior | 1.9995 | 2 |
dev/tools/leveleditor/direct/showbase/ContainerLeakDetector.py | CrankySupertoon01/Toontown-2 | 1 | 5985 | from pandac.PandaModules import PStatCollector
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import Queue, invertDictLossless, makeFlywheelGen
from direct.showbase.PythonUtil import itype, serialNum, safeRepr, fastRepr
from direct.showbase.Job import Job
import types, weakref, random, __builtin__
def _createContainerLeak():
def leakContainer(task=None):
base = getBase()
if not hasattr(base, 'leakContainer'):
base.leakContainer = {}
# use tuples as keys since they can't be weakref'd, and use an instance
# since it can't be repr/eval'd
# that will force the leak detector to hold a normal 'non-weak' reference
class LeakKey:
pass
base.leakContainer[(LeakKey(),)] = {}
# test the non-weakref object reference handling
if random.random() < .01:
key = random.choice(base.leakContainer.keys())
ContainerLeakDetector.notify.debug(
'removing reference to leakContainer key %s so it will be garbage-collected' % safeRepr(key))
del base.leakContainer[key]
taskMgr.doMethodLater(10, leakContainer, 'leakContainer-%s' % serialNum())
if task:
return task.done
leakContainer()
def _createTaskLeak():
leakTaskName = uniqueName('leakedTask')
leakDoLaterName = uniqueName('leakedDoLater')
def nullTask(task=None):
return task.cont
def nullDoLater(task=None):
return task.done
def leakTask(task=None, leakTaskName=leakTaskName):
base = getBase()
taskMgr.add(nullTask, uniqueName(leakTaskName))
taskMgr.doMethodLater(1 << 31, nullDoLater, uniqueName(leakDoLaterName))
taskMgr.doMethodLater(10, leakTask, 'doLeakTask-%s' % serialNum())
if task:
return task.done
leakTask()
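# Note: the two helpers above are debugging aids that deliberately create leaks
# so the detector can be exercised; they only run when the 'leak-container' /
# 'leak-tasks' config variables are set (see ContainerLeakDetector.__init__).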
class NoDictKey:
pass
class Indirection:
"""
Represents the indirection that brings you from a container to an element of the container.
Stored as a string to be used as part of an eval, or as a key to be looked up in a dict.
Each dictionary dereference is individually eval'd since the dict key might have been
garbage-collected
TODO: store string components that are duplicates of strings in the actual system so that
Python will keep one copy and reduce memory usage
"""
def __init__(self, evalStr=None, dictKey=NoDictKey):
# if this is a dictionary lookup, pass dictKey instead of evalStr
self.evalStr = evalStr
self.dictKey = NoDictKey
# is the dictKey a weak reference?
self._isWeakRef = False
self._refCount = 0
if dictKey is not NoDictKey:
# if we can repr/eval the key, store it as an evalStr
keyRepr = safeRepr(dictKey)
useEval = False
try:
keyEval = eval(keyRepr)
useEval = True
except:
pass
if useEval:
# check to make sure the eval succeeded
if hash(keyEval) != hash(dictKey):
useEval = False
if useEval:
# eval/repr succeeded, store as an evalStr
self.evalStr = '[%s]' % keyRepr
else:
try:
# store a weakref to the key
self.dictKey = weakref.ref(dictKey)
self._isWeakRef = True
except TypeError, e:
ContainerLeakDetector.notify.debug('could not weakref dict key %s' % keyRepr)
self.dictKey = dictKey
self._isWeakRef = False
def destroy(self):
# re-entrant
self.dictKey = NoDictKey
def acquire(self):
self._refCount += 1
def release(self):
self._refCount -= 1
if self._refCount == 0:
self.destroy()
def isDictKey(self):
# is this an indirection through a dictionary?
return self.dictKey is not NoDictKey
def _getNonWeakDictKey(self):
if not self._isWeakRef:
return self.dictKey
else:
key = self.dictKey()
if key is None:
return '<garbage-collected dict key>'
return key
def dereferenceDictKey(self, parentDict):
# look ourselves up in parentDict
key = self._getNonWeakDictKey()
# objects in __builtin__ will have parentDict==None
if parentDict is None:
return key
return parentDict[key]
def getString(self, prevIndirection=None, nextIndirection=None):
# return our contribution to the full name of an object
instanceDictStr = '.__dict__'
if self.evalStr is not None:
# if we're an instance dict, skip over this one (obj.__dict__[keyName] == obj.keyName)
if nextIndirection is not None and self.evalStr[-len(instanceDictStr):] == instanceDictStr:
return self.evalStr[:-len(instanceDictStr)]
# if the previous indirection was an instance dict, change our syntax from ['key'] to .key
if prevIndirection is not None and prevIndirection.evalStr is not None:
if prevIndirection.evalStr[-len(instanceDictStr):] == instanceDictStr:
return '.%s' % self.evalStr[2:-2]
return self.evalStr
# we're stored as a dict key
keyRepr = safeRepr(self._getNonWeakDictKey())
# if the previous indirection was an instance dict, change our syntax from ['key'] to .key
if prevIndirection is not None and prevIndirection.evalStr is not None:
if prevIndirection.evalStr[-len(instanceDictStr):] == instanceDictStr:
return '.%s' % keyRepr
return '[%s]' % keyRepr
def __repr__(self):
return self.getString()
class ObjectRef:
"""
stores a reference to a container in a way that does not prevent garbage
collection of the container if possible
stored as a series of 'indirections' (obj.foo -> '.foo', dict[key] -> '[key]', etc.)
"""
notify = directNotify.newCategory("ObjectRef")
class FailedEval(Exception):
pass
def __init__(self, indirection, objId, other=None):
self._indirections = []
# are we building off of an existing ref?
if other is not None:
for ind in other._indirections:
self._indirections.append(ind)
# make sure we're not storing a reference to the actual object,
# that could cause a memory leak
assert type(objId) in (types.IntType, types.LongType)
# prevent cycles (i.e. base.loader.base.loader)
assert not self.goesThrough(objId=objId)
self._indirections.append(indirection)
# make sure our indirections don't get destroyed while we're using them
for ind in self._indirections:
ind.acquire()
self.notify.debug(repr(self))
def destroy(self):
for indirection in self._indirections:
indirection.release()
del self._indirections
def getNumIndirections(self):
return len(self._indirections)
def goesThroughGen(self, obj=None, objId=None):
if obj is None:
assert type(objId) in (types.IntType, types.LongType)
else:
objId = id(obj)
o = None
evalStr = ''
curObj = None
# make sure the indirections don't go away on us
indirections = self._indirections
for indirection in indirections:
yield None
indirection.acquire()
for indirection in indirections:
yield None
if not indirection.isDictKey():
# build up a string to be eval'd
evalStr += indirection.getString()
else:
curObj = self._getContainerByEval(evalStr, curObj=curObj)
if curObj is None:
                    raise self.FailedEval(evalStr)
# try to look up this key in the curObj dictionary
curObj = indirection.dereferenceDictKey(curObj)
evalStr = ''
yield None
o = self._getContainerByEval(evalStr, curObj=curObj)
if id(o) == objId:
break
for indirection in indirections:
yield None
indirection.release()
yield id(o) == objId
def goesThrough(self, obj=None, objId=None):
# since we cache the ids involved in this reference,
# this isn't perfect, for example if base.myObject is reassigned
# to a different object after this Ref was created this would return
# false, allowing a ref to base.myObject.otherObject.myObject
for goesThrough in self.goesThroughGen(obj=obj, objId=objId):
pass
return goesThrough
def _getContainerByEval(self, evalStr, curObj=None):
if curObj is not None:
# eval('curObj.foo.bar.someDict')
evalStr = 'curObj%s' % evalStr
else:
            # this eval is not based off of curObj, use the global __builtin__ namespace
# put __builtin__ at the start if it's not already there
bis = '__builtin__'
if evalStr[:len(bis)] != bis:
evalStr = '%s.%s' % (bis, evalStr)
try:
container = eval(evalStr)
except NameError, ne:
return None
except AttributeError, ae:
return None
except KeyError, ke:
return None
return container
def getContainerGen(self, getInstance=False):
# try to get a handle on the container by eval'ing and looking things
# up in dictionaries, depending on the type of each indirection
# if getInstance is True, will return instance instead of instance dict
#import pdb;pdb.set_trace()
evalStr = ''
curObj = None
# make sure the indirections don't go away on us
indirections = self._indirections
for indirection in indirections:
indirection.acquire()
for indirection in indirections:
yield None
if not indirection.isDictKey():
# build up a string to be eval'd
evalStr += indirection.getString()
else:
curObj = self._getContainerByEval(evalStr, curObj=curObj)
if curObj is None:
                    raise self.FailedEval(evalStr)
# try to look up this key in the curObj dictionary
curObj = indirection.dereferenceDictKey(curObj)
evalStr = ''
for indirection in indirections:
yield None
indirection.release()
if getInstance:
lenDict = len('.__dict__')
if evalStr[-lenDict:] == '.__dict__':
evalStr = evalStr[:-lenDict]
# TODO: check that this is still the object we originally pointed to
yield self._getContainerByEval(evalStr, curObj=curObj)
def getEvalStrGen(self, getInstance=False):
str = ''
prevIndirection = None
curIndirection = None
nextIndirection = None
# make sure the indirections don't go away on us
indirections = self._indirections
for indirection in indirections:
indirection.acquire()
for i in xrange(len(indirections)):
yield None
if i > 0:
prevIndirection = indirections[i-1]
else:
prevIndirection = None
curIndirection = indirections[i]
if i < len(indirections)-1:
nextIndirection = indirections[i+1]
else:
nextIndirection = None
str += curIndirection.getString(prevIndirection=prevIndirection,
nextIndirection=nextIndirection)
if getInstance:
lenDict = len('.__dict__')
if str[-lenDict:] == '.__dict__':
str = str[:-lenDict]
for indirection in indirections:
yield None
indirection.release()
yield str
def getFinalIndirectionStr(self):
prevIndirection = None
if len(self._indirections) > 1:
prevIndirection = self._indirections[-2]
return self._indirections[-1].getString(prevIndirection=prevIndirection)
def __repr__(self):
for result in self.getEvalStrGen():
pass
return result
class FindContainers(Job):
"""
Explore the Python graph, looking for objects that support __len__()
"""
def __init__(self, name, leakDetector):
Job.__init__(self, name)
self._leakDetector = leakDetector
self._id2ref = self._leakDetector._id2ref
# these hold objects that we should start traversals from often and not-as-often,
# respectively
self._id2baseStartRef = {}
self._id2discoveredStartRef = {}
# these are working copies so that our iterations aren't disturbed by changes to the
# definitive ref sets
self._baseStartRefWorkingList = ScratchPad(refGen=nullGen(),
source=self._id2baseStartRef)
self._discoveredStartRefWorkingList = ScratchPad(refGen=nullGen(),
source=self._id2discoveredStartRef)
self.notify = self._leakDetector.notify
ContainerLeakDetector.addPrivateObj(self.__dict__)
# set up the base containers, the ones that hold most objects
ref = ObjectRef(Indirection(evalStr='__builtin__.__dict__'), id(__builtin__.__dict__))
self._id2baseStartRef[id(__builtin__.__dict__)] = ref
# container for objects that want to make sure they are found by
# the object exploration algorithm, including objects that exist
# just to measure things such as C++ memory usage, scene graph size,
# framerate, etc. See LeakDetectors.py
if not hasattr(__builtin__, "leakDetectors"):
__builtin__.leakDetectors = {}
ref = ObjectRef(Indirection(evalStr='leakDetectors'), id(leakDetectors))
self._id2baseStartRef[id(leakDetectors)] = ref
for i in self._addContainerGen(__builtin__.__dict__, ref):
pass
try:
base
except:
pass
else:
ref = ObjectRef(Indirection(evalStr='base.__dict__'), id(base.__dict__))
self._id2baseStartRef[id(base.__dict__)] = ref
for i in self._addContainerGen(base.__dict__, ref):
pass
try:
simbase
except:
pass
else:
ref = ObjectRef(Indirection(evalStr='simbase.__dict__'), id(simbase.__dict__))
self._id2baseStartRef[id(simbase.__dict__)] = ref
for i in self._addContainerGen(simbase.__dict__, ref):
pass
def destroy(self):
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def getPriority(self):
return Job.Priorities.Low
@staticmethod
def getStartObjAffinity(startObj):
# how good of a starting object is this object for traversing the object graph?
try:
return len(startObj)
except:
return 1
def _isDeadEnd(self, obj, objName=None):
if type(obj) in (types.BooleanType, types.BuiltinFunctionType,
types.BuiltinMethodType, types.ComplexType,
types.FloatType, types.IntType, types.LongType,
types.NoneType, types.NotImplementedType,
types.TypeType, types.CodeType, types.FunctionType,
types.StringType, types.UnicodeType,
types.TupleType):
return True
# if it's an internal object, ignore it
if id(obj) in ContainerLeakDetector.PrivateIds:
return True
# prevent crashes in objects that define __cmp__ and don't handle strings
if type(objName) == types.StringType and objName in ('im_self', 'im_class'):
return True
try:
className = obj.__class__.__name__
except:
pass
else:
# prevent infinite recursion in built-in containers related to methods
if className == 'method-wrapper':
return True
return False
def _hasLength(self, obj):
return hasattr(obj, '__len__')
def _addContainerGen(self, cont, objRef):
contId = id(cont)
# if this container is new, or the objRef repr is shorter than what we already have,
# put it in the table
if contId in self._id2ref:
for existingRepr in self._id2ref[contId].getEvalStrGen():
yield None
for newRepr in objRef.getEvalStrGen():
yield None
if contId not in self._id2ref or len(newRepr) < len(existingRepr):
if contId in self._id2ref:
self._leakDetector.removeContainerById(contId)
self._id2ref[contId] = objRef
def _addDiscoveredStartRef(self, obj, ref):
# we've discovered an object that can be used to start an object graph traversal
objId = id(obj)
if objId in self._id2discoveredStartRef:
existingRef = self._id2discoveredStartRef[objId]
if type(existingRef) not in (types.IntType, types.LongType):
if (existingRef.getNumIndirections() >=
ref.getNumIndirections()):
# the ref that we already have is more concise than the new ref
return
if objId in self._id2ref:
if (self._id2ref[objId].getNumIndirections() >=
ref.getNumIndirections()):
# the ref that we already have is more concise than the new ref
return
storedItem = ref
# if we already are storing a reference to this object, don't store a second reference
if objId in self._id2ref:
storedItem = objId
self._id2discoveredStartRef[objId] = storedItem
def run(self):
try:
# this yields a different set of start refs every time we start a new traversal
# force creation of a new workingListSelector inside the while loop right off the bat
workingListSelector = nullGen()
# this holds the current step of the current traversal
curObjRef = None
while True:
# yield up here instead of at the end, since we skip back to the
# top of the while loop from various points
yield None
#import pdb;pdb.set_trace()
if curObjRef is None:
# choose an object to start a traversal from
try:
startRefWorkingList = workingListSelector.next()
except StopIteration:
# do relative # of traversals on each set based on how many refs it contains
baseLen = len(self._baseStartRefWorkingList.source)
discLen = len(self._discoveredStartRefWorkingList.source)
minLen = float(max(1, min(baseLen, discLen)))
# this will cut down the traversals of the larger set by 2/3
minLen *= 3.
workingListSelector = flywheel([self._baseStartRefWorkingList, self._discoveredStartRefWorkingList],
[baseLen/minLen, discLen/minLen])
yield None
continue
# grab the next start ref from this sequence and see if it's still valid
while True:
yield None
try:
curObjRef = startRefWorkingList.refGen.next()
break
except StopIteration:
# we've run out of refs, grab a new set
if len(startRefWorkingList.source) == 0:
# ref set is empty, choose another
break
# make a generator that yields containers a # of times that is
# proportional to their length
for fw in makeFlywheelGen(
startRefWorkingList.source.values(),
countFunc=lambda x: self.getStartObjAffinity(x),
scale=.05):
yield None
startRefWorkingList.refGen = fw
if curObjRef is None:
# this ref set is empty, choose another
# the base set should never be empty (__builtin__ etc.)
continue
# do we need to go look up the object in _id2ref? sometimes we do that
# to avoid storing multiple redundant refs to a single item
if type(curObjRef) in (types.IntType, types.LongType):
startId = curObjRef
curObjRef = None
try:
for containerRef in self._leakDetector.getContainerByIdGen(startId):
yield None
except:
# ref is invalid
self.notify.debug('invalid startRef, stored as id %s' % startId)
self._leakDetector.removeContainerById(startId)
continue
curObjRef = containerRef
try:
for curObj in curObjRef.getContainerGen():
yield None
except:
self.notify.debug('lost current container, ref.getContainerGen() failed')
# that container is gone, try again
curObjRef = None
continue
self.notify.debug('--> %s' % curObjRef)
#import pdb;pdb.set_trace()
# store a copy of the current objRef
parentObjRef = curObjRef
# if we hit a dead end, start over from another container
curObjRef = None
if hasattr(curObj, '__dict__'):
child = curObj.__dict__
hasLength = self._hasLength(child)
notDeadEnd = not self._isDeadEnd(child)
if hasLength or notDeadEnd:
# prevent cycles in the references (i.e. base.loader.base)
for goesThrough in parentObjRef.goesThroughGen(child):
# don't yield, container might lose this element
pass
if not goesThrough:
objRef = ObjectRef(Indirection(evalStr='.__dict__'),
id(child), parentObjRef)
yield None
if hasLength:
for i in self._addContainerGen(child, objRef):
yield None
if notDeadEnd:
self._addDiscoveredStartRef(child, objRef)
curObjRef = objRef
continue
if type(curObj) is types.DictType:
key = None
attr = None
keys = curObj.keys()
# we will continue traversing the object graph via one key of the dict,
# choose it at random without taking a big chunk of CPU time
numKeysLeft = len(keys) + 1
for key in keys:
yield None
numKeysLeft -= 1
try:
attr = curObj[key]
except KeyError, e:
# this is OK because we are yielding during the iteration
self.notify.debug('could not index into %s with key %s' % (
parentObjRef, safeRepr(key)))
continue
hasLength = self._hasLength(attr)
notDeadEnd = False
# if we haven't picked the next ref, check if this one is a candidate
if curObjRef is None:
notDeadEnd = not self._isDeadEnd(attr, key)
if hasLength or notDeadEnd:
# prevent cycles in the references (i.e. base.loader.base)
for goesThrough in parentObjRef.goesThroughGen(curObj[key]):
# don't yield, container might lose this element
pass
if not goesThrough:
if curObj is __builtin__.__dict__:
objRef = ObjectRef(Indirection(evalStr='%s' % key),
id(curObj[key]))
else:
objRef = ObjectRef(Indirection(dictKey=key),
id(curObj[key]), parentObjRef)
yield None
if hasLength:
for i in self._addContainerGen(attr, objRef):
yield None
if notDeadEnd:
self._addDiscoveredStartRef(attr, objRef)
if curObjRef is None and random.randrange(numKeysLeft) == 0:
curObjRef = objRef
del key
del attr
continue
try:
childNames = dir(curObj)
except:
pass
else:
try:
index = -1
                        attrs = []
                        try:
                            # not every object that reaches this branch is iterable
                            itr = iter(curObj)
                        except TypeError:
                            itr = iter(())
while 1:
yield None
try:
attr = itr.next()
except:
# some custom classes don't do well when iterated
attr = None
break
attrs.append(attr)
# we will continue traversing the object graph via one attr,
# choose it at random without taking a big chunk of CPU time
numAttrsLeft = len(attrs) + 1
for attr in attrs:
yield None
index += 1
numAttrsLeft -= 1
hasLength = self._hasLength(attr)
notDeadEnd = False
if curObjRef is None:
notDeadEnd = not self._isDeadEnd(attr)
if hasLength or notDeadEnd:
# prevent cycles in the references (i.e. base.loader.base)
                                for goesThrough in parentObjRef.goesThroughGen(curObj[index]):
# don't yield, container might lose this element
pass
if not goesThrough:
objRef = ObjectRef(Indirection(evalStr='[%s]' % index),
id(curObj[index]), parentObjRef)
yield None
if hasLength:
for i in self._addContainerGen(attr, objRef):
yield None
if notDeadEnd:
self._addDiscoveredStartRef(attr, objRef)
if curObjRef is None and random.randrange(numAttrsLeft) == 0:
curObjRef = objRef
del attr
except StopIteration, e:
pass
del itr
continue
except Exception, e:
print 'FindContainers job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
class CheckContainers(Job):
"""
Job to check container sizes and find potential leaks; sub-job of ContainerLeakDetector
"""
ReprItems = 5
def __init__(self, name, leakDetector, index):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
self._index = index
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def getPriority(self):
return Job.Priorities.Normal
def run(self):
try:
self._leakDetector._index2containerId2len[self._index] = {}
ids = self._leakDetector.getContainerIds()
# record the current len of each container
for objId in ids:
yield None
try:
for result in self._leakDetector.getContainerByIdGen(objId):
yield None
container = result
except Exception, e:
# this container no longer exists
if self.notify.getDebug():
for contName in self._leakDetector.getContainerNameByIdGen(objId):
yield None
self.notify.debug(
'%s no longer exists; caught exception in getContainerById (%s)' % (
contName, e))
self._leakDetector.removeContainerById(objId)
continue
if container is None:
# this container no longer exists
if self.notify.getDebug():
for contName in self._leakDetector.getContainerNameByIdGen(objId):
yield None
self.notify.debug('%s no longer exists; getContainerById returned None' %
contName)
self._leakDetector.removeContainerById(objId)
continue
try:
cLen = len(container)
except Exception, e:
# this container no longer exists
if self.notify.getDebug():
for contName in self._leakDetector.getContainerNameByIdGen(objId):
yield None
self.notify.debug(
'%s is no longer a container, it is now %s (%s)' %
(contName, safeRepr(container), e))
self._leakDetector.removeContainerById(objId)
continue
self._leakDetector._index2containerId2len[self._index][objId] = cLen
# compare the current len of each container to past lens
if self._index > 0:
idx2id2len = self._leakDetector._index2containerId2len
for objId in idx2id2len[self._index]:
yield None
if objId in idx2id2len[self._index-1]:
diff = idx2id2len[self._index][objId] - idx2id2len[self._index-1][objId]
"""
# this check is too spammy
if diff > 20:
if diff > idx2id2len[self._index-1][objId]:
minutes = (self._leakDetector._index2delay[self._index] -
self._leakDetector._index2delay[self._index-1]) / 60.
name = self._leakDetector.getContainerNameById(objId)
if idx2id2len[self._index-1][objId] != 0:
percent = 100. * (float(diff) / float(idx2id2len[self._index-1][objId]))
try:
for container in self._leakDetector.getContainerByIdGen(objId):
yield None
except:
# TODO
self.notify.debug('caught exception in getContainerByIdGen (1)')
else:
self.notify.warning(
'%s (%s) grew %.2f%% in %.2f minutes (%s items at last measurement, current contents: %s)' % (
name, itype(container), percent, minutes, idx2id2len[self._index][objId],
fastRepr(container, maxLen=CheckContainers.ReprItems)))
yield None
"""
if (self._index > 2 and
objId in idx2id2len[self._index-2] and
objId in idx2id2len[self._index-3]):
diff2 = idx2id2len[self._index-1][objId] - idx2id2len[self._index-2][objId]
diff3 = idx2id2len[self._index-2][objId] - idx2id2len[self._index-3][objId]
if self._index <= 4:
if diff > 0 and diff2 > 0 and diff3 > 0:
name = self._leakDetector.getContainerNameById(objId)
try:
for container in self._leakDetector.getContainerByIdGen(objId):
yield None
except:
# TODO
self.notify.debug('caught exception in getContainerByIdGen (2)')
else:
msg = ('%s (%s) consistently increased in size over the last '
'3 periods (%s items at last measurement, current contents: %s)' %
(name, itype(container), idx2id2len[self._index][objId],
fastRepr(container, maxLen=CheckContainers.ReprItems)))
self.notify.warning(msg)
yield None
elif (objId in idx2id2len[self._index-4] and
objId in idx2id2len[self._index-5]):
# if size has consistently increased over the last 5 checks,
# send out a warning
diff4 = idx2id2len[self._index-3][objId] - idx2id2len[self._index-4][objId]
diff5 = idx2id2len[self._index-4][objId] - idx2id2len[self._index-5][objId]
if diff > 0 and diff2 > 0 and diff3 > 0 and diff4 > 0 and diff5 > 0:
name = self._leakDetector.getContainerNameById(objId)
try:
for container in self._leakDetector.getContainerByIdGen(objId):
yield None
except:
# TODO
self.notify.debug('caught exception in getContainerByIdGen (3)')
else:
msg = ('leak detected: %s (%s) consistently increased in size over the last '
'5 periods (%s items at last measurement, current contents: %s)' %
(name, itype(container), idx2id2len[self._index][objId],
fastRepr(container, maxLen=CheckContainers.ReprItems)))
self.notify.warning(msg)
yield None
messenger.send(self._leakDetector.getLeakEvent(), [container, name])
if config.GetBool('pdb-on-leak-detect', 0):
import pdb;pdb.set_trace()
pass
except Exception, e:
print 'CheckContainers job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
class FPTObjsOfType(Job):
def __init__(self, name, leakDetector, otn, doneCallback=None):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
self._otn = otn
self._doneCallback = doneCallback
self._ldde = self._leakDetector._getDestroyEvent()
self.accept(self._ldde, self._handleLDDestroy)
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
self.ignore(self._ldde)
self._leakDetector = None
self._doneCallback = None
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def _handleLDDestroy(self):
self.destroy()
def getPriority(self):
return Job.Priorities.High
def run(self):
ids = self._leakDetector.getContainerIds()
try:
for id in ids:
getInstance = (self._otn.lower() not in 'dict')
yield None
try:
for container in self._leakDetector.getContainerByIdGen(
id, getInstance=getInstance):
yield None
except:
pass
else:
if hasattr(container, '__class__'):
cName = container.__class__.__name__
else:
cName = container.__name__
if (self._otn.lower() in cName.lower()):
try:
for ptc in self._leakDetector.getContainerNameByIdGen(
id, getInstance=getInstance):
yield None
except:
pass
else:
print 'GPTC(' + self._otn + '):' + self.getJobName() + ': ' + ptc
except Exception, e:
print 'FPTObjsOfType job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
def finished(self):
if self._doneCallback:
self._doneCallback(self)
class FPTObjsNamed(Job):
def __init__(self, name, leakDetector, on, doneCallback=None):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
self._on = on
self._doneCallback = doneCallback
self._ldde = self._leakDetector._getDestroyEvent()
self.accept(self._ldde, self._handleLDDestroy)
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
self.ignore(self._ldde)
self._leakDetector = None
self._doneCallback = None
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def _handleLDDestroy(self):
self.destroy()
def getPriority(self):
return Job.Priorities.High
def run(self):
ids = self._leakDetector.getContainerIds()
try:
for id in ids:
yield None
try:
for container in self._leakDetector.getContainerByIdGen(id):
yield None
except:
pass
else:
name = self._leakDetector._id2ref[id].getFinalIndirectionStr()
if self._on.lower() in name.lower():
try:
for ptc in self._leakDetector.getContainerNameByIdGen(id):
yield None
except:
pass
else:
print 'GPTCN(' + self._on + '):' + self.getJobName() + ': ' + ptc
except Exception, e:
print 'FPTObjsNamed job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
def finished(self):
if self._doneCallback:
self._doneCallback(self)
class PruneObjectRefs(Job):
"""
Job to destroy any container refs that are no longer valid.
Checks validity by asking for each container
"""
def __init__(self, name, leakDetector):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def getPriority(self):
return Job.Priorities.Normal
def run(self):
try:
ids = self._leakDetector.getContainerIds()
for id in ids:
yield None
try:
for container in self._leakDetector.getContainerByIdGen(id):
yield None
except:
# reference is invalid, remove it
self._leakDetector.removeContainerById(id)
_id2baseStartRef = self._leakDetector._findContainersJob._id2baseStartRef
ids = _id2baseStartRef.keys()
for id in ids:
yield None
try:
for container in _id2baseStartRef[id].getContainerGen():
yield None
except:
# reference is invalid, remove it
del _id2baseStartRef[id]
_id2discoveredStartRef = self._leakDetector._findContainersJob._id2discoveredStartRef
ids = _id2discoveredStartRef.keys()
for id in ids:
yield None
try:
for container in _id2discoveredStartRef[id].getContainerGen():
yield None
except:
# reference is invalid, remove it
del _id2discoveredStartRef[id]
except Exception, e:
print 'PruneObjectRefs job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
class ContainerLeakDetector(Job):
"""
Low-priority Python object-graph walker that looks for leaking containers.
To reduce memory usage, this does a random walk of the Python objects to
discover containers rather than keep a set of all visited objects; it may
visit the same object many times but eventually it will discover every object.
Checks container sizes at ever-increasing intervals.
"""
notify = directNotify.newCategory("ContainerLeakDetector")
# set of containers that should not be examined
PrivateIds = set()
def __init__(self, name, firstCheckDelay = None):
Job.__init__(self, name)
self._serialNum = serialNum()
self._findContainersJob = None
self._checkContainersJob = None
self._pruneContainersJob = None
if firstCheckDelay is None:
firstCheckDelay = 60. * 15.
# divide by two, since the first check just takes length measurements and
# doesn't check for leaks
self._nextCheckDelay = firstCheckDelay/2.
self._checkDelayScale = config.GetFloat('leak-detector-check-delay-scale', 1.5)
self._pruneTaskPeriod = config.GetFloat('leak-detector-prune-period', 60. * 30.)
# main dict of id(container)->containerRef
self._id2ref = {}
# storage for results of check-container job
self._index2containerId2len = {}
self._index2delay = {}
if config.GetBool('leak-container', 0):
_createContainerLeak()
if config.GetBool('leak-tasks', 0):
_createTaskLeak()
# don't check our own tables for leaks
ContainerLeakDetector.addPrivateObj(ContainerLeakDetector.PrivateIds)
ContainerLeakDetector.addPrivateObj(self.__dict__)
self.setPriority(Job.Priorities.Min)
jobMgr.add(self)
def destroy(self):
messenger.send(self._getDestroyEvent())
self.ignoreAll()
if self._pruneContainersJob is not None:
jobMgr.remove(self._pruneContainersJob)
self._pruneContainersJob = None
if self._checkContainersJob is not None:
jobMgr.remove(self._checkContainersJob)
self._checkContainersJob = None
jobMgr.remove(self._findContainersJob)
self._findContainersJob = None
del self._id2ref
del self._index2containerId2len
del self._index2delay
def _getDestroyEvent(self):
# sent when leak detector is about to be destroyed
return 'cldDestroy-%s' % self._serialNum
def getLeakEvent(self):
# sent when a leak is detected
# passes description string as argument
return 'containerLeakDetected-%s' % self._serialNum
@classmethod
def addPrivateObj(cls, obj):
cls.PrivateIds.add(id(obj))
@classmethod
def removePrivateObj(cls, obj):
cls.PrivateIds.remove(id(obj))
def _getCheckTaskName(self):
return 'checkForLeakingContainers-%s' % self._serialNum
def _getPruneTaskName(self):
return 'pruneLeakingContainerRefs-%s' % self._serialNum
def getContainerIds(self):
return self._id2ref.keys()
def getContainerByIdGen(self, id, **kwArgs):
# return a generator to look up a container
return self._id2ref[id].getContainerGen(**kwArgs)
def getContainerById(self, id):
for result in self._id2ref[id].getContainerGen():
pass
return result
def getContainerNameByIdGen(self, id, **kwArgs):
return self._id2ref[id].getEvalStrGen(**kwArgs)
def getContainerNameById(self, id):
if id in self._id2ref:
return repr(self._id2ref[id])
return '<unknown container>'
def removeContainerById(self, id):
if id in self._id2ref:
self._id2ref[id].destroy()
del self._id2ref[id]
def run(self):
# start looking for containers
self._findContainersJob = FindContainers(
'%s-findContainers' % self.getJobName(), self)
jobMgr.add(self._findContainersJob)
self._scheduleNextLeakCheck()
self._scheduleNextPruning()
while True:
yield Job.Sleep
def getPathsToContainers(self, name, ot, doneCallback=None):
j = FPTObjsOfType(name, self, ot, doneCallback)
jobMgr.add(j)
return j
def getPathsToContainersNamed(self, name, on, doneCallback=None):
j = FPTObjsNamed(name, self, on, doneCallback)
jobMgr.add(j)
return j
def _scheduleNextLeakCheck(self):
taskMgr.doMethodLater(self._nextCheckDelay, self._checkForLeaks,
self._getCheckTaskName())
# delay between checks
# fib: 1 1 2 3 5 8 13 21 34 55 89
# * 2.: 1 2 4 8 16 32 64 128 256 512 1024
# * 1.5: 1 1.5 2.3 3.4 5.1 7.6 11.4 17.1 25.6 38.4 57.7
#
# delay from job start
# fib: 1 2 4 7 12 20 33 54 88 143 232
# * 2.: 1 3 7 15 31 63 127 255 511 1023 2047
# * 1.5: 1 2.5 4.75 8.1 13.2 20.8 32.2 49.3 74.9 113.3 171
self._nextCheckDelay = self._nextCheckDelay * self._checkDelayScale
def _checkForLeaks(self, task=None):
self._index2delay[len(self._index2containerId2len)] = self._nextCheckDelay
self._checkContainersJob = CheckContainers(
'%s-checkForLeaks' % self.getJobName(), self, len(self._index2containerId2len))
self.acceptOnce(self._checkContainersJob.getFinishedEvent(),
self._scheduleNextLeakCheck)
jobMgr.add(self._checkContainersJob)
return task.done
def _scheduleNextPruning(self):
taskMgr.doMethodLater(self._pruneTaskPeriod, self._pruneObjectRefs,
self._getPruneTaskName())
def _pruneObjectRefs(self, task=None):
self._pruneContainersJob = PruneObjectRefs(
'%s-pruneObjectRefs' % self.getJobName(), self)
self.acceptOnce(self._pruneContainersJob.getFinishedEvent(),
self._scheduleNextPruning)
jobMgr.add(self._pruneContainersJob)
return task.done
self.accept(self._ldde, self._handleLDDestroy)
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
self.ignore(self._ldde)
self._leakDetector = None
self._doneCallback = None
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def _handleLDDestroy(self):
self.destroy()
def getPriority(self):
return Job.Priorities.High
def run(self):
ids = self._leakDetector.getContainerIds()
try:
for id in ids:
yield None
try:
for container in self._leakDetector.getContainerByIdGen(id):
yield None
except:
pass
else:
name = self._leakDetector._id2ref[id].getFinalIndirectionStr()
if self._on.lower() in name.lower():
try:
for ptc in self._leakDetector.getContainerNameByIdGen(id):
yield None
except:
pass
else:
print 'GPTCN(' + self._on + '):' + self.getJobName() + ': ' + ptc
except Exception, e:
print 'FPTObjsNamed job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
def finished(self):
if self._doneCallback:
self._doneCallback(self)
class PruneObjectRefs(Job):
"""
Job to destroy any container refs that are no longer valid.
Checks validity by asking for each container
"""
def __init__(self, name, leakDetector):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def getPriority(self):
return Job.Priorities.Normal
def run(self):
try:
ids = self._leakDetector.getContainerIds()
for id in ids:
yield None
try:
for container in self._leakDetector.getContainerByIdGen(id):
yield None
except:
# reference is invalid, remove it
self._leakDetector.removeContainerById(id)
_id2baseStartRef = self._leakDetector._findContainersJob._id2baseStartRef
ids = _id2baseStartRef.keys()
for id in ids:
yield None
try:
for container in _id2baseStartRef[id].getContainerGen():
yield None
except:
# reference is invalid, remove it
del _id2baseStartRef[id]
_id2discoveredStartRef = self._leakDetector._findContainersJob._id2discoveredStartRef
ids = _id2discoveredStartRef.keys()
for id in ids:
yield None
try:
for container in _id2discoveredStartRef[id].getContainerGen():
yield None
except:
# reference is invalid, remove it
del _id2discoveredStartRef[id]
except Exception, e:
print 'PruneObjectRefs job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
class ContainerLeakDetector(Job):
"""
Low-priority Python object-graph walker that looks for leaking containers.
To reduce memory usage, this does a random walk of the Python objects to
discover containers rather than keep a set of all visited objects; it may
visit the same object many times but eventually it will discover every object.
Checks container sizes at ever-increasing intervals.
"""
notify = directNotify.newCategory("ContainerLeakDetector")
# set of containers that should not be examined
PrivateIds = set()
def __init__(self, name, firstCheckDelay = None):
Job.__init__(self, name)
self._serialNum = serialNum()
self._findContainersJob = None
self._checkContainersJob = None
self._pruneContainersJob = None
if firstCheckDelay is None:
firstCheckDelay = 60. * 15.
# divide by two, since the first check just takes length measurements and
# doesn't check for leaks
self._nextCheckDelay = firstCheckDelay/2.
self._checkDelayScale = config.GetFloat('leak-detector-check-delay-scale', 1.5)
self._pruneTaskPeriod = config.GetFloat('leak-detector-prune-period', 60. * 30.)
# main dict of id(container)->containerRef
self._id2ref = {}
# storage for results of check-container job
self._index2containerId2len = {}
self._index2delay = {}
if config.GetBool('leak-container', 0):
_createContainerLeak()
if config.GetBool('leak-tasks', 0):
_createTaskLeak()
# don't check our own tables for leaks
ContainerLeakDetector.addPrivateObj(ContainerLeakDetector.PrivateIds)
ContainerLeakDetector.addPrivateObj(self.__dict__)
self.setPriority(Job.Priorities.Min)
jobMgr.add(self)
def destroy(self):
messenger.send(self._getDestroyEvent())
self.ignoreAll()
if self._pruneContainersJob is not None:
jobMgr.remove(self._pruneContainersJob)
self._pruneContainersJob = None
if self._checkContainersJob is not None:
jobMgr.remove(self._checkContainersJob)
self._checkContainersJob = None
jobMgr.remove(self._findContainersJob)
self._findContainersJob = None
del self._id2ref
del self._index2containerId2len
del self._index2delay
def _getDestroyEvent(self):
# sent when leak detector is about to be destroyed
return 'cldDestroy-%s' % self._serialNum
def getLeakEvent(self):
# sent when a leak is detected
# passes description string as argument
return 'containerLeakDetected-%s' % self._serialNum
@classmethod
def addPrivateObj(cls, obj):
cls.PrivateIds.add(id(obj))
@classmethod
def removePrivateObj(cls, obj):
cls.PrivateIds.remove(id(obj))
def _getCheckTaskName(self):
return 'checkForLeakingContainers-%s' % self._serialNum
def _getPruneTaskName(self):
return 'pruneLeakingContainerRefs-%s' % self._serialNum
def getContainerIds(self):
return self._id2ref.keys()
def getContainerByIdGen(self, id, **kwArgs):
# return a generator to look up a container
return self._id2ref[id].getContainerGen(**kwArgs)
def getContainerById(self, id):
for result in self._id2ref[id].getContainerGen():
pass
return result
def getContainerNameByIdGen(self, id, **kwArgs):
return self._id2ref[id].getEvalStrGen(**kwArgs)
def getContainerNameById(self, id):
if id in self._id2ref:
return repr(self._id2ref[id])
return '<unknown container>'
def removeContainerById(self, id):
if id in self._id2ref:
self._id2ref[id].destroy()
del self._id2ref[id]
def run(self):
# start looking for containers
self._findContainersJob = FindContainers(
'%s-findContainers' % self.getJobName(), self)
jobMgr.add(self._findContainersJob)
self._scheduleNextLeakCheck()
self._scheduleNextPruning()
while True:
yield Job.Sleep
def getPathsToContainers(self, name, ot, doneCallback=None):
j = FPTObjsOfType(name, self, ot, doneCallback)
jobMgr.add(j)
return j
def getPathsToContainersNamed(self, name, on, doneCallback=None):
j = FPTObjsNamed(name, self, on, doneCallback)
jobMgr.add(j)
return j
def _scheduleNextLeakCheck(self):
taskMgr.doMethodLater(self._nextCheckDelay, self._checkForLeaks,
self._getCheckTaskName())
# delay between checks
# fib: 1 1 2 3 5 8 13 21 34 55 89
# * 2.: 1 2 4 8 16 32 64 128 256 512 1024
# * 1.5: 1 1.5 2.3 3.4 5.1 7.6 11.4 17.1 25.6 38.4 57.7
#
# delay from job start
# fib: 1 2 4 7 12 20 33 54 88 143 232
# * 2.: 1 3 7 15 31 63 127 255 511 1023 2047
# * 1.5: 1 2.5 4.75 8.1 13.2 20.8 32.2 49.3 74.9 113.3 171
self._nextCheckDelay = self._nextCheckDelay * self._checkDelayScale
def _checkForLeaks(self, task=None):
self._index2delay[len(self._index2containerId2len)] = self._nextCheckDelay
self._checkContainersJob = CheckContainers(
'%s-checkForLeaks' % self.getJobName(), self, len(self._index2containerId2len))
self.acceptOnce(self._checkContainersJob.getFinishedEvent(),
self._scheduleNextLeakCheck)
jobMgr.add(self._checkContainersJob)
return task.done
def _scheduleNextPruning(self):
taskMgr.doMethodLater(self._pruneTaskPeriod, self._pruneObjectRefs,
self._getPruneTaskName())
def _pruneObjectRefs(self, task=None):
self._pruneContainersJob = PruneObjectRefs(
'%s-pruneObjectRefs' % self.getJobName(), self)
self.acceptOnce(self._pruneContainersJob.getFinishedEvent(),
self._scheduleNextPruning)
jobMgr.add(self._pruneContainersJob)
return task.done
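# Example (sketch): one way the detector above could be wired up in a running
# "direct" environment, assuming the usual globals (base, jobMgr, taskMgr,
# messenger) that this module already relies on; the handler name
# _onLeakDetected is only illustrative.
#
#   detector = ContainerLeakDetector('leak-detector', firstCheckDelay=60. * 5.)
#
#   def _onLeakDetected(container, name):
#       print 'possible leak: %s (%s)' % (name, itype(container))
#
#   base.accept(detector.getLeakEvent(), _onLeakDetected)
#   ...
#   detector.destroy()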
| en | 0.86277 | # use tuples as keys since they can't be weakref'd, and use an instance # since it can't be repr/eval'd # that will force the leak detector to hold a normal 'non-weak' reference # test the non-weakref object reference handling Represents the indirection that brings you from a container to an element of the container. Stored as a string to be used as part of an eval, or as a key to be looked up in a dict. Each dictionary dereference is individually eval'd since the dict key might have been garbage-collected TODO: store string components that are duplicates of strings in the actual system so that Python will keep one copy and reduce memory usage # if this is a dictionary lookup, pass dictKey instead of evalStr # is the dictKey a weak reference? # if we can repr/eval the key, store it as an evalStr # check to make sure the eval succeeded # eval/repr succeeded, store as an evalStr # store a weakref to the key # re-entrant # is this an indirection through a dictionary? # look ourselves up in parentDict # objects in __builtin__ will have parentDict==None # return our contribution to the full name of an object # if we're an instance dict, skip over this one (obj.__dict__[keyName] == obj.keyName) # if the previous indirection was an instance dict, change our syntax from ['key'] to .key # we're stored as a dict key # if the previous indirection was an instance dict, change our syntax from ['key'] to .key stores a reference to a container in a way that does not prevent garbage collection of the container if possible stored as a series of 'indirections' (obj.foo -> '.foo', dict[key] -> '[key]', etc.) # are we building off of an existing ref? # make sure we're not storing a reference to the actual object, # that could cause a memory leak # prevent cycles (i.e. base.loader.base.loader) # make sure our indirections don't get destroyed while we're using them # make sure the indirections don't go away on us # build up a string to be eval'd # try to look up this key in the curObj dictionary # since we cache the ids involved in this reference, # this isn't perfect, for example if base.myObject is reassigned # to a different object after this Ref was created this would return # false, allowing a ref to base.myObject.otherObject.myObject # eval('curObj.foo.bar.someDict') # this eval is not based off of curObj, use the global__builtin__ namespace # put __builtin__ at the start if it's not already there # try to get a handle on the container by eval'ing and looking things # up in dictionaries, depending on the type of each indirection # if getInstance is True, will return instance instead of instance dict #import pdb;pdb.set_trace() # make sure the indirections don't go away on us # build up a string to be eval'd # try to look up this key in the curObj dictionary # TODO: check that this is still the object we originally pointed to # make sure the indirections don't go away on us Explore the Python graph, looking for objects that support __len__() # these hold objects that we should start traversals from often and not-as-often, # respectively # these are working copies so that our iterations aren't disturbed by changes to the # definitive ref sets # set up the base containers, the ones that hold most objects # container for objects that want to make sure they are found by # the object exploration algorithm, including objects that exist # just to measure things such as C++ memory usage, scene graph size, # framerate, etc. 
See LeakDetectors.py # how good of a starting object is this object for traversing the object graph? # if it's an internal object, ignore it # prevent crashes in objects that define __cmp__ and don't handle strings # prevent infinite recursion in built-in containers related to methods # if this container is new, or the objRef repr is shorter than what we already have, # put it in the table # we've discovered an object that can be used to start an object graph traversal # the ref that we already have is more concise than the new ref # the ref that we already have is more concise than the new ref # if we already are storing a reference to this object, don't store a second reference # this yields a different set of start refs every time we start a new traversal # force creation of a new workingListSelector inside the while loop right off the bat # this holds the current step of the current traversal # yield up here instead of at the end, since we skip back to the # top of the while loop from various points #import pdb;pdb.set_trace() # choose an object to start a traversal from # do relative # of traversals on each set based on how many refs it contains # this will cut down the traversals of the larger set by 2/3 # grab the next start ref from this sequence and see if it's still valid # we've run out of refs, grab a new set # ref set is empty, choose another # make a generator that yields containers a # of times that is # proportional to their length # this ref set is empty, choose another # the base set should never be empty (__builtin__ etc.) # do we need to go look up the object in _id2ref? sometimes we do that # to avoid storing multiple redundant refs to a single item # ref is invalid # that container is gone, try again #import pdb;pdb.set_trace() # store a copy of the current objRef # if we hit a dead end, start over from another container # prevent cycles in the references (i.e. base.loader.base) # don't yield, container might lose this element # we will continue traversing the object graph via one key of the dict, # choose it at random without taking a big chunk of CPU time # this is OK because we are yielding during the iteration # if we haven't picked the next ref, check if this one is a candidate # prevent cycles in the references (i.e. base.loader.base) # don't yield, container might lose this element # some custom classes don't do well when iterated # we will continue traversing the object graph via one attr, # choose it at random without taking a big chunk of CPU time # prevent cycles in the references (i.e. base.loader.base) # don't yield, container might lose this element Job to check container sizes and find potential leaks; sub-job of ContainerLeakDetector # record the current len of each container # this container no longer exists # this container no longer exists # this container no longer exists # compare the current len of each container to past lens # this check is too spammy if diff > 20: if diff > idx2id2len[self._index-1][objId]: minutes = (self._leakDetector._index2delay[self._index] - self._leakDetector._index2delay[self._index-1]) / 60. name = self._leakDetector.getContainerNameById(objId) if idx2id2len[self._index-1][objId] != 0: percent = 100. 
* (float(diff) / float(idx2id2len[self._index-1][objId])) try: for container in self._leakDetector.getContainerByIdGen(objId): yield None except: # TODO self.notify.debug('caught exception in getContainerByIdGen (1)') else: self.notify.warning( '%s (%s) grew %.2f%% in %.2f minutes (%s items at last measurement, current contents: %s)' % ( name, itype(container), percent, minutes, idx2id2len[self._index][objId], fastRepr(container, maxLen=CheckContainers.ReprItems))) yield None # TODO # if size has consistently increased over the last 5 checks, # send out a warning # TODO Job to destroy any container refs that are no longer valid. Checks validity by asking for each container # reference is invalid, remove it # reference is invalid, remove it # reference is invalid, remove it Low-priority Python object-graph walker that looks for leaking containers. To reduce memory usage, this does a random walk of the Python objects to discover containers rather than keep a set of all visited objects; it may visit the same object many times but eventually it will discover every object. Checks container sizes at ever-increasing intervals. # set of containers that should not be examined # divide by two, since the first check just takes length measurements and # doesn't check for leaks # main dict of id(container)->containerRef # storage for results of check-container job # don't check our own tables for leaks # sent when leak detector is about to be destroyed # sent when a leak is detected # passes description string as argument # return a generator to look up a container # start looking for containers # delay between checks # fib: 1 1 2 3 5 8 13 21 34 55 89 # * 2.: 1 2 4 8 16 32 64 128 256 512 1024 # * 1.5: 1 1.5 2.3 3.4 5.1 7.6 11.4 17.1 25.6 38.4 57.7 # # delay from job start # fib: 1 2 4 7 12 20 33 54 88 143 232 # * 2.: 1 3 7 15 31 63 127 255 511 1023 2047 # * 1.5: 1 2.5 4.75 8.1 13.2 20.8 32.2 49.3 74.9 113.3 171 | 1.911967 | 2 |
virtual/lib/python3.6/site-packages/sqlalchemy/sql/default_comparator.py | mzazakeith/flask-blog | 207 | 5986 |
# sql/default_comparator.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementation of SQL comparison operations.
"""
from .. import exc, util
from . import type_api
from . import operators
from .elements import BindParameter, True_, False_, BinaryExpression, \
Null, _const_expr, _clause_element_as_expr, \
ClauseList, ColumnElement, TextClause, UnaryExpression, \
collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \
Slice, Visitable, _literal_as_binds, CollectionAggregate
from .selectable import SelectBase, Alias, Selectable, ScalarSelect
def _boolean_compare(expr, op, obj, negate=None, reverse=False,
_python_is_types=(util.NoneType, bool),
result_type = None,
**kwargs):
if result_type is None:
result_type = type_api.BOOLEANTYPE
if isinstance(obj, _python_is_types + (Null, True_, False_)):
# allow x ==/!= True/False to be treated as a literal.
# this comes out to "== / != true/false" or "1/0" if those
# constants aren't supported and works on all platforms
if op in (operators.eq, operators.ne) and \
isinstance(obj, (bool, True_, False_)):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
elif op in (operators.is_distinct_from, operators.isnot_distinct_from):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
# all other None/True/False uses IS, IS NOT
if op in (operators.eq, operators.is_):
return BinaryExpression(expr, _const_expr(obj),
operators.is_,
negate=operators.isnot,
type_=result_type
)
elif op in (operators.ne, operators.isnot):
return BinaryExpression(expr, _const_expr(obj),
operators.isnot,
negate=operators.is_,
type_=result_type
)
else:
raise exc.ArgumentError(
"Only '=', '!=', 'is_()', 'isnot()', "
"'is_distinct_from()', 'isnot_distinct_from()' "
"operators can be used with None/True/False")
else:
obj = _check_literal(expr, op, obj)
if reverse:
return BinaryExpression(obj,
expr,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
return BinaryExpression(expr,
obj,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
def _custom_op_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
if result_type is None:
if op.return_type:
result_type = op.return_type
elif op.is_comparison:
result_type = type_api.BOOLEANTYPE
return _binary_operate(
expr, op, obj, reverse=reverse, result_type=result_type, **kw)
def _binary_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
obj = _check_literal(expr, op, obj)
if reverse:
left, right = obj, expr
else:
left, right = expr, obj
if result_type is None:
op, result_type = left.comparator._adapt_expression(
op, right.comparator)
return BinaryExpression(
left, right, op, type_=result_type, modifiers=kw)
def _conjunction_operate(expr, op, other, **kw):
if op is operators.and_:
return and_(expr, other)
elif op is operators.or_:
return or_(expr, other)
else:
raise NotImplementedError()
def _scalar(expr, op, fn, **kw):
return fn(expr)
def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, ScalarSelect):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op)
elif isinstance(seq_or_selectable, SelectBase):
# TODO: if we ever want to support (x, y, z) IN (select x,
# y, z from table), we would need a multi-column version of
# as_scalar() to produce a multi- column selectable that
# does not export itself as a FROM clause
return _boolean_compare(
expr, op, seq_or_selectable.as_scalar(),
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, (Selectable, TextClause)):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, ClauseElement):
if isinstance(seq_or_selectable, BindParameter) and \
seq_or_selectable.expanding:
return _boolean_compare(
expr, op,
seq_or_selectable,
negate=negate_op)
else:
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r'
% seq_or_selectable)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r' % o)
elif o is None:
o = Null()
else:
o = expr._bind_param(op, o)
args.append(o)
if len(args) == 0:
op, negate_op = (
operators.empty_in_op,
operators.empty_notin_op) if op is operators.in_op \
else (
operators.empty_notin_op,
operators.empty_in_op)
return _boolean_compare(expr, op,
ClauseList(*args).self_group(against=op),
negate=negate_op)
def _getitem_impl(expr, op, other, **kw):
if isinstance(expr.type, type_api.INDEXABLE):
other = _check_literal(expr, op, other)
return _binary_operate(expr, op, other, **kw)
else:
_unsupported_impl(expr, op, other, **kw)
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError("Operator '%s' is not supported on "
"this expression" % op.__name__)
def _inv_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__inv__`."""
if hasattr(expr, 'negation_clause'):
return expr.negation_clause
else:
return expr._negate()
def _neg_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__neg__`."""
return UnaryExpression(expr, operator=operators.neg, type_=expr.type)
def _match_impl(expr, op, other, **kw):
"""See :meth:`.ColumnOperators.match`."""
return _boolean_compare(
expr, operators.match_op,
_check_literal(
expr, operators.match_op, other),
result_type=type_api.MATCHTYPE,
negate=operators.notmatch_op
if op is operators.match_op else operators.match_op,
**kw
)
def _distinct_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.distinct`."""
return UnaryExpression(expr, operator=operators.distinct_op,
type_=expr.type)
def _between_impl(expr, op, cleft, cright, **kw):
"""See :meth:`.ColumnOperators.between`."""
return BinaryExpression(
expr,
ClauseList(
_check_literal(expr, operators.and_, cleft),
_check_literal(expr, operators.and_, cright),
operator=operators.and_,
group=False, group_contents=False),
op,
negate=operators.notbetween_op
if op is operators.between_op
else operators.between_op,
modifiers=kw)
def _collate_impl(expr, op, other, **kw):
return collate(expr, other)
# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
"and_": (_conjunction_operate,),
"or_": (_conjunction_operate,),
"inv": (_inv_impl,),
"add": (_binary_operate,),
"mul": (_binary_operate,),
"sub": (_binary_operate,),
"div": (_binary_operate,),
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_custom_op_operate,),
"json_path_getitem_op": (_binary_operate, ),
"json_getitem_op": (_binary_operate, ),
"concat_op": (_binary_operate,),
"any_op": (_scalar, CollectionAggregate._create_any),
"all_op": (_scalar, CollectionAggregate._create_all),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
"ne": (_boolean_compare, operators.eq),
"gt": (_boolean_compare, operators.le),
"ge": (_boolean_compare, operators.lt),
"eq": (_boolean_compare, operators.ne),
"is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
"isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
"like_op": (_boolean_compare, operators.notlike_op),
"ilike_op": (_boolean_compare, operators.notilike_op),
"notlike_op": (_boolean_compare, operators.like_op),
"notilike_op": (_boolean_compare, operators.ilike_op),
"contains_op": (_boolean_compare, operators.notcontains_op),
"startswith_op": (_boolean_compare, operators.notstartswith_op),
"endswith_op": (_boolean_compare, operators.notendswith_op),
"desc_op": (_scalar, UnaryExpression._create_desc),
"asc_op": (_scalar, UnaryExpression._create_asc),
"nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
"nullslast_op": (_scalar, UnaryExpression._create_nullslast),
"in_op": (_in_impl, operators.notin_op),
"notin_op": (_in_impl, operators.in_op),
"is_": (_boolean_compare, operators.is_),
"isnot": (_boolean_compare, operators.isnot),
"collate": (_collate_impl,),
"match_op": (_match_impl,),
"notmatch_op": (_match_impl,),
"distinct_op": (_distinct_impl,),
"between_op": (_between_impl, ),
"notbetween_op": (_between_impl, ),
"neg": (_neg_impl,),
"getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
"contains": (_unsupported_impl,),
}
def _check_literal(expr, operator, other, bindparam_type=None):
if isinstance(other, (ColumnElement, TextClause)):
if isinstance(other, BindParameter) and \
other.type._isnull:
other = other._clone()
other.type = expr.type
return other
elif hasattr(other, '__clause_element__'):
other = other.__clause_element__()
elif isinstance(other, type_api.TypeEngine.Comparator):
other = other.expr
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
elif not isinstance(other, Visitable):
return expr._bind_param(operator, other, type_=bindparam_type)
else:
return other
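# Example (sketch): roughly how the operator_lookup table above is consumed by
# the comparator machinery (type_api.TypeEngine.Comparator.operate looks the
# operator up by name and calls the first tuple element, passing the rest
# along); 'some_column' stands in for any ColumnElement and the literal 5 is
# only illustrative.
#
#   impl, negate = operator_lookup["eq"]      # (_boolean_compare, operators.ne)
#   clause = impl(some_column, operators.eq, 5, negate)
#   # -> a BinaryExpression equivalent to some_column == 5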
| # sql/default_comparator.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementation of SQL comparison operations.
"""
from .. import exc, util
from . import type_api
from . import operators
from .elements import BindParameter, True_, False_, BinaryExpression, \
Null, _const_expr, _clause_element_as_expr, \
ClauseList, ColumnElement, TextClause, UnaryExpression, \
collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \
Slice, Visitable, _literal_as_binds, CollectionAggregate
from .selectable import SelectBase, Alias, Selectable, ScalarSelect
def _boolean_compare(expr, op, obj, negate=None, reverse=False,
_python_is_types=(util.NoneType, bool),
result_type = None,
**kwargs):
if result_type is None:
result_type = type_api.BOOLEANTYPE
if isinstance(obj, _python_is_types + (Null, True_, False_)):
# allow x ==/!= True/False to be treated as a literal.
# this comes out to "== / != true/false" or "1/0" if those
# constants aren't supported and works on all platforms
if op in (operators.eq, operators.ne) and \
isinstance(obj, (bool, True_, False_)):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
elif op in (operators.is_distinct_from, operators.isnot_distinct_from):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
# all other None/True/False uses IS, IS NOT
if op in (operators.eq, operators.is_):
return BinaryExpression(expr, _const_expr(obj),
operators.is_,
negate=operators.isnot,
type_=result_type
)
elif op in (operators.ne, operators.isnot):
return BinaryExpression(expr, _const_expr(obj),
operators.isnot,
negate=operators.is_,
type_=result_type
)
else:
raise exc.ArgumentError(
"Only '=', '!=', 'is_()', 'isnot()', "
"'is_distinct_from()', 'isnot_distinct_from()' "
"operators can be used with None/True/False")
else:
obj = _check_literal(expr, op, obj)
if reverse:
return BinaryExpression(obj,
expr,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
return BinaryExpression(expr,
obj,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
def _custom_op_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
if result_type is None:
if op.return_type:
result_type = op.return_type
elif op.is_comparison:
result_type = type_api.BOOLEANTYPE
return _binary_operate(
expr, op, obj, reverse=reverse, result_type=result_type, **kw)
def _binary_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
obj = _check_literal(expr, op, obj)
if reverse:
left, right = obj, expr
else:
left, right = expr, obj
if result_type is None:
op, result_type = left.comparator._adapt_expression(
op, right.comparator)
return BinaryExpression(
left, right, op, type_=result_type, modifiers=kw)
def _conjunction_operate(expr, op, other, **kw):
if op is operators.and_:
return and_(expr, other)
elif op is operators.or_:
return or_(expr, other)
else:
raise NotImplementedError()
def _scalar(expr, op, fn, **kw):
return fn(expr)
def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, ScalarSelect):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op)
elif isinstance(seq_or_selectable, SelectBase):
# TODO: if we ever want to support (x, y, z) IN (select x,
# y, z from table), we would need a multi-column version of
# as_scalar() to produce a multi- column selectable that
# does not export itself as a FROM clause
return _boolean_compare(
expr, op, seq_or_selectable.as_scalar(),
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, (Selectable, TextClause)):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, ClauseElement):
if isinstance(seq_or_selectable, BindParameter) and \
seq_or_selectable.expanding:
return _boolean_compare(
expr, op,
seq_or_selectable,
negate=negate_op)
else:
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r'
% seq_or_selectable)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r' % o)
elif o is None:
o = Null()
else:
o = expr._bind_param(op, o)
args.append(o)
if len(args) == 0:
op, negate_op = (
operators.empty_in_op,
operators.empty_notin_op) if op is operators.in_op \
else (
operators.empty_notin_op,
operators.empty_in_op)
return _boolean_compare(expr, op,
ClauseList(*args).self_group(against=op),
negate=negate_op)
def _getitem_impl(expr, op, other, **kw):
if isinstance(expr.type, type_api.INDEXABLE):
other = _check_literal(expr, op, other)
return _binary_operate(expr, op, other, **kw)
else:
_unsupported_impl(expr, op, other, **kw)
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError("Operator '%s' is not supported on "
"this expression" % op.__name__)
def _inv_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__inv__`."""
if hasattr(expr, 'negation_clause'):
return expr.negation_clause
else:
return expr._negate()
def _neg_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__neg__`."""
return UnaryExpression(expr, operator=operators.neg, type_=expr.type)
def _match_impl(expr, op, other, **kw):
"""See :meth:`.ColumnOperators.match`."""
return _boolean_compare(
expr, operators.match_op,
_check_literal(
expr, operators.match_op, other),
result_type=type_api.MATCHTYPE,
negate=operators.notmatch_op
if op is operators.match_op else operators.match_op,
**kw
)
def _distinct_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.distinct`."""
return UnaryExpression(expr, operator=operators.distinct_op,
type_=expr.type)
def _between_impl(expr, op, cleft, cright, **kw):
"""See :meth:`.ColumnOperators.between`."""
return BinaryExpression(
expr,
ClauseList(
_check_literal(expr, operators.and_, cleft),
_check_literal(expr, operators.and_, cright),
operator=operators.and_,
group=False, group_contents=False),
op,
negate=operators.notbetween_op
if op is operators.between_op
else operators.between_op,
modifiers=kw)
def _collate_impl(expr, op, other, **kw):
return collate(expr, other)
# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
"and_": (_conjunction_operate,),
"or_": (_conjunction_operate,),
"inv": (_inv_impl,),
"add": (_binary_operate,),
"mul": (_binary_operate,),
"sub": (_binary_operate,),
"div": (_binary_operate,),
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_custom_op_operate,),
"json_path_getitem_op": (_binary_operate, ),
"json_getitem_op": (_binary_operate, ),
"concat_op": (_binary_operate,),
"any_op": (_scalar, CollectionAggregate._create_any),
"all_op": (_scalar, CollectionAggregate._create_all),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
"ne": (_boolean_compare, operators.eq),
"gt": (_boolean_compare, operators.le),
"ge": (_boolean_compare, operators.lt),
"eq": (_boolean_compare, operators.ne),
"is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
"isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
"like_op": (_boolean_compare, operators.notlike_op),
"ilike_op": (_boolean_compare, operators.notilike_op),
"notlike_op": (_boolean_compare, operators.like_op),
"notilike_op": (_boolean_compare, operators.ilike_op),
"contains_op": (_boolean_compare, operators.notcontains_op),
"startswith_op": (_boolean_compare, operators.notstartswith_op),
"endswith_op": (_boolean_compare, operators.notendswith_op),
"desc_op": (_scalar, UnaryExpression._create_desc),
"asc_op": (_scalar, UnaryExpression._create_asc),
"nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
"nullslast_op": (_scalar, UnaryExpression._create_nullslast),
"in_op": (_in_impl, operators.notin_op),
"notin_op": (_in_impl, operators.in_op),
"is_": (_boolean_compare, operators.is_),
"isnot": (_boolean_compare, operators.isnot),
"collate": (_collate_impl,),
"match_op": (_match_impl,),
"notmatch_op": (_match_impl,),
"distinct_op": (_distinct_impl,),
"between_op": (_between_impl, ),
"notbetween_op": (_between_impl, ),
"neg": (_neg_impl,),
"getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
"contains": (_unsupported_impl,),
}
def _check_literal(expr, operator, other, bindparam_type=None):
if isinstance(other, (ColumnElement, TextClause)):
if isinstance(other, BindParameter) and \
other.type._isnull:
other = other._clone()
other.type = expr.type
return other
elif hasattr(other, '__clause_element__'):
other = other.__clause_element__()
elif isinstance(other, type_api.TypeEngine.Comparator):
other = other.expr
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
elif not isinstance(other, Visitable):
return expr._bind_param(operator, other, type_=bindparam_type)
else:
return other | en | 0.783893 | # sql/default_comparator.py # Copyright (C) 2005-2018 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php Default implementation of SQL comparison operations. # allow x ==/!= True/False to be treated as a literal. # this comes out to "== / != true/false" or "1/0" if those # constants aren't supported and works on all platforms # all other None/True/False uses IS, IS NOT # TODO: if we ever want to support (x, y, z) IN (select x, # y, z from table), we would need a multi-column version of # as_scalar() to produce a multi- column selectable that # does not export itself as a FROM clause # Handle non selectable arguments as sequences See :meth:`.ColumnOperators.__inv__`. See :meth:`.ColumnOperators.__neg__`. See :meth:`.ColumnOperators.match`. See :meth:`.ColumnOperators.distinct`. See :meth:`.ColumnOperators.between`. # a mapping of operators with the method they use, along with # their negated operator for comparison operators | 2.219982 | 2 |
recipes/serializers.py | klharshini/recipe-django-api | 0 | 5987 |
from django.contrib.auth.validators import UnicodeUsernameValidator
from rest_framework import serializers
from django.contrib.auth.models import User
from recipes.models import Recipe, Ingredient, Step
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("username", "last_name", "first_name", "email")
extra_kwargs = {
'username': {
'validators': [UnicodeUsernameValidator()],
}
}
class IngredientSerializer(serializers.ModelSerializer):
class Meta:
model = Ingredient
fields = ["text"]
class StepSerializer(serializers.ModelSerializer):
class Meta:
model = Step
fields = ["step_text"]
class RecipeSerializer(serializers.ModelSerializer):
ingredients = IngredientSerializer(many=True, required=False)
steps = StepSerializer(many=True, required=False)
user = UserSerializer(required=True)
def create(self, validated_data):
        steps_data = validated_data.pop('steps', [])
        ingredients_data = validated_data.pop('ingredients', [])
user_data = validated_data.pop('user')
username = user_data.pop('username')
user = User.objects.get_by_natural_key(username)
recipe = Recipe.objects.create(user=user, **validated_data)
for steps in steps_data:
Step.objects.create(recipe=recipe, **steps)
for ingredients in ingredients_data:
Ingredient.objects.create(recipe=recipe, **ingredients)
return recipe
class Meta:
model = Recipe
fields = ("name", "user", "steps", "ingredients")
def update(self, instance, validated_data):
        steps_data = validated_data.pop('steps', None)
        ingredients_data = validated_data.pop('ingredients', None)
        if steps_data is not None:
            Step.objects.filter(recipe=instance).delete()
            for steps in steps_data:
                Step.objects.create(recipe=instance, **steps)
        if ingredients_data is not None:
            Ingredient.objects.filter(recipe=instance).delete()
            for ingredients in ingredients_data:
                Ingredient.objects.create(recipe=instance, **ingredients)
return instance
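# Example (sketch): the nested payload shape RecipeSerializer expects; the field
# names come from the serializers above, the concrete values are illustrative and
# the referenced username must already exist.
#
#   {
#       "name": "Pancakes",
#       "user": {"username": "alice", "first_name": "A", "last_name": "B", "email": "a@example.com"},
#       "steps": [{"step_text": "Mix the batter"}, {"step_text": "Fry"}],
#       "ingredients": [{"text": "2 eggs"}, {"text": "200g flour"}]
#   }
#
# Passing this as RecipeSerializer(data=payload), then calling is_valid() and
# save(), routes through create() above.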
| from django.contrib.auth.validators import UnicodeUsernameValidator
from rest_framework import serializers
from django.contrib.auth.models import User
from recipes.models import Recipe, Ingredient, Step
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("username", "last_name", "first_name", "email")
extra_kwargs = {
'username': {
'validators': [UnicodeUsernameValidator()],
}
}
class IngredientSerializer(serializers.ModelSerializer):
class Meta:
model = Ingredient
fields = ["text"]
class StepSerializer(serializers.ModelSerializer):
class Meta:
model = Step
fields = ["step_text"]
class RecipeSerializer(serializers.ModelSerializer):
ingredients = IngredientSerializer(many=True, required=False)
steps = StepSerializer(many=True, required=False)
user = UserSerializer(required=True)
def create(self, validated_data):
        steps_data = validated_data.pop('steps', [])
        ingredients_data = validated_data.pop('ingredients', [])
user_data = validated_data.pop('user')
username = user_data.pop('username')
user = User.objects.get_by_natural_key(username)
recipe = Recipe.objects.create(user=user, **validated_data)
for steps in steps_data:
Step.objects.create(recipe=recipe, **steps)
for ingredients in ingredients_data:
Ingredient.objects.create(recipe=recipe, **ingredients)
return recipe
class Meta:
model = Recipe
fields = ("name", "user", "steps", "ingredients")
def update(self, instance, validated_data):
        steps_data = validated_data.pop('steps', None)
        ingredients_data = validated_data.pop('ingredients', None)
        if steps_data is not None:
            Step.objects.filter(recipe=instance).delete()
            for steps in steps_data:
                Step.objects.create(recipe=instance, **steps)
        if ingredients_data is not None:
            Ingredient.objects.filter(recipe=instance).delete()
            for ingredients in ingredients_data:
                Ingredient.objects.create(recipe=instance, **ingredients)
return instance | none | 1 | 2.20754 | 2 |
|
tests/test_model/test_temporal_regression_head.py | jcwon0/BlurHPE | 0 | 5988 |
import numpy as np
import pytest
import torch
from mmpose.models import TemporalRegressionHead
def test_temporal_regression_head():
"""Test temporal head."""
head = TemporalRegressionHead(
in_channels=1024,
num_joints=17,
loss_keypoint=dict(type='MPJPELoss', use_target_weight=True))
head.init_weights()
with pytest.raises(AssertionError):
# ndim of the input tensor should be 3
input_shape = (1, 1024, 1, 1)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
with pytest.raises(AssertionError):
# size of the last dim should be 1
input_shape = (1, 1024, 3)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
input_shape = (1, 1024, 1)
inputs = _demo_inputs(input_shape)
out = head(inputs)
assert out.shape == torch.Size([1, 17, 3])
loss = head.get_loss(out, out, torch.ones_like(out))
assert torch.allclose(loss['reg_loss'], torch.tensor(0.))
_ = head.inference_model(inputs)
_ = head.inference_model(inputs, [(0, 1), (2, 3)])
acc = head.get_accuracy(out, out, torch.ones_like(out))
assert acc['mpjpe'] == 0.
np.testing.assert_almost_equal(acc['p_mpjpe'], 0.)
def _demo_inputs(input_shape=(1, 1024, 1)):
"""Create a superset of inputs needed to run head.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 1024, 1).
Returns:
Random input tensor with the size of input_shape.
"""
inps = np.random.random(input_shape)
inps = torch.FloatTensor(inps)
return inps
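# Example (sketch): the test above can be run on its own with pytest, e.g.
#
#   pytest tests/test_model/test_temporal_regression_head.py
#
# assuming mmpose and a CPU-only torch build are installed; nothing here needs a
# GPU since both the head and the random inputs from _demo_inputs stay on the CPU.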
| import numpy as np
import pytest
import torch
from mmpose.models import TemporalRegressionHead
def test_temporal_regression_head():
"""Test temporal head."""
head = TemporalRegressionHead(
in_channels=1024,
num_joints=17,
loss_keypoint=dict(type='MPJPELoss', use_target_weight=True))
head.init_weights()
with pytest.raises(AssertionError):
# ndim of the input tensor should be 3
input_shape = (1, 1024, 1, 1)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
with pytest.raises(AssertionError):
# size of the last dim should be 1
input_shape = (1, 1024, 3)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
input_shape = (1, 1024, 1)
inputs = _demo_inputs(input_shape)
out = head(inputs)
assert out.shape == torch.Size([1, 17, 3])
loss = head.get_loss(out, out, torch.ones_like(out))
assert torch.allclose(loss['reg_loss'], torch.tensor(0.))
_ = head.inference_model(inputs)
_ = head.inference_model(inputs, [(0, 1), (2, 3)])
acc = head.get_accuracy(out, out, torch.ones_like(out))
assert acc['mpjpe'] == 0.
np.testing.assert_almost_equal(acc['p_mpjpe'], 0.)
def _demo_inputs(input_shape=(1, 1024, 1)):
"""Create a superset of inputs needed to run head.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 1024, 1).
Returns:
Random input tensor with the size of input_shape.
"""
inps = np.random.random(input_shape)
inps = torch.FloatTensor(inps)
return inps | en | 0.59129 | Test temporal head. # ndim of the input tensor should be 3 # size of the last dim should be 1 Create a superset of inputs needed to run head.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 1024, 1).
Returns:
Random input tensor with the size of input_shape. | 2.233449 | 2 |
django_orm/sports_orm/leagues/migrations/0002_auto_20161031_1620.py | gfhuertac/coding_dojo_python | 0 | 5989 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-31 23:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('leagues', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='team',
old_name='city',
new_name='location',
),
]
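# Example (sketch): with this file in leagues/migrations/, the rename is applied by
#
#   python manage.py migrate leagues
#
# and `python manage.py sqlmigrate leagues 0002` prints the SQL Django will emit
# for the city -> location column rename.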
| <filename>django_orm/sports_orm/leagues/migrations/0002_auto_20161031_1620.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-31 23:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('leagues', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='team',
old_name='city',
new_name='location',
),
]
| en | 0.764799 | # -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2016-10-31 23:20 | 1.627294 | 2 |
353-Design-Snake-Game/solution.py | Tanych/CodeTracking | 0 | 5990 |
import collections


class SnakeGame(object):
def __init__(self, width,height,food):
"""
Initialize your data structure here.
@param width - screen width
@param height - screen height
@param food - A list of food positions
        E.g. food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
:type width: int
:type height: int
:type food: List[List[int]]
"""
self.width=width
self.height=height
self.food=collections.deque(food)
self.position=collections.deque([(0,0)])
self.moveops={'U':(-1,0),'L':(0,-1),'R':(0,1),'D':(1,0)}
self.score=0
def move(self, direction):
"""
Moves the snake.
@param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
@return The game's score after the move. Return -1 if game over.
Game over when snake crosses the screen boundary or bites its body.
:type direction: str
:rtype: int
"""
if direction not in self.moveops:
return -1
peak,tail=self.position[0],self.position[-1]
self.position.pop()
idxi,idxj=self.moveops[direction]
newi,newj=peak[0]+idxi,peak[1]+idxj
if (newi,newj) in self.position or \
newi<0 or newi>=self.height or \
newj<0 or newj>=self.width:
return -1
self.position.appendleft((newi,newj))
if self.food and [newi,newj]==self.food[0]:
self.food.popleft()
self.position.append(tail)
self.score+=1
return self.score
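# Example trace (sketch): the classic 3x2 board with food at [1,2] then [0,1];
# the return values follow from the move() logic above.
#
#   game = SnakeGame(3, 2, [[1, 2], [0, 1]])
#   game.move('R')  # -> 0
#   game.move('D')  # -> 0
#   game.move('R')  # -> 1   (eats the food at [1, 2])
#   game.move('U')  # -> 1
#   game.move('L')  # -> 2   (eats the food at [0, 1])
#   game.move('U')  # -> -1  (runs into the top boundary: game over)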
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction) | import collections


class SnakeGame(object):
def __init__(self, width,height,food):
"""
Initialize your data structure here.
@param width - screen width
@param height - screen height
@param food - A list of food positions
        E.g. food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
:type width: int
:type height: int
:type food: List[List[int]]
"""
self.width=width
self.height=height
self.food=collections.deque(food)
self.position=collections.deque([(0,0)])
self.moveops={'U':(-1,0),'L':(0,-1),'R':(0,1),'D':(1,0)}
self.score=0
def move(self, direction):
"""
Moves the snake.
@param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
@return The game's score after the move. Return -1 if game over.
Game over when snake crosses the screen boundary or bites its body.
:type direction: str
:rtype: int
"""
if direction not in self.moveops:
return -1
peak,tail=self.position[0],self.position[-1]
self.position.pop()
idxi,idxj=self.moveops[direction]
newi,newj=peak[0]+idxi,peak[1]+idxj
if (newi,newj) in self.position or \
newi<0 or newi>=self.height or \
newj<0 or newj>=self.width:
return -1
self.position.appendleft((newi,newj))
if self.food and [newi,newj]==self.food[0]:
self.food.popleft()
self.position.append(tail)
self.score+=1
return self.score
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction) | en | 0.805402 | Initialize your data structure here. @param width - screen width @param height - screen height @param food - A list of food positions E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0]. :type width: int :type height: int :type food: List[List[int]] Moves the snake. @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down @return The game's score after the move. Return -1 if game over. Game over when snake crosses the screen boundary or bites its body. :type direction: str :rtype: int # Your SnakeGame object will be instantiated and called as such: # obj = SnakeGame(width, height, food) # param_1 = obj.move(direction) | 4.126869 | 4 |
scripts/register_sam.py | jessebrennan/azul | 0 | 5991 | from itertools import (
chain,
)
import logging
from azul import (
config,
require,
)
from azul.logging import (
configure_script_logging,
)
from azul.terra import (
TDRClient,
TDRSourceName,
)
log = logging.getLogger(__name__)
def main():
configure_script_logging(log)
tdr = TDRClient()
tdr.register_with_sam()
tdr_catalogs = (
catalog.name
for catalog in config.catalogs.values()
if catalog.plugins['repository'] == 'tdr'
)
for source in set(chain(*map(config.tdr_sources, tdr_catalogs))):
source = TDRSourceName.parse(source)
api_project = tdr.lookup_source_project(source)
require(api_project == source.project,
'Actual Google project of TDR source differs from configured '
'one',
api_project, source)
tdr.check_api_access(source)
tdr.check_bigquery_access(source)
if __name__ == '__main__':
main()
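# Example (sketch): meant to be run as a one-off admin script, e.g.
#
#   python scripts/register_sam.py
#
# It assumes the usual azul deployment environment is active (catalog/TDR source
# configuration plus Google credentials that Terra/SAM accepts); the exact setup
# steps are not shown here.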
| from itertools import (
chain,
)
import logging
from azul import (
config,
require,
)
from azul.logging import (
configure_script_logging,
)
from azul.terra import (
TDRClient,
TDRSourceName,
)
log = logging.getLogger(__name__)
def main():
configure_script_logging(log)
tdr = TDRClient()
tdr.register_with_sam()
tdr_catalogs = (
catalog.name
for catalog in config.catalogs.values()
if catalog.plugins['repository'] == 'tdr'
)
for source in set(chain(*map(config.tdr_sources, tdr_catalogs))):
source = TDRSourceName.parse(source)
api_project = tdr.lookup_source_project(source)
require(api_project == source.project,
'Actual Google project of TDR source differs from configured '
'one',
api_project, source)
tdr.check_api_access(source)
tdr.check_bigquery_access(source)
if __name__ == '__main__':
main()
| none | 1 | 2.088094 | 2 |
|
altitude/players.py | StamKaly/altitude-mod-foundation | 1 | 5992 | class Player:
def __init__(self, nickname, vapor_id, player_id, ip):
self.nickname = nickname
self.vapor_id = vapor_id
self.player_id = player_id
self.ip = ip
self.not_joined = True
self.loads_map = True
self.joined_after_change_map = True
class Players:
def __init__(self, main_object, modded, lobby):
self.main = main_object
self.players = []
self.modded = modded
self.map_changed = False
self.lobby = lobby
self.commands = None
def get_commands_object(self, commands_object):
self.commands = commands_object
def _on_map_change(self, map_name):
self.map_changed = map_name
if self.modded and self.players:
for player in self.players:
player.loads_map = True
def check_if_everyone_joined_after_change_map(self):
for player in self.players:
if player.loads_map and not player.joined_after_change_map:
return False
return True
def _on_player_info_ev(self, player_id):
player = [player for player in self.players if player.player_id == player_id][0]
if self.map_changed or hasattr(player, "not_joined"):
if player.loads_map and player.joined_after_change_map:
player.joined_after_change_map = False
elif player.loads_map and not player.joined_after_change_map:
player.loads_map = False
player.joined_after_change_map = True
self.main.on_player_map_change(player, self.map_changed)
if hasattr(player, "not_joined"):
del player.not_joined
self.main.on_client_join(player)
if self.check_if_everyone_joined_after_change_map():
self.map_changed = False
def check_nickname_existence(self, nickname):
for player in self.players:
if nickname == player.nickname:
return True
return False
def get_all_players(self, nicknames, vapor_ids, player_ids, ips):
players_list = [nicknames, vapor_ids, player_ids, ips]
for count in range(len(nicknames)):
self.players.append(Player(*[player[count] for player in players_list]))
def add(self, nickname, vapor_id, player_id, ip):
self.players.append(Player(nickname, vapor_id, player_id, ip))
def remove(self, nickname):
for player in self.players:
if nickname == player.nickname:
self.players.remove(player)
break
if self.lobby and len(self.players) == 0:
self.commands.change_map(self.lobby)
def nickname_change(self, old_nickname, new_nickname):
for player in self.players:
if old_nickname == player.nickname:
player.nickname = new_nickname
break
def all_nicknames(self):
return [player.nickname for player in self.players]
def player_from_nickname(self, nickname):
for player in self.players:
if nickname == player.nickname:
return player
def player_from_vapor_id(self, vapor_id):
for player in self.players:
if vapor_id == player.vapor_id:
return player
def player_from_player_id(self, player_id):
for player in self.players:
if player_id == player.player_id:
return player
def get_all_vapor_ids(self):
return [player.vapor_id for player in self.players]
| class Player:
def __init__(self, nickname, vapor_id, player_id, ip):
self.nickname = nickname
self.vapor_id = vapor_id
self.player_id = player_id
self.ip = ip
self.not_joined = True
self.loads_map = True
self.joined_after_change_map = True
class Players:
def __init__(self, main_object, modded, lobby):
self.main = main_object
self.players = []
self.modded = modded
self.map_changed = False
self.lobby = lobby
self.commands = None
def get_commands_object(self, commands_object):
self.commands = commands_object
def _on_map_change(self, map_name):
self.map_changed = map_name
if self.modded and self.players:
for player in self.players:
player.loads_map = True
def check_if_everyone_joined_after_change_map(self):
for player in self.players:
if player.loads_map and not player.joined_after_change_map:
return False
return True
def _on_player_info_ev(self, player_id):
player = [player for player in self.players if player.player_id == player_id][0]
if self.map_changed or hasattr(player, "not_joined"):
if player.loads_map and player.joined_after_change_map:
player.joined_after_change_map = False
elif player.loads_map and not player.joined_after_change_map:
player.loads_map = False
player.joined_after_change_map = True
self.main.on_player_map_change(player, self.map_changed)
if hasattr(player, "not_joined"):
del player.not_joined
self.main.on_client_join(player)
if self.check_if_everyone_joined_after_change_map():
self.map_changed = False
def check_nickname_existence(self, nickname):
for player in self.players:
if nickname == player.nickname:
return True
return False
def get_all_players(self, nicknames, vapor_ids, player_ids, ips):
players_list = [nicknames, vapor_ids, player_ids, ips]
for count in range(len(nicknames)):
self.players.append(Player(*[player[count] for player in players_list]))
def add(self, nickname, vapor_id, player_id, ip):
self.players.append(Player(nickname, vapor_id, player_id, ip))
def remove(self, nickname):
for player in self.players:
if nickname == player.nickname:
self.players.remove(player)
break
if self.lobby and len(self.players) == 0:
self.commands.change_map(self.lobby)
def nickname_change(self, old_nickname, new_nickname):
for player in self.players:
if old_nickname == player.nickname:
player.nickname = new_nickname
break
def all_nicknames(self):
return [player.nickname for player in self.players]
def player_from_nickname(self, nickname):
for player in self.players:
if nickname == player.nickname:
return player
def player_from_vapor_id(self, vapor_id):
for player in self.players:
if vapor_id == player.vapor_id:
return player
def player_from_player_id(self, player_id):
for player in self.players:
if player_id == player.player_id:
return player
def get_all_vapor_ids(self):
return [player.vapor_id for player in self.players]
| none | 1 | 2.775471 | 3 |
|
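The Player/Players pair in the row above is a small in-memory roster keyed by nickname, vapor ID and player ID. A minimal usage sketch, assuming those two classes are in scope; the _StubMain object and the sample values are made up, and only the bookkeeping methods are exercised, so none of the main/commands callbacks fire:
class _StubMain:
    pass  # stands in for the plugin's main object; its callbacks are not reached here

roster = Players(_StubMain(), modded=False, lobby=None)
roster.add("Alice", "vapor-1", 1, "10.0.0.1")
roster.add("Bob", "vapor-2", 2, "10.0.0.2")

print(roster.all_nicknames())                    # ['Alice', 'Bob']
roster.nickname_change("Bob", "Bobby")
print(roster.check_nickname_existence("Bobby"))  # True
roster.remove("Alice")                           # lobby is None, so no map change is requested
print(roster.get_all_vapor_ids())                # ['vapor-2']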
dsn/editor/construct.py | expressionsofchange/nerf0 | 2 | 5993 | <reponame>expressionsofchange/nerf0<gh_stars>1-10
"""
Tools to "play notes for the editor clef", which may be thought of as "executing editor commands".
NOTE: in the below, we often connect notes together "manually", i.e. using NoteSlur(..., previous_hash). As an
alternative, we could consider `nouts_for_notes`.
"""
from s_address import node_for_s_address, s_dfs
from dsn.s_expr.legato import NoteSlur, NoteCapo
from dsn.s_expr.utils import (
bubble_history_up,
calc_possibility,
insert_text_at,
insert_node_at,
replace_text_at,
weave_disjoint_replaces,
)
from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode
from dsn.s_expr.structure import TreeNode
from dsn.editor.clef import (
CursorChild,
CursorDFS,
CursorParent,
CursorSet,
EDelete,
EncloseWithParent,
InsertNodeChild,
InsertNodeSibbling,
MoveSelectionChild,
MoveSelectionSibbling,
LeaveChildrenBehind,
SwapSibbling,
TextInsert,
TextReplace,
)
def edit_note_play(structure, edit_note):
# :: EditStructure, EditNote => (new) s_cursor, posacts, error
def an_error():
return structure.s_cursor, [], True
if isinstance(edit_note, TextInsert):
posacts = insert_text_at(structure.tree, edit_note.parent_s_address, edit_note.index, edit_note.text)
new_s_cursor = edit_note.parent_s_address + [edit_note.index]
return new_s_cursor, posacts, False
if isinstance(edit_note, TextReplace):
posacts = replace_text_at(structure.tree, edit_note.s_address, edit_note.text)
return edit_note.s_address, posacts, False
if isinstance(edit_note, InsertNodeSibbling):
if structure.s_cursor == []:
return an_error() # adding sibblings to the root is not possible (it would lead to a forest)
# There is no need to check that the new index is a valid one. (Assuming: the cursor is valid, and direction is
# in the range [0, 1]; such assumptions fit with the general idea of "we only check that the user's command can
# be executed at this point, we do not check for arbitrary programming errors here). The proof flows directly
# from the idea that, for lists of length n, insertions at [0, n] are valid (insertion at n being an append).
index = structure.s_cursor[-1] + edit_note.direction
posacts = insert_node_at(structure.tree, structure.s_cursor[:-1], index)
new_s_cursor = structure.s_cursor[:-1] + [index]
return new_s_cursor, posacts, False
if isinstance(edit_note, InsertNodeChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not isinstance(cursor_node, TreeNode):
# for now... we just silently ignore the user's request when they ask to add a child node to a non-node
return an_error()
index = len(cursor_node.children)
posacts = insert_node_at(structure.tree, structure.s_cursor, index)
new_s_cursor = structure.s_cursor + [index]
return new_s_cursor, posacts, False
if isinstance(edit_note, EDelete):
if structure.s_cursor == []:
# silently ignored ('delete root' is not defined, because the root is assumed to exist.)
return an_error()
delete_from = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, delete_from).metadata.nout_hash
p, h = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
if delete_at_index == len(node_for_s_address(structure.tree, delete_from).children) - 1:
# deletion makes cursor pos invalid: up to parent (alternative: sibbling-up first, until no more sibblings)
new_s_cursor = delete_from
else:
new_s_cursor = structure.s_cursor # "stay in place (although new contents slide into the cursor position)
posacts = [p] + bubble_history_up(h, structure.tree, delete_from)
return new_s_cursor, posacts, False
if isinstance(edit_note, SwapSibbling):
if structure.s_cursor == []:
return an_error() # root has no sibblings
parent = node_for_s_address(structure.tree, structure.s_cursor[:-1])
index = structure.s_cursor[-1] + edit_note.direction
if not (0 <= index <= len(parent.children) - 1):
return an_error()
# For now, SwapSibbling is simply implemented as a "delete and insert"; if (or when) we'll introduce "Move" into
# the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
reinsert_later_hash = node_for_s_address(structure.tree, structure.s_cursor).metadata.nout_hash
p0, hash_after_deletion = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
p1, hash_after_insertion = calc_possibility(NoteSlur(Insert(index, reinsert_later_hash), hash_after_deletion))
new_cursor = structure.s_cursor[:-1] + [index]
posacts = [p0, p1] + bubble_history_up(hash_after_insertion, structure.tree, parent_s_address)
return new_cursor, posacts, False
if isinstance(edit_note, MoveSelectionChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children'):
return an_error() # The target must be a node to be able to add as a child
return do_move(structure, edit_note, structure.s_cursor, len(cursor_node.children))
if isinstance(edit_note, MoveSelectionSibbling):
if len(structure.s_cursor) == 0:
return an_error() # there is no sibbling of the root node
# edit_note.direction points to a valid insertion point for the same reasons detailed in the comment on
# InsertNodeSibbling
return do_move(structure, edit_note, structure.s_cursor[:-1], structure.s_cursor[-1] + edit_note.direction)
if isinstance(edit_note, LeaveChildrenBehind):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children'):
return an_error() # Leave _children_ behind presupposes the existence of children
if structure.s_cursor == []:
return an_error() # Root cannot die
# For now, LeaveChildrenBehind is simply implemented as a "delete and insert"; if (or when) we'll introduce
# "Move" into the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
p, hash_ = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
posacts = [p]
removed_node = node_for_s_address(structure.tree, structure.s_cursor)
for i, child in enumerate(removed_node.children):
p, hash_ = calc_possibility(NoteSlur(Insert(structure.s_cursor[-1] + i, child.metadata.nout_hash), hash_))
posacts.append(p)
# In general, leaving the cursor at the same s_address will be great: post-deletion you'll be in the right spot
new_cursor = structure.s_cursor
if len(removed_node.children) == 0:
# ... however, if there are no children to leave behind... this "right spot" may be illegal
parent_node = node_for_s_address(structure.tree, parent_s_address)
if len(parent_node.children) == 1:
# if the deleted node was the only node: fall back to the parent
new_cursor = parent_s_address
else:
# otherwise, make sure to stay in bounds.
new_cursor[len(new_cursor) - 1] = min(
len(parent_node.children) - 1 - 1, # len - 1 idiom; -1 for deletion.
new_cursor[len(new_cursor) - 1])
posacts += bubble_history_up(hash_, structure.tree, parent_s_address)
return new_cursor, posacts, False
if isinstance(edit_note, EncloseWithParent):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if structure.s_cursor == []:
# I am not sure about this one yet: should we have the option to create a new root? I don't see any direct
# objections (by which I mean: it's possible in terms of the math), but I still have a sense that it may
# create some asymmetries. For now I'm disallowing it; we'll see whether a use case arises.
return an_error()
# For now, EncloseWithParent is simply implemented as a "replace with the parent"; if (or when) we'll introduce
# "Move" (in particular: the MoveReplace) into the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
replace_at_index = structure.s_cursor[-1]
replace_on_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
reinsert_later_hash = node_for_s_address(structure.tree, structure.s_cursor).metadata.nout_hash
p_capo, hash_capo = calc_possibility(NoteCapo())
p_create, hash_create = calc_possibility(NoteSlur(BecomeNode(), hash_capo))
p_enclosure, hash_enclosure = calc_possibility(NoteSlur(Insert(0, reinsert_later_hash), hash_create))
p_replace, hash_replace = calc_possibility(
NoteSlur(Replace(replace_at_index, hash_enclosure), replace_on_hash))
posacts = [p_capo, p_create, p_enclosure, p_replace] + bubble_history_up(
hash_replace, structure.tree, parent_s_address)
# We jump the cursor to the newly enclosed location:
new_cursor = structure.s_cursor + [0]
return new_cursor, posacts, False
def move_cursor(new_cursor):
return new_cursor, [], False
if isinstance(edit_note, CursorDFS):
dfs = s_dfs(structure.tree, [])
dfs_index = dfs.index(structure.s_cursor) + edit_note.direction
if not (0 <= dfs_index <= len(dfs) - 1):
return an_error()
return move_cursor(dfs[dfs_index])
"""At some point I had "regular sibbling" (as opposed to DFS sibbling) in the edit_clef. It looks like this:
if structure.s_cursor == []:
return an_error() # root has no sibblings
parent = node_for_s_address(structure.tree, s_cursor[:-1])
index = s_cursor[-1] + edit_node.direction
if not (0 <= index <= len(parent.children) - 1):
return an_error()
return move_cursor(s_cursor[:-1] + [index])
"""
if isinstance(edit_note, CursorSet):
return move_cursor(edit_note.s_address)
if isinstance(edit_note, CursorParent):
if structure.s_cursor == []:
return an_error()
return move_cursor(structure.s_cursor[:-1])
if isinstance(edit_note, CursorChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children') or len(cursor_node.children) == 0:
return an_error()
return move_cursor(structure.s_cursor + [0])
raise Exception("Unknown Note")
def do_move(structure, edit_note, target_parent_path, target_index):
selection_edge_0 = edit_note.selection_edge_0
selection_edge_1 = edit_note.selection_edge_1
def an_error():
return structure.s_cursor, [], True
if selection_edge_0[:-1] != selection_edge_1[:-1]:
# i.e. if not same-parent: this is an error. This may very well be too restrictive, but I'd rather move in the
# direction of "relax constraints later" than in the other directions. One particular reason I'm so restrictive
# for now: if I ever want to express a note "move" using a target_node, a source node and to indices in the
# source node, such a single-parent restriction is indeed a necessity.
# Note that "single parent" implies "same depth", but not vice versa. One possible relaxation is: make the
# restriction on "same depth" instead.
# Generally, the paths towards relaxation are to either [a] "be smart about the meaning of the selection's
# edges", i.e. find the first common ancestor and the relevant children of that ancestor or [b] to not care so
# much about single-parent.
return an_error()
if selection_edge_0 <= (target_parent_path + [target_index])[:len(selection_edge_0)] <= selection_edge_1:
# If the full target location, truncated to the length of the sources, is (inclusively) in the source's range,
# you're trying to move to [a descendant of] yourself. This is illegal. Moving something to a child of itself:
# I simply don't know what it would mean. Moving something to the same location (single source item, target path
# identical to the source path) could at least be understood to mean the no-op, so it's slightly less
# meaningless, but here I don't find that enough, so I'm just calling both scenarios error-scenarios.
# This implies protection against moving the root node around (because everything descends from the root node)
return an_error()
source_parent_path = selection_edge_0[:-1]
source_parent = node_for_s_address(structure.tree, source_parent_path)
target_parent = node_for_s_address(structure.tree, target_parent_path)
# For now, the "edit move" operations are simply implemented as a "insert and delete"; if (or when) we'll introduce
# "Move" into the Clef, we should note the move here.
posacts = []
source_index_lo, source_index_hi = sorted([selection_edge_0[-1], selection_edge_1[-1]])
hash_ = target_parent.metadata.nout_hash
for target_offset, source_index in enumerate(range(source_index_lo, source_index_hi + 1)): # edge-inclusive range
insert_hash = node_for_s_address(structure.tree, source_parent_path + [source_index]).metadata.nout_hash
p, hash_ = calc_possibility(NoteSlur(Insert(target_index + target_offset, insert_hash), hash_))
posacts.append(p)
weave_correction = 0
cursor_correction = 0
# TODO this part is still broken:
# Not only if the parents are exactly the same, but also if one parent is a prefix of the other (said differently:
# the longest_common_prefix of both parents matches one of them).
# In that case, we need to somehow connect the parents....
# (For the case of "parents match exactly", I did this using the idea "just don't reset hash_"... which works,
# because it allows you to continue operating on the same "future". But in the case of shared prefix, this won't
# work.
if source_parent_path != target_parent_path:
wdr_hash = hash_
hash_ = source_parent.metadata.nout_hash
else:
if target_index < source_index_lo:
# We insert before we delete. If we do this on the same parent, and the insertions happen at lower indices
# than the deletions, they will affect the locations where the deletions must take place, by precisely the
# number of insertions that happened. (If we reverse the order of operations, we have the opposite problem)
# The reason we have this problem at all, is because we implement something that is atomic from the user's
# point of view in a non-atomic way in the clef. The problem may auto-disappear if we add "Move" to the
# clef.
# Another way we could handle the problem is once we have some tools to "realinearize while preserving
# meaning". I.e. we have deletions, we have insertions: at one point (e.g. once we build the cooperative
# editor) we should be able to express "weave those together, rewriting indices as required".
# In the if-statement above, we could pick either lo/hi for the comparison; source_index_lo and
# source_index_hi will never straddle target_index, because of the child-of-yourself checks at the top.
weave_correction = source_index_hi - source_index_lo + 1
else:
cursor_correction = source_index_hi - source_index_lo + 1
# we do _not_ fetch hash_ here, the idea being: it's the hash we just created.
# nor do we bubble up (yet); we can do a single bubble-up
for source_index in range(source_index_lo, source_index_hi + 1): # edge-inclusive range
# Note: we just Delete n times at the "lo" index (everything shifting to the left after each deletion)
p, hash_ = calc_possibility(NoteSlur(Delete(source_index_lo + weave_correction), hash_))
posacts.append(p)
if source_parent_path != target_parent_path:
posacts = posacts + weave_disjoint_replaces(
structure.tree,
target_parent_path, wdr_hash,
source_parent_path, hash_)
else:
posacts = posacts + bubble_history_up(hash_, structure.tree, source_parent_path)
# The current solution for "where to put the cursor after the move" is "at the end". This "seems intuitive" (but
# that may just be habituation). In any case, it's what e.g. LibreOffice does when cut/pasting. (However, for a
# mouse-drag initiated move in LibreOffice, the selection is preserved).
# As it stands: the selection disappears automatically, because it points at a no-longer existing location. If we
# want to make the selection appear at the target-location, we need to change the interface of edit_note_play to
# include the resulting selection.
new_cursor = target_parent_path + [target_index + target_offset - cursor_correction]
return new_cursor, posacts, False
| """
Tools to "play notes for the editor clef", which may be thought of as "executing editor commands".
NOTE: in the below, we often connect notes together "manually", i.e. using NoteSlur(..., previous_hash). As an
alternative, we could consider `nouts_for_notes`.
"""
from s_address import node_for_s_address, s_dfs
from dsn.s_expr.legato import NoteSlur, NoteCapo
from dsn.s_expr.utils import (
bubble_history_up,
calc_possibility,
insert_text_at,
insert_node_at,
replace_text_at,
weave_disjoint_replaces,
)
from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode
from dsn.s_expr.structure import TreeNode
from dsn.editor.clef import (
CursorChild,
CursorDFS,
CursorParent,
CursorSet,
EDelete,
EncloseWithParent,
InsertNodeChild,
InsertNodeSibbling,
MoveSelectionChild,
MoveSelectionSibbling,
LeaveChildrenBehind,
SwapSibbling,
TextInsert,
TextReplace,
)
def edit_note_play(structure, edit_note):
# :: EditStructure, EditNote => (new) s_cursor, posacts, error
def an_error():
return structure.s_cursor, [], True
if isinstance(edit_note, TextInsert):
posacts = insert_text_at(structure.tree, edit_note.parent_s_address, edit_note.index, edit_note.text)
new_s_cursor = edit_note.parent_s_address + [edit_note.index]
return new_s_cursor, posacts, False
if isinstance(edit_note, TextReplace):
posacts = replace_text_at(structure.tree, edit_note.s_address, edit_note.text)
return edit_note.s_address, posacts, False
if isinstance(edit_note, InsertNodeSibbling):
if structure.s_cursor == []:
return an_error() # adding sibblings to the root is not possible (it would lead to a forest)
# There is no need to check that the new index is a valid one. (Assuming: the cursor is valid, and direction is
# in the range [0, 1]; such assumptions fit with the general idea of "we only check that the user's command can
# be executed at this point, we do not check for arbitrary programming errors here). The proof flows directly
# from the idea that, for lists of length n, insertions at [0, n] are valid (insertion at n being an append).
index = structure.s_cursor[-1] + edit_note.direction
posacts = insert_node_at(structure.tree, structure.s_cursor[:-1], index)
new_s_cursor = structure.s_cursor[:-1] + [index]
return new_s_cursor, posacts, False
if isinstance(edit_note, InsertNodeChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not isinstance(cursor_node, TreeNode):
# for now... we just silently ignore the user's request when they ask to add a child node to a non-node
return an_error()
index = len(cursor_node.children)
posacts = insert_node_at(structure.tree, structure.s_cursor, index)
new_s_cursor = structure.s_cursor + [index]
return new_s_cursor, posacts, False
if isinstance(edit_note, EDelete):
if structure.s_cursor == []:
# silently ignored ('delete root' is not defined, because the root is assumed to exist.)
return an_error()
delete_from = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, delete_from).metadata.nout_hash
p, h = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
if delete_at_index == len(node_for_s_address(structure.tree, delete_from).children) - 1:
# deletion makes cursor pos invalid: up to parent (alternative: sibbling-up first, until no more sibblings)
new_s_cursor = delete_from
else:
new_s_cursor = structure.s_cursor # "stay in place (although new contents slide into the cursor position)
posacts = [p] + bubble_history_up(h, structure.tree, delete_from)
return new_s_cursor, posacts, False
if isinstance(edit_note, SwapSibbling):
if structure.s_cursor == []:
return an_error() # root has no sibblings
parent = node_for_s_address(structure.tree, structure.s_cursor[:-1])
index = structure.s_cursor[-1] + edit_note.direction
if not (0 <= index <= len(parent.children) - 1):
return an_error()
# For now, SwapSibbling is simply implemented as a "delete and insert"; if (or when) we'll introduce "Move" into
# the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
reinsert_later_hash = node_for_s_address(structure.tree, structure.s_cursor).metadata.nout_hash
p0, hash_after_deletion = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
p1, hash_after_insertion = calc_possibility(NoteSlur(Insert(index, reinsert_later_hash), hash_after_deletion))
new_cursor = structure.s_cursor[:-1] + [index]
posacts = [p0, p1] + bubble_history_up(hash_after_insertion, structure.tree, parent_s_address)
return new_cursor, posacts, False
if isinstance(edit_note, MoveSelectionChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children'):
return an_error() # The target must be a node to be able to add as a child
return do_move(structure, edit_note, structure.s_cursor, len(cursor_node.children))
if isinstance(edit_note, MoveSelectionSibbling):
if len(structure.s_cursor) == 0:
return an_error() # there is no sibbling of the root node
# edit_note.direction points to a valid insertion point for the same reasons detailed in the comment on
# InsertNodeSibbling
return do_move(structure, edit_note, structure.s_cursor[:-1], structure.s_cursor[-1] + edit_note.direction)
if isinstance(edit_note, LeaveChildrenBehind):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children'):
return an_error() # Leave _children_ behind presupposes the existence of children
if structure.s_cursor == []:
return an_error() # Root cannot die
# For now, LeaveChildrenBehind is simply implemented as a "delete and insert"; if (or when) we'll introduce
# "Move" into the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
p, hash_ = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
posacts = [p]
removed_node = node_for_s_address(structure.tree, structure.s_cursor)
for i, child in enumerate(removed_node.children):
p, hash_ = calc_possibility(NoteSlur(Insert(structure.s_cursor[-1] + i, child.metadata.nout_hash), hash_))
posacts.append(p)
# In general, leaving the cursor at the same s_address will be great: post-deletion you'll be in the right spot
new_cursor = structure.s_cursor
if len(removed_node.children) == 0:
# ... however, if there are no children to leave behind... this "right spot" may be illegal
parent_node = node_for_s_address(structure.tree, parent_s_address)
if len(parent_node.children) == 1:
# if the deleted node was the only node: fall back to the parent
new_cursor = parent_s_address
else:
# otherwise, make sure to stay in bounds.
new_cursor[len(new_cursor) - 1] = min(
len(parent_node.children) - 1 - 1, # len - 1 idiom; -1 for deletion.
new_cursor[len(new_cursor) - 1])
posacts += bubble_history_up(hash_, structure.tree, parent_s_address)
return new_cursor, posacts, False
if isinstance(edit_note, EncloseWithParent):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if structure.s_cursor == []:
# I am not sure about this one yet: should we have the option to create a new root? I don't see any direct
# objections (by which I mean: it's possible in terms of the math), but I still have a sense that it may
# create some asymmetries. For now I'm disallowing it; we'll see whether a use case arises.
return an_error()
# For now, EncloseWithParent is simply implemented as a "replace with the parent"; if (or when) we'll introduce
# "Move" (in particular: the MoveReplace) into the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
replace_at_index = structure.s_cursor[-1]
replace_on_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
reinsert_later_hash = node_for_s_address(structure.tree, structure.s_cursor).metadata.nout_hash
p_capo, hash_capo = calc_possibility(NoteCapo())
p_create, hash_create = calc_possibility(NoteSlur(BecomeNode(), hash_capo))
p_enclosure, hash_enclosure = calc_possibility(NoteSlur(Insert(0, reinsert_later_hash), hash_create))
p_replace, hash_replace = calc_possibility(
NoteSlur(Replace(replace_at_index, hash_enclosure), replace_on_hash))
posacts = [p_capo, p_create, p_enclosure, p_replace] + bubble_history_up(
hash_replace, structure.tree, parent_s_address)
# We jump the cursor to the newly enclosed location:
new_cursor = structure.s_cursor + [0]
return new_cursor, posacts, False
def move_cursor(new_cursor):
return new_cursor, [], False
if isinstance(edit_note, CursorDFS):
dfs = s_dfs(structure.tree, [])
dfs_index = dfs.index(structure.s_cursor) + edit_note.direction
if not (0 <= dfs_index <= len(dfs) - 1):
return an_error()
return move_cursor(dfs[dfs_index])
"""At some point I had "regular sibbling" (as opposed to DFS sibbling) in the edit_clef. It looks like this:
if structure.s_cursor == []:
return an_error() # root has no sibblings
parent = node_for_s_address(structure.tree, s_cursor[:-1])
index = s_cursor[-1] + edit_node.direction
if not (0 <= index <= len(parent.children) - 1):
return an_error()
return move_cursor(s_cursor[:-1] + [index])
"""
if isinstance(edit_note, CursorSet):
return move_cursor(edit_note.s_address)
if isinstance(edit_note, CursorParent):
if structure.s_cursor == []:
return an_error()
return move_cursor(structure.s_cursor[:-1])
if isinstance(edit_note, CursorChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children') or len(cursor_node.children) == 0:
return an_error()
return move_cursor(structure.s_cursor + [0])
raise Exception("Unknown Note")
def do_move(structure, edit_note, target_parent_path, target_index):
selection_edge_0 = edit_note.selection_edge_0
selection_edge_1 = edit_note.selection_edge_1
def an_error():
return structure.s_cursor, [], True
if selection_edge_0[:-1] != selection_edge_1[:-1]:
# i.e. if not same-parent: this is an error. This may very well be too restrictive, but I'd rather move in the
# direction of "relax constraints later" than in the other directions. One particular reason I'm so restrictive
# for now: if I ever want to express a note "move" using a target_node, a source node and to indices in the
# source node, such a single-parent restriction is indeed a necessity.
# Note that "single parent" implies "same depth", but not vice versa. One possible relaxation is: make the
# restriction on "same depth" instead.
# Generally, the paths towards relaxation are to either [a] "be smart about the meaning of the selection's
# edges", i.e. find the first common ancestor and the relevant children of that ancestor or [b] to not care so
# much about single-parent.
return an_error()
if selection_edge_0 <= (target_parent_path + [target_index])[:len(selection_edge_0)] <= selection_edge_1:
# If the full target location, truncated to the length of the sources, is (inclusively) in the source's range,
# you're trying to move to [a descendant of] yourself. This is illegal. Moving something to a child of itself:
# I simply don't know what it would mean. Moving something to the same location (single source item, target path
# identical to the source path) could at least be understood to mean the no-op, so it's slightly less
# meaningless, but here I don't find that enough, so I'm just calling both scenarios error-scenarios.
# This implies protection against moving the root node around (because everything descends from the root node)
return an_error()
source_parent_path = selection_edge_0[:-1]
source_parent = node_for_s_address(structure.tree, source_parent_path)
target_parent = node_for_s_address(structure.tree, target_parent_path)
# For now, the "edit move" operations are simply implemented as a "insert and delete"; if (or when) we'll introduce
# "Move" into the Clef, we should note the move here.
posacts = []
source_index_lo, source_index_hi = sorted([selection_edge_0[-1], selection_edge_1[-1]])
hash_ = target_parent.metadata.nout_hash
for target_offset, source_index in enumerate(range(source_index_lo, source_index_hi + 1)): # edge-inclusive range
insert_hash = node_for_s_address(structure.tree, source_parent_path + [source_index]).metadata.nout_hash
p, hash_ = calc_possibility(NoteSlur(Insert(target_index + target_offset, insert_hash), hash_))
posacts.append(p)
weave_correction = 0
cursor_correction = 0
# TODO this part is still broken:
# Not only if the parents are exactly the same, but also if one parent is a prefix of the other (said differently:
# the longest_common_prefix of both parents matches one of them).
# In that case, we need to somehow connect the parents....
# (For the case of "parents match exactly", I did this using the idea "just don't reset hash_"... which works,
# because it allows you to continue operating on the same "future". But in the case of shared prefix, this won't
# work.
if source_parent_path != target_parent_path:
wdr_hash = hash_
hash_ = source_parent.metadata.nout_hash
else:
if target_index < source_index_lo:
# We insert before we delete. If we do this on the same parent, and the insertions happen at lower indices
# than the deletions, they will affect the locations where the deletions must take place, by precisely the
# number of insertions that happened. (If we reverse the order of operations, we have the opposite problem)
# The reason we have this problem at all, is because we implement something that is atomic from the user's
# point of view in a non-atomic way in the clef. The problem may auto-disappear if we add "Move" to the
# clef.
# Another way we could handle the problem is once we have some tools to "realinearize while preserving
# meaning". I.e. we have deletions, we have insertions: at one point (e.g. once we build the cooperative
# editor) we should be able to express "weave those together, rewriting indices as required".
# In the if-statement above, we could pick either lo/hi for the comparison; source_index_lo and
# source_index_hi will never straddle target_index, because of the child-of-yourself checks at the top.
weave_correction = source_index_hi - source_index_lo + 1
else:
cursor_correction = source_index_hi - source_index_lo + 1
# we do _not_ fetch hash_ here, the idea being: it's the hash we just created.
# nor do we bubble up (yet); we can do a single bubble-up
for source_index in range(source_index_lo, source_index_hi + 1): # edge-inclusive range
# Note: we just Delete n times at the "lo" index (everything shifting to the left after each deletion)
p, hash_ = calc_possibility(NoteSlur(Delete(source_index_lo + weave_correction), hash_))
posacts.append(p)
if source_parent_path != target_parent_path:
posacts = posacts + weave_disjoint_replaces(
structure.tree,
target_parent_path, wdr_hash,
source_parent_path, hash_)
else:
posacts = posacts + bubble_history_up(hash_, structure.tree, source_parent_path)
# The current solution for "where to put the cursor after the move" is "at the end". This "seems intuitive" (but
# that may just be habituation). In any case, it's what e.g. LibreOffice does when cut/pasting. (However, for a
# mouse-drag initiated move in LibreOffice, the selection is preserved).
# As it stands: the selection disappears automatically, because it points at a no-longer existing location. If we
# want to make the selection appear at the target-location, we need to change the interface of edit_note_play to
# include the resulting selection.
new_cursor = target_parent_path + [target_index + target_offset - cursor_correction]
return new_cursor, posacts, False | en | 0.919634 | Tools to "play notes for the editor clef", which may be thought of as "executing editor commands". NOTE: in the below, we often connect notes together "manually", i.e. using NoteSlur(..., previous_hash). As an alternative, we could consider `nouts_for_notes`. # :: EditStructure, EditNote => (new) s_cursor, posacts, error # adding sibblings to the root is not possible (it would lead to a forest) # There is no need to check that the new index is a valid one. (Assuming: the cursor is valid, and direction is # in the range [0, 1]; such assumptions fit with the general idea of "we only check that the user's command can # be executed at this point, we do not check for arbitrary programming errors here). The proof flows directly # from the idea that, for lists of length n, insertions at [0, n] are valid (insertion at n being an append). # for now... we just silently ignore the user's request when they ask to add a child node to a non-node # silently ignored ('delete root' is not defined, because the root is assumed to exist.) # deletion makes cursor pos invalid: up to parent (alternative: sibbling-up first, until no more sibblings) # "stay in place (although new contents slide into the cursor position) # root has no sibblings # For now, SwapSibbling is simply implemented as a "delete and insert"; if (or when) we'll introduce "Move" into # the Clef, we should note the move here. # The target must be a node to be able to add as a child # there is no sibbling of the root node # edit_note.direction points to a valid insertion point for the same reasons detailed in the comment on # InsertNodeSibbling # Leave _children_ behind presupposes the existance of children # Root cannot die # For now, LeaveChildrenBehind is simply implemented as a "delete and insert"; if (or when) we'll introduce # "Move" into the Clef, we should note the move here. # In general, leaving the cursor at the same s_address will be great: post-deletion you'll be in the right spot # ... however, if there are no children to leave behind... this "right spot" may be illegal # if the deleted node was the only node: fall back to the parent # otherwise, make sure to stay in bounds. # len - 1 idiom; -1 for deletion. # I am not sure about this one yet: should we have the option to create a new root? I don't see any direct # objections (by which I mean: it's possible in terms of the math), but I still have a sense that it may # create some asymmetries. For now I'm disallowing it; we'll see whether a use case arises. # For now, EncloseWithParent is simply implemented as a "replace with the parent"; if (or when) we'll introduce # "Move" (in particular: the MoveReplace) into the Clef, we should note the move here. # We jump the cursor to the newly enclosed location: At some point I had "regular sibbling" (as opposed to DFS sibbling) in the edit_clef. It looks like this: if structure.s_cursor == []: return an_error() # root has no sibblings parent = node_for_s_address(structure.tree, s_cursor[:-1]) index = s_cursor[-1] + edit_node.direction if not (0 <= index <= len(parent.children) - 1): return an_error() return move_cursor(s_cursor[:-1] + [index]) # i.e. if not same-parent: this is an error. This may very well be too restrictive, but I'd rather move in the # direction of "relax constraints later" than in the other directions. 
One particular reason I'm so restrictive # for now: if I ever want to express a note "move" using a target_node, a source node and to indices in the # source node, such a single-parent restriction is indeed a necessity. # Note that "single parent" implies "same depth", but not vice versa. One possible relaxation is: make the # restriction on "same depth" instead. # Generally, the paths towards relaxation are to either [a] "be smart about the meaning of the selection's # edges", i.e. find the first common ancestor and the relevant children of that ancestor or [b] to not care so # much about single-parent. # If the full target location, truncated to the length of the sources, is (inclusively) in the source's range, # you're trying to move to [a descendant of] yourself. This is illegal. Moving something to a child of itself: # I simply don't know what it would mean. Moving something to the same location (single source item, target path # identical to the source path) could at least be understood to mean the no-op, so it's slightly less # meaningless, but here I don't find that enough, so I'm just calling both scenarios error-scenarios. # This implies protection against moving the root node around (because everything descends from the root node) # For now, the "edit move" operations are simply implemented as a "insert and delete"; if (or when) we'll introduce # "Move" into the Clef, we should note the move here. # edge-inclusive range # TODO this part is still broken: # Not only if the parents are exactly the same, but also if one parent is a prefix of the other (said differently: # the longest_common_prefix of both parents matches one of them). # In that case, we need to somehow connect the parents.... # (For the case of "parents match exactly", I did this using the idea "just don't reset hash_"... which works, # because it allows you to continue operating on the the same "future". But in the case of shared prefix, this won't # work. # We insert before we delete. If we do this on the same parent, and the insertions happen at lower indices # than the deletions, they will affect the locations where the deletions must take place, by precisely the # number of insertions that happened. (If we reverse the order of operations, we have the opposite problem) # The reason we have this problem at all, is because we implement something that is atomic from the user's # point of view in a non-atomic way in the clef. The problem may auto-disappear if we add "Move" to the # clef. # Another way we could handle the problem is once we have some tools to "realinearize while preserving # meaning". I.e. we have deletions, we have insertions: at one point (e.g. once we build the cooperative # editor) we should be able to express "weave those together, rewriting indices as required". # In the if-statement above, we could pick either lo/hi for the comparison; source_index_lo and # source_index_hi will never straddle target_index, because of the child-of-yourself checks at the top. # we do _not_ fetch hash_ here, the idea being: it's the hash we just created. # nor do we bubble up (yet); we can do a single bubble-up # edge-inclusive range # Note: we just Delete n times at the "lo" index (everything shifting to the left after each deletion) # The current solution for "where to put the cursor after the move" is "at the end". This "seems intuitive" (but # that may just be habituation). In any case, it's wat e.g. LibreOffice does when cut/pasting. 
(However, for a # mouse-drag initiated move in LibreOffice, the selection is preserved). # As it stands: the selection disappears automatically, because it points at a no-longer existing location. If we # want to make the selection appear at the target-location, we need to change the interface of edit_note_play to # include the resulting selection. | 2.234723 | 2 |
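The index bookkeeping in do_move (insert first, delete afterwards on the same parent) is the subtle part of the row above. The standalone sketch below reproduces that reasoning with a plain Python list; it does not use the dsn package, and the names weave_correction, source_lo and source_hi only loosely mirror the variables in do_move:
# Move the items at indices [3, 4] to the front by inserting copies first and deleting
# the originals afterwards. Because the insertions land at lower indices than the
# deletions, every original position shifts right by the number of inserted items,
# which is exactly the weave_correction applied in do_move.
children = ['a', 'b', 'c', 'd', 'e']
source_lo, source_hi = 3, 4      # selection: 'd', 'e'
target_index = 0

selection = children[source_lo:source_hi + 1]
for offset, item in enumerate(selection):         # insert phase
    children.insert(target_index + offset, item)

weave_correction = source_hi - source_lo + 1      # number of insertions
for _ in range(source_lo, source_hi + 1):         # delete phase, always at the "lo" index
    del children[source_lo + weave_correction]

print(children)  # ['d', 'e', 'a', 'b', 'c']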
src/pytong/base.py | richtong/pytong | 0 | 5994 | """Base for all Classes.
Base mainly includes the description fields
"""
import logging
from typing import Optional
from .log import Log # type: ignore
class BaseLog:
"""
Set a base logging.
Use this as the base class for all your work. This adds a logging root.
"""
def __init__(self, log_root: Optional[Log] = None):
"""Set the Root Log."""
# since we have no log otherwise
self.log_root = log_root
self.log = (
log_root.log_class(self)
if log_root is not None
else logging.getLogger(__name__)
)
self.log.debug(f"{self=}")
| """Base for all Classes.
Base mainly includes the description fields
"""
import logging
from typing import Optional
from .log import Log # type: ignore
class BaseLog:
"""
Set a base logging.
Use this as the base class for all your work. This adds a logging root.
"""
def __init__(self, log_root: Optional[Log] = None):
"""Set the Root Log."""
# since we have no log otherwise
self.log_root = log_root
self.log = (
log_root.log_class(self)
if log_root is not None
else logging.getLogger(__name__)
)
self.log.debug(f"{self=}")
| en | 0.874398 | Base for all Classes. Base mainly includes the description fields # type: ignore Set a base logging. Use this as the base class for all your work. This adds a logging root. Set the Root Log. # since we have no log otherwise | 2.762542 | 3 |
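BaseLog in the row above only wires up a logger, so it can be exercised with nothing but the standard library once the class is in scope (inside its package, because of the relative .log import). A minimal sketch; Worker is a hypothetical subclass, and no Log root is supplied, so the logging.getLogger(__name__) fallback is used:
import logging

logging.basicConfig(level=logging.DEBUG)

class Worker(BaseLog):
    def __init__(self):
        super().__init__(log_root=None)  # no Log root: falls back to the module logger
        self.log.info("Worker ready")

Worker()  # logs "Worker ready" plus the debug line from BaseLog.__init__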
subprocess-10.py | GuillaumeFalourd/poc-subprocess | 1 | 5995 | import subprocess
import re
programs = input('Separate the programs with a space: ').split()
secure_pattern = r'^[\w\d]+$' # anchor the pattern so the whole program name is validated, not just its first character
for program in programs:
if not re.match(secure_pattern, program):
print("Sorry we can't check that program")
continue
process = subprocess.run(
['which', program], capture_output=True, text=True)
if process.returncode == 0:
print(f'The program "{program}" is installed')
print(f'The location of the binary is: {process.stdout}')
else:
print(f'Sorry the {program} is not installed')
print(process.stderr)
print('\n') | import subprocess
import re
programs = input('Separate the programs with a space: ').split()
secure_pattern = r'^[\w\d]+$' # anchor the pattern so the whole program name is validated, not just its first character
for program in programs:
if not re.match(secure_pattern, program):
print("Sorry we can't check that program")
continue
process = subprocess.run(
['which', program], capture_output=True, text=True)
if process.returncode == 0:
print(f'The program "{program}" is installed')
print(f'The location of the binary is: {process.stdout}')
else:
print(f'Sorry the {program} is not installed')
print(process.stderr)
print('\n') | none | 1 | 3.152842 | 3 |
|
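The same availability check can be done without spawning a child process at all: shutil.which consults PATH directly and also works on Windows, where there is no which binary. A sketch with a hardcoded program list (the list itself is only illustrative):
import shutil

programs = ['python3', 'git', 'definitely-not-installed']

for program in programs:
    path = shutil.which(program)
    if path is not None:
        print(f'The program "{program}" is installed')
        print(f'The location of the binary is: {path}')
    else:
        print(f'Sorry, "{program}" is not installed')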
authentication/socialaccount/forms.py | vo0doO/pydj-persweb | 0 | 5996 | <filename>authentication/socialaccount/forms.py
from __future__ import absolute_import
from django import forms
from authentication.account.forms import BaseSignupForm
from . import app_settings, signals
from .adapter import get_adapter
from .models import SocialAccount
class SignupForm(BaseSignupForm):
def __init__(self, *args, **kwargs):
self.sociallogin = kwargs.pop('sociallogin')
initial = get_adapter().get_signup_form_initial_data(
self.sociallogin)
kwargs.update({
'initial': initial,
'email_required': kwargs.get('email_required',
app_settings.EMAIL_REQUIRED)})
super(SignupForm, self).__init__(*args, **kwargs)
def save(self, request):
adapter = get_adapter(request)
user = adapter.save_user(request, self.sociallogin, form=self)
self.custom_signup(request, user)
return user
def validate_unique_email(self, value):
try:
return super(SignupForm, self).validate_unique_email(value)
except forms.ValidationError:
raise forms.ValidationError(
get_adapter().error_messages['email_taken']
% self.sociallogin.account.get_provider().name)
class DisconnectForm(forms.Form):
account = forms.ModelChoiceField(queryset=SocialAccount.objects.none(),
widget=forms.RadioSelect,
required=True)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.accounts = SocialAccount.objects.filter(user=self.request.user)
super(DisconnectForm, self).__init__(*args, **kwargs)
self.fields['account'].queryset = self.accounts
def clean(self):
cleaned_data = super(DisconnectForm, self).clean()
account = cleaned_data.get('account')
if account:
get_adapter(self.request).validate_disconnect(
account,
self.accounts)
return cleaned_data
def save(self):
account = self.cleaned_data['account']
account.delete()
signals.social_account_removed.send(sender=SocialAccount,
request=self.request,
socialaccount=account)
| <filename>authentication/socialaccount/forms.py
from __future__ import absolute_import
from django import forms
from authentication.account.forms import BaseSignupForm
from . import app_settings, signals
from .adapter import get_adapter
from .models import SocialAccount
class SignupForm(BaseSignupForm):
def __init__(self, *args, **kwargs):
self.sociallogin = kwargs.pop('sociallogin')
initial = get_adapter().get_signup_form_initial_data(
self.sociallogin)
kwargs.update({
'initial': initial,
'email_required': kwargs.get('email_required',
app_settings.EMAIL_REQUIRED)})
super(SignupForm, self).__init__(*args, **kwargs)
def save(self, request):
adapter = get_adapter(request)
user = adapter.save_user(request, self.sociallogin, form=self)
self.custom_signup(request, user)
return user
def validate_unique_email(self, value):
try:
return super(SignupForm, self).validate_unique_email(value)
except forms.ValidationError:
raise forms.ValidationError(
get_adapter().error_messages['email_taken']
% self.sociallogin.account.get_provider().name)
class DisconnectForm(forms.Form):
account = forms.ModelChoiceField(queryset=SocialAccount.objects.none(),
widget=forms.RadioSelect,
required=True)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.accounts = SocialAccount.objects.filter(user=self.request.user)
super(DisconnectForm, self).__init__(*args, **kwargs)
self.fields['account'].queryset = self.accounts
def clean(self):
cleaned_data = super(DisconnectForm, self).clean()
account = cleaned_data.get('account')
if account:
get_adapter(self.request).validate_disconnect(
account,
self.accounts)
return cleaned_data
def save(self):
account = self.cleaned_data['account']
account.delete()
signals.social_account_removed.send(sender=SocialAccount,
request=self.request,
socialaccount=account)
| none | 1 | 1.983611 | 2 |
|
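Both forms in the row above pop their extra keyword argument ('sociallogin', 'request') before delegating to the parent constructor, so the Django base class never sees an argument it does not understand. The pattern itself is framework-independent; a plain-Python sketch with made-up class names, not part of the Django or allauth API:
class BaseForm:
    # Stand-in for forms.Form / BaseSignupForm: it does not expect the extra kwarg.
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs

class DisconnectLikeForm(BaseForm):
    def __init__(self, *args, **kwargs):
        # Pop the custom argument first, mirroring DisconnectForm.__init__ above.
        self.request = kwargs.pop('request')
        super().__init__(*args, **kwargs)

form = DisconnectLikeForm(data={'account': 1}, request='fake-request')
print(form.request)  # 'fake-request'
print(form.kwargs)   # {'data': {'account': 1}}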
pytheos/pytheos.py | nilsbeck/pytheos | 0 | 5997 | #!/usr/bin/env python
""" Provides the primary interface into the library """
from __future__ import annotations
import asyncio
import logging
from typing import Callable, Optional, Union
from . import utils
from . import controllers
from .networking.connection import Connection
from .networking.types import SSDPResponse
from .networking.errors import ChannelUnavailableError
from .models.heos import HEOSEvent
from .models.system import AccountStatus
logger = logging.getLogger('pytheos')
class Pytheos:
""" Pytheos interface """
DEFAULT_PORT = 1255
@staticmethod
def check_channel_availability(channel: Connection):
""" Checks to make sure that the provided channel is available.
:param channel: Channel connection
:raises: ChannelUnavailableError
:return: None
"""
if not channel or not channel.connected:
raise ChannelUnavailableError()
@property
def log_level(self):
return logger.level
@log_level.setter
def log_level(self, value):
logger.setLevel(value)
@property
def connected(self):
return self._connected
@property
def signed_in(self):
return self._account_status == AccountStatus.SignedIn
@property
def username(self):
return self._account_username
def __init__(self, server: Union[str, SSDPResponse]=None, port: Optional[int]=DEFAULT_PORT):
""" Constructor
:param server: Server hostname or IP
:param port: Port number
"""
if isinstance(server, SSDPResponse):
server = utils.extract_host(server.location)
self.server: str = server
self.port: int = port
self._command_channel = Connection()
self._event_channel = Connection()
self._event_queue = asyncio.Queue()
self._event_task: Optional[asyncio.Task] = None
self._event_processor: Optional[asyncio.Task] = None
self._connected: bool = False
self._event_subscriptions: dict = {}
self._receive_events: bool = True
self._account_status: Optional[AccountStatus] = None
self._account_username: Optional[str] = None
self._players: list = []
self._groups: dict = {} # FIXME?: Not sure I like having this as a dict.
self._sources: dict = {} # FIXME?: Not sure I like having this as a dict.
self.api: Connection = self._command_channel
self._init_internal_event_handlers()
def __repr__(self):
return f'<Pytheos(server={self.server}, port={self.port})>'
def __enter__(self):
if not self._connected:
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._connected:
self.close()
async def connect(self, enable_event_connection: bool=True, refresh: bool=True) -> Pytheos:
""" Connect to our HEOS device.
:param enable_event_connection: Enables establishing an additional connection for system events
:param refresh: Determines if the system state should be automatically refreshed
:return: self
"""
logger.info(f'Connecting to {self.server}:{self.port}')
await self._command_channel.connect(self.server, self.port)
self._connected = True
self._receive_events = enable_event_connection
if self._receive_events:
await self._event_channel.connect(self.server, self.port, deduplicate=True)
await self.enable_event_reception(True)
loop = asyncio.get_running_loop()
self._event_task = loop.create_task(self._listen_for_events())
self._event_processor = loop.create_task(self._process_events())
if refresh:
await self.refresh()
return self
async def _set_register_for_change_events(self, value: bool):
""" Notifies HEOS that we want event messages on the event channel.
:param value: True or False
:return: None
"""
await self._event_channel.system.register_for_change_events(value)
def close(self):
""" Close the connection to our HEOS device
:return: None
"""
logger.info(f'Closing connection to {self.server}:{self.port}')
if self._event_task:
self._event_task.cancel()
if self._event_processor:
self._event_processor.cancel()
self._connected = False
def subscribe(self, event_name: str, callback: Callable):
""" Subscribe a callback function to a specific event
:param event_name: Event name
:param callback: Callback function
:return: None
"""
# FIXME: Change event_name to an enum
if self._event_subscriptions.get(event_name) is None:
self._event_subscriptions[event_name] = []
self._event_subscriptions[event_name].append(callback)
async def refresh(self):
""" Refreshes internal information from the HEOS system.
:return: None
"""
await self.check_account()
await self.get_players()
await self.get_groups()
await self.get_sources()
async def reboot(self):
""" Instructs the system to reboot.
:return: None
"""
await self.api.system.reboot()
async def check_account(self) -> tuple:
""" Checks if the system is logged into HEOS and returns the status and account name, if available.
:return: tuple
"""
self._account_status, self._account_username = await self.api.system.check_account()
return self._account_status, self._account_username
async def sign_in(self, username: str, password: str):
""" Signs the system into the HEOS service.
:param username: Username
:param password: Password
:return: None
"""
await self.api.system.sign_in(username, password)
async def sign_out(self):
""" Signs out from the HEOS service.
:return: None
"""
await self.api.system.sign_out()
async def get_players(self):
""" Retrieves a mapping of IDs to Players present in the HEOS system.
:return: list
"""
self._players = [controllers.Player(self, player) for player in await self.api.player.get_players()]
return self._players
async def get_group(self, group_id):
""" Retrieve a specific group by ID.
:param group_id: Group ID
:return: PytheosGroup
"""
groups = await self.get_groups()
return groups.get(group_id)
async def get_groups(self):
""" Retrieves a mapping of IDs to Groups present in the HEOS system.
:return: dict
"""
self._groups = {}
for group in await self.api.group.get_groups():
self._groups[group.group_id] = controllers.Group(self, group)
return self._groups
async def get_sources(self):
""" Retrieves a mapping of IDs to Sources present in the HEOS system.
:return:
"""
self._sources = {}
for source in await self.api.browse.get_music_sources():
self._sources[source.source_id] = controllers.Source(self, source)
return self._sources
def is_receiving_events(self):
""" Retrieves whether or not we're receiving events.
:return: bool
"""
return self._receive_events
async def enable_event_reception(self, value):
""" Enables or disables event reception.
:param value: True or False
:return: None
"""
self._receive_events = value
await self._set_register_for_change_events(value)
async def _listen_for_events(self):
""" Async task that reads messages from the event channel and adds them to our event queue for
later processing.
:return: None
"""
while True:
results = await self._event_channel.read_message()
if results:
event = HEOSEvent(results)
logger.debug(f"Received event: {event!r}")
await self._event_queue.put(event)
await asyncio.sleep(0.5)
async def _process_events(self):
""" Async task that processes events that originate from the event channel.
:return: None
"""
while True:
event = await self._event_queue.get()
if event:
logger.debug(f'Processing event: {event!r}')
await self._event_handler(event)
await asyncio.sleep(0.5)
async def _event_handler(self, event: HEOSEvent):
""" Internal event handler
:param event: HEOS Event
:return: None
"""
loop = asyncio.get_running_loop()
for callback in self._event_subscriptions.get(event.command, []):
logger.debug(f'Calling registered callback {callback} for event {event!r}')
loop.create_task(callback(event))
def _init_internal_event_handlers(self):
""" Initialize the internal event handlers
:return: None
"""
# FIXME: Meh, do something better with this.
internal_handler_map = {
# 'event/sources_changed': self._handle_sources_changed,
# 'event/players_changed': self._handle_players_changed,
# 'event/groups_changed': self._handle_groups_changed,
# 'event/player_state_changed': self._handle_player_state_changed,
# 'event/player_now_playing_changed': self._handle_now_playing_changed,
# 'event/player_now_playing_progress': self._handle_now_playing_progress,
# 'event/player_playback_error': self._handle_playback_error,
# 'event/player_queue_changed': self._handle_queue_changed,
# 'event/player_volume_changed': self._handle_volume_changed,
# 'event/repeat_mode_changed': self._handle_repeat_mode_changed,
# 'event/shuffle_mode_changed': self._handle_shuffle_mode_changed,
# 'event/group_volume_changed': self._handle_group_volume_changed,
# 'event/user_changed': self._handle_user_changed,
}
for event, callback in internal_handler_map.items():
self.subscribe(event, callback)
def _handle_sources_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_players_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_groups_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_player_state_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_progress(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_playback_error(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_queue_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_repeat_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_shuffle_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_group_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_user_changed(self, event: HEOSEvent):
raise NotImplementedError()
async def connect(host: Union[SSDPResponse, str], port: int=Pytheos.DEFAULT_PORT) -> Pytheos:
""" Connect to the provided host and return a context manager for use with the connection.
:param host: Host to connect to
:param port: Port to connect to
:raises: ValueError
:return: The Pytheos instance
"""
if isinstance(host, SSDPResponse):
host = utils.extract_host(host.location)
conn = Pytheos(host, port)
return await conn.connect()
| #!/usr/bin/env python
""" Provides the primary interface into the library """
from __future__ import annotations
import asyncio
import logging
from typing import Callable, Optional, Union
from . import utils
from . import controllers
from .networking.connection import Connection
from .networking.types import SSDPResponse
from .networking.errors import ChannelUnavailableError
from .models.heos import HEOSEvent
from .models.system import AccountStatus
logger = logging.getLogger('pytheos')
class Pytheos:
""" Pytheos interface """
DEFAULT_PORT = 1255
@staticmethod
def check_channel_availability(channel: Connection):
""" Checks to make sure that the provided channel is available.
:param channel: Channel connection
:raises: ChannelUnavailableError
:return: None
"""
if not channel or not channel.connected:
raise ChannelUnavailableError()
@property
def log_level(self):
return logger.level
@log_level.setter
def log_level(self, value):
logger.setLevel(value)
@property
def connected(self):
return self._connected
@property
def signed_in(self):
return self._account_status == AccountStatus.SignedIn
@property
def username(self):
return self._account_username
def __init__(self, server: Union[str, SSDPResponse]=None, port: Optional[int]=DEFAULT_PORT):
""" Constructor
:param server: Server hostname or IP
:param port: Port number
"""
if isinstance(server, SSDPResponse):
server = utils.extract_host(server.location)
self.server: str = server
self.port: int = port
self._command_channel = Connection()
self._event_channel = Connection()
self._event_queue = asyncio.Queue()
self._event_task: Optional[asyncio.Task] = None
self._event_processor: Optional[asyncio.Task] = None
self._connected: bool = False
self._event_subscriptions: dict = {}
self._receive_events: bool = True
self._account_status: Optional[AccountStatus] = None
self._account_username: Optional[str] = None
self._players: list = []
self._groups: dict = {} # FIXME?: Not sure I like having this as a dict.
self._sources: dict = {} # FIXME?: Not sure I like having this as a dict.
self.api: Connection = self._command_channel
self._init_internal_event_handlers()
def __repr__(self):
return f'<Pytheos(server={self.server}, port={self.port})>'
def __enter__(self):
if not self._connected:
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._connected:
self.close()
async def connect(self, enable_event_connection: bool=True, refresh: bool=True) -> Pytheos:
""" Connect to our HEOS device.
:param enable_event_connection: Enables establishing an additional connection for system events
:param refresh: Determines if the system state should be automatically refreshed
:return: self
"""
logger.info(f'Connecting to {self.server}:{self.port}')
await self._command_channel.connect(self.server, self.port)
self._connected = True
self._receive_events = enable_event_connection
if self._receive_events:
await self._event_channel.connect(self.server, self.port, deduplicate=True)
await self.enable_event_reception(True)
loop = asyncio.get_running_loop()
self._event_task = loop.create_task(self._listen_for_events())
self._event_processor = loop.create_task(self._process_events())
if refresh:
await self.refresh()
return self
async def _set_register_for_change_events(self, value: bool):
""" Notifies HEOS that we want event messages on the event channel.
:param value: True or False
:return: None
"""
await self._event_channel.system.register_for_change_events(value)
def close(self):
""" Close the connection to our HEOS device
:return: None
"""
logger.info(f'Closing connection to {self.server}:{self.port}')
if self._event_task:
self._event_task.cancel()
if self._event_processor:
self._event_processor.cancel()
self._connected = False
def subscribe(self, event_name: str, callback: Callable):
""" Subscribe a callback function to a specific event
:param event_name: Event name
:param callback: Callback function
:return: None
"""
# FIXME: Change event_name to an enum
if self._event_subscriptions.get(event_name) is None:
self._event_subscriptions[event_name] = []
self._event_subscriptions[event_name].append(callback)
async def refresh(self):
""" Refreshes internal information from the HEOS system.
:return: None
"""
await self.check_account()
await self.get_players()
await self.get_groups()
await self.get_sources()
async def reboot(self):
""" Instructs the system to reboot.
:return: None
"""
await self.api.system.reboot()
async def check_account(self) -> tuple:
""" Checks if the system is logged into HEOS and returns the status and account name, if available.
:return: tuple
"""
self._account_status, self._account_username = await self.api.system.check_account()
return self._account_status, self._account_username
async def sign_in(self, username: str, password: str):
""" Signs the system into the HEOS service.
:param username: Username
:param password: Password
:return: None
"""
await self.api.system.sign_in(username, password)
async def sign_out(self):
""" Signs out from the HEOS service.
:return: None
"""
await self.api.system.sign_out()
async def get_players(self):
""" Retrieves a mapping of IDs to Players present in the HEOS system.
:return: list
"""
self._players = [controllers.Player(self, player) for player in await self.api.player.get_players()]
return self._players
async def get_group(self, group_id):
""" Retrieve a specific group by ID.
:param group_id: Group ID
:return: PytheosGroup
"""
groups = await self.get_groups()
return groups.get(group_id)
async def get_groups(self):
""" Retrieves a mapping of IDs to Groups present in the HEOS system.
:return: dict
"""
self._groups = {}
for group in await self.api.group.get_groups():
self._groups[group.group_id] = controllers.Group(self, group)
return self._groups
async def get_sources(self):
""" Retrieves a mapping of IDs to Sources present in the HEOS system.
:return:
"""
self._sources = {}
for source in await self.api.browse.get_music_sources():
self._sources[source.source_id] = controllers.Source(self, source)
return self._sources
def is_receiving_events(self):
""" Retrieves whether or not we're receiving events.
:return: bool
"""
return self._receive_events
async def enable_event_reception(self, value):
""" Enables or disables event reception.
:param value: True or False
:return: None
"""
self._receive_events = value
await self._set_register_for_change_events(value)
async def _listen_for_events(self):
""" Async task that reads messages from the event channel and adds them to our event queue for
later processing.
:return: None
"""
while True:
results = await self._event_channel.read_message()
if results:
event = HEOSEvent(results)
logger.debug(f"Received event: {event!r}")
await self._event_queue.put(event)
await asyncio.sleep(0.5)
async def _process_events(self):
""" Async task that processes events that originate from the event channel.
:return: None
"""
while True:
event = await self._event_queue.get()
if event:
logger.debug(f'Processing event: {event!r}')
await self._event_handler(event)
await asyncio.sleep(0.5)
async def _event_handler(self, event: HEOSEvent):
""" Internal event handler
:param event: HEOS Event
:return: None
"""
loop = asyncio.get_running_loop()
for callback in self._event_subscriptions.get(event.command, []):
logger.debug(f'Calling registered callback {callback} for event {event!r}')
loop.create_task(callback(event))
def _init_internal_event_handlers(self):
""" Initialize the internal event handlers
:return: None
"""
# FIXME: Meh, do something better with this.
internal_handler_map = {
# 'event/sources_changed': self._handle_sources_changed,
# 'event/players_changed': self._handle_players_changed,
# 'event/groups_changed': self._handle_groups_changed,
# 'event/player_state_changed': self._handle_player_state_changed,
# 'event/player_now_playing_changed': self._handle_now_playing_changed,
# 'event/player_now_playing_progress': self._handle_now_playing_progress,
# 'event/player_playback_error': self._handle_playback_error,
# 'event/player_queue_changed': self._handle_queue_changed,
# 'event/player_volume_changed': self._handle_volume_changed,
# 'event/repeat_mode_changed': self._handle_repeat_mode_changed,
# 'event/shuffle_mode_changed': self._handle_shuffle_mode_changed,
# 'event/group_volume_changed': self._handle_group_volume_changed,
# 'event/user_changed': self._handle_user_changed,
}
for event, callback in internal_handler_map.items():
self.subscribe(event, callback)
def _handle_sources_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_players_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_groups_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_player_state_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_progress(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_playback_error(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_queue_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_repeat_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_shuffle_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_group_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_user_changed(self, event: HEOSEvent):
raise NotImplementedError()
async def connect(host: Union[SSDPResponse, str], port: int=Pytheos.DEFAULT_PORT) -> Pytheos:
""" Connect to the provided host and return a context manager for use with the connection.
:param host: Host to connect to
:param port: Port to connect to
:raises: ValueError
:return: The Pytheos instance
"""
if isinstance(host, SSDPResponse):
host = utils.extract_host(host.location)
conn = Pytheos(host, port)
return await conn.connect()
| en | 0.762989 | #!/usr/bin/env python Provides the primary interface into the library Pytheos interface Checks to make sure that the provided channel is available. :param channel: Channel connection :raises: ChannelUnavailableError :return: None Constructor :param server: Server hostname or IP :param port: Port number # FIXME?: Not sure I like having this as a dict. # FIXME?: Not sure I like having this as a dict. Connect to our HEOS device. :param enable_event_connection: Enables establishing an additional connection for system events :param refresh: Determines if the system state should be automatically refreshed :return: self Notifies HEOS that we want event messages on the event channel. :param value: True or False :return: None Close the connection to our HEOS device :return: None Subscribe a callback function to a specific event :param event_name: Event name :param callback: Callback function :return: None # FIXME: Change event_name to an enum Refreshes internal information from the HEOS system. :return: None Instructs the system to reboot. :return: None Checks if the system is logged into HEOS and returns the status and account name, if available. :return: tuple Signs the system into the HEOS service. :param username: Username :param password: Password :return: None Signs out from the HEOS service. :return: None Retrieves a mapping of IDs to Players present in the HEOS system. :return: list Retrieve a specific group by ID. :param group_id: Group ID :return: PytheosGroup Retrieves a mapping of IDs to Groups present in the HEOS system. :return: dict Retrieves a mapping of IDs to Sources present in the HEOS system. :return: Retrieves whether or not we're receiving events. :return: bool Enables or disables event reception. :param value: True or False :return: None Async task that reads messages from the event channel and adds them to our event queue for later processing. :return: None Async task that processes events that originate from the event channel. :return: None Internal event handler :param event: HEOS Event :return: None Initialize the internal event handlers :return: None # FIXME: Meh, do something better with this. # 'event/sources_changed': self._handle_sources_changed, # 'event/players_changed': self._handle_players_changed, # 'event/groups_changed': self._handle_groups_changed, # 'event/player_state_changed': self._handle_player_state_changed, # 'event/player_now_playing_changed': self._handle_now_playing_changed, # 'event/player_now_playing_progress': self._handle_now_playing_progress, # 'event/player_playback_error': self._handle_playback_error, # 'event/player_queue_changed': self._handle_queue_changed, # 'event/player_volume_changed': self._handle_volume_changed, # 'event/repeat_mode_changed': self._handle_repeat_mode_changed, # 'event/shuffle_mode_changed': self._handle_shuffle_mode_changed, # 'event/group_volume_changed': self._handle_group_volume_changed, # 'event/user_changed': self._handle_user_changed, Connect to the provided host and return a context manager for use with the connection. :param host: Host to connect to :param port: Port to connect to :raises: ValueError :return: The Pytheos instance | 2.305383 | 2 |
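A minimal usage sketch for the Pytheos interface in the row above — not taken from the original repository; the top-level import path and the HEOS host address are assumptions:
import asyncio
from pytheos import connect  # assumes the module above is exposed by a `pytheos` package

async def main():
    # connect() yields a connected Pytheos instance; with the defaults it also
    # refreshes account status, players, groups and sources.
    heos = await connect('192.168.1.50')  # placeholder HEOS host
    try:
        print('signed in:', heos.signed_in, 'as', heos.username)
        for player in await heos.get_players():
            print(player)
    finally:
        heos.close()

asyncio.run(main())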
test_modules/language_dictionary_test.py | 1goodday/Google-Dictionary-Pronunciation.ankiaddon | 1 | 5998 | <gh_stars>1-10
import csv
_iso_639_1_codes_file = open("files/ISO-639-1_Codes.csv", mode='r')
_iso_639_1_codes_dictreader = csv.DictReader(_iso_639_1_codes_file)
_iso_639_1_codes_dict: dict = {}
for _row in _iso_639_1_codes_dictreader:
_iso_639_1_codes_dict[_row['ISO-639-1 Code']] = _row['Language']
print(str(_iso_639_1_codes_dict)) | import csv
_iso_639_1_codes_file = open("files/ISO-639-1_Codes.csv", mode='r')
_iso_639_1_codes_dictreader = csv.DictReader(_iso_639_1_codes_file)
_iso_639_1_codes_dict: dict = {}
for _row in _iso_639_1_codes_dictreader:
_iso_639_1_codes_dict[_row['ISO-639-1 Code']] = _row['Language']
print(str(_iso_639_1_codes_dict)) | none | 1 | 3.029177 | 3 |
|
tianshou/data/collector.py | DZ9/tianshou | 1 | 5999 | import time
import torch
import warnings
import numpy as np
from tianshou.env import BaseVectorEnv
from tianshou.data import Batch, ReplayBuffer,\
ListReplayBuffer
from tianshou.utils import MovAvg
class Collector(object):
"""docstring for Collector"""
def __init__(self, policy, env, buffer=None, stat_size=100):
super().__init__()
self.env = env
self.env_num = 1
self.collect_step = 0
self.collect_episode = 0
self.collect_time = 0
if buffer is None:
self.buffer = ReplayBuffer(100)
else:
self.buffer = buffer
self.policy = policy
self.process_fn = policy.process_fn
self._multi_env = isinstance(env, BaseVectorEnv)
self._multi_buf = False # True if buf is a list
# need multiple cache buffers only if storing in one buffer
self._cached_buf = []
if self._multi_env:
self.env_num = len(env)
if isinstance(self.buffer, list):
assert len(self.buffer) == self.env_num, \
'The number of data buffer does not match the number of ' \
'input env.'
self._multi_buf = True
elif isinstance(self.buffer, ReplayBuffer):
self._cached_buf = [
ListReplayBuffer() for _ in range(self.env_num)]
else:
raise TypeError('The buffer in data collector is invalid!')
self.reset_env()
self.reset_buffer()
# state over batch is either a list, an np.ndarray, or a torch.Tensor
self.state = None
self.step_speed = MovAvg(stat_size)
self.episode_speed = MovAvg(stat_size)
def reset_buffer(self):
if self._multi_buf:
for b in self.buffer:
b.reset()
else:
self.buffer.reset()
def get_env_num(self):
return self.env_num
def reset_env(self):
self._obs = self.env.reset()
self._act = self._rew = self._done = self._info = None
if self._multi_env:
self.reward = np.zeros(self.env_num)
self.length = np.zeros(self.env_num)
else:
self.reward, self.length = 0, 0
for b in self._cached_buf:
b.reset()
def seed(self, seed=None):
if hasattr(self.env, 'seed'):
return self.env.seed(seed)
def render(self, **kwargs):
if hasattr(self.env, 'render'):
return self.env.render(**kwargs)
def close(self):
if hasattr(self.env, 'close'):
self.env.close()
def _make_batch(self, data):
if isinstance(data, np.ndarray):
return data[None]
else:
return np.array([data])
def collect(self, n_step=0, n_episode=0, render=0):
warning_count = 0
if not self._multi_env:
n_episode = np.sum(n_episode)
start_time = time.time()
assert sum([(n_step != 0), (n_episode != 0)]) == 1, \
"One and only one collection number specification permitted!"
cur_step = 0
cur_episode = np.zeros(self.env_num) if self._multi_env else 0
reward_sum = 0
length_sum = 0
while True:
if warning_count >= 100000:
warnings.warn(
'There are already many steps in an episode. '
'You should add a time limitation to your environment!',
Warning)
if self._multi_env:
batch_data = Batch(
obs=self._obs, act=self._act, rew=self._rew,
done=self._done, obs_next=None, info=self._info)
else:
batch_data = Batch(
obs=self._make_batch(self._obs),
act=self._make_batch(self._act),
rew=self._make_batch(self._rew),
done=self._make_batch(self._done),
obs_next=None,
info=self._make_batch(self._info))
result = self.policy(batch_data, self.state)
self.state = result.state if hasattr(result, 'state') else None
if isinstance(result.act, torch.Tensor):
self._act = result.act.detach().cpu().numpy()
elif not isinstance(self._act, np.ndarray):
self._act = np.array(result.act)
else:
self._act = result.act
obs_next, self._rew, self._done, self._info = self.env.step(
self._act if self._multi_env else self._act[0])
if render > 0:
self.env.render()
time.sleep(render)
self.length += 1
self.reward += self._rew
if self._multi_env:
for i in range(self.env_num):
data = {
'obs': self._obs[i], 'act': self._act[i],
'rew': self._rew[i], 'done': self._done[i],
'obs_next': obs_next[i], 'info': self._info[i]}
if self._cached_buf:
warning_count += 1
self._cached_buf[i].add(**data)
elif self._multi_buf:
warning_count += 1
self.buffer[i].add(**data)
cur_step += 1
else:
warning_count += 1
self.buffer.add(**data)
cur_step += 1
if self._done[i]:
if n_step != 0 or np.isscalar(n_episode) or \
cur_episode[i] < n_episode[i]:
cur_episode[i] += 1
reward_sum += self.reward[i]
length_sum += self.length[i]
if self._cached_buf:
cur_step += len(self._cached_buf[i])
self.buffer.update(self._cached_buf[i])
self.reward[i], self.length[i] = 0, 0
if self._cached_buf:
self._cached_buf[i].reset()
if isinstance(self.state, list):
self.state[i] = None
elif self.state is not None:
if isinstance(self.state[i], dict):
self.state[i] = {}
else:
self.state[i] = self.state[i] * 0
if isinstance(self.state, torch.Tensor):
# remove ref count in pytorch (?)
self.state = self.state.detach()
if sum(self._done):
obs_next = self.env.reset(np.where(self._done)[0])
if n_episode != 0:
if isinstance(n_episode, list) and \
(cur_episode >= np.array(n_episode)).all() or \
np.isscalar(n_episode) and \
cur_episode.sum() >= n_episode:
break
else:
self.buffer.add(
self._obs, self._act[0], self._rew,
self._done, obs_next, self._info)
cur_step += 1
if self._done:
cur_episode += 1
reward_sum += self.reward
length_sum += self.length
self.reward, self.length = 0, 0
self.state = None
obs_next = self.env.reset()
if n_episode != 0 and cur_episode >= n_episode:
break
if n_step != 0 and cur_step >= n_step:
break
self._obs = obs_next
self._obs = obs_next
if self._multi_env:
cur_episode = sum(cur_episode)
duration = time.time() - start_time
self.step_speed.add(cur_step / duration)
self.episode_speed.add(cur_episode / duration)
self.collect_step += cur_step
self.collect_episode += cur_episode
self.collect_time += duration
if isinstance(n_episode, list):
n_episode = np.sum(n_episode)
else:
n_episode = max(cur_episode, 1)
return {
'n/ep': cur_episode,
'n/st': cur_step,
'v/st': self.step_speed.get(),
'v/ep': self.episode_speed.get(),
'rew': reward_sum / n_episode,
'len': length_sum / n_episode,
}
def sample(self, batch_size):
if self._multi_buf:
if batch_size > 0:
lens = [len(b) for b in self.buffer]
total = sum(lens)
batch_index = np.random.choice(
total, batch_size, p=np.array(lens) / total)
else:
batch_index = np.array([])
batch_data = Batch()
for i, b in enumerate(self.buffer):
cur_batch = (batch_index == i).sum()
if batch_size and cur_batch or batch_size <= 0:
batch, indice = b.sample(cur_batch)
batch = self.process_fn(batch, b, indice)
batch_data.append(batch)
else:
batch_data, indice = self.buffer.sample(batch_size)
batch_data = self.process_fn(batch_data, self.buffer, indice)
return batch_data
| import time
import torch
import warnings
import numpy as np
from tianshou.env import BaseVectorEnv
from tianshou.data import Batch, ReplayBuffer,\
ListReplayBuffer
from tianshou.utils import MovAvg
class Collector(object):
"""docstring for Collector"""
def __init__(self, policy, env, buffer=None, stat_size=100):
super().__init__()
self.env = env
self.env_num = 1
self.collect_step = 0
self.collect_episode = 0
self.collect_time = 0
if buffer is None:
self.buffer = ReplayBuffer(100)
else:
self.buffer = buffer
self.policy = policy
self.process_fn = policy.process_fn
self._multi_env = isinstance(env, BaseVectorEnv)
self._multi_buf = False # True if buf is a list
# need multiple cache buffers only if storing in one buffer
self._cached_buf = []
if self._multi_env:
self.env_num = len(env)
if isinstance(self.buffer, list):
assert len(self.buffer) == self.env_num, \
'The number of data buffer does not match the number of ' \
'input env.'
self._multi_buf = True
elif isinstance(self.buffer, ReplayBuffer):
self._cached_buf = [
ListReplayBuffer() for _ in range(self.env_num)]
else:
raise TypeError('The buffer in data collector is invalid!')
self.reset_env()
self.reset_buffer()
# state over batch is either a list, an np.ndarray, or a torch.Tensor
self.state = None
self.step_speed = MovAvg(stat_size)
self.episode_speed = MovAvg(stat_size)
def reset_buffer(self):
if self._multi_buf:
for b in self.buffer:
b.reset()
else:
self.buffer.reset()
def get_env_num(self):
return self.env_num
def reset_env(self):
self._obs = self.env.reset()
self._act = self._rew = self._done = self._info = None
if self._multi_env:
self.reward = np.zeros(self.env_num)
self.length = np.zeros(self.env_num)
else:
self.reward, self.length = 0, 0
for b in self._cached_buf:
b.reset()
def seed(self, seed=None):
if hasattr(self.env, 'seed'):
return self.env.seed(seed)
def render(self, **kwargs):
if hasattr(self.env, 'render'):
return self.env.render(**kwargs)
def close(self):
if hasattr(self.env, 'close'):
self.env.close()
def _make_batch(self, data):
if isinstance(data, np.ndarray):
return data[None]
else:
return np.array([data])
def collect(self, n_step=0, n_episode=0, render=0):
warning_count = 0
if not self._multi_env:
n_episode = np.sum(n_episode)
start_time = time.time()
assert sum([(n_step != 0), (n_episode != 0)]) == 1, \
"One and only one collection number specification permitted!"
cur_step = 0
cur_episode = np.zeros(self.env_num) if self._multi_env else 0
reward_sum = 0
length_sum = 0
while True:
if warning_count >= 100000:
warnings.warn(
'There are already many steps in an episode. '
'You should add a time limitation to your environment!',
Warning)
if self._multi_env:
batch_data = Batch(
obs=self._obs, act=self._act, rew=self._rew,
done=self._done, obs_next=None, info=self._info)
else:
batch_data = Batch(
obs=self._make_batch(self._obs),
act=self._make_batch(self._act),
rew=self._make_batch(self._rew),
done=self._make_batch(self._done),
obs_next=None,
info=self._make_batch(self._info))
result = self.policy(batch_data, self.state)
self.state = result.state if hasattr(result, 'state') else None
if isinstance(result.act, torch.Tensor):
self._act = result.act.detach().cpu().numpy()
elif not isinstance(self._act, np.ndarray):
self._act = np.array(result.act)
else:
self._act = result.act
obs_next, self._rew, self._done, self._info = self.env.step(
self._act if self._multi_env else self._act[0])
if render > 0:
self.env.render()
time.sleep(render)
self.length += 1
self.reward += self._rew
if self._multi_env:
for i in range(self.env_num):
data = {
'obs': self._obs[i], 'act': self._act[i],
'rew': self._rew[i], 'done': self._done[i],
'obs_next': obs_next[i], 'info': self._info[i]}
if self._cached_buf:
warning_count += 1
self._cached_buf[i].add(**data)
elif self._multi_buf:
warning_count += 1
self.buffer[i].add(**data)
cur_step += 1
else:
warning_count += 1
self.buffer.add(**data)
cur_step += 1
if self._done[i]:
if n_step != 0 or np.isscalar(n_episode) or \
cur_episode[i] < n_episode[i]:
cur_episode[i] += 1
reward_sum += self.reward[i]
length_sum += self.length[i]
if self._cached_buf:
cur_step += len(self._cached_buf[i])
self.buffer.update(self._cached_buf[i])
self.reward[i], self.length[i] = 0, 0
if self._cached_buf:
self._cached_buf[i].reset()
if isinstance(self.state, list):
self.state[i] = None
elif self.state is not None:
if isinstance(self.state[i], dict):
self.state[i] = {}
else:
self.state[i] = self.state[i] * 0
if isinstance(self.state, torch.Tensor):
# remove ref count in pytorch (?)
self.state = self.state.detach()
if sum(self._done):
obs_next = self.env.reset(np.where(self._done)[0])
if n_episode != 0:
if isinstance(n_episode, list) and \
(cur_episode >= np.array(n_episode)).all() or \
np.isscalar(n_episode) and \
cur_episode.sum() >= n_episode:
break
else:
self.buffer.add(
self._obs, self._act[0], self._rew,
self._done, obs_next, self._info)
cur_step += 1
if self._done:
cur_episode += 1
reward_sum += self.reward
length_sum += self.length
self.reward, self.length = 0, 0
self.state = None
obs_next = self.env.reset()
if n_episode != 0 and cur_episode >= n_episode:
break
if n_step != 0 and cur_step >= n_step:
break
self._obs = obs_next
self._obs = obs_next
if self._multi_env:
cur_episode = sum(cur_episode)
duration = time.time() - start_time
self.step_speed.add(cur_step / duration)
self.episode_speed.add(cur_episode / duration)
self.collect_step += cur_step
self.collect_episode += cur_episode
self.collect_time += duration
if isinstance(n_episode, list):
n_episode = np.sum(n_episode)
else:
n_episode = max(cur_episode, 1)
return {
'n/ep': cur_episode,
'n/st': cur_step,
'v/st': self.step_speed.get(),
'v/ep': self.episode_speed.get(),
'rew': reward_sum / n_episode,
'len': length_sum / n_episode,
}
def sample(self, batch_size):
if self._multi_buf:
if batch_size > 0:
lens = [len(b) for b in self.buffer]
total = sum(lens)
batch_index = np.random.choice(
total, batch_size, p=np.array(lens) / total)
else:
batch_index = np.array([])
batch_data = Batch()
for i, b in enumerate(self.buffer):
cur_batch = (batch_index == i).sum()
if batch_size and cur_batch or batch_size <= 0:
batch, indice = b.sample(cur_batch)
batch = self.process_fn(batch, b, indice)
batch_data.append(batch)
else:
batch_data, indice = self.buffer.sample(batch_size)
batch_data = self.process_fn(batch_data, self.buffer, indice)
return batch_data
| en | 0.698458 | docstring for Collector # True if buf is a list # need multiple cache buffers only if storing in one buffer # state over batch is either a list, an np.ndarray, or a torch.Tensor # remove ref count in pytorch (?) | 2.236032 | 2 |
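A rough usage sketch for the Collector in the row above — not from the original repository; it assumes Collector and ReplayBuffer are re-exported by tianshou.data and that the caller supplies a real tianshou policy instance:
import gym
from tianshou.data import Collector, ReplayBuffer

def collect_demo(policy):
    # `policy` is any tianshou policy object (e.g. a DQNPolicy); constructing one
    # is outside the scope of this sketch.
    env = gym.make('CartPole-v0')
    collector = Collector(policy, env, buffer=ReplayBuffer(20000))
    stats = collector.collect(n_step=1000)   # exactly one of n_step / n_episode may be set
    print(stats['n/ep'], stats['n/st'], stats['rew'], stats['len'])
    batch = collector.sample(batch_size=64)  # batch already passed through policy.process_fn
    collector.close()
    return batch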
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.