the-stack_0_11204 | # included code for NAF/KAF
from span_data import *
from external_references_data import *
from term_sentiment_data import *
from lxml import etree
class Cterm:
def __init__(self,node=None,type='NAF'):
self.type = type
if node is None:
self.node = etree.Element('term')
else:
self.node = node
def get_node(self):
return self.node
def get_id(self):
if self.type == 'NAF':
return self.node.get('id')
elif self.type == 'KAF':
return self.node.get('tid')
def get_lemma(self):
return self.node.get('lemma')
def get_pos(self):
return self.node.get('pos')
def get_morphofeat(self):
return self.node.get('morphofeat')
def get_span(self):
node_span = self.node.find('span')
if node_span is not None:
return Cspan(node_span)
else:
return None
def get_sentiment(self):
sent_node = self.node.find('sentiment')
if sent_node is None:
return None
else:
return Cterm_sentiment(sent_node)
def add_external_reference(self,ext_ref):
ext_refs_node = self.node.find('externalReferences')
if ext_refs_node is None:
ext_refs_obj = CexternalReferences()
self.node.append(ext_refs_obj.get_node())
else:
ext_refs_obj = CexternalReferences(ext_refs_node)
ext_refs_obj.add_external_reference(ext_ref)
def add_term_sentiment(self,term_sentiment):
self.node.append(term_sentiment.get_node())
def get_external_references(self):
ext_ref_node = self.node.find('externalReferences')
if ext_ref_node is not None:
ext_refs_obj = CexternalReferences(ext_ref_node)
for ref in ext_refs_obj:
yield ref
class Cterms:
def __init__(self,node=None,type='NAF'):
self.idx = {}
self.type = type
if node is None:
self.node = etree.Element('terms')
else:
self.node = node
for node_term in self.__get_node_terms():
self.idx[node_term.get('id')] = node_term
def get_node(self):
return self.node
def to_kaf(self):
if self.type == 'NAF':
self.type = 'KAF'
for node in self.__get_node_terms():
node.set('tid',node.get('id'))
del node.attrib['id']
def to_naf(self):
if self.type == 'KAF':
self.type = 'NAF'
for node in self.__get_node_terms():
node.set('id',node.get('tid'))
del node.attrib['tid']
def __get_node_terms(self):
for node_term in self.node.findall('term'):
yield node_term
def __iter__(self):
for node_term in self.__get_node_terms():
yield Cterm(node_term,self.type)
def get_term(self,term_id):
if term_id in self.idx:
return Cterm(self.idx[term_id],self.type)
else:
return None
def add_external_reference(self,term_id, external_ref):
if term_id in self.idx:
term_obj = Cterm(self.idx[term_id],self.type)
term_obj.add_external_reference(external_ref)
def remove_terms(self,list_term_ids):
nodes_to_remove = set()
for term in self:
if term.get_id() in list_term_ids:
nodes_to_remove.add(term.get_node())
                # Also remove the XML comment node preceding the term, if any
prv = term.get_node().getprevious()
if prv is not None:
nodes_to_remove.add(prv)
for node in nodes_to_remove:
self.node.remove(node)
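# A minimal usage sketch for the term layer above (not part of the original
# module). The file name 'example.naf' is a placeholder for a NAF document
# that already contains a <terms> element; treat this as an illustration of
# the classes defined here, not as the library's official API documentation.
if __name__ == '__main__':
    tree = etree.parse('example.naf')   # hypothetical NAF file
    terms_layer = Cterms(tree.find('terms'), type='NAF')
    for term in terms_layer:
        print(term.get_id(), term.get_lemma(), term.get_pos())
    terms_layer.to_kaf()                # switch the id attributes to KAF style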
|
the-stack_0_11206 |
class Jet:
def __init__(
self,
progenitor=None,
constituents=None,
mass=None,
pt=None,
eta=None,
phi=None,
y=None,
tree=None,
root_id=None,
tree_content=None,
**kwargs
):
self.constituents = constituents
self.mass = mass
self.pt = pt
self.eta = eta
self.phi = phi
self.y = y
self.progenitor = progenitor
self.tree = tree
self.root_id = root_id
self.tree_content = tree_content
def __len__(self):
return len(self.constituents)
class QuarkGluonJet(Jet):
def __init__(self,
photon_pt=None,
photon_eta=None,
photon_phi=None,
env=None,
**kwargs):
self.photon_pt = photon_pt
self.photon_eta = photon_eta
self.photon_phi = photon_phi
self.env = env
super().__init__(**kwargs)
|
the-stack_0_11208 | # USAGE
# python match_histograms.py --source empire_state_cloudy.png --reference empire_state_sunset.png
# import the necessary packages
from skimage import exposure
import matplotlib.pyplot as plt
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--source", required=True,
help="path to the input source image")
ap.add_argument("-r", "--reference", required=True,
help="path to the input reference image")
args = vars(ap.parse_args())
# load the source and reference images
print("[INFO] loading source and reference images...")
src = cv2.imread(args["source"])
ref = cv2.imread(args["reference"])
# determine if we are performing multichannel histogram matching
# and then perform histogram matching itself
print("[INFO] performing histogram matching...")
multi = True if src.shape[-1] > 1 else False
#matched = exposure.match_histograms(src, ref, channel_axis=2, multichannel=multi)
matched = exposure.match_histograms(src, ref, multichannel=multi)
# show the output images
cv2.imshow("Source", src)
cv2.imshow("Reference", ref)
cv2.imshow("Matched", matched)
cv2.waitKey(0)
# construct a figure to display the histogram plots for each channel
# before and after histogram matching was applied
(fig, axs) = plt.subplots(nrows=3, ncols=3, figsize=(8, 8))
# loop over our source image, reference image, and output matched
# image
for (i, image) in enumerate((src, ref, matched)):
# convert the image from BGR to RGB channel ordering
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# loop over the names of the channels in RGB order
for (j, color) in enumerate(("red", "green", "blue")):
# compute a histogram for the current channel and plot it
(hist, bins) = exposure.histogram(image[..., j],
source_range="dtype")
axs[j, i].plot(bins, hist / hist.max())
# compute the cumulative distribution function for the
# current channel and plot it
(cdf, bins) = exposure.cumulative_distribution(image[..., j])
axs[j, i].plot(bins, cdf)
# set the y-axis label of the current plot to be the name
# of the current color channel
axs[j, 0].set_ylabel(color)
# set the axes titles
axs[0, 0].set_title("Source")
axs[0, 1].set_title("Reference")
axs[0, 2].set_title("Matched")
# display the output plots
plt.tight_layout()
plt.show() |
the-stack_0_11211 | """About Dialog for IDLE
"""
import os
import sys
from platform import python_version, architecture
from tkinter import Toplevel, Frame, Label, Button, PhotoImage
from tkinter import SUNKEN, TOP, BOTTOM, LEFT, X, BOTH, W, EW, NSEW, E
from idlelib import textview
def build_bits():
"Return bits for platform."
if sys.platform == 'darwin':
return '64' if sys.maxsize > 2**32 else '32'
else:
return architecture()[0][:2]
class AboutDialog(Toplevel):
"""Modal about dialog for idle
"""
def __init__(self, parent, title=None, *, _htest=False, _utest=False):
"""Create popup, do not return until tk widget destroyed.
parent - parent of this dialog
title - string which is title of popup dialog
_htest - bool, change box location when running htest
_utest - bool, don't wait_window when running unittest
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
# place dialog below parent if running htest
self.geometry("+%d+%d" % (
parent.winfo_rootx()+30,
parent.winfo_rooty()+(30 if not _htest else 100)))
self.bg = "#bbbbbb"
self.fg = "#000000"
self.create_widgets()
self.resizable(height=False, width=False)
self.title(title or
f'About IDLE {python_version()} ({build_bits()} bit)')
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.ok)
self.parent = parent
self.button_ok.focus_set()
self.bind('<Return>', self.ok) # dismiss dialog
self.bind('<Escape>', self.ok) # dismiss dialog
self._current_textview = None
self._utest = _utest
if not _utest:
self.deiconify()
self.wait_window()
def create_widgets(self):
frame = Frame(self, borderwidth=2, relief=SUNKEN)
frame_buttons = Frame(self)
frame_buttons.pack(side=BOTTOM, fill=X)
frame.pack(side=TOP, expand=True, fill=BOTH)
self.button_ok = Button(frame_buttons, text='Close',
command=self.ok)
self.button_ok.pack(padx=5, pady=5)
frame_background = Frame(frame, bg=self.bg)
frame_background.pack(expand=True, fill=BOTH)
header = Label(frame_background, text='IDLE', fg=self.fg,
bg=self.bg, font=('courier', 24, 'bold'))
header.grid(row=0, column=0, sticky=E, padx=10, pady=10)
tk_patchlevel = self.tk.call('info', 'patchlevel')
ext = '.png' if tk_patchlevel >= '8.6' else '.gif'
icon = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'Icons', f'idle_48{ext}')
self.icon_image = PhotoImage(master=self._root(), file=icon)
logo = Label(frame_background, image=self.icon_image, bg=self.bg)
logo.grid(row=0, column=0, sticky=W, rowspan=2, padx=10, pady=10)
byline_text = "Python's Integrated Development\nand Learning Environment" + 5*'\n'
byline = Label(frame_background, text=byline_text, justify=LEFT,
fg=self.fg, bg=self.bg)
byline.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
email = Label(frame_background, text='email: [email protected]',
justify=LEFT, fg=self.fg, bg=self.bg)
email.grid(row=6, column=0, columnspan=2, sticky=W, padx=10, pady=0)
docs = Label(frame_background, text='https://docs.python.org/' +
python_version()[:3] + '/library/idle.html',
justify=LEFT, fg=self.fg, bg=self.bg)
docs.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
Frame(frame_background, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
pyver = Label(frame_background,
text='Python version: ' + python_version(),
fg=self.fg, bg=self.bg)
pyver.grid(row=9, column=0, sticky=W, padx=10, pady=0)
tkver = Label(frame_background, text='Tk version: ' + tk_patchlevel,
fg=self.fg, bg=self.bg)
tkver.grid(row=9, column=1, sticky=W, padx=2, pady=0)
py_buttons = Frame(frame_background, bg=self.bg)
py_buttons.grid(row=10, column=0, columnspan=2, sticky=NSEW)
self.py_license = Button(py_buttons, text='License', width=8,
highlightbackground=self.bg,
command=self.show_py_license)
self.py_license.pack(side=LEFT, padx=10, pady=10)
self.py_copyright = Button(py_buttons, text='Copyright', width=8,
highlightbackground=self.bg,
command=self.show_py_copyright)
self.py_copyright.pack(side=LEFT, padx=10, pady=10)
self.py_credits = Button(py_buttons, text='Credits', width=8,
highlightbackground=self.bg,
command=self.show_py_credits)
self.py_credits.pack(side=LEFT, padx=10, pady=10)
Frame(frame_background, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
idlever = Label(frame_background,
text='IDLE version: ' + python_version(),
fg=self.fg, bg=self.bg)
idlever.grid(row=12, column=0, sticky=W, padx=10, pady=0)
idle_buttons = Frame(frame_background, bg=self.bg)
idle_buttons.grid(row=13, column=0, columnspan=3, sticky=NSEW)
self.readme = Button(idle_buttons, text='README', width=8,
highlightbackground=self.bg,
command=self.show_readme)
self.readme.pack(side=LEFT, padx=10, pady=10)
self.idle_news = Button(idle_buttons, text='NEWS', width=8,
highlightbackground=self.bg,
command=self.show_idle_news)
self.idle_news.pack(side=LEFT, padx=10, pady=10)
self.idle_credits = Button(idle_buttons, text='Credits', width=8,
highlightbackground=self.bg,
command=self.show_idle_credits)
self.idle_credits.pack(side=LEFT, padx=10, pady=10)
# License, copyright, and credits are of type _sitebuiltins._Printer
def show_py_license(self):
"Handle License button event."
self.display_printer_text('About - License', license)
def show_py_copyright(self):
"Handle Copyright button event."
self.display_printer_text('About - Copyright', copyright)
def show_py_credits(self):
"Handle Python Credits button event."
self.display_printer_text('About - Python Credits', credits)
# Encode CREDITS.txt to utf-8 for proper version of Loewis.
# Specify others as ascii until need utf-8, so catch errors.
def show_idle_credits(self):
"Handle Idle Credits button event."
self.display_file_text('About - Credits', 'CREDITS.txt', 'utf-8')
def show_readme(self):
"Handle Readme button event."
self.display_file_text('About - Readme', 'README.txt', 'ascii')
def show_idle_news(self):
"Handle News button event."
self.display_file_text('About - NEWS', 'NEWS.txt', 'utf-8')
def display_printer_text(self, title, printer):
"""Create textview for built-in constants.
Built-in constants have type _sitebuiltins._Printer. The
text is extracted from the built-in and then sent to a text
viewer with self as the parent and title as the title of
the popup.
"""
printer._Printer__setup()
text = '\n'.join(printer._Printer__lines)
self._current_textview = textview.view_text(
self, title, text, _utest=self._utest)
def display_file_text(self, title, filename, encoding=None):
"""Create textview for filename.
The filename needs to be in the current directory. The path
is sent to a text viewer with self as the parent, title as
the title of the popup, and the file encoding.
"""
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
self._current_textview = textview.view_file(
self, title, fn, encoding, _utest=self._utest)
def ok(self, event=None):
"Dismiss help_about dialog."
self.grab_release()
self.destroy()
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_help_about', verbosity=2, exit=False)
from idlelib.idle_test.htest import run
run(AboutDialog)
|
the-stack_0_11212 | import os
import unittest
import six
from conans.paths import BUILD_INFO, CONANFILE
from conans.test.utils.tools import TestClient
from conans.util.files import mkdir
class SourceTest(unittest.TestCase):
def test_local_flow_patch(self):
# https://github.com/conan-io/conan/issues/2327
conanfile = """from conans import ConanFile, tools
from conans.tools import save
import os
class TestexportConan(ConanFile):
exports = "mypython.py"
exports_sources = "patch.patch"
def source(self):
save("hello/hello.h", "my hello header!")
patch = os.path.join(self.source_folder, "patch.patch")
self.output.info("PATCH: %s" % tools.load(patch))
header = os.path.join(self.source_folder, "hello/hello.h")
self.output.info("HEADER: %s" % tools.load(header))
python = os.path.join(self.source_folder, "mypython.py")
self.output.info("PYTHON: %s" % tools.load(python))
"""
client = TestClient()
client.save({"conanfile.py": conanfile,
"patch.patch": "mypatch",
"mypython.py": "mypython"})
client.run("source .")
self.assertIn("conanfile.py: PATCH: mypatch", client.out)
self.assertIn("conanfile.py: HEADER: my hello header!", client.out)
self.assertIn("conanfile.py: PYTHON: mypython", client.out)
client.run("source . -sf=mysrc")
self.assertIn("conanfile.py: Executing exports to", client.out)
self.assertIn("conanfile.py: PATCH: mypatch", client.out)
self.assertIn("conanfile.py: HEADER: my hello header!", client.out)
self.assertIn("conanfile.py: PYTHON: mypython", client.out)
self.assertTrue(os.path.exists(os.path.join(client.current_folder,
"mysrc", "patch.patch")))
self.assertTrue(os.path.exists(os.path.join(client.current_folder,
"mysrc", "mypython.py")))
self.assertTrue(os.path.exists(os.path.join(client.current_folder,
"mysrc", "hello/hello.h")))
def test_apply_patch(self):
# https://github.com/conan-io/conan/issues/2327
# Test if a patch can be applied in source() both in create
# and local flow
client = TestClient()
conanfile = """from conans import ConanFile
from conans.tools import load
import os
class Pkg(ConanFile):
exports_sources = "*"
def source(self):
if self.develop:
patch = os.path.join(self.source_folder, "mypatch")
self.output.info("PATCH: %s" % load(patch))
"""
client.save({"conanfile.py": conanfile,
"mypatch": "this is my patch"})
client.run("source .")
self.assertIn("PATCH: this is my patch", client.out)
client.run("source . -sf=mysrc")
self.assertIn("PATCH: this is my patch", client.out)
client.run("create . Pkg/0.1@user/testing")
self.assertIn("PATCH: this is my patch", client.out)
def test_source_warning_os_build(self):
# https://github.com/conan-io/conan/issues/2368
conanfile = '''from conans import ConanFile
class ConanLib(ConanFile):
pass
'''
client = TestClient()
client.save({CONANFILE: conanfile})
client.run("source .")
self.assertNotIn("This package defines both 'os' and 'os_build'", client.out)
def test_source_reference(self):
client = TestClient()
client.run("source lib/1.0@conan/stable", assert_error=True)
self.assertIn("'conan source' doesn't accept a reference anymore", client.out)
def test_source_with_path_errors(self):
client = TestClient()
client.save({"conanfile.txt": "contents"}, clean_first=True)
# Path with conanfile.txt
client.run("source conanfile.txt --install-folder subdir", assert_error=True)
self.assertIn(
"A conanfile.py is needed, %s is not acceptable"
% os.path.join(client.current_folder, "conanfile.txt"),
client.out)
# Path with wrong conanfile path
client.run("package not_real_dir/conanfile.py --build-folder build2 --install-folder build",
assert_error=True)
self.assertIn("Conanfile not found at %s"
% os.path.join(client.current_folder, "not_real_dir", "conanfile.py"),
client.out)
def test_source_local_cwd(self):
conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
def source(self):
self.output.info("Running source!")
self.output.info("cwd=>%s" % os.getcwd())
'''
client = TestClient()
client.save({CONANFILE: conanfile})
subdir = os.path.join(client.current_folder, "subdir")
os.mkdir(subdir)
client.run("install . --install-folder subdir")
client.run("source . --install-folder subdir --source-folder subdir")
self.assertIn("conanfile.py (Hello/0.1): Configuring sources", client.out)
self.assertIn("conanfile.py (Hello/0.1): cwd=>%s" % subdir, client.out)
def test_local_source_src_not_exist(self):
conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
def source(self):
pass
'''
client = TestClient()
client.save({CONANFILE: conanfile})
# Automatically created
client.run("source conanfile.py --source-folder=src")
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "src")))
def test_build_folder_no_exists_crash(self):
conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
def source(self):
pass
'''
client = TestClient()
client.save({CONANFILE: conanfile})
# Automatically created
client.run("source ./conanfile.py --install-folder=missing_folder", assert_error=True)
self.assertIn("Specified info-folder doesn't exist", client.out)
def test_build_folder_reading_infos(self):
conanfile = '''
import os
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
def package_info(self):
self.cpp_info.cxxflags.append("FLAG")
self.env_info.MYVAR = "foo"
self.user_info.OTHERVAR = "bar"
'''
client = TestClient()
client.save({CONANFILE: conanfile})
client.run("export . conan/testing")
conanfile = '''
import os
from conans import ConanFile
from conans.util.files import save
class ConanLib(ConanFile):
requires="Hello/0.1@conan/testing"
def source(self):
assert(os.getcwd() == self.source_folder)
self.output.info("FLAG=%s" % self.deps_cpp_info["Hello"].cxxflags[0])
self.output.info("MYVAR=%s" % self.deps_env_info["Hello"].MYVAR)
self.output.info("OTHERVAR=%s" % self.deps_user_info["Hello"].OTHERVAR)
self.output.info("CURDIR=%s" % os.getcwd())
'''
# First, failing source()
client.save({CONANFILE: conanfile}, clean_first=True)
build_folder = os.path.join(client.current_folder, "build")
src_folder = os.path.join(client.current_folder, "src")
mkdir(build_folder)
mkdir(src_folder)
client.run("source . --install-folder='%s' --source-folder='%s'"
% (build_folder, src_folder),
assert_error=True)
self.assertIn("self.deps_cpp_info not defined.", client.out)
client.run("install . --install-folder build --build ")
client.run("source conanfile.py --install-folder='%s' --source-folder='%s'"
% (build_folder, src_folder))
self.assertIn("FLAG=FLAG", client.out)
self.assertIn("MYVAR=foo", client.out)
self.assertIn("OTHERVAR=bar", client.out)
self.assertIn("CURDIR=%s" % src_folder, client.out)
def test_repeat_args_fails(self):
conanfile = '''
from conans import ConanFile
class ConanLib(ConanFile):
def source(self):
pass
'''
client = TestClient()
client.save({CONANFILE: conanfile})
client.run("source ./conanfile.py --source-folder sf")
with six.assertRaisesRegex(self, Exception, "Command failed"):
client.run("source . --source-folder sf --source-folder sf")
with six.assertRaisesRegex(self, Exception, "Command failed"):
client.run("source conanfile.py --source-folder sf --install-folder if "
"--install-folder rr")
def test_local_source(self):
conanfile = '''
from conans import ConanFile
from conans.util.files import save
class ConanLib(ConanFile):
def source(self):
self.output.info("Running source!")
err
save("file1.txt", "Hello World")
'''
# First, failing source()
client = TestClient()
client.save({CONANFILE: conanfile,
BUILD_INFO: ""})
client.run("source .", assert_error=True)
self.assertIn("conanfile.py: Running source!", client.out)
self.assertIn("ERROR: conanfile.py: Error in source() method, line 9", client.out)
# Fix the error and repeat
client.save({CONANFILE: conanfile.replace("err", "")})
client.run("source .")
self.assertIn("conanfile.py: Configuring sources in", client.out)
self.assertIn("conanfile.py: Running source!", client.out)
self.assertEqual("Hello World", client.load("file1.txt"))
|
the-stack_0_11213 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RS4vectors(RPackage):
"""The S4Vectors package defines the Vector and List virtual classes and
a set of generic functions that extend the semantic of ordinary
vectors and lists in R. Package developers can easily implement
vector-like or list-like objects as concrete subclasses of Vector or
List. In addition, a few low-level concrete subclasses of general
interest (e.g. DataFrame, Rle, and Hits) are implemented in the
S4Vectors package itself (many more are implemented in the IRanges
package and in other Bioconductor infrastructure packages)."""
homepage = "https://bioconductor.org/packages/S4Vectors/"
git = "https://git.bioconductor.org/packages/S4Vectors.git"
version('0.18.3', commit='d6804f94ad3663828440914920ac933b934aeff1')
version('0.16.0', commit='00fec03fcbcb7cff37917fab0da28d91fdf9dc3d')
version('0.14.7', commit='40af17fe0b8e93b6a72fc787540d2961773b8e23')
depends_on('[email protected]:', type=('build', 'run'), when='@0.14.7')
depends_on('[email protected]:', type=('build', 'run'), when='@0.16.0:')
depends_on('[email protected]:3.4.9', when='@0.14.7', type=('build', 'run'))
depends_on('[email protected]:3.5.9', when='@0.18.3', type=('build', 'run'))
|
the-stack_0_11214 | import requests
headers = {"OCS-APIRequest": "true"}
# The API is implemented as documented here: https://deck.readthedocs.io/en/latest/API/
class DeckAPI:
def __init__(self, url, auth):
self.url = url
self.auth = auth
def get(self, route):
response = requests.get(
f"{self.url}{route}",
auth=self.auth,
headers=headers,
)
if response.status_code != requests.codes.ok:
print(f"The response was: {response.content}")
response.raise_for_status()
return response
def post(self, route, json):
response = requests.post(
f"{self.url}{route}",
auth=self.auth,
json=json,
headers=headers,
)
if response.status_code != requests.codes.ok:
print(f"The response was: {response.content}")
response.raise_for_status()
return response
def postFiles(self, route, data, files):
response = requests.post(
f"{self.url}{route}",
auth=self.auth,
data=data,
files=files,
headers=headers,
)
if response.status_code != requests.codes.ok:
print(f"The response was: {response.content}")
response.raise_for_status()
return response
def put(self, route, json):
response = requests.put(
f"{self.url}{route}",
auth=self.auth,
json=json,
headers=headers,
)
if response.status_code != requests.codes.ok:
print(f"The response was: {response.content}")
response.raise_for_status()
return response
def delete(self, route):
response = requests.delete(
f"{self.url}{route}",
auth=self.auth,
headers=headers,
)
if response.status_code != requests.codes.ok:
print(f"The response was: {response.conten}")
response.raise_for_status()
return response
def getBoards(self):
return self.get(f"/index.php/apps/deck/api/v1.0/boards").json()
def getBoardDetails(self, boardId):
return self.get(f"/index.php/apps/deck/api/v1.0/boards/{boardId}").json()
def getStacks(self, boardId):
return self.get(f"/index.php/apps/deck/api/v1.0/boards/{boardId}/stacks").json()
def getStacksArchived(self, boardId):
return self.get(
f"/index.php/apps/deck/api/v1.0/boards/{boardId}/stacks/archived"
).json()
def createBoard(self, title, color):
board = self.post(
"/index.php/apps/deck/api/v1.0/boards", {"title": title, "color": color}
).json()
boardId = board["id"]
# remove all default labels
for label in board["labels"]:
labelId = label["id"]
self.delete(
f"/index.php/apps/deck/api/v1.0/boards/{boardId}/labels/{labelId}"
)
return board
def createLabel(self, title, color, boardId):
return self.post(
f"/index.php/apps/deck/api/v1.0/boards/{boardId}/labels",
{"title": title, "color": color},
).json()
def createStack(self, title, order, boardId):
return self.post(
f"/index.php/apps/deck/api/v1.0/boards/{boardId}/stacks",
{"title": title, "order": order},
).json()
def createCard(self, title, ctype, order, description, duedate, boardId, stackId):
return self.post(
f"/index.php/apps/deck/api/v1.0/boards/{boardId}/stacks/{stackId}/cards",
{
"title": title,
"type": ctype,
"order": order,
"description": description,
"duedate": duedate.isoformat() if duedate is not None else None,
},
).json()
def assignLabel(self, labelId, cardId, boardId, stackId):
self.put(
f"/index.php/apps/deck/api/v1.0/boards/{boardId}/stacks/{stackId}/cards/{cardId}/assignLabel",
{"labelId": labelId},
)
def archiveCard(self, card, boardId, stackId):
card['archived'] = True
self.put(
f"/index.php/apps/deck/api/v1.0/boards/{boardId}/stacks/{stackId}/cards/{card['id']}",
card,
)
def commentOnCard(self, cardId, message, parentId=None):
self.post(
f"/ocs/v2.php/apps/deck/api/v1.0/cards/{cardId}/comments",
{"message": message, "parentId": parentId},
)
def attachToCard(self, boardId, stackId, cardId, fileName, fileObject, mimeType):
self.postFiles(
f"/index.php/apps/deck/api/v1.0/boards/{boardId}/stacks/{stackId}/cards/{cardId}/attachments",
{"type": "deck_file"},
{"file": (fileName, fileObject, mimeType)},
)
|
the-stack_0_11216 | """Decorators are higher order functions that accept functions and return another function that executes the original"""
import datetime
import functools
def check_value(func):
"""checking value parameter decorator - function that returns a function."""
def do_checking(name, value):
print("decorate: we can do anything we like here, even changing the function parameters or anything")
if value is None or value == 0: # decorate original function
value = 4
return func(name, value)
# return function that calls original function parameter
return do_checking
def fix_name(func):
"""ensure string is correct capitalised."""
def do_changes(name, value):
print("decorate: we can fix strings through capitalization")
name = name.capitalize()
return func(name, value)
return do_changes
def negate_value(func):
"""negate value decorator."""
def do_negation(name, value):
print("decorate: we can change return values by negating value")
return -value
return do_negation
def my_function(name, value):
"""this is our function we want to decorate."""
print("name:", name, "value:", value)
return
print("\nwe can stack functions so one will call the other...")
my_fixed_name_function = fix_name(my_function) # a way to create a decorated version of function
my_value_checked_and_fixed_name_function = check_value(my_fixed_name_function)
# original my_function has been decorated
my_value_checked_and_fixed_name_function("hello world!", None)
# this decorator is called first
@check_value
@fix_name
@negate_value # you can see this as series of function calls with a function as parameter
def my_decorated_function(name, value): # ...check_value(fix_name(negate_value(my_decorated_function)))
"""my original function."""
print("name:", name, "value:", value)
return value
print("\nwe can use the @symbol to simplify decoration of a function...")
print("my_decorated_function.__name__ =", my_decorated_function.__name__) # not what we expected
ret_value = my_decorated_function("hello world!", 0)
print("ret_value from my_decorated_function =", ret_value) # check value decorator used before negate_value
def my_general_capitalize_decorator(func):
def capitalise_func(*args, **kwargs):
args = tuple([x.capitalize() if isinstance(x, str) else x for x in args])
kwargs = {k: v.capitalize() if isinstance(v, str) else v for k, v in kwargs.items()}
func(*args, **kwargs)
return capitalise_func
@my_general_capitalize_decorator
def my_function(name, age, surname, middle_name):
print("name:", name, middle_name, surname, f"({age})")
@my_general_capitalize_decorator
def my_other_function(place, time):
print("meet me at", place, "at", time)
print("\nwe can use args and kwargs to make decorators suitable for different functions and parameters...")
my_function('bob', 34, 'smith', middle_name='reginald')
my_other_function('underneath the arches', datetime.datetime.now())
class SomeRandomClass:
def __init__(self):
pass
@my_general_capitalize_decorator
def a_method(self, name, age, surname, middle_name):
print("class name:", name, middle_name, surname, f"({age})")
print("or class methods...")
my_instance = SomeRandomClass()
my_instance.a_method('bob', 34, 'smith', middle_name='reginald')
print("or even a lambda...")
my_general_capitalize_decorator(lambda x, y: print(x, y))('hello', 'bobby')
def my_decorator(func):
@functools.wraps(func) # note, you need to send func parameter in this case, wraps accepts
def do_decoration(): # ...func as a parameter
print("hello from decorator!")
func()
return do_decoration
@my_decorator
def my_function():
"""my_function doc string"""
print("hello from function!")
print("\nwraps() decorator from functools can be used to preserve original name and docstring...")
my_function()
print("my_function.__name__ =", my_function.__name__)
print("my_function.__doc__ =", my_function.__doc__)
print("#################################")
def my_simple_decorator(func):
print("calling function", func.__name__) # this will be printed when function is decorated not..
return func # ..when the function is called
@my_simple_decorator # note that my_simple_decorator is applied here
def my_function():
return 'hello from my_function'
print("\ndecorators can be very simple for debugging or registering...")
print(my_function())
print("#################################")
def my_param_decorator(a_string, an_integer): # functool.wraps() takes a function object as a parameter
print("my_param_decorator")
def my_parameterised_decorator(func):
print("my_parameterised_decorator")
def do_decoration(*args, **kwargs):
print("do_decoration:", a_string)
print(f"..executing {an_integer} times")
for i in range(an_integer):
func(*args, **kwargs)
return do_decoration
return my_parameterised_decorator
@my_param_decorator('decorator parameter', 2) # my_param_decorator and my_parameterised_decorator called here
def my_function():
print("in my_function")
print("\nwe can pass parameters to a decorator using an extra function wrapper...")
my_function() # do_decoration is done here
print("#################################")
# thanks to https://realpython.com/primer-on-python-decorators/
def my_param_decorator(_func=None, *, a_string=None, an_integer=1): # * means all parameters after are keyword only
print("my_param_decorator")
def my_parameterised_decorator(func):
print("my_parameterised_decorator")
def do_decoration(*args, **kwargs):
do_decoration.number_decorations += 1 # decorator state update
print("do_decoration:", a_string)
print(f"..executing {an_integer} times")
for i in range(an_integer):
func(*args, **kwargs)
do_decoration.number_decorations = 0 # we can add attributes as usual for state
return do_decoration
if _func is None:
print("_func is None so parameters were specified")
print("a_string =", a_string, "an_integer =", an_integer)
return my_parameterised_decorator # return my_parameterised_decorator function as object
else:
print("_func is", _func)
print("...so no parameters were specified, calling my_parameterised_decorator...!")
_decorator_func = my_parameterised_decorator(_func)
print("called my_parameterised_decorator to get decorator function")
return _decorator_func # call function and returns the resulting function object
# ...do_decoration
@my_param_decorator # so this is effectively my_param_decorator(my_function) so _func = my_function
def my_function():
print("in my_function")
print("\ncalling function with non-parameterised decorator...")
my_function()
# my_function is actually the decorated function do_decoration so we can access its attributes
print("number of decorations:", my_function.number_decorations)
print("#################################")
@my_param_decorator(an_integer=2) # have parameters so _func = None so this is effectively...
def my_function(): # ...my_param_decorator(an_integer=2)(my_function)
print("in my_function")
print("\ncalling function with parameterised decorator...")
my_function()
my_function()
print("number of decorations:", my_function.number_decorations)
print("#################################")
|
the-stack_0_11217 | # -*- coding: utf-8 -*-
'''
Manage Grafana v4.0 users
.. versionadded:: 2017.7.0
:configuration: This state requires a configuration profile to be configured
in the minion config, minion pillar, or master config. The module will use
the 'grafana' key by default, if defined.
Example configuration using basic authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_user: admin
grafana_password: admin
grafana_timeout: 3
Example configuration using token based authentication:
.. code-block:: yaml
grafana:
grafana_url: http://grafana.localhost
grafana_token: token
grafana_timeout: 3
.. code-block:: yaml
Ensure foobar user is present:
grafana4_user.present:
- name: foobar
- password: mypass
- email: "foobar@localhost"
- fullname: Foo Bar
- is_admin: true
'''
from __future__ import absolute_import
from salt.ext.six import string_types
from salt.utils import dictupdate
from salt.utils.dictdiffer import deep_diff
def __virtual__():
'''Only load if grafana4 module is available'''
return 'grafana4.get_user' in __salt__
def present(name,
password,
email,
is_admin=False,
fullname=None,
theme=None,
profile='grafana'):
'''
Ensure that a user is present.
name
Name of the user.
password
Password of the user.
email
Email of the user.
is_admin
Optional - Set user as admin user. Default: False
fullname
Optional - Full name of the user.
theme
Optional - Selected theme of the user.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
user = __salt__['grafana4.get_user'](name, profile)
create = not user
if create:
__salt__['grafana4.create_user'](
login=name,
password=password,
email=email,
name=fullname,
profile=profile)
user = __salt__['grafana4.get_user'](name, profile)
ret['changes']['new'] = user
user_data = __salt__['grafana4.get_user_data'](user['id'])
data = _get_json_data(login=name, email=email, name=fullname, theme=theme,
defaults=user_data)
if data != _get_json_data(login=None, email=None, name=None, theme=None,
defaults=user_data):
__salt__['grafana4.update_user'](user['id'], profile=profile, **data)
dictupdate.update(
ret['changes'], deep_diff(
user_data, __salt__['grafana4.get_user_data'](user['id'])))
if user['isAdmin'] != is_admin:
__salt__['grafana4.update_user_permissions'](
user['id'], isGrafanaAdmin=is_admin, profile=profile)
dictupdate.update(ret['changes'], deep_diff(
user, __salt__['grafana4.get_user'](name, profile)))
ret['result'] = True
if create:
ret['changes'] = ret['changes']['new']
ret['comment'] = 'New user {0} added'.format(name)
else:
if ret['changes']:
ret['comment'] = 'User {0} updated'.format(name)
else:
ret['changes'] = None
ret['comment'] = 'User {0} already up-to-date'.format(name)
return ret
def absent(name, profile='grafana'):
'''
    Ensure that a user is absent.
name
Name of the user to remove.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
user = __salt__['grafana4.get_user'](name, profile)
if user:
orgs = __salt__['grafana4.get_user_orgs'](user['id'], profile=profile)
__salt__['grafana4.delete_user'](user['id'], profile=profile)
for org in orgs:
if org['name'] == user['email']:
# Remove entire Org in the case where auto_assign_org=false:
# When set to false, new users will automatically cause a new
# organization to be created for that new user (the org name
# will be the email)
__salt__['grafana4.delete_org'](org['orgId'], profile=profile)
else:
__salt__['grafana4.delete_user_org'](
user['id'], org['orgId'], profile=profile)
else:
ret['result'] = True
ret['comment'] = 'User {0} already absent'.format(name)
return ret
ret['result'] = True
ret['changes'][name] = 'Absent'
ret['comment'] = 'User {0} was deleted'.format(name)
return ret
def _get_json_data(defaults=None, **kwargs):
if defaults is None:
defaults = {}
for k, v in kwargs.items():
if v is None:
kwargs[k] = defaults.get(k)
return kwargs
|
the-stack_0_11218 | # Natural Language Toolkit: Dependency Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Kepa Sarasola <[email protected]>
# Iker Manterola <[email protected]>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import codecs
from cnltk.parse import DependencyGraph
from cnltk.tokenize import *
from cnltk.corpus.reader.util import *
from cnltk.corpus.reader.api import *
class DependencyCorpusReader(SyntaxCorpusReader):
def __init__(self, root, fileids, encoding='utf8',
word_tokenizer=TabTokenizer(),
sent_tokenizer=RegexpTokenizer('\n', gaps=True),
para_block_reader=read_blankline_block):
CorpusReader.__init__(self, root, fileids, encoding)
#########################################################
def raw(self, fileids=None):
"""
:return: the given file(s) as a single string.
:rtype: str
"""
result = []
for fileid, encoding in self.abspaths(fileids, include_encoding=True):
if isinstance(fileid, PathPointer):
result.append(fileid.open(encoding=encoding).read())
else:
with codecs.open(fileid, "r", encoding) as fp:
result.append(fp.read())
return concat(result)
def words(self, fileids=None):
return concat([DependencyCorpusView(fileid, False, False, False, encoding=enc)
for fileid, enc in self.abspaths(fileids, include_encoding=True)])
def tagged_words(self, fileids=None):
return concat([DependencyCorpusView(fileid, True, False, False, encoding=enc)
for fileid, enc in self.abspaths(fileids, include_encoding=True)])
def sents(self, fileids=None):
return concat([DependencyCorpusView(fileid, False, True, False, encoding=enc)
for fileid, enc in self.abspaths(fileids, include_encoding=True)])
def tagged_sents(self, fileids=None):
return concat([DependencyCorpusView(fileid, True, True, False, encoding=enc)
for fileid, enc in self.abspaths(fileids, include_encoding=True)])
def parsed_sents(self, fileids=None):
sents=concat([DependencyCorpusView(fileid, False, True, True, encoding=enc)
for fileid, enc in self.abspaths(fileids, include_encoding=True)])
return [DependencyGraph(sent) for sent in sents]
class DependencyCorpusView(StreamBackedCorpusView):
    _DOCSTART = '-DOCSTART- -DOCSTART- O\n'  # marks the start of a document
def __init__(self, corpus_file, tagged, group_by_sent, dependencies,
chunk_types=None, encoding='utf8'):
self._tagged = tagged
self._dependencies = dependencies
self._group_by_sent = group_by_sent
self._chunk_types = chunk_types
StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
def read_block(self, stream):
# Read the next sentence.
sent = read_blankline_block(stream)[0].strip()
# Strip off the docstart marker, if present.
if sent.startswith(self._DOCSTART):
sent = sent[len(self._DOCSTART):].lstrip()
# extract word and tag from any of the formats
if not self._dependencies:
lines = [line.split('\t') for line in sent.split('\n')]
if len(lines[0]) == 3 or len(lines[0]) == 4:
sent = [(line[0], line[1]) for line in lines]
elif len(lines[0]) == 10:
sent = [(line[1], line[4]) for line in lines]
else:
raise ValueError('Unexpected number of fields in dependency tree file')
# discard tags if they weren't requested
if not self._tagged:
sent = [word for (word, tag) in sent]
# Return the result.
if self._group_by_sent:
return [sent]
else:
return list(sent)
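# A minimal usage sketch (not part of the original module). The corpus root
# and file name are placeholders for a corpus of blank-line separated
# sentences in one of the column formats handled above.
if __name__ == '__main__':
    reader = DependencyCorpusReader('/path/to/corpus', ['sample.conll'])
    print(reader.words()[:10])        # plain tokens
    print(reader.tagged_sents()[0])   # (word, tag) pairs of the first sentence
    print(reader.parsed_sents()[0])   # DependencyGraph of the first sentence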
|
the-stack_0_11222 | """
Print command.
Print information about the wily cache and what is in the index.
"""
import tabulate
from wily import logger, format_date, format_revision, MAX_MESSAGE_WIDTH
from wily.config import DEFAULT_GRID_STYLE
from wily.state import State
def index(config, include_message=False):
"""
Show information about the cache and runtime.
:param config: The wily configuration
:type config: :namedtuple:`wily.config.WilyConfig`
:param include_message: Include revision messages
:type include_message: ``bool``
"""
state = State(config=config)
logger.debug("Running show command")
logger.info("--------Configuration---------")
logger.info(f"Path: {config.path}")
logger.info(f"Archiver: {config.archiver}")
logger.info(f"Operators: {config.operators}")
logger.info("")
logger.info("-----------History------------")
data = []
for archiver in state.archivers:
for rev in state.index[archiver].revisions:
if include_message:
data.append(
(
format_revision(rev.revision.key),
rev.revision.author_name,
rev.revision.message[:MAX_MESSAGE_WIDTH],
format_date(rev.revision.date),
)
)
else:
data.append(
(
format_revision(rev.revision.key),
rev.revision.author_name,
format_date(rev.revision.date),
)
)
if include_message:
headers = ("Revision", "Author", "Message", "Date")
else:
headers = ("Revision", "Author", "Date")
print(
tabulate.tabulate(
headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE
)
)
|
the-stack_0_11223 | """
Test the pre-trained autoencoder model with test trajectory data.
"""
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import ae_utilities as aeu
import dataset_defines as dd
import numpy as np
import os
abspath = os.path.abspath(__file__)
dir_name = os.path.dirname(abspath)
dataset_name = dir_name[dir_name.rfind('/')+1:] + '_gt_data.csv'
dataset_file_path = os.path.join(dir_name + '/data', dataset_name)
abnormal_name = dir_name[dir_name.rfind('/')+1:] + '_gt_real_abnormal_2.csv'
abnormal_file_path = os.path.join(dir_name + '/data', abnormal_name)
def get_comparison_results(result_file):
"""
Returns a array of strings in the following format:
[[label_0, Size_N, size_A, TPR, TNR, TPR, TNR, TPR, TNR, TPR, TNR],
[label_1, Size_N, size_A, TPR, TNR, TPR, TNR, TPR, TNR, TPR, TNR],
...
[total, Size_N, size_A, TPR, TNR, TPR, TNR, TPR, TNR, TPR, TNR]]
"""
# Change the directory
or_dir_name = os.getcwd()
os.chdir(dir_name)
list_of_model_names = ['one_class_svm','isolation_forest','single_ae','deep_ae']
# Extract trajectories and export data to array
dataset = np.genfromtxt(dataset_file_path, delimiter=',')
# Ignore first column representing object_id
dataset = dataset[:,1:]
# Generate abnormal data
abnormal_data = np.genfromtxt(abnormal_file_path, delimiter=',')
abnormal_data = abnormal_data[:,1:]
    # list of object labels: 3 means all objects; iteration order is Cars, Peds, Bike, All
list_labels = [1,0,2,3]
label_names = ['Peds','Cars','Bike','All']
# Get the number of labels
n_labels = 1
for object_label in list_labels:
if object_label != 3:
if len(dataset[dataset[:,0] == object_label]) > 0:
n_labels += 1
row_string = r'\multirow{{{}}}{{*}}{{Rouen}}'.format(n_labels)
is_first = True
for object_label in list_labels:
print('====================================== {} ======================================'.
format(label_names[object_label]))
sub_normal_data = dataset
sub_abnormal_data = abnormal_data
if object_label != 3:
sub_normal_data = sub_normal_data[sub_normal_data[:,0] == object_label]
sub_abnormal_data = sub_abnormal_data[sub_abnormal_data[:,0] == object_label]
if len(sub_normal_data) == 0 or len(sub_abnormal_data) == 0:
continue
if is_first:
result_file.write(r'\multicolumn{{1}}{{c|}}{{{}}} & '.format(row_string))
is_first = False
else:
result_file.write(r'\multicolumn{1}{c|}{} & ')
# Get the number of samples
size_n = sub_normal_data.shape[0]
size_a = sub_abnormal_data.shape[0]
result_file.write(r'{} & {} & {} '.format(label_names[object_label], size_n, size_a))
for model_name in list_of_model_names:
print('================================== {} =================================='.format(model_name))
# Files containing info of the model and threshold value
trained_model_path = 'model/' + model_name + '/'
trained_model_summary_results_filename = 'results/' + model_name + '/summary_results.csv'
# Ref.: https://stackoverflow.com/questions/29451030/why-doesnt-np-genfromtxt-remove-header-while-importing-in-python
with open(trained_model_summary_results_filename, 'r') as results:
line = results.readline()
header = [e for e in line.strip().split(',') if e]
results_array = np.genfromtxt(results, names=header, dtype=None, delimiter=',')
TPR_list = []
TNR_list = []
for i in range(aeu.repeat_number):
print('======================== Iteration {} ========================'.format(i))
if model_name == 'single_ae' or model_name == 'deep_ae':
# Refer to the deep_ae_summary_results.csv
threshold_value = results_array['threshold_value'][i]
else:
threshold_value = 0
# Test normal data
TNR = aeu.test_trained_model(test_data=sub_normal_data,
clf_name=model_name,
model_dir_path=trained_model_path,
iteration_number=i,
is_abnormal=False,
threshold_value=threshold_value)
# Test abnormal data
TPR = aeu.test_trained_model(test_data=sub_abnormal_data,
clf_name=model_name,
model_dir_path=trained_model_path,
iteration_number=i,
is_abnormal=True,
threshold_value=threshold_value)
# Compute TP, TN, FP, FN
#TP = abnormal_ratio
#TN = normal_ratio
#FP = 1 - TN
#FN = 1 - TP
# Compute TPR and TNR
#TPR = TP / (TP + FN) = abnormal_ratio
#TNR = TN / (FP + TN) = normal_ratio
TPR_list.append(int(TPR*100))
TNR_list.append(int(TNR*100))
output_string = '\nTPR = {0:.2f}% and TNR = {1:.2f}%'.format(TPR*100, TNR*100)
print(output_string)
print('==============================================================')
# Get the best one that gives the max value of TPR + TNR
TPR_list = np.array(TPR_list)
TNR_list = np.array(TNR_list)
best_index = np.argmax(TPR_list + TNR_list)
TPR_best = TPR_list[best_index]
TNR_best = TNR_list[best_index]
is_TPR_best = (TPR_best == np.max(TPR_list))
is_TNR_best = (TNR_best == np.max(TNR_list))
if is_TPR_best:
TPR_string = r'\textbf{{{}}}'.format(TPR_best)
else:
TPR_string = str(TPR_best)
if is_TNR_best:
TNR_string = r'\textbf{{{}}}'.format(TNR_best)
else:
TNR_string = str(TNR_best)
result_file.write(r'& {} & {} '.format(TPR_string, TNR_string))
result_file.write(r'\\' + '\n')
# Change the directory back to the initial one
os.chdir(or_dir_name)
|
the-stack_0_11225 | import requests
import random
from time import sleep
from urllib.parse import urlparse as parsy
bad = '\033[91m[-]\033[0m'
user_agents = ['Mozilla/5.0 (X11; Linux i686; rv:60.0) Gecko/20100101 Firefox/60.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36 OPR/43.0.2442.991']
def make_request(url, param_data, method, cookie): #The main function which actually makes contact with the target
headers = {
'Host' : parsy(url).hostname,
'User-Agent' : random.choice(user_agents),
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'deflate',
'DNT' : '1',
'Connection' : 'close'}
try:
if method == 'GET':
resp = requests.get(url + param_data, cookies=cookie, headers=headers) #Makes request
return resp.text #Reads the output
elif method == 'POST':
resp = requests.post(url, data=param_data, cookies=cookie, headers=headers) #Makes request
return resp.text #Reads the output
except:
print('\n%s Target isn\'t responding properly.' % bad)
quit()
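# A minimal usage sketch (not part of the original module). The target URL
# and query string are placeholders; an empty dict means no cookies are sent.
if __name__ == '__main__':
    html = make_request('http://testphp.example.com/listproducts.php', '?cat=1', 'GET', {})
    print('%d characters received' % len(html))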
|
the-stack_0_11226 | # importing important librarires
import itertools
import numpy as np
import torch
import pydicom
from PIL import Image
from torch.utils.data import DataLoader
import pandas as pd
def load_scan(path):
"""
This function is used to load the MRI scans. It converts the scan into a numpy array
Parameters:
path (str): The path to the folder containing the MRI scans of all patients
Returns:
np_image (numpy.ndarray): A numpy array representing the MRI scan
"""
# slices = [pydicom.read_file(path + '/' + s) for s in os.listdir(path)]
# slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
# try:
# slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
# except Exception as e:
# print("Exception raised: ", e)
# slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
# for s in slices:
# s.SliceThickness = slice_thickness
# image = np.stack([s.pixel_array for s in slices])
image = pydicom.read_file(path)
# print(type(image))
image = image.pixel_array.astype(np.int16)
np_image = np.array(image, dtype=np.int16)
# print("scan shape: ", np_image.shape)
return np_image
def load_seg(path):
"""
This function is used to load the segmented image. It returns the image in a numpy array
Parameters:
path (str): The directory where all the segmented images corresponding to one patient are stored
Returns:
seg_data (numpy.ndarray): A list of numpy arrays corresponding to segmented images
"""
# seg_paths = []
# if path[-1] != '/':
# path = path + '/'
# for seg in os.listdir(path):
# seg_paths.append(path + seg)
# seg_paths.sort()
seg = Image.open(path)
seg_data = np.asarray(seg)
seg_data = np.array(seg_data)
# for seg_path in seg_paths:
# seg = Image.open(seg_path)
# seg_data.append(np.asarray(seg))
# print("seg shape: ", seg_data.shape)
### This block of code was to list the different intensity values
# for arr in seg_data:
# for elem in arr:
# if (elem not in seg_val):
# seg_val.append(elem)
return seg_data
def resize_data(data, new_dimensions):
'''
This function resizes a numpy array.
TO DO: method used for interpolation?
Parameters:
data (numpy.ndarray): a numpy array representing an MRI scan
new_dimensions (list): a list containing the dimensions of the new scan [z,x,y]
Returns:
new_data (numpy.ndarray): a numpy array with the desired dimensions
'''
initial_size_x = data.shape[1]
initial_size_y = data.shape[2]
initial_size_z = data.shape[0]
new_size_z = new_dimensions[0]
new_size_x = new_dimensions[1]
new_size_y = new_dimensions[2]
delta_x = initial_size_x / new_size_x
delta_y = initial_size_y / new_size_y
delta_z = initial_size_z / new_size_z
new_data = np.zeros((new_size_z, new_size_x, new_size_y))
for x, y, z in itertools.product(range(new_size_x),
range(new_size_y),
range(new_size_z)):
new_data[z][x][y] = data[int(z * delta_z)][int(x * delta_x)][int(y * delta_y)]
return new_data
def padSlice(values):
'''
This function adds padding to images. The final size of the image is 320x320
Args:
values (np.ndarray): The image in the form of a numpy array
Returns:
values (np.ndarray): The padded image
'''
# print(values.shape)
target_shape = np.array((320, 320))
pad = ((target_shape - values.shape) / 2).astype("int")
values = np.pad(values, ((pad[0], pad[0]), (pad[1], pad[1])), mode="constant", constant_values=0)
return values
def findOrgan(img, seg, organ):
'''
This function is used to locate a specific organ in an image.
Args:
img (np.ndarray): The input image
seg (np.ndarray): The segmented image
organ (str): The organ that we want to locate. The following key is used:
rk: right kidney
lk: left kidney
lv: liver
sp: spleen
Returns:
img (np.ndarray): original image ---> should not be returned
new_seg (np.ndarray): the segmented image with only the selected organ segmented
'''
if organ == 'rk':
value = 126
elif organ == 'lk':
value = 189
elif organ == 'lv':
value = 63
elif organ == 'sp':
value = 252
else:
print("Wrong organ selected.")
print("Right kidney: rk \nLeft kidney: lk \nLiver: lv \nSpleen: sp")
new_seg = np.zeros(seg.shape)
new_img = np.zeros(img.shape)
return new_img, new_seg
new_seg = np.zeros(seg.shape)
new_img = np.zeros(img.shape)
indices = np.where(seg == value) # tuple of 2 arrays [i0,i1,...,in], [j0,j1,...,jn], where seg[i][j] == value
for i in range(len(indices[0])):
row = indices[0][i]
col = indices[1][i]
# new_img[row][col] = img[row][col]
new_seg[row][col] = 1
return img, new_seg
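# A minimal usage sketch for the loading helpers above (not part of the
# original module). The DICOM and segmentation paths are placeholders;
# 'lv' selects the liver as documented in findOrgan.
def _demo_load_and_mask(dcm_path="patient01/slice_050.dcm",
                        seg_path="patient01/seg_050.png"):
    img = load_scan(dcm_path)     # 2D pixel array of a single DICOM slice
    seg = load_seg(seg_path)      # matching segmentation image
    img, liver_mask = findOrgan(img, seg, 'lv')
    return padSlice(img), padSlice(liver_mask)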
def check_accuracy(loader, model, loss_fn, device="cuda"):
'''
This function is used to check the accuracy of the model
Args:
loader (torch.utils.data.DataLoader): The dataloader that is being used
model (UNET): The model that is being used
loss_fn (): The loss function
device: CPU or CUDA
Returns:
loss (float): The total loss for the batch
dice_score (float): The average dice coefficient for the batch
'''
num_correct = 0
num_pixels = 0
dice_score = 0
loss = 0
model.eval()
d1 = 0
# with torch.no_grad():
# for x, y in loader:
# # print("x: ", x.shape)
# # print("y: ", y.shape)
# x = x.unsqueeze(1).to(device)
# # print("x: ", x.shape)
# y = y.unsqueeze(1).to(device)
# # print("mo la")
# preds = torch.sigmoid(model(x))
# preds = (preds > 0.5).float()
# loss = loss_fn.forward(preds,y)
# num_correct += (preds == y).sum()
# num_pixels += torch.numel(preds)
with torch.no_grad():
for x, y in loader:
x = x.unsqueeze(1).to(device)
y = y.unsqueeze(1).to(device).float()
preds = torch.sigmoid(model(x))
preds = (preds > 0.5).float()
# print(type(preds))
num_correct += (preds == y).sum()
num_pixels += torch.numel(preds)
# dice_score += (2 * (preds * y).sum() + 1) / (
# (preds + y).sum() + 1
# )
loss += loss_fn(preds,y)
inputs = preds.view(-1)
targets = y.view(-1)
intersection = (inputs * targets).sum()
dice = (2. * intersection + 1) / (inputs.sum() + targets.sum() + 1)
d1 += dice
print(
f"Got {num_correct}/{num_pixels} with acc {num_correct/num_pixels*100:.2f}"
)
loss = loss.cpu()
d1 = d1.cpu()
# print(f"Dice score: {dice_score/len(loader)}")
print(f"Dice score: {d1 / len(loader)}")
model.train()
return loss, d1/len(loader)
def save_checkpoint(state, filename="my_checkpoint2liver.pth.tar"):
print("=> Saving checkpoint")
torch.save(state, filename)
def load_checkpoint(checkpoint, model):
print("=> Loading checkpoint")
model.load_state_dict(checkpoint["state_dict"])
def get_loaders(train_ds, val_ds, b_size):
'''
This function creates the train and validation loaders with the specified batch size
Args:
train_ds (SliceDataset): The training dataset
val_ds (SliceDataset): The validation dataset
b_size: The desired batch size
Returns:
train_dataloader (torch.utils.data.DataLoader): The dataloader for the training set
val_dataloader (torch.utils.data.DataLoader): The dataloader for the validation set
'''
train_dataloader = DataLoader(train_ds, batch_size=b_size)
val_dataloader = DataLoader(val_ds, batch_size=b_size)
return train_dataloader, val_dataloader
def remove_bg_only_test(test_seg_paths):
test_idx = []
for path in test_seg_paths:
arr = load_seg(path)
        result = float(np.amax(arr)) == 0.0
if not result:
test_idx.append(test_seg_paths.index(path))
return test_idx
def clean_test_ds(test_img_paths, test_seg_paths, test_idx):
cleaned_img_paths = []
cleaned_seg_paths = []
for idx in test_idx:
cleaned_img_paths.append(test_img_paths[idx])
cleaned_seg_paths.append(test_seg_paths[idx])
return cleaned_img_paths, cleaned_seg_paths
def get_features(features):
return features
def get_num_layers(features):
return len(features)
def save_results(csv, dict):
'''
This function is used to save the conditions and results of training the DNN in a csv file
Args:
csv (str): The name of the csv file. Must be in the format 'XXX.csv'
dict (dict): The conditions and results of training in the form of a dictionary
Returns:
None
'''
df = pd.read_csv(csv, index_col=0)
df = df.append(dict, ignore_index=True)
df.to_csv(csv)
def save_preds():
pass |
the-stack_0_11227 | #!/usr/bin/env python
# Copyright 2016 99cloud Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import os
from io import StringIO
from oslotest import base
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
MERGE_CONFIG_FILE = os.path.join(PROJECT_DIR,
'ansible/action_plugins/merge_configs.py')
merge_configs = imp.load_source('merge_configs', MERGE_CONFIG_FILE)
TESTA = '''[DEFAULT]
key1 = b
c
key2 = v1
v2
key3 = v3
key3 = v4
key4 = v5
[b]
b_key1 = 1
b_key2 = 1
2
[c]
c_key1 =
c_key2 = 1 2 3
4 5 6
'''
TESTB = '''[DEFAULT]
key2 = v3
v4
v5
key4 = v4
key4 =
[b]
b_key2 = 2
'''
# TESTC is TESTA + TESTB
TESTC = '''[DEFAULT]
key1 = b
c
key2 = v3
v4
v5
key3 = v3
key3 = v4
key4 = v4
key4 =
[b]
b_key1 = 1
b_key2 = 2
[c]
c_key1 =
c_key2 = 1 2 3
4 5 6
'''
TESTA_NO_SECTIONS = '''key1 = a
key2 = b
'''
TESTB_NO_SECTIONS = '''key3 = c
'''
# TESTA_NO_SECTIONS and TESTB_NO_SECTIONS combined
TESTC_NO_SECTIONS = '''key1 = a
key2 = b
key3 = c
'''
TESTA_NO_DEFAULT_SECTION = '''key1 = a
key2 = b
[a]
key1 = not_a
[b]
key3 = not_c
'''
TESTB_NO_DEFAULT_SECTION = '''key3 = c
[b]
key2 = not_b
key3 = override
'''
# TESTA_NO_DEFAULT_SECTION and TESTB_NO_DEFAULT_SECTION combined
TESTC_NO_DEFAULT_SECTION = '''key1 = a
key2 = b
key3 = c
[a]
key1 = not_a
[b]
key3 = override
key2 = not_b
'''
# TESTC_NO_WHITESPACE is TESTA + TESTB without whitespace around equal signs
TESTC_NO_WHITESPACE = '''[DEFAULT]
key1=b
c
key2=v3
v4
v5
key3=v3
key3=v4
key4=v4
key4=
[b]
b_key1=1
b_key2=2
[c]
c_key1=
c_key2=1 2 3
4 5 6
'''
class OverrideConfigParserTest(base.BaseTestCase):
def test_read_write(self):
for ini in [TESTA,
TESTB,
TESTC,
TESTA_NO_SECTIONS,
TESTB_NO_SECTIONS,
TESTC_NO_SECTIONS,
TESTA_NO_DEFAULT_SECTION,
TESTB_NO_DEFAULT_SECTION,
TESTC_NO_DEFAULT_SECTION]:
parser = merge_configs.OverrideConfigParser()
parser.parse(StringIO(ini))
output = StringIO()
parser.write(output)
self.assertEqual(ini, output.getvalue())
output.close()
def test_merge(self):
parser = merge_configs.OverrideConfigParser()
parser.parse(StringIO(TESTA))
parser.parse(StringIO(TESTB))
output = StringIO()
parser.write(output)
self.assertEqual(TESTC, output.getvalue())
output.close()
def test_merge_no_sections(self):
parser = merge_configs.OverrideConfigParser()
parser.parse(StringIO(TESTA_NO_SECTIONS))
parser.parse(StringIO(TESTB_NO_SECTIONS))
output = StringIO()
parser.write(output)
self.assertEqual(TESTC_NO_SECTIONS, output.getvalue())
output.close()
def test_merge_no_default_section(self):
parser = merge_configs.OverrideConfigParser()
parser.parse(StringIO(TESTA_NO_DEFAULT_SECTION))
parser.parse(StringIO(TESTB_NO_DEFAULT_SECTION))
output = StringIO()
parser.write(output)
self.assertEqual(TESTC_NO_DEFAULT_SECTION, output.getvalue())
output.close()
def test_merge_no_whitespace(self):
parser = merge_configs.OverrideConfigParser(whitespace=False)
parser.parse(StringIO(TESTA))
parser.parse(StringIO(TESTB))
output = StringIO()
parser.write(output)
self.assertEqual(TESTC_NO_WHITESPACE, output.getvalue())
output.close()
|
the-stack_0_11228 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest import GaiaTestCase
from gaiatest.apps.gallery.app import Gallery
class TestGalleryCropPhoto(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
# add photo to storage
self.push_resource('IMG_0001.jpg')
def test_gallery_crop_photo(self):
gallery = Gallery(self.marionette)
gallery.launch()
gallery.wait_for_files_to_load(1)
initial_image_size = gallery.thumbnails[0].absolute_image_size
image = gallery.tap_first_gallery_item()
# Tap on Edit button.
edit_image = image.tap_edit_button()
edit_image.tap_edit_crop_button()
# portrait crop is 2:3 and will retain the image's height
edit_image.tap_portrait_crop()
gallery = edit_image.tap_edit_save_button()
gallery.wait_for_files_to_load(2)
# get the absolute image for the new first image
cropped_image_size = gallery.thumbnails[0].absolute_image_size
# As we have chosen portrait crop, height will remain the same, width should change
self.assertEqual(cropped_image_size['height'], initial_image_size['height'])
self.assertLess(cropped_image_size['width'], initial_image_size['width'])
|
the-stack_0_11229 | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="scatter3d.line.colorbar.title.font",
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
the-stack_0_11230 | # EXAMPLES needs to add parent directory to path so we can import EZ:
import os, sys
sys.path.append(os.path.dirname(__file__) + '/..')
# Disable the creation of python bytecode to keep stuff clean:
sys.dont_write_bytecode = True
from scripts.EZpanda.EZ import EZ, config
config['window-title'] = "EZpanda Examples"
# Setting in custom build of Panda3D, hopefully will be added in master:
config['bullet-split-impulse'] = True
# Load ez with config (ez is added to builtins so is global to all modules):
ez = EZ(config)
# Get the primary display mode aa- w5idth, height, refresh rate:
w, h, r = ez.window.get_display_mode(0)
# lets set a custom window size
w, h = 1024, 768
ez.window.set_display( w, h, r)
ez.window.fullscreen = False
# Enable everything:
ez.enable.gamepads()
ez.enable.particles()
ez.enable.collision()
ez.enable.physics()
# Load a scene and set it:
ez['menu'] = ez.load.scene('menu')
ez.set_scene( ez['menu'] )
ez.run() |
the-stack_0_11232 | import bisect
from collections import defaultdict
class Solution:
def dailyTemperatures(self, temperatures):
"""
:type temperatures: List[int]
:rtype: List[int]
"""
n = len(temperatures)
if n == 0:
return []
elif n == 1:
return [0]
# get the y axis of temperatures figure
dict_temperatures = defaultdict(list) # {temperature:[index1, index2, ...]}
for i in range(0, len(temperatures)):
dict_temperatures[temperatures[i]].append(i)
# ordering occurred temperatures
ordered_temperatures = sorted(dict_temperatures.keys())
# do computation
wait_days = []
for i in range(0, n-1):
current_temp = temperatures[i]
current_temp_idx = ordered_temperatures.index(current_temp)
if current_temp_idx == len(ordered_temperatures)-1: # no more higher temperature
wait_days.append(0)
else:
# get idx of nearest higher temperature
nearest_higher_idx = n # default idx > size of temperatures dataset
for higher_temp in ordered_temperatures[current_temp_idx+1:]:
for x in dict_temperatures[higher_temp]:
if x > i and nearest_higher_idx > x:
nearest_higher_idx = x
break
# find idx of the nearest higher temperature
if nearest_higher_idx == n:
wait_days.append(0)
else: # sort for the smallest idx
wait_days.append(nearest_higher_idx-i)
# the last one must be 0
wait_days.append(0)
return wait_days
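if __name__ == "__main__":
    # Added demo (illustrative, not part of the original solution): for the classic
    # input from the problem statement the expected answer is [1, 1, 4, 2, 1, 1, 0, 0].
    print(Solution().dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73]))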
|
the-stack_0_11233 | from contextlib import closing
from mysql.connector import connect
import random
def create_journal_group_name_lookup(filepath, encoding, delimiter):
data = load_delimited_data(filepath, encoding, delimiter)
lookup = {}
for row in data:
nlm_id = row[0]
group = row[1]
lookup[nlm_id] = group
return lookup
def create_id_lookup(db_config, sql):
lookup = {}
with closing(connect(**db_config)) as conn:
with closing(conn.cursor()) as cursor: #pylint: disable=E1101
cursor.execute(sql) #pylint: disable=E1101
for row in cursor.fetchall(): #pylint: disable=E1101
id, ui = row
lookup[ui] = id
return lookup
def load_delimited_data(path, encoding, delimiter):
with open(path, 'rt', encoding=encoding) as file:
data = tuple( tuple(data_item.strip() for data_item in line.strip().split(delimiter)) for line in file )
return data
def load_ids_from_file(path, encoding):
ids = [int(id[0]) for id in load_delimited_data(path, encoding, ',')]
return ids
def load_indexing_periods(filepath, encoding, is_fully_indexed):
periods = {}
with open(filepath, 'rt', encoding=encoding) as file:
for line in file:
split = line.split(',')
nlm_id = split[0].strip()
citation_subset = split[1].strip()
start_year = int(split[2].strip())
end_year = int(split[3].strip())
if start_year < 0:
continue
if end_year < 0:
end_year = None
period = { 'citation_subset': citation_subset, 'is_fully_indexed': is_fully_indexed, 'start_year': start_year, 'end_year': end_year }
if nlm_id in periods:
periods[nlm_id].append(period)
else:
periods[nlm_id] = [period]
return periods
def random_permutation(iterable, r=None):
pool = tuple(iterable)
r = len(pool) if r is None else r
return tuple(random.sample(pool, r))
def save_delimited_data(path, encoding, delimiter, data):
with open(path, 'wt', encoding=encoding) as file:
for data_row in data:
line = delimiter.join([str(data_item) for data_item in data_row]) + '\n'
file.write(line)
def should_review_coverage_note(coverage_note_text):
coverage_note_text_lower = coverage_note_text.lower()
should_review = str('sel' in coverage_note_text_lower or 'ful' in coverage_note_text_lower)
return should_review
def write_ids_to_file(path, encoding, ids):
save_delimited_data(path, encoding, ',', [(id,) for id in ids]) |
the-stack_0_11235 | import asyncio
import functools
import inspect
import typing
from urllib.parse import urlencode
from starlette.exceptions import HTTPException
from starlette.requests import HTTPConnection, Request
from starlette.responses import RedirectResponse, Response
from starlette.websockets import WebSocket
def has_required_scope(conn: HTTPConnection, scopes: typing.Sequence[str]) -> bool:
for scope in scopes:
if scope not in conn.auth.scopes:
return False
return True
def requires(
scopes: typing.Union[str, typing.Sequence[str]],
status_code: int = 403,
redirect: str = None,
) -> typing.Callable:
scopes_list = [scopes] if isinstance(scopes, str) else list(scopes)
def decorator(func: typing.Callable) -> typing.Callable:
sig = inspect.signature(func)
for idx, parameter in enumerate(sig.parameters.values()):
if parameter.name == "request" or parameter.name == "websocket":
type_ = parameter.name
break
else:
raise Exception(
f'No "request" or "websocket" argument on function "{func}"'
)
if type_ == "websocket":
# Handle websocket functions. (Always async)
@functools.wraps(func)
async def websocket_wrapper(
*args: typing.Any, **kwargs: typing.Any
) -> None:
websocket = kwargs.get(
"websocket", args[idx] if idx < len(args) else None
)
assert isinstance(websocket, WebSocket)
if not has_required_scope(websocket, scopes_list):
await websocket.close()
else:
await func(*args, **kwargs)
return websocket_wrapper
elif asyncio.iscoroutinefunction(func):
# Handle async request/response functions.
@functools.wraps(func)
async def async_wrapper(
*args: typing.Any, **kwargs: typing.Any
) -> Response:
request = kwargs.get("request", args[idx] if idx < len(args) else None)
assert isinstance(request, Request)
if not has_required_scope(request, scopes_list):
if redirect is not None:
orig_request_qparam = urlencode({"next": str(request.url)})
next_url = "{redirect_path}?{orig_request}".format(
redirect_path=request.url_for(redirect),
orig_request=orig_request_qparam,
)
return RedirectResponse(url=next_url, status_code=303)
raise HTTPException(status_code=status_code)
return await func(*args, **kwargs)
return async_wrapper
else:
# Handle sync request/response functions.
@functools.wraps(func)
def sync_wrapper(*args: typing.Any, **kwargs: typing.Any) -> Response:
request = kwargs.get("request", args[idx] if idx < len(args) else None)
assert isinstance(request, Request)
if not has_required_scope(request, scopes_list):
if redirect is not None:
orig_request_qparam = urlencode({"next": str(request.url)})
next_url = "{redirect_path}?{orig_request}".format(
redirect_path=request.url_for(redirect),
orig_request=orig_request_qparam,
)
return RedirectResponse(url=next_url, status_code=303)
raise HTTPException(status_code=status_code)
return func(*args, **kwargs)
return sync_wrapper
return decorator
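# Usage sketch (illustrative, not part of this module; the endpoint and route names
# are assumptions): restrict an endpoint to authenticated users and send everyone
# else to a "login" route:
#
#     @requires("authenticated", redirect="login")
#     async def dashboard(request: Request) -> Response:
#         ...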
class AuthenticationError(Exception):
pass
class AuthenticationBackend:
async def authenticate(
self, conn: HTTPConnection
) -> typing.Optional[typing.Tuple["AuthCredentials", "BaseUser"]]:
raise NotImplementedError() # pragma: no cover
class AuthCredentials:
def __init__(self, scopes: typing.Sequence[str] = None):
self.scopes = [] if scopes is None else list(scopes)
class BaseUser:
@property
def is_authenticated(self) -> bool:
raise NotImplementedError() # pragma: no cover
@property
def display_name(self) -> str:
raise NotImplementedError() # pragma: no cover
@property
def identity(self) -> str:
raise NotImplementedError() # pragma: no cover
class SimpleUser(BaseUser):
def __init__(self, username: str) -> None:
self.username = username
@property
def is_authenticated(self) -> bool:
return True
@property
def display_name(self) -> str:
return self.username
class UnauthenticatedUser(BaseUser):
@property
def is_authenticated(self) -> bool:
return False
@property
def display_name(self) -> str:
return ""
|
the-stack_0_11236 | from expenses_tracker.expenses.models import Expense
from expenses_tracker.profiles.models import Profile
def get_profile():
profile = Profile.objects.first()
if profile:
expenses = Expense.objects.all()
profile.budget_left = profile.budget - sum(e.price for e in expenses)
return profile
|
the-stack_0_11238 | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe
from recipe.serializers import RecipeSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def sample_recipe(user, **params):
"""create and return a sample recipe"""
defaults = {
'title': 'Sample Recipe',
'time_minutes': 10,
'price': 5.00
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
"""test authenticated recipe API"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""test that authentication is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTest(TestCase):
"""test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""test retrieving a list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""test retrieving recipes for user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'pass123123'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
|
the-stack_0_11239 | #! /usr/bin/env python
#
# 1440 files took about 38 mins
#
from __future__ import print_function
from tkinter import filedialog
from astride import Streak
import glob
import sys
import shutil
import os
import tkinter as tk
import matplotlib.pyplot as plt
from astropy.io import fits
import numpy as np
def get_arg(argv):
if len(argv) == 1:
return get_int_arg(argv)
else:
return get_cmd_arg(argv)
def mk_diff(f0,f1,diff):
hdu0 = fits.open(f0)
hdu1 = fits.open(f1)
h1 = hdu1[0].header
d0 = hdu0[0].data
d1 = hdu1[0].data
if v:
print("DEBUG mean/std: %s %s %s %g %g" % (f0,f1,diff,d0.mean(),d0.std()))
d2 = d1-d0
fits.writeto(diff,d2,h1,overwrite=True)
def get_cmd_arg(argv,shape=.14,area=120,contour=12):
import argparse as ap
parser = ap.ArgumentParser()
parser.add_argument('-i','--filein', nargs=1,help = 'Directory to fits directory')
parser.add_argument('-o','--fileout', nargs=1,help = 'Directory to detection folder')
parser.add_argument('-s','--shape', nargs=1,help = 'Shape factor')
parser.add_argument('-a','--area', nargs=1,help = 'Minimum area to be considered a streak')
    parser.add_argument('-c','--contour',nargs=1,help = 'Contour threshold control value')
args=vars(parser.parse_args())
if args['filein'] != None: file_pathin = (args['filein'][0])
if args['fileout'] != None: file_pathout = (args['fileout'][0])
if args['shape'] != None: shape = float(args['shape'][0])
if args['area'] != None: area = float(args['area'][0])
if args['contour'] != None: contour = float(args['contour'][0])
return (file_pathin,file_pathout,shape,area,contour)
def get_int_arg(argv):
#Creates folder input browsers
winin = tk.Tk()
winin.withdraw()
winin.attributes('-topmost', True)
file_pathin = filedialog.askdirectory(title = "Select input")
#Creates folder output browsers
winout = tk.Tk()
winout.withdraw()
winout.attributes('-topmost', True)
file_pathout = filedialog.askdirectory(title = "Select output")
winout.destroy()
winin.destroy()
#ask user for remaining arguments
print("\nClicking enter will apply default values, entering a value will change it.")
nshape = input("Shape value (1=circle, .1=thin oval) (default = 0.14): ")
if nshape == "":
shape = .14
else:
shape = float(nshape)
narea = input("Minimum area (default = 120): ")
if narea == "":
area = 120
else:
area = float(narea)
ncontour = input("Contour value (higher=only brighter streaks detected)(default = 12): ")
if ncontour == "":
contour = 12
else:
contour = float(ncontour)
ndiff = input("Create difference images (default = False): ")
if ndiff == "":
diff = False
else:
diff = ndiff.lower() == 'true'
nv = input("Enable verbose mode (default = False): ")
if nv == "":
v = False
else:
v = nv.lower() == 'true'
return(file_pathin,file_pathout,shape,area,contour,diff,v)
def do_dir(d,dsum,shape,area,contour,diff,v):
"""
process a directory 'd'
"""
#print("Outputting in directory: " + dsum)
if dsum == None:
dsum = d
else:
if not os.path.exists(dsum):
os.mkdir(dsum)
num = 0
detected = 0
fileCount = 0
zero = 0
# debug/verbose
if v:
print('DEBUG: shape=%g area=%g contour=%g' % (shape,area,contour))
ffs = glob.glob(d+'/*.FIT') + glob.glob(d+'/*.fit') + \
glob.glob(d+'/*.FTS') + glob.glob(d+'/*.fts') + \
glob.glob(d+'/*.FITS') + glob.glob(d+'/*.fits')
ffs = list(set(ffs)) # needed for dos
ffs.sort() # on linux wasn't sorted, on dos it was
f = open(dsum+'/summary.txt','w') # Creates summary text file
f.write('Streaks found in files: \n') #Creates first line for summary file
print('Processing %d files' % len(ffs))
for ff in ffs:
# creates directory one directory back from the folder which contains fits files
num = do_one(ff,dsum+'/'+ff[ff.rfind(os.sep)+1:ff.rfind('.')],shape,area,contour)
if num == 0:
zero += 1
else:
detected += int(num) #Counter of how many streaks detected
f.write(ff + '\n')
fileCount += 1 #Counter for how many files analyzed
# Produce and write summary file
f.write('\n' 'Files analyzed: ' + str(fileCount)+ '\n' )
f.write('Streaks detected: ' + str(detected) + '\n' )
f.write('Files with no detections: ' + str(zero) + '\n\n\n')
if diff:
num = 0
detected = 0
fileCount = 0
zero = 0
dfs = []
print('Computing %d differences' % (len(ffs)-1))
for i in range(len(ffs)-1):
dfs.append(ffs[i+1]+'.diff')
mk_diff(ffs[i],ffs[i+1],dfs[i])
print('Processing %d files' % (len(ffs)-1))
for df in dfs:
num = do_one(df,dsum+'/'+df[df.rfind(os.sep)+1:df.find('.')]+'DIFF',shape,area,contour)
if num == 0:
zero += 1
else:
detected += int(num) #Counter of how many streaks detected
f.write(df + '\n')
fileCount += 1 #Counter for how many files analyzed
# Produce and write summary file
f.write('\n' 'Files analyzed: ' + str(fileCount)+ '\n' )
f.write('Streaks detected: ' + str(detected) + '\n' )
f.write('Files with no detections: ' + str(zero) + '\n')
f.close()
else:
f.close()
def do_one(ff,output_path=None,shape=None,area=None,contour=None):
"""
process a directory one fits-file (ff)
"""
# Read a fits image and create a Streak instance.
streak = Streak(ff,output_path=output_path)
# Detect streaks.
# streak.shape_cut = .14
# streak.area_cut = 120
# streak.contour_threshold = 12
#Customization of values
streak.shape_cut = shape
streak.area_cut = area
streak.contour_threshold = contour
streak.detect()
# Write outputs and plot figures.
streak.write_outputs()
streak.plot_figures()
streakfile=output_path+"/streaks.txt"
fp=open(streakfile)
lines=fp.readlines()
fp.close()
#print("streaks found %d" % (len(lines)-1))
#print("%d " % (len(lines)-1))
n = len(lines)-1
if n == 0:
sys.stdout.write('.')
elif n < 10:
sys.stdout.write('%d' % n)
else:
sys.stdout.write('*')
sys.stdout.flush()
#Delete/move files
#if n == 0:
# shutil.rmtree(output_path)
return int(n)
#def do_one(ff,output_path=None,shape=None,area=None,contour=None): BACKUP
#   (older variant kept for reference; commented out so this def-less body cannot run at import time)
#    """
#    process a directory one fits-file (ff)
#    """
#    # Read a fits image and create a Streak instance.
#    streak = Streak(ff,output_path=output_path)
#    # Detect streaks.
#    # streak.shape_cut = .14
#    # streak.area_cut = 120
#    # streak.contour_threshold = 12
#    #Customization of values
#    streak.shape_cut = shape
#    streak.area_cut = area
#    streak.contour_threshold = contour
#    streak.detect()
#    # Write outputs and plot figures.
#    streak.write_outputs()
#    streak.plot_figures()
#    streakfile=output_path+"/streaks.txt"
#    fp=open(streakfile)
#    lines=fp.readlines()
#    fp.close()
#    #print("streaks found %d" % (len(lines)-1))
#    #print("%d " % (len(lines)-1))
#    n = len(lines)-1
#    if n == 0:
#        sys.stdout.write('.')
#    elif n < 10:
#        sys.stdout.write('%d' % n)
#    else:
#        sys.stdout.write('*')
#    sys.stdout.flush()
#    #Delete/move files
#    if n == 0:
#        shutil.rmtree(output_path)
#    return int(n)
#do_one('20151108_MD01_raw/IMG00681.FIT')
#do_dir('20151108_MD01_raw')
if __name__ == '__main__':
(file_pathin,file_pathout,shape,area,contour,diff,v) = get_arg(sys.argv)
#Prints selected folders
print("Running in data directory %s" % file_pathin)
print("Outputting in data directory %s" % file_pathout)
do_dir(file_pathin,file_pathout,shape,area,contour,diff,v)
#print("Running in data directory %s" % sys.argv[1])
#do_dir(sys.argv[1],sys.argv[2])
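    # Example invocation (illustrative; the script name and paths are assumptions):
    #     python streak_batch.py -i ./fits_in -o ./detections -s 0.14 -a 120 -c 12
    # Running without arguments instead opens the Tk folder dialogs and prompts above.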
|
the-stack_0_11240 | import json
import logging
from django.utils.functional import wraps
from morango.sync.context import LocalSessionContext
from kolibri.core.auth.constants.morango_sync import ScopeDefinitions
from kolibri.core.auth.hooks import FacilityDataSyncHook
logger = logging.getLogger(__name__)
def _get_our_cert(context):
ss = context.sync_session
return ss.server_certificate if ss.is_server else ss.client_certificate
def _get_their_cert(context):
ss = context.sync_session
return ss.client_certificate if ss.is_server else ss.server_certificate
def this_side_using_single_user_cert(context):
return _get_our_cert(context).scope_definition_id == ScopeDefinitions.SINGLE_USER
def other_side_using_single_user_cert(context):
return _get_their_cert(context).scope_definition_id == ScopeDefinitions.SINGLE_USER
def get_user_id_for_single_user_sync(context):
if other_side_using_single_user_cert(context):
cert = _get_their_cert(context)
elif this_side_using_single_user_cert(context):
cert = _get_our_cert(context)
else:
return None
return json.loads(cert.scope_params)["user_id"]
def get_other_side_kolibri_version(context):
"""
:type context: morango.sync.context.LocalSessionContext
:return: A str or None
"""
# get the instance info for the other instance
instance_info = context.sync_session.server_instance_data
if context.is_server:
instance_info = context.sync_session.client_instance_data
# get the kolibri version, which is defined in
# kolibri.core.auth.constants.morango_sync:CUSTOM_INSTANCE_INFO
return instance_info.get("kolibri")
def _extract_kwargs_from_context(context):
return {
"dataset_id": _get_our_cert(context).get_root().id,
"local_is_single_user": this_side_using_single_user_cert(context),
"remote_is_single_user": other_side_using_single_user_cert(context),
"single_user_id": get_user_id_for_single_user_sync(context),
"context": context,
}
def _local_event_handler(func):
@wraps(func)
def wrapper(context):
if isinstance(context, LocalSessionContext):
kwargs = _extract_kwargs_from_context(context)
return func(**kwargs)
return wrapper
@_local_event_handler
def _pre_transfer_handler(**kwargs):
for hook in FacilityDataSyncHook.registered_hooks:
# we catch all errors because as a rule of thumb, we don't want hooks to fail
try:
hook.pre_transfer(**kwargs)
except Exception as e:
logger.error(
"{}.pre_transfer hook failed".format(hook.__class__.__name__),
exc_info=e,
)
@_local_event_handler
def _post_transfer_handler(**kwargs):
for hook in FacilityDataSyncHook.registered_hooks:
# we catch all errors because as a rule of thumb, we don't want hooks to fail
try:
hook.post_transfer(**kwargs)
except Exception as e:
logger.error(
"{}.post_transfer hook failed".format(hook.__class__.__name__),
exc_info=e,
)
def register_sync_event_handlers(session_controller):
session_controller.signals.initializing.completed.connect(_pre_transfer_handler)
session_controller.signals.cleanup.completed.connect(_post_transfer_handler)
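# Illustrative sketch (assumptions, not part of this module): a plugin-side hook
# receives the kwargs assembled in _extract_kwargs_from_context above. The
# registration decorator import is an assumption.
#
#     from kolibri.plugins.hooks import register_hook
#
#     @register_hook
#     class LoggingSyncHook(FacilityDataSyncHook):
#         def pre_transfer(self, dataset_id, local_is_single_user,
#                          remote_is_single_user, single_user_id, context, **kwargs):
#             logger.info("about to sync dataset %s", dataset_id)
#         def post_transfer(self, dataset_id, **kwargs):
#             logger.info("finished syncing dataset %s", dataset_id)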
|
the-stack_0_11244 | import warnings
from . import DealFormat
from .. import dto
class BRIFormat(DealFormat):
number_warning = '.bri file format assumes consequent deal numbers from 1'
@property
def suffix(self):
return '.bri'
def parse_content(self, content):
warnings.warn(self.number_warning)
dealset = []
number = 1
while True:
deal_str = content.read(128).strip()
if len(deal_str) > 0:
if len(deal_str) < 78:
                    warnings.warn('truncated .bri input: %s' % (deal_str))
break
else:
deal_obj = dto.Deal()
deal_obj.number = number
deal_obj.dealer = deal_obj.get_dealer(number)
deal_obj.vulnerable = deal_obj.get_vulnerability(number)
deal_obj.hands = self.parse_hands(deal_str)
dealset.append(deal_obj)
number += 1
else:
break
return dealset
def parse_hands(self, deal_str):
deal_obj = dto.Deal()
try:
deal = [int(deal_str[i*2:(i+1)*2], 10) for i in range(0, 39)]
if max(deal) > 52:
raise RuntimeError(
'invalid card in .bri file: %d' % (max(deal)))
for hand in range(0, 3):
for card in deal[13*hand:13*(hand+1)]:
card = card - 1
                    suit = card // 13
card = card % 13
deal_obj.hands[hand][suit].append(self.cards[card])
deal_obj.fill_west()
except ValueError:
raise RuntimeError('invalid card in .bri file: %s' % (deal_str))
return deal_obj.hands
def output_content(self, out_file, dealset):
warnings.warn(self.number_warning)
for deal in dealset:
deal_str = self.single_deal_output(deal)
deal_str += ' ' * 32
deal_str += chr(0) * 18
out_file.write(deal_str)
def single_deal_output(self, deal):
deal_str = ''
for hand in deal.hands[0:3]:
for i, suit in enumerate(hand):
for card in suit:
try:
deal_str += '%02d' % (self.cards.index(card) + 13*i + 1)
except ValueError:
raise RuntimeError(
'invalid card character: %s in board %d' % (card, deal.number))
return deal_str
|
the-stack_0_11245 | import time
import cache
import vkapi
from log import datetime_format
def main(a, args):
dialogs = a.messages.getDialogs(unread=1)['items']
messages = {}
users = []
chats = []
for msg in dialogs:
def cb(req, resp):
messages[req['peer_id']] = resp['items'][::-1]
a.messages.getHistory.delayed(peer_id=vkapi.utils.getSender(msg['message']), count=min(msg['unread'], 10)).callback(cb)
if 'chat_id' in msg['message']:
chats.append(msg['message']['chat_id'])
else:
users.append(msg['message']['user_id'])
uc = cache.UserCache(a, 'online')
cc = cache.ConfCache(a)
uc.load(users)
cc.load(chats)
a.sync()
if dialogs:
print('-------------------------\n')
else:
print('Nothing here')
for msg in dialogs:
m = msg['message']
if 'chat_id' in m:
print('Chat "{}" ({}): {}'.format(cc[m['chat_id']]['title'], m['chat_id'], msg['unread']))
else:
print('{} {} ({}){}: {}'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id'],
', online' if uc[m['user_id']]['online'] else '', msg['unread']))
print()
for i in messages[vkapi.utils.getSender(msg['message'])]:
print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(i['date'])), i['body']))
print()
print('-------------------------\n')
if args:
print(flush=True)
mr = vkapi.MessageReceiver(a)
while True:
time.sleep(1)
for m in mr.getMessages():
if 'chat_id' in m:
print('Chat "{}" ({}), {} {}:'.format(cc[m['chat_id']]['title'], m['chat_id'],
uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name']))
else:
print('{} {} ({}):'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id']))
print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(m['date'])), m['body']))
print(flush=True)
|
the-stack_0_11246 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import nevergrad.common.typing as tp
# import numpy as np
from nevergrad.parametrization import parameter as p
from nevergrad.optimization.utils import UidQueue
from . import base
from .multiobjective import nsga2 as nsga2
class _EvolutionStrategy(base.Optimizer):
"""Experimental evolution-strategy-like algorithm
The behavior is going to evolve
"""
def __init__(
self,
parametrization: base.IntOrParameter,
budget: tp.Optional[int] = None,
num_workers: int = 1,
*,
config: tp.Optional["EvolutionStrategy"] = None,
) -> None:
if budget is not None and budget < 60:
warnings.warn(
"ES algorithms are inefficient with budget < 60", base.errors.InefficientSettingsWarning
)
super().__init__(parametrization, budget=budget, num_workers=num_workers)
self._population: tp.Dict[str, p.Parameter] = {}
self._uid_queue = UidQueue()
self._waiting: tp.List[p.Parameter] = []
# configuration
self._config = EvolutionStrategy() if config is None else config
self._rank_method: tp.Any = None # TODO better typing (eventually)
if self._config.ranker == "nsga2":
self._rank_method = nsga2.rank
elif self._config.ranker != "simple":
raise NotImplementedError(f"Unknown ranker {self._config.ranker}")
self._no_hypervolume = self._config.offsprings is None
def _internal_ask_candidate(self) -> p.Parameter:
if self.num_ask < self._config.popsize:
param = self.parametrization.sample()
assert param.uid == param.heritage["lineage"] # this is an assumption used below
self._uid_queue.asked.add(param.uid)
self._population[param.uid] = param
return param
uid = self._uid_queue.ask()
param = self._population[uid].spawn_child()
param.mutate()
ratio = self._config.recombination_ratio
if ratio and self._rng.rand() < ratio:
selected = self._rng.choice(list(self._population))
param.recombine(self._population[selected])
return param
def _internal_tell_candidate(self, candidate: p.Parameter, loss: float) -> None:
if self._config.offsprings is None:
uid = candidate.heritage["lineage"]
self._uid_queue.tell(uid)
parent_value = float("inf") if uid not in self._population else base._loss(self._population[uid])
if loss < parent_value:
self._population[uid] = candidate
else:
no_parent = next(iter(candidate.parents_uids), "#no_parent#") not in self._population
if no_parent and len(self._population) < self._config.popsize:
self._population[candidate.uid] = candidate
self._uid_queue.tell(candidate.uid)
else:
self._waiting.append(candidate)
if len(self._waiting) >= self._config.offsprings:
self._select()
def _select(self) -> None:
choices = self._waiting + ([] if self._config.only_offsprings else list(self._population.values()))
if self._rank_method is not None and self.num_objectives > 1:
choices_rank = self._rank_method(choices, n_selected=self._config.popsize)
choices = [x for x in choices if x.uid in choices_rank]
else:
choices.sort(key=base._loss)
self._population = {x.uid: x for x in choices[: self._config.popsize]}
self._uid_queue.clear()
self._waiting.clear()
for uid in self._population:
self._uid_queue.tell(uid)
class EvolutionStrategy(base.ConfiguredOptimizer):
"""Experimental evolution-strategy-like algorithm
The API is going to evolve
Parameters
----------
recombination_ratio: float
probability of using a recombination (after the mutation) for generating new offsprings
popsize: int
population size of the parents (lambda)
offsprings: int
number of generated offsprings (mu)
only_offsprings: bool
use only offsprings for the new generation if True (True: lambda,mu, False: lambda+mu)
ranker: str
ranker for the multiobjective case (defaults to NSGA2)
"""
# pylint: disable=unused-argument
def __init__(
self,
*,
recombination_ratio: float = 0,
popsize: int = 40,
offsprings: tp.Optional[int] = None,
only_offsprings: bool = False,
ranker: str = "nsga2",
) -> None:
super().__init__(_EvolutionStrategy, locals(), as_config=True)
assert offsprings is None or not only_offsprings or offsprings > popsize
if only_offsprings:
assert offsprings is not None, "only_offsprings only work if offsprings is not None (non-DE mode)"
assert 0 <= recombination_ratio <= 1
assert ranker in ["simple", "nsga2"]
self.recombination_ratio = recombination_ratio
self.popsize = popsize
self.offsprings = offsprings
self.only_offsprings = only_offsprings
self.ranker = ranker
RecES = EvolutionStrategy(recombination_ratio=1, only_offsprings=True, offsprings=60).set_name(
"RecES", register=True
)
RecMixES = EvolutionStrategy(recombination_ratio=1, only_offsprings=False, offsprings=20).set_name(
"RecMixES", register=True
)
RecMutDE = EvolutionStrategy(recombination_ratio=1, only_offsprings=False, offsprings=None).set_name(
"RecMutDE", register=True
)
ES = EvolutionStrategy(recombination_ratio=0, only_offsprings=True, offsprings=60).set_name(
"ES", register=True
)
MixES = EvolutionStrategy(recombination_ratio=0, only_offsprings=False, offsprings=20).set_name(
"MixES", register=True
)
MutDE = EvolutionStrategy(recombination_ratio=0, only_offsprings=False, offsprings=None).set_name(
"MutDE", register=True
)
NonNSGAIIES = EvolutionStrategy(
recombination_ratio=0, only_offsprings=True, offsprings=60, ranker="simple"
).set_name("NonNSGAIIES", register=True)
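# Usage sketch (illustrative, not part of this module): the configured optimizers
# registered above can be looked up through the public registry and used like any
# other nevergrad optimizer, e.g.
#
#     import nevergrad as ng
#     opt = ng.optimizers.registry["ES"](parametrization=2, budget=120)
#     recommendation = opt.minimize(lambda x: float((x ** 2).sum()))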
|
the-stack_0_11247 | from math import *
import random
### ------------------------------------- ###
# Below, is the robot class
#
# This robot lives in 2D, x-y space, and its motion is
# pointed in a random direction, initially.
# It moves in a straight line until it comes close to a wall
# at which point it stops.
#
# For measurements, it senses the x- and y-distance
# to landmarks. This is different from range and bearing as
# commonly studied in the literature, but this makes it much
# easier to implement the essentials of SLAM without
# cluttered math.
#
class robot:
# --------
# init:
# creates a robot with the specified parameters and initializes
# the location (self.x, self.y) to the center of the world
#
def __init__(self, world_size = 100.0, measurement_range = 30.0,
motion_noise = 1.0, measurement_noise = 1.0):
self.measurement_noise = 0.0
self.world_size = world_size
self.measurement_range = measurement_range
self.x = world_size / 2.0
self.y = world_size / 2.0
self.motion_noise = motion_noise
self.measurement_noise = measurement_noise
self.landmarks = []
self.num_landmarks = 0
# returns a positive, random float
def rand(self):
return random.random() * 2.0 - 1.0
# --------
# move: attempts to move robot by dx, dy. If outside world
# boundary, then the move does nothing and instead returns failure
#
def move(self, dx, dy):
x = self.x + dx + self.rand() * self.motion_noise
y = self.y + dy + self.rand() * self.motion_noise
if x < 0.0 or x > self.world_size or y < 0.0 or y > self.world_size:
return False
else:
self.x = x
self.y = y
return True
# --------
# sense: returns x- and y- distances to landmarks within visibility range
# because not all landmarks may be in this range, the list of measurements
# is of variable length. Set measurement_range to -1 if you want all
# landmarks to be visible at all times
#
## TODO: paste your complete the sense function, here
## make sure the indentation of the code is correct
def sense(self):
''' This function does not take in any parameters, instead it references internal variables
(such as self.landamrks) to measure the distance between the robot and any landmarks
that the robot can see (that are within its measurement range).
This function returns a list of landmark indices, and the measured distances (dx, dy)
between the robot's position and said landmarks.
This function should account for measurement_noise and measurement_range.
One item in the returned list should be in the form: [landmark_index, dx, dy].
'''
measurements = []
#import pdb; pdb.set_trace()
# iterate through all of the landmarks in a world
for i, landmark in enumerate(self.landmarks):
# For each landmark
# 1. compute dx and dy, the distances between the robot and the landmark
# 2. account for measurement noise by *adding* a noise component to dx and dy
# - The noise component should be a random value between [-1.0, 1.0)*measurement_noise
# - Feel free to use the function self.rand() to help calculate this noise component
# - It may help to reference the `move` function for noise calculation
# 3. If either of the distances, dx or dy, fall outside of the internal var, measurement_range
# then we cannot record them; if they do fall in the range, then add them to the measurements list
# as list.append([index, dx, dy]), this format is important for data creation done later
dx = fabs(self.x - landmark[0]) + self.rand() * self.measurement_noise
dy = fabs(self.y - landmark[1]) + self.rand() * self.measurement_noise
if dx < self.measurement_range and dy < self.measurement_range:
measurements.append([i, dx, dy])
# return the final, complete list of measurements
return measurements
# --------
# make_landmarks:
# make random landmarks located in the world
#
def make_landmarks(self, num_landmarks):
self.landmarks = []
for i in range(num_landmarks):
self.landmarks.append([round(random.random() * self.world_size),
round(random.random() * self.world_size)])
self.num_landmarks = num_landmarks
# called when print(robot) is called; prints the robot's location
def __repr__(self):
return 'Robot: [x=%.5f y=%.5f]' % (self.x, self.y)
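    # Usage sketch (illustrative, not part of the original class): a typical loop is
    #     r = robot(world_size=100.0, measurement_range=30.0)
    #     r.make_landmarks(5)
    #     r.move(1.0, 2.0)
    #     measurements = r.sense()   # -> [[landmark_index, dx, dy], ...]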
####### END robot class ####### |
the-stack_0_11248 |
import unittest
import random
import threading
import System
from System.IO import Directory
from System.IO import Path
from System.Collections.Generic import Dictionary
from System.Collections.Generic import SortedDictionary
from System.Collections.Generic import SortedList
import clr
clr.AddReferenceByPartialName('Esent.Collections')
from Microsoft.Isam.Esent.Collections.Generic import PersistentDictionary
def deleteDirectory(directory):
if Directory.Exists(directory):
Directory.Delete(directory, True)
class SingleDictionaryFixture(unittest.TestCase):
def setUp(self):
self._dataDirectory = 'SingleDictionaryFixture'
self._deleteDataDirectory()
self._dict = PersistentDictionary[System.String,System.String](self._dataDirectory)
def tearDown(self):
self._dict.Dispose()
self._deleteDataDirectory()
def _deleteDataDirectory(self):
deleteDirectory(self._dataDirectory)
def testInsertAndRetrieveRecord(self):
self._dict['key'] = 'value'
self.assertEqual(self._dict['key'], 'value')
def testLargeKey(self):
# esent may truncate the key, but we should be able to set all this data
key = 'K' * 1024*1024
self._dict[key] = 'value'
self.assertEqual(self._dict[key], 'value')
def testLargeValue(self):
value = 'V' * 1024*1024
self._dict['bigstuff'] = value
self.assertEqual(self._dict['bigstuff'], value)
def testNullKey(self):
self._dict[None] = 'value'
self.assertEqual(self._dict[None], 'value')
def testNullValue(self):
self._dict['key'] = None
self.assertEqual(self._dict['key'], None)
def testOverwriteRecord(self):
self._dict['key'] = 'value'
self._dict['key'] = 'newvalue'
self.assertEqual(self._dict['key'], 'newvalue')
def testContainsKeyReturnsFalseWhenKeyNotPresent(self):
self.assertEqual(False, self._dict.ContainsKey('key'))
def testContainsKeyReturnsTrueWhenKeyIsPresent(self):
self._dict['key'] = 'value'
self.assertEqual(True, self._dict.ContainsKey('key'))
def testRemoveRemovesKey(self):
self._dict['key'] = 'value'
self.assertEqual(True, self._dict.Remove('key'))
self.assertEqual(False, self._dict.ContainsKey('key'))
def testRemoveReturnsFalseWhenKeyNotPresent(self):
self.assertEqual(False, self._dict.Remove('key'))
def testCountIsZeroWhenDictionaryIsEmpty(self):
self.assertEqual(0, self._dict.Count)
def testCountIncreasesWithInsert(self):
self._dict['a'] = 'a'
self._dict['b'] = 'b'
self.assertEqual(2, self._dict.Count)
def testLenDecreasesWithDelete(self):
self._dict['a'] = 'a'
self._dict['b'] = 'b'
self._dict['c'] = 'c'
self._dict.Remove('b')
self.assertEqual(2, self._dict.Count)
def testClearOnEmptyDictionary(self):
self._dict.Clear()
self.assertEqual(0, self._dict.Count)
def testClearRemovesRecords(self):
self._dict['b'] = 'b'
self._dict['a'] = 'a'
self._dict.Clear()
self.assertEqual(0, self._dict.Count)
class DictionaryFixture(unittest.TestCase):
def setUp(self):
self._dataDirectory = 'DictionaryFixture'
self._deleteDataDirectory()
def tearDown(self):
self._deleteDataDirectory()
def _deleteDataDirectory(self):
deleteDirectory(self._dataDirectory)
def disposeCloseTwice(self):
dict = PersistentDictionary[System.Guid,System.Int64](self._dataDirectory)
dict.Dispose()
dict.Dispose()
def testMultipleDictionaries(self):
dict1 = PersistentDictionary[System.Int32,System.String](self._dataDirectory + '\\a')
dict2 = PersistentDictionary[System.String,System.Int32](self._dataDirectory + '\\b')
dict1[0] = 'hello'
dict2['world'] = 1
self.assertEqual('hello', dict1[0])
self.assertEqual(1, dict2['world'])
dict1.Dispose()
dict2.Dispose()
def testCloseAndReopenEmptyDictionary(self):
dict = PersistentDictionary[System.DateTime,System.UInt16](self._dataDirectory)
dict.Dispose()
dict = PersistentDictionary[System.DateTime,System.UInt16](self._dataDirectory)
self.assertEqual(0, dict.Count)
dict.Dispose()
class DictionaryComparisonFixture(unittest.TestCase):
def setUp(self):
self._dataDirectory = 'DictionaryComparisonFixture'
self._deleteDataDirectory()
self._createDictionary()
self._expected = Dictionary[System.String,System.String]()
def tearDown(self):
self._closeDictionary()
self._deleteDataDirectory()
def _createDictionary(self):
self._dict = PersistentDictionary[System.String,System.String](self._dataDirectory)
def _closeDictionary(self):
self._dict.Dispose()
def _deleteDataDirectory(self):
deleteDirectory(self._dataDirectory)
def _compareWithExpected(self):
self.assertEqual(self._expected.Count, self._dict.Count)
for k in self._expected.Keys:
self.assertEqual(self._expected[k], self._dict[k])
def _insert(self, k, v):
self._expected[k] = v
self._dict[k] = v
def _delete(self, k):
self._expected.Remove(k)
self._dict.Remove(k)
def _clear(self):
self._expected.Clear()
self._dict.Clear()
def testEmptyDb(self):
self._compareWithExpected()
def testClear(self):
for i in xrange(256):
self._insert(str(i), repr(i))
self._compareWithExpected()
self._clear()
self._compareWithExpected()
def testInserts(self):
self._insert('a', '1234')
self._insert('z', '0xF00D')
self._insert('mmmmmm', 'donuts')
self._insert('IronPython', 'rocks')
self._compareWithExpected()
def testReplaceDelete(self):
self._insert('0', '')
self._insert('1', '1111111111')
self._insert('2', '222222222')
self._insert('3', '33333333')
self._insert('4', '4444444')
self._insert('5', '555555')
self._insert('5', '555555')
self._insert('5', 'foo')
self._insert('2', 'bar')
self._delete('4')
self._compareWithExpected()
def testCloseAndOpen(self):
for i in xrange(16):
self._insert(str(i), '?' * i)
self._compareWithExpected()
self._closeDictionary()
self._createDictionary()
self._compareWithExpected()
def testKeyIsCaseInsensitive(self):
self._insert('aaa', 'foo')
self._insert('aAa', 'bar')
self._compareWithExpected()
def testKeyRespectsSpaces(self):
self._insert(' x', 'foo')
self._insert('x', 'bar')
self._insert('x ', 'baz')
self._compareWithExpected()
def testKeyRespectsSymbols(self):
self._insert('QQQ.', 'foo')
self._insert('QQQ', 'bar')
self._insert('-QQQ', 'baz')
self._compareWithExpected()
def testRandomOperations(self):
keys = 'abcdefghijklmompqrstuvwzyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for i in xrange(12000):
k = random.choice(keys) * random.randint(1,2)
if random.random() < 0.005:
self._closeDictionary()
self._createDictionary()
elif random.random() < 0.01:
self._clear()
elif random.random() < 0.20:
if k in self._expected:
self._delete(k)
else:
self._compareWithExpected()
else:
v = random.choice('XYZ#@$%*.') * random.randint(0,1024)
self._insert(k,v)
self._compareWithExpected()
class MultiThreadingFixture(unittest.TestCase):
def setUp(self):
self._dataDirectory = 'MultiThreadingFixture'
self._deleteDataDirectory()
self._dict = PersistentDictionary[System.String,System.String](self._dataDirectory)
def tearDown(self):
self._dict.Dispose()
self._deleteDataDirectory()
def _deleteDataDirectory(self):
deleteDirectory(self._dataDirectory)
def _insertRange(self, low, high):
for i in xrange(low, high):
self._dict[str(i)] = str(i)
def _deleteRange(self, low, high):
for i in xrange(low, high):
self._dict.Remove(str(i))
def _retrieveAllRecords(self, n):
"""Check that key=value for all records and there are n records"""
self.assertEqual(n, self._dict.Count)
for i in self._dict:
self.assertEqual(i.Key, i.Value)
def _randomOperations(self):
keys = 'abcdefghijklmompqrstuvwzyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ-+'
for i in xrange(10000):
k = random.choice(keys) * random.randint(1,8)
if random.random() < 0.10:
self._dict.Remove(k)
else:
v = '#' * random.randint(256,1024)
self._dict[k] = v
def testMultiThreadedInserts(self):
threads = [threading.Thread(target = self._insertRange, args = (x*1000, (x+1) * 1000)) for x in range(4)]
for t in threads:
t.start()
d = {}
for i in xrange(4000):
d[str(i)] = str(i)
for t in threads:
t.join()
self.assertEqual(len(d), self._dict.Count)
for k in d.keys():
self.assertEqual(d[k], self._dict[k])
def testMultiThreadedReplaces(self):
for i in xrange(4000):
self._dict[str(i)] = 'XXXX'
threads = [threading.Thread(target = self._insertRange, args = (x*1000, (x+1) * 1000)) for x in range(4)]
for t in threads:
t.start()
d = {}
for i in xrange(4000):
d[str(i)] = str(i)
for t in threads:
t.join()
self.assertEqual(len(d), self._dict.Count)
for k in d.keys():
self.assertEqual(d[k], self._dict[k])
def testMultiThreadedRetrieves(self):
n = 4000
for i in xrange(n):
self._dict[str(i)] = str(i)
threads = [threading.Thread(target = self._retrieveAllRecords, args = (n,))]
for t in threads:
t.start()
for t in threads:
t.join()
def testMultiThreadedDeletes(self):
for i in xrange(4000):
self._dict[str(i)] = str(i)
threads = [threading.Thread(target = self._deleteRange, args = (x*1000, (x+1) * 1000)) for x in range(4)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, self._dict.Count)
def testRandomMultiThreadedOperations(self):
threads = [threading.Thread(target = self._randomOperations) for x in range(8)]
for t in threads:
t.start()
self._dict.Clear() # try a concurrent clear
for t in threads:
t.join()
class GenericDictionaryFixtureBase(unittest.TestCase):
def _deleteDataDirectory(self):
deleteDirectory(self._dataDirectory)
def _add(self, expected, actual, k, v):
"""Add (k,v). This fails if k already exists."""
actual.Add(k,v)
expected.Add(k,v)
def _set(self, expected, actual, k, v):
"""Set k = v."""
actual[k] = v
expected[k] = v
def _remove(self, expected, actual, k):
self.assertEqual(True, actual.Remove(k))
self.assertEqual(True, expected.Remove(k))
def _clear(self, expected, actual):
actual.Clear()
expected.Clear()
def _checkKeyIsNotPresent(self, dict, k):
self.assertEqual(False, dict.Keys.Contains(k))
self.assertEqual(False, dict.ContainsKey(k))
self.assertEqual(False, dict.TryGetValue(k)[0])
self.assertEqual(False, dict.Remove(k))
def _checkDuplicateKeyError(self, dict, k, v):
self.assertRaises(System.ArgumentException, dict.Add, k, v)
def _compareDictionaries(self, expected, actual):
self.assertEqual(expected.Count, actual.Count)
self.assertEqual(expected.Keys.Count, actual.Keys.Count)
self.assertEqual(expected.Values.Count, actual.Values.Count)
for i in expected:
self.assertEqual(True, actual.Contains(i))
self.assertEqual(True, actual.ContainsKey(i.Key))
self.assertEqual(True, actual.ContainsValue(i.Value))
self.assertEqual(True, actual.Keys.Contains(i.Key))
self.assertEqual(True, actual.Values.Contains(i.Value))
(f,v) = actual.TryGetValue(i.Key)
self.assertEqual(True, f)
self.assertEqual(i.Value, v)
self.assertEqual(i.Value, actual[i.Key])
for i in actual:
self.assertEqual(True, expected.ContainsKey(i.Key))
for k in actual.Keys:
self.assertEqual(True, expected.ContainsKey(k))
for v in actual.Values:
self.assertEqual(True, expected.Values.Contains(v))
def _doTest(self, expected, actual, keys, values):
# Compare empty
self._compareDictionaries(expected, actual)
# Insert with Add()
for k in keys:
v = random.choice(values)
self._add(expected, actual, k, v)
self._compareDictionaries(expected, actual)
# Replace with []
# Make sure to try setting every value
k = random.choice(keys)
for v in values:
self._set(expected, actual, k, v)
self._compareDictionaries(expected, actual)
# Delete key, reinsert with []
k = random.choice(keys)
v = random.choice(values)
self._checkDuplicateKeyError(actual, k, v)
self._remove(expected, actual, k)
self._checkKeyIsNotPresent(actual, k)
self._compareDictionaries(expected, actual)
self._set(expected, actual, k, v)
self._compareDictionaries(expected, actual)
# for i in actual:
# print '%s => %.32s' % (i.Key, i.Value)
# Clear
self._clear(expected, actual)
self._compareDictionaries(expected, actual)
def createDictAndTest(self, tkey, tvalue):
dict = PersistentDictionary[tkey,tvalue](self._dataDirectory)
try:
expected = Dictionary[tkey,tvalue]()
self._doTest(expected, dict, data[tkey], data[tvalue])
finally:
dict.Dispose()
class GenericDictionaryFixture(GenericDictionaryFixtureBase):
def setUp(self):
self._dataDirectory = 'GenericDictionaryFixture'
self._deleteDataDirectory()
self._dict = None
def tearDown(self):
self._deleteDataDirectory()
def createDictAndTest(self, tkey, tvalue):
dict = PersistentDictionary[tkey,tvalue](self._dataDirectory)
try:
expected = Dictionary[tkey,tvalue]()
self._doTest(expected, dict, data[tkey], data[tvalue])
finally:
dict.Dispose()
class SortedGenericDictionaryFixture(GenericDictionaryFixtureBase):
def setUp(self):
self._dataDirectory = 'SortedGenericDictionaryFixture'
self._deleteDataDirectory()
self._dict = None
def tearDown(self):
self._deleteDataDirectory()
def _compareDictionaries(self, expected, actual):
super(SortedGenericDictionaryFixture, self)._compareDictionaries(expected, actual)
for x,y in zip(expected.Keys, actual.Keys):
self.assertEqual(x, y)
def createDictAndTest(self, tkey, tvalue):
dict = PersistentDictionary[tkey,tvalue](self._dataDirectory)
try:
expected = SortedDictionary[tkey,tvalue]()
self._doTest(expected, dict, data[tkey], data[tvalue])
finally:
dict.Dispose()
class SortedGenericListFixture(SortedGenericDictionaryFixture):
def setUp(self):
self._dataDirectory = 'SortedGenericListFixture'
self._deleteDataDirectory()
self._dict = None
def tearDown(self):
self._deleteDataDirectory()
def createDictAndTest(self, tkey, tvalue):
dict = PersistentDictionary[tkey,tvalue](self._dataDirectory)
try:
expected = SortedList[tkey,tvalue]()
self._doTest(expected, dict, data[tkey], data[tvalue])
finally:
dict.Dispose()
keytypes = [
System.Boolean,
System.Byte,
System.Int16,
System.UInt16,
System.Int32,
System.UInt32,
System.Int64,
System.UInt64,
System.Single,
System.Double,
System.DateTime,
System.TimeSpan,
System.Guid,
System.String,
]
nullabletypes = [
System.Boolean,
System.Byte,
System.Int16,
System.UInt16,
System.Int32,
System.UInt32,
System.Int64,
System.UInt64,
System.Single,
System.Double,
System.DateTime,
System.TimeSpan,
System.Guid,
]
valuetypes = [
System.Boolean,
System.Byte,
System.Int16,
System.UInt16,
System.Int32,
System.UInt32,
System.Int64,
System.UInt64,
System.Single,
System.Double,
System.DateTime,
System.TimeSpan,
System.Guid,
System.String,
System.Decimal,
]
r = System.Random()
data = {}
data[System.Boolean] = [
True,
False]
data[System.Byte] = [
1,
2,
System.Byte.MinValue,
System.Byte.MaxValue,
r.Next(System.Byte.MinValue, System.Byte.MaxValue)]
data[System.Int16] = [
0,
1,
-1,
System.Int16.MinValue,
System.Int16.MaxValue,
r.Next(System.Int16.MinValue, System.Int16.MaxValue)]
data[System.UInt16] = [
1,
2,
System.UInt16.MinValue,
System.UInt16.MaxValue,
r.Next(System.UInt16.MinValue, System.UInt16.MaxValue)]
data[System.Int32] = [
0,
1,
-1,
System.Int32.MinValue,
System.Int32.MaxValue,
r.Next()]
data[System.UInt32] = [
1,
2,
System.UInt32.MinValue,
System.UInt32.MaxValue,
r.Next(0, System.Int32.MaxValue)]
data[System.Int64] = [
0,
1,
-1,
System.Int64.MinValue,
System.Int64.MaxValue,
r.Next()]
data[System.UInt64] = [
1,
2,
System.UInt64.MinValue,
System.UInt64.MaxValue,
r.Next(0, System.Int32.MaxValue)]
data[System.Single] = [
0,
1,
-1,
System.Single.MinValue,
System.Single.MaxValue,
r.Next()]
data[System.Double] = [
0,
1,
-1,
System.Math.PI,
System.Double.MinValue,
System.Double.MaxValue,
r.NextDouble()]
data[System.Decimal] = [
System.Decimal.MinValue,
System.Decimal.MaxValue,
System.Decimal.MinusOne,
System.Decimal.Zero,
System.Decimal.One,
System.Decimal(r.Next()),
System.Decimal(r.NextDouble())]
data[System.Guid] = [
System.Guid.Empty,
System.Guid.NewGuid()]
data[System.DateTime] = [
System.DateTime.MinValue,
System.DateTime.MaxValue,
System.DateTime.Now,
System.DateTime.UtcNow,
System.DateTime.Today]
data[System.TimeSpan] = [
System.TimeSpan.MinValue,
System.TimeSpan.MaxValue,
System.TimeSpan.FromDays(1),
System.TimeSpan.FromHours(1),
System.TimeSpan.FromMinutes(1),
System.TimeSpan.FromSeconds(1),
System.TimeSpan.FromMilliseconds(1),
System.TimeSpan.FromTicks(1),
System.TimeSpan(r.Next())]
data[System.String] = [
System.String.Empty,
'1',
'`',
'foo',
'bar',
'baz',
'space',
'space ',
'case',
'CASE',
'punctuation',
'punctuation!',
r.Next().ToString(),
r.NextDouble().ToString(),
    System.Guid.NewGuid().ToString(),
System.DateTime.Now.ToString(),
'#'*65000]
# Use this to create a unique closure for tkey and tvalue
def makef(tkey, tvalue):
return lambda self : self.createDictAndTest(tkey, tvalue)
# Make nullable data, which is the non-nullable data + None
for t in nullabletypes:
data[System.Nullable[t]] = list(data[t])
data[System.Nullable[t]].append(None)
valuetypes.append(System.Nullable[t])
# Create the test functions
for tkey in keytypes:
for tvalue in valuetypes:
name = 'test%s%s' % (tkey, tvalue)
setattr(GenericDictionaryFixture, name, makef(tkey, tvalue))
setattr(SortedGenericDictionaryFixture, name, makef(tkey, tvalue))
setattr(SortedGenericListFixture, name, makef(tkey, tvalue))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_11252 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 17:38:24 2017
@author: Phoebe
"""
import os
import time
import numpy as np
import pandas as pd
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
#from pycocotools import mask as maskUtils
#%%
debugfile('ild.py', args='train --dataset=E:\lung_data --model=imagenet', wdir=r'C:\Users\Phoebe Chen\Desktop\CNNNNN\Mask_RCNN-master')
#%%
from config import Config
import utils
import model as modellib
ROOT_DIR = 'C:\\Users\\Phoebe Chen\\Desktop\\CNNNNN\\Mask_RCNN-master'
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
class InferenceConfig(ILDConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=DEFAULT_LOGS_DIR)
model_path='C:\\Users\\Phoebe Chen\\Desktop\\CNNNNN\\Mask_RCNN-master\\mask_rcnn_coco.h5'
model.load_weights(model_path, by_name=True)
#%%
dataset='E:\lung_data'
dataset_train = ILDDataset()
dataset_train.load_ILD(dataset, "train")
#dataset_train.prepare()
# Validation dataset
dataset_val = ILDDataset()
dataset_train.load_ILD(dataset, "val")
#dataset_val.prepare()
#%%
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=40,
layers='heads') |
the-stack_0_11253 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 22 22:51:13 2021
@author: liujinli
"""
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error,r2_score
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
from sklearn.ensemble import RandomForestRegressor,AdaBoostRegressor
from matplotlib import pyplot as plt
from sklearn.linear_model import Lasso,Ridge,ElasticNet
from sklearn.svm import SVR
from tqdm import tqdm
import os
import random
import warnings
from mpl_toolkits.mplot3d import Axes3D
from sklearn.utils import shuffle
warnings.filterwarnings("ignore")
def seed_everything(seed=555):
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
# torch.manual_seed(seed)
# torch.backends.cudnn.deterministic = True
seed_everything()
df = pd.read_csv('清洗_2018Spring.csv')
df = shuffle(df)
train_df = df[:-14]
valid_df = df[-14:]
# print(train_df)
# print(valid_df)
train_y = train_df.pop('TN')
train_x = train_df.values
valid_y = valid_df.pop('TN')
valid_x = valid_df.values
lgb = LGBMRegressor()
lgb.fit(train_x,train_y)
pred = lgb.predict(valid_x)
# print('score:', mean_squared_error(valid_y,pred))
# xgb = XGBRegressor()
# xgb.fit(train_x,train_y)
# pred = xgb.predict(valid_x)
# print('score:', mean_squared_error(valid_y,pred))
rf = RandomForestRegressor()
rf.fit(train_x,train_y)
pred = rf.predict(valid_x)
# print('score:', mean_squared_error(valid_y,pred))
f, ax = plt.subplots(figsize=(7, 5))
ax.bar(range(len(rf.feature_importances_)),rf.feature_importances_)
ax.set_title("Feature Importances")
f.show()
# print(len(train_df.columns))
# print(len(rf.feature_importances_))
df_show = pd.DataFrame({'f_name':train_df.columns,'importance':rf.feature_importances_})
# print(df_show.sort_values('importance',ascending=False))
df_show = df_show.sort_values('importance',ascending=False)['f_name'].values
best_mse = 100
best_fnum = 4
plt.show()
plt.close()
df_show = pd.DataFrame({'f_name':train_df.columns,'importance':rf.feature_importances_})
# print(df_show.sort_values('importance',ascending=False))
df_show = df_show.sort_values('importance',ascending=True)
plt.show()
f, ax = plt.subplots(figsize=(15, 20))
print(df_show['importance'].values)
ax.barh(df_show['f_name'],df_show['importance'].values)
ax.set_title("Feature Importances")
f.show()
plt.show()
df_show = df_show.sort_values('importance',ascending=False)['f_name'].values
mse=[];r2=[]
for i in range(4,60):
choose_feature = df_show[:i]
train_x = train_df[choose_feature].values
valid_x = valid_df[choose_feature].values
lgb = LGBMRegressor()
lgb.fit(train_x,train_y)
lgb_pred = lgb.predict(valid_x)
# rf = RandomForestRegressor()
# rf = ElasticNet()
# rf.fit(train_x,train_y)
# rf_pred = rf.predict(valid_x)
pred = lgb_pred
mse.append( mean_squared_error(valid_y,pred))
r2.append(r2_score(valid_y,pred))
# print(f'n_num:{i},score:{mse}')
if(best_mse > mean_squared_error(valid_y,pred)):
best_mse = mean_squared_error(valid_y,pred)
best_fnum = i
print(f'best f_num:{best_fnum}, best mse:{best_mse}')
plt.plot(range(4,60), mse)
plt.title('feature performance')
plt.xlabel('feature number')
plt.ylabel('mse')
plt.show()
plt.close()
plt.plot(range(4,60), r2)
plt.title('feature performance')
plt.xlabel('feature number')
plt.ylabel('r2')
plt.show()
plt.close()
choose_feature = df_show[:best_fnum]
train_x = train_df[choose_feature].values
valid_x = valid_df[choose_feature].values
#min_child_samples=10,reg_alpha=0.03,reg_lambda=0
alpha=[];lamda=[];mse_loss=[];r2_loss=[]
for i in [0, 0.001, 0.01, 0.03, 0.08, 0.3, 0.5]:
for j in [0, 0.001, 0.01, 0.03, 0.08, 0.3, 0.5]:
lgb = LGBMRegressor(min_child_samples=10,reg_alpha=i,reg_lambda=j)
lgb.fit(train_x,train_y)
alpha.append(i)
lamda.append(j)
pred = lgb.predict(valid_x)
# model = AdaBoostRegressor(lgb,n_estimators=i)
# model.fit(train_x,train_y)
# pred = model.predict(valid_x)
mse = mean_squared_error(valid_y,pred)
mse_loss.append(mse)
r2 = r2_score(valid_y,pred)
r2_loss.append(r2)
#print(f'min_child_samples:{i},min_child_weights:{j},mse_score:{mse},r2_score:{r2}')
# print(df_show)
param_grid =[
{'max_depth':range(3,12),
'min_child_weight':range(4,32,4),
'reg_alpha':[x/100 for x in range(1,51,2)],
'reg_lambda':[x/100 for x in range(1,51,2)],
}
]
model = LGBMRegressor()
from sklearn.model_selection import GridSearchCV
print('grid search begin')
grid_search = GridSearchCV(model,param_grid,scoring='neg_mean_squared_error')
grid_search.fit(train_x,train_y)
print(f'best score:{grid_search.best_score_},best param:{grid_search.best_params_}')
def get_pic(model_name,show_name):
print(f'---------------{model_name} best params is searching-------------')
if(model_name=='lgb'):
u = [x/100 for x in range(1,51)]
v = [x/100 for x in range(1,51)]
elif(model_name == 'lasso'):
u = [x/100 for x in range(1,51)]
v = [x/1000000 for x in range(1,51)]
elif(model_name=='svr'):
u = [x for x in range(1,51)]
v = [x/100000 for x in range(1,51)]
elif(model_name=='xgboost'):
u = [x/100 for x in range(1,51)]
v = [x/100 for x in range(1,51)]
u, v = np.meshgrid(u, v)
print(u.shape,v.shape)
best_mse_i, best_mse_j, best_mse, best_r2 = 0, 0, 1000, 0
z = np.zeros_like(u)
z2=np.zeros_like(u)
print(z.shape)
for i in tqdm(range(len(u))):
for j in range(len(u[i])):
if(model_name=='lgb'):
model = LGBMRegressor(min_child_samples=10,reg_alpha=u[i][j],reg_lambda=v[i][j])
elif(model_name=='lasso'):
model = Lasso(alpha=u[i][j],tol=v[i][j])
elif(model_name =='svr'):
model = SVR(C=u[i][j],tol=v[i][j])
elif(model_name=='xgboost'):
model=XGBRegressor(max_depth=2,min_child_weight=28,reg_alpha=u[i][j],reg_lambda=v[i][j])
model.fit(train_x,train_y)
pred = model.predict(valid_x)
# model = AdaBoostRegressor(lgb,n_estimators=i)
# model.fit(train_x,train_y)
# pred = model.predict(valid_x)
mse = mean_squared_error(valid_y,pred)
r2=r2_score(valid_y,pred)
z[i][j] = mse
z2[i][j]=r2
if(best_mse > mse):
best_mse = mse
best_mse_i = i
best_mse_j = j
best_r2 = r2
print('---------------------------------------')
# plt.figure()
# ax = Axes3D(fig)
plt.ion()
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111, projection='3d')
ax.set_title(model_name)
if(model_name=='lgb'):
ax.set_xlabel('alpha')
ax.set_ylabel('lambda')
print(f'reg_alpha={u[best_mse_i][best_mse_j]},reg_lambda={v[best_mse_i][best_mse_j]},best mse:{best_mse},best r2:{best_r2}')
elif(model_name=='lasso'):
ax.set_xlabel('alpha')
ax.set_ylabel('tol')
print(f'alpha={u[best_mse_i][best_mse_j]},tol={v[best_mse_i][best_mse_j]},best mse:{best_mse},best r2:{best_r2}')
elif(model_name =='svr'):
ax.set_xlabel('C')
ax.set_ylabel('tol')
print(f'C={u[best_mse_i][best_mse_j]},tol={v[best_mse_i][best_mse_j]},best mse:{best_mse},best r2:{best_r2}')
elif(model_name =='xgboost'):
ax.set_xlabel('reg_alpha')
ax.set_ylabel('reg_lambda')
print(f'reg_alpha={u[best_mse_i][best_mse_j]},reg_lambda={v[best_mse_i][best_mse_j]},best mse:{best_mse},best r2:{best_r2}')
if(show_name == 'mse'):
ax.set_zlabel('mse')
surf=ax.plot_surface(u, v, z, cmap='jet')
fig.colorbar(surf, shrink=0.4, aspect=6)
plt.show()
else:
ax.set_zlabel('r2')
surf=ax.plot_surface(u, v, z2, cmap='jet')
fig.colorbar(surf, shrink=0.4, aspect=6)
plt.show()
# ax.close()
ax.cla()
plt.cla()
plt.close('all')
get_pic('lgb','mse')
get_pic('lasso','mse')
get_pic('xgboost','mse')
get_pic('svr','mse')
get_pic('lgb','r2')
get_pic('lasso','r2')
get_pic('xgboost','r2')
get_pic('svr','r2')
z=[];z2=[]
def get_2dpic(model_name,show_name):
plt.title(model_name)
z=[];z2=[]
if(model_name=='lgb'):
u = [x/100 for x in range(1,51)]
v = [x/100 for x in range(1,51)]
elif(model_name == 'lasso'):
u = [x/100 for x in range(1,51)]
v = [x/1000000 for x in range(1,51)]
elif(model_name=='svr'):
u = [x for x in range(1,51)]
v = [x/100000 for x in range(1,51)]
elif(model_name=='xgboost'):
u = [x/100 for x in range(1,51)]
v = [x/100 for x in range(1,51)]
best_mse_i, best_mse_j, best_mse, best_r2 = 0, 0, 1000, 0
if show_name=='mse':
plt.ylabel('mse')
for i in u:
if(model_name=='lgb'):
model = LGBMRegressor(min_child_samples=10,reg_alpha=i)
plt.xlabel('reg_alpha')
elif(model_name=='lasso'):
model = Lasso(alpha=i)
plt.xlabel('alpha')
elif(model_name =='svr'):
model = SVR(C=i)
plt.xlabel('c')
elif(model_name=='xgboost'):
plt.xlabel('reg_alpha')
model=XGBRegressor(max_depth=2,min_child_weight=28,reg_alpha=i)
model.fit(train_x,train_y)
pred = model.predict(valid_x)
mse = mean_squared_error(valid_y,pred)
r2=r2_score(valid_y,pred)
z.append(mse)
z2.append(r2)
plt.plot(u,z)
min_indx=np.argmin(z)
plt.plot(u[min_indx],z[min_indx],'ks')
show_max='['+str(np.round((u[min_indx]),2))+' '+str(np.round((z[min_indx]),3))+']'
plt.annotate(show_max,xytext=(u[min_indx],z[min_indx]),xy=(u[min_indx],z[min_indx]))
plt.show()
plt.close()
elif show_name=='r2':
plt.ylabel('r2')
for j in v:
if(model_name=='lgb'):
model = LGBMRegressor(min_child_samples=10,reg_lambda=j)
plt.xlabel('reg_lambda')
elif(model_name=='lasso'):
model = Lasso(tol=j)
plt.xlabel('tol')
elif(model_name =='svr'):
model = SVR(tol=j)
plt.xlabel('tol')
elif(model_name=='xgboost'):
model=XGBRegressor(max_depth=2,min_child_weight=28,reg_lambda=j)
plt.xlabel('reg_lambda')
model.fit(train_x,train_y)
pred = model.predict(valid_x)
mse = mean_squared_error(valid_y,pred)
r2=r2_score(valid_y,pred)
z.append( mse)
z2.append(r2)
plt.plot(v,z2)
max_indx=np.argmax(z2)
plt.plot(v[max_indx],z2[max_indx],'ks')
show_max='['+str(np.round(v[max_indx],2))+' '+str(np.round(z2[max_indx],3))+']'
plt.annotate(show_max,xytext=(v[max_indx],z2[max_indx]),xy=(v[max_indx],z2[max_indx]))
plt.show()
plt.close()
get_2dpic('lgb','mse')
get_2dpic('lasso','mse')
get_2dpic('xgboost','mse')
get_2dpic('svr','mse')
get_2dpic('lgb','r2')
get_2dpic('lasso','r2')
get_2dpic('xgboost','r2')
get_2dpic('svr','r2')
# plt.figure()
# ax = Axes3D(fig)
# ax.plot_surface(u,v,z2,cmap='jet')
# plt.show()
model = LGBMRegressor(min_child_samples=10)
model.fit(train_x,train_y)
def get_pred(model,test_df):
test_x = test_df[choose_feature].values
test_pred = model.predict(test_x)
return test_pred
test_df = pd.read_csv('201809.csv')
get_pred(model,test_df) |
the-stack_0_11255 | import asyncio
import shutil
import subprocess
from pathlib import Path
from typing import Any, List
from jinja2 import Environment, PackageLoader
from . import logger
from .exceptions import FetchError, GenerateError, GenerateScriptError
from .fetcher import fetch
from .parser import Blueprint
_environment = Environment(loader=PackageLoader("ops2deb", "templates"))
def _format_command_output(output: str) -> str:
lines = output.splitlines()
output = "\n ".join([line for line in lines])
return "> " + output
class SourcePackage:
def __init__(self, blueprint: Blueprint, work_directory: Path):
self.directory_name = f"{blueprint.name}_{blueprint.version}_{blueprint.arch}"
self.output_directory = (work_directory / self.directory_name).absolute()
self.debian_directory = self.output_directory / "debian"
self.src_directory = self.output_directory / "src"
self.tmp_directory = Path(f"/tmp/ops2deb_{self.directory_name}")
self.debian_version = f"{blueprint.version}-{blueprint.revision}~ops2deb"
self.blueprint = blueprint.render(self.src_directory)
def render_tpl(self, template_name: str) -> None:
template = _environment.get_template(f"{template_name}.j2")
package = self.blueprint.dict(exclude={"fetch", "script"})
package.update({"version": self.debian_version})
template.stream(package=package).dump(str(self.debian_directory / template_name))
def init(self) -> None:
shutil.rmtree(self.debian_directory, ignore_errors=True)
self.debian_directory.mkdir(parents=True)
shutil.rmtree(self.tmp_directory, ignore_errors=True)
self.tmp_directory.mkdir()
shutil.rmtree(self.src_directory, ignore_errors=True)
self.src_directory.mkdir(parents=True)
for path in ["usr/bin", "usr/share", "usr/lib"]:
(self.src_directory / path).mkdir(parents=True)
async def fetch(self) -> "SourcePackage":
if (remote_file := self.blueprint.fetch) is not None:
await fetch(
url=remote_file.url,
expected_hash=remote_file.sha256,
save_path=self.tmp_directory,
)
return self
def generate(self) -> None:
logger.title(f"Generating source package {self.directory_name}...")
# run script
for line in self.blueprint.script:
logger.info(f"$ {line}")
result = subprocess.run(
line, shell=True, cwd=self.tmp_directory, capture_output=True
)
if stdout := result.stdout.decode():
logger.info(_format_command_output(stdout))
if stderr := result.stderr.decode():
logger.error(_format_command_output(stderr))
if result.returncode:
raise GenerateScriptError
# render debian/* files
for template in [
"changelog",
"control",
"rules",
"compat",
"install",
"lintian-overrides",
]:
self.render_tpl(template)
def generate(blueprints: List[Blueprint], work_directory: Path) -> None:
packages = [SourcePackage(b, work_directory) for b in blueprints]
# make sure we generate source packages in a clean environment
# without artifacts from previous builds
for package in packages:
package.init()
# run fetch instructions (download, verify, extract) in parallel
file_count = sum([1 for b in blueprints if b.fetch is not None])
logger.title(f"Fetching {file_count} files...")
async def fetch_all() -> Any:
return await asyncio.gather(
*[p.fetch() for p in packages], return_exceptions=True
)
results = asyncio.run(fetch_all())
errors = [e for e in results if isinstance(e, Exception)]
for error in errors:
if not isinstance(error, FetchError):
raise error
# run scripts, build debian/* files
packages = [p for p in results if isinstance(p, SourcePackage)]
for package in packages:
package.generate()
if errors:
raise GenerateError(f"{len(errors)} failures occurred")
|
the-stack_0_11256 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBottleneck(PythonPackage):
"""A collection of fast NumPy array functions written in Cython."""
homepage = "https://pypi.python.org/pypi/Bottleneck/1.0.0"
url = "https://pypi.io/packages/source/B/Bottleneck/Bottleneck-1.0.0.tar.gz"
version('1.2.1', sha256='6efcde5f830aed64feafca0359b51db0e184c72af8ba6675b4a99f263922eb36')
version('1.0.0', '380fa6f275bd24f27e7cf0e0d752f5d2')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
|
the-stack_0_11258 | '''
Python Exercise 101: Create a program with a function called voto() that receives a person's birth
year as a parameter and returns a string indicating whether that person's vote is NEGADO (denied),
OPCIONAL (optional) or OBRIGATÓRIO (mandatory) in elections.
'''
def voto(ano):
from datetime import date
print('-='* 15)
id = date.today().year - ano
if id < 16:
return f'Com {id} anos: NÃO VOTA!'
elif 16 <= id < 18 or id > 65:
return f'Com {id} anos: VOTO OPCIONAL!'
else:
return f'Com {id} anos: VOTO OBRIGATÓRIO!'
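# Illustrative note: anyone born 30 years before the current year falls into the
# mandatory-vote branch; ages under 16 are denied, and 16-17 or over 65 are optional.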
nasc = int(input('Em que ano você nasceu? '))
print(voto(nasc)) |
the-stack_0_11259 | """Environment to render templates"""
import json
from pathlib import Path
from sys import executable
from diot import Diot, OrderedDiot
from pyppl.template import DEFAULT_ENVS
__all__ = []
def rimport(*paths):
rimport_rfunc = f"""
if (!exists('..rimport..') || !is.function(..rimport..)) {{
reticulate::use_python({executable!r}, required = TRUE)
..bioprocs.. = reticulate::import('bioprocs')
..rimport.. = function(...) {{
for (rfile in list(...)) {{
source(file.path(..bioprocs..$HERE, 'utils', rfile))
}}
}}
}}
"""
pathstr = ', '.join(f'{path!r}' for path in ((str(path) for path in paths)))
return f"""
{rimport_rfunc}
..rimport..({pathstr})
"""
def bashimport(*paths):
bashimport_bashfunc = f"""
type __bashimport__ 1>&2 2>/dev/null
if [ $? -ne 0 ]; then
__python__={executable!r}
__bioprocsdir__=$(exec $__python__ -c 'import bioprocs; print(bioprocs.HERE)')
function __bashimport__() {{
for src in "$@"; do
source $__bioprocsdir__/utils/$src
done
}}
fi
"""
pathstr = ' '.join(f'{path!r}' for path in ((str(path) for path in paths)))
return f"""
{bashimport_bashfunc}
__bashimport__ {pathstr}
"""
def read(var):
"""Read the contents from a file"""
with open(var) as fvar:
return fvar.read()
def readlines(var, skip_empty_lines = True):
"""Read the lines from a file"""
ret = []
with open(var) as fvar:
for line in fvar:
line = line.rstrip('\n\r')
if not line and skip_empty_lines:
continue
ret.append(line)
return ret
def basename(var, orig = False):
"""Get the basename of a path"""
bname = Path(var).name
if orig or not bname.startswith('['):
return bname
return bname[bname.find(']')+1:]
def filename(var, orig = False, dot = -1):
"""
Return the stem of the basename (stripping extension(s))
@params:
`var`: The path
`orig`: If the path is a renamed file (like: `origin[1].txt`),
- whether return its original filename or the parsed filename (`origin.txt`)
`dot`: Strip to which dot.
- `-1`: the last one
- `-2`: the 2nd last one ...
- `1` : remove all dots.
"""
bname = basename(var, orig)
if '.' not in bname:
return bname
return '.'.join(bname.split('.')[0:dot])
def prefix(var, orig = False, dot = -1):
"""Get the prefix part of a path"""
return str(Path(var).parent.joinpath(filename(var, orig, dot)))
def R(var, ignoreintkey = True):
"""Convert a value into R values"""
if var is True:
return 'TRUE'
if var is False:
return 'FALSE'
if var is None:
return 'NULL'
if isinstance(var, str):
if var.upper() in ['+INF', 'INF']:
return 'Inf'
if var.upper() == '-INF':
return '-Inf'
if var.upper() == 'TRUE':
return 'TRUE'
if var.upper() == 'FALSE':
return 'FALSE'
if var.upper() == 'NA' or var.upper() == 'NULL':
return var.upper()
if var.startswith('r:') or var.startswith('R:'):
return str(var)[2:]
return repr(str(var))
if isinstance(var, Path):
return repr(str(var))
if isinstance(var, (list, tuple, set)):
return 'c({})'.format(','.join([R(i) for i in var]))
if isinstance(var, dict):
# list allow repeated names
return 'list({})'.format(','.join([
'`{0}`={1}'.format(
k,
R(v)) if isinstance(k, int) and not ignoreintkey else \
R(v) if isinstance(k, int) and ignoreintkey else \
'`{0}`={1}'.format(str(k).split('#')[0], R(v))
for k, v in sorted(var.items())]))
return repr(var)
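# Illustrative examples (sketch): R(True) -> 'TRUE'; R("foo") -> "'foo'";
# R([1, 2]) -> 'c(1,2)'; R({'a': 1}) -> 'list(`a`=1)'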
def Rlist(var, ignoreintkey = True): # pylint: disable=invalid-name
"""Convert a dict into an R list"""
assert isinstance(var, (list, tuple, set, dict))
if isinstance(var, dict):
return R(var, ignoreintkey)
return 'as.list({})'.format(R(var, ignoreintkey))
def render(var, data = None):
"""
Render a template variable, using the shared environment
"""
if not isinstance(var, str):
return var
import inspect
from pyppl.template import TemplateJinja2, TemplateLiquid
frames = inspect.getouterframes(inspect.currentframe())
data = data or {}
for frame in frames:
lvars = frame[0].f_locals
if lvars.get('__engine') == 'liquid':
evars = lvars.get('_liquid_context', {})
if 'true' in evars:
del evars['true']
if 'false' in evars:
del evars['false']
if 'nil' in evars:
del evars['nil']
if '_liquid_liquid_filters' in evars:
del evars['_liquid_liquid_filters']
break
if '_Context__self' in lvars:
evars = dict(lvars['_Context__self'])
break
engine = evars.get('__engine')
if not engine:
raise RuntimeError(
"I don't know which template engine to use to render {}...".format(var[:10]))
engine = TemplateJinja2 if engine == 'jinja2' else TemplateLiquid
return engine(var, **evars).render(data)
def box(var):
"""
Turn a dict into a Diot object
"""
from pyppl.utils import Diot
if not isinstance(var, dict):
raise TypeError('Cannot coerce non-dict object to Diot.')
return 'Diot(%r)' % var.items()
def obox(var):
"""
Turn a dict into an ordered Diot object
"""
if not isinstance(var, dict):
raise TypeError('Cannot coerce non-dict object to OrderedDiot.')
return 'OrderedDiot(%r)' % var.items()
def glob1(*paths, first = True):
"""
Return the paths matches the paths
"""
assert len(paths) >= 2
paths = list(paths)
path0 = paths.pop(0)
pattern = paths.pop(-1)
ret = list(Path(path0).joinpath(*paths).glob(pattern))
if ret and first:
return ret[0] # Path object
if not ret and first:
return '__NoNeXiStFiLe__'
return ret
def array_join(var, element_quote = None, all_quote = None, separator = ' '):
var = ( repr(str(element)) if element_quote in ("'", 'single') else \
json.dumps(str(element)) if element_quote in ('"', 'double') else \
element for element in var)
var = separator.join(var)
if all_quote in ("'", 'single'):
return repr(var)
if all_quote in ('"', 'double'):
return json.dumps(var)
return var
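# Illustrative examples (sketch):
# array_join(['a b', 'c'], element_quote="'") -> "'a b' 'c'"
# array_join(['a', 'b'], separator=',')       -> 'a,b'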
TEMPLATE_ENVS = dict(
R = R,
#Rvec = R, # will be deprecated!
Rlist = Rlist,
realpath = lambda var: Path(var).resolve().as_posix(),
dirname = lambda var: Path(var).parent.as_posix(),
# /a/b/c[1].txt => c.txt
basename = basename,
box = box,
obox = obox,
stem = filename,
# /a/b/c.d.e.txt => c
stem2 = lambda var, orig = False, dot = 1: filename(var, orig, dot),
# /a/b/c.txt => .txt
ext = lambda var: Path(var).suffix,
glob1 = glob1,
# /a/b/c[1].txt => /a/b/c
prefix = prefix,
# /a/b/c.d.e.txt => /a/b/c
prefix2 = lambda var, orig = False, dot = 1: prefix(var, orig, dot),
# double quote string
quote = lambda var: json.dumps(str(var)),
squote = lambda var: repr(str(var)),
json = json.dumps,
read = read,
readlines = readlines,
render = render,
array_join = array_join,
rimport = rimport,
bashimport = bashimport,
)
# aliases or reuses
TEMPLATE_ENVS['readlink'] = TEMPLATE_ENVS['realpath']
TEMPLATE_ENVS['parent'] = TEMPLATE_ENVS['dirname']
TEMPLATE_ENVS['bn'] = TEMPLATE_ENVS['basename']
TEMPLATE_ENVS['filename'] = TEMPLATE_ENVS['stem']
TEMPLATE_ENVS['fn'] = TEMPLATE_ENVS['stem']
TEMPLATE_ENVS['filename2'] = TEMPLATE_ENVS['stem2']
TEMPLATE_ENVS['fn2'] = TEMPLATE_ENVS['stem2']
TEMPLATE_ENVS['ext2'] = lambda var: TEMPLATE_ENVS['ext'](var).lstrip('.')
DEFAULT_ENVS.update(TEMPLATE_ENVS)
|
the-stack_0_11263 | """
Choice enums, used for handling constant values.
"""
import collections
from enum import Enum
from typing import Dict, Tuple
__all__ = [
"ChoicesValue",
"ChoicesEnum",
]
ChoicesValue = collections.namedtuple("choices_value", ["id", "name"])
class ChoicesEnum(Enum):
@classmethod
def _get_members(cls):
return cls._members.value
@classmethod
def get_choices(cls) -> Tuple:
members = cls._get_members()
result = [(member.id, member.name) for member in members]
return tuple(result)
@classmethod
def get_dict_choices(cls) -> Dict:
members = cls._get_members()
result = {member.id: member.name for member in members}
return result
@classmethod
def get_choices_drop_down_list(cls):
members = cls._get_members()
result = [{"id": member.id, "name": member.name} for member in members]
return result
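# Illustrative usage (sketch; Color and its members below are hypothetical):
# class Color(ChoicesEnum):
#     _members = [ChoicesValue(id=1, name='red'), ChoicesValue(id=2, name='blue')]
# Color.get_choices()      -> ((1, 'red'), (2, 'blue'))
# Color.get_dict_choices() -> {1: 'red', 2: 'blue'}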
|
the-stack_0_11266 | import socket
import sys
send_response = True
default_response_str = ''
default_response_bytes = default_response_str.encode('utf-8')
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(10)
# Bind the socket to the port
server_address = ('localhost', 8001)
print(f"{sys.stderr}, 'starting up on %s port %s' - {server_address}")
sock.bind(server_address)
while True:
try:
data, address = sock.recvfrom(4096)
print(f"received %s bytes from {address}")
if data:
print(f"data:{data}")
if send_response:
sent = sock.sendto(default_response_bytes, address)
except KeyboardInterrupt:
print("Exiting via interrupt")
sys.exit()
except socket.timeout as e:
sys.exit()
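# Illustrative client sketch (assumes the server above is listening on localhost:8001):
# client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# client.sendto(b'hello', ('localhost', 8001))
# reply, _ = client.recvfrom(4096)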
|
the-stack_0_11267 | #!/usr/bin/env python
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
from argparse import ArgumentParser, SUPPRESS
from openvino.inference_engine import IECore
from action_recognition_demo.models import IEModel
from action_recognition_demo.result_renderer import ResultRenderer
from action_recognition_demo.steps import run_pipeline
from os import path
def video_demo(encoder, decoder, videos, fps=30, labels=None):
"""Continuously run demo on provided video list"""
result_presenter = ResultRenderer(labels=labels)
run_pipeline(videos, encoder, decoder, result_presenter.render_frame, fps=fps)
def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m_en", "--m_encoder", help="Required. Path to encoder model", required=True, type=str)
args.add_argument("-m_de", "--m_decoder", help="Required. Path to decoder model", required=True, type=str)
args.add_argument("-i", "--input",
help="Required. Id of the video capturing device to open (to open default camera just pass 0), "
"path to a video or a .txt file with a list of ids or video files (one object per line)",
required=True, type=str)
args.add_argument("-l", "--cpu_extension",
help="Optional. For CPU custom layers, if any. Absolute path to a shared library with the "
"kernels implementation.", type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Specify a target device to infer on. CPU, GPU, FPGA, HDDL or MYRIAD is "
"acceptable. The demo will look for a suitable plugin for the device specified. "
"Default value is CPU",
default="CPU", type=str)
args.add_argument("--fps", help="Optional. FPS for renderer", default=30, type=int)
args.add_argument("-lb", "--labels", help="Optional. Path to file with label names", type=str)
return parser
def main():
args = build_argparser().parse_args()
full_name = path.basename(args.input)
extension = path.splitext(full_name)[1]
if '.txt' in extension:
with open(args.input) as f:
videos = [line.strip() for line in f.read().split('\n')]
else:
videos = [args.input]
if not args.input:
raise ValueError("--input option is expected")
if args.labels:
with open(args.labels) as f:
labels = [l.strip() for l in f.read().strip().split('\n')]
else:
labels = None
ie = IECore()
if 'MYRIAD' in args.device:
myriad_config = {"VPU_HW_STAGES_OPTIMIZATION": "YES"}
ie.set_config(myriad_config, "MYRIAD")
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, "CPU")
decoder_target_device = "CPU"
if args.device != 'CPU':
encoder_target_device = args.device
else:
encoder_target_device = decoder_target_device
encoder_xml = args.m_encoder
encoder_bin = args.m_encoder.replace(".xml", ".bin")
decoder_xml = args.m_decoder
decoder_bin = args.m_decoder.replace(".xml", ".bin")
encoder = IEModel(encoder_xml, encoder_bin, ie, encoder_target_device,
num_requests=(3 if args.device == 'MYRIAD' else 1))
decoder = IEModel(decoder_xml, decoder_bin, ie, decoder_target_device, num_requests=2)
video_demo(encoder, decoder, videos, args.fps, labels)
if __name__ == '__main__':
sys.exit(main() or 0)
|
the-stack_0_11269 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines generic plotters.
"""
import collections
import importlib
from pymatgen.util.plotting import pretty_plot
class SpectrumPlotter:
"""
Class for plotting Spectrum objects and subclasses. Note that the interface
is extremely flexible given that there are many different ways in which
people want to view spectra. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = SpectrumPlotter()
# Adds a DOS (A kind of spectra) with a label.
plotter.add_spectrum("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompleteDos.get_spd/element/others_dos().
plotter.add_spectra({"dos1": dos1, "dos2": dos2})
"""
def __init__(self, xshift=0.0, yshift=0.0, stack=False, color_cycle=("qualitative", "Set1_9")):
"""
Args:
xshift (float): A shift that is applied to the x values. This is
commonly used to shift to an arbitrary zero. E.g., zeroing at the
Fermi energy in DOS, or at the absorption edge in XAS spectra. The
same xshift is applied to all spectra.
yshift (float): A shift that is applied to the y values. This is
commonly used to displace spectra for easier visualization.
Successive spectra are applied successive shifts.
stack (bool): Whether to stack plots rather than simply plot them.
For example, DOS plot can usually be stacked to look at the
contribution of each orbital.
color_cycle (str): Default color cycle to use. Note that this can be
overridden
"""
self.xshift = xshift
self.yshift = yshift
self.stack = stack
mod = importlib.import_module("palettable.colorbrewer.%s" % color_cycle[0])
self.colors_cycle = getattr(mod, color_cycle[1]).mpl_colors
self.colors = []
self._spectra = collections.OrderedDict()
def add_spectrum(self, label, spectrum, color=None):
"""
Adds a Spectrum for plotting.
Args:
label (str): Label for the Spectrum. Must be unique.
spectrum: Spectrum object
color (str): This is passed on to matplotlib. E.g., "k--" indicates
a dashed black line. If None, a color will be chosen based on
the default color cycle.
"""
self._spectra[label] = spectrum
self.colors.append(color or self.colors_cycle[len(self._spectra) % len(self.colors_cycle)])
def add_spectra(self, spectra_dict, key_sort_func=None):
"""
Add a dictionary of spectra, with an optional sorting function for the
keys.
Args:
spectra_dict: dict of {label: Spectrum}
key_sort_func: function used to sort the spectra_dict keys.
"""
if key_sort_func:
keys = sorted(spectra_dict.keys(), key=key_sort_func)
else:
keys = spectra_dict.keys()
for label in keys:
self.add_spectrum(str(label) + ' K', spectra_dict[label])
def get_plot(self, xlim=None, ylim=None):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = pretty_plot(7, 0)
base = 0.0
i = 0
for key, sp in self._spectra.items():
if not self.stack:
plt.plot(
sp.x,
sp.y + self.yshift * i,
color=self.colors[i],
label=str(key),
linewidth=3,
)
else:
plt.fill_between(
sp.x,
base,
sp.y + self.yshift * i,
color=self.colors[i],
label=str(key),
linewidth=3,
)
base = sp.y + base
plt.xlabel('Número de onda ' + r'($cm^{-1}$)')
plt.ylabel('Intensidadade (u.a.)')
i += 1
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
"""
*************************************************************************
Configuration used to order the legend entries
*************************************************************************
"""
# current_handles, current_labels = plt.gca().get_legend_handles_labels()
# reversed_handles = list(reversed(current_handles))
# reversed_labels = list(reversed(current_labels))
# plt.legend(reversed_handles, reversed_labels)
# ***********************************************************************
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", **kwargs):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
"""
plt = self.get_plot(**kwargs)
plt.savefig(filename, format=img_format)
def show(self, **kwargs):
"""
Show the plot using matplotlib.
"""
plt = self.get_plot(**kwargs)
plt.show()
|
the-stack_0_11271 | # Exercise OLS (version without functions
# Load the data
x = [9.55, 9.36, 0.2, 2.06, 5.89, 9.3, 4.74, 2.43, 6.5, 4.77]
y = [15.28, 16.16, 1.2, 5.14, 9.82, 13.88, 6.3, 3.71, 9.96, 9]
# Let us compute the average of x
sum_x = 0
for i in x:
sum_x +=i
mean_x = sum_x/len(x)
# Let us compute the average of y
sum_y = 0
for i in y:
sum_y +=i
mean_y = sum_y/len(y)
# Let us compute the numerator and the denominator of the beta estimator:
numerator = 0
denominator = 0
for i in range(0,len(x)): # here I use the index for-loop to be able to use both x and y
numerator += (y[i]-mean_y)*(x[i]-mean_x)
denominator += (x[i]-mean_x)**2
beta = numerator / denominator
# Now get the intercept
alpha = mean_y - beta * mean_x
# Print the output
print("Regression analysis y = alpha + beta*x + u")
print("------------------------------------------")
print("x\t%10.5f" % beta)
print("const\t%10.5f" % alpha)
print("------------------------------------------")
|
the-stack_0_11272 | import torch
import torch.nn as nn
import numpy as np
from torchsummary import summary
def double_conv(in_c, out_c):
block = nn.Sequential(nn.Conv2d(in_c, out_c, kernel_size = 3, bias = False),
nn.BatchNorm2d(out_c),
nn.ReLU(inplace = True),
nn.Conv2d(out_c, out_c, kernel_size = 3, bias = False),
nn.BatchNorm2d(out_c),
nn.ReLU(inplace = True)
)
return block
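# Note: both 3x3 convolutions above are unpadded, so each double_conv block shrinks
# the spatial dimensions by 4 pixels (2 per conv); this is why crop() is needed
# before concatenating encoder and decoder features.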
def crop(input, target):
input_size = input.size()[2]
target_size = target.size()[2]
if input_size % 2 != 0:
alpha = int(np.ceil((input_size - target_size) / 2))
beta = int((input_size - target_size) / 2)
return input[:, :, beta:input_size-alpha, beta:input_size-alpha]
delta = (input_size - target_size) // 2
return input[:, :, delta:input_size-delta, delta:input_size-delta]
class UNet(nn.Module):
def __init__(self, num_classes):
super(UNet, self).__init__()
self.num_classes = num_classes
self.maxpool = nn.MaxPool2d(kernel_size = 2, stride = 2)
#Encoder
self.down_conv1 = double_conv(in_c = 1, out_c = 64)
self.down_conv2 = double_conv(in_c = 64, out_c = 128)
self.down_conv3 = double_conv(in_c = 128, out_c = 256)
self.down_conv4 = double_conv(in_c = 256, out_c = 512)
self.down_conv5 = double_conv(in_c = 512, out_c = 1024)
#Decoder
self.tconv1 = nn.ConvTranspose2d(in_channels = 1024, out_channels = 512, kernel_size = 2, stride = 2)
self.upconv1 = double_conv(in_c = 1024, out_c = 512)
self.tconv2 = nn.ConvTranspose2d(in_channels = 512, out_channels = 256, kernel_size = 2, stride = 2)
self.upconv2 = double_conv(in_c = 512, out_c = 256)
self.tconv3 = nn.ConvTranspose2d(in_channels = 256, out_channels = 128, kernel_size = 2, stride = 2)
self.upconv3 = double_conv(in_c = 256, out_c = 128)
self.tconv4 = nn.ConvTranspose2d(in_channels = 128, out_channels = 64, kernel_size = 2, stride = 2)
self.upconv4 = double_conv(in_c = 128, out_c = 64)
self.final = nn.Conv2d(in_channels = 64, out_channels = self.num_classes, kernel_size = 1)
def forward(self, x):
x1 = self.down_conv1(x)
x2 = self.maxpool(x1)
x3 = self.down_conv2(x2)
x4 = self.maxpool(x3)
x5 = self.down_conv3(x4)
x6 = self.maxpool(x5)
x7 = self.down_conv4(x6)
x8 = self.maxpool(x7)
x9 = self.down_conv5(x8)
y = self.tconv1(x9)
y1 = self.upconv1(torch.cat([crop(x7, y),y], dim = 1))
y2 = self.tconv2(y1)
y3 = self.upconv2(torch.cat([crop(x5,y2), y2], dim = 1))
y4 = self.tconv3(y3)
y5 = self.upconv3(torch.cat([crop(x3,y4), y4], dim = 1))
y6 = self.tconv4(y5)
y7 = self.upconv4(torch.cat([crop(x1,y6), y6], dim = 1))
out = self.final(y7)
return out
def test():
ip = torch.randn((1,1,572,572))
model = UNet(2)
print(summary(model, (1, 572, 572), device = 'cpu'))
print(model(ip).shape)
if __name__ == '__main__':
test() |
the-stack_0_11276 | class Solution:
def trap(self, height: List[int]) -> int:
n = len(height)
l = [0] * n # l[i] := max(height[0..i])
r = [0] * n # r[i] := max(height[i..n))
for i, h in enumerate(height):
l[i] = h if i == 0 else max(h, l[i - 1])
for i, h in reversed(list(enumerate(height))):
r[i] = h if i == n - 1 else max(h, r[i + 1])
return sum(min(l[i], r[i]) - h
for i, h in enumerate(height))
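# Illustrative check (classic example): Solution().trap([0,1,0,2,1,0,1,3,2,1,2,1]) == 6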
|
the-stack_0_11277 | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import specs.folly as folly
import specs.fizz as fizz
import specs.sodium as sodium
import specs.wangle as wangle
import specs.zstd as zstd
from shell_quoting import ShellQuoted
def fbcode_builder_spec(builder):
# This API should change rarely, so build the latest tag instead of master.
builder.add_option(
'no1msd/mstch:git_hash',
ShellQuoted('$(git describe --abbrev=0 --tags)')
)
builder.add_option(
'rsocket/rsocket-cpp/build:cmake_defines', {'BUILD_TESTS': 'OFF'}
)
builder.add_option('krb5/krb5:git_hash', 'krb5-1.16.1-final')
return {
'depends_on': [folly, fizz, sodium, wangle, zstd],
'steps': [
# This isn't a separate spec, since only fbthrift uses mstch.
builder.github_project_workdir('no1msd/mstch', 'build'),
builder.cmake_install('no1msd/mstch'),
builder.github_project_workdir('krb5/krb5', 'src'),
builder.autoconf_install('krb5/krb5'),
builder.github_project_workdir(
'rsocket/rsocket-cpp', 'build'
),
builder.step('configuration for rsocket', [
builder.cmake_configure('rsocket/rsocket-cpp/build'),
]),
builder.cmake_install('rsocket/rsocket-cpp'),
builder.fb_github_cmake_install('fbthrift/thrift'),
],
}
|
the-stack_0_11278 | # Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Class for handling environment configuration and defaults.'''
import re
from ipaddress import IPv4Address, IPv6Address
from typing import Type
from aiorpcx import Service, ServicePart
from electrumx.lib.coins import Coin
from electrumx.lib.env_base import EnvBase
class ServiceError(Exception):
pass
class Env(EnvBase):
'''Wraps environment configuration. Optionally, accepts a Coin class
as first argument to have ElectrumX serve custom coins not part of
the standard distribution.
'''
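# Illustrative usage (sketch): Env() reads COIN/NET from the environment, while
# Env(coin=MyCoin) with a hypothetical Coin subclass skips that lookup.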
# Peer discovery
PD_OFF, PD_SELF, PD_ON = ('OFF', 'SELF', 'ON')
SSL_PROTOCOLS = {'ssl', 'wss'}
KNOWN_PROTOCOLS = {'ssl', 'tcp', 'ws', 'wss', 'rpc'}
coin: Type[Coin]
def __init__(self, coin=None):
super().__init__()
self.obsolete(["MAX_SUBSCRIPTIONS", "MAX_SUBS", "MAX_SESSION_SUBS", "BANDWIDTH_LIMIT",
"HOST", "TCP_PORT", "SSL_PORT", "RPC_HOST", "RPC_PORT", "REPORT_HOST",
"REPORT_TCP_PORT", "REPORT_SSL_PORT", "REPORT_HOST_TOR",
"REPORT_TCP_PORT_TOR", "REPORT_SSL_PORT_TOR"])
# Core items
self.db_dir = self.required('DB_DIRECTORY')
self.daemon_url = self.required('DAEMON_URL')
if coin is not None:
assert issubclass(coin, Coin)
self.coin = coin
else:
coin_name = self.required('COIN').strip()
network = self.default('NET', 'mainnet').strip()
self.coin = Coin.lookup_coin_class(coin_name, network)
# Peer discovery
self.peer_discovery = self.peer_discovery_enum()
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
self.force_proxy = self.boolean('FORCE_PROXY', False)
self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
# Misc
self.db_engine = self.default('DB_ENGINE', 'leveldb')
self.banner_file = self.default('BANNER_FILE', None)
self.tor_banner_file = self.default('TOR_BANNER_FILE',
self.banner_file)
self.anon_logs = self.boolean('ANON_LOGS', False)
self.log_sessions = self.integer('LOG_SESSIONS', 3600)
self.log_level = self.default('LOG_LEVEL', 'info').upper()
self.donation_address = self.default('DONATION_ADDRESS', '')
self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
self.drop_client_unknown = self.boolean('DROP_CLIENT_UNKNOWN', False)
self.blacklist_url = self.default('BLACKLIST_URL', self.coin.BLACKLIST_URL)
self.cache_MB = self.integer('CACHE_MB', 1200)
self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
# Server limits to help prevent DoS
self.max_send = self.integer('MAX_SEND', self.coin.DEFAULT_MAX_SEND)
self.max_sessions = self.sane_max_sessions()
self.cost_soft_limit = self.integer('COST_SOFT_LIMIT', 1000)
self.cost_hard_limit = self.integer('COST_HARD_LIMIT', 10000)
self.bw_unit_cost = self.integer('BANDWIDTH_UNIT_COST', 5000)
self.initial_concurrent = self.integer('INITIAL_CONCURRENT', 10)
self.request_sleep = self.integer('REQUEST_SLEEP', 2500)
self.request_timeout = self.integer('REQUEST_TIMEOUT', 30)
self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
self.session_group_by_subnet_ipv4 = self.integer('SESSION_GROUP_BY_SUBNET_IPV4', 24)
self.session_group_by_subnet_ipv6 = self.integer('SESSION_GROUP_BY_SUBNET_IPV6', 48)
self._check_and_fix_cost_limits()
# Services last - uses some env vars above
self.services = self.services_to_run()
if {service.protocol for service in self.services}.intersection(self.SSL_PROTOCOLS):
self.ssl_certfile = self.required('SSL_CERTFILE')
self.ssl_keyfile = self.required('SSL_KEYFILE')
self.report_services = self.services_to_report()
def sane_max_sessions(self):
'''Return the maximum number of sessions to permit. Normally this
is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
downwards if running with a small open file rlimit.'''
env_value = self.integer('MAX_SESSIONS', 1000)
# No resource module on Windows
try:
import resource
nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# We give the DB 250 files; allow ElectrumX 100 for itself
value = max(0, min(env_value, nofile_limit - 350))
if value < env_value:
self.logger.warning(
f'lowered maximum sessions from {env_value:,d} to '
f'{value:,d} because your open file limit is '
f'{nofile_limit:,d}'
)
except ImportError:
value = 512 # that is what returned by stdio's _getmaxstdio()
return value
def _check_and_fix_cost_limits(self):
if self.cost_hard_limit < self.cost_soft_limit:
raise self.Error(f"COST_HARD_LIMIT must be >= COST_SOFT_LIMIT. "
f"got (COST_HARD_LIMIT={self.cost_hard_limit} "
f"and COST_SOFT_LIMIT={self.cost_soft_limit})")
# hard limit should be strictly higher than soft limit (unless both are 0)
if self.cost_hard_limit == self.cost_soft_limit and self.cost_soft_limit > 0:
self.logger.info("found COST_HARD_LIMIT == COST_SOFT_LIMIT. "
"bumping COST_HARD_LIMIT by 1.")
self.cost_hard_limit = self.cost_soft_limit + 1
def _parse_services(self, services_str, default_func):
result = []
for service_str in services_str.split(','):
if not service_str:
continue
try:
service = Service.from_string(service_str, default_func=default_func)
except Exception as e:
raise ServiceError(f'"{service_str}" invalid: {e}') from None
if service.protocol not in self.KNOWN_PROTOCOLS:
raise ServiceError(f'"{service_str}" invalid: unknown protocol')
result.append(service)
# Find duplicate addresses
service_map = {service.address: [] for service in result}
for service in result:
service_map[service.address].append(service)
for address, services in service_map.items():
if len(services) > 1:
raise ServiceError(f'address {address} has multiple services')
return result
def services_to_run(self):
def default_part(protocol, part):
return default_services.get(protocol, {}).get(part)
default_services = {protocol: {ServicePart.HOST: 'all_interfaces'}
for protocol in self.KNOWN_PROTOCOLS}
default_services['rpc'] = {ServicePart.HOST: 'localhost', ServicePart.PORT: 8000}
services = self._parse_services(self.default('SERVICES', ''), default_part)
# Find onion hosts
for service in services:
if str(service.host).endswith('.onion'):
raise ServiceError(f'bad host for SERVICES: {service}')
return services
def services_to_report(self):
services = self._parse_services(self.default('REPORT_SERVICES', ''), None)
for service in services:
if service.protocol == 'rpc':
raise ServiceError(f'bad protocol for REPORT_SERVICES: {service.protocol}')
if isinstance(service.host, (IPv4Address, IPv6Address)):
ip_addr = service.host
if (ip_addr.is_multicast or ip_addr.is_unspecified or
(ip_addr.is_private and self.peer_announce)):
raise ServiceError(f'bad IP address for REPORT_SERVICES: {ip_addr}')
elif service.host.lower() == 'localhost':
raise ServiceError(f'bad host for REPORT_SERVICES: {service.host}')
return services
def peer_discovery_enum(self):
pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
if pd in ('off', ''):
return self.PD_OFF
elif pd == 'self':
return self.PD_SELF
else:
return self.PD_ON
|
the-stack_0_11279 | """
MIT License
Copyright (c) 2019-2021 naoTimesdev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
import asyncio
import logging
from math import ceil
from typing import TYPE_CHECKING, Dict, List, Optional, Union
import arrow
import wavelink
from discord.channel import StageChannel, VoiceChannel
from discord.colour import Colour
from discord.embeds import Embed
from wavelink import Player
from wavelink.errors import NodeOccupied, NoMatchingNode
from wavelink.ext import spotify
from wavelink.tracks import YouTubeTrack
from wavelink.utils import MISSING
from naotimes.timeparse import TimeString
from .errors import UnsupportedURLFormat
from .queue import (
GuildMusicInstance,
TrackEntry,
TrackQueueAll,
TrackQueueImpl,
TrackQueueSingle,
TrackRepeat,
)
from .track import (
BandcampDirectLink,
SoundcloudDirectLink,
SpotifyDirectTrack,
SpotifyTrack,
TwitchDirectLink,
YoutubeDirectLinkTrack,
)
if TYPE_CHECKING:
from discord.guild import Guild
from discord.member import Member
from naotimes.bot import naoTimesBot
from naotimes.config import naoTimesLavanodes
__all__ = (
"naoTimesPlayer",
"format_duration",
)
RealTrack = Union[YouTubeTrack, YoutubeDirectLinkTrack, SpotifyTrack]
VocalChannel = Union[VoiceChannel, StageChannel]
def format_duration(duration: float):
hours = duration // 3600
duration = duration % 3600
minutes = duration // 60
seconds = duration % 60
minutes = str(int(round(minutes))).zfill(2)
seconds = str(int(round(seconds))).zfill(2)
if hours >= 1:
hours = str(int(round(hours))).zfill(2)
return f"{hours}:{minutes}:{seconds}"
return f"{minutes}:{seconds}"
class naoTimesPlayer:
def __init__(
self,
client: naoTimesBot,
loop: asyncio.AbstractEventLoop = None,
spotify_client: spotify.SpotifyClient = None,
):
self.logger = logging.getLogger("naoTimes.MusicPlayer")
self._active_guilds: Dict[int, GuildMusicInstance] = {}
self._client = client
# Use single spotify client for all players
self._spotify = spotify_client
self._loop: asyncio.AbstractEventLoop = loop or asyncio.get_event_loop()
def __del__(self):
self._loop.create_task(self.close(), name="naotimes-player-close-all-players")
@property
def actives(self):
return self._active_guilds
async def close(self):
self.logger.info("Closing all instances...")
channel_ids = [instance.channel.id for instance in self._active_guilds.values() if instance.channel]
for vc_instance in self._client.voice_clients:
vc_instance: Player
if vc_instance.channel.id in channel_ids:
await vc_instance.disconnect(force=True)
self.logger.info("Disconnecting nodes...")
for node in wavelink.NodePool._nodes.copy().values():
await node.disconnect(force=True)
await self._spotify.session.close()
async def add_node(self, node: naoTimesLavanodes):
try:
self.logger.info(f"Trying to connect with node <{node.identifier}>...")
await wavelink.NodePool.create_node(
bot=self._client,
host=node.host,
port=node.port,
password=node.password,
region=node.region,
identifier=node.identifier,
spotify_client=self._spotify,
)
except NodeOccupied:
self.logger.warning(f"Node <{node.identifier}> is already occupied or registered.")
async def remove_node(self, identifier: str):
try:
node = wavelink.NodePool.get_node(identifier=identifier)
await node.disconnect(force=False)
except NoMatchingNode:
self.logger.warning(f"Node <{identifier}> is not registered.")
def _get_id(self, vc: Union[Player, Guild]) -> int:
if hasattr(vc, "guild"):
return vc.guild.id
else:
return vc.id
def create(self, vc: Union[Guild, Player]):
guild_id = self._get_id(vc)
if guild_id not in self._active_guilds:
track_queue = TrackQueueImpl()
self._active_guilds[guild_id] = GuildMusicInstance(track_queue)
def has(self, vc: Union[Player, Guild]) -> bool:
if hasattr(vc, "guild"):
return vc.guild.id in self._active_guilds
elif hasattr(vc, "id"):
return vc.id in self._active_guilds
return False
def get(self, vc: Union[Guild, Player]) -> GuildMusicInstance:
self.create(vc)
return self._active_guilds[self._get_id(vc)]
def set(self, vc: Union[Guild, Player], instance: GuildMusicInstance):
self._active_guilds[self._get_id(vc)] = instance
def get_tracks(self, vc: Union[Player, Guild]) -> List[TrackEntry]:
all_tracks: List[TrackEntry] = []
for track in self.get(vc).queue._queue:
all_tracks.append(track)
return all_tracks
def delete(self, vc: Union[Player, Guild]):
if self.has(vc):
del self._active_guilds[self._get_id(vc)]
def delete_track(self, vc: Union[Player, Guild], index: int):
try:
queue = self.get(vc)
if queue.repeat == TrackRepeat.single:
return True
self.logger.info(f"Player: Trying to remove track [{index}] at <{vc.guild}>")
del queue.queue._queue[index]
return True
except Exception as e:
self.logger.error(f"Player: Failed to remove track [{index}] at <{vc.guild}>", exc_info=e)
return False
def clear(self, vc: Union[Player, Guild]):
guild_id = self._get_id(vc)
self._active_guilds[guild_id].queue.clear()
async def enqueue(self, vc: Player, entries: Union[TrackEntry, List[TrackEntry]]):
if not isinstance(entries, list):
entries = [entries]
queue = self.get(vc)
guild_id = self._get_id(vc)
for entry in entries:
track = entry.track
self.logger.info(f"Player: Enqueueing at guild <{guild_id}>: {track.title} by {track.author}")
await queue.queue.put(entry)
self._active_guilds[guild_id] = queue
def _set_current(self, vc: Player, track: Optional[TrackEntry] = None) -> None:
self.get(vc).current = track
def change_dj(self, vc: Player, user: Member):
self.get(vc).host = user
def set_channel(self, vc: Player, channel: VocalChannel):
self.get(vc).channel = channel
def reset_vote(self, vc: Player):
self.get(vc).skip_votes.clear()
def add_vote(self, vc: Player, user: Member):
self.get(vc).skip_votes.add(user)
def change_repeat_mode(self, vc: Player, mode: TrackRepeat) -> Optional[GuildMusicInstance]:
queue = self.get(vc)
if queue.repeat == mode:
return None
queue.repeat = mode
if mode == TrackRepeat.single:
queue.queue = TrackQueueSingle.from_other(queue.queue)
elif mode == TrackRepeat.all:
queue.queue = TrackQueueAll.from_other(queue.queue)
elif mode == TrackRepeat.disable:
queue.queue = TrackQueueImpl.from_other(queue.queue)
self._active_guilds[self._get_id(vc)] = queue
return queue
def get_requirements(self, vc: Player) -> int:
in_voice = vc.channel.members
# 40% need to vote to skip.
required = ceil(len(in_voice) * 0.4)
return required
def generate_track_embed(self, entry: TrackEntry, position: int = MISSING) -> Embed:
embed = Embed(colour=Colour.from_rgb(78, 214, 139), timestamp=arrow.utcnow().datetime)
embed.set_author(name="Diputar 🎵", icon_url=self._client.user.avatar)
description = []
track = entry.track
track_url = track.uri
if hasattr(track, "internal_id"):
track_url = f"https://open.spotify.com/track/{track.internal_id}"
description.append(f"[{track.title}]({track_url})")
if track.author:
description.append(f"**Artis**: {track.author}")
if hasattr(track, "description") and track.description:
description.append(f"\n{track.description}")
embed.description = "\n".join(description)
embed.add_field(name="Diputar oleh", value=f"{entry.requester.mention}", inline=True)
durasi = TimeString.from_seconds(int(ceil(track.duration)))
if position is MISSING:
embed.add_field(name="Durasi", value=durasi.to_string(), inline=True)
else:
posisi = format_duration(position)
durasi = format_duration(track.duration)
embed.add_field(name="Durasi", value=f"{posisi}/{durasi}", inline=True)
internal_thumb = getattr(track, "_int_thumbnail", None)
if internal_thumb:
embed.set_thumbnail(url=internal_thumb)
elif isinstance(track, YouTubeTrack):
embed.set_thumbnail(url=f"https://i.ytimg.com/vi/{track.identifier}/maxresdefault.jpg")
return embed
async def _fetch_track_queue(self, player: Player):
"""Fetch a track from the queue"""
try:
queue = self.get(player)
return await queue.queue.get()
except asyncio.CancelledError:
return None
async def search_track(self, query: str, node: wavelink.Node):
if query.startswith("http"):
if "spotify.com" in query:
track_mode = spotify.SpotifySearchType.track
if "/album" in query:
track_mode = spotify.SpotifySearchType.album
elif "/playlist" in query:
track_mode = spotify.SpotifySearchType.playlist
spoti_results = await SpotifyDirectTrack.search(
query, type=track_mode, node=node, spotify=self._spotify, return_first=False
)
return spoti_results
elif "soundcloud.com" in query:
soundcloud_tracks = await SoundcloudDirectLink.search(query, node=node)
return soundcloud_tracks
elif "bandcamp.com" in query:
bandcamp_tracks = await BandcampDirectLink.search(query, node=node)
return bandcamp_tracks
elif "vimeo.com" in query:
raise UnsupportedURLFormat(query, "Vimeo tidak didukung untuk sekarang!")
elif "twitch.tv" in query:
ttv_results = await TwitchDirectLink.search(query, node=node, return_first=True)
return ttv_results
else:
return_first = "/playlist" not in query
results = await YoutubeDirectLinkTrack.search(
query,
node=node,
return_first=return_first,
)
return results
results = await YouTubeTrack.search(query, node=node, return_first=False)
for result in results:
setattr(result, "source", "youtube")
return results
# Listeners
# Call to this function later :)
async def play_next(self, player: Player):
self._set_current(player, None)
# Try to get new track.
try:
self.logger.info(f"Player: <{player.guild}> trying to enqueue new track... (5 minutes timeout)")
new_track = await asyncio.wait_for(self._fetch_track_queue(player), timeout=300)
except asyncio.TimeoutError:
# No more tracks, clear queue and stop player.
self.logger.info(f"Player: <{player.guild}> no more tracks, clearing queue and stopping player.")
self.delete(player)
await player.disconnect(force=True)
return
if new_track is None:
self.logger.info(f"Player: <{player.guild}> no more tracks, clearing queue and stopping player.")
self.delete(player)
await player.disconnect(force=True)
return
self.reset_vote(player)
self.logger.info(f"Player: <{player.guild}> got new track: {new_track.track}")
self._set_current(player, new_track)
try:
await player.play(new_track.track)
except Exception as e:
# Dispatch failed to play event
self._client.dispatch("naotimes_playback_failed", player, new_track, e)
return
wrapped_entry = TrackEntry(player.source, new_track.requester, new_track.channel)
self._set_current(player, wrapped_entry)
|
the-stack_0_11281 | import json
# Create a dictionary object
person_dict = {'first': 'Christopher', 'last':'Harrison'}
# Add additional key pairs to dictionary as needed
person_dict['City']='Seattle'
# Create a list object of programming languages
languages_list = ['CSharp','Python','JavaScript']
# Add list object to dictionary for the languages key
person_dict['languages']= languages_list
# Convert dictionary to JSON object
person_json = json.dumps(person_dict)
# Print JSON object
print(person_json) |
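# Expected output (illustrative; dict insertion order is preserved on Python 3.7+):
# {"first": "Christopher", "last": "Harrison", "City": "Seattle", "languages": ["CSharp", "Python", "JavaScript"]}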
the-stack_0_11282 |
import pandas as pd
import numpy as np
import torch
import torch.utils.data as Data
def get_params_length(layer_id):
'''
Get the parameter-vector length for each layer type.
'''
get_params_length_dic = {
0:13,
1:19,
2:25,
3:14,
4:20,
5:26,
6:11,
7:17,
8:23,
9:9,
10:14,
11:19,
12:7,
13:9,
14:11,
15:4,
16:5,
17:6,
18:4,
19:5,
20:6,
21:4,
22:6,
23:3,
24:3,
25:5,
26:6,
}
return get_params_length_dic[layer_id]
def link_vector_to_graph(link_list,length,max_layer_length):
'''
Convert the link vector into an adjacency matrix; diagonal elements indicate whether a node receives the initial input.
'''
adj = np.zeros((max_layer_length,max_layer_length))
graph = np.zeros([length,length],dtype = float)
flag = 0
# print(link_list,length,max_layer_length)
if len(link_list) != length * length:
for i in range(0,length):
for j in range(0,i+1):
graph[i,j] = link_list[flag]
flag += 1
else:
for i in range(0,length):
for j in range(0,length):
graph[i,j] = link_list[flag]
flag += 1
adj[0:length,0:length] = graph
for i in range(length):
adj[i][i] = 1
return adj.T
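# Illustrative sketch: for length=3 a triangular link_list of 6 values fills the lower
# triangle row by row (diagonal included), the diagonal is then forced to 1, and the
# transpose is returned, zero-padded up to max_layer_length x max_layer_length.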
def get_params_position(id):
params_length_dic = {
0:0,
1:19,
2:0,
3:0,
4:20,
5:0,
6:0,
7:17,
8:0,
9:0,
10:14,
11:0,
12:0,
13:9,
14:0,
15:0,
16:5,
17:0,
18:0,
19:5,
20:0,
21:4,
22:6,
23:3,
24:3,
25:5,
26:0
}
start = 0
end = 0
for i in range(26):
if i != id:
start += params_length_dic[i]
end += params_length_dic[i]
else:
end += params_length_dic[i]
break
return start,end
def load_randomdataset_test_data():
df_1 = pd.read_csv('../data/dataset/random_testset_1.txt',sep = ' ',index_col=False)
df_2 = pd.read_csv('../data/dataset/random_testset_2.txt',sep = ' ',index_col=False)
df_3 = pd.read_csv('../data/dataset/random_testset_3.txt',sep = ' ',index_col=False)
mean_energy =(df_1['all_energy'] + df_2['all_energy'] + df_3['all_energy']) / 3
df_1['all_energy'] = mean_energy
return df_1
def load_customdataset_test_data():
df_1 = pd.read_csv('../data/dataset/custom_testset_1.txt',sep = ' ',index_col=False)
df_2 = pd.read_csv('../data/dataset/custom_testset_2.txt',sep = ' ',index_col=False)
df_3 = pd.read_csv('../data/dataset/custom_testset_3.txt',sep = ' ',index_col=False)
mean_energy =(df_1['all_energy'] + df_2['all_energy'] + df_3['all_energy']) / 3
df_1['all_energy'] = mean_energy
return df_1
def vaild(model,params_min_list,params_max_list,max_layer_length,layer_parameters,layer_link,layer_id,energy,split_gap = 24,split_index_list = None):
layer_parameters = np.array([float(x) if '.' in x else int(x) for x in layer_parameters.split(',')],dtype='float')
layer_link = np.array([int(x.replace('.0','')) for x in layer_link.split(',')])
layer_id = np.array([int(x) for x in layer_id.split(',')])
# array = np.zeros(1)
energy = [energy]
index = 0
for id in layer_id:
params_length = get_params_length(id)
params = layer_parameters[index:index+params_length]
        params = [(params[j] - params_min_list[id][j]) / (params_max_list[id][j]) if params_max_list[id][j] != 0 and params_min_list[id][j] != params_max_list[id][j] else 0 for j in range(params_length)]
layer_parameters[index:index+params_length] = params
index += params_length
index = 0
layer_params = []
for id in layer_id:
params = [0 for i in range(110)]
start,end = get_params_position(id)
params_length = get_params_length(id)
params[start:end] = layer_parameters[index:index + params_length].tolist()
layer_params.append(params)
index += params_length
adj = link_vector_to_graph(layer_link,len(layer_id),max_layer_length)
layer_id = layer_id.tolist()
if len(layer_id) < max_layer_length:
for j in range(0,max_layer_length - len(layer_id)):
layer_params.append([0 for i in range(110)])
        layer_id.extend([-1 for i in range(max_layer_length - len(layer_id))]) # pad layer ids with -1 when there are fewer layers than max_layer_length
adj = torch.ShortTensor(np.array(adj)).unsqueeze(0).cuda() # [1,70,294]
data_x = torch.FloatTensor(np.array(layer_params)).unsqueeze(0).cuda() # [1,70,294]
data_id = np.array(layer_id)
data_id = torch.FloatTensor(data_id).unsqueeze(0).cuda()
# print()
output = model(data_x, adj, data_id)
# output = torch.squeeze(output, dim=0)
# print(output)
MAE_error = abs(output.item() - energy[0])
error_val = accuracy_test(output.cpu(),energy[0])
return output, MAE_error, error_val
def accuracy_test(output, labels):
return abs(output - labels)/labels * 100
def load_data(dataset_type):
print('load data...')
    # store all elements of each layer type, to compute min/max later
params_list = {
0:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[],10:[],11:[],12:[],13:[],14:[],15:[],16:[],17:[],18:[],19:[],20:[],21:[],22:[],23:[],24:[],25:[],26:[]
}
    # store the minimum value of each element, per layer type
params_min_list = {
0:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[],10:[],11:[],12:[],13:[],14:[],15:[],16:[],17:[],18:[],19:[],20:[],21:[],22:[],23:[],24:[],25:[],26:[]
}
    # store the maximum value of each element, per layer type
params_max_list = {
0:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[],10:[],11:[],12:[],13:[],14:[],15:[],16:[],17:[],18:[],19:[],20:[],21:[],22:[],23:[],24:[],25:[],26:[]
}
data = pd.read_csv('../data/dataset/%s_data.txt' % dataset_type,sep = ' ',index_col=False)
layer_parameters = data['layer_parameters'].values
layer_link = data['layer_link'].values
layer_id = data['layer_id'].values
    max_layer_length = max([len(layer_id.split(',')) for layer_id in data['layer_id'].values]) # maximum number of layers across all networks
# print(max_layer_length)
for i in range(len(layer_parameters)):
try:
layer_parameters[i] = np.array([float(x) if '.' in x else int(x) for x in layer_parameters[i].split(',')],dtype='float')
layer_link[i] = np.array([int(x) for x in layer_link[i].split(',')])
layer_id[i] = np.array([int(x) for x in layer_id[i].split(',')])
except:
print(i,layer_parameters[i],layer_id[i])
for i in range(len(layer_parameters)):
one_net_layer_id = layer_id[i]
index = 0
for id in one_net_layer_id:
params_length = get_params_length(id)
params = layer_parameters[i][index:index+params_length]
index += params_length
params_list[id].append(params.tolist())
for i in range(0,27):
if len(params_list[i]) != 0:
params_max_list[i] = np.amax(np.array(params_list[i]), axis=0)
params_min_list[i] = np.amin(np.array(params_list[i]), axis=0)
    # Normalization
for i in range(len(layer_parameters)):
one_net_layer_id = layer_id[i]
index = 0
        # normalize each layer type separately
for id in one_net_layer_id:
params_length = get_params_length(id)
params = layer_parameters[i][index:index+params_length]
params = [(params[j] - params_min_list[id][j]) / (params_max_list[id][j]) if params_max_list[id][j] != 0 else 0 for j in range(params_length)]
layer_parameters[i][index:index+params_length] = params
index += params_length
all_params_array = []
all_id_array = []
all_adj_array = []
data_link_all = torch.IntTensor()
for i in range(0,len(layer_parameters)):
# if i % 1000 == 0 and i == 1000:
# data_link = torch.IntTensor(np.array(all_adj_array))
# data_link_all = data_link
# all_adj_array = []
if i % 1000 == 0 and i != 0:
data_link = torch.IntTensor(np.array(all_adj_array))
data_link_all = torch.cat([data_link_all,data_link])
all_adj_array = []
net_adj = link_vector_to_graph(layer_link[i],len(layer_id[i]),max_layer_length)
all_adj_array.append(net_adj)
# print(all_adj_array[0])
data_link = torch.IntTensor(np.array(all_adj_array))
data_link_all = torch.cat([data_link_all,data_link])
print(data_link_all.shape)
for i in range(0,len(layer_parameters)):
index = 0
layer_params = []
for id in layer_id[i]:
params = [0 for i in range(110)]
start,end = get_params_position(id)
params_length = get_params_length(id)
            if id != 23 and id != 24:
params[start:end] = layer_parameters[i][index:index + params_length].tolist()
layer_params.append(params)
index += params_length
for j in range(0,max_layer_length - len(layer_id[i])):
layer_params.append([0 for i in range(110)])
for j in range(len(layer_id[i])):
id = layer_id[i][j]
if id == 23 or id == 24:
for k in range(j,len(layer_id[i])-1):
layer_id[i][k] = layer_id[i][k+1]
layer_id[i][len(layer_id[i])-1] = -1
layer_id[i] = layer_id[i].tolist()
        layer_id[i].extend([-1 for i in range(max_layer_length - len(layer_id[i]))]) # pad layer ids with -1 up to max_layer_length
all_id_array.append(layer_id[i])
all_params_array.append(layer_params)
# b = np.load("all_params_array.npy")
# data_link = torch.FloatTensor(np.array(all_adj_array))
data_x = torch.FloatTensor(np.array(all_params_array))
data_id = np.array(all_id_array)
data_id = torch.FloatTensor(data_id)
data_y = torch.FloatTensor(data['all_energy'].values)
train_size = int(0.8 * len(data_x))
test_size = len(data_x) - train_size
BATCH_SIZE = 128
    full_dataset = Data.TensorDataset(data_x, data_id, data_link_all, data_y) # read x and y and wrap them in Tensor format
train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
train_loader = Data.DataLoader(
dataset=train_dataset, # torch TensorDataset format
        batch_size=BATCH_SIZE,      # batch size
        shuffle=True,               # whether to shuffle the data
        num_workers=0,              # number of subprocesses used for data loading
)
    # test_torch_dataset = Data.TensorDataset(test_params_inputs, test_id_inputs, test_outputs) # read x and y and wrap them in Tensor format
test_loader = Data.DataLoader(
dataset=test_dataset, # torch TensorDataset format
        batch_size=BATCH_SIZE,      # batch size
        shuffle=True,               # whether to shuffle the data
        num_workers=0,              # number of subprocesses used for data loading
)
return train_loader,test_loader,params_min_list,params_max_list,max_layer_length
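# Hypothetical wiring of the loaders above (the model object and the 'random'
# dataset type are assumptions, defined elsewhere):
#
#   train_loader, test_loader, p_min, p_max, max_len = load_data('random')
#   for batch_x, batch_id, batch_adj, batch_y in train_loader:
#       pred = model(batch_x.cuda(), batch_adj.cuda(), batch_id.cuda())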
def get_50_epoch_MAPE(epoch,vaild_acc):
all_test_mean = 0
all_test_mean_list = []
count = 0
if epoch < 50:
start_index = 0
else:
start_index = epoch - 50
for net_name,acc_list in vaild_acc.items():
count += 1
all_test_mean += np.mean(acc_list[start_index:epoch],axis=0)[0]
all_test_mean_list.append(np.mean(acc_list[start_index:epoch],axis=0)[0])
all_test_mean_list.sort()
return np.mean(all_test_mean_list[0:18])
def accuracy_train(output, labels):
output = output.cpu().detach().numpy().tolist()
labels = labels.cpu().numpy().tolist()
for i in range(0,len(output)):
output[i] = abs(output[i] - labels[i])/labels[i] * 100
return np.mean(output) |
the-stack_0_11285 | import json
import tkinter
from alp.ml import Alp
class MainWindow(tkinter.Tk):
def __init__(self, filename):
super().__init__()
self.create_mf(filename)
def loop_mainframe(self):
self.mainloop()
def create_mf(self, filename):
f = open(f"{Alp.CONF_DIR}/{filename}", "r")
j = json.load(f)
alp = Alp()
alp.load_widgets(self, j)
mf = tkinter.Frame(master=self)
mf.pack()
mf.exit = tkinter.Button(mf, text="終了", fg="red",
command=self.destroy)
mf.exit.pack(side="bottom")
img = tkinter.Image("photo", file=f"{Alp.CONF_DIR}/gracie.png")
self.tk.call('wm', 'iconphoto', self._w, img)
|
the-stack_0_11287 | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
class CarouselGalleryUniteOptionsAdmin(admin.ModelAdmin):
'''
Carousel
Tiles - Columns
Tiles - Grid
Tiles - Justified
Tiles - Nested
'''
fieldsets = (
(_('Gallery options'), {
'classes': ('collapse',),
'fields': (
# 'gallery_theme',
'gallery_width',
'gallery_min_width',
'gallery_background_color',
)
}),
)
class SliderGalleryUniteOptionsAdmin(admin.ModelAdmin):
'''
Compact theme
Default theme
Grid theme
Slider
'''
fieldsets = (
(_('Gallery options'), {
'classes': ('collapse',),
'fields': (
# 'gallery_theme',
'gallery_width',
'gallery_height',
'gallery_min_width',
'gallery_min_height',
'gallery_skin',
'gallery_images_preload_type',
'gallery_autoplay',
'gallery_play_interval',
'gallery_pause_on_mouseover',
'gallery_control_thumbs_mousewheel',
'gallery_control_keyboard',
'gallery_carousel',
'gallery_preserve_ratio',
'gallery_debug_errors',
'gallery_background_color',
)
}),
)
|
the-stack_0_11290 | #!/usr/bin/env python3
import os
import requests
import json
import sys
import psutil
import subprocess
import re
from colorama import Fore, Style, Back
from tqdm import tqdm
import urllib3
from troncli.constants import *
"""
Printing Messages
"""
def logo_simple():
print(Fore.RED + Style.BRIGHT + '')
print(' _________ ____ _ __ _______ ____')
print('/_ __/ _ \/ __ \/ |/ /___/ ___/ / / _/')
print(' / / / , _/ /_/ / /___/ /__/ /___/ / ')
print('/_/ /_/|_|\____/_/|_/ \___/____/___/ ')
print(Fore.RESET + Style.RESET_ALL + '')
def logo_shadow():
print(Fore.RED + '')
print('████████╗██████╗ ██████╗ ███╗ ██╗ ██████╗██╗ ██╗')
print('╚══██╔══╝██╔══██╗██╔═══██╗████╗ ██║ ██╔════╝██║ ██║')
print(' ██║ ██████╔╝██║ ██║██╔██╗ ██║█████╗██║ ██║ ██║')
print(' ██║ ██╔══██╗██║ ██║██║╚██╗██║╚════╝██║ ██║ ██║')
print(' ██║ ██║ ██║╚██████╔╝██║ ╚████║ ╚██████╗███████╗██║')
print(' ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═╝')
print(Fore.RESET + '')
def progress_msg(content):
print(Fore.CYAN + '[ TRON-CLI ]: ' + content + '...' + Fore.RESET)
def imode_msg(content):
print(Back.BLUE + Fore.WHITE + Style.BRIGHT + '[ I-MODE ]: ' + Style.NORMAL + content + Fore.RESET + Back.RESET + Style.RESET_ALL)
def success_msg(content):
print(Fore.GREEN + '✓ : ' + content + Fore.BLACK)
def warning_msg(content):
print(Fore.YELLOW + '⚠ : ' + content)
def error_msg(content):
print(Fore.RED + '✖ : ' + content)
def info_msg(content):
print(Fore.MAGENTA + 'ⓘ: ' + content + Fore.RESET)
def info_msg_div():
print(Fore.MAGENTA + '------------------' + Fore.RESET)
def status_msg(category, detail):
if sys.stdout.isatty() and psutil.POSIX:
fmt = '%-13s %s' % (Fore.BLUE + Style.BRIGHT + str(category),
Fore.RESET + Style.RESET_ALL + str(detail))
else:
fmt = '%-11s %s' % (category, detail)
print(fmt)
def status_msg_div():
print(Fore.BLUE + Style.BRIGHT + '------------------' + Fore.RESET + Style.RESET_ALL)
def msg(content):
print(Fore.WHITE + ' ' + content + Fore.RESET)
def debug(content):
print(Fore.YELLOW + Style.BRIGHT + 'DEBUG: ' + content + Fore.RESET + Style.RESET_ALL)
def node_instruction():
info_msg('Tips: ')
info_msg('Check overall status:')
msg('tron-cli status')
info_msg('Check specific node status:')
msg('tron-cli status --node <node id>')
info_msg('Stop all nodes:')
msg('tron-cli stop')
info_msg('Stop specific node:')
msg('tron-cli stop --node <node id>')
def node_cmds(node_id):
info_msg('CMD Tips: ')
info_msg('Check overall status:')
msg('tron-cli status')
info_msg('Check current node status:')
msg('tron-cli status --node ' + str(node_id))
info_msg('Stop all nodes:')
msg('tron-cli stop')
info_msg('Stop current node:')
msg('tron-cli stop --node ' + str(node_id))
def recommendation():
info_msg_div()
info_msg('Hardware recommendation for running a full node: ')
msg('CPU: 64 cores')
msg('RAM: 64 GB')
info_msg_div()
def log_location(root_path, node_type):
if node_type == 'full':
return root_path + NODES_DIR + FULL_NODE_DIR + '/logs/tron.log'
elif node_type == 'sol':
return root_path + NODES_DIR + SOLIDITY_NODE_DIR + '/logs/tron.log'
else:
return 'not recording logs'
"""
Node List
"""
class Node(object):
def __init__(self):
self.root_path = os.getcwd()
# load or init node list file
if os.path.isfile(self.root_path + '/' + RUNNING_NODE_LIST_FILE):
phrase = Phrase()
self.node_list = phrase.load_json_file(self.root_path + '/' + RUNNING_NODE_LIST_FILE)
else:
self.node_list = {'live': {'full': [], 'sol': [], 'event': [], 'grid': [], 'all': [], 'version': ''},
'db': {'dbname': '', 'dbusername': '', 'dbpassword': ''},
'config': {'nettype': 'private',
'fullhttpport': 8500,
'solhttpport': 8600,
'eventhttpport': 8400,
'fullrpcport': 58500,
'solrpcport': 58600,
'eventrpcport': 58400,
'enablememdb': 'True',
'dbsyncmode': 'async',
'saveintertx': 'False',
'savehistorytx': 'False',
'gridport': 18891,
'dbname': 'Null',
'dbusername': 'Null',
'dbpassword': 'Null'},
'init_ed': False,
'config_ed': False
}
def get(self):
return self.node_list
def save(self):
with open(self.root_path + '/' + RUNNING_NODE_LIST_FILE, 'w') as file:
file.write(json.dumps(self.node_list))
def reset_config(self):
self.node_list['config'] = {'nettype': 'private',
'fullhttpport': 8500,
'solhttpport': 8600,
'eventhttpport': 8400,
'fullrpcport': 58500,
'solrpcport': 58600,
'eventrpcport': 58400,
'enablememdb': 'True',
'dbsyncmode': 'async',
'saveintertx': 'False',
'savehistorytx': 'False',
'gridport': 18891,
'dbname': 'Null',
'dbusername': 'Null',
'dbpassword': 'Null'}
self.save()
async def update_init_done(self, flag):
self.node_list['init_ed'] = flag
self.save()
async def update_config_done(self, flag):
self.node_list['config_ed'] = flag
self.save()
async def update_node_version(self, version):
self.node_list['live']['version'] = version
self.node_list['init_ed'] = True # need to move this logic back to cli.py
self.save()
async def update_running_node(self, node_type, pid, execution):
"""
node_type: "full" / "sol" / "event" / "grid"
pid: int
execution: "add" / "remove"
"""
if execution == 'add':
self.node_list['live'][node_type].append(pid)
self.node_list['live']['all'].append(pid)
elif execution == 'remove':
if pid in self.node_list['live']['full']:
self.node_list['live']['full'].remove(pid)
self.node_list['live']['all'].remove(pid)
elif pid in self.node_list['live']['sol']:
self.node_list['live']['sol'].remove(pid)
self.node_list['live']['all'].remove(pid)
elif pid in self.node_list['live']['event']:
self.node_list['live']['event'].remove(pid)
self.node_list['live']['all'].remove(pid)
elif pid in self.node_list['live']['grid']:
self.node_list['live']['grid'].remove(pid)
self.node_list['live']['all'].remove(pid)
else:
warning_msg('process id: ' + str(pid) + ' not in the running node list')
else:
error_msg('wrong execution key word: ' + str(execution))
self.save()
# with open(self.root_path + '/' + RUNNING_NODE_LIST_FILE, 'w') as file:
# file.write(json.dumps(self.node_list))
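    # Usage sketch (the pid value below is an illustrative assumption):
    #
    #   node = Node()
    #   await node.update_running_node('full', 12345, 'add')     # record a started full node
    #   await node.update_running_node('full', 12345, 'remove')  # forget it after it stops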
async def update_db_settings(self, dbname, dbusername, dbpassword):
self.node_list['db']['dbname'] = dbname
self.node_list['db']['dbusername'] = dbusername
self.node_list['db']['dbpassword'] = dbpassword
self.save()
# with open(self.root_path + '/' + RUNNING_NODE_LIST_FILE, 'w') as file:
# file.write(json.dumps(self.node_list))
async def update_config(self, nettype, fullhttpport, solhttpport,
eventhttpport, fullrpcport, solrpcport, eventrpcport,
enablememdb, dbsyncmode, saveintertx, savehistorytx,
gridport, dbname, dbusername, dbpassword):
self.node_list['config']['nettype'] = nettype
self.node_list['config']['fullhttpport'] = fullhttpport
self.node_list['config']['solhttpport'] = solhttpport
self.node_list['config']['eventhttpport'] = eventhttpport
self.node_list['config']['fullrpcport'] = fullrpcport
self.node_list['config']['solrpcport'] = solrpcport
self.node_list['config']['eventrpcport'] = eventrpcport
self.node_list['config']['enablememdb'] = enablememdb
self.node_list['config']['dbsyncmode'] = dbsyncmode
self.node_list['config']['saveintertx'] = saveintertx
self.node_list['config']['savehistorytx'] = savehistorytx
self.node_list['config']['gridport'] = gridport
self.node_list['config']['dbname'] = dbname
self.node_list['config']['dbusername'] = dbusername
self.node_list['config']['dbpassword'] = dbpassword
self.node_list['config_ed'] = True
self.save()
"""
Download
"""
async def download(file_name, url_string):
with open(file_name, 'wb') as f:
# remove ssl warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
resp = requests.get(url_string + '/' + file_name,
verify=False, stream=True)
except OSError as err:
# pbar.update(0)
error_msg('OS Error -' + str(err))
os.sys.exit()
else:
with tqdm(total=100) as pbar:
total_length = resp.headers.get('content-length')
if total_length is None:
pbar.update(100)
pbar.close()
f.write(resp.content)
else:
_chunk_num = 10
_chunk_size = int(int(total_length) / _chunk_num) + 1
for data in resp.iter_content(chunk_size=_chunk_size):
f.write(data)
pbar.update(_chunk_num)
pbar.close()
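# Usage sketch (the file name and release URL are illustrative assumptions):
#
#   release_url = 'https://github.com/tronprotocol/java-tron/releases/download/...'
#   await download('FullNode.jar', release_url)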
async def git_clone(host, branch, tar_path):
progress_msg('Git cloning ' + host + '-branch: ' + branch)
cmd = 'git clone --single-branch -b ' + branch + ' ' + host
cmd += ' ' + tar_path
# _process = subprocess.Popen("exec " + cmd, stdout=subprocess.PIPE, shell=True)
try:
os.system(cmd)
except OSError as err:
error_msg('OS Error -' + str(err))
os.sys.exit()
async def gradlew_build(task):
cmd = './gradlew build -x test'
try:
os.system(cmd)
except OSError as err:
error_msg('OS Error -' + str(err))
os.sys.exit()
else:
success_msg(task + ' gradlew build finished')
"""
Phrase
"""
class Phrase(object):
@staticmethod
def convert_bytes(n):
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return "%sB" % n
@staticmethod
def load_json_file(json_file_path):
f = open(json_file_path)
_json_props = json.load(f)
f.close()
return _json_props
def store_json2properties_to_file(self, json_props, target_file_path):
"""
convert json to properties and store in target file
"""
_properties = self.json2properties(json_props)
_properties_str_formatted = self.properties2str(_properties)
f = open(target_file_path, 'w')
f.write(_properties_str_formatted)
f.close()
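    # Usage sketch (the json content and target path are illustrative assumptions):
    #
    #   phrase = Phrase()
    #   phrase.store_json2properties_to_file({'node': {'listen.port': 18888}},
    #                                        '/tmp/private_net_config.conf')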
def store_json2javabeanconfig_to_file(self, json_props, target_file_path):
"""
convert json to properties and store in target file
"""
_properties = self.json2properties(json_props)
_properties_str_formatted = self.properties2str_bean(_properties)
f = open(target_file_path, 'w')
f.write(_properties_str_formatted)
f.close()
@staticmethod
def properties2str(properties_props):
"""
convert properties to string, and change format
"""
_formatted_str = str(properties_props)
_formatted_str = re.sub("}, '", "},\n\n'", _formatted_str)
_formatted_str = re.sub("':", ":", _formatted_str)
_formatted_str = re.sub("' ", "", _formatted_str)
_formatted_str = re.sub("'", "\"", _formatted_str)
return _formatted_str
@staticmethod
def properties2str_bean(properties_props):
"""
convert properties to string, and change format
"""
_formatted_str = str(properties_props)
_formatted_str = re.sub("}, '", "},\n\n'", _formatted_str)
_formatted_str = re.sub("':", ":", _formatted_str)
_formatted_str = re.sub("' ", "", _formatted_str)
_formatted_str = re.sub("'", "\"", _formatted_str)
_formatted_str = re.sub(":", " =", _formatted_str)
_formatted_str = re.sub(", ", "\r", _formatted_str)
_formatted_str = re.sub("\"", "", _formatted_str)
return _formatted_str[1:-1]
@staticmethod
def json2properties(json_props):
"""
Credit: this function is based on the phrase code in the project:
echinopsii/net.echinopsii.ariane.community.cli.python3.
"""
properties = {}
if isinstance(json_props, list):
for prop in json_props:
if isinstance(prop['propertyValue'], list):
properties[prop['propertyName']] = prop['propertyValue'][1]
elif isinstance(prop['propertyValue'], dict):
map_property = {}
for prop_key, prop_value in prop['propertyValue'].items():
if prop_value.__len__() > 1:
map_property[prop_key] = prop_value[1]
else:
print('json2properties - ' + prop_key +
' will be ignored as its definition is incomplete...')
properties[prop['propertyName']] = map_property
elif prop['propertyType'] == 'array':
j_data = json.loads(prop['propertyValue'])
if j_data.__len__() > 1:
if j_data[0] == 'map':
t_data = []
for amap in j_data[1]:
t_data.append(DriverTools.json_map2properties(amap))
properties[prop['propertyName']] = t_data
elif j_data[0] == 'array':
t_data = []
for ar in j_data[1]:
t_data.append(DriverTools.json_array2properties(ar))
properties[prop['propertyName']] = t_data
else:
properties[prop['propertyName']] = j_data[1]
else:
print('json2properties - ' + prop['propertyName'] +
' will be ignored as its definition is incomplete...')
elif prop['propertyType'] == 'map':
j_data = json.loads(prop['propertyValue'])
map_property = DriverTools.json_map2properties(j_data)
properties[prop['propertyName']] = map_property
else:
properties[prop['propertyName']] = prop['propertyValue']
else:
properties = json_props
return properties
@staticmethod
def str2xml_to_file(xml_str, target_file_path):
"""
use xml string to create logback xml file
"""
f = open(target_file_path, 'w+')
f.write(xml_str) |
the-stack_0_11291 | from django.contrib import admin
from django.db.models import TextField
from django.forms import Textarea
from .models import Job, Analysis, Project, Access, Data
@admin.register(Access)
class AccessAdmin(admin.ModelAdmin):
list_select_related = (
'user',
'project',
)
readonly_fields = (
'user',
)
pass
@admin.register(Data)
class DataAdmin(admin.ModelAdmin):
formfield_overrides = {
TextField: {'widget': Textarea(attrs={'rows': 20, 'cols': 100})},
}
search_fields = ('name', 'owner__first_name', 'owner__email', 'state', "project__name",
"project__owner__first_name", "project__owner__email", "id", "uid")
list_display = ("name", "project", "lastedit_date", "date", 'size', 'type')
list_filter = ("project__name", "deleted")
fieldsets = (("Data Metadata",
{'fields': ("name", "owner", "image", "deleted", 'type',
'rank'),
#"file"),
"classes": ('extrapretty')}
),
("Optional Text Inputs",
{'fields': (("text", )),
"classes": ("collapse", 'extrapretty')}
),
)
pass
@admin.register(Job)
class JobAdmin(admin.ModelAdmin):
formfield_overrides = {
TextField: {'widget': Textarea(attrs={'rows': 20, 'cols': 100})},
}
search_fields = ('name', 'owner__first_name', 'owner__email', 'state', "project__name",
"project__owner__first_name", "project__owner__email", "id", "uid")
list_display = ("name", "state", "start_date", "security", "date")
list_filter = ("state", "security", "project__name", "deleted")
fieldsets = (("Job Metadata",
{'fields': ("name", "owner", 'project', ("uid"),
("state", "security"), "image"),
"classes": ('extrapretty')}
),
("Optional Text Inputs",
{'fields': (("text", "html")),
"classes": ("collapse", 'extrapretty')}
),
("Run Time Settings",
{'fields': ("json_text", "template"),
"classes": ("wide", 'extrapretty')},
),
)
@admin.register(Analysis)
class AnalysisAdmin(admin.ModelAdmin):
formfield_overrides = {
TextField: {'widget': Textarea(attrs={'rows': 20, 'cols': 100})},
}
search_fields = ('name', 'text', 'owner__first_name', 'owner__email', "project__name",
"project__owner__first_name", "project__owner__email", "id", "uid")
list_display = ("name", "date", "security")
list_filter = ("security", "project__name", "deleted")
fieldsets = (("Analysis Metadata",
{'fields': ("name", "owner", 'project', ("uid", "rank"),
("deleted", "security"), "image", 'root'),
"classes": ('extrapretty')}
),
("Optional Text Inputs",
{'fields': (("text", "html")),
"classes": ("collapse", 'extrapretty')}
),
("Run Time Settings",
{'fields': ("json_text", "template"),
"classes": ("wide", 'extrapretty')},
),
)
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
formfield_overrides = {
TextField: {'widget': Textarea(attrs={'rows': 20, 'cols': 100})},
}
search_fields = ('name', 'text', 'owner__first_name', 'owner__email', 'owner__username', "id", "uid")
list_display = ("name", "date", "deleted", 'privacy', 'owner')
list_filter = ("deleted", 'privacy')
fieldsets = (("Project Metadata",
{'fields': ("name", "owner", ("uid", "rank"),
"deleted", "image", "privacy", 'sharable_token'),
"classes": ('extrapretty')}
),
("Optional Text Inputs",
{'fields': ("text",),
"classes": ("collapse", 'extrapretty')}
),
)
|
the-stack_0_11292 | from denariusrpc.authproxy import AuthServiceProxy, JSONRPCException
import time
import sys
import datetime
import urllib
import json
from influxdb import InfluxDBClient
# rpc_user and rpc_password are set in the denarius.conf file
rpc_connection = AuthServiceProxy("http://%s:%[email protected]:32369"%("rpcuser", "rpcpassword"))
#test
blocktest = rpc_connection.getblockcount()
print(blocktest)
#for i in range(3):
# print(i)
# block = rpc_connection.getblockbynumber(i)
# print(block)
# Configure InfluxDB connection variables
host = "127.0.0.1" # My Ubuntu NUC
port = 8086 # default port
user = "admin" # the user/password with write access
password = "admin"
dbname = "blocks" # the database we created earlier
interval = 60 # Sample period in seconds
# Create the InfluxDB client object
client = InfluxDBClient(host, port, user, password, dbname)
# think of measurement as a SQL table, it's not...but...
measurement = "measurement"
# location will be used as a grouping tag later
blockchain = "denarius"
# Run until you get a ctrl^c
#def main():
import time
#for i in range(2499428, 2499437):
# print(i)
blockcount = rpc_connection.getblockcount()
block = rpc_connection.getblockbynumber(blockcount)
grafanatime = block['time'] * 1000000000
hash = block['hash']
size = block['size']
height = block['height']
version = block['version']
merkleroot = block['merkleroot']
mint = int(block['mint'])
timed = block['time']
nonce = block['nonce']
bits = block['bits']
difficulty = float(block['difficulty'])
blocktrust = block['blocktrust']
chaintrust = block['chaintrust']
chainwork = block['chainwork']
previousblockhash = block['previousblockhash']
#nextblockhash = block['nextblockhash']
flags = block['flags']
proofhash = block['proofhash']
entropybit = block['entropybit']
modifier = block['modifier']
modifierchecksum = block['modifierchecksum']
data = [
{
"measurement": measurement,
"tags": {
"blockchain": blockchain,
},
"time": grafanatime,
"fields": {
#"block" : i,
"hash" : hash,
"size" : size,
"height" : height,
"version" : version,
"merkleroot" : merkleroot,
"mint" : mint,
"time" : timed,
"nonce" : nonce,
"bits" : bits,
"difficulty" : difficulty,
"blocktrust" : blocktrust,
"chaintrust" : chaintrust,
"chainwork" : chainwork,
# "nextblockhash" : nextblockhash,
"flags" : flags,
"proofhash" : proofhash,
"entropybit" : entropybit,
"modifier" : modifier,
"modifierchecksum" : modifierchecksum
}
}
]
# Send the JSON data to InfluxDB
print(difficulty)
client.write_points(data)
|
the-stack_0_11294 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import widgets
from django.forms.util import ErrorList
from django.core.exceptions import ValidationError
class PartialFormField(object):
"""
Behave similar to django.forms.Field, encapsulating a partial dictionary, stored as
JSONField in the database.
"""
def __init__(self, name, widget, label=None, initial='', help_text='', error_class=ErrorList):
if not name:
raise AttributeError('The field must have a name')
self.name = name
if not isinstance(widget, widgets.Widget):
raise AttributeError('The field `widget` must be derived from django.forms.widgets.Widget')
self.label = label or name
self.widget = widget
self.initial = initial
self.help_text = help_text
self.error_class = error_class
def run_validators(self, value):
if not callable(getattr(self.widget, 'validate', None)):
return
errors = []
if callable(getattr(self.widget, '__iter__', None)):
for field_name in self.widget:
try:
self.widget.validate(value.get(self.name), field_name)
except ValidationError as e:
if isinstance(getattr(e, 'params', None), dict):
e.params.update(label=self.label)
messages = self.error_class([m for m in e.messages])
errors.extend(messages)
else:
try:
self.widget.validate(value.get(self.name))
except ValidationError as e:
if isinstance(getattr(e, 'params', None), dict):
e.params.update(label=self.label)
errors = self.error_class([m for m in e.messages])
if errors:
raise ValidationError(errors)
def get_element_ids(self, prefix_id):
"""
Returns a single or a list of element ids, one for each input widget of this field
"""
if isinstance(self.widget, widgets.MultiWidget):
ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, field_name) for field_name in self.widget]
elif isinstance(self.widget, (widgets.SelectMultiple, widgets.RadioSelect)):
ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, k) for k in range(len(self.widget.choices))]
else:
ids = ['{0}_{1}'.format(prefix_id, self.name)]
return ids
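# Usage sketch (field name and widget choices are illustrative assumptions):
#
#   color_field = PartialFormField(
#       'background_color',
#       widgets.Select(choices=[('white', 'White'), ('black', 'Black')]),
#       label='Background color',
#       initial='white',
#   )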
|
the-stack_0_11295 | """Provide functionality to stream video source.
Components use create_stream with a stream source (e.g. an rtsp url) to create
a new Stream object. Stream manages:
- Background work to fetch and decode a stream
- Desired output formats
- Home Assistant URLs for viewing a stream
- Access tokens for URLs for viewing a stream
A Stream consists of a background worker, and one or more output formats each
with their own idle timeout managed by the stream component. When an output
format is no longer in use, the stream component will expire it. When there
are no active output formats, the background worker is shut down and access
tokens are expired. Alternatively, a Stream can be configured with keepalive
to always keep workers active.
"""
from __future__ import annotations
from collections.abc import Callable, Mapping
import logging
import re
import secrets
import threading
import time
from types import MappingProxyType
from typing import cast
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_ENDPOINTS,
ATTR_SETTINGS,
ATTR_STREAMS,
CONF_LL_HLS,
CONF_PART_DURATION,
CONF_SEGMENT_DURATION,
DOMAIN,
HLS_PROVIDER,
MAX_SEGMENTS,
OUTPUT_IDLE_TIMEOUT,
RECORDER_PROVIDER,
SEGMENT_DURATION_ADJUSTER,
STREAM_RESTART_INCREMENT,
STREAM_RESTART_RESET_TIME,
TARGET_SEGMENT_DURATION_NON_LL_HLS,
)
from .core import PROVIDERS, IdleTimer, KeyFrameConverter, StreamOutput, StreamSettings
from .hls import HlsStreamOutput, async_setup_hls
_LOGGER = logging.getLogger(__name__)
STREAM_SOURCE_REDACT_PATTERN = [
(re.compile(r"//.*:.*@"), "//****:****@"),
(re.compile(r"\?auth=.*"), "?auth=****"),
]
def redact_credentials(data: str) -> str:
"""Redact credentials from string data."""
for (pattern, repl) in STREAM_SOURCE_REDACT_PATTERN:
data = pattern.sub(repl, data)
return data
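# Example of the redaction behaviour (the URL is illustrative):
#
#   redact_credentials("rtsp://admin:[email protected]/live?auth=token")
#   -> "rtsp://****:****@10.0.0.2/live?auth=****"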
def create_stream(
hass: HomeAssistant,
stream_source: str,
options: dict[str, str],
stream_label: str | None = None,
) -> Stream:
"""Create a stream with the specified identfier based on the source url.
The stream_source is typically an rtsp url (though any url accepted by ffmpeg is fine) and
options are passed into pyav / ffmpeg as options.
The stream_label is a string used as an additional message in logging.
"""
if DOMAIN not in hass.config.components:
raise HomeAssistantError("Stream integration is not set up.")
# For RTSP streams, prefer TCP
if isinstance(stream_source, str) and stream_source[:7] == "rtsp://":
options = {
"rtsp_flags": "prefer_tcp",
"stimeout": "5000000",
**options,
}
stream = Stream(hass, stream_source, options=options, stream_label=stream_label)
hass.data[DOMAIN][ATTR_STREAMS].append(stream)
return stream
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_LL_HLS, default=False): cv.boolean,
vol.Optional(CONF_SEGMENT_DURATION, default=6): vol.All(
cv.positive_float, vol.Range(min=2, max=10)
),
vol.Optional(CONF_PART_DURATION, default=1): vol.All(
cv.positive_float, vol.Range(min=0.2, max=1.5)
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
def filter_libav_logging() -> None:
"""Filter libav logging to only log when the stream logger is at DEBUG."""
stream_debug_enabled = logging.getLogger(__name__).isEnabledFor(logging.DEBUG)
def libav_filter(record: logging.LogRecord) -> bool:
return stream_debug_enabled
for logging_namespace in (
"libav.mp4",
"libav.h264",
"libav.hevc",
"libav.rtsp",
"libav.tcp",
"libav.tls",
"libav.mpegts",
"libav.NULL",
):
logging.getLogger(logging_namespace).addFilter(libav_filter)
# Set log level to error for libav.mp4
logging.getLogger("libav.mp4").setLevel(logging.ERROR)
# Suppress "deprecated pixel format" WARNING
logging.getLogger("libav.swscaler").setLevel(logging.ERROR)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up stream."""
# Drop libav log messages if stream logging is above DEBUG
filter_libav_logging()
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .recorder import async_setup_recorder
hass.data[DOMAIN] = {}
hass.data[DOMAIN][ATTR_ENDPOINTS] = {}
hass.data[DOMAIN][ATTR_STREAMS] = []
if (conf := config.get(DOMAIN)) and conf[CONF_LL_HLS]:
assert isinstance(conf[CONF_SEGMENT_DURATION], float)
assert isinstance(conf[CONF_PART_DURATION], float)
hass.data[DOMAIN][ATTR_SETTINGS] = StreamSettings(
ll_hls=True,
min_segment_duration=conf[CONF_SEGMENT_DURATION]
- SEGMENT_DURATION_ADJUSTER,
part_target_duration=conf[CONF_PART_DURATION],
hls_advance_part_limit=max(int(3 / conf[CONF_PART_DURATION]), 3),
hls_part_timeout=2 * conf[CONF_PART_DURATION],
)
else:
hass.data[DOMAIN][ATTR_SETTINGS] = StreamSettings(
ll_hls=False,
min_segment_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS
- SEGMENT_DURATION_ADJUSTER,
part_target_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS,
hls_advance_part_limit=3,
hls_part_timeout=TARGET_SEGMENT_DURATION_NON_LL_HLS,
)
# Setup HLS
hls_endpoint = async_setup_hls(hass)
hass.data[DOMAIN][ATTR_ENDPOINTS][HLS_PROVIDER] = hls_endpoint
# Setup Recorder
async_setup_recorder(hass)
@callback
def shutdown(event: Event) -> None:
"""Stop all stream workers."""
for stream in hass.data[DOMAIN][ATTR_STREAMS]:
stream.keepalive = False
stream.stop()
_LOGGER.info("Stopped stream workers")
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
return True
class Stream:
"""Represents a single stream."""
def __init__(
self,
hass: HomeAssistant,
source: str,
options: dict[str, str],
stream_label: str | None = None,
) -> None:
"""Initialize a stream."""
self.hass = hass
self.source = source
self.options = options
self._stream_label = stream_label
self.keepalive = False
self.access_token: str | None = None
self._thread: threading.Thread | None = None
self._thread_quit = threading.Event()
self._outputs: dict[str, StreamOutput] = {}
self._fast_restart_once = False
self._keyframe_converter = KeyFrameConverter(hass)
self._available: bool = True
self._update_callback: Callable[[], None] | None = None
self._logger = (
logging.getLogger(f"{__package__}.stream.{stream_label}")
if stream_label
else _LOGGER
)
def endpoint_url(self, fmt: str) -> str:
"""Start the stream and returns a url for the output format."""
if fmt not in self._outputs:
raise ValueError(f"Stream is not configured for format '{fmt}'")
if not self.access_token:
self.access_token = secrets.token_hex()
endpoint_fmt: str = self.hass.data[DOMAIN][ATTR_ENDPOINTS][fmt]
return endpoint_fmt.format(self.access_token)
def outputs(self) -> Mapping[str, StreamOutput]:
"""Return a copy of the stream outputs."""
# A copy is returned so the caller can iterate through the outputs
# without concern about self._outputs being modified from another thread.
return MappingProxyType(self._outputs.copy())
def add_provider(
self, fmt: str, timeout: int = OUTPUT_IDLE_TIMEOUT
) -> StreamOutput:
"""Add provider output stream."""
if not self._outputs.get(fmt):
@callback
def idle_callback() -> None:
if (
not self.keepalive or fmt == RECORDER_PROVIDER
) and fmt in self._outputs:
self.remove_provider(self._outputs[fmt])
self.check_idle()
provider = PROVIDERS[fmt](
self.hass, IdleTimer(self.hass, timeout, idle_callback)
)
self._outputs[fmt] = provider
return self._outputs[fmt]
def remove_provider(self, provider: StreamOutput) -> None:
"""Remove provider output stream."""
if provider.name in self._outputs:
self._outputs[provider.name].cleanup()
del self._outputs[provider.name]
if not self._outputs:
self.stop()
def check_idle(self) -> None:
"""Reset access token if all providers are idle."""
if all(p.idle for p in self._outputs.values()):
self.access_token = None
@property
def available(self) -> bool:
"""Return False if the stream is started and known to be unavailable."""
return self._available
def set_update_callback(self, update_callback: Callable[[], None]) -> None:
"""Set callback to run when state changes."""
self._update_callback = update_callback
@callback
def _async_update_state(self, available: bool) -> None:
"""Set state and Run callback to notify state has been updated."""
self._available = available
if self._update_callback:
self._update_callback()
def start(self) -> None:
"""Start a stream."""
if self._thread is None or not self._thread.is_alive():
if self._thread is not None:
# The thread must have crashed/exited. Join to clean up the
# previous thread.
self._thread.join(timeout=0)
self._thread_quit.clear()
self._thread = threading.Thread(
name="stream_worker",
target=self._run_worker,
)
self._thread.start()
self._logger.info(
"Started stream: %s", redact_credentials(str(self.source))
)
def update_source(self, new_source: str) -> None:
"""Restart the stream with a new stream source."""
self._logger.debug("Updating stream source %s", new_source)
self.source = new_source
self._fast_restart_once = True
self._thread_quit.set()
def _run_worker(self) -> None:
"""Handle consuming streams and restart keepalive streams."""
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .worker import StreamState, StreamWorkerError, stream_worker
stream_state = StreamState(self.hass, self.outputs)
wait_timeout = 0
while not self._thread_quit.wait(timeout=wait_timeout):
start_time = time.time()
self.hass.add_job(self._async_update_state, True)
try:
stream_worker(
self.source,
self.options,
stream_state,
self._keyframe_converter,
self._thread_quit,
)
except StreamWorkerError as err:
self._logger.error("Error from stream worker: %s", str(err))
self._available = False
stream_state.discontinuity()
if not self.keepalive or self._thread_quit.is_set():
if self._fast_restart_once:
# The stream source is updated, restart without any delay.
self._fast_restart_once = False
self._thread_quit.clear()
continue
break
self.hass.add_job(self._async_update_state, False)
# To avoid excessive restarts, wait before restarting
# As the required recovery time may be different for different setups, start
# with trying a short wait_timeout and increase it on each reconnection attempt.
# Reset the wait_timeout after the worker has been up for several minutes
if time.time() - start_time > STREAM_RESTART_RESET_TIME:
wait_timeout = 0
wait_timeout += STREAM_RESTART_INCREMENT
self._logger.debug(
"Restarting stream worker in %d seconds: %s",
wait_timeout,
self.source,
)
self._worker_finished()
def _worker_finished(self) -> None:
"""Schedule cleanup of all outputs."""
@callback
def remove_outputs() -> None:
for provider in self.outputs().values():
self.remove_provider(provider)
self.hass.loop.call_soon_threadsafe(remove_outputs)
def stop(self) -> None:
"""Remove outputs and access token."""
self._outputs = {}
self.access_token = None
if not self.keepalive:
self._stop()
def _stop(self) -> None:
"""Stop worker thread."""
if self._thread is not None:
self._thread_quit.set()
self._thread.join()
self._thread = None
self._logger.info(
"Stopped stream: %s", redact_credentials(str(self.source))
)
async def async_record(
self, video_path: str, duration: int = 30, lookback: int = 5
) -> None:
"""Make a .mp4 recording from a provided stream."""
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .recorder import RecorderOutput
# Check for file access
if not self.hass.config.is_allowed_path(video_path):
raise HomeAssistantError(f"Can't write {video_path}, no access to path!")
# Add recorder
if recorder := self.outputs().get(RECORDER_PROVIDER):
assert isinstance(recorder, RecorderOutput)
raise HomeAssistantError(
f"Stream already recording to {recorder.video_path}!"
)
recorder = cast(
RecorderOutput, self.add_provider(RECORDER_PROVIDER, timeout=duration)
)
recorder.video_path = video_path
self.start()
self._logger.debug("Started a stream recording of %s seconds", duration)
# Take advantage of lookback
hls: HlsStreamOutput = cast(HlsStreamOutput, self.outputs().get(HLS_PROVIDER))
if lookback > 0 and hls:
num_segments = min(int(lookback // hls.target_duration), MAX_SEGMENTS)
# Wait for latest segment, then add the lookback
await hls.recv()
recorder.prepend(list(hls.get_segments())[-num_segments:])
async def async_get_image(
self,
width: int | None = None,
height: int | None = None,
) -> bytes | None:
"""
Fetch an image from the Stream and return it as a jpeg in bytes.
Calls async_get_image from KeyFrameConverter. async_get_image should only be
called directly from the main loop and not from an executor thread as it uses
hass.add_executor_job underneath the hood.
"""
return await self._keyframe_converter.async_get_image(
width=width, height=height
)
|
the-stack_0_11296 | from __future__ import print_function
import array
import os
import shutil
import tempfile
import uuid
from collections import defaultdict, namedtuple
from mozlog import structuredlog
from . import manifestupdate
from . import testloader
from . import wptmanifest
from . import wpttest
from .expected import expected_path
from .vcs import git
manifest = None # Module that will be imported relative to test_root
manifestitem = None
logger = structuredlog.StructuredLogger("web-platform-tests")
try:
import ujson as json
except ImportError:
import json
def update_expected(test_paths, serve_root, log_file_names,
rev_old=None, rev_new="HEAD", ignore_existing=False,
sync_root=None, property_order=None, boolean_properties=None,
stability=None):
"""Update the metadata files for web-platform-tests based on
the results obtained in a previous run or runs
If stability is not None, assume log_file_names refers to logs from repeated
test jobs, disable tests that don't behave as expected on all runs"""
do_delayed_imports(serve_root)
id_test_map = load_test_data(test_paths)
for metadata_path, updated_ini in update_from_logs(id_test_map,
*log_file_names,
ignore_existing=ignore_existing,
property_order=property_order,
boolean_properties=boolean_properties,
stability=stability):
write_new_expected(metadata_path, updated_ini)
if stability:
for test in updated_ini.iterchildren():
for subtest in test.iterchildren():
if subtest.new_disabled:
print("disabled: %s" % os.path.dirname(subtest.root.test_path) + "/" + subtest.name)
if test.new_disabled:
print("disabled: %s" % test.root.test_path)
def do_delayed_imports(serve_root=None):
global manifest, manifestitem
from manifest import manifest, item as manifestitem
def files_in_repo(repo_root):
return git("ls-tree", "-r", "--name-only", "HEAD").split("\n")
def rev_range(rev_old, rev_new, symmetric=False):
joiner = ".." if not symmetric else "..."
return "".join([rev_old, joiner, rev_new])
def paths_changed(rev_old, rev_new, repo):
data = git("diff", "--name-status", rev_range(rev_old, rev_new), repo=repo)
lines = [tuple(item.strip() for item in line.strip().split("\t", 1))
for line in data.split("\n") if line.strip()]
output = set(lines)
return output
def load_change_data(rev_old, rev_new, repo):
changes = paths_changed(rev_old, rev_new, repo)
rv = {}
status_keys = {"M": "modified",
"A": "new",
"D": "deleted"}
# TODO: deal with renames
for item in changes:
rv[item[1]] = status_keys[item[0]]
return rv
def unexpected_changes(manifests, change_data, files_changed):
files_changed = set(files_changed)
root_manifest = None
for manifest, paths in manifests.iteritems():
if paths["url_base"] == "/":
root_manifest = manifest
break
else:
return []
return [fn for _, fn, _ in root_manifest if fn in files_changed and change_data.get(fn) != "M"]
# For each testrun
# Load all files and scan for the suite_start entry
# Build a hash of filename: properties
# For each different set of properties, gather all chunks
# For each chunk in the set of chunks, go through all tests
# for each test, make a map of {conditionals: [(platform, new_value)]}
# Repeat for each platform
# For each test in the list of tests:
# for each conditional:
# If all the new values match (or there aren't any) retain that conditional
# If any new values mismatch:
# If stability and any repeated values don't match, disable the test
# else mark the test as needing human attention
# Check if all the RHS values are the same; if so collapse the conditionals
class InternedData(object):
"""Class for interning data of any (hashable) type.
This class is intended for building a mapping of int <=> value, such
that the integer may be stored as a proxy for the real value, and then
the real value obtained later from the proxy value.
In order to support the use case of packing the integer value as binary,
it is possible to specify a maximum bitsize of the data; adding more items
    than this allows will result in a ValueError exception.
    The zero value is reserved for use as a sentinel."""
type_conv = None
rev_type_conv = None
def __init__(self, max_bits=8):
self.max_idx = 2**max_bits - 2
        # Reserve 0 as a sentinel
self._data = [None], {}
def store(self, obj):
if self.type_conv is not None:
obj = self.type_conv(obj)
objs, obj_to_idx = self._data
if obj not in obj_to_idx:
value = len(objs)
objs.append(obj)
obj_to_idx[obj] = value
if value > self.max_idx:
raise ValueError
else:
value = obj_to_idx[obj]
return value
def get(self, idx):
obj = self._data[0][idx]
if self.rev_type_conv is not None:
obj = self.rev_type_conv(obj)
return obj
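# Minimal round-trip illustration (the stored value is an arbitrary example):
#
#   statuses = InternedData(4)        # at most 2**4 - 2 = 14 distinct values
#   idx = statuses.store("PASS")      # -> 1 (index 0 is reserved as the sentinel)
#   assert statuses.get(idx) == "PASS"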
class RunInfoInterned(InternedData):
def type_conv(self, value):
return tuple(value.items())
def rev_type_conv(self, value):
return dict(value)
prop_intern = InternedData(4)
run_info_intern = RunInfoInterned()
status_intern = InternedData(4)
def load_test_data(test_paths):
manifest_loader = testloader.ManifestLoader(test_paths, False)
manifests = manifest_loader.load()
id_test_map = {}
for test_manifest, paths in manifests.iteritems():
id_test_map.update(create_test_tree(paths["metadata_path"],
test_manifest))
return id_test_map
def update_from_logs(id_test_map, *log_filenames, **kwargs):
ignore_existing = kwargs.get("ignore_existing", False)
property_order = kwargs.get("property_order")
boolean_properties = kwargs.get("boolean_properties")
stability = kwargs.get("stability")
updater = ExpectedUpdater(id_test_map,
ignore_existing=ignore_existing)
for i, log_filename in enumerate(log_filenames):
print("Processing log %d/%d" % (i + 1, len(log_filenames)))
with open(log_filename) as f:
updater.update_from_log(f)
for item in update_results(id_test_map, property_order, boolean_properties, stability):
yield item
def update_results(id_test_map, property_order, boolean_properties, stability):
test_file_items = set(id_test_map.itervalues())
default_expected_by_type = {}
for test_type, test_cls in wpttest.manifest_test_cls.iteritems():
if test_cls.result_cls:
default_expected_by_type[(test_type, False)] = test_cls.result_cls.default_expected
if test_cls.subtest_result_cls:
default_expected_by_type[(test_type, True)] = test_cls.subtest_result_cls.default_expected
for test_file in test_file_items:
updated_expected = test_file.update(property_order, boolean_properties, stability,
default_expected_by_type)
if updated_expected is not None and updated_expected.modified:
yield test_file.metadata_path, updated_expected
def directory_manifests(metadata_path):
rv = []
for dirpath, dirname, filenames in os.walk(metadata_path):
if "__dir__.ini" in filenames:
rel_path = os.path.relpath(dirpath, metadata_path)
rv.append(os.path.join(rel_path, "__dir__.ini"))
return rv
def write_changes(metadata_path, expected):
# First write the new manifest files to a temporary directory
temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
write_new_expected(temp_path, expected)
# Copy all files in the root to the temporary location since
# these cannot be ini files
keep_files = [item for item in os.listdir(metadata_path) if
not os.path.isdir(os.path.join(metadata_path, item))]
for item in keep_files:
dest_dir = os.path.dirname(os.path.join(temp_path, item))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copyfile(os.path.join(metadata_path, item),
os.path.join(temp_path, item))
# Then move the old manifest files to a new location
temp_path_2 = metadata_path + str(uuid.uuid4())
os.rename(metadata_path, temp_path_2)
# Move the new files to the destination location and remove the old files
os.rename(temp_path, metadata_path)
shutil.rmtree(temp_path_2)
def write_new_expected(metadata_path, expected):
# Serialize the data back to a file
path = expected_path(metadata_path, expected.test_path)
if not expected.is_empty:
manifest_str = wptmanifest.serialize(expected.node, skip_empty_data=True)
assert manifest_str != ""
dir = os.path.split(path)[0]
if not os.path.exists(dir):
os.makedirs(dir)
tmp_path = path + ".tmp"
try:
with open(tmp_path, "wb") as f:
f.write(manifest_str)
os.rename(tmp_path, path)
except (Exception, KeyboardInterrupt):
try:
os.unlink(tmp_path)
except OSError:
pass
else:
try:
os.unlink(path)
except OSError:
pass
class ExpectedUpdater(object):
def __init__(self, id_test_map, ignore_existing=False):
self.id_test_map = id_test_map
self.ignore_existing = ignore_existing
self.run_info = None
self.action_map = {"suite_start": self.suite_start,
"test_start": self.test_start,
"test_status": self.test_status,
"test_end": self.test_end,
"assertion_count": self.assertion_count,
"lsan_leak": self.lsan_leak,
"mozleak_object": self.mozleak_object,
"mozleak_total": self.mozleak_total}
self.tests_visited = {}
def update_from_log(self, log_file):
self.run_info = None
try:
data = json.load(log_file)
except Exception:
pass
else:
if "action" not in data and "results" in data:
self.update_from_wptreport_log(data)
return
log_file.seek(0)
self.update_from_raw_log(log_file)
def update_from_raw_log(self, log_file):
action_map = self.action_map
for line in log_file:
try:
data = json.loads(line)
except ValueError:
# Just skip lines that aren't json
continue
action = data["action"]
if action in action_map:
action_map[action](data)
def update_from_wptreport_log(self, data):
action_map = self.action_map
action_map["suite_start"]({"run_info": data["run_info"]})
for test in data["results"]:
action_map["test_start"]({"test": test["test"]})
for subtest in test["subtests"]:
action_map["test_status"]({"test": test["test"],
"subtest": subtest["name"],
"status": subtest["status"],
"expected": subtest.get("expected")})
action_map["test_end"]({"test": test["test"],
"status": test["status"],
"expected": test.get("expected")})
if "asserts" in test:
asserts = test["asserts"]
action_map["assertion_count"]({"test": test["test"],
"count": asserts["count"],
"min_expected": asserts["min"],
"max_expected": asserts["max"]})
for item in data.get("lsan_leaks", []):
action_map["lsan_leak"](item)
mozleak_data = data.get("mozleak", {})
for scope, scope_data in mozleak_data.iteritems():
for key, action in [("objects", "mozleak_object"),
("total", "mozleak_total")]:
for item in scope_data.get(key, []):
item_data = {"scope": scope}
item_data.update(item)
action_map[action](item_data)
def suite_start(self, data):
self.run_info = run_info_intern.store(data["run_info"])
def test_start(self, data):
test_id = intern(data["test"].encode("utf8"))
try:
test_data = self.id_test_map[test_id]
except KeyError:
print("Test not found %s, skipping" % test_id)
return
if self.ignore_existing:
test_data.set_requires_update()
test_data.clear.add("expected")
self.tests_visited[test_id] = set()
def test_status(self, data):
test_id = intern(data["test"].encode("utf8"))
subtest = intern(data["subtest"].encode("utf8"))
test_data = self.id_test_map.get(test_id)
if test_data is None:
return
self.tests_visited[test_id].add(subtest)
result = status_intern.store(data["status"])
test_data.set(test_id, subtest, "status", self.run_info, result)
if data.get("expected") and data["expected"] != data["status"]:
test_data.set_requires_update()
def test_end(self, data):
if data["status"] == "SKIP":
return
test_id = intern(data["test"].encode("utf8"))
test_data = self.id_test_map.get(test_id)
if test_data is None:
return
result = status_intern.store(data["status"])
test_data.set(test_id, None, "status", self.run_info, result)
if data.get("expected") and data["status"] != data["expected"]:
test_data.set_requires_update()
del self.tests_visited[test_id]
def assertion_count(self, data):
test_id = intern(data["test"].encode("utf8"))
test_data = self.id_test_map.get(test_id)
if test_data is None:
return
test_data.set(test_id, None, "asserts", self.run_info, data["count"])
if data["count"] < data["min_expected"] or data["count"] > data["max_expected"]:
test_data.set_requires_update()
def test_for_scope(self, data):
dir_path = data.get("scope", "/")
dir_id = intern(os.path.join(dir_path, "__dir__").replace(os.path.sep, "/").encode("utf8"))
if dir_id.startswith("/"):
dir_id = dir_id[1:]
return dir_id, self.id_test_map[dir_id]
def lsan_leak(self, data):
dir_id, test_data = self.test_for_scope(data)
test_data.set(dir_id, None, "lsan",
self.run_info, (data["frames"], data.get("allowed_match")))
if not data.get("allowed_match"):
test_data.set_requires_update()
def mozleak_object(self, data):
dir_id, test_data = self.test_for_scope(data)
test_data.set(dir_id, None, "leak-object",
self.run_info, ("%s:%s", (data["process"], data["name"]),
data.get("allowed")))
if not data.get("allowed"):
test_data.set_requires_update()
def mozleak_total(self, data):
if data["bytes"]:
dir_id, test_data = self.test_for_scope(data)
test_data.set(dir_id, None, "leak-threshold",
self.run_info, (data["process"], data["bytes"], data["threshold"]))
if data["bytes"] > data["threshold"] or data["bytes"] < 0:
test_data.set_requires_update()
def create_test_tree(metadata_path, test_manifest):
"""Create a map of test_id to TestFileData for that test.
"""
do_delayed_imports()
id_test_map = {}
exclude_types = frozenset(["stub", "helper", "manual", "support", "conformancechecker", "reftest_base"])
all_types = manifestitem.item_types.keys()
include_types = set(all_types) - exclude_types
for item_type, test_path, tests in test_manifest.itertypes(*include_types):
test_file_data = TestFileData(intern(test_manifest.url_base.encode("utf8")),
intern(item_type.encode("utf8")),
metadata_path,
test_path,
tests)
for test in tests:
id_test_map[intern(test.id.encode("utf8"))] = test_file_data
dir_path = os.path.split(test_path)[0].replace(os.path.sep, "/")
while True:
if dir_path:
dir_id = dir_path + "/__dir__"
else:
dir_id = "__dir__"
dir_id = intern((test_manifest.url_base + dir_id).lstrip("/").encode("utf8"))
if dir_id not in id_test_map:
test_file_data = TestFileData(intern(test_manifest.url_base.encode("utf8")),
None,
metadata_path,
dir_id,
[])
id_test_map[dir_id] = test_file_data
if not dir_path or dir_path in id_test_map:
break
dir_path = dir_path.rsplit("/", 1)[0] if "/" in dir_path else ""
return id_test_map
class PackedResultList(object):
"""Class for storing test results.
Results are stored as an array of 2-byte integers for compactness.
The first 4 bits represent the property name, the second 4 bits
represent the test status (if it's a result with a status code), and
the final 8 bits represent the run_info. If the result doesn't have a
simple status code but instead a richer type, we place that richer type
in a dictionary and set the status part of the result type to 0.
This class depends on the global prop_intern, run_info_intern and
    status_intern InternedData objects to convert between the bit values
and corresponding Python objects."""
def __init__(self):
self.data = array.array("H")
__slots__ = ("data", "raw_data")
def append(self, prop, run_info, value):
out_val = (prop << 12) + run_info
if prop == prop_intern.store("status"):
out_val += value << 8
else:
if not hasattr(self, "raw_data"):
self.raw_data = {}
self.raw_data[len(self.data)] = value
self.data.append(out_val)
def unpack(self, idx, packed):
prop = prop_intern.get((packed & 0xF000) >> 12)
value_idx = (packed & 0x0F00) >> 8
if value_idx == 0:
value = self.raw_data[idx]
else:
value = status_intern.get(value_idx)
run_info = run_info_intern.get(packed & 0x00FF)
return prop, run_info, value
def __iter__(self):
for i, item in enumerate(self.data):
yield self.unpack(i, item)
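# Illustrative sketch (not part of the original module): the 16-bit layout produced by
# PackedResultList.append and decoded by unpack. The index values below are hypothetical
# and must fit the field widths (property: 4 bits, status: 4 bits, run_info: 8 bits).
def _example_packed_layout(prop_idx=1, status_idx=2, run_info_idx=3):
    packed = (prop_idx << 12) + (status_idx << 8) + run_info_idx
    assert (packed & 0xF000) >> 12 == prop_idx   # top 4 bits: interned property name
    assert (packed & 0x0F00) >> 8 == status_idx  # next 4 bits: interned status (0 means "look in raw_data")
    assert packed & 0x00FF == run_info_idx       # low 8 bits: interned run_info
    return packed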
class TestFileData(object):
__slots__ = ("url_base", "item_type", "test_path", "metadata_path", "tests",
"_requires_update", "clear", "data")
def __init__(self, url_base, item_type, metadata_path, test_path, tests):
self.url_base = url_base
self.item_type = item_type
self.test_path = test_path
self.metadata_path = metadata_path
self.tests = {intern(item.id.encode("utf8")) for item in tests}
self._requires_update = False
self.clear = set()
self.data = defaultdict(lambda: defaultdict(PackedResultList))
def set_requires_update(self):
self._requires_update = True
def set(self, test_id, subtest_id, prop, run_info, value):
self.data[test_id][subtest_id].append(prop_intern.store(prop),
run_info,
value)
def expected(self, property_order, boolean_properties):
expected_data = load_expected(self.url_base,
self.metadata_path,
self.test_path,
self.tests,
property_order,
boolean_properties)
if expected_data is None:
expected_data = create_expected(self.url_base,
self.test_path,
property_order,
boolean_properties)
return expected_data
def update(self, property_order, boolean_properties, stability,
default_expected_by_type):
if not self._requires_update:
return
expected = self.expected(property_order, boolean_properties)
expected_by_test = {}
for test_id in self.tests:
if not expected.has_test(test_id):
expected.append(manifestupdate.TestNode.create(test_id))
test_expected = expected.get_test(test_id)
expected_by_test[test_id] = test_expected
for prop in self.clear:
test_expected.clear(prop)
for test_id, test_data in self.data.iteritems():
for subtest_id, results_list in test_data.iteritems():
for prop, run_info, value in results_list:
# Special case directory metadata
if subtest_id is None and test_id.endswith("__dir__"):
if prop == "lsan":
expected.set_lsan(run_info, value)
elif prop == "leak-object":
expected.set_leak_object(run_info, value)
elif prop == "leak-threshold":
expected.set_leak_threshold(run_info, value)
continue
if prop == "status":
value = Result(value, default_expected_by_type[self.item_type,
subtest_id is not None])
test_expected = expected_by_test[test_id]
if subtest_id is None:
item_expected = test_expected
else:
item_expected = test_expected.get_subtest(subtest_id)
if prop == "status":
item_expected.set_result(run_info, value)
elif prop == "asserts":
item_expected.set_asserts(run_info, value)
expected.coalesce_properties(stability=stability)
for test in expected.iterchildren():
for subtest in test.iterchildren():
subtest.coalesce_properties(stability=stability)
test.coalesce_properties(stability=stability)
return expected
Result = namedtuple("Result", ["status", "default_expected"])
def create_expected(url_base, test_path, property_order=None,
boolean_properties=None):
expected = manifestupdate.ExpectedManifest(None,
test_path,
url_base,
property_order=property_order,
boolean_properties=boolean_properties)
return expected
def load_expected(url_base, metadata_path, test_path, tests, property_order=None,
boolean_properties=None):
expected_manifest = manifestupdate.get_manifest(metadata_path,
test_path,
url_base,
property_order=property_order,
boolean_properties=boolean_properties)
if expected_manifest is None:
return
# Remove expected data for tests that no longer exist
for test in expected_manifest.iterchildren():
if test.id not in tests:
test.remove()
return expected_manifest
|
the-stack_0_11297 | # NEW COLORS 108.04.24
# output=gray colors
import numpy as np
import pygame
import time
# Define some colors
COLORS = 3  # maximum number of test passes per die
# Colors used by the simulator display
BLACK = np.array((0, 0, 0))
WHITE = np.array((255, 255, 255))
BLUE = np.array((60, 150, 255))
PURPLE = np.array((153, 47, 185))
RED_PROBE = np.array((230, 90, 80))
YELLOW = np.array((235, 226, 80))
# Gray levels used in the output image
BACKGROUND_COLORS = 255  # background
BUFFER_COLORS = 170  # buffer zone
PROBE_COLORS = 220  # probe
# Gray levels for the remaining test-count states
OTHER_COLORS = 129
NUM_COLORS = []  # e.g. with a test limit of 3: [129, 86, 43]
for num in range(COLORS):
    NUM_COLORS.append(int(OTHER_COLORS * (1 - num / COLORS)))
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 1  # actual environment image: one pixel per die
HEIGHT = 1  # actual environment image
WIDTH_sc = 20  # simulator display window
HEIGHT_sc = 20  # simulator display window
# This sets the margin between each cell
MARGIN = 0  # actual environment image
MARGIN_sc = 2  # simulator display window
# Probe's candidate locations when the environment initializes
Initial = [(2, 2), (14, 14), (2, 14), (14, 2), (11, 5), (5, 11), (11, 11), (5, 5), (8, 8)]
PACE = 1  # movement step size
class wafer_check():
def __init__(self,wafer,probe,mode=0,training_time=60,training_steps=0):
        self._envs = np.array(wafer)  # wafer given as -1 / 0 (-1 = buffer zone, 0 = die still to be tested)
        self._envs_nan = np.zeros(self._envs.shape)  # wafer given as nan / 0 (nan = buffer zone, 0 = die still to be tested)
        self._probe = np.array(probe, np.int)  # probe card given as 0 / 1
        self.envsY, self.envsX = self._envs.shape  # wafer height and width
        self.wafer_len = self.envsY * self.envsX  # total number of dies
        self.probY, self.probX = self._probe.shape  # probe height and width
        self.probZ = max(self.probY, self.probX)  # longest side of the probe
        self.envs_list = [(b,a) for b in range(self.envsY) for a in range(self.envsX) if self._envs[b,a] == -1]  # buffer-zone positions
        self.envs_len = len(self.envs_list)  # number of buffer cells
        self.probe_list = [(b,a) for b in range(self.probY) for a in range(self.probX) if self._probe[b,a] == 1]  # probe shape
        self.probe_len = len(self.probe_list)  # number of probe pins
        self.size = [(self.envsX*WIDTH+(self.envsX+1)*MARGIN),
                     (self.envsY*HEIGHT+(self.envsY+1)*MARGIN)]  # size of the actual environment image
        self.size_sc = [(self.envsX*WIDTH_sc+(self.envsX+1)*MARGIN_sc),
                        (self.envsY*HEIGHT_sc+(self.envsY+1)*MARGIN_sc)]  # size of the simulator display window
        self._output = np.full((self.size[1],self.size[0]), BACKGROUND_COLORS, np.int)  # initialize the output image
        self.location = np.array(Initial)  # initial probe positions
        self.action_space = ['None','Down','Right','Up','Left','Down-Right','Up-Right','Up-Left','Down-Left']
        self.action_space_num = int((len(self.action_space) - 1) * PACE)  # total number of actions (8 directions * step sizes)
        self.available = np.zeros(self.action_space_num, dtype=np.float32)  # vector marking which actions are currently valid
        self.num_max = COLORS
        self.reward_value = 0  # reward
        self.envs_mean = None  # mean number of times each die has been tested
        self.envs_std = None  # standard deviation of the per-die test counts
        self.mode = mode  # whether to show the simulator window (1 = yes, 0 = no)
        # Maximum training time per episode (a value <= 0 means unlimited time)
        if training_time > 0:
            self.training_time = training_time
        else:
            self.training_time = np.inf
        # Maximum number of moves per episode (a value <= 0 means unlimited moves)
        if training_steps > 0:
            self.training_steps = training_steps
        else:
            self.training_steps = np.inf
        # Whether to show the simulator window (1 = yes, 0 = no)
        if self.mode == 1:
            self.sc = pygame.display.set_mode(self.size_sc)
        # Initialize the output image
        self.reset_observation()
        # Initialize the environment
        self.reset()
    # Compute the pixel rectangle of a grid cell
@staticmethod
def rect(column, row):
rect = [(MARGIN_sc + WIDTH_sc) * column + MARGIN_sc,
(MARGIN_sc + HEIGHT_sc) * row + MARGIN_sc,
WIDTH_sc,
HEIGHT_sc]
return rect
    # Fill the colour of one grid cell in the output image
@staticmethod
def draw_plt(output, y, x, color): # X : column, Y : row
for h in range(HEIGHT):
for w in range(WIDTH):
output_h = y * HEIGHT + h
output_w = x * WIDTH + w
output[output_h][output_w] = color
def reset(self):
#reset the environment
        self.y, self.x = self.location[np.random.randint(len(self.location))]  # pick a random initial position (y, x)
        self.y_last, self.x_last = self.y, self.x
        self.steps = 0  # number of moves taken
        self.dist = 0  # distance moved
        self.num_color = np.zeros(self.num_max+2, np.int)  # counts per die state [untested, tested once, tested twice, tested 3+ times, buffer]
        self.action = 'None'
        self.reward_value = 0
        self.envs = np.copy(self._envs_nan)  # re-copy the initial wafer state
        self.output = np.copy(self._output)  # re-copy the initial output image
        if self.mode == 1:  # if the simulator window is shown, it must be reset too
            self.reset_envs()
        # Mark the dies under the initial probe position as tested once
        for b in range(self.probY):
            for a in range(self.probX):
                if self._probe[b][a] == 1 and not np.isnan(self.envs[self.y+b][self.x+a]):
                    self.envs[self.y+b][self.x+a] = 1
        self.num_color_last = np.zeros(self.num_max+2, np.int)  # die-state counts after the previous move
        self.num_color_last[-1] = self.envs_len  # number of buffer cells
        self.num_color_last[0] = (self._envs == 0).sum()  # number of untested dies
        self.time_end = time.time() + self.training_time  # end-of-training timestamp when a time limit is set
self.step()
return self.output, self.available
def step(self, action=None):
#Agent's action
now = time.time()
if action != None:
            act = ((action) % 8)  # direction index (0~7)
            pace = int((action) / 8) + 1  # step size for this action
        self.done = 0  # set to 1 when the episode ends
        self.envs_mean = None
        self.envs_std = None
        self.time_is_end = 0  # episode terminated by the time limit
        self.steps_is_end = 0  # episode terminated by the step limit
        self.episode_is_end = 0  # episode terminated because every die has been tested
self.reward_value = 0
if now < self.time_end and self.steps < self.training_steps:
y = self.y
x = self.x
            y_diff = self.envsY-self.probY  # largest valid probe y coordinate (probe must stay on the wafer)
            x_diff = self.envsX-self.probX  # largest valid probe x coordinate (probe must stay on the wafer)
            probe_list = self.probe_list
            invalid = 0
            self.steps += 1  # increment the move counter
# move the probe
            if action == None:  # a None action is a no-op, so undo the step-count increment
                invalid = -1
                self.steps -= 1
                self.action = 'None'
            elif pace > self.probZ:  # a pace larger than the probe's longest side is treated as a no-op
invalid = -1
self.steps -= 1
self.action = 'None'
elif act == 0:
if (y+pace-1) < y_diff:
y += pace
invalid = 0
self.action = 'Down'
else:
invalid = 1
elif act == 1:
if (x+pace-1) < x_diff:
x += pace
invalid = 0
self.action = 'Right'
else:
invalid = 1
elif act == 2:
if (y-pace+1) > 0:
y -= pace
invalid = 0
self.action = 'Up'
else:
invalid = 1
elif act == 3:
if (x - pace+1) > 0:
x -= pace
invalid = 0
self.action = 'Left'
else:
invalid = 1
elif act == 4:
if (y+pace-1) < y_diff and (x+pace-1) < x_diff:
y += pace
x += pace
invalid = 0
self.action = 'Down-Right'
else:
invalid = 1
elif act == 5:
if (y-pace+1) > 0 and (x+pace-1) < x_diff:
y-=pace
x+=pace
invalid = 0
self.action = 'Up-Right'
else:
invalid = 1
elif act == 6:
if (y-pace+1) > 0 and (x-pace+1) > 0:
y-=pace
x-=pace
invalid = 0
self.action = 'Up-Left'
else:
invalid = 1
elif act == 7:
if (y+pace-1) < y_diff and (x-pace+1) > 0:
y+=pace
x-=pace
invalid = 0
self.action = 'Down-Left'
else:
invalid = 1
else:
invalid = -1
self.action = 'None'
            # Invalid move (out of bounds)
            if invalid == 1:
                self.action = 'Invalid'
            # Valid move
            elif invalid == 0:
                # Update the probe coordinates
                self.y = y
                self.x = x
                # Increment the test count of every die under the probe
for c in range(len(probe_list)):
self.envs[y+probe_list[c][0]][x+probe_list[c][1]] += 1
elif now >= self.time_end:
self.time_is_end = 1
if self.steps >= self.training_steps:
self.steps_is_end = 1
        self.check()  # tally the die states and compute the reward
self.observation()
self.action_available()
if self.mode == 1:
self.build_envs()
time.sleep(0.01)
self.y_last = self.y
self.x_last = self.x
if self.steps_is_end == 1:
self.steps = 0
if self.time_is_end == 1:
self.steps = 0
self.time_end = time.time() + self.training_time
return self.output, self.reward_value, self.done, self.available, self.envs_mean, self.envs_std
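    # Illustrative helper (not part of the original class): how an action index maps to a
    # direction and a pace, mirroring the decoding done in step() and action_available().
    @staticmethod
    def _example_decode_action(action):
        direction = action % 8   # 0..7 -> Down, Right, Up, Left, Down-Right, Up-Right, Up-Left, Down-Left
        pace = action // 8 + 1   # every further block of 8 indices repeats the directions with a larger step
        return direction, pace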
def check(self):
        # num_color holds the counts per die state: [untested, tested once, tested twice, tested 3+ times, buffer]
        self.num_color[-1] = self.envs_len  # number of buffer cells
        for n in range(0, self.num_max):
            self.num_color[n] = (self.envs == n).sum()
        self.num_color[-2] = self.wafer_len - sum(self.num_color) + self.num_color[-2]  # tested more than num_max times
        self.dist = np.sqrt(np.square(self.y - self.y_last)+np.square(self.x - self.x_last))  # distance the probe moved
#calculate the reward
if self.action != "None":
#1st reward
if self.num_color_last[0] - self.num_color[0] > 0:
self.reward_value+=((self.num_color_last[0] - self.num_color[0])*0.01)
if self.num_color_last[0] - self.num_color[0] == self.probe_len:
self.reward_value+=((self.num_color_last[0] - self.num_color[0])*0.01)
#2nd reward
for num in range(2,self.num_max+1):
if self.num_color[num] - self.num_color_last[num] > 0:
self.reward_value-=(((self.num_color[num] - self.num_color_last[num])*num)*0.003)
#3rd reward
if np.array_equal(self.num_color,self.num_color_last):
self.reward_value-=0.1
#4th reward
self.reward_value-=self.dist*0.01
        # If the episode ends
        if self.num_color[0] == 0 or self.time_is_end == 1 or self.steps_is_end == 1:
            self.envs_mean = np.nanmean(self.envs)  # mean test count per die
            self.envs_std = np.nanstd(self.envs)  # standard deviation of the test counts
#Stop the screen when the episode is end.
if self.mode == 1:
                self.build_envs()  # refresh the simulator display
time.sleep(0.1)
#Initialize the environment
self.action = 'None'
            self.done = 1  # marks that the episode has ended
self.y, self.x = self.location[np.random.randint(len(self.location))]
self.y_last, self.x_last = self.y, self.x
self.dist = 0
self.num_color = np.zeros(self.num_max+2,np.int)
self.envs = np.copy(self._envs_nan)
self.output = np.copy(self._output)
if self.mode == 1:
self.reset_envs()
            # Mark the dies under the initial probe position as tested once
for b in range(self.probY):
for a in range(self.probX):
if self._probe[b][a] == 1 and not np.isnan(self.envs[self.y + b][self.x + a]):
self.envs[self.y + b][self.x + a] = 1
self.envs_show = np.copy(self.envs)
self.num_color[-1] = self.envs_len
self.num_color[0] = (self.envs == 0).sum()
self.num_color[1] = (self.envs == 1).sum()
if self.time_is_end != 1 and self.steps_is_end != 1:
                # all dies were tested successfully
self.episode_is_end = 1
self.steps = 0
#5th reward
self.reward_value += 1
self.num_color_last = np.copy(self.num_color)
def observation(self):
        # Update the output image
        probe_list = self.probe_list
        probe_len = self.probe_len
        # Draw the state of the dies the probe has visited
for c in range(probe_len):
for num in range(1, self.num_max+1):
if self.envs[self.y_last+probe_list[c][0]][self.x_last+probe_list[c][1]] == num: # 測試過1~3次
color = NUM_COLORS[num-1]
if self.envs[self.y_last+probe_list[c][0]][self.x_last+probe_list[c][1]] > self.num_max: # 測試過3次以上
color = NUM_COLORS[self.num_max-1]
if np.isnan(self.envs[self.y_last+probe_list[c][0]][self.x_last+probe_list[c][1]]): # 緩衝區
color = BUFFER_COLORS
wafer_check.draw_plt(self.output, self.y_last + self.probe_list[c][0], self.x_last + self.probe_list[c][1], color)
        # Draw the probe's current position
for c in range(probe_len):
color = PROBE_COLORS
wafer_check.draw_plt(self.output, self.y + self.probe_list[c][0], self.x + self.probe_list[c][1], color)
def build_envs(self):
        # Refresh the simulator display
        # Draw the state of the dies the probe has visited
        for c in range(self.probe_len):
            if self.envs[self.y_last + self.probe_list[c][0]][self.x_last + self.probe_list[c][1]] >= 1:  # visited at least once
                color = (WHITE / self.num_max).astype(np.int)
            elif np.isnan(self.envs[self.y_last+self.probe_list[c][0]][self.x_last+self.probe_list[c][1]]):  # buffer zone
                color = YELLOW
pygame.draw.rect(self.sc,
color,
wafer_check.rect((self.x_last + self.probe_list[c][1]),
(self.y_last + self.probe_list[c][0])))
        # Draw the probe's current position
for c in range(self.probe_len):
color = RED_PROBE
            if self.action == 'Invalid':  # invalid moves are shown in purple
color = PURPLE
pygame.draw.rect(self.sc,
color,
wafer_check.rect((self.x + self.probe_list[c][1]),
(self.y + self.probe_list[c][0])))
pygame.display.flip()
def reset_observation(self):
        # Reset the output image and draw the wafer layout
color = BUFFER_COLORS
for row in range(self.envsY):
for column in range(self.envsX):
if self._envs[row][column] == -1:
wafer_check.draw_plt(self._output, column, row, color)
self._envs_nan[row][column] = np.nan
def reset_envs(self):
        # Reset the simulator display and draw the wafer state
        self.sc.fill(BLACK)
        for row in range(self.envsY):
            for column in range(self.envsX):
                if self._envs[row][column] == -1:
                    pygame.draw.rect(self.sc, YELLOW, wafer_check.rect(row, column))  # buffer zone
                else:
                    pygame.draw.rect(self.sc, BLUE, wafer_check.rect(row, column))  # untested die
def action_available(self):
# evaluate actions that will go beyond the boundary & produce vector to filter
m = self.envsY
n = self.envsX
i = self.probY
j = self.probX
for k in range(self.action_space_num):
act = k % 8
step = k // 8 + 1
y = self.y
x = self.x
if act == 0:
if (y+step-1) < (m-i):
y+=step
else:
self.available[k] = np.inf
continue
elif act == 1:
if (x+step-1) < (n-j):
x+=step
else:
self.available[k] = np.inf
continue
elif act == 2:
if (y-step+1) > 0:
y-=step
else:
self.available[k] = np.inf
continue
elif act == 3:
if (x-step+1) > 0:
x-=step
else:
self.available[k] = np.inf
continue
elif act == 4:
if (y+step-1) < (m-i) and (x+step-1) < (n-j):
y+=step
x+=step
else:
self.available[k] = np.inf
continue
elif act == 5:
if (y-step+1) > 0 and (x+step-1) < (n-j):
y-=step
x+=step
else:
self.available[k] = np.inf
continue
elif act == 6:
if (y-step+1) > 0 and (x-step+1) > 0:
y-=step
x-=step
else:
self.available[k] = np.inf
continue
elif act == 7:
if (y+step-1) < (m-i) and (x-step+1) > 0:
y+=step
x-=step
else:
self.available[k] = np.inf
continue
self.available[k] = 0
if __name__ == '__main__':
import matplotlib.pyplot as plt
wafer = np.loadtxt('envs.txt')
probe = np.loadtxt('probe.txt')
envs = wafer_check(wafer, probe, mode=1, training_time=0, training_steps=1000)
pygame.init()
pygame.display.set_caption("Wafer Check Simulator")
# Loop until the user clicks the close button.
done = False
while not done:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True
elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_r:  # reset the environment
envs.reset()
if event.key == pygame.K_s:
envs.step(0)
if event.key == pygame.K_d:
envs.step(1)
if event.key == pygame.K_w:
envs.step(2)
if event.key == pygame.K_a:
envs.step(3)
if event.key == pygame.K_c:
envs.step(4)
if event.key == pygame.K_e:
envs.step(5)
if event.key == pygame.K_q:
envs.step(6)
if event.key == pygame.K_z:
envs.step(7)
                    if event.key == pygame.K_p:  # show the output image
plt.subplot(1, 2, 1), plt.title('rainbow')
plt.imshow(envs.output,cmap = 'rainbow')
plt.subplot(1, 2, 2), plt.title('gray')
plt.imshow(envs.output,cmap = 'gray')
plt.show()
pygame.quit()
|
the-stack_0_11300 | import _pickle as pickle
from keras.models import load_model
class BaseModel(object):
def __init__(self, model_size):
self.model_size = model_size
self.model = None
def save(self, filename):
if self.model is not None:
self.model.save(filename + '.model')
d = dict(self.__dict__)
d.pop('model')
f = open(filename, 'wb')
pickle.dump(d, f)
f.close()
@classmethod
def load(cls, filename):
model = load_model(filename + '.model')
f = open(filename, 'rb')
attrs = pickle.load(f)
f.close()
obj = cls(attrs['model_size'])
for key, value in attrs.items():
setattr(obj, key, value)
obj.model = model
return obj |
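# Illustrative usage sketch (not part of the original file). The layer sizes and file name
# below are assumptions for demonstration only; a subclass is expected to build `self.model`
# (a Keras model) before save() is called.
def _example_roundtrip(model_size=4, filename="/tmp/base_model_demo"):
    from keras.models import Sequential
    from keras.layers import Dense
    m = BaseModel(model_size)
    m.model = Sequential([Dense(model_size, input_shape=(model_size,))])
    m.save(filename)                 # writes `filename` (pickled attributes) and `filename + '.model'`
    return BaseModel.load(filename)  # restores the attributes and reloads the Keras model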
the-stack_0_11302 | from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
def configuration(parent_package='', top_path=None):
config = Configuration('delaunay', parent_package, top_path)
config.add_extension("_delaunay",
sources=["_delaunay.cpp", "VoronoiDiagramGenerator.cpp",
"delaunay_utils.cpp", "natneighbors.cpp"],
include_dirs=['.'],
)
return config
if __name__ == '__main__':
setup(**configuration(top_path='').todict())
|
the-stack_0_11303 | """Provides a sensor to track various status aspects of a UPS."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_NAME, CONF_USERNAME, CONF_PASSWORD,
TEMP_CELSIUS, CONF_RESOURCES, CONF_ALIAS, ATTR_STATE, STATE_UNKNOWN,
POWER_WATT)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'NUT UPS'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 3493
KEY_STATUS = 'ups.status'
KEY_STATUS_DISPLAY = 'ups.status.display'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
SENSOR_TYPES = {
'ups.status.display': ['Status', '', 'mdi:information-outline'],
'ups.status': ['Status Data', '', 'mdi:information-outline'],
'ups.alarm': ['Alarms', '', 'mdi:alarm'],
'ups.time': ['Internal Time', '', 'mdi:calendar-clock'],
'ups.date': ['Internal Date', '', 'mdi:calendar'],
'ups.model': ['Model', '', 'mdi:information-outline'],
'ups.mfr': ['Manufacturer', '', 'mdi:information-outline'],
'ups.mfr.date': ['Manufacture Date', '', 'mdi:calendar'],
'ups.serial': ['Serial Number', '', 'mdi:information-outline'],
'ups.vendorid': ['Vendor ID', '', 'mdi:information-outline'],
'ups.productid': ['Product ID', '', 'mdi:information-outline'],
'ups.firmware': ['Firmware Version', '', 'mdi:information-outline'],
'ups.firmware.aux': ['Firmware Version 2', '', 'mdi:information-outline'],
'ups.temperature': ['UPS Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'ups.load': ['Load', '%', 'mdi:gauge'],
'ups.load.high': ['Overload Setting', '%', 'mdi:gauge'],
'ups.id': ['System identifier', '', 'mdi:information-outline'],
'ups.delay.start': ['Load Restart Delay', 's', 'mdi:timer'],
'ups.delay.reboot': ['UPS Reboot Delay', 's', 'mdi:timer'],
'ups.delay.shutdown': ['UPS Shutdown Delay', 's', 'mdi:timer'],
'ups.timer.start': ['Load Start Timer', 's', 'mdi:timer'],
'ups.timer.reboot': ['Load Reboot Timer', 's', 'mdi:timer'],
'ups.timer.shutdown': ['Load Shutdown Timer', 's', 'mdi:timer'],
'ups.test.interval': ['Self-Test Interval', 's', 'mdi:timer'],
'ups.test.result': ['Self-Test Result', '', 'mdi:information-outline'],
'ups.test.date': ['Self-Test Date', '', 'mdi:calendar'],
'ups.display.language': ['Language', '', 'mdi:information-outline'],
'ups.contacts': ['External Contacts', '', 'mdi:information-outline'],
'ups.efficiency': ['Efficiency', '%', 'mdi:gauge'],
'ups.power': ['Current Apparent Power', 'VA', 'mdi:flash'],
'ups.power.nominal': ['Nominal Power', 'VA', 'mdi:flash'],
'ups.realpower': ['Current Real Power', POWER_WATT, 'mdi:flash'],
'ups.realpower.nominal': ['Nominal Real Power', POWER_WATT, 'mdi:flash'],
'ups.beeper.status': ['Beeper Status', '', 'mdi:information-outline'],
'ups.type': ['UPS Type', '', 'mdi:information-outline'],
'ups.watchdog.status': ['Watchdog Status', '', 'mdi:information-outline'],
'ups.start.auto': ['Start on AC', '', 'mdi:information-outline'],
'ups.start.battery': ['Start on Battery', '', 'mdi:information-outline'],
'ups.start.reboot': ['Reboot on Battery', '', 'mdi:information-outline'],
'ups.shutdown': ['Shutdown Ability', '', 'mdi:information-outline'],
'battery.charge': ['Battery Charge', '%', 'mdi:gauge'],
'battery.charge.low': ['Low Battery Setpoint', '%', 'mdi:gauge'],
'battery.charge.restart': ['Minimum Battery to Start', '%', 'mdi:gauge'],
'battery.charge.warning': ['Warning Battery Setpoint', '%', 'mdi:gauge'],
'battery.charger.status':
['Charging Status', '', 'mdi:information-outline'],
'battery.voltage': ['Battery Voltage', 'V', 'mdi:flash'],
'battery.voltage.nominal': ['Nominal Battery Voltage', 'V', 'mdi:flash'],
'battery.voltage.low': ['Low Battery Voltage', 'V', 'mdi:flash'],
'battery.voltage.high': ['High Battery Voltage', 'V', 'mdi:flash'],
'battery.capacity': ['Battery Capacity', 'Ah', 'mdi:flash'],
'battery.current': ['Battery Current', 'A', 'mdi:flash'],
'battery.current.total': ['Total Battery Current', 'A', 'mdi:flash'],
'battery.temperature':
['Battery Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'battery.runtime': ['Battery Runtime', 's', 'mdi:timer'],
'battery.runtime.low': ['Low Battery Runtime', 's', 'mdi:timer'],
'battery.runtime.restart':
['Minimum Battery Runtime to Start', 's', 'mdi:timer'],
'battery.alarm.threshold':
['Battery Alarm Threshold', '', 'mdi:information-outline'],
'battery.date': ['Battery Date', '', 'mdi:calendar'],
'battery.mfr.date': ['Battery Manuf. Date', '', 'mdi:calendar'],
'battery.packs': ['Number of Batteries', '', 'mdi:information-outline'],
'battery.packs.bad':
['Number of Bad Batteries', '', 'mdi:information-outline'],
'battery.type': ['Battery Chemistry', '', 'mdi:information-outline'],
'input.sensitivity':
['Input Power Sensitivity', '', 'mdi:information-outline'],
'input.transfer.low': ['Low Voltage Transfer', 'V', 'mdi:flash'],
'input.transfer.high': ['High Voltage Transfer', 'V', 'mdi:flash'],
'input.transfer.reason':
['Voltage Transfer Reason', '', 'mdi:information-outline'],
'input.voltage': ['Input Voltage', 'V', 'mdi:flash'],
'input.voltage.nominal': ['Nominal Input Voltage', 'V', 'mdi:flash'],
'input.frequency': ['Input Line Frequency', 'hz', 'mdi:flash'],
'input.frequency.nominal':
['Nominal Input Line Frequency', 'hz', 'mdi:flash'],
'input.frequency.status':
['Input Frequency Status', '', 'mdi:information-outline'],
'output.current': ['Output Current', 'A', 'mdi:flash'],
'output.current.nominal':
['Nominal Output Current', 'A', 'mdi:flash'],
'output.voltage': ['Output Voltage', 'V', 'mdi:flash'],
'output.voltage.nominal':
['Nominal Output Voltage', 'V', 'mdi:flash'],
'output.frequency': ['Output Frequency', 'hz', 'mdi:flash'],
'output.frequency.nominal':
['Nominal Output Frequency', 'hz', 'mdi:flash'],
}
STATE_TYPES = {
'OL': 'Online',
'OB': 'On Battery',
'LB': 'Low Battery',
'HB': 'High Battery',
'RB': 'Battery Needs Replaced',
'CHRG': 'Battery Charging',
'DISCHRG': 'Battery Discharging',
'BYPASS': 'Bypass Active',
'CAL': 'Runtime Calibration',
'OFF': 'Offline',
'OVER': 'Overloaded',
'TRIM': 'Trimming Voltage',
'BOOST': 'Boosting Voltage',
'FSD': 'Forced Shutdown',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ALIAS): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Required(CONF_RESOURCES):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
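# Illustrative configuration.yaml entry for this sensor platform (the host, alias and
# resource names below are examples only; the platform key is assumed to be `nut`):
#
#   sensor:
#     - platform: nut
#       host: 192.168.1.10
#       alias: myups
#       resources:
#         - ups.status.display
#         - battery.charge
#         - battery.runtime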
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NUT sensors."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
alias = config.get(CONF_ALIAS)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
data = PyNUTData(host, port, alias, username, password)
if data.status is None:
_LOGGER.error("NUT Sensor has no data, unable to set up")
raise PlatformNotReady
_LOGGER.debug('NUT Sensors Available: %s', data.status)
entities = []
for resource in config[CONF_RESOURCES]:
sensor_type = resource.lower()
# Display status is a special case that falls back to the status value
# of the UPS instead.
if sensor_type in data.status or (sensor_type == KEY_STATUS_DISPLAY
and KEY_STATUS in data.status):
entities.append(NUTSensor(name, data, sensor_type))
else:
_LOGGER.warning(
"Sensor type: %s does not appear in the NUT status "
"output, cannot add", sensor_type)
try:
data.update(no_throttle=True)
except data.pynuterror as err:
_LOGGER.error("Failure while testing NUT status retrieval. "
"Cannot continue setup: %s", err)
raise PlatformNotReady
add_entities(entities, True)
class NUTSensor(Entity):
"""Representation of a sensor entity for NUT status values."""
def __init__(self, name, data, sensor_type):
"""Initialize the sensor."""
self._data = data
self.type = sensor_type
self._name = "{} {}".format(name, SENSOR_TYPES[sensor_type][0])
self._unit = SENSOR_TYPES[sensor_type][1]
self._state = None
@property
def name(self):
"""Return the name of the UPS sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return entity state from ups."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit
@property
def device_state_attributes(self):
"""Return the sensor attributes."""
attr = dict()
attr[ATTR_STATE] = self.display_state()
return attr
def display_state(self):
"""Return UPS display state."""
if self._data.status is None:
return STATE_TYPES['OFF']
try:
return " ".join(
STATE_TYPES[state]
for state in self._data.status[KEY_STATUS].split())
except KeyError:
return STATE_UNKNOWN
def update(self):
"""Get the latest status and use it to update our sensor state."""
if self._data.status is None:
self._state = None
return
# In case of the display status sensor, keep a human-readable form
# as the sensor state.
if self.type == KEY_STATUS_DISPLAY:
self._state = self.display_state()
elif self.type not in self._data.status:
self._state = None
else:
self._state = self._data.status[self.type]
class PyNUTData:
"""Stores the data retrieved from NUT.
For each entity to use, acts as the single point responsible for fetching
updates from the server.
"""
def __init__(self, host, port, alias, username, password):
"""Initialize the data object."""
from pynut2.nut2 import PyNUTClient, PyNUTError
self._host = host
self._port = port
self._alias = alias
self._username = username
self._password = password
self.pynuterror = PyNUTError
# Establish client with persistent=False to open/close connection on
# each update call. This is more reliable with async.
self._client = PyNUTClient(self._host, self._port,
self._username, self._password, 5, False)
self._status = None
@property
def status(self):
"""Get latest update if throttle allows. Return status."""
self.update()
return self._status
def _get_alias(self):
"""Get the ups alias from NUT."""
try:
return next(iter(self._client.list_ups()))
except self.pynuterror as err:
_LOGGER.error("Failure getting NUT ups alias, %s", err)
return None
def _get_status(self):
"""Get the ups status from NUT."""
if self._alias is None:
self._alias = self._get_alias()
try:
return self._client.list_vars(self._alias)
except (self.pynuterror, ConnectionResetError) as err:
_LOGGER.debug(
"Error getting NUT vars for host %s: %s", self._host, err)
return None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, **kwargs):
"""Fetch the latest status from NUT."""
self._status = self._get_status()
|
the-stack_0_11307 | from sstcam_sandbox import get_plot
from CHECLabPy.plotting.setup import Plotter
from CHECOnsky.utils.astri_database import ASTRISQLQuerier
import pandas as pd
import matplotlib.dates as mdates
import matplotlib.colors as mcolors
class Uptime(Plotter):
def plot(self, sql, start, end, title):
start_day = start.floor("D")
end_day = end.ceil("D")
df = sql.get_table_between_datetimes(
"TCU_ELACTPOS", start_day, end_day
)
df = df.set_index('timestamp')
df = df.resample('1h').count()
idx = pd.date_range(start_day, end_day, freq='h')
df_c = df.loc[start_day:end_day].reindex(idx, fill_value=0)
date = df_c.resample('d').count().index
time = pd.date_range("2019-10-09 00:00", "2019-10-10 00:00", freq='h')
count = df_c.iloc[:-1]['value'].values.reshape(
(date.size - 1, time.size - 1))
count_3 = count.copy()
count_3[count == 0] = 0
count_3[(count > 0) & (count < count.max())] = 1
count_3[count == count.max()] = 2
weeks = mdates.WeekdayLocator(byweekday=mdates.MO)
hours = mdates.HourLocator()
hours_fmt = mdates.DateFormatter('%H:%M')
cmap = mcolors.LinearSegmentedColormap.from_list("", [
"red", "yellow", "green"
])
self.ax.pcolor(
date, time, count_3.T, cmap=cmap, edgecolors='k', linewidths=0.5
)
self.fig.autofmt_xdate()
self.ax.xaxis.set_major_locator(weeks)
self.ax.yaxis.set_major_locator(hours)
self.ax.yaxis.set_major_formatter(hours_fmt)
self.ax.yaxis.set_tick_params(direction='out', which='both')
self.ax.yaxis.set_tick_params(which='minor', left=False, right=False)
self.ax.xaxis.set_tick_params(direction='out', which='both')
self.ax.xaxis.set_tick_params(which='minor', left=False, right=False)
self.ax.set_title(title)
def main():
sql = ASTRISQLQuerier()
p_uptime = Uptime()
start = pd.Timestamp("2019-04-29 00:00")
end = pd.Timestamp("2019-05-13 00:00")
p_uptime.plot(sql, start, end, None)
start = pd.Timestamp("2019-06-10 00:00")
end = pd.Timestamp("2019-06-16 00:00")
p_uptime.plot(sql, start, end, "ASTRI Pointing Database Uptime")
p_uptime.save(get_plot("d191009_astri_db_uptime/campaign_all.pdf"))
if __name__ == '__main__':
main()
|
the-stack_0_11308 | agenda = dict()
td = list()
nomes = list()
cont = 0
print(' AGENDA TELEFONICA ')
while True:
menu = int(input('[0] Mostrar agenda\n'
'[1] Novo contato\n'
'[2] Pesquisar contato\n'
'[3] Remover ou fazer alteração do contato\n:'))
while menu not in (0, 1, 2, 3):
print('----Digite um número válido----')
menu = int(input(f'[1] Novo contato\n'
f'[2] Pesquisar contato\n'
f'[3] Remover ou fazer alteração do contato\n:'))
if menu == 0:
print(' ', end='')
for k, v in agenda.items():
print(f'{k}', 17 * ' ', end='')
print()
for k, v in enumerate(td):
if cont != len(td) + 1:
print(f'{k} {td[cont]["nome"]:<22}'
f'{td[cont]["telefone"]:<26}'
f'{td[cont]["email"]:<23}'
f'{td[cont]["twitter"]:<25}'
f'{td[cont]["instagram"]:<5}')
cont += 1
cont = 0
print()
# MENU==1
elif menu == 1:
agenda['nome'] = input(str('Nome: ')).lower()
try:
agenda['telefone'] = int(input('Telefone: '))
except:
print('\033[31mDigite somente números!!!\033[m')
agenda['telefone'] = int(input('Telefone: '))
agenda['email'] = input('Email: ')
agenda['twitter'] = input('twitter: ')
agenda['instagram'] = input('Instagram: ')
td.append(agenda.copy())
nomes.append(agenda.copy()['nome'])
print(f'Contato "{agenda["nome"]}" adicionado na agenda!')
print(menu)
# MENU==2
elif menu == 2:
try:
pesquisar = input(f'Pesquisar nome: ')
num = (nomes.index(pesquisar.lower()))
print(f'{num} - {td[num]}')
except:
print('O item não foi encontrado.')
print()
# MENU==3
elif menu == 3:
opcao = int(input('[1] Remover contato\n[2] Fazer alteração\n:'))
        while opcao not in (1, 2):
            print('----Digite um número válido----')
            opcao = int(input('[1] Remover contato\n[2] Fazer alteração\n:'))
# OPCAO=1
if opcao == 1:
try:
remcont = input('Nome do contato que deseja remover: ').lower()
num2 = (nomes.index(remcont.lower()))
td.pop(num2)
print(f'Contato {remcont} excluido')
except:
print('O item não foi encontrado.')
elif opcao == 2:
try:
altcont = input('Nome do contato que deseja fazer alteração: ').lower()
num2 = (nomes.index(altcont.lower()))
qual = int(input('Em qual setor deseja fazer alteração:\n'
'[1] Nome\n[2] Telefone\n[3] Email\n[4] Twitter\n[5] Instagram\n:'))
                while qual not in (1, 2, 3, 4, 5):
                    print('----Digite um número válido----')
                    qual = int(input('Em qual setor deseja fazer alteração:\n'
                                     '[1] Nome\n[2] Telefone\n[3] Email\n[4] Twitter\n[5] Instagram\n:'))
if qual == 1:
novnom = input('Novo nome do contato: ')
td[num2] = {**td[num2], 'nome': novnom}
print(f'contato alterado!\n{td[num2]}')
elif qual == 2:
novtel = input('Novo telefone do contato: ')
td[num2] = {**td[num2], 'telefone': novtel}
print(f'contato alterado!\n{td[num2]}')
elif qual == 3:
novema = input('Novo email do contato: ')
td[num2] = {**td[num2], 'email': novema}
print(f'contato alterado!\n{td[num2]}')
elif qual == 4:
novtwi = input('Novo twitter do contato: ')
td[num2] = {**td[num2], 'twitter': novtwi}
print(f'contato alterado!\n{td[num2]}')
elif qual == 5:
novinsta = input("Novo instagram do contato: ")
td[num2] = {**td[num2], 'instagram': novinsta}
print(f'contato alterado!\n{td[num2]}')
except:
print('O item não foi encontrado.')
|
the-stack_0_11310 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from pathlib import Path
import subprocess
from setuptools import setup, find_packages
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = Path()
# Get the long description from the README file
with open((here / "README.md"), encoding="utf-8") as f:
long_description = f.read()
# get all the git tags from the cmd line that follow our versioning pattern
git_tags = subprocess.Popen(['git', 'tag', '--list', 'v*[0-9]', '--sort=version:refname'], stdout=subprocess.PIPE)
# get the most recent tag after it's been sorted 👆
latest_git_tag = subprocess.Popen(['tail', '-1'], stdin=git_tags.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
git_tags.stdout.close()
latest_version = latest_git_tag.communicate()[0]
# PEP 440 won't accept the v in front, so here we remove it, strip the new line and decode the byte stream
VERSION_FROM_GIT_TAG = latest_version[1:].strip().decode("utf-8")
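# For example (illustrative): a most recent tag of b"v0.4.1\n" yields VERSION_FROM_GIT_TAG == "0.4.1"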
with open((here / "requirements.txt"), encoding="utf-8") as f:
install_requires = f.read().splitlines()
# removes comments in the requirements file
dependencies = [dependency for dependency in install_requires if (dependency[0] != "#")]
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='statistical-clear-sky', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=VERSION_FROM_GIT_TAG,
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Statistical estimation of a clear sky signal from PV system power data', # Optional
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/bmeyers/StatisticalClearSky', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='SLAC National Accelerator Laboratory - Bennet Meyers', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='[email protected]', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
# Pick your license as you wish
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# These classifiers are *not* checked by 'pip install'. See instead
# 'python_requires' below.
#'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
#'Programming Language :: Python :: 3.4',
#'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='solar pv photovoltaic', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['tests', 'contrib', 'docs', 'clearsky', 'dataviewer', 'notebooks']), # Required
# Specify which Python versions you support. In contrast to the
# 'Programming Language' classifiers above, 'pip install' will check this
# and refuse to install the project if the version does not match. If you
# do not support Python 2, you can simplify this to '>=3.5' or similar, see
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
#python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
python_requires='>=3.6, <4',
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=dependencies,
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={ # Optional
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
#package_data={ # Optional
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
#entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
entry_points={
'console_scripts': [
'statistical_clear_sky=statistical_clear_sky.command_line:main',
],
},
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Bug Reports': 'https://github.com/bmeyers/StatisticalClearSky/issues',
},
)
|
the-stack_0_11311 | #%%
import os
# import warnings
# warnings.filterwarnings('ignore')  # note: where this line is placed also affects the result - curious but true
#
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
from matplotlib import pyplot as plt
import cv2
from detection.datasets import myDataset,data_generator
from detection.models import faster_rcnn2
import tensorflow as tf
from tensorflow import keras
tf.random.set_seed(22)
np.random.seed(22)
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
train_dataset = myDataset.myDataSet(flip_ratio=0.5,scale=(768, 768))
num_classes = len(train_dataset.get_categories())
train_generator = data_generator.DataGenerator(train_dataset)
train_tf_dataset = tf.data.Dataset.from_generator(train_generator, (tf.float32, tf.float32, tf.float32, tf.int32))
train_tf_dataset = train_tf_dataset.batch(2).prefetch(100).shuffle(100)
model = faster_rcnn2.FasterRCNN(num_classes=num_classes)
# optimizer = keras.optimizers.SGD(1e-3, momentum=0.9, nesterov=True)
optimizer = keras.optimizers.Adam(0.0001)
print([var.name for var in model.trainable_variables])
#####################all Test################################
for i in range(20):
img, img_meta, bboxes, labels = train_dataset[i]
batch_imgs = tf.convert_to_tensor(np.expand_dims(img.astype(np.float32), 0))
batch_metas = tf.convert_to_tensor(np.expand_dims(img_meta.astype(np.float32), 0))
batch_bboxes = tf.convert_to_tensor(np.expand_dims(bboxes.astype(np.float32), 0))
batch_labels = tf.convert_to_tensor(np.expand_dims(labels.astype(np.int), 0))
#%%
if i == 0:
_ = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
model.load_weights('weights/faster_rcnn0_4.h5', by_name=True)
_ = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
# tf.keras.utils.plot_model(model.rpn_head, show_shapes=True, show_layer_names=True)
#%%
########################test#################################
# rois_list = model((batch_imgs, batch_metas),training=False)
rois_list,rois_list2 = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True,rec=2)
import imgTest
print(rois_list)
image = batch_imgs[0].numpy()
bboxs = rois_list[0].numpy()
for i in range(bboxs.shape[0]):
# if bboxs[i][4] < 0.9:
# continue
bbox = bboxs[i]
image = cv2.rectangle(image, (int(float(bbox[0])),
int(float(bbox[1]))),
(int(float(bbox[2])),
int(float(bbox[3]))), (255, 0, 0), 2)
cv2.imshow('img', image)
img2 = imgTest.showLabRpn(batch_imgs, batch_metas, batch_bboxes, None)
cv2.imshow('img2', img2)
print(rois_list2)
image = batch_imgs[0].numpy()
bboxs = rois_list2[0].numpy()
for i in range(bboxs.shape[0]):
# if bboxs[i][4] < 0.9:
# continue
bbox = bboxs[i]
image = cv2.rectangle(image, (int(float(bbox[0])),
int(float(bbox[1]))),
(int(float(bbox[2])),
int(float(bbox[3]))), (255, 0, 0), 2)
cv2.imshow('img3', image)
cv2.waitKey(0)
# #####################RPN Test################################
# for i in range(20):
# img, img_meta, bboxes, labels = train_dataset[i]
# batch_imgs = tf.convert_to_tensor(np.expand_dims(img.astype(np.float32), 0))
# batch_metas = tf.convert_to_tensor(np.expand_dims(img_meta.astype(np.float32), 0))
# batch_bboxes = tf.convert_to_tensor(np.expand_dims(bboxes.astype(np.float32), 0))
# batch_labels = tf.convert_to_tensor(np.expand_dims(labels.astype(np.int), 0))
# #%%
# if i == 0:
# _ = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
# model.load_weights('weights/faster_rcnn0_4.h5', by_name=True)
#
# # tf.keras.utils.plot_model(model.rpn_head, show_shapes=True, show_layer_names=True)
# #%%
# ########################test#################################
# rpn_class_logits, rpn_probs = model((batch_imgs, batch_metas),training=False)
#
# import imgTest
#
# img1 = imgTest.showRunRpn(batch_imgs, batch_metas,rpn_class_logits, rpn_probs,100)
# img2 = imgTest.showLabRpn(batch_imgs, batch_metas,batch_bboxes, None)
# cv2.imshow('img1', img1)
# cv2.imshow('img2', img2)
# cv2.waitKey(0)
########################train#################################
# for (batch, inputs) in enumerate(train_tf_dataset):
# batch_imgs, batch_metas, batch_bboxes, batch_labels = inputs
# rpn_class_loss, rpn_bbox_loss, rcnn_class_loss,rcnn_bbox_loss = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
# model.load_weights('weights/faster_rcnn0_4.h5', by_name=True)
# break
#
#
# for epoch in range(100):
# loss_history = []
# for (batch, inputs) in enumerate(train_tf_dataset):
# batch_imgs, batch_metas, batch_bboxes, batch_labels = inputs
# with tf.GradientTape() as tape:
# rpn_class_loss, rpn_bbox_loss, rcnn_class_loss,rcnn_bbox_loss = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True) # , rcnn_class_loss, rcnn_bbox_loss
#
# loss_value = rpn_class_loss + rpn_bbox_loss + rcnn_class_loss + rcnn_bbox_loss
#
# grads = tape.gradient(loss_value, model.trainable_variables)
# optimizer.apply_gradients(zip(grads, model.trainable_variables))
#
# loss_history.append(loss_value.numpy())
#
# if batch % 100 == 0:
# print(rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss) #
# print('epoch', epoch, batch, np.mean(loss_history))
# model.save_weights('weights/faster_rcnn0_4.h5')
|
the-stack_0_11312 | class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix or len(matrix[0]) <= 0:
return 0
h = len(matrix)
w = len(matrix[0])
memo_arr = [[0 for _ in range(w)] for _ in range(h)]
max_len = 0
def in_or_not(x, y):
return h > x >= 0 and w > y >= 0
def bfs(i, j, matrix):
            # depth-first search from (i, j) with memoization (named bfs in the original, but this is a DFS)
nonlocal memo_arr, max_len
dir_x = [1, -1, 0, 0]
dir_y = [0, 0, 1, -1]
ret = []
if not in_or_not(i, j):
return 0
for t, x in enumerate(dir_x):
if in_or_not(i+x, j+dir_y[t]) and matrix[i][j] > matrix[i+x][j+dir_y[t]]:
if memo_arr[i+x][j+dir_y[t]] != 0:
ret.append(memo_arr[i+x][j+dir_y[t]])
else:
ret.append(bfs(i+x, j+dir_y[t], matrix))
else:
ret.append(0)
memo_arr[i][j] = max(ret) + 1
max_len = max(max_len, memo_arr[i][j])
return max(ret) + 1
for i in range(h):
for j in range(w):
bfs(i, j, matrix)
return max_len
# Sol-2: memoized DFS (accepted); time complexity O(n*m)
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
# corner case
if len(matrix) == 0 or len(matrix[0]) == 0:
return 0
directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
cache = [[1 for _ in range(len(matrix[0]))] for _ in range(len(matrix))]
def dfs(i, j, cache):
if cache[i][j] != 1:
return cache[i][j]
for d in directions:
x, y = i + d[0], j + d[1]
if self.inposition(x, y, len(matrix), len(matrix[0])) and matrix[i][j] < matrix[x][y]:
cache[i][j] = max(cache[i][j], dfs(x, y, cache) + 1)
return cache[i][j]
longest = 0
for i in range(len(matrix)):
for j in range(len(matrix[0])):
longest = max(longest, dfs(i, j, cache))
return longest
def inposition(self, i, j, m, n):
return 0 <= i < m and 0 <= j < n
# Sol-3 sorted dp, original idea from hua hua
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
dp = [[1 for _ in range(len(matrix[0]))] for _ in range(len(matrix))]
positions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
longest = 0
tmp = list()
for i in range(len(matrix)):
for j in range(len(matrix[0])):
tmp.append((matrix[i][j], i, j))
tmp.sort(key=lambda x: -x[0])
for t in tmp:
(num, i, j) = t
for p in positions:
x = i + p[0]
y = j + p[1]
if 0 <= x < len(matrix) and 0 <= y < len(matrix[0]) and matrix[i][j] < matrix[x][y]:
dp[i][j] = max(dp[i][j], dp[x][y] + 1)
longest = max(longest, dp[i][j])
return longest
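# Illustrative check (not part of the original snippet): the classic example grid, where
# the longest increasing path 1 -> 2 -> 6 -> 9 has length 4.
if __name__ == '__main__':
    grid = [[9, 9, 4],
            [6, 6, 8],
            [2, 1, 1]]
    print(Solution().longestIncreasingPath(grid))  # expected output: 4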
|
the-stack_0_11314 | # BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import gzip
import json
import os
import random
import socket
import time
import zlib
from collections import defaultdict
import jsonschema
import pytest
from pytest_localserver.http import ContentServer
from werkzeug.wrappers import Request, Response
import elasticapm
from elasticapm.base import Client
from elasticapm.conf.constants import SPAN
from elasticapm.traces import execution_context
from elasticapm.transport.http_base import HTTPTransportBase
from elasticapm.utils import compat
from elasticapm.utils.threading import ThreadManager
try:
from urllib.request import pathname2url
except ImportError:
# Python 2
from urllib import pathname2url
cur_dir = os.path.dirname(os.path.realpath(__file__))
ERRORS_SCHEMA = os.path.join(cur_dir, ".schemacache", "errors", "error.json")
TRANSACTIONS_SCHEMA = os.path.join(cur_dir, ".schemacache", "transactions", "transaction.json")
SPAN_SCHEMA = os.path.join(cur_dir, ".schemacache", "spans", "span.json")
METADATA_SCHEMA = os.path.join(cur_dir, ".schemacache", "metadata.json")
assert os.path.exists(ERRORS_SCHEMA) and os.path.exists(
TRANSACTIONS_SCHEMA
), 'JSON Schema files not found. Run "make update-json-schema" to download'
with codecs.open(ERRORS_SCHEMA, encoding="utf8") as errors_json, codecs.open(
TRANSACTIONS_SCHEMA, encoding="utf8"
) as transactions_json, codecs.open(SPAN_SCHEMA, encoding="utf8") as span_json, codecs.open(
METADATA_SCHEMA, encoding="utf8"
) as metadata_json:
VALIDATORS = {
"error": jsonschema.Draft4Validator(
json.load(errors_json),
resolver=jsonschema.RefResolver(
base_uri="file:" + pathname2url(ERRORS_SCHEMA), referrer="file:" + pathname2url(ERRORS_SCHEMA)
),
),
"transaction": jsonschema.Draft4Validator(
json.load(transactions_json),
resolver=jsonschema.RefResolver(
base_uri="file:" + pathname2url(TRANSACTIONS_SCHEMA),
referrer="file:" + pathname2url(TRANSACTIONS_SCHEMA),
),
),
"span": jsonschema.Draft4Validator(
json.load(span_json),
resolver=jsonschema.RefResolver(
base_uri="file:" + pathname2url(SPAN_SCHEMA), referrer="file:" + pathname2url(SPAN_SCHEMA)
),
),
"metadata": jsonschema.Draft4Validator(
json.load(metadata_json),
resolver=jsonschema.RefResolver(
base_uri="file:" + pathname2url(METADATA_SCHEMA), referrer="file:" + pathname2url(METADATA_SCHEMA)
),
),
}
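# Illustrative sketch (not part of the original fixtures): how these validators are applied
# to one decoded NDJSON line, mirroring what ValidatingWSGIApp does below.
def _example_validate_line(line):
    item_type, item = list(line.items())[0]  # e.g. {"transaction": {...}}
    VALIDATORS[item_type].validate(item)     # raises jsonschema.ValidationError on schema mismatch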
class ValidatingWSGIApp(ContentServer):
def __init__(self, **kwargs):
self.skip_validate = kwargs.pop("skip_validate", False)
super(ValidatingWSGIApp, self).__init__(**kwargs)
self.payloads = []
self.responses = []
def __call__(self, environ, start_response):
content = self.content
request = Request(environ)
self.requests.append(request)
data = request.data
if request.content_encoding == "deflate":
data = zlib.decompress(data)
elif request.content_encoding == "gzip":
with gzip.GzipFile(fileobj=compat.BytesIO(data)) as f:
data = f.read()
data = data.decode(request.charset)
if request.content_type == "application/x-ndjson":
data = [json.loads(line) for line in data.split("\n") if line]
self.payloads.append(data)
code = 202
success = 0
fail = 0
if not self.skip_validate:
for line in data:
item_type, item = list(line.items())[0]
validator = VALIDATORS[item_type]
try:
validator.validate(item)
success += 1
except jsonschema.ValidationError as e:
fail += 1
content += "/".join(map(compat.text_type, e.absolute_schema_path)) + ": " + e.message + "\n"
code = 202 if not fail else 400
response = Response(status=code)
response.headers.clear()
response.headers.extend(self.headers)
response.data = content
self.responses.append({"code": code, "content": content})
return response(environ, start_response)
@pytest.fixture()
def elasticapm_client(request):
client_config = getattr(request, "param", {})
client_config.setdefault("service_name", "myapp")
client_config.setdefault("secret_token", "test_key")
client_config.setdefault("central_config", "false")
client_config.setdefault("include_paths", ("*/tests/*",))
client_config.setdefault("span_frames_min_duration", -1)
client_config.setdefault("metrics_interval", "0ms")
client = TempStoreClient(**client_config)
yield client
client.close()
# clear any execution context that might linger around
execution_context.set_transaction(None)
execution_context.set_span(None)
@pytest.fixture()
def waiting_httpserver(httpserver):
wait_for_http_server(httpserver)
return httpserver
@pytest.fixture
def httpsserver_custom(request):
"""The returned ``httpsserver`` (note the additional S!) provides a
threaded HTTP server instance similar to funcarg ``httpserver`` but with
SSL encryption.
"""
from pytest_localserver import https
config = getattr(request, "param", {})
key = os.path.join(cur_dir, "ca", config.get("key", "server.pem"))
server = https.SecureContentServer(key=key, cert=key)
server.start()
request.addfinalizer(server.stop)
return server
@pytest.fixture()
def waiting_httpsserver(httpsserver_custom):
wait_for_http_server(httpsserver_custom)
return httpsserver_custom
@pytest.fixture()
def validating_httpserver(request):
config = getattr(request, "param", {})
app = config.pop("app", ValidatingWSGIApp)
server = app(**config)
server.start()
wait_for_http_server(server)
request.addfinalizer(server.stop)
return server
@pytest.fixture()
def sending_elasticapm_client(request, validating_httpserver):
validating_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"})
client_config = getattr(request, "param", {})
client_config.setdefault("server_url", validating_httpserver.url)
client_config.setdefault("service_name", "myapp")
client_config.setdefault("secret_token", "test_key")
client_config.setdefault("transport_class", "elasticapm.transport.http.Transport")
client_config.setdefault("span_frames_min_duration", -1)
client_config.setdefault("include_paths", ("*/tests/*",))
client_config.setdefault("metrics_interval", "0ms")
client_config.setdefault("central_config", "false")
client = Client(**client_config)
client.httpserver = validating_httpserver
yield client
client.close()
# clear any execution context that might linger around
execution_context.set_transaction(None)
execution_context.set_span(None)
class DummyTransport(HTTPTransportBase):
def __init__(self, url, *args, **kwargs):
super(DummyTransport, self).__init__(url, *args, **kwargs)
self.events = defaultdict(list)
def queue(self, event_type, data, flush=False):
self._flushed.clear()
data = self._process_event(event_type, data)
self.events[event_type].append(data)
self._flushed.set()
def start_thread(self, pid=None):
# don't call the parent method, but the one from ThreadManager
ThreadManager.start_thread(self, pid=pid)
def stop_thread(self):
pass
def get_config(self, current_version=None, keys=None):
return False, None, 30
class TempStoreClient(Client):
def __init__(self, **inline):
inline.setdefault("transport_class", "tests.fixtures.DummyTransport")
super(TempStoreClient, self).__init__(**inline)
@property
def events(self):
return self._transport.events
def spans_for_transaction(self, transaction):
"""Test helper method to get all spans of a specific transaction"""
return [span for span in self.events[SPAN] if span["transaction_id"] == transaction["id"]]
@pytest.fixture()
def not_so_random():
old_state = random.getstate()
random.seed(42)
yield
random.setstate(old_state)
@pytest.fixture()
def instrument():
elasticapm.instrument()
yield
elasticapm.uninstrument()
def wait_for_http_server(httpserver, timeout=30):
start_time = time.time()
while True:
try:
sock = socket.create_connection(httpserver.server_address, timeout=0.1)
sock.close()
break
except socket.error:
if time.time() - start_time > timeout:
raise TimeoutError()
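# Illustrative use of the fixtures above in a test module (the test body below is
# an assumption, not part of this file):
#
#   def test_message_is_validated(sending_elasticapm_client):
#       sending_elasticapm_client.capture_message("hello")
#       sending_elasticapm_client.close()
#       assert sending_elasticapm_client.httpserver.payloads
#
# The ValidatingWSGIApp answers 400 for any payload that fails the JSON schema
# checks, so a passing test implies schema-conformant events.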
|
the-stack_0_11317 | import randopt as ro
def loss(x):
return x**2
e = ro.Experiment('myexp', {
'alpha': ro.Gaussian(mean=0.0, std=1.0, dtype='float'),
})
# Sampling parameters
for i in range(100):
e.sample('alpha')
res = loss(e.alpha)
print('Result: ', res)
e.add_result(res)
# Manually setting parameters
e.alpha = 0.00001
res = loss(e.alpha)
e.add_result(res)
# Search over all experiments results, including ones from previous runs
opt = e.minimum()
print('Best result: ', opt.result, ' with params: ', opt.params) |
the-stack_0_11318 | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="pypodo",
version="3.0.3",
description="pypodo is a todolist tool which works with a .todo file at the root of the home directory. It has a mecanism of indexes and tags.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/thib1984/pypodo",
author="thib1984",
author_email="[email protected]",
license="mit",
packages=["pypodo"],
install_requires=["setuptools", "termcolor", "cryptography"],
zip_safe=False,
entry_points={
"console_scripts": ["pypodo=pypodo.__pypodo__:pypodo"],
},
)
|
the-stack_0_11322 | # -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper to manipulate GCP git repository."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import errno
import os
import re
import subprocess
import textwrap
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import encoding
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
import six
import uritemplate
# This is the minimum version of git required to use credential helpers.
_HELPER_MIN = (2, 0, 1)
_WINDOWS_HELPER_MIN = (2, 15, 0)
class Error(exceptions.Error):
"""Exceptions for this module."""
class UnknownRepositoryAliasException(Error):
"""Exception to be thrown when a repository alias provided cannot be found."""
class CannotInitRepositoryException(Error):
"""Exception to be thrown when a repository cannot be created."""
class CannotFetchRepositoryException(Error):
"""Exception to be thrown when a repository cannot be fetched."""
class GitVersionException(Error):
"""Exceptions for when git version is too old."""
def __init__(self, fmtstr, cur_version, min_version):
self.cur_version = cur_version
super(GitVersionException, self).__init__(
fmtstr.format(cur_version=cur_version, min_version=min_version))
class InvalidGitException(Error):
"""Exceptions for when git version is empty or invalid."""
class GcloudIsNotInPath(Error):
"""Exception for when the gcloud cannot be found."""
def CheckGitVersion(version_lower_bound=None):
"""Returns true when version of git is >= min_version.
Args:
version_lower_bound: (int,int,int), The lowest allowed version, or None to
just check for the presence of git.
Returns:
True if version >= min_version.
Raises:
GitVersionException: if `git` was found, but the version is incorrect.
InvalidGitException: if `git` was found, but the output of `git version` is
not as expected.
NoGitException: if `git` was not found.
"""
try:
cur_version = encoding.Decode(subprocess.check_output(['git', 'version']))
if not cur_version:
raise InvalidGitException('The git version string is empty.')
if not cur_version.startswith('git version '):
raise InvalidGitException(('The git version string must start with '
'git version .'))
match = re.search(r'(\d+)\.(\d+)\.(\d+)', cur_version)
if not match:
raise InvalidGitException('The git version string must contain a '
'version number.')
current_version = tuple([int(item) for item in match.group(1, 2, 3)])
if version_lower_bound and current_version < version_lower_bound:
min_version = '.'.join(six.text_type(i) for i in version_lower_bound)
raise GitVersionException(
'Your git version {cur_version} is older than the minimum version '
'{min_version}. Please install a newer version of git.',
cur_version=cur_version, min_version=min_version)
except OSError as e:
if e.errno == errno.ENOENT:
raise NoGitException()
raise
return True
class NoGitException(Error):
"""Exceptions for when git is not available."""
def __init__(self):
super(NoGitException, self).__init__(
textwrap.dedent("""\
Cannot find git. Please install git and try again.
You can find git installers at [http://git-scm.com/downloads], or use
your favorite package manager to install it on your computer. Make sure
it can be found on your system PATH.
"""))
def _GetRepositoryURI(project, alias):
"""Get the URI for a repository, given its project and alias.
Args:
project: str, The project name.
alias: str, The repository alias.
Returns:
str, The repository URI.
"""
return uritemplate.expand(
'https://source.developers.google.com/p/{project}/r/{alias}',
{'project': project, 'alias': alias})
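# Illustrative expansion (project and alias values are assumptions):
#   _GetRepositoryURI('my-project', 'default')
#   -> 'https://source.developers.google.com/p/my-project/r/default'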
def _GetGcloudScript(full_path=False):
"""Get name of the gcloud script.
Args:
full_path: boolean, True if the gcloud full path should be used if free
of spaces.
Returns:
str, command to use to execute gcloud
Raises:
GcloudIsNotInPath: if gcloud is not found in the path
"""
if (platforms.OperatingSystem.Current() ==
platforms.OperatingSystem.WINDOWS):
gcloud_ext = '.cmd'
else:
gcloud_ext = ''
gcloud_name = 'gcloud'
gcloud = files.FindExecutableOnPath(gcloud_name, pathext=[gcloud_ext])
if not gcloud:
raise GcloudIsNotInPath(
'Could not verify that gcloud is in the PATH. '
'Please make sure the Cloud SDK bin folder is in PATH.')
if full_path:
if not re.match(r'[-a-zA-Z0-9_/]+$', gcloud):
log.warning(
textwrap.dedent("""\
You specified the option to use the full gcloud path in the git
          credential.helper, but the path contains non-alphanumeric characters
so the credential helper may not work correctly."""))
return gcloud
else:
return gcloud_name + gcloud_ext
def _GetCredHelperCommand(uri, full_path=False, min_version=_HELPER_MIN):
"""Returns the gcloud credential helper command for a remote repository.
The command will be of the form '!gcloud auth git-helper --account=EMAIL
  --ignore-unknown $@'. See https://git-scm.com/docs/git-config. If the
installed version of git or the remote repository does not support
the gcloud credential helper, then returns None.
Args:
uri: str, The uri of the remote repository.
full_path: bool, If true, use the full path to gcloud.
min_version: minimum git version; if found git is earlier than this, warn
and return None
Returns:
str, The credential helper command if it is available.
"""
credentialed_hosts = ['source.developers.google.com']
extra = properties.VALUES.core.credentialed_hosted_repo_domains.Get()
if extra:
credentialed_hosts.extend(extra.split(','))
if any(
uri.startswith('https://' + host + '/') for host in credentialed_hosts):
try:
CheckGitVersion(min_version)
except GitVersionException as e:
helper_min_str = '.'.join(six.text_type(i) for i in min_version)
log.warning(
textwrap.dedent("""\
You are using a Google-hosted repository with a
{current} which is older than {min_version}. If you upgrade
to {min_version} or later, gcloud can handle authentication to
this repository. Otherwise, to authenticate, use your Google
account and the password found by running the following command.
$ gcloud auth print-access-token""".format(
current=e.cur_version, min_version=helper_min_str)))
return None
# Use git alias "!shell command" syntax so we can configure
# the helper with options. Also git-credential is not
# prefixed when it starts with "!".
return '!{0} auth git-helper --account={1} --ignore-unknown $@'.format(
_GetGcloudScript(full_path),
properties.VALUES.core.account.Get(required=True))
return None
class Git(object):
"""Represents project git repo."""
def __init__(self, project_id, repo_name, uri=None):
"""Constructor.
Args:
project_id: str, The name of the project that has a repository associated
with it.
repo_name: str, The name of the repository to clone.
uri: str, The URI of the repository, or None if it will be inferred from
the name.
Raises:
UnknownRepositoryAliasException: If the repo name is not known to be
associated with the project.
"""
self._project_id = project_id
self._repo_name = repo_name
self._uri = uri or _GetRepositoryURI(project_id, repo_name)
if not self._uri:
raise UnknownRepositoryAliasException()
def GetName(self):
return self._repo_name
def Clone(self, destination_path, dry_run=False, full_path=False):
"""Clone a git repository into a gcloud workspace.
If the resulting clone does not have a .gcloud directory, create one. Also,
sets the credential.helper to use the gcloud credential helper.
Args:
destination_path: str, The relative path for the repository clone.
dry_run: bool, If true do not run but print commands instead.
full_path: bool, If true use the full path to gcloud.
Returns:
str, The absolute path of cloned repository.
Raises:
CannotInitRepositoryException: If there is already a file or directory in
the way of creating this repository.
CannotFetchRepositoryException: If there is a problem fetching the
repository from the remote host, or if the repository is otherwise
misconfigured.
"""
abs_repository_path = os.path.abspath(destination_path)
if os.path.exists(abs_repository_path):
CheckGitVersion() # Do this here, before we start running git commands
if os.listdir(abs_repository_path):
# Raise exception if dir is not empty and not a git repository
raise CannotInitRepositoryException(
'Directory path specified exists and is not empty')
# Make a brand new repository if directory does not exist or
# clone if directory exists and is empty
try:
# If this is a Google-hosted repo, clone with the cred helper.
cmd = ['git', 'clone', self._uri, abs_repository_path]
min_git = _HELPER_MIN
if (platforms.OperatingSystem.Current() ==
platforms.OperatingSystem.WINDOWS):
min_git = _WINDOWS_HELPER_MIN
cred_helper_command = _GetCredHelperCommand(
self._uri, full_path=full_path, min_version=min_git)
if cred_helper_command:
cmd += [
'--config',
'credential.https://source.developers.google.com/.helper=',
'--config',
'credential.https://source.developers.google.com/.helper=' +
cred_helper_command
]
self._RunCommand(cmd, dry_run)
except subprocess.CalledProcessError as e:
raise CannotFetchRepositoryException(e)
return abs_repository_path
def _RunCommand(self, cmd, dry_run):
log.debug('Executing %s', cmd)
if dry_run:
log.out.Print(' '.join(cmd))
else:
subprocess.check_call(cmd)
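# Illustrative usage sketch (project/repository names and paths are assumptions):
#
#   repo = Git('my-project', 'default')
#   path = repo.Clone('workspace', dry_run=True, full_path=False)
#
# With dry_run=True the assembled `git clone` command is only printed, which is a
# convenient way to inspect the credential-helper configuration it would use.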
|
the-stack_0_11323 | from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.mail import EmailMultiAlternatives
from django.db import models
from django.template import (
Context,
Template,
TemplateDoesNotExist,
TemplateSyntaxError
)
from django.utils.translation import ugettext_lazy as _lazy
from . import helpers
from .settings import (
ADD_EXTRA_HEADERS,
VALIDATE_ON_SAVE,
CONTEXT_PROCESSORS
)
class EmailTemplateQuerySet(models.query.QuerySet):
def active(self):
"""Returns active templates only."""
return self.filter(is_active=True)
def current(self, name, language=settings.LANGUAGE_CODE):
"""Returns the latest version of a template."""
return self.active().filter(name=name, language=language).order_by('version').last()
def version(self, name, version, language=settings.LANGUAGE_CODE):
"""Returns a specific version of a template."""
return self.active().get(name=name, language=language, version=version)
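# Illustrative queryset usage (template names and languages are assumptions):
#
#   latest = EmailTemplate.objects.current('order-confirmation', language='de')
#   pinned = EmailTemplate.objects.version('order-confirmation', 2)
#
# `current` returns the highest active version for a name/language pair, while
# `version` fetches an exact version.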
class EmailTemplate(models.Model):
"""
Email template. Contains HTML and plain text variants.
Each Template object has a unique name:language.version combination, which
means that localisation of templates is managed through having multiple
    objects with the same name - there is no inheritance model. This is to
keep it simple:
order-confirmation:en.0
order-confirmation:de.0
order-confirmation:fr.0
Templates contain HTML and plain text content.
"""
CONTENT_TYPE_PLAIN = 'text/plain'
CONTENT_TYPE_HTML = 'text/html'
CONTENT_TYPES = (CONTENT_TYPE_PLAIN, CONTENT_TYPE_HTML)
name = models.CharField(
_lazy('Template name'),
max_length=100,
help_text=_lazy("Template name - must be unique for a given language/version combination."),
db_index=True
)
description = models.CharField(
_lazy('Description'),
max_length=100,
help_text=_lazy("Optional description. e.g. used to differentiate variants ('new header')."), # noqa
blank=True
)
# language is free text and not a choices field as we make no assumption
# as to how the end user is storing / managing languages.
language = models.CharField(
_lazy('Language'),
max_length=20,
default=settings.LANGUAGE_CODE,
help_text=_lazy(
"Used to support localisation of emails, defaults to `settings.LANGUAGE_CODE`, "
"but can be any string, e.g. 'London', 'NYC'."
),
db_index=True
)
version = models.IntegerField(
_lazy('Version (or variant)'),
default=0,
help_text=_lazy("Integer value - can be used for versioning or A/B testing."),
db_index=True
)
subject = models.CharField(
_lazy('Subject line template'),
max_length=100,
help_text=_lazy("Email subject line (may contain template variables)."),
)
body_text = models.TextField(
_lazy('Plain text template'),
help_text=_lazy("Plain text content (may contain template variables)."),
)
body_html = models.TextField(
_lazy('HTML template'),
help_text=_lazy("HTML content (may contain template variables)."),
)
test_context = JSONField(
default=dict,
blank=True,
help_text=_lazy("Dummy JSON used for test rendering (set automatically on first save).")
)
is_active = models.BooleanField(
_lazy("Active (live)"),
help_text=_lazy("Set to False to remove from `current` queryset."),
default=True
)
from_email = models.CharField(
_lazy("Sender"),
max_length=254,
help_text=_lazy(
"Default sender address if none specified. Verbose form is accepted."
),
default=settings.DEFAULT_FROM_EMAIL
)
reply_to = models.CharField(
_lazy("Reply-To"),
max_length=254,
help_text=_lazy("Comma separated list of Reply-To recipients."),
default=settings.DEFAULT_FROM_EMAIL
)
    objects = EmailTemplateQuerySet.as_manager()
class Meta:
unique_together = ("name", "language", "version")
def __str__(self):
return "'{}' ({})".format(self.name, self.language)
def __repr__(self):
return (
"<EmailTemplate id={} name='{}' language='{}' version={}>".format(
self.id, self.name, self.language, self.version
)
)
@property
def extra_headers(self):
        return {
'X-Appmail-Template': (
'name=%s; language=%s; version=%s' % (self.name, self.language, self.version)
)
}
@property
def reply_to_list(self):
"""Convert the reply_to field to a list."""
return [a.strip() for a in self.reply_to.split(',')]
def save(self, *args, **kwargs):
"""Update dummy context on first save and validate template contents.
Kwargs:
validate: set to False to bypass template validation; defaults
to settings.VALIDATE_ON_SAVE.
"""
if self.pk is None:
self.test_context = helpers.get_context(
self.subject +
self.body_text +
self.body_html
)
validate = kwargs.pop('validate', VALIDATE_ON_SAVE)
if validate:
self.clean()
super(EmailTemplate, self).save(*args, **kwargs)
return self
def clean(self):
"""Validate model - specifically that the template can be rendered."""
validation_errors = {}
validation_errors.update(self._validate_body(EmailTemplate.CONTENT_TYPE_PLAIN))
validation_errors.update(self._validate_body(EmailTemplate.CONTENT_TYPE_HTML))
validation_errors.update(self._validate_subject())
if validation_errors:
raise ValidationError(validation_errors)
def render_subject(self, context, processors=CONTEXT_PROCESSORS):
"""Render subject line."""
ctx = Context(helpers.patch_context(context, processors))
return Template(self.subject).render(ctx)
def _validate_subject(self):
"""Try rendering the body template and capture any errors."""
try:
self.render_subject({})
except TemplateDoesNotExist as ex:
return {'subject': _lazy("Template does not exist: {}".format(ex))}
except TemplateSyntaxError as ex:
return {'subject': str(ex)}
else:
return {}
def render_body(self, context, content_type=CONTENT_TYPE_PLAIN, processors=CONTEXT_PROCESSORS):
"""Render email body in plain text or HTML format."""
assert content_type in EmailTemplate.CONTENT_TYPES, _lazy("Invalid content type.")
ctx = Context(helpers.patch_context(context, processors))
if content_type == EmailTemplate.CONTENT_TYPE_PLAIN:
return Template(self.body_text).render(ctx)
if content_type == EmailTemplate.CONTENT_TYPE_HTML:
return Template(self.body_html).render(ctx)
def _validate_body(self, content_type):
"""Try rendering the body template and capture any errors."""
assert content_type in EmailTemplate.CONTENT_TYPES, _lazy("Invalid content type.")
if content_type == EmailTemplate.CONTENT_TYPE_PLAIN:
field_name = 'body_text'
if content_type == EmailTemplate.CONTENT_TYPE_HTML:
field_name = 'body_html'
try:
self.render_body({}, content_type=content_type)
except TemplateDoesNotExist as ex:
return {field_name: _lazy("Template does not exist: {}".format(ex))}
except TemplateSyntaxError as ex:
return {field_name: str(ex)}
else:
return {}
def create_message(self, context, **email_kwargs):
"""
Return populated EmailMultiAlternatives object.
This function is a helper that will render the template subject and
plain text / html content, as well as populating all of the standard
EmailMultiAlternatives properties.
>>> template = EmailTemplate.objects.get_latest('order_summary')
>>> context = {'first_name': "Bruce", 'last_name'="Lee"}
>>> email = template.create_message(context, to=['[email protected]'])
>>> email.send()
The function supports all of the standard EmailMultiAlternatives
constructor kwargs except for 'subject', 'body' and 'alternatives' - as
these are set from the template (subject, body_text and body_html).
"""
for kw in ('subject', 'body', 'alternatives'):
assert kw not in email_kwargs, _lazy("Invalid create_message kwarg: '{}'".format(kw))
subject = self.render_subject(context)
body = self.render_body(context, content_type=EmailTemplate.CONTENT_TYPE_PLAIN)
html = self.render_body(context, content_type=EmailTemplate.CONTENT_TYPE_HTML)
email_kwargs['reply_to'] = email_kwargs.get('reply_to') or self.reply_to_list
email_kwargs['from_email'] = email_kwargs.get('from_email') or self.from_email
if ADD_EXTRA_HEADERS:
email_kwargs['headers'] = email_kwargs.get('headers', {})
email_kwargs['headers'].update(self.extra_headers)
# alternatives is a list of (content, mimetype) tuples
# https://github.com/django/django/blob/master/django/core/mail/message.py#L435
return EmailMultiAlternatives(
subject=subject,
body=body,
alternatives=[(html, EmailTemplate.CONTENT_TYPE_HTML)],
**email_kwargs
)
def clone(self):
"""Create a copy of the current object, increase version by 1."""
self.pk = None
self.version += 1
return self.save()
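# Illustrative end-to-end sketch (recipient address and context are assumptions):
#
#   template = EmailTemplate.objects.current('order-confirmation')
#   email = template.create_message({'first_name': 'Bruce'}, to=['[email protected]'])
#   email.send()
#   template.clone()  # copies the row with version incremented by 1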
|
the-stack_0_11324 | # Desafio 030 -> Crie um programa que leia um numero inteiro e diga se ele é par ou impar
import math
num = int(input('Digite um número inteiro: '))
numd = num % 2
print ('{} é par'.format(num) if numd == 0 else '{} é ímpar'.format(num))
# print('{} é par'.format(num))
#else:
# print('{} é ímpar'.format(num))
|
the-stack_0_11327 | from requests import Session
from m3u8 import loads
import os
from m3u8.model import SegmentList, Segment, find_key
class XET(object):
APPID = '' # APPid
XIAOEID = '' # Cookie XIAOEID
    RESOURCEID = '' # ResourceID; here resourceid is the course id
sessionid = '' # Cookie laravel_session
session = Session()
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
'Referer': '',
'Origin': 'http://pc-shop.xiaoe-tech.com',
'Content-Type': 'application/x-www-form-urlencoded'
}
cookie = {
'XIAOEID': XIAOEID,
'laravel_session': sessionid
}
def get_lesson_list(self):
url = 'https://pc-shop.xiaoe-tech.com/{appid}/open/column.resourcelist.get/2.0'.format(appid=self.APPID)
body = {
'data[page_index]': '0',
'data[page_size]': '1000',
'data[order_by]': 'start_at:desc',
'data[resource_id]': self.RESOURCEID,
'data[state]': '0'
}
        # Fetch info about the current course
self.header['Referer'] = 'https://pc-shop.xiaoe-tech.com/{appid}/'.format(appid=self.APPID)
resp = self.session.post(url, data=body, headers=self.header, cookies=self.cookie)
if resp.status_code != 200:
raise Exception('获取课程列表失败')
try:
            # Assemble lesson id, title and resource type
data = [{'id': lesson['id'], 'name': lesson['title'], 'resource_type': lesson['resource_type']} for lesson
in resp.json()['data']]
except Exception as e:
print("获取课程列表失败")
exit(1)
        # Return the lesson list
return data
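    # Illustrative shape of the returned list (ids and titles are assumptions):
    #   [{'id': 'i_abc123', 'name': 'Lesson 1', 'resource_type': 3}, ...]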
def get_lesson_hls(self, resource):
'''
        :param resource: the lesson of the current course (a dict holding its id and resource_type)
:return:
'''
resource_type = {'2': 'audio.detail.get', '3': 'video.detail.get'}
url = 'https://pc-shop.xiaoe-tech.com/{appid}/open/{resource}/1.0'.format(appid=self.APPID,
resource=resource_type[
str(resource['resource_type'])])
body = {
'data[resource_id]': resource['id']
}
self.header['Referer'] = 'https://pc-shop.xiaoe-tech.com/{appid}/video_details?id={resourceid}'.format(
appid=self.APPID, resourceid=self.RESOURCEID)
resp = self.session.post(url, data=body, headers=self.header, cookies=self.cookie)
if resp.status_code != 200:
raise Exception('获取课程信息失败')
        # Return the lesson detail info
hls = resp.json()['data']
return hls
def video(self, url, media_dir, title, playurl):
'''
        :param url: HLS (m3u8) playlist URL
        :param media_dir: directory to save downloads to
        :param title: video title
        :param playurl: base URL of the ts segments
:return:
'''
resp = self.session.get(url, headers=self.header)
media = loads(resp.text)
        # Build the list of ts segment URLs
playlist = ["{playurl}{uri}".format(playurl=playurl, uri=uri) for uri in media.segments.uri]
n = 0
new_segments = []
# get ts file list
for url in playlist:
ts_file = os.path.join(media_dir, title, 'm_{num}.ts'.format(num=n))
ts_path = os.path.join(title, 'm_{num}.ts'.format(num=n))
media.data['segments'][n]['uri'] = ts_path
new_segments.append(media.data.get('segments')[n])
            # Download the ts segment
resp = self.session.get(url, headers=self.header, cookies=self.cookie)
if resp.status_code != 200:
print('Error: {title} {tsfile}'.format(title=title, tsfile=ts_file))
            # Save the ts file if it does not exist locally or its size differs from the response
            if not os.path.exists(ts_file) or os.stat(ts_file).st_size != int(resp.headers['content-length']):
with open(ts_file, 'wb') as ts:
ts.write(resp.content)
n += 1
# change m3u8 data
media.data['segments'] = new_segments
        # Rewrite the m3u8 segment entries
segments = SegmentList(
[Segment(base_uri=None, keyobject=find_key(segment.get('key', {}), media.keys), **segment)
for segment in
media.data.get('segments', [])])
media.segments = segments
# save m3u8 file
m3u8_file = os.path.join(media_dir, '{title}.m3u8'.format(title=title))
if not os.path.exists(m3u8_file):
            with open(m3u8_file, 'w', encoding='utf8') as f:
f.write(media.dumps())
def audio(self, url, media_dir, title):
        # Download the audio file
resp = self.session.get(url, headers=self.header, stream=True)
if resp.status_code != 200:
print('Error: {title}'.format(title=title))
else:
audio_file = os.path.join(media_dir, title, '{title}.mp3'.format(title=title))
if not os.path.exists(audio_file):
with open(audio_file, 'wb') as f:
f.write(resp.content)
def download(self):
        # Output directory for downloads
media_dir = 'media'
        # Get the course's lesson info
for resourceid in self.get_lesson_list():
            # Skip resource types 1 and 6
if resourceid['resource_type'] == 1 or resourceid['resource_type'] == 6:
continue
data = self.get_lesson_hls(resourceid)
title = data['title']
            # Make sure the media directory exists
if not os.path.exists(media_dir):
os.mkdir(media_dir)
            # resource_type 2 is audio and can be downloaded directly
if resourceid['resource_type'] == 2:
playurl = data['audio_url']
if not os.path.exists(os.path.join(media_dir, title)):
try:
os.mkdir(os.path.join(media_dir, title))
except OSError as e:
title = title.replace('|', '丨')
os.mkdir(os.path.join(media_dir, title))
self.audio(playurl, media_dir, title)
            # resource_type 3 is video; the ts segments must be stitched together after download
elif resourceid['resource_type'] == 3:
url = data['video_hls']
playurl = url.split('v.f230')[0]
# mkdir media directory
if not os.path.exists(os.path.join(media_dir, title)):
os.mkdir(os.path.join(media_dir, title))
self.video(url, media_dir, title, playurl)
if __name__ == '__main__':
XET().download()
|
the-stack_0_11330 | import logging
import os
import platform
import subprocess
import sys
import warnings
from unittest import skipIf
from pytest import raises, mark
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial import unittest
import scrapy
from scrapy.crawler import Crawler, CrawlerRunner, CrawlerProcess
from scrapy.settings import Settings, default_settings
from scrapy.spiderloader import SpiderLoader
from scrapy.utils.log import configure_logging, get_scrapy_root_handler
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.misc import load_object
from scrapy.extensions.throttle import AutoThrottle
from scrapy.extensions import telnet
from scrapy.utils.test import get_testenv
class BaseCrawlerTest(unittest.TestCase):
def assertOptionIsDefault(self, settings, key):
self.assertIsInstance(settings, Settings)
self.assertEqual(settings[key], getattr(default_settings, key))
class CrawlerTestCase(BaseCrawlerTest):
def setUp(self):
self.crawler = Crawler(DefaultSpider, Settings())
def test_populate_spidercls_settings(self):
spider_settings = {'TEST1': 'spider', 'TEST2': 'spider'}
project_settings = {'TEST1': 'project', 'TEST3': 'project'}
class CustomSettingsSpider(DefaultSpider):
custom_settings = spider_settings
settings = Settings()
settings.setdict(project_settings, priority='project')
crawler = Crawler(CustomSettingsSpider, settings)
self.assertEqual(crawler.settings.get('TEST1'), 'spider')
self.assertEqual(crawler.settings.get('TEST2'), 'spider')
self.assertEqual(crawler.settings.get('TEST3'), 'project')
self.assertFalse(settings.frozen)
self.assertTrue(crawler.settings.frozen)
def test_crawler_accepts_dict(self):
crawler = Crawler(DefaultSpider, {'foo': 'bar'})
self.assertEqual(crawler.settings['foo'], 'bar')
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_accepts_None(self):
crawler = Crawler(DefaultSpider)
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_rejects_spider_objects(self):
with raises(ValueError):
Crawler(DefaultSpider())
class SpiderSettingsTestCase(unittest.TestCase):
def test_spider_custom_settings(self):
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'AUTOTHROTTLE_ENABLED': True
}
crawler = Crawler(MySpider, {})
enabled_exts = [e.__class__ for e in crawler.extensions.middlewares]
self.assertIn(AutoThrottle, enabled_exts)
class CrawlerLoggingTestCase(unittest.TestCase):
def test_no_root_handler_installed(self):
handler = get_scrapy_root_handler()
if handler is not None:
logging.root.removeHandler(handler)
class MySpider(scrapy.Spider):
name = 'spider'
Crawler(MySpider, {})
assert get_scrapy_root_handler() is None
def test_spider_custom_settings_log_level(self):
log_file = self.mktemp()
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'LOG_LEVEL': 'INFO',
'LOG_FILE': log_file,
# disable telnet if not available to avoid an extra warning
'TELNETCONSOLE_ENABLED': telnet.TWISTED_CONCH_AVAILABLE,
}
configure_logging()
self.assertEqual(get_scrapy_root_handler().level, logging.DEBUG)
crawler = Crawler(MySpider, {})
self.assertEqual(get_scrapy_root_handler().level, logging.INFO)
info_count = crawler.stats.get_value('log_count/INFO')
logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
logging.error('error message')
with open(log_file, 'rb') as fo:
logged = fo.read().decode('utf8')
self.assertNotIn('debug message', logged)
self.assertIn('info message', logged)
self.assertIn('warning message', logged)
self.assertIn('error message', logged)
self.assertEqual(crawler.stats.get_value('log_count/ERROR'), 1)
self.assertEqual(crawler.stats.get_value('log_count/WARNING'), 1)
self.assertEqual(
crawler.stats.get_value('log_count/INFO') - info_count, 1)
self.assertEqual(crawler.stats.get_value('log_count/DEBUG', 0), 0)
class SpiderLoaderWithWrongInterface:
def unneeded_method(self):
pass
class CustomSpiderLoader(SpiderLoader):
pass
class CrawlerRunnerTestCase(BaseCrawlerTest):
def test_spider_manager_verify_interface(self):
settings = Settings({
'SPIDER_LOADER_CLASS': 'tests.test_crawler.SpiderLoaderWithWrongInterface'
})
with warnings.catch_warnings(record=True) as w:
self.assertRaises(AttributeError, CrawlerRunner, settings)
self.assertEqual(len(w), 1)
self.assertIn("SPIDER_LOADER_CLASS", str(w[0].message))
self.assertIn("scrapy.interfaces.ISpiderLoader", str(w[0].message))
def test_crawler_runner_accepts_dict(self):
runner = CrawlerRunner({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_runner_accepts_None(self):
runner = CrawlerRunner()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_deprecated_attribute_spiders(self):
with warnings.catch_warnings(record=True) as w:
runner = CrawlerRunner(Settings())
spiders = runner.spiders
self.assertEqual(len(w), 1)
self.assertIn("CrawlerRunner.spiders", str(w[0].message))
self.assertIn("CrawlerRunner.spider_loader", str(w[0].message))
sl_cls = load_object(runner.settings['SPIDER_LOADER_CLASS'])
self.assertIsInstance(spiders, sl_cls)
class CrawlerProcessTest(BaseCrawlerTest):
def test_crawler_process_accepts_dict(self):
runner = CrawlerProcess({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_process_accepts_None(self):
runner = CrawlerProcess()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
class ExceptionSpider(scrapy.Spider):
name = 'exception'
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
raise ValueError('Exception in from_crawler method')
class NoRequestsSpider(scrapy.Spider):
name = 'no_request'
def start_requests(self):
return []
@mark.usefixtures('reactor_pytest')
class CrawlerRunnerHasSpider(unittest.TestCase):
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful_for_several(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
self.assertEqual(runner.bootstrap_failed, True)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed_for_several(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, True)
def test_crawler_runner_asyncio_enabled_true(self):
if self.reactor_pytest == 'asyncio':
CrawlerRunner(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
else:
msg = r"The installed reactor \(.*?\) does not match the requested one \(.*?\)"
with self.assertRaisesRegex(Exception, msg):
CrawlerRunner(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
@defer.inlineCallbacks
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_crawler_process_asyncio_enabled_true(self):
with LogCapture(level=logging.DEBUG) as log:
if self.reactor_pytest == 'asyncio':
runner = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
yield runner.crawl(NoRequestsSpider)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", str(log))
else:
msg = r"The installed reactor \(.*?\) does not match the requested one \(.*?\)"
with self.assertRaisesRegex(Exception, msg):
runner = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
@defer.inlineCallbacks
def test_crawler_process_asyncio_enabled_false(self):
runner = CrawlerProcess(settings={"TWISTED_REACTOR": None})
with LogCapture(level=logging.DEBUG) as log:
yield runner.crawl(NoRequestsSpider)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", str(log))
class ScriptRunnerMixin:
def run_script(self, script_name):
script_path = os.path.join(self.script_dir, script_name)
args = (sys.executable, script_path)
p = subprocess.Popen(args, env=get_testenv(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stderr.decode('utf-8')
class CrawlerProcessSubprocess(ScriptRunnerMixin, unittest.TestCase):
script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'CrawlerProcess')
def test_simple(self):
log = self.run_script('simple.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_asyncio_enabled_no_reactor(self):
log = self.run_script('asyncio_enabled_no_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_asyncio_enabled_reactor(self):
log = self.run_script('asyncio_enabled_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_ipv6_default_name_resolver(self):
log = self.run_script('default_name_resolver.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("'downloader/exception_type_count/twisted.internet.error.DNSLookupError': 1,", log)
self.assertIn(
"twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: ::1.",
log)
def test_ipv6_alternative_name_resolver(self):
log = self.run_script('alternative_name_resolver.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("twisted.internet.error.DNSLookupError", log)
def test_reactor_select(self):
log = self.run_script("twisted_reactor_select.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.selectreactor.SelectReactor", log)
@mark.skipif(platform.system() == 'Windows', reason="PollReactor is not supported on Windows")
def test_reactor_poll(self):
log = self.run_script("twisted_reactor_poll.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.pollreactor.PollReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_reactor_asyncio(self):
log = self.run_script("twisted_reactor_asyncio.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
class CrawlerRunnerSubprocess(ScriptRunnerMixin, unittest.TestCase):
script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'CrawlerRunner')
def test_response_ip_address(self):
log = self.run_script("ip_address.py")
self.assertIn("INFO: Spider closed (finished)", log)
self.assertIn("INFO: Host: not.a.real.domain", log)
self.assertIn("INFO: Type: <class 'ipaddress.IPv4Address'>", log)
self.assertIn("INFO: IP address: 127.0.0.1", log)
|
the-stack_0_11332 | #!python
# coding=utf-8
import logging
from typing import List, Tuple
from itertools import zip_longest, filterfalse
from avro import schema
from confluent_kafka import Producer
from confluent_kafka.avro import AvroProducer, CachedSchemaRegistryClient
# Monkey patch to get hashable avro schemas
# https://issues.apache.org/jira/browse/AVRO-1737
# https://github.com/confluentinc/confluent-kafka-python/issues/122
def hash_func(self):
return hash(str(self))
schema.EnumSchema.__hash__ = hash_func
schema.RecordSchema.__hash__ = hash_func
schema.PrimitiveSchema.__hash__ = hash_func
schema.ArraySchema.__hash__ = hash_func
schema.FixedSchema.__hash__ = hash_func
schema.MapSchema.__hash__ = hash_func
L = logging.getLogger('easyavro')
L.propagate = False
L.addHandler(logging.NullHandler())
def grouper(iterable, batch_size, fillend=False, fillvalue=None):
# Modified from https://docs.python.org/3/library/itertools.html#recipes
# to remove None values
# grouper('ABCDEFG', 3, fillend=True, fillvalue='x') --> ABC DEF Gxx"
# grouper('ABCDEFG', 3, fillend=False) --> ABC DEF G"
"Collect data into fixed-length chunks or blocks"
args = [iter(iterable)] * batch_size
if fillend is False:
return ( tuple(filterfalse(lambda x: x is None, g)) for g in zip_longest(*args, fillvalue=None) )
else:
return zip_longest(*args, fillvalue=fillvalue)
def on_delivery(err, msg):
if err:
L.error(err)
else:
L.debug('Delivered to {} at offset {}'.format(msg.topic(), msg.offset()))
class BaseProducer:
def produce(self, records: List[Tuple], batch=None, flush_timeout=60) -> None:
batch = batch or len(records)
for g, group in enumerate(grouper(records, batch)):
for i, r in enumerate(group):
super().produce(
topic=self.kafka_topic,
key=r[0],
value=r[1],
on_delivery=on_delivery
)
L.debug("{}/{} messages queued".format(i + 1, len(records)))
L.debug("Flushing...")
remaining = self.flush(timeout=flush_timeout)
sent = len(group) - remaining
L.info("Batch {} finished: {} sent, {} pending".format(g, sent, remaining))
self.flush(timeout=flush_timeout)
L.info("Done producing")
class EasyProducer(BaseProducer, Producer):
def __init__(self,
kafka_brokers: List[str],
kafka_topic: str,
debug: bool = False,
kafka_conf: dict = None,
py_conf: dict = None) -> None:
self.kafka_topic = kafka_topic
conf = {
'bootstrap.servers': ','.join(kafka_brokers),
'client.id': self.__class__.__name__,
'api.version.request': 'true',
}
if debug is True:
conf['debug'] = 'msg'
kafka_conf = kafka_conf or {}
py_conf = py_conf or {}
super().__init__(
{**conf, **kafka_conf},
**py_conf
)
class EasyAvroProducer(BaseProducer, AvroProducer):
def __init__(self,
schema_registry_url: str,
kafka_brokers: List[str],
kafka_topic: str,
value_schema: schema.Schema = None,
key_schema: schema.Schema = None,
debug: bool = False,
kafka_conf: dict = None,
py_conf: dict = None) -> None:
self.kafka_topic = kafka_topic
self._client = CachedSchemaRegistryClient(dict(
url=schema_registry_url
))
# Value Schema
if value_schema is None:
vs_name = '{}-value'.format(self.kafka_topic)
_, value_schema, _ = self._client.get_latest_schema(vs_name)
if value_schema is None:
raise ValueError('Schema "{}" not found in registry'.format(vs_name))
# Key Schema
if key_schema is None:
ks_name = '{}-key'.format(self.kafka_topic)
_, key_schema, _ = self._client.get_latest_schema(ks_name)
if key_schema is None:
raise ValueError('Schema "{}" not found in registry'.format(ks_name))
conf = {
'bootstrap.servers': ','.join(kafka_brokers),
'schema.registry.url': schema_registry_url,
'client.id': self.__class__.__name__,
'api.version.request': 'true',
}
if debug is True:
conf['debug'] = 'msg'
kafka_conf = kafka_conf or {}
py_conf = py_conf or {}
super().__init__(
{**conf, **kafka_conf},
default_value_schema=value_schema,
default_key_schema=key_schema,
**py_conf
)
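# Illustrative usage sketch (broker address, registry URL, topic and record
# contents are assumptions; the key/value dicts must match the registered schemas):
#
#   producer = EasyAvroProducer(
#       schema_registry_url='http://localhost:8081',
#       kafka_brokers=['localhost:9092'],
#       kafka_topic='my-topic',
#   )
#   producer.produce([({'id': 1}, {'value': 42.0})], batch=500)
#
# Records are (key, value) tuples; schemas default to the `<topic>-key` and
# `<topic>-value` subjects already present in the schema registry.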
|
the-stack_0_11334 | import asyncio
import logging
from aiohttp import ClientError, ClientSession
from gios import ApiError, InvalidSensorsData, Gios, NoStationError
GIOS_STATION_ID = 117
logging.basicConfig(level=logging.DEBUG)
async def main():
try:
async with ClientSession() as websession:
gios = Gios(GIOS_STATION_ID, websession)
await gios.update()
except (ApiError, NoStationError, InvalidSensorsData, ClientError) as error:
print(f"{error}")
return
data = gios.data
latitude = gios.latitude
longitude = gios.longitude
station_name = gios.station_name
print(f"Longitude: {longitude}")
print(f"Latitude: {latitude}")
print(f"Station name: {station_name}")
print(data)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
|
the-stack_0_11335 | import os
import tensorflow as tf
def assign_to_gpu(gpu=0, ps_dev="/device:CPU:0"):
def _assign(op):
node_def = op if isinstance(op, tf.compat.v1.NodeDef) else op.node_def
if node_def.op == "Variable":
return ps_dev
else:
return "/gpu:%d" % gpu
return _assign
def average_grads_and_vars(tower_grads_and_vars):
def average_dense(grad_and_vars):
if len(grad_and_vars) == 1:
return grad_and_vars[0][0]
grad = grad_and_vars[0][0]
for g, _ in grad_and_vars[1:]:
grad += g
return grad / len(grad_and_vars)
def average_sparse(grad_and_vars):
if len(grad_and_vars) == 1:
return grad_and_vars[0][0]
indices = []
values = []
for g, _ in grad_and_vars:
indices += [g.indices]
values += [g.values]
indices = tf.concat(indices, 0)
values = tf.concat(values, 0) / len(grad_and_vars)
return tf.IndexedSlices(values, indices, grad_and_vars[0][0].dense_shape)
average_grads_and_vars = []
for grad_and_vars in zip(*tower_grads_and_vars):
if grad_and_vars[0][0] is None:
grad = None
elif isinstance(grad_and_vars[0][0], tf.IndexedSlices):
grad = average_sparse(grad_and_vars)
else:
grad = average_dense(grad_and_vars)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads_and_vars.append(grad_and_var)
return average_grads_and_vars
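# Illustrative sketch (tensors and variables are assumptions): with two towers,
# each holding gradients for the same two variables,
#   tower_grads = [[(g0_a, v_a), (g0_b, v_b)],
#                  [(g1_a, v_a), (g1_b, v_b)]]
#   averaged = average_grads_and_vars(tower_grads)
# `averaged` contains one (mean_gradient, variable) pair per variable, reusing
# the first tower's variable references.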
def load_from_checkpoint(saver, logdir):
    sess = tf.compat.v1.get_default_session()
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt and ckpt.model_checkpoint_path:
if os.path.isabs(ckpt.model_checkpoint_path):
# Restores from checkpoint with absolute path.
saver.restore(sess, ckpt.model_checkpoint_path)
else:
# Restores from checkpoint with relative path.
saver.restore(sess, os.path.join(logdir, ckpt.model_checkpoint_path))
return True
return False
|
the-stack_0_11337 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import signal
import sys
import functools
from aiotinder.controllers.tinder import Tinder
facebook_id = ""
facebook_token = ""
async def shutdown(loop: asyncio.events.AbstractEventLoop) -> None:
await asyncio.sleep(0.1)
loop.close()
async def result(tinder: Tinder) -> None:
users = await tinder.prospective_matches()
for user in users:
print("Name: {0}, Age: {1}".format(user.name, user.age))
def main() -> None:
loop = asyncio.get_event_loop()
loop.set_debug(True)
    loop.add_signal_handler(signal.SIGINT, lambda: asyncio.ensure_future(shutdown(loop)))
tinder = Tinder(facebook_id, facebook_token)
loop.run_until_complete(result(tinder))
    sys.exit(0)
if __name__ == "__main__":
main()
|
the-stack_0_11338 | '''
Voice metadata definition.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
class Voice(object):
def __init__(self, id, name=None, languages=[], gender=None, age=None):
self.id = id
self.name = name
self.languages = languages
self.gender = gender
self.age = age
def __str__(self):
return '''<Voice id=%(id)s
name=%(name)s
languages=%(languages)s
gender=%(gender)s
age=%(age)s>''' % self.__dict__ |
the-stack_0_11339 | from enum import IntEnum
import functools
import usb.core
import usb.util
from traffic_light.error import TrafficLightError, MultipleTrafficLightsError
CTRL_ENDPOINT = 0x02
ID_VENDOR = 0x0d50
ID_PRODUCT = 0x0008
INTERFACE = 0
class Color(IntEnum):
RED = 0x10
YELLOW = 0x11
GREEN = 0x12
class State(IntEnum):
OFF = 0x0
ON = 0x1
class ClewareTrafficLight:
def __init__(self, address=None):
if address:
self.address = address
self.device = usb.core.find(
address=address,
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
elif len(list(ClewareTrafficLight.find_devices())) > 1:
raise MultipleTrafficLightsError(
"No address is given and there are multiple devices conected! "
"Use 'print_devices' to see a list of connected devices."
)
else:
self.device = usb.core.find(
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
if self.device is None:
raise TrafficLightError('Cleware traffic light not found!')
self.reattach = False
def attach(self):
"""Attaches the device back to the kernel"""
usb.util.dispose_resources(self.device)
if self.reattach:
self.device.attach_kernel_driver(INTERFACE)
def detach(self):
"""Detaches the device from to kernel so it can be used"""
if self.device.is_kernel_driver_active(INTERFACE):
self.device.detach_kernel_driver(INTERFACE)
self.reattach = True
@staticmethod
def find_devices():
"""Returns the raw iterator of all found traffic lights"""
devices = usb.core.find(find_all=True, idVendor=ID_VENDOR, idProduct=ID_PRODUCT)
if devices:
return devices
return []
@staticmethod
def print_devices():
"""Prints a list of all connected traffic lights"""
devices = ClewareTrafficLight.get_devices()
for device in devices:
print(device)
@staticmethod
def get_devices():
"""Returns a list of ClewareTrafficLight instances"""
usb_devices = ClewareTrafficLight.find_devices()
return [ClewareTrafficLight(d.address) for d in usb_devices]
def set_led(self, color, value, timeout=1000):
"""Sets the given state and color of the attached traffic light
        Arguments:
            color -- the color to set, as the Color enum. E.g. Color.RED
            value -- the state to set, as the State enum. E.g. State.ON
            timeout -- USB write timeout in milliseconds
"""
try:
self.detach()
self.device.write(CTRL_ENDPOINT, [0x00, color, value], timeout=timeout)
except Exception as exc:
raise TrafficLightError(str(exc)) from exc
finally:
self.attach()
def __getattr__(self, name):
"""Parses attribut calls in function"""
args = name.split('_')
try:
color = Color[args[0].upper()]
state = State[args[1].upper()]
except Exception as exc:
raise TrafficLightError("Either the given color or state could not be parsed! Exc: {}"
.format(exc))
return functools.partial(self.set_led, color, state)
def __str__(self):
"""Converts instance into string with important imformations"""
return ("== Cleware Traffic Light ==\n"
"Address: {} \n"
"IdVendor: {} \n"
"IdProduct: {}".format(self.address, ID_VENDOR, ID_PRODUCT))
|
the-stack_0_11340 | from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack
from b_elasticsearch_layer.layer import Layer as ElasticsearchLayer
class TestingInfrastructure(Stack):
def __init__(self, scope: Stack):
super().__init__(
scope=scope,
id=f'TestingStack',
stack_name=f'TestingStack'
)
Function(
scope=self,
id='TestingFunction',
code=Code.from_inline(
'def handler(): return "Hello World!"'
),
handler='index.handler',
runtime=Runtime.PYTHON_3_6,
layers=[ElasticsearchLayer(self, 'TestingElasticsearchLayer')]
)
|
the-stack_0_11341 | import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--results", type=str, required=True)
parser.add_argument("--claims", type=str, required=True)
parser.add_argument("--t5_output_ids", type=str, required=True)
parser.add_argument("--t5_output", type=str, required=True)
args = parser.parse_args()
label_map = {"weak": "NOT_ENOUGH_INFO", "false": "CONTRADICT", "true": "SUPPORT"}
format1_ids = open(args.t5_output_ids, "r")
format1_label = open(args.t5_output, "r")
format1_eval = open(args.results, "w")
labels = [line.split()[0] for line in format1_label.readlines()]
id_lines = format1_ids.readlines()
claim_labels_dict = {}
for idx, line in enumerate(id_lines):
info = line.split()
if info[0] not in claim_labels_dict:
claim_labels_dict[str(info[0])] = {}
claim_labels_dict[info[0]][info[1]] = {"label": label_map[labels[idx]]}
claims_f = open(args.claims, "r").readlines()
all_ids = []
for line in claims_f:
info = json.loads(line)
claim_id = info["id"]
all_ids.append(claim_id)
for key in all_ids:
if str(key) in claim_labels_dict:
format1_eval.write(json.dumps({"claim_id": int(key), "labels": claim_labels_dict[str(key)]})+"\n")
else:
format1_eval.write(json.dumps({"claim_id": int(key), "labels": {}}) + "\n")
format1_eval.close()
format1_label.close()
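# Illustrative line of the resulting results file (claim id, doc id and label
# are assumptions):
#   {"claim_id": 42, "labels": {"d101": {"label": "SUPPORT"}}}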
|
the-stack_0_11343 | #!/usr/bin/env python3
import uuid
import typing
import aiohttp
from aiohttp import web_exceptions
class MumbleException(Exception):
def __init__(self, message: str):
super().__init__()
self.message = message
def ApiSession(timeout: int) -> typing.AsyncContextManager[aiohttp.ClientSession]:
skip_headers = ['User-Agent']
client_timeout = aiohttp.ClientTimeout(total=timeout)
return aiohttp.ClientSession(timeout=client_timeout, skip_auto_headers=skip_headers)
class Spotiflag:
PORT = 17171
def __init__(self, hostname: str, timeout: int):
self.url = f'http://{hostname}:{self.PORT}'
self.timeout = timeout
async def ping(self, answer: str='pong') -> None:
url = f'{self.url}/api/ping/'
async with ApiSession(self.timeout) as session:
async with session.get(url) as response:
if response.status != web_exceptions.HTTPOk.status_code:
raise MumbleException(f'/api/ping/: returns {response.status}')
try:
data = await response.text()
except Exception:
raise MumbleException(f'/api/ping/: can\'t get text')
if data != answer:
raise MumbleException(f'/api/ping/: incorrect answer')
async def list(self) -> typing.List[str]:
url = f'{self.url}/api/list/'
async with ApiSession(self.timeout) as session:
async with session.get(url) as response:
if response.status != web_exceptions.HTTPOk.status_code:
raise MumbleException(f'/api/list/: returns {response.status}')
try:
data = await response.json()
except Exception:
raise MumbleException(f'/api/list/: can\'t get json')
if not isinstance(data, list) or not all(isinstance(x, str) for x in data):
raise MumbleException(f'/api/list/: invalid json structure')
return data
async def generate(self, description: bytes) -> uuid.UUID:
url = f'{self.url}/api/generate/'
async with ApiSession(self.timeout) as session:
async with session.post(url, data=description) as response:
if response.status != web_exceptions.HTTPOk.status_code:
raise MumbleException(f'/api/generate/: returns {response.status}')
try:
data = await response.text()
except Exception:
raise MumbleException(f'/api/generate/: can\'t get id')
try:
id = uuid.UUID(data.strip(), version=4)
except Exception:
raise MumbleException(f'/api/generate/: invalid id format')
return id
async def listen(
self,
id: uuid.UUID,
chunks_count: int,
expected_chunk_size: int=64 * 1024
) -> typing.Optional[bytes]:
url = f'{self.url}/api/listen/{id}/'
chunks = []
async with ApiSession(self.timeout) as session:
offset = 0
for i in range(chunks_count):
session.headers.update({
'Range': f'bytes={offset}-'
})
async with session.get(url) as response:
if response.status == web_exceptions.HTTPNotFound.status_code:
return None
if response.status != web_exceptions.HTTPPartialContent.status_code:
raise MumbleException(f'/api/listen/: returns {response.status}')
try:
chunk = await response.read()
except Exception:
raise MumbleException(f'/api/listen/: can\'t get content')
if len(chunk) != expected_chunk_size:
raise MumbleException(f'/api/listen/: incorrect content size')
chunks.append(chunk)
offset += len(chunk)
return b''.join(chunks)
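# Illustrative checker flow (hostname, flag data and chunk count are assumptions):
#
#   api = Spotiflag('10.10.10.3', timeout=5)
#   await api.ping()
#   track_id = await api.generate(b'flag data')
#   payload = await api.listen(track_id, chunks_count=4)
#
# `listen` returns None if the id is unknown (HTTP 404) and raises
# MumbleException on any other protocol violation.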
|
the-stack_0_11344 | import argparse
import json
import os
import subprocess
from bids import BIDSLayout
import datetime
from collections import OrderedDict
from shutil import copy as fileCopy
from shutil import rmtree
def isTrue(arg):
return arg is not None and (arg == 'Y' or arg == '1' or arg == 'True')
def logtext(logfile, textstr):
stamp=datetime.datetime.now().strftime("%m-%d-%y %H:%M:%S%p")
textstring = str(stamp) + ' ' + str(textstr)
print(textstring)
logfile.write(textstring + '\n')
def createDatasetDescription(bidsDir,project):
datasetjson=os.path.join(bidsDir,'dataset_description.json');
if not os.path.exists(datasetjson):
print("Constructing BIDS dataset description")
dataset_description=OrderedDict()
dataset_description['Name'] =project
dataset_description['BIDSVersion']=BIDSVERSION
dataset_description['License']=""
dataset_description['ReferencesAndLinks']=""
with open(datasetjson,'w') as datasetjson:
json.dump(dataset_description,datasetjson)
BIDSVERSION = "1.0.0"
parser = argparse.ArgumentParser(description="Run dcm2niix on every file in a session")
parser.add_argument("--subject", help="Subject Label", required=True)
parser.add_argument("--session_label", help="session Label", nargs='?', required=False)
parser.add_argument("--proc_steps", help="additional proc steps", nargs='?', required=False)
parser.add_argument("--dicomdir", help="Root output directory for DICOM files", required=True)
parser.add_argument("--niftidir", help="Root output directory for NIFTI files", required=True)
parser.add_argument("--workdir", help="working directory for temporary files", required=False,default="/tmp")
parser.add_argument("--bidsconfig", help="path to BIDS config file", required=True)
parser.add_argument("--bidsaction", help="path to BIDS action file", required=False)
parser.add_argument("--overwrite", help="Overwrite NIFTI files if they exist")
parser.add_argument("--cleanup", help="Attempt to clean up temporary files")
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
args, unknown_args = parser.parse_known_args()
subject = args.subject
session_label = args.session_label
if session_label is None:
session_label='nosession'
if not session_label:
session_label='nosession'
overwrite = isTrue(args.overwrite)
cleanup = isTrue(args.cleanup)
dicomdir = args.dicomdir
niftidir = args.niftidir
workdir = args.workdir
logdir = niftidir + "/logs"
bidsactionfile = args.bidsaction
if bidsactionfile is None:
bidsactionfile=''
dcm2bids_config = args.bidsconfig
proc_steps = args.proc_steps
if proc_steps is None:
proc_steps = ''
if not proc_steps:
proc_steps = 'bids'
# Set up working directory
if not os.access(niftidir, os.R_OK):
os.mkdir(niftidir)
if not os.access(logdir, os.R_OK):
os.mkdir(logdir)
# set up log file
TIMESTAMP = datetime.datetime.now().strftime("%m%d%y%H%M%S%p")
LOGFILENAME = 'xnatSession_' + TIMESTAMP + '.log'
LOGFILENAME = os.path.join(logdir,LOGFILENAME)
LOGFILE = open(LOGFILENAME,'w+')
# Download and convert Dicoms to BIDS format
if 'bids' in proc_steps:
os.chdir(workdir)
# find step-specific parameters
step_info=''
proc_steps_list=proc_steps.split(",");
for step_item in proc_steps_list:
if 'bids:' in step_item:
step_info = step_item
break
resourceExists = os.listdir(niftidir)
if not resourceExists or overwrite:
if overwrite:
if session_label == "nosession":
dcm2bids_command = "dcm2bids -d {} -p {} -c {} -o {} --clobber".format(dicomdir, subject, dcm2bids_config, niftidir ).split()
else:
dcm2bids_command = "dcm2bids -d {} -p {} -s {} -c {} -o {} --clobber".format(dicomdir, subject, session_label, dcm2bids_config, niftidir ).split()
else:
if session_label == "nosession":
dcm2bids_command = "dcm2bids -d {} -p {} -c {} -o {}".format(dicomdir, subject, dcm2bids_config, niftidir ).split()
else:
dcm2bids_command = "dcm2bids -d {} -p {} -s {} -c {} -o {}".format(dicomdir, subject, session_label, dcm2bids_config, niftidir ).split()
logtext(LOGFILE, ' '.join(dcm2bids_command))
logtext(LOGFILE, str(subprocess.check_output(dcm2bids_command)))
#delete temporary folder
tmpBidsDir=os.path.join(niftidir,'tmp_dcm2bids')
if cleanup:
try:
logtext(LOGFILE,'Cleaning up %s directory.' % tmpBidsDir)
rmtree(tmpBidsDir)
except OSError:
logtext(LOGFILE, 'problem deleting tmp_dcm2bids directory due to OS error. Please delete manually')
# perform deface
createDatasetDescription(niftidir, "PROJECTNAME")
layout = BIDSLayout(niftidir)
T1w=layout.get(subject=subject, suffix='T1w', extension='nii.gz')
for t1w in T1w:
t1wpath=t1w.path
deface_command = "pydeface --force {}".format(t1wpath).split()
logtext(LOGFILE,"Executing command: " + " ".join(deface_command))
logtext(LOGFILE,subprocess.check_output(deface_command))
logtext (LOGFILE,"Get project BIDS bidsaction map")
if os.path.exists(bidsactionfile):
with open(bidsactionfile) as f:
action = json.load(f)
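            # Sketch of the bidsaction JSON shape this block expects (inferred from the
            # keys read below; field values are placeholders):
            # {
            #   "copy": [
            #     {
            #       "dataType": "anat",
            #       "modalityLabel": "T1w",
            #       "customLabels": "ses-01_acq-highres",
            #       "destination": {"dataType": "anat",
            #                       "modalityLabel": "T1w",
            #                       "customLabels": "ses-01"}
            #     }
            #   ]
            # }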
try:
copyitems = action['copy']
except KeyError:
copyitems = []
logtext (LOGFILE, 'No copy items provided.')
for item in copyitems:
entities={}
entities['extension']=['nii','nii.gz']
try:
dataType = item["dataType"]
entities['datatype']=dataType
except KeyError:
dataType = None
try:
modalityLabel = item["modalityLabel"]
entities['suffix']=modalityLabel
except KeyError:
modalityLabel = None
try:
customLabels = item["customLabels"]
labels = customLabels.split("_")
subjectbids=list(filter(lambda x: "sub-" in x, labels))
if subjectbids:
subjectValue=subjectbids[0].split('-')[1]
entities['subject']=subjectValue
else:
entities['subject']=subject
sessionbids=list(filter(lambda x: "ses-" in x, labels))
if sessionbids:
sessionValue=sessionbids[0].split('-')[1]
entities['session']=sessionValue
elif session_label != "nosession":
entities['session']=session_label
task=list(filter(lambda x: "task-" in x, labels))
if task:
taskValue=task[0].split('-')[1]
entities['task']=taskValue
acquisition=list(filter(lambda x: "acq-" in x, labels))
if acquisition:
acquisitionValue=acquisition[0].split('-')[1]
entities['acquisition']=acquisitionValue
run=list(filter(lambda x: "run-" in x, labels))
if run:
runValue=run[0].split('-')[1]
entities['run']=runValue
except KeyError:
customLabels= None
entities['subject']=subject
if session_label != "nosession":
entities['session']=session_label
files = layout.get(return_type='file', **entities)
if files:
sourcefile = files[0]
entities = layout.parse_file_entities(sourcefile)
entities['extension'] = 'json'
files = layout.get(return_type='file', **entities)
if files:
sourcejson = files[0]
else:
sourcejson = None
else:
sourcefile = None
try:
destination = item["destination"]
except KeyError:
destination = []
logtext (LOGFILE, 'No Destination provided for copy')
if destination and sourcefile and sourcejson:
entities['subject']=subject
try:
dataType = destination["dataType"]
entities['datatype']=dataType
except KeyError:
dataType = None
try:
modalityLabel = destination["modalityLabel"]
entities['suffix']=modalityLabel
except KeyError:
modalityLabel = None
try:
customLabels = destination["customLabels"]
labels = customLabels.split("_")
sessionbids=list(filter(lambda x: "ses-" in x, labels))
if sessionbids:
sessionValue=sessionbids[0].split('-')[1]
entities['session']=sessionValue
task=list(filter(lambda x: "task-" in x, labels))
if task:
taskValue=task[0].split('-')[1]
entities['task']=taskValue
else:
entities.pop('task', None)
acquisition=list(filter(lambda x: "acq-" in x, labels))
if acquisition:
acquisitionValue=acquisition[0].split('-')[1]
entities['acquisition']=acquisitionValue
else:
entities.pop('acquisition', None)
run=list(filter(lambda x: "run-" in x, labels))
if run:
runValue=run[0].split('-')[1]
entities['run']=runValue
else:
entities.pop('run', None)
entities['extension']='nii.gz'
outputfile=os.path.join(niftidir, layout.build_path(entities))
if os.path.exists(sourcefile):
logtext (LOGFILE, "copying %s to %s" %(sourcefile, outputfile))
subprocess.check_output(['cp',sourcefile,outputfile])
else:
logtext (LOGFILE, "ERROR: %s cannot be found. Check bidsaction file logic." % sourcefile)
entities['extension']='json'
outputjson=os.path.join(niftidir, layout.build_path(entities))
if os.path.exists(sourcejson):
logtext (LOGFILE, "copying %s to %s" %(sourcejson, outputjson))
subprocess.check_output(['cp',sourcejson, outputjson])
else:
logtext (LOGFILE, "ERROR: %s cannot be found. Check bidsaction file logic." % sourcejson)
except KeyError:
customLabels= None
else:
logtext (LOGFILE,"Destination or source file could not be found - skipping")
else:
logtext (LOGFILE,"Could not read project BIDS action file - continuing with upload")
##########
LOGFILE.flush()
else:
message = 'Looks like Dcm2bids has already been run. If you want to rerun then set overwrite flag to True.'
logtext (LOGFILE, message)
logtext (LOGFILE, 'All done with session processing.')
LOGFILE.close()
|
the-stack_0_11345 | import os
import requests
import json
import pandas as pd
from datetime import datetime, timedelta
ENV = "sandbox" #Use "sandbox" when testing, and "api" if you have an account at Tradier
API_TOKEN = "" #Fill in your Tradier API Token here
###
#Script starts here
###
def main():
#Get list of symbols from file
filename_in = "symbols.csv"
listOfSymbols = importCSV(filename_in)
#Find Cash Secured Puts
#Parameters: Symbols, min DTE, max DTE
findCashSecuredPuts(listOfSymbols, 10, 47)
###
#API Functions
###
#Get Data from Tradier API
def getAPIData(url):
bearer_token = f"Bearer {API_TOKEN}"
headers={'Authorization': bearer_token, 'Accept': 'application/json'}
response = requests.get(url, headers=headers)
if response.status_code == 200:
return json.loads(response.content.decode('utf-8'))
#Get all the upcoming expirations for given symbol
def getOptionExpirations(symbol):
url = f"https://{ENV}.tradier.com/v1/markets/options/expirations?symbol={symbol}"
expirations_data = getAPIData(url)
expirations = []
if (expirations_data['expirations']):
expirations = expirations_data['expirations']['date']
return expirations
#Retrieve the options chain for given symbol and expiration
def getOptionsChain(symbol, expiration):
url = f"https://{ENV}.tradier.com/v1/markets/options/chains?symbol={symbol}&expiration={expiration}&greeks=true"
options_chain_data = getAPIData(url)
options_chain = []
if (options_chain_data['options']):
options_chain = options_chain_data['options']['option']
return options_chain
#Retrieves latest stock price from Tradier Market API
def getLastStockPrice(symbol):
url = f"https://{ENV}.tradier.com/v1/markets/quotes?symbols={symbol}"
quote_data = getAPIData(url)
last_price = -1
if ('quote' in quote_data['quotes']):
last_price = quote_data['quotes']['quote']['last']
return last_price
###
#Utility functions
###
#Import CSV files using Pandas library
def importCSV(filename_in):
data = pd.read_csv(filename_in)
symbols = data['Symbol'].to_list()
return symbols
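# The input CSV is expected to have a 'Symbol' header column, e.g. (tickers are
# placeholders):
#   Symbol
#   AAPL
#   MSFT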
#Limit expirations of symbol to provided min_dte (Min Days Until Expiration) and max_dte (Max Days Until Expiration)
def listOfLimitedExpirations(symbol, min_dte, max_dte):
#Get option expirations for symbol
expirations_list = getOptionExpirations(symbol)
expirations = []
if(isinstance(expirations_list, str)):
return []
for expiration_date in expirations_list:
#Extract dates within set DTE
date_object = datetime.strptime(expiration_date,"%Y-%m-%d")
expiration_min_date = datetime.now() + timedelta(min_dte)
expiration_max_date = datetime.now() + timedelta(max_dte)
if (date_object <= expiration_min_date):
continue
if (date_object >= expiration_max_date):
continue
expirations.append(expiration_date)
return expirations
def exportToFile(data, filename_out):
output = pd.DataFrame(data, columns=['Symbol','Expiration','Strike','Bid','Ask','Volume','Delta','Premium'])
output.to_csv(filename_out,index=False)
#Creates a new dictionary with options data
def gatherOptionData(option):
option_data = {}
option_data['symbol'] = option['underlying']
option_data['type'] = option['option_type']
option_data['expiration'] = option['expiration_date']
option_data['strike'] = option['strike']
option_data['bid'] = option['bid']
option_data['ask'] = option['ask']
option_data['volume'] = option['volume']
option_data['open_int'] = option['open_interest']
#Add necessary greeks here
option_greeks = option.get('greeks',None)
if (option_greeks):
option_data['delta'] = option_greeks['delta']
option_data['theta'] = option_greeks['theta']
option_data['gamma'] = option_greeks['gamma']
return option_data
###
# Main function for filtering the PUT options we are looking for
# You will have to set your own criteria
# Generally, for minimum criteria, you want:
# tight bid/ask spreads (under .15)
# Some liquidity (Looking for volume greater than 0)
# Certain delta, minimum premium, etc.
###
def findCashSecuredPuts(ListOfSymbols, minDays, maxDays):
#Adjust these according to your criteria
MAX_BID_ASK_SPREAD = .15
MIN_PRICE = 10
MAX_PRICE = 70
MIN_PREM = .30
MAX_DELTA = -.2
matching_options = []
data_frame = []
for symbol in ListOfSymbols:
print(f"Processing {symbol}...")
#Depending on your list of symbols, you may want to filter by current price, since you will need buying power
last_price = getLastStockPrice(symbol)
if (last_price <= MIN_PRICE or last_price >= MAX_PRICE):
continue
#We only want options expiring within a certain timeframe
expirations_list = listOfLimitedExpirations(symbol, minDays, maxDays)
numOptions = 0
for expiration in expirations_list:
#First we need the options chain
options = getOptionsChain(symbol, expiration)
for option_item in options:
#This will just gather data from option into a more useful dictionary
option = gatherOptionData(option_item)
#Start filtering by your criteria here
#Make sure there is a bid/ask, otherwise there's probably no liquidity
if (option['bid'] is None or option['ask'] is None):
continue
#Estimated premium (this goes by the approx mid price)
premium = round((option['bid'] + option['ask']) / 2,2)
#Check for delta if it exists
delta = -999
if ('delta' in option):
delta = option['delta']
#Filter out the options we actually want
if (option['type'] == "put"
and option['bid'] > 0
and delta >= MAX_DELTA
and premium >= MIN_PREM
and (option['ask'] - option['bid']) <= MAX_BID_ASK_SPREAD
and option['volume'] > 0
):
#Format the output
option_output = '{}, {}, BID:{}, ASK:{}, {}, {}(D), Premium: {}'\
.format(
option['expiration'],
option['strike'],
option['bid'],
option['ask'],
option['volume'],
delta,
premium)
#Separate by symbol
if (numOptions == 0):
matching_options.append(f"Symbol: {symbol}")
numOptions += 1
#Print the screen when a match is found
print(f"Wheel: {option_output}")
#Add data to Pandas DataFrame
data_frame.append([symbol,
option['expiration'],
option['strike'],
option['bid'],
option['ask'],
option['volume'],
delta,
premium])
#Export results to a new csv file
exportToFile(data_frame, "output_cash_secured_puts.csv")
if __name__ == '__main__':
main()
|
the-stack_0_11346 | '''
# Functions
'''
import cv2
import numpy as np
import platform
import time
import sys
from rmracerlib import config as cfg
def contours_detection(mask, frame):
# find shapes
# contours detection
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
area = cv2.contourArea(cnt)
approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
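        # NOTE: AREA_SIZE and font are not defined in this module; they are assumed
        # to be provided elsewhere (e.g. as constants alongside the imported config).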
if area > AREA_SIZE:
if len(cnt) == 8:
cv2.drawContours(frame, [approx], 0, (0,0,0), 5)
x = approx.ravel()[0]
y = approx.ravel()[1]
cv2.putText(frame, "STOP", (x,y), font, 1, (0,0,0))
return "stop"
# nothing
return None
###
###
### HELPER FUNCTIONS
###
###
def valid_range(x, y, w, h, frame):
'''
    This function returns whether an ROI is in a valid or acceptable part of the image. The reason
    for having this is that extra parts of the frame can contain reflections.
'''
left_buf = 10
right_buf = 40
top_buf = 10
centre_buf = 25
height, width = frame.shape[:2]
h_top = int(height / cfg.VR_TOP) # previously h1
h_bot = int(height / cfg.VR_BOTTOM) # previously horizon
v0 = left_buf # furthest left width
v1 = int(width/3) # 1/3rd width
v2 = v1*2 # 2/3rd width
v3 = width - right_buf # furthest right width
if cfg.DRAW_RANGE:
cv2.line(frame, (0, h_top ), (width, h_top ), (255,0,255))
cv2.line(frame, (0, h_bot ), (width, h_bot ), (0,255,255))
cw = True
ch = False
if ( (v0 < x < v1) or (v2 < x < v3) ) and ( (v0 < x+w < v1) or (v2 < x+w < v3) ):
cw = True
if (h_top < y < h_bot) and (h_top < y+h < h_bot): #h0 < y < h2:
ch = True
if ch and cw:
return True
else:
return False
def is_squarish(height, width):
# calculate ratio of sides - anything not square is not worth checking
a = height / width
b = width / height
if (0.5 < a < 2.0) and (0.5 < b < 2.0):
return True
else:
return False
def sign_direction(img):
"""
Turning Sign Detection Part 1
Reads in a ROI and outputs either: right, left or None
"""
# sharpen the ROI so it is clearer for detection
sharpen = cv2.GaussianBlur(img, (3,3), 3)
sharpen = cv2.addWeighted(img, 1.5, sharpen, -0.5, 0)
# convert image to binary
grey = cv2.cvtColor(sharpen, cv2.COLOR_BGR2GRAY)
thresh, binary = cv2.threshold(grey, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# get picture shape information for selecting a smaller ROI
height, width = binary.shape[:2]
# CHECK 1 - calculate ratio of sides - anything not square is not worth checking
a = height / width
b = width / height
if (0.5 < a < 2.0) and (0.5 < b < 2.0):
pass
else:
return None
# CHECK 2 - check the mix of white and black pixels to eliminate false detections
# calculate total number of pixels (TODO: faster way)
total = height * width
# calculate ratios
n_white_pix = np.sum(binary == 255)
w_ratio = int(n_white_pix / total * 100)
n_black_pix = np.sum(binary == 0)
b_ratio = int(n_black_pix / total * 100)
# check
if ( ( 40 <= w_ratio <= 60 ) and ( 40 <= b_ratio <= 60 ) ):
# run the sign detection algorithm
result = direction_check(binary)
if result is not None:
return result
# if we fail any tests, return None
return None
def direction_check(binary):
"""
    Turning Sign Detection Part 2
Checks the sign direction based on relevant information in the image
"""
# extract image information
height, width = binary.shape[:2]
# set up our regions at 25%, 50% and 75% marks
# we are only going to look at the center of the binary image
h1 = int(height/4) # was 0
h2 = int(height/2)
h3 = int(h1+h2) # was height
v1 = int(width/4) # was 0
v2 = int(width/2)
v3 = int(v1+v2) # was width
# quadrants / regions
q1Block = binary[h1:h2, v2:v3]
q2Block = binary[h1:h2, v1:v2]
q3Block = binary[h2:h3, v1:v2]
q4Block = binary[h2:h3, v2:v3]
# add up the number of white pixels in each quadrant.
q1Sum = np.sum(q1Block == 255)
q2Sum = np.sum(q2Block == 255)
q3Sum = np.sum(q3Block == 255)
q4Sum = np.sum(q4Block == 255)
# information search - check which region has the most white pixels and then
# determine if the sign is left or right.
if q4Sum > q3Sum: #and q1Sum < q2Sum:
#print("guess: left")
return "left"
elif q4Sum < q3Sum: #and q1Sum > q2Sum:
#print("guess: right")
return "right"
else:
return None
|
the-stack_0_11347 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = ["Click>=6.0",
"watchdog",
"requests",
"pytesseract",
"pdf2image",
"PyPDF2",
"unidecode",
]
setup_requirements = []
test_requirements = []
setup(
author="Justin Keller",
author_email="[email protected]",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
description="REST API Uploader",
entry_points={"console_scripts": ["rest_uploader=rest_uploader.cli:main"]},
install_requires=requirements,
license="MIT license",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords=["rest_uploader", "joplin", "rest-uploader"],
name="rest_uploader",
packages=find_packages(include=["rest_uploader"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/kellerjustin/rest-uploader",
version="0.3.0",
zip_safe=False,
)
|
the-stack_0_11350 | from .base import * # flake8: noqa
#env.bool('DJANGO_DEBUG', default=False)
DEBUG = env('DEBUG')
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SECRET_KEY = env('DJANGO_SECRET_KEY')
# Compress static files offline
# http://django-compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
# Turning this on creates causes the server to return 500
# According to the docs if this is set to True then also need to run the compress management commnand
#COMPRESS_OFFLINE = True
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter',
]
ALLOWED_HOSTS = env('DJANGO_ALLOWED_HOSTS')
INSTALLED_APPS += (
'wagtail.contrib.wagtailfrontendcache',
'gunicorn',
)
# Run long-running tasks (such as sending notification emails) through Celery
# to prevent this from blocking web server threads
# (requires the django-celery package):
# http://celery.readthedocs.org/en/latest/configuration.html
import djcelery
djcelery.setup_loader()
CELERY_SEND_TASK_ERROR_EMAILS = True
BROKER_URL = 'redis://'
# Use Redis as the cache backend for extra performance
# (requires the django-redis-cache package):
# http://wagtail.readthedocs.org/en/latest/howto/performance.html#cache
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True,
}
}
}
DEFAULT_FROM_EMAIL = env('EMAIL_FROM')
EMAIL_USE_TLS = True
EMAIL_HOST = env("EMAIL_HOST")
EMAIL_HOST_USER = env('EMAIL_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_PASSWD')
EMAIL_PORT = 587
# LOGGING CONFIGURATION
# Sends an email to site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
|
the-stack_0_11352 | # -*- coding: utf-8 -*-
import os
import re
import time
from bs4 import BeautifulSoup
import requests
import httpx
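# rrss(): wraps Twitter @mentions, #hashtags and bare URLs found in the given text
# with HTML anchor tags (behaviour summarised from the regex substitutions below).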
def rrss(test_str):
regex = r"(@(\w+))"
subst = "<a rel='nofolow norefer' href=\"https://twitter.com/\\2\" target=\"_blank\">\\1</a>"
result = re.sub(regex, subst, test_str, 0, re.IGNORECASE | re.UNICODE)
if result:
test_str = result
regex = r"(#(\w+))"
subst = "<a href=\"https://twitter.com/hashtag/\\2\" target=\"_blank\">\\1</a>"
result = re.sub(regex, subst, test_str, 0, re.IGNORECASE | re.UNICODE)
if result:
test_str = result
regex = r"[^\'\"](https?:\/\/((www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6})\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*))"
subst = " <a href=\"\\1\" target=\"_blank\" rel='nofollow norefer'>\\2</a>"
result = re.sub(regex, subst, test_str, 0, re.IGNORECASE | re.UNICODE)
if result:
test_str = result
return test_str
def href(name):
return "-".join(name.split(" ")).lower()
def id_tuit_text_plain(text,lis):
regex = r"https:\/\/twitter.com/\w+/status/(\d+)"
m = re.search(regex, text)
if m:
lis.append(m.group(1))
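# down_e(): fetches an article page and scrapes its title, paragraph text, image
# links, embedded tweet ids, iframes, date and categories into a dict
# (summary of the parsing logic below).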
def down_e(url,pined=False):
h = {"User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Mobile Safari/537.36"}
r = requests.get(url,headers=h)
bs = BeautifulSoup(r.content.decode("utf8"), "html.parser")
content = bs.find("main",{"class":"content"})
title = content.find("h1", {"class":"entry-title", "itemprop":"headline"}).get_text()
tuit = content.find_all("a",{"href":re.compile(r"https:\/\/t\.co\/(\w+)")})
img = content.find_all("img",limit=4)
img_link = []
tuit_links = []
link_tuit = []
if tuit:
for a in tuit:
tuit_links.append(a.attrs["href"])
tmp = [requests.get(link).url for link in tuit_links]
for i in tmp:
id_tuit_text_plain(i, link_tuit)
if img:
for i in img:
img_link.append(i.attrs["src"])
date = content.find("time", {"class":"entry-time"}).get_text()
categorias = [i.text for i in content.find("span",{"class":"entry-categories"}).find_all("a")]
external = content.find_all("iframe")
contento = []
external_link = []
if external:
try:
for i in external:
external_link.append(i.attrs["src"])
except Exception:
print("Error into exernals links")
# for i in content.find_all("p"):
# if len(i.text) > 6 and i.em == None :
# contento.append(i.text
for p in bs.find_all("p"):
if p.attrs.get("class") == "entry-meta":
continue
elif p.img:
continue
elif p.get_text().startswith("Fuente") and p.a:
continue
elif p.em:
pass
elif p.get_text().startswith("Copyright") and p.a:
break
else:
contento.append(p.get_text())
print("flush")
contento = [rrss(cnt) for cnt in contento]
contento = "*#*".join(contento)
contento = "{} *$* {}".format(title, contento)
return dict(content=contento, categorias=categorias, date=date, img=img_link, external=external_link, link=href(title), tuit=link_tuit, pined=pined) |
the-stack_0_11353 | """
Python re-implementation of "Exploiting the Circulant Structure of
Tracking-by-detection with Kernels"
@book{Henriques2012Exploiting,
title={Exploiting the Circulant Structure of Tracking-by-Detection with Kernels},
    author={Henriques, João F. and Rui, Caseiro and Martins, Pedro and Batista, Jorge},
year={2012},
}
"""
import numpy as np
import cv2
from .base import BaseCF
from lib.utils import gaussian2d_labels,cos_window
from lib.fft_tools import fft2,ifft2
class CSK(BaseCF):
def __init__(self, interp_factor=0.075, sigma=0.2, lambda_=0.01):
super(CSK).__init__()
self.interp_factor = interp_factor
self.sigma = sigma
self.lambda_ = lambda_
def init(self,first_frame,bbox):
if len(first_frame.shape)==3:
assert first_frame.shape[2]==3
first_frame=cv2.cvtColor(first_frame,cv2.COLOR_BGR2GRAY)
first_frame=first_frame.astype(np.float32)
bbox=np.array(bbox).astype(np.int64)
x,y,w,h=tuple(bbox)
self._center=(x+w/2,y+h/2)
self.w,self.h=w,h
self._window=cos_window((int(round(2*w)),int(round(2*h))))
self.crop_size=(int(round(2*w)),int(round(2*h)))
self.x=cv2.getRectSubPix(first_frame,(int(round(2*w)),int(round(2*h))),self._center)/255-0.5
self.x=self.x*self._window
s=np.sqrt(w*h)/16
self.y=gaussian2d_labels((int(round(2*w)),int(round(2*h))),s)
self._init_response_center=np.unravel_index(np.argmax(self.y,axis=None),self.y.shape)
self.alphaf=self._training(self.x,self.y)
def update(self,current_frame,idx,vis=False):
if len(current_frame.shape)==3:
assert current_frame.shape[2]==3
current_frame=cv2.cvtColor(current_frame,cv2.COLOR_BGR2GRAY)
current_frame=current_frame.astype(np.float32)
z=cv2.getRectSubPix(current_frame,(int(round(2*self.w)),int(round(2*self.h))),self._center)/255-0.5
z=z*self._window
self.z=z
responses=self._detection(self.alphaf,self.x,z)
if vis is True:
self.score=responses
curr=np.unravel_index(np.argmax(responses,axis=None),responses.shape)
dy=curr[0]-self._init_response_center[0]
dx=curr[1]-self._init_response_center[1]
x_c, y_c = self._center
x_c -= dx
y_c -= dy
self._center = (x_c, y_c)
new_x=cv2.getRectSubPix(current_frame,(2*self.w,2*self.h),self._center)/255-0.5
new_x=new_x*self._window
self.alphaf=self.interp_factor*self._training(new_x,self.y)+(1-self.interp_factor)*self.alphaf
self.x=self.interp_factor*new_x+(1-self.interp_factor)*self.x
return [self._center[0]-self.w/2,self._center[1]-self.h/2,self.w,self.h]
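    # The three helpers below implement the kernelized correlation filter maths:
    # a dense Gaussian kernel computed via FFT (_dgk), ridge-regression style
    # training in the Fourier domain (_training) and response-map evaluation (_detection).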
def _dgk(self, x1, x2):
c = np.fft.fftshift(ifft2(fft2(x1)* np.conj(fft2(x2))))
d = np.dot(x1.flatten().conj(), x1.flatten()) + np.dot(x2.flatten().conj(), x2.flatten()) - 2 * c
k = np.exp(-1 / self.sigma ** 2 * np.clip(d,a_min=0,a_max=None) / np.size(x1))
return k
def _training(self, x, y):
k = self._dgk(x, x)
alphaf = fft2(y) / (fft2(k) + self.lambda_)
return alphaf
def _detection(self, alphaf, x, z):
k = self._dgk(x, z)
responses = np.real(ifft2(alphaf * fft2(k)))
return responses
|
the-stack_0_11354 | from io import BytesIO
from PIL import Image, ImageDraw
from flask import send_file
from utils.endpoint import Endpoint, setup
from utils.textutils import auto_text_size, render_text_with_emoji
@setup
class KnowYourLocation(Endpoint):
params = ['text']
def generate(self, avatars, text, usernames, kwargs):
base = Image.open(self.assets.get('assets/knowyourlocation/knowyourlocation.bmp')).convert('RGBA')
# We need a text layer here for the rotation
canv = ImageDraw.Draw(base)
text = text.split(', ')
if len(text) != 2:
text = ["Separate the items with a", "comma followed by a space"]
top, bottom = text
top_font, top_text = auto_text_size(top, self.assets.get_font('assets/fonts/sans.ttf'), 630)
bottom_font, bottom_text = auto_text_size(bottom,
self.assets.get_font('assets/fonts/sans.ttf'),
539)
render_text_with_emoji(base, canv, (64, 131), top_text, top_font, 'black')
render_text_with_emoji(base, canv, (120, 450), bottom_text, bottom_font, 'black')
base = base.convert('RGB')
b = BytesIO()
base.save(b, format='jpeg')
b.seek(0)
return send_file(b, mimetype='image/jpeg')
|
the-stack_0_11356 | # Copyright 2017 The Wallaroo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import errno
import io
import logging
import threading
import time
import socket
import struct
from .errors import TimeoutError
from .logger import INFO2
from .stoppable_thread import StoppableThread
from wallaroo.experimental.connectors import (BaseIter,
BaseSource,
MultiSourceConnector)
try:
basestring
except:
basestring = (str, bytes)
class SingleSocketReceiver(StoppableThread):
"""
Read length or newline encoded data from a socket and append it to an
accumulator list.
Multiple SingleSocketReceivers may write to the same accumulator safely,
so long as they perform atomic writes (e.g. each append adds a single,
complete entry).
"""
__base_name__ = 'SingleSocketReceiver'
def __init__(self, sock, accumulator, mode='framed', header_fmt='>I',
name=None):
super(SingleSocketReceiver, self).__init__()
self.sock = sock
self.accumulator = accumulator
self.mode = mode
self.header_fmt = header_fmt
self.header_length = struct.calcsize(self.header_fmt)
if name:
self.name = '{}:{}:{}'.format(self.__base_name__, name,
sock.fileno())
else:
self.name = '{}:{}'.format(self.__base_name__, sock.fileno())
def try_recv(self, bs, flags=0):
"""
Try to to run `sock.recv(bs)` and return None if error
"""
try:
return self.sock.recv(bs, flags)
except:
return None
def append(self, bs):
if self.mode == 'framed':
self.accumulator.append(bs)
else:
self.accumulator.append(bs + b'\n')
def run(self):
if self.mode == 'framed':
self.run_framed()
else:
self.run_newlines()
def run_newlines(self):
data = []
while not self.stopped():
buf = self.try_recv(1024)
if not buf:
self.stop()
if data:
self.append(b''.join(data))
break
# We must be careful not to accidentally join two separate lines
# nor split a line
split = buf.split(b'\n') # '\n' show as '' in list after split
s0 = split.pop(0)
if s0:
if data:
data.append(s0)
self.append(b''.join(data))
data = []
else:
self.append(s0)
else:
# s0 is '', so first line is a '\n', and overflow is a
# complete message if it isn't empty
if data:
self.append(b''.join(data))
data = []
for s in split[:-1]:
self.append(s)
if split: # not an empty list
if split[-1]: # not an empty string, i.e. it wasn't a '\n'
data.append(split[-1])
time.sleep(0.000001)
def run_framed(self):
while not self.stopped():
header = self.try_recv(self.header_length, socket.MSG_WAITALL)
if not header:
self.stop()
continue
expect = struct.unpack(self.header_fmt, header)[0]
data = self.try_recv(expect, socket.MSG_WAITALL)
if not data:
self.stop()
else:
self.append(b''.join((header, data)))
time.sleep(0.000001)
def stop(self, *args, **kwargs):
super(self.__class__, self).stop(*args, **kwargs)
self.sock.close()
class MultiClientStreamView(object):
def __init__(self, initial_streams, blocking=True):
self.streams = {s.name: s.accumulator for s in initial_streams}
self.positions = {s.name: 0 for s in initial_streams}
self.keys = list(self.positions.keys())
self.key_position = 0
self.blocking = blocking
def add_stream(self, stream):
if stream.name in self.streams:
raise KeyError("Stream {} already in view!".format(stream.name))
self.streams[stream.name] = stream.accumulator
self.positions[stream.name] = 0
self.keys.append(stream.name)
def throw(self, type=None, value=None, traceback=None):
raise StopIteration
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
# sleep condition
origin = self.key_position
while True:
# get current key
cur = self.keys[self.key_position]
# set key for next iteration
self.key_position = (self.key_position + 1) % len(self.keys)
# Can we read from current key?
if self.positions[cur] < len(self.streams[cur]):
# read next value
val = self.streams[cur][self.positions[cur]]
# Increment position
self.positions[cur] += 1
return val
elif self.key_position == origin:
if self.blocking:
# sleep after a full round on all keys produces no value
time.sleep(0.001)
else:
time.sleep(0.001)
return None
# implicit: continue
class TCPReceiver(StoppableThread):
"""
Listen on a (host,port) pair and write any incoming data to an accumulator.
    If `port` is 0, an available port will be chosen by the operating system.
`get_connection_info` may be used to obtain the (host, port) pair after
`start()` is called.
`max_connections` specifies the number of total concurrent connections
supported.
    `mode` specifies how the receiver handles parsing the network stream
into records. `'newlines'` will split on newlines, and `'framed'` will
use a length-encoded framing, along with the `header_fmt` value (default
mode is `'framed'` with `header_fmt='>I'`).
You can read any data saved to the accumulator (a list) at any time
by reading the `data` attribute of the receiver, although this attribute
is only guaranteed to stop growing after `stop()` has been called.
"""
__base_name__ = 'TCPReceiver'
def __init__(self, host, port=0, max_connections=1000, mode='framed',
split_streams=False, header_fmt='>I'):
"""
Listen on a (host, port) pair for up to max_connections connections.
Each connection is handled by a separate client thread.
"""
super(TCPReceiver, self).__init__()
self.host = host
self.port = port
self.address = '{}.{}'.format(host, port)
self.max_connections = max_connections
self.mode = mode
self.split_streams = split_streams
self.header_fmt = header_fmt
self.header_length = struct.calcsize(self.header_fmt)
# use an in-memory byte buffer
self.data = {}
# Create a socket and start listening
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.clients = []
self.err = None
self.event = threading.Event()
self.start_time = None
self.views = []
def __len__(self):
return sum(map(len, self.data.values()))
def bytes_received(self):
        return sum(sum(map(len, acc)) for acc in self.data.values())
def get_connection_info(self, timeout=10):
is_connected = self.event.wait(timeout)
if not is_connected:
raise TimeoutError("{} Couldn't get connection info after {}"
" seconds".format(self.__base_name__, timeout))
return self.sock.getsockname()
def run(self):
self.start_time = datetime.datetime.now()
try:
self.sock.bind((self.host, self.port))
self.sock.listen(self.max_connections)
self.host, self.port = self.sock.getsockname()
self.event.set()
while not self.stopped():
try:
(clientsocket, address) = self.sock.accept()
except Exception as err:
try:
if self.stopped():
break
else:
raise err
except OSError as err:
if err.errno == errno.ECONNABORTED:
# [ECONNABORTED] A connection arrived, but it was
# closed while waiting on the listen queue.
# This happens on macOS during normal
# harness shutdown.
return
else:
logging.error("socket accept errno {}"
.format(err.errno))
self.err = err
raise
if self.split_streams:
# Use a counter to identify unique streams
client_accumulator = self.data.setdefault(len(self.data),
[])
else:
# use * to identify the "everything" stream
client_accumulator = self.data.setdefault('*', [])
cl = SingleSocketReceiver(clientsocket,
client_accumulator,
self.mode,
self.header_fmt,
name='{}-{}'.format(
self.__base_name__,
len(self.clients)))
logging.debug("{}:{} accepting connection from ({}, {}) on "
"port {}."
.format(self.__base_name__, self.name, self.host,
self.port, address[1]))
self.clients.append(cl)
if self.views:
for v in self.views:
v.add_stream(cl)
cl.start()
except Exception as err:
self.err = err
raise
def stop(self, *args, **kwargs):
if not self.stopped():
super(TCPReceiver, self).stop(*args, **kwargs)
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError as err:
if err.errno == errno.ENOTCONN:
# [ENOTCONN] Connection is already closed or unopened
# and can't be shutdown.
pass
else:
raise
self.sock.close()
for cl in self.clients:
cl.stop()
def view(self, blocking=True):
view = MultiClientStreamView(self.clients, blocking=blocking)
self.views.append(view)
return view
def save(self, path):
files = []
if self.split_streams:
# Save streams separately
for stream, data in self.data.items():
base, suffix = path.rsplit('.', 1)
new_path = '{}.{}.{}'.format(base, stream, suffix)
logging.debug("Saving stream {} to path {}".format(
stream, new_path))
with open(new_path, 'wb') as f:
files.append(new_path)
for item in data:
f.write(item)
f.flush()
else:
# only have stream '*' to save
logging.debug("Saving stream * to path {}".format(path))
with open(path, 'wb') as f:
files.append(path)
for item in self.data['*']:
f.write(item)
f.flush()
return files
class Metrics(TCPReceiver):
__base_name__ = 'Metrics'
class Sink(TCPReceiver):
__base_name__ = 'Sink'
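# Illustrative usage sketch (not from the original file; host/port are placeholders):
#
#   sink = Sink('127.0.0.1')
#   sink.start()
#   host, port = sink.get_connection_info()
#   ... point senders / the application under test at (host, port) ...
#   sink.stop()
#   received = sink.data   # dict of accumulated framed records per stream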
class Sender(StoppableThread):
"""
Send length framed data to a destination (addr).
`address` is the full address in the host:port format
`reader` is a Reader instance
`batch_size` denotes how many records to send at once (default=1)
`interval` denotes the minimum delay between transmissions, in seconds
(default=0.001)
`header_length` denotes the byte length of the length header
`header_fmt` is the format to use for encoding the length using
`struct.pack`
`reconnect` is a boolean denoting whether sender should attempt to
reconnect after a connection is lost.
"""
def __init__(self, address, reader, batch_size=1, interval=0.001,
header_fmt='>I', reconnect=False):
logging.info("Sender({address}, {reader}, {batch_size}, {interval},"
" {header_fmt}, {reconnect}) created".format(
address=address, reader=reader, batch_size=batch_size,
interval=interval, header_fmt=header_fmt,
reconnect=reconnect))
super(Sender, self).__init__()
self.daemon = True
self.reader = reader
self.batch_size = batch_size
self.batch = []
self.interval = interval
self.header_fmt = header_fmt
self.header_length = struct.calcsize(self.header_fmt)
self.address = address
(host, port) = address.split(":")
self.host = host
self.port = int(port)
self.name = 'Sender'
self.error = None
self._bytes_sent = 0
self.reconnect = reconnect
self.pause_event = threading.Event()
self.data = []
self.start_time = None
def pause(self):
self.pause_event.set()
def paused(self):
return self.pause_event.is_set()
def resume(self):
self.pause_event.clear()
def send(self, bs):
try:
self.sock.sendall(bs)
except OSError as err:
if err.errno == 104 or err.errno == 54:
# ECONNRESET on Linux or macOS, respectively
is_econnreset = True
else:
is_econnreset = False
logging.info("socket errno {} ECONNRESET {} stopped {}"
.format(err.errno, is_econnreset, self.stopped()))
self.data.append(bs)
self._bytes_sent += len(bs)
def bytes_sent(self):
return self._bytes_sent
def batch_append(self, bs):
self.batch.append(bs)
def batch_send(self):
if len(self.batch) >= self.batch_size:
self.batch_send_final()
time.sleep(self.interval)
def batch_send_final(self):
if self.batch:
self.send(b''.join(self.batch))
self.batch = []
def run(self):
self.start_time = datetime.datetime.now()
while not self.stopped():
try:
logging.info("Sender connecting to ({}, {})."
.format(self.host, self.port))
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
while not self.stopped():
while self.paused():
# make sure to empty the send buffer before
#entering pause state!
self.batch_send_final()
time.sleep(0.001)
header = self.reader.read(self.header_length)
if not header:
self.maybe_stop()
break
expect = struct.unpack(self.header_fmt, header)[0]
body = self.reader.read(expect)
if not body:
self.maybe_stop()
break
self.batch_append(header + body)
self.batch_send()
time.sleep(0.000000001)
self.batch_send_final()
self.sock.close()
except KeyboardInterrupt:
logging.info("KeyboardInterrupt received.")
self.stop()
break
except Exception as err:
self.error = err
logging.error(err)
if not self.reconnect:
break
if not self.stopped():
logging.info("Waiting 1 second before retrying...")
time.sleep(1)
self.sock.close()
def maybe_stop(self):
if not self.batch:
self.stop()
def stop(self, *args, **kwargs):
if not self.stopped():
logging.log(INFO2, "Sender received stop instruction.")
super(Sender, self).stop(*args, **kwargs)
if self.batch:
logging.warning("Sender stopped, but send buffer size is {}"
.format(len(self.batch)))
def last_sent(self):
if isinstance(self.reader.gen, MultiSequenceGenerator):
return self.reader.gen.last_sent()
else:
raise ValueError("Can only use last_sent on a sender with "
"a MultiSequenceGenerator, or an ALOSender.")
class NoNonzeroError(ValueError):
pass
def first_nonzero_index(seq):
idx = 0
for item in seq:
if item == 0:
idx += 1
else:
return idx
else:
raise NoNonzeroError("No nonzero values found in list")
class Sequence(object):
def __init__(self, index, val=0):
self.index = '{index:07d}'.format(index = index)
self.key = self.index.encode()
self.val = val
def __next__(self):
self.val += 1
return (self.key, self.val)
def __iter__(self):
return self
def next(self):
return self.__next__()
def throw(self, type=None, value=None, traceback=None):
raise StopIteration
class MultiSequenceGenerator(object):
"""
A growable collection of sequence generators.
- Each new generator has its own partition
- Messages are emitted in a round-robin fashion over the generator list
- When a new generator joins, it takes over all new messages until it
catches up
- At stoppage time, all generators are allowed to reach the same final
value
"""
def __init__(self, base_index=0, initial_partitions=1, base_value=0):
self._base_value = base_value
self._next_index = base_index + initial_partitions
self.seqs = [Sequence(x, self._base_value)
for x in range(base_index, self._next_index)]
# self.seqs stores the last value sent for each sequence
self._idx = 0 # the idx of the last sequence sent
self._remaining = []
self.lock = threading.Lock()
def format_value(self, value, partition):
return struct.pack('>IQ{}s'.format(len(partition)), 8+len(partition),
value, partition)
def _next_value_(self):
# Normal operation next value: round robin through the sets
if self._idx >= len(self.seqs):
self._idx = 0
next_seq = self.seqs[self._idx]
self._idx += 1
return next(next_seq)
def _next_catchup_value(self):
# After stop() was called: all sets catch up to current max
try:
idx = first_nonzero_index(self._remaining)
next_seq = self.seqs[idx]
self._remaining[idx] -= 1
return next(next_seq)
except NoNonzeroError:
# reset self._remaining so it can be reused
if not self.max_val:
self._remaining = []
logging.debug("MultiSequenceGenerator: Stop condition "
"reached. Final values are: {}".format(
self.seqs))
self.throw()
def add_sequence(self):
if not self._remaining:
logging.debug("MultiSequenceGenerator: adding new sequence")
self.seqs.append(Sequence(self._next_index, self._base_value))
self._next_index += 1
def stop(self):
logging.info("MultiSequenceGenerator: stop called")
logging.debug("seqs are: {}".format(self.seqs))
with self.lock:
self.max_val = max([seq.val for seq in self.seqs])
self._remaining = [self.max_val - seq.val for seq in self.seqs]
logging.debug("_remaining: {}".format(self._remaining))
def last_sent(self):
return [('{}'.format(key), val) for (key,val) in
[(seq.index, seq.val) for seq in self.seqs]]
def send(self, ignored_arg):
with self.lock:
if self._remaining:
idx, val = self._next_catchup_value()
else:
idx, val = self._next_value_()
return self.format_value(val, idx)
def throw(self, type=None, value=None, traceback=None):
raise StopIteration
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
return self.send(None)
def close(self):
"""Raise GeneratorExit inside generator.
"""
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError("generator ignored GeneratorExit")
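# Illustrative usage sketch for MultiSequenceGenerator (not from the original file;
# the address is a placeholder):
#
#   gen = MultiSequenceGenerator(initial_partitions=2)
#   sender = Sender('127.0.0.1:7000', Reader(gen))
#   sender.start()
#   gen.add_sequence()   # hot-add another partition while sending
#   gen.stop()           # let all partitions catch up to the same final value
#   sender.last_sent()   # per-partition (key, value) pairs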
def sequence_generator(stop=1000, start=0, header_fmt='>I', partition=''):
"""
Generate a sequence of integers, encoded as big-endian U64.
`stop` denotes the maximum value of the sequence (inclusive)
`start` denotes the starting value of the sequence (exclusive)
`header_length` denotes the byte length of the length header
`header_fmt` is the format to use for encoding the length using
`struct.pack`
`partition` is a string representing the optional partition key. It is
empty by default.
"""
partition = partition.encode()
size = 8 + len(partition)
fmt = '>Q{}s'.format(len(partition)) if partition else '>Q'
for x in range(start+1, stop+1):
yield struct.pack(header_fmt, size)
if partition:
yield struct.pack(fmt, x, partition)
else:
yield struct.pack(fmt, x)
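# For example (illustrative), `Reader(sequence_generator(stop=1000))` can be passed
# to a Sender to stream the first 1000 length-framed integers.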
def iter_generator(items,
to_bytes=lambda s: s.encode()
if isinstance(s, basestring) else str(s).encode(),
header_fmt='>I',
on_next=None):
"""
Generate a sequence of length encoded binary records from an iterator.
`items` is the iterator of items to encode
`to_bytes` is a function for converting items to a bytes
(default:`lambda s: s.encode() if isinstance(s, basestring) else
str(s).encode()`)
`header_fmt` is the format to use for encoding the length using
`struct.pack`
"""
for val in items:
if on_next:
on_next(val)
bs = to_bytes(val)
yield struct.pack(header_fmt, len(bs))
yield bs
def files_generator(files, mode='framed', header_fmt='>I', on_next=None):
"""
Generate a sequence of binary data stubs from a set of files.
- `files`: either a single filepath or a list of filepaths.
The same filepath may be provided multiple times, in which case it will
be read that many times from start to finish.
- `mode`: 'framed' or 'newlines'. If 'framed' is used, `header_fmt` is
used to determine how many bytes to read each time. Default: 'framed'
- `header_fmt`: the format of the length encoding header used in the files
Default: '>I'
"""
if isinstance(files, basestring):
files = [files]
for path in files:
if mode == 'newlines':
for l in newline_file_generator(path):
if on_next:
on_next(l)
yield l
elif mode == 'framed':
for l in framed_file_generator(path, header_fmt):
if on_next:
on_next(l)
yield l
else:
raise ValueError("`mode` must be either 'framed' or 'newlines'")
def newline_file_generator(filepath, header_fmt='>I', on_next=None):
"""
Generate length-encoded strings from a newline-delimited file.
"""
with open(filepath, 'rb') as f:
f.seek(0, 2)
fin = f.tell()
f.seek(0)
while f.tell() < fin:
o = f.readline().strip(b'\n')
if o:
if on_next:
on_next(o)
yield struct.pack(header_fmt, len(o))
yield o
def framed_file_generator(filepath, header_fmt='>I', on_next=None):
"""
Generate length encoded records from a length-framed binary file.
"""
header_length = struct.calcsize(header_fmt)
with open(filepath, 'rb') as f:
while True:
header = f.read(header_length)
if not header:
break
expect = struct.unpack(header_fmt, header)[0]
body = f.read(expect)
if not body:
break
if on_next:
on_next(header + body)
yield header
yield body
class Reader(object):
"""
A BufferedReader interface over a bytes generator
"""
def __init__(self, generator):
self.gen = generator
self.overflow = b''
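    # read(num) pulls bytes from the wrapped generator until at least `num` bytes are
    # buffered, returns the first `num` bytes and keeps any excess in self.overflow.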
def read(self, num):
remaining = num
out = io.BufferedWriter(io.BytesIO())
remaining -= out.write(self.overflow)
while remaining > 0:
try:
remaining -= out.write(next(self.gen))
except StopIteration:
break
# first num bytes go to return, remainder to overflow
out.seek(0)
r = out.raw.read(num)
self.overflow = out.raw.read()
return r
class ALOSequenceGenerator(BaseIter, BaseSource):
"""
A sequence generator with a resettable position.
Starts at 1, and stops aftering sending `stop`.
Usage: `ALOSequenceGenerator(partition, stop=1000, data=None)`
if `data` is a list, data generated is appended to it in order
as (position, value) tuples.
"""
def __init__(self, key, stop=None, start=0):
self.partition = key
self.name = key.encode()
self.key = key.encode()
self.position = start
self._stop = stop
self.start = start
self.stopped = False
self.paused = False
def __str__(self):
return ("ALOSequenceGenerator(partition: {}, stopped: {}, point_of_ref: {})"
.format(self.name, self.stopped, self.point_of_ref()))
def point_of_ref(self):
return self.position
def reset(self, pos=None):
if pos is None:
pos = self.start
self.position = pos
def __next__(self):
# This has to be before the increment, otherwise point_of_ref()
# doesn't return the previous position!
if self.stopped:
raise StopIteration
if self._stop is not None:
if self.position >= self._stop:
raise StopIteration
if self.paused:
return (None, self.position)
self.position += 1
val, pos, key = (self.position, self.position, self.key)
payload = struct.pack('>Q{}s'.format(len(key)), val, key)
return (payload, pos)
def close(self):
self.closed = True
def stop(self):
self.stopped = True
def pause(self):
self.paused = True
def resume(self):
self.paused = False
class ALOSender(StoppableThread):
"""
A wrapper for MultiSourceConnector to look like a regular TCP Sender
"""
def __init__(self, sources, version, cookie, program_name, instance_name,
addr):
super(ALOSender, self).__init__()
host, port = addr.split(':')
port = int(port)
self.client = client = MultiSourceConnector(
version,
cookie,
program_name,
instance_name,
host, port)
self.name = "ALOSender_{}".format("-".join(
[source.partition for source in sources]))
self.sources = sources
logging.debug("ALO: sources = {}".format(sources))
self.data = []
self.client.data = self.data
for source in self.sources:
source.data = self.data
self.host = host
self.port = port
self.start_time = None
self.error = None
self.batch = [] # for compatibility with Sender during validations
def run(self):
self.start_time = datetime.datetime.now()
self.client.connect()
for source in self.sources:
self.client.add_source(source)
self.error = self.client.join()
def stop(self, error=None):
logging.debug("ALOSender stop")
for source in self.sources:
logging.debug("source to stop: {}".format(source))
source.stop()
if error is not None:
self.client.shutdown(error=error)
def pause(self):
logging.debug("ALOSender pause: pausing {} sources"
.format(len(self.sources)))
for source in self.sources:
source.pause()
def resume(self):
logging.debug("ALOSender resume: resuming {} sources"
.format(len(self.sources)))
for source in self.sources:
source.resume()
def last_sent(self):
return [(source.partition, source.position) for source in self.sources]
|
the-stack_0_11358 | #!/usr/bin/env python
#
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Update the prebuilt clang from the build server."""
from __future__ import print_function
import argparse
import inspect
import os
import shutil
import subprocess
import sys
THIS_DIR = os.path.realpath(os.path.dirname(__file__))
ANDROID_DIR = os.path.realpath(os.path.join(THIS_DIR, '../..'))
BRANCH = 'aosp-llvm'
def android_path(*args):
return os.path.join(ANDROID_DIR, *args)
class ArgParser(argparse.ArgumentParser):
def __init__(self):
super(ArgParser, self).__init__(
description=inspect.getdoc(sys.modules[__name__]))
self.add_argument(
'build', metavar='BUILD',
help='Build number to pull from the build server.')
self.add_argument(
'-b', '--bug', type=int,
help='Bug to reference in commit message.')
self.add_argument(
'--use-current-branch', action='store_true',
help='Do not repo start a new branch for the update.')
def host_to_build_host(host):
"""Gets the build host name for an NDK host tag.
The Windows builds are done from Linux.
"""
return {
'darwin': 'mac',
'linux': 'linux',
'windows': 'linux',
}[host]
def build_name(host):
"""Gets the build name for a given host.
The build name is either "linux" or "darwin", with any Windows builds
coming from "linux".
"""
return {
'darwin': 'darwin',
'linux': 'linux',
'windows': 'linux',
}[host]
def package_name(build_number, host):
"""Returns the file name for a given package configuration.
>>> package_name('1234', 'linux')
'clang-1234-linux-x86.tar.bz2'
"""
return 'clang-{}-{}-x86.tar.bz2'.format(build_number, host)
def download_build(host, build_number, download_dir):
url_base = 'https://android-build-uber.corp.google.com'
path = 'builds/{branch}-{build_host}-{build_name}/{build_num}'.format(
branch=BRANCH,
build_host=host_to_build_host(host),
build_name=build_name(host),
build_num=build_number)
pkg_name = package_name(build_number, host)
url = '{}/{}/{}'.format(url_base, path, pkg_name)
TIMEOUT = '60' # In seconds.
out_file_path = os.path.join(download_dir, pkg_name)
with open(out_file_path, 'w') as out_file:
print('Downloading {} to {}'.format(url, out_file_path))
subprocess.check_call(
['sso_client', '--location', '--request_timeout', TIMEOUT, url],
stdout=out_file)
return out_file_path
def extract_package(package, install_dir):
cmd = ['tar', 'xf', package, '-C', install_dir]
print('Extracting {}...'.format(package))
subprocess.check_call(cmd)
def update_clang(host, build_number, use_current_branch, download_dir, bug):
host_tag = host + '-x86'
prebuilt_dir = android_path('prebuilts/clang/host', host_tag)
os.chdir(prebuilt_dir)
if not use_current_branch:
subprocess.check_call(
['repo', 'start', 'update-clang-{}'.format(build_number), '.'])
package = download_build(host, build_number, download_dir)
install_subdir = 'clang-' + build_number
extract_package(package, prebuilt_dir)
print('Adding files to index...')
subprocess.check_call(['git', 'add', install_subdir])
version_file_path = os.path.join(install_subdir, 'AndroidVersion.txt')
with open(version_file_path) as version_file:
version = version_file.read().strip()
print('Committing update...')
message_lines = [
'Update prebuilt Clang to build {}.'.format(build_number),
'',
'Built from version {}.'.format(version),
]
if bug is not None:
message_lines.append('')
message_lines.append('Bug: http://b/{}'.format(bug))
message = '\n'.join(message_lines)
subprocess.check_call(['git', 'commit', '-m', message])
def main():
args = ArgParser().parse_args()
download_dir = os.path.realpath('.download')
if os.path.isdir(download_dir):
shutil.rmtree(download_dir)
os.makedirs(download_dir)
try:
hosts = ('darwin', 'linux', 'windows')
for host in hosts:
update_clang(host, args.build, args.use_current_branch,
download_dir, args.bug)
finally:
shutil.rmtree(download_dir)
if __name__ == '__main__':
main()
|
the-stack_0_11364 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CNN completely definable via command line arguments.
Provides create().
Author: Jan Schlüter
"""
import re
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import PickDictKey, PutDictKey, ReceptiveField
from .layers import (nonlinearity, SpatialLogMeanExp, Shift, Crop, Squeeze,
ShakeShake)
class Cat(nn.ModuleList):
"""
Modules applied to the same input and concatenated along the channels.
"""
def forward(self, x):
return torch.cat([module(x) for module in self], dim=1)
class Add(nn.ModuleList):
"""
Modules applied to the same input and then added up.
"""
def forward(self, x):
modules = iter(self)
first = next(modules)
return sum((module(x) for module in modules), first(x))
class Mul(nn.ModuleList):
"""
Modules applied to the same input and then multiplied.
"""
def forward(self, x):
modules = iter(self)
y = next(modules)(x)
for module in modules:
y = y * module(x)
return y
def custom_cnn(input_channels, specification, input_name='input',
output_name='output', default_nonlin='relu', batch_norm=False):
"""
Creates a CNN for the given number of input channels, with an architecture
defined as a comma-separated string of layer definitions. Supported layer
definitions are (with variables in <>, and optional parts in []):
- pad1d:<method>@<size>
- pad2d:<method>@<size>
- crop1d:<size>
- crop2d:<size>
- conv1d:<channels>@<size>[s<stride>][p<pad>][d<dilation>][g<groups>]
- conv2d:<channels>@<size0>x<size1>[s<stride>][p<pad>][d<dilation>][g<groups>]
    - pool1d:<method>@<size>[s<stride>][p<pad>][d<dilation>]
- pool2d:<method>@<size0>x<size1>[s<stride>][p<pad>][d<dilation>]
- globalpool1d:<method>
- globalpool2d:<method>
- globallmepool:<alpha>[t<trainable>][c<channelwise>][e<exponentiated>]
- bn1d
- bn2d
- groupnorm:<groups>
- dropout:<drop_probability>
- relu
- lrelu
- sigm
- swish
- mish
- bipol:<nonlin>
- shift:<amount>
- bypass (does nothing)
- squeeze:<dim>
- cat[layers1|layers2|...] (apply stacks to same input, then concat)
- add[layers1|layers2|...] (apply stacks to same input, then add)
- shake[layers1|layers2|...] (apply stacks to same input, then shake-shake)
    If there is a batch normalization layer one or two layers after a convolution,
    the convolution will not have a bias term.
"""
def read_layers(s):
"""
Yields all layer definitions (as separated by , | [ or ]) as tuples
of the definition string and the following delimiter.
"""
pos = 0
for match in re.finditer(r'[,|[\]]', s):
yield s[pos:match.start()], s[match.start():match.end()]
pos = match.end()
yield s[pos:], None
def read_size(s, t=int, expect_remainder=True):
"""
Read and parse a size (e.g., 1, 1x1, 1x1x1) at the beginning of `s`,
with elements of type `t`. If `expect_remainder`, returns the
remainder, otherwise tries to parse the complete `s` as a size.
"""
if expect_remainder:
# yes, we could use a precompiled regular expression...
p = next((i for i, c in enumerate(s) if c not in '0123456789x'),
len(s))
remainder = s[p:]
s = s[:p]
size = tuple(map(t, s.split('x')))
if len(size) == 1:
size = size[0]
if expect_remainder:
return size, remainder
else:
return size
def size_string(size):
"""
Convert a size integer or tuple back into its string form.
"""
try:
return 'x'.join(map(str, size))
except TypeError:
return str(size)
def read_extra_sizes(s, prefixes, t=int):
"""
Read and parse any extra size definitions prefixed by any of the
        allowed prefixes, and return them as a dictionary. If `prefixes` is
a dictionary, the prefixes (keys) will be translated to the expanded
names (values) in the returned dictionary. Values will be converted
from strings to `t`.
"""
if not isinstance(prefixes, dict):
prefixes = {prefix: prefix for prefix in prefixes}
result = {}
while s:
for prefix, return_key in prefixes.items():
if s.startswith(prefix):
size, s = read_size(s[len(prefix):], t)
result[return_key] = size
break
else:
raise ValueError("unrecognized part in layer definition: "
"%r" % s)
return result
stack = []
layers = []
if input_name:
layers = [PickDictKey(input_name)]
# track receptive field for the full network
receptive_field = ReceptiveField()
# split specification string into definition, delimiter tuples
specification = list(read_layers(specification))
# iterate over it (in a way that allows us to expand macro definitions)
while specification:
layer_def, delim = specification.pop(0)
layer_def = layer_def.split(':')
kind = layer_def[0]
if kind in ('pad1d', 'pad2d'):
method, size = layer_def[1].split('@')
size = read_size(size, expect_remainder=False)
cls = {'reflectpad1d': nn.ReflectionPad1d,
'reflectpad2d': nn.ReflectionPad2d}[method + kind]
layers.append(cls(size))
receptive_field *= ReceptiveField(padding=size)
elif kind in ('crop1d', 'crop2d'):
size = int(layer_def[1])
dimensionality = int(kind[-2])
layers.append(Crop(dimensionality, size))
receptive_field *= ReceptiveField(padding=-size)
elif kind in ('conv1d', 'conv2d'):
channels, remainder = layer_def[1].split('@')
channels = int(channels)
size, remainder = read_size(remainder)
params = dict(stride=1, padding=0, dilation=1, groups=1)
params.update(read_extra_sizes(
remainder, dict(s='stride', p='padding', d='dilation',
g='groups')))
cls = {'conv1d': nn.Conv1d, 'conv2d': nn.Conv2d}[kind]
layers.append(cls(input_channels, channels, size, **params))
input_channels = channels
# effective kernel size:
size = (np.array(size) - 1) * params['dilation'] + 1
receptive_field *= ReceptiveField(size, params['stride'],
params['padding'])
elif kind in ('pool1d', 'pool2d'):
method, size = layer_def[1].split('@')
size, remainder = read_size(size)
params = dict(stride=None, padding=0, dilation=1)
params.update(read_extra_sizes(
remainder, dict(s='stride', p='padding', d='dilation')))
cls = {'maxpool1d': nn.MaxPool1d, 'meanpool1d': nn.AvgPool1d,
'maxpool2d': nn.MaxPool2d, 'meanpool2d': nn.AvgPool2d}[method + kind]
layers.append(cls(size, **params))
# effective kernel size:
size = (np.array(size) - 1) * params['dilation'] + 1
if params['stride'] is None:
params['stride'] = size
receptive_field *= ReceptiveField(size, params['stride'],
params['padding'])
elif kind in ('globalpool1d', 'globalpool2d'):
method = layer_def[1]
cls = {'maxglobalpool1d': nn.AdaptiveMaxPool1d,
'meanglobalpool1d': nn.AdaptiveAvgPool1d,
'maxglobalpool2d': nn.AdaptiveMaxPool2d,
'meanglobalpool2d': nn.AdaptiveAvgPool2d}[method + kind]
layers.append(cls(output_size=1))
# we do not adjust the receptive field; it spans the whole input
elif kind == 'globallmepool':
alpha, remainder = read_size(layer_def[1], float)
params = read_extra_sizes(
remainder, dict(t='trainable', c='per_channel', e='exp'),
t=lambda s: bool(int(s)))
layers.append(SpatialLogMeanExp(alpha, in_channels=input_channels,
keepdim=True, **params))
# we do not adjust the receptive field; it spans the whole input
elif kind == 'bn1d':
if len(layers) >= 1 and hasattr(layers[-1], 'bias'):
layers[-1].register_parameter('bias', None)
            elif len(layers) >= 2 and hasattr(layers[-2], 'bias'):
layers[-2].register_parameter('bias', None)
layers.append(nn.BatchNorm1d(input_channels))
elif kind == 'bn2d':
if len(layers) >= 1 and hasattr(layers[-1], 'bias'):
layers[-1].register_parameter('bias', None)
elif len(layers) >= 2 and hasattr(layers[-2], 'bias'):
layers[-2].register_parameter('bias', None)
layers.append(nn.BatchNorm2d(input_channels))
elif kind == 'groupnorm':
groups = int(layer_def[1])
layers.append(nn.GroupNorm(groups, input_channels))
elif kind == 'dropout':
p = float(layer_def[1])
layers.append(nn.Dropout(p))
elif kind == 'squeeze':
dim = int(layer_def[1])
layers.append(Squeeze(dim))
elif kind == 'shift':
amount = float(layer_def[1])
layers.append(Shift(amount))
elif kind == 'bypass':
layers.append(nn.Identity())
elif kind == 'cat':
stack.append((layers, input_channels, receptive_field))
stack.append((Cat(), input_channels, receptive_field))
layers = []
receptive_field = ReceptiveField()
elif kind == 'add':
stack.append((layers, input_channels, receptive_field))
stack.append((Add(), input_channels, receptive_field))
layers = []
receptive_field = ReceptiveField()
elif kind == 'mul':
stack.append((layers, input_channels, receptive_field))
stack.append((Mul(), input_channels, receptive_field))
layers = []
receptive_field = ReceptiveField()
elif kind == 'shake':
stack.append((layers, input_channels, receptive_field))
stack.append((ShakeShake(), input_channels, receptive_field))
layers = []
receptive_field = ReceptiveField()
elif kind == '':
pass
elif kind == 'mbconv2d':
# mobile inverted bottleneck convolution layer from MobileNetV2
channels, remainder = layer_def[1].split('@')
channels = int(channels)
size, remainder = read_size(remainder)
params = dict(stride=1, dilation=1, groups=1, expansion=1,
size=size, channels=channels)
params.update(read_extra_sizes(
remainder, dict(s="stride", d="dilation", g="groups",
e="expansion")))
hidden_channels = int(input_channels * params['expansion'])
# define layers
macro = []
# 1x1 channel expansion
if hidden_channels != input_channels:
macro.append('conv2d:%d@1x1g%d' %
(hidden_channels, params['groups']))
if batch_norm:
macro.append('bn2d')
macro.append(default_nonlin)
# channelwise convolution
macro.append('conv2d:%d@%ss%sd%sg%d' %
(hidden_channels, size_string(size),
size_string(params['stride']),
size_string(params['dilation']),
hidden_channels))
if batch_norm:
macro.append('bn2d')
macro.append(default_nonlin)
# linear projection
macro.append('conv2d:%d@1x1g%d' % (channels, params['groups']))
# residual shortcut, if applicable
macro = ','.join(macro)
if params['stride'] == 1 and channels == input_channels:
crop = ((np.array(size) - 1) * params['dilation'] + 1) // 2
macro = 'add[%s|%s]' % ('crop2d:%d' % crop[0], macro)
# push to beginning of remaining layer specifications
specification[:0] = read_layers(macro)
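            # Illustrative expansion (assuming batch_norm=True, default_nonlin='relu',
            # stride 1 and input_channels equal to the output channels, here 16):
            # 'mbconv2d:16@3x3e6' expands to roughly
            # 'add[crop2d:1|conv2d:96@1x1g1,bn2d,relu,conv2d:96@3x3s1d1g96,bn2d,relu,conv2d:16@1x1g1]'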
elif kind == 'bipol':
layers.append(nonlinearity('bipol:' + layer_def[1]))
else:
try:
layers.append(nonlinearity(kind))
except KeyError:
raise ValueError('Unknown layer type "%s"' % kind)
if delim is not None and delim in '|]':
if isinstance(layers, list):
layers = nn.Sequential(*layers) if len(layers) > 1 else layers[0]
layers.receptive_field = receptive_field
layers.out_channels = input_channels
# append layers to Cat() or Add()
stack[-1][0].append(layers)
if delim == '|':
# reset input_channels to match input of Cat() or Add()
input_channels = stack[-1][1]
# we expect another set of layers
layers = []
receptive_field = ReceptiveField()
elif delim == ']':
# take the Cat() or Add() from the stack
layers, _, receptive_field = stack.pop()
# append it to what we were building before
stack[-1][0].append(layers)
# and continue there
if isinstance(layers, Cat):
input_channels = sum(path.out_channels for path in layers)
receptive_field *= sum(path.receptive_field for path in layers)
layers, _, _ = stack.pop()
if stack:
raise ValueError('There seems to be a missing "]" bracket.')
if output_name:
layers.append(PutDictKey(output_name))
if isinstance(layers, list):
layers = nn.Sequential(*layers)
layers.receptive_field = receptive_field
layers.out_channels = input_channels
return layers
def create(cfg, shapes, dtypes, num_classes):
"""
Instantiates a Model for the given data shapes and dtypes.
"""
input_channels = shapes['input'][0]
specification = cfg['model.arch']
num_outputs = 1 if num_classes == 2 else num_classes
specification = specification.replace('C', str(num_outputs))
input_name = cfg.get('model.input_name', 'input')
output_name = cfg.get('model.output_name', 'output')
return custom_cnn(input_channels, specification, input_name, output_name,
default_nonlin=cfg.get('model.nonlin', 'relu'),
batch_norm=cfg.get('model.batch_norm', False))
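# Hypothetical usage sketch (config values are made up for illustration):
#   cfg = {'model.arch': 'conv2d:64@3x3,bn2d,relu,globalpool2d:mean,conv2d:C@1x1,squeeze:3,squeeze:2'}
#   model = create(cfg, shapes={'input': (1, 80, 1000)}, dtypes=None, num_classes=2)
# num_classes=2 maps to a single output channel, so 'C' becomes '1' before parsing.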
|
the-stack_0_11367 |
# coding: utf-8
# In[1]:
import autograd.numpy as np
import autograd.numpy.random as npr
npr.seed(0)
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib.gridspec import GridSpec
import seaborn as sns
sns.set_style("white")
sns.set_context("talk")
color_names = ["windows blue",
"red",
"amber",
"faded green",
"dusty purple",
"orange",
"clay",
"pink",
"greyish",
"mint",
"light cyan",
"steel blue",
"forest green",
"pastel purple",
"salmon",
"dark brown"]
colors = sns.xkcd_palette(color_names)
import ssm
from ssm.variational import SLDSMeanFieldVariationalPosterior, SLDSTriDiagVariationalPosterior
from ssm.util import random_rotation, find_permutation
# Specify whether or not to save figures
save_figures = True
# In[2]:
# Set the parameters of the HMM
T = 200 # number of time bins
K = 5 # number of discrete states
D = 2 # number of latent dimensions
N = 10 # number of observed dimensions
# In[3]:
# Make an LDS with the somewhat interesting dynamics parameters
true_lds = ssm.LDS(N, D, emissions="gaussian")
A0 = .99 * random_rotation(D, theta=np.pi/20)
# S = (1 + 3 * npr.rand(D))
S = np.arange(1, D+1)
R = np.linalg.svd(npr.randn(D, D))[0] * S
A = R.dot(A0).dot(np.linalg.inv(R))
b = npr.randn(D)
true_lds.dynamics.As[0] = A
true_lds.dynamics.bs[0] = b
_, x, y = true_lds.sample(T)
# In[4]:
# Plot the dynamics vector field
xmins = x.min(axis=0)
xmaxs = x.max(axis=0)
npts = 20
true_lds.dynamics.As[0] = A
XX, YY = np.meshgrid(np.linspace(xmins[0], xmaxs[0], npts), np.linspace(xmins[1], xmaxs[1], npts))
XY = np.column_stack((XX.ravel(), YY.ravel(), np.zeros((npts**2, D-2))))
dx = XY.dot(A.T) + b - XY
plt.figure(figsize=(6, 6))
plt.quiver(XX, YY, dx[:,0], dx[:,1], color=colors[0])
plt.plot(x[:,0], x[:,1], '-k', lw=3)
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.title("Simulated Latent States")
plt.tight_layout()
if save_figures:
plt.savefig("lds_1.pdf")
# In[5]:
# Plot the dynamics vector field
plt.figure(figsize=(8, 6))
gs = GridSpec(2, 1, height_ratios=(1, N/D))
# Plot the continuous latent states
lim = abs(x).max()
plt.subplot(gs[0])
for d in range(D):
plt.plot(x[:, d] + lim * d, '-k')
plt.yticks(np.arange(D) * lim, ["$x_{}$".format(d+1) for d in range(D)])
plt.xticks([])
plt.xlim(0, T)
plt.title("Simulated Latent States")
lim = abs(y).max()
plt.subplot(gs[1])
for n in range(N):
plt.plot(y[:, n] - lim * n, '-k')
plt.yticks(-np.arange(N) * lim, ["$y_{{ {} }}$".format(n+1) for n in range(N)])
plt.xlabel("time")
plt.xlim(0, T)
plt.title("Simulated Observations")
plt.tight_layout()
if save_figures:
plt.savefig("lds_2.pdf")
# In[6]:
print("Fitting LDS with SVI")
# Create the model and initialize its parameters
lds = ssm.LDS(N, D, emissions="gaussian_orthog")
lds.initialize(y)
# Create a variational posterior
q_mf = SLDSMeanFieldVariationalPosterior(lds, y)
q_mf_elbos = lds.fit(q_mf, y, num_iters=1000, initialize=False)
# Get the posterior mean of the continuous states
q_mf_x = q_mf.mean[0]
# In[7]:
# Smooth the data under the variational posterior
q_mf_y = lds.smooth(q_mf_x, y)
# In[8]:
print("Fitting LDS with SVI using structured variational posterior")
lds = ssm.LDS(N, D, emissions="gaussian_orthog")
lds.initialize(y)
q_struct = SLDSTriDiagVariationalPosterior(lds, y)
q_struct_elbos = lds.fit(q_struct, y, num_iters=1000, initialize=False)
# Get the posterior mean of the continuous states
q_struct_x = q_struct.mean[0]
# Smooth the data under the variational posterior
q_struct_y = lds.smooth(q_struct_x, y)
# In[9]:
# Plot the ELBOs
plt.plot(q_mf_elbos, label="MF")
plt.plot(q_struct_elbos, label="LDS")
plt.xlabel("Iteration")
plt.ylabel("ELBO")
plt.legend()
# In[10]:
plt.figure(figsize=(8,4))
plt.plot(x + 4 * np.arange(D), '-k')
for d in range(D):
plt.plot(q_mf_x[:,d] + 4 * d, '-', color=colors[0], label="MF" if d==0 else None)
plt.plot(q_struct_x[:,d] + 4 * d, '-', color=colors[1], label="Struct" if d==0 else None)
plt.ylabel("$x$")
plt.legend()
# In[11]:
# Plot the smoothed observations
plt.figure(figsize=(8,4))
for n in range(N):
plt.plot(y[:, n] + 4 * n, '-k', label="True" if n == 0 else None)
plt.plot(q_mf_y[:, n] + 4 * n, '--', color=colors[0], label="MF" if n == 0 else None)
plt.plot(q_struct_y[:, n] + 4 * n, ':', color=colors[1], label="Struct" if n == 0 else None)
plt.legend()
plt.xlabel("time")
# # Fit an HMM to the LDS states
# In[13]:
from ssm.models import HMM
N_iters = 50
K = 15
hmm = ssm.HMM(K, D, observations="gaussian")
hmm_lls = hmm.fit(x, method="em", num_em_iters=N_iters)
z = hmm.most_likely_states(x)
# In[14]:
plt.plot(hmm_lls, label="EM")
plt.xlabel("EM Iteration")
plt.ylabel("Log Probability")
plt.legend(loc="lower right")
# In[15]:
# Plot the observation distributions
from hips.plotting.colormaps import white_to_color_cmap
xmins = x.min(axis=0)
xmaxs = x.max(axis=0)
npts = 100
XX, YY = np.meshgrid(np.linspace(xmins[0], xmaxs[0], npts), np.linspace(xmins[1], xmaxs[1], npts))
data = np.column_stack((XX.ravel(), YY.ravel(), np.zeros((npts**2, D-2))))
input = np.zeros((data.shape[0], 0))
mask = np.ones_like(data, dtype=bool)
tag = None
lls = hmm.observations.log_likelihoods(data, input, mask, tag)
plt.figure(figsize=(6, 6))
for k in range(K):
plt.contour(XX, YY, np.exp(lls[:,k]).reshape(XX.shape),
cmap=white_to_color_cmap(colors[k % len(colors)]))
plt.plot(x[z==k, 0], x[z==k, 1], 'o', mfc=colors[k], mec='none', ms=4)
plt.plot(x[:,0], x[:,1], '-k', lw=2, alpha=.5)
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.title("Observation Distributions")
plt.tight_layout()
if save_figures:
plt.savefig("lds_3.pdf")
# In[16]:
# Simulate from the HMM fit
smpls = [hmm.sample(T-1, prefix=(z[:1], x[:1])) for _ in range(1)]
# In[17]:
plt.figure(figsize=(8, 6))
lim = abs(x).max()
for d in range(D):
plt.plot(x[:,d] - d * lim, '-k', lw=4)
for i, (_, x_smpl) in enumerate(smpls):
x_smpl = np.concatenate((x[:1], x_smpl))
plt.plot(x_smpl[:,d] - d*lim, '-', lw=1, color=colors[i])
plt.yticks(-np.arange(D) * lim, ["$x_{}$".format(d+1) for d in range(D)])
plt.xlabel("time")
plt.xlim(0, T)
plt.title("True LDS States and Fitted HMM Simulations")
plt.tight_layout()
if save_figures:
plt.savefig("lds_4.pdf")
# In[18]:
# Plot the observation distributions
from hips.plotting.colormaps import white_to_color_cmap
xmins = x.min(axis=0)
xmaxs = x.max(axis=0)
npts = 100
XX, YY = np.meshgrid(np.linspace(xmins[0], xmaxs[0], npts), np.linspace(xmins[1], xmaxs[1], npts))
data = np.column_stack((XX.ravel(), YY.ravel(), np.zeros((npts**2, D-2))))
input = np.zeros((data.shape[0], 0))
mask = np.ones_like(data, dtype=bool)
tag = None
lls = hmm.observations.log_likelihoods(data, input, mask, tag)
plt.figure(figsize=(6, 6))
for k in range(K):
plt.contour(XX, YY, np.exp(lls[:,k]).reshape(XX.shape),
cmap=white_to_color_cmap(colors[k % len(colors)]))
plt.plot(x[z==k, 0], x[z==k, 1], 'o', mfc=colors[k], mec='none', ms=4)
plt.plot(x[:,0], x[:,1], '-k', lw=2, alpha=.5)
for i, (_, x_smpl) in enumerate(smpls):
x_smpl = np.concatenate((x[:1], x_smpl))
plt.plot(x_smpl[:,0], x_smpl[:,1], '-', lw=1, color=colors[i])
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.title("Observation Distributions")
plt.tight_layout()
if save_figures:
plt.savefig("lds_5.pdf")
# # Simulate Poisson data from an Poisson LDS with the same dynamics
# In[19]:
import copy
plds = ssm.LDS(N, D, emissions="poisson_orthog", emission_kwargs=dict(link="softplus"))
plds.dynamics.params = copy.deepcopy(true_lds.dynamics.params)
plds.emissions.ds = 0 * np.ones(N)
_, x_plds, y_plds = plds.sample(T)
# In[20]:
# Plot the dynamics vector field
xmins = x_plds.min(axis=0)
xmaxs = x_plds.max(axis=0)
npts = 20
true_lds.dynamics.As[0] = A
XX, YY = np.meshgrid(np.linspace(xmins[0], xmaxs[0], npts), np.linspace(xmins[1], xmaxs[1], npts))
XY = np.column_stack((XX.ravel(), YY.ravel(), np.zeros((npts**2, D-2))))
dx = XY.dot(A.T) + b - XY
plt.figure(figsize=(6, 6))
plt.quiver(XX, YY, dx[:,0], dx[:,1], color=colors[0])
plt.plot(x_plds[:,0], x_plds[:,1], '-k', lw=3)
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.title("Simulated Latent States")
plt.tight_layout()
if save_figures:
plt.savefig("lds_6.pdf")
# In[21]:
# Plot the dynamics vector field
plt.figure(figsize=(8, 6))
gs = GridSpec(2, 1, height_ratios=(1, N/D))
# Plot the continuous latent states
lim = abs(x).max()
plt.subplot(gs[0])
for d in range(D):
plt.plot(x[:, d] + lim * d, '-k')
plt.yticks(np.arange(D) * lim, ["$x_{}$".format(d+1) for d in range(D)])
plt.xticks([])
plt.xlim(0, T)
plt.title("Simulated Latent States")
lim = abs(y).max()
plt.subplot(gs[1])
plt.imshow(y_plds.T, cmap="Greys", aspect="auto")
plt.yticks(np.arange(N), ["$y_{{ {} }}$".format(n+1) for n in range(N)])
plt.xlabel("time")
plt.xlim(0, T)
plt.title("Simulated Poisson Observations")
plt.colorbar()
plt.tight_layout()
if save_figures:
plt.savefig("lds_7.pdf")
|
the-stack_0_11372 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
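# Illustrative round-trip with the primitives below (all values are made up):
#   tx = CTransaction()
#   tx.vin.append(CTxIn(COutPoint(0x1234, 0), b""))
#   tx.vout.append(CTxOut(1 * COIN, b"\x51"))   # anyone-can-spend script (OP_TRUE)
#   tx2 = FromHex(CTransaction(), ToHex(tx))
#   tx.rehash(); tx2.rehash()
#   assert tx.hash == tx2.hash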
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70914 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
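# CompactSize encoding sketch (values traced against the branches above):
#   ser_compact_size(100)   == b'\x64'                  # 1 byte for values < 253
#   ser_compact_size(515)   == b'\xfd\x03\x02'          # 0xfd marker + uint16 LE
#   ser_compact_size(2**20) == b'\xfe\x00\x00\x10\x00'  # 0xfe marker + uint32 LE
#   deser_compact_size(BytesIO(ser_compact_size(n))) == n for these n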
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
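# e.g. the classic minimum-difficulty nBits 0x1d00ffff expands to
#   uint256_from_compact(0x1d00ffff) == 0x00ffff << (8 * (0x1d - 3))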
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nAccumulatorCheckpoint = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nAccumulatorCheckpoint = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# VERS Uniqueness
def get_uniqueness(self, prevout):
r = b""
r += struct.pack("<I", prevout.n)
r += ser_uint256(prevout.hash)
return r
def solve_stake(self, prevouts):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for prevout in prevouts:
nvalue, txBlockTime, stakeModifier, hashStake = prevouts[prevout]
target = int(target0 * nvalue / 100) % 2**256
data = b""
data += ser_uint64(stakeModifier)
data += struct.pack("<I", txBlockTime)
                    # for zPoS, prevouts are serial hashes given as hex strings
if isinstance(prevout, COutPoint):
data += self.get_uniqueness(prevout)
else:
data += ser_uint256(uint256_from_str(bytes.fromhex(hashStake)[::-1]))
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = prevout
loop = False
break
if loop:
self.nTime += 1
return True
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
if hasattr(self, 'vchBlockSig'):
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
data += ser_uint256(self.nAccumulatorCheckpoint)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
the-stack_0_11373 | import pytest
from datetime import time, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
from pandas import (Series, Timedelta, to_timedelta, isna,
TimedeltaIndex)
from pandas._libs.tslib import iNaT
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert (to_timedelta('1 days 06:05:01.00003', box=False) ==
conv(d1 + np.timedelta64(6 * 3600 + 5 * 60 + 1, 's') +
np.timedelta64(30, 'us')))
assert (to_timedelta('15.5us', box=False) ==
conv(np.timedelta64(15500, 'ns')))
# empty string
result = to_timedelta('', box=False)
assert result.astype('int64') == iNaT
result = to_timedelta(['', ''])
assert isna(result).all()
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, 's')]))
expected = pd.Index(np.array([np.timedelta64(1, 's')]))
tm.assert_index_equal(result, expected)
# ints
result = np.timedelta64(0, 'ns')
expected = to_timedelta(0, box=False)
assert result == expected
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d', '1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([np.timedelta64(0, 'ns'), np.timedelta64(
10, 's').astype('m8[ns]')])
expected = to_timedelta([0, 10], unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
# arrays of various dtypes
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='s')
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='m')
expected = TimedeltaIndex([np.timedelta64(1, 'm')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='h')
expected = TimedeltaIndex([np.timedelta64(1, 'h')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 'D')] * 5)
tm.assert_index_equal(result, expected)
# Test with lists as input when box=false
expected = np.array(np.arange(3) * 1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array(
[0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def test_to_timedelta_invalid(self):
# bad value for errors parameter
msg = "errors must be one of"
tm.assert_raises_regex(ValueError, msg, to_timedelta,
['foo'], errors='never')
# these will error
pytest.raises(ValueError, lambda: to_timedelta([1, 2], unit='foo'))
pytest.raises(ValueError, lambda: to_timedelta(1, unit='foo'))
# time not supported ATM
pytest.raises(ValueError, lambda: to_timedelta(time(second=1)))
assert to_timedelta(time(second=1), errors='coerce') is pd.NaT
pytest.raises(ValueError, lambda: to_timedelta(['foo', 'bar']))
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(['foo', 'bar'], errors='coerce'))
tm.assert_index_equal(TimedeltaIndex(['1 day', pd.NaT, '1 min']),
to_timedelta(['1 day', 'bar', '1 min'],
errors='coerce'))
# gh-13613: these should not error because errors='ignore'
invalid_data = 'apple'
assert invalid_data == to_timedelta(invalid_data, errors='ignore')
invalid_data = ['apple', '1 days']
tm.assert_numpy_array_equal(
np.array(invalid_data, dtype=object),
to_timedelta(invalid_data, errors='ignore'))
invalid_data = pd.Index(['apple', '1 days'])
tm.assert_index_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
invalid_data = Series(['apple', '1 days'])
tm.assert_series_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, 's')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'),
timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
assert actual.value == timedelta_NaT.astype('int64')
actual = pd.to_timedelta(pd.NaT)
assert actual.value == timedelta_NaT.astype('int64')
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
assert result == expected
result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1,
milliseconds=1, microseconds=1, nanoseconds=1)
expected = Timedelta(694861001001001)
assert result == expected
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
assert result == expected
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
assert result == expected
result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
assert result == expected
pytest.raises(TypeError, lambda: Timedelta(nanoseconds='abc'))
|
the-stack_0_11374 | # -*- coding: utf-8 -*-
"""Development settings and globals."""
from __future__ import absolute_import
from os.path import join, normpath
from .base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
########## END TOOLBAR CONFIGURATION
LOCAL_SETTINGS = True
|
the-stack_0_11375 | from django import forms
from django.db.models import Q
from common.models import User, Attachments, Comment
from contacts.models import Contact
from events.models import Event
from teams.models import Teams
class EventForm(forms.ModelForm):
WEEKDAYS = (('Monday', 'Monday'),
('Tuesday', 'Tuesday'),
('Wednesday', 'Wednesday'),
('Thursday', 'Thursday'),
('Friday', 'Friday'),
('Saturday', 'Saturday'),
('Sunday', 'Sunday'))
recurring_days = forms.MultipleChoiceField(
required=False, choices=WEEKDAYS)
teams_queryset = []
teams = forms.MultipleChoiceField(choices=teams_queryset)
def __init__(self, *args, **kwargs):
request_user = kwargs.pop('request_user', None)
self.obj_instance = kwargs.get('instance', None)
super(EventForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
if request_user.role == 'ADMIN' or request_user.is_superuser:
self.fields['assigned_to'].queryset = User.objects.filter(is_active=True)
self.fields["contacts"].queryset = Contact.objects.filter()
self.fields['assigned_to'].required = True
self.fields["teams"].choices = [(team.get('id'), team.get('name')) for team in
Teams.objects.all().values('id', 'name')]
# elif request_user.google.all():
# self.fields['assigned_to'].queryset = User.objects.none()
# self.fields["contacts"].queryset = Contact.objects.filter(
# Q(assigned_to__in=[request_user]) | Q(created_by=request_user))
# self.fields['assigned_to'].required = False
elif request_user.role == 'USER':
self.fields['assigned_to'].queryset = User.objects.filter(
role='ADMIN')
self.fields["contacts"].queryset = Contact.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user))
self.fields['assigned_to'].required = True
else:
pass
if self.obj_instance:
# self.fields['name'].widget.attrs['readonly'] = True
self.fields['start_date'].widget.attrs['readonly'] = True
self.fields['end_date'].widget.attrs['readonly'] = True
self.fields["teams"].required = False
self.fields['name'].required = True
self.fields['event_type'].required = True
self.fields['contacts'].required = True
self.fields['start_date'].required = True
self.fields['start_time'].required = True
self.fields['end_date'].required = True
self.fields['end_time'].required = True
self.fields['description'].required = False
    def clean_recurring_days(self):
        recurring_days = self.cleaned_data.get('recurring_days')
        if not self.obj_instance:
            if self.cleaned_data.get('event_type') == 'Recurring':
                if len(recurring_days) < 1:
                    raise forms.ValidationError('Choose at least one recurring day')
        # clean_<field> methods should return the cleaned value; without this the
        # field would silently become None in cleaned_data
        return recurring_days
def clean_name(self):
name = self.cleaned_data.get('name')
if not self.obj_instance:
if Event.objects.filter(name=name).exclude(id=self.instance.id).exists():
raise forms.ValidationError(
'Event with this name already exists.')
return name
def clean_event_type(self):
""" This Validation Is For Keeping The Field Readonly While Editing or Updating"""
event_type = self.cleaned_data.get('event_type')
if self.obj_instance:
return self.obj_instance.event_type
else:
return event_type
def clean_start_date(self):
start_date = self.cleaned_data.get('start_date')
if start_date:
if self.obj_instance:
return self.obj_instance.start_date
else:
return start_date
else:
raise forms.ValidationError('Enter a valid Start date.')
def clean_end_date(self):
end_date = self.cleaned_data.get('end_date')
event_type = self.cleaned_data.get('event_type')
if event_type == 'Recurring':
if self.clean_start_date() == end_date:
raise forms.ValidationError(
'Start Date and End Date cannot be equal for recurring events')
if self.clean_start_date() > end_date:
raise forms.ValidationError(
'End Date cannot be less than start date')
return end_date
def clean_end_time(self):
end_time = self.cleaned_data.get('end_time')
if not self.cleaned_data.get('start_time'):
raise forms.ValidationError('Enter a valid start time.')
if self.cleaned_data.get('start_time') > end_time:
raise forms.ValidationError(
'End Time cannot be less than Start Time')
return end_time
class Meta:
model = Event
fields = (
'name', 'event_type', 'contacts', 'assigned_to', 'start_date', 'start_time',
'end_date', 'end_time', 'description',
)
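    # Illustrative usage only (the view wiring below is an assumption, not part of
    # this module): the form expects the current user via the `request_user` kwarg,
    # which __init__ pops before calling the parent constructor, e.g.
    #
    #   form = EventForm(request.POST or None, request_user=request.user)
    #   if form.is_valid():
    #       event = form.save()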
class EventCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=255, required=True)
class Meta:
model = Comment
fields = ('comment', 'event', 'commented_by')
class EventAttachmentForm(forms.ModelForm):
attachment = forms.FileField(max_length=1001, required=True)
class Meta:
model = Attachments
fields = ('attachment', 'event')
|
the-stack_0_11380 | #!/usr/bin/env python3
# Develop a program that finds all the genes in a bacterial genome.
# Program reads FASTA file of genome sequence
# Genes begin with ATG and end with stop codon
# Genes are at least X amino acids long (default 100)
# Genes may be on either strand
# Genes must be given unique names
# Genes must be reported in a FASTA file as their protein sequence
# Also create a genome report containing the following information
# Size of the genome in bp
# Number of genes
# Percentage of genome that is coding
# Number of genes on the positive strand
# Number of genes on the negative strand
import random
import argparse
import biotools as bt
parser = argparse.ArgumentParser(
description='Prokaryotic gene finder.')
parser.add_argument('--file', required=True, type=str,
metavar='<str>', help='FASTA file')
parser.add_argument('--minorf', required=False, type=int, default=300,
metavar='<int>', help='minimum open reading frame length [%(default)i]')
arg = parser.parse_args()
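# Example invocation (the script and FASTA file names are placeholders, not files
# shipped with this program):
#   python3 genefinder.py --file genome.fa --minorf 300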
gcode = {
'AAA' : 'K', 'AAC' : 'N', 'AAG' : 'K', 'AAT' : 'N',
'ACA' : 'T', 'ACC' : 'T', 'ACG' : 'T', 'ACT' : 'T',
'AGA' : 'R', 'AGC' : 'S', 'AGG' : 'R', 'AGT' : 'S',
'ATA' : 'I', 'ATC' : 'I', 'ATG' : 'M', 'ATT' : 'I',
'CAA' : 'Q', 'CAC' : 'H', 'CAG' : 'Q', 'CAT' : 'H',
'CCA' : 'P', 'CCC' : 'P', 'CCG' : 'P', 'CCT' : 'P',
'CGA' : 'R', 'CGC' : 'R', 'CGG' : 'R', 'CGT' : 'R',
'CTA' : 'L', 'CTC' : 'L', 'CTG' : 'L', 'CTT' : 'L',
'GAA' : 'E', 'GAC' : 'D', 'GAG' : 'E', 'GAT' : 'D',
'GCA' : 'A', 'GCC' : 'A', 'GCG' : 'A', 'GCT' : 'A',
'GGA' : 'G', 'GGC' : 'G', 'GGG' : 'G', 'GGT' : 'G',
'GTA' : 'V', 'GTC' : 'V', 'GTG' : 'V', 'GTT' : 'V',
'TAA' : '*', 'TAC' : 'Y', 'TAG' : '*', 'TAT' : 'Y',
'TCA' : 'S', 'TCC' : 'S', 'TCG' : 'S', 'TCT' : 'S',
'TGA' : '*', 'TGC' : 'C', 'TGG' : 'W', 'TGT' : 'C',
'TTA' : 'L', 'TTC' : 'F', 'TTG' : 'L', 'TTT' : 'F',
}
def anti(seq):
forward = 'ACGTRYMKWSBDHV'
reverse = 'TGCAYRKMWSVHBD'
table = seq.maketrans(forward, reverse)
return seq.translate(table)[::-1]
# returns the reverse complement of seq (the translation table also covers IUPAC ambiguity codes)
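# Quick illustrative check (not part of the original script): anti('ATGC') -> 'GCAT'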
def get_orfs(seq, min):
orfs = []
stop_used = {}
for i in range(len(seq) - 2):
codon = seq[i:i+3]
if codon == 'ATG':
atg = i
for j in range(atg + 3, len(seq) - 2, 3):
codon = seq[j:j+3]
if codon == 'TAG' or codon == 'TAA' or codon == 'TGA':
break
stp = j + 2
if stp - atg + 1 > min and stp not in stop_used:
stop_used[stp] = True
orfs.append(seq[atg:stp +1])
return orfs
# scans the sequence for ORFs: every ATG opens a candidate ORF that is closed at the
# first in-frame stop codon; each stop position is only used once, so nested ATGs that
# share a stop codon yield a single (the longest) ORF
def translate(orf):
pro = []
for i in range(0, len(orf), 3):
codon = orf[i:i+3]
if codon in gcode: pro.append(gcode[codon])
else: pro.append('X')
return ''.join(pro)
#looks at each orf and translates it into amino acids
def comp(seq):
A = seq.count('A')
C = seq.count('C')
G = seq.count('G')
T = seq.count('T')
total = A + C + G + T
return A/total, C/total, G/total, T/total
#tells us what nt frequency is in actual genome
def randseq(length, a, c, g, t):
pool = int(a * 100) * "A" + int(c * 100) * "C" + int(g * 100) * "G" + int(t * 100) * "T"
seq = []
for i in range(length):
seq.append(random.choice(pool))
return ''.join(seq)
#uses nt frequency to make random genome
n = 0
len_orfs = 0
for name, seq in bt.read_fasta(arg.file):
orfs1 = get_orfs(seq, arg.minorf)
orfs2 = get_orfs(anti(seq), arg.minorf)
for orf in orfs1:
n += 1
len_orfs += len(orf)
print(f'>Protein+{n}')
print(translate(orf))
for orf in orfs2:
n += 1
len_orfs += len(orf)
print(f'>Protein-{n}')
print(translate(orf))
print(f'Number of + genes: {len(orfs1)}')
print(f'Number of - genes: {len(orfs2)}')
print(f'Number of genes: {len(orfs1 + orfs2)}')
print(f'Genome size: {len(seq)}')
print(f'Coding nts: {len_orfs}')
print(f'Percentage genome coding: {len_orfs/len(seq)}')
a, c, g, t = comp(seq)
#count of real genome
seq = randseq(int(10000), a, c, g, t)
count = 0
for orf in get_orfs(seq, arg.minorf):
count += 1
for orf in get_orfs(anti(seq), arg.minorf):
count += 1
#counts/prints how many orfs are in the random sequence
print(f'A: {a}, C: {c}, G: {g}, T: {t}')
print(f'Random orfs: {count}')
add_bp = 0
for bp in seq:
add_bp += 1
print(f'Rand_genome size: {add_bp}')
"""
Size of the genome in bp
Number of genes
Percentage of genome that is coding
Number of genes on the positive strand
Number of genes on the negative strand
"""
|
the-stack_0_11381 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import collections
import os
import re
import time
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
# In[2]:
def build_dataset(words, n_words, atleast=1):
count = [["PAD", 0], ["GO", 1], ["EOS", 2], ["UNK", 3]]
counter = collections.Counter(words).most_common(n_words)
counter = [i for i in counter if i[1] >= atleast]
count.extend(counter)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0:
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
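# Illustrative example (hypothetical tiny corpus, not the data used below): with
# words = ['hi', 'hi', 'bye'] and n_words = 10, the dictionary becomes
# {'PAD': 0, 'GO': 1, 'EOS': 2, 'UNK': 3, 'hi': 4, 'bye': 5} and data == [4, 4, 5].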
# In[3]:
lines = open("movie_lines.txt", encoding="utf-8", errors="ignore").read().split("\n")
conv_lines = open("movie_conversations.txt", encoding="utf-8", errors="ignore").read().split("\n")
id2line = {}
for line in lines:
_line = line.split(" +++$+++ ")
if len(_line) == 5:
id2line[_line[0]] = _line[4]
convs = []
for line in conv_lines[:-1]:
_line = line.split(" +++$+++ ")[-1][1:-1].replace("'", "").replace(" ", "")
convs.append(_line.split(","))
questions = []
answers = []
for conv in convs:
for i in range(len(conv) - 1):
questions.append(id2line[conv[i]])
answers.append(id2line[conv[i + 1]])
def clean_text(text):
text = text.lower()
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"what's", "that is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"n'", "ng", text)
text = re.sub(r"'bout", "about", text)
text = re.sub(r"'til", "until", text)
text = re.sub(r"[-()\"#/@;:<>{}`+=~|.!?,]", "", text)
return " ".join([i.strip() for i in filter(None, text.split())])
clean_questions = []
for question in questions:
clean_questions.append(clean_text(question))
clean_answers = []
for answer in answers:
clean_answers.append(clean_text(answer))
min_line_length = 2
max_line_length = 5
short_questions_temp = []
short_answers_temp = []
i = 0
for question in clean_questions:
if len(question.split()) >= min_line_length and len(question.split()) <= max_line_length:
short_questions_temp.append(question)
short_answers_temp.append(clean_answers[i])
i += 1
short_questions = []
short_answers = []
i = 0
for answer in short_answers_temp:
if len(answer.split()) >= min_line_length and len(answer.split()) <= max_line_length:
short_answers.append(answer)
short_questions.append(short_questions_temp[i])
i += 1
question_test = short_questions[500:550]
answer_test = short_answers[500:550]
short_questions = short_questions[:500]
short_answers = short_answers[:500]
# In[4]:
concat_from = " ".join(short_questions + question_test).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(
concat_from, vocabulary_size_from
)
print("vocab from size: %d" % (vocabulary_size_from))
print("Most common words", count_from[4:10])
print("Sample data", data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])
print("filtered vocab size:", len(dictionary_from))
print("% of vocab used: {}%".format(round(len(dictionary_from) / vocabulary_size_from, 4) * 100))
# In[5]:
concat_to = " ".join(short_answers + answer_test).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print("vocab from size: %d" % (vocabulary_size_to))
print("Most common words", count_to[4:10])
print("Sample data", data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])
print("filtered vocab size:", len(dictionary_to))
print("% of vocab used: {}%".format(round(len(dictionary_to) / vocabulary_size_to, 4) * 100))
# In[6]:
GO = dictionary_from["GO"]
PAD = dictionary_from["PAD"]
EOS = dictionary_from["EOS"]
UNK = dictionary_from["UNK"]
# In[7]:
for i in range(len(short_answers)):
short_answers[i] += " EOS"
# In[8]:
class Chatbot:
def __init__(
self,
size_layer,
num_layers,
embedded_size,
from_dict_size,
to_dict_size,
learning_rate,
batch_size,
):
def cells(reuse=False):
return tf.nn.rnn_cell.GRUCell(size_layer, reuse=reuse)
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None, None])
self.X_seq_len = tf.placeholder(tf.int32, [None])
self.Y_seq_len = tf.placeholder(tf.int32, [None])
batch_size = tf.shape(self.X)[0]
encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
main = tf.strided_slice(self.X, [0, 0], [batch_size, -1], [1, 1])
decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
decoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, decoder_input)
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units=size_layer, memory=encoder_embedded
)
rnn_cells = tf.contrib.seq2seq.AttentionWrapper(
cell=tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)]),
attention_mechanism=attention_mechanism,
attention_layer_size=size_layer,
)
_, last_state = tf.nn.dynamic_rnn(rnn_cells, encoder_embedded, dtype=tf.float32)
last_state = tuple(last_state[0][-1] for _ in range(num_layers))
with tf.variable_scope("decoder"):
rnn_cells_dec = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
outputs, _ = tf.nn.dynamic_rnn(
rnn_cells_dec, decoder_embedded, initial_state=last_state, dtype=tf.float32
)
self.logits = tf.layers.dense(outputs, to_dict_size)
masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(
logits=self.logits, targets=self.Y, weights=masks
)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
y_t = tf.argmax(self.logits, axis=2)
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(y_t, masks)
mask_label = tf.boolean_mask(self.Y, masks)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# In[9]:
size_layer = 256
num_layers = 2
embedded_size = 128
learning_rate = 0.001
batch_size = 16
epoch = 20
# In[10]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(
size_layer,
num_layers,
embedded_size,
len(dictionary_from),
len(dictionary_to),
learning_rate,
batch_size,
)
sess.run(tf.global_variables_initializer())
# In[11]:
def str_idx(corpus, dic):
X = []
for i in corpus:
ints = []
for k in i.split():
ints.append(dic.get(k, UNK))
X.append(ints)
return X
# In[12]:
X = str_idx(short_questions, dictionary_from)
Y = str_idx(short_answers, dictionary_to)
X_test = str_idx(question_test, dictionary_from)
Y_test = str_idx(answer_test, dictionary_to)  # answers use the answer-side dictionary, matching Y above
# In[13]:
maxlen_question = max([len(x) for x in X]) * 2
maxlen_answer = max([len(y) for y in Y]) * 2
maxlen_question, maxlen_answer
# In[14]:
def pad_sentence_batch(sentence_batch, pad_int, maxlen):
padded_seqs = []
seq_lens = []
max_sentence_len = maxlen
for sentence in sentence_batch:
padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
seq_lens.append(maxlen)
return padded_seqs, seq_lens
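# Illustrative example (values chosen for clarity, not taken from the data): with
# PAD == 0, pad_sentence_batch([[5, 6]], PAD, 4) returns ([[5, 6, 0, 0]], [4]).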
# In[15]:
for i in range(epoch):
total_loss, total_accuracy = 0, 0
X, Y = shuffle(X, Y)
for k in range(0, len(short_questions), batch_size):
index = min(k + batch_size, len(short_questions))
batch_x, seq_x = pad_sentence_batch(X[k:index], PAD, maxlen_answer)
batch_y, seq_y = pad_sentence_batch(Y[k:index], PAD, maxlen_answer)
predicted, accuracy, loss, _ = sess.run(
[tf.argmax(model.logits, 2), model.accuracy, model.cost, model.optimizer],
feed_dict={
model.X: batch_x,
model.Y: batch_y,
model.X_seq_len: seq_x,
model.Y_seq_len: seq_y,
},
)
total_loss += loss
total_accuracy += accuracy
total_loss /= len(short_questions) / batch_size
total_accuracy /= len(short_questions) / batch_size
print("epoch: %d, avg loss: %f, avg accuracy: %f" % (i + 1, total_loss, total_accuracy))
# In[16]:
for i in range(len(batch_x)):
print("row %d" % (i + 1))
print(
"QUESTION:", " ".join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0, 1, 2, 3]])
)
print(
"REAL ANSWER:",
" ".join([rev_dictionary_to[n] for n in batch_y[i] if n not in [0, 1, 2, 3]]),
)
print(
"PREDICTED ANSWER:",
" ".join([rev_dictionary_to[n] for n in predicted[i] if n not in [0, 1, 2, 3]]),
"\n",
)
# In[17]:
batch_x, seq_x = pad_sentence_batch(X_test[:batch_size], PAD, maxlen_answer)
batch_y, seq_y = pad_sentence_batch(Y_test[:batch_size], PAD, maxlen_answer)
predicted = sess.run(
tf.argmax(model.logits, 2), feed_dict={model.X: batch_x, model.X_seq_len: seq_x}
)
for i in range(len(batch_x)):
print("row %d" % (i + 1))
print(
"QUESTION:", " ".join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0, 1, 2, 3]])
)
print(
"REAL ANSWER:",
" ".join([rev_dictionary_to[n] for n in batch_y[i] if n not in [0, 1, 2, 3]]),
)
print(
"PREDICTED ANSWER:",
" ".join([rev_dictionary_to[n] for n in predicted[i] if n not in [0, 1, 2, 3]]),
"\n",
)
# In[ ]:
|