| column | dtype | range / values |
|---|---|---|
| blob_id | stringlengths | 40 – 40 |
| directory_id | stringlengths | 40 – 40 |
| path | stringlengths | 3 – 616 |
| content_id | stringlengths | 40 – 40 |
| detected_licenses | listlengths | 0 – 112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5 – 115 |
| snapshot_id | stringlengths | 40 – 40 |
| revision_id | stringlengths | 40 – 40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (⌀ nullable) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] (⌀ nullable) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (⌀ nullable) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 – 10.2M |
| authors | listlengths | 1 – 1 |
| author_id | stringlengths | 1 – 132 |
547e6a3a571c9e2c706f867b40ebd19184612a68 | 4b64dd47fa9321b50875e96298a5f0766ffe97c9 | /adventofcode/2020/day7/run.py | 9046f4736d837a0d56f7717eedabdfc086788e75 | [] | no_license | choupi/puzzle | 2ce01aa85201660da41378c6df093036fa2d3a19 | 736964767717770fe786197aecdf7b170d421c8e | refs/heads/master | 2021-07-23T13:17:45.086526 | 2021-07-20T11:06:28 | 2021-07-20T11:06:28 | 13,580,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | def dfs(bag):
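    # Memoized DFS: returns 1 if `bag` can, directly or transitively,
    # contain a 'shiny gold' bag; results are cached in bag_result.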
if bag == 'shiny gold':
bag_result[bag] = 1
return 1
if bag in bag_result:
return bag_result[bag]
if bag not in bag_dict or not bag_dict[bag]:
#print(bag)
bag_result[bag] = 0
return 0
for b in bag_dict[bag]:
if b in bag_result and bag_result[b] == 1:
bag_result[bag] = 1
return 1
if dfs(b):
bag_result[bag] = 1
return 1
return 0
bag_dict = {}
with open('input.txt') as f:
#with open('inp') as f:
for l in f:
if 'no other bags.' in l:
continue
bag, contains = l.strip().split(' contain ', 1)
bag = bag[:bag.rindex(' ')]
#print(bag)
contains = [' '.join(c.split(' ')[1:-1]) for c in contains.split(', ')]
#print(bag, contains)
bag_dict[bag] = contains
#print(len(bag_dict))
bag_result = {}
for bag in bag_dict:
if bag in bag_result:
continue
dfs(bag)
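# Every reachable bag is now marked in bag_result; subtract 1 so the
# shiny gold bag itself is excluded from the count.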
print(sum([v for b,v in bag_result.items()])-1)
| [
"[email protected]"
] | |
ce12c3bac2fa1e50590db1267dd69ad54d66dae2 | 1bfca35cb83842000a3e37f81a69627535a12bf6 | /examples/testWhile3.py | 78d24d522215b5cd084dafeb3a2c0b6ab0f53bc6 | [] | no_license | scar86/python_scripts | 4a8a51f15d21f3b71fa8f0cd2131f75612c40613 | 686b1229c6736147b7cfcd2d0bf31e5f12e85e00 | refs/heads/master | 2021-01-11T00:28:31.989712 | 2016-11-04T18:52:21 | 2016-11-04T18:52:21 | 70,526,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | '''Test yourself again: what happens?'''
nums = list()
i = 4
while (i < 9):
nums.append(i)
i = i+2
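# i takes the values 4, 6, 8 and stops at 10, so this prints [4, 6, 8].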
print(nums)
| [
"[email protected]"
] | |
29131f57a53f289fa1acbf453e12bd04d8254414 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03971/s324739743.py | cf8d957b84e9ee6c1e59d6f5affdedeba9c06742 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | N, A, B = map(int, input().split())
S = input()
ac_count = 0
ac_b_count = 0
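# ac_count: students admitted so far (at most A + B in total);
# ac_b_count: admitted students with status 'b' (at most B of them).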
for s in S:
if ac_count < A + B:
if s == 'a':
print('Yes')
ac_count += 1
elif s == 'b' and ac_b_count < B:
print('Yes')
ac_count += 1
ac_b_count += 1
else:
print('No')
else:
print('No') | [
"[email protected]"
] | |
b592bfd26e518c213f887d4d3836f718c8a09754 | 4234dc363d0599e93abc1d9a401540ad67702b3b | /clients/client/python/test/test_ui_container.py | c3e92fd3828cd09328d2c9a7225f247880fd3b55 | [
"Apache-2.0"
] | permissive | ninjayoto/sdk | 8065d3f9e68d287fc57cc2ae6571434eaf013157 | 73823009a416905a4ca1f9543f1a94dd21e4e8da | refs/heads/master | 2023-08-28T03:58:26.962617 | 2021-11-01T17:57:24 | 2021-11-01T17:57:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.21
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.ui_nodes import UiNodes
from ory_client.model.ui_texts import UiTexts
globals()['UiNodes'] = UiNodes
globals()['UiTexts'] = UiTexts
from ory_client.model.ui_container import UiContainer
class TestUiContainer(unittest.TestCase):
"""UiContainer unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUiContainer(self):
"""Test UiContainer"""
# FIXME: construct object with mandatory attributes with example values
# model = UiContainer() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8771d96e92a6351aa1051fd247148c3df97ae325 | f27996a45d59afbd9619f2cb92639e088e6bea3c | /python/geodjango/fishtracking_receivers/manage.py | 5718fae7f83fc3162c599c88b338320c96e1adb6 | [] | no_license | bopopescu/snippets | d7e689b5c74207f716b0f9c57a342b86662f39a5 | 1924cd8c7938dc32b6c1a50137cc7f053d4aafb2 | refs/heads/master | 2021-05-31T12:04:26.588555 | 2016-05-04T14:05:26 | 2016-05-04T14:05:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fishtracking_receivers.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
e9ebc7aaca1f90e2f3771a9aa5a6dcfda029d314 | 762de1c66746267e05d53184d7854934616416ee | /tools/MolSurfGenService/MolSurfaceGen32/chimera/share/AddAttr/gui.py | 6e64524fe36c39941dcf1ec1fd3c40af7584d9e7 | [] | no_license | project-renard-survey/semanticscience | 6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677 | 024890dba56c3e82ea2cf8c773965117f8cda339 | refs/heads/master | 2021-07-07T21:47:17.767414 | 2017-10-04T12:13:50 | 2017-10-04T12:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | # --- UCSF Chimera Copyright ---
# Copyright (c) 2000 Regents of the University of California.
# All rights reserved. This software provided pursuant to a
# license agreement containing restrictions on its disclosure,
# duplication and use. This notice must be embedded in or
# attached to all copies, including partial copies, of the
# software or any revisions or derivations thereof.
# --- UCSF Chimera Copyright ---
#
# $Id: gui.py 26655 2009-01-07 22:02:30Z gregc $
import chimera
from chimera import replyobj
from chimera.baseDialog import ModelessDialog
import Tkinter, Pmw
from OpenSave import OpenModeless
from AddAttr import addAttributes
class AddAttrDialog(OpenModeless):
title = "Define Attribute"
provideStatus = True
name = "add/change attrs"
help = "ContributedSoftware/defineattrib/defineattrib.html"
def __init__(self):
OpenModeless.__init__(self, clientPos='s', clientSticky='nsew',
historyID="AddAttr")
def Apply(self):
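        # Apply the attribute assignments from every chosen file to the
        # selected models, reporting files that set no attributes.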
mols = self.molListBox.getvalue()
if not mols:
self.enter()
replyobj.error("No models chosen in dialog\n")
return
for path in self.getPaths():
setAttrs = addAttributes(path, models=mols,
log=self.doLog.get(),
raiseAttrDialog=self.openDialog.get())
if setAttrs == []:
replyobj.error("No attributes were set from"
" file %s\n" % path)
def fillInUI(self, parent):
OpenModeless.fillInUI(self, parent)
from chimera.widgets import MoleculeScrolledListBox
self.molListBox = MoleculeScrolledListBox(self.clientArea,
listbox_selectmode="extended",
labelpos="w", label_text="Restrict to models:")
self.molListBox.grid(row=0, column=0, sticky="nsew")
self.clientArea.rowconfigure(0, weight=1)
self.clientArea.columnconfigure(0, weight=1)
checkButtonFrame = Tkinter.Frame(self.clientArea)
checkButtonFrame.grid(row=1, column=0)
self.openDialog = Tkinter.IntVar(parent)
self.openDialog.set(True)
Tkinter.Checkbutton(checkButtonFrame, variable=self.openDialog,
text="Open Render/Select by Attribute").grid(
row=0, column=0, sticky='w')
self.doLog = Tkinter.IntVar(parent)
self.doLog.set(False)
Tkinter.Checkbutton(checkButtonFrame,
text="Send match info to Reply Log",
variable=self.doLog).grid(row=1, column=0, sticky='w')
from chimera import dialogs
dialogs.register(AddAttrDialog.name, AddAttrDialog)
| [
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] | alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5 |
b0ebcf408ec96db2f5de565245fba1fe6890b293 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/6e9ea2f74da3868e106375d8efe39de34707e2ee-<_check_result>-bug.py | 7dbf8d31b95766ecf041fb78095a44679e970643 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,414 | py |
def _check_result(x, fun, status, slack, con, lb, ub, tol, message):
'\n Check the validity of the provided solution.\n\n A valid (optimal) solution satisfies all bounds, all slack variables are\n negative and all equality constraint residuals are strictly non-zero.\n Further, the lower-bounds, upper-bounds, slack and residuals contain\n no nan values.\n\n Parameters\n ----------\n x : 1D array\n Solution vector to original linear programming problem\n fun: float\n optimal objective value for original problem\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n slack : 1D array\n The (non-negative) slack in the upper bound constraints, that is,\n ``b_ub - A_ub @ x``\n con : 1D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``\n lb : 1D array\n The lower bound constraints on the original variables\n ub: 1D array\n The upper bound constraints on the original variables\n message : str\n A string descriptor of the exit status of the optimization.\n tol : float\n Termination tolerance; see [1]_ Section 4.5.\n\n Returns\n -------\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n '
tol = (np.sqrt(tol) * 10)
contains_nans = (np.isnan(x).any() or np.isnan(fun) or np.isnan(slack).any() or np.isnan(con).any())
if contains_nans:
is_feasible = False
else:
invalid_bounds = ((x < (lb - tol)).any() or (x > (ub + tol)).any())
invalid_slack = ((status != 3) and (slack < (- tol)).any())
invalid_con = ((status != 3) and (np.abs(con) > tol).any())
is_feasible = (not (invalid_bounds or invalid_slack or invalid_con))
if ((status == 0) and (not is_feasible)):
status = 4
message = 'The solution does not satisfy the constraints, yet no errors were raised and there is no certificate of infeasibility or unboundedness. This is known to occur if the `presolve` option is False and the problem is infeasible. If you encounter this under different circumstances, please submit a bug report. Otherwise, please enable presolve.'
elif ((status == 0) and contains_nans):
status = 4
message = "Numerical difficulties were encountered but no errors were raised. This is known to occur if the 'presolve' option is False, 'sparse' is True, and A_eq includes redundant rows. If you encounter this under different circumstances, please submit a bug report. Otherwise, remove linearly dependent equations from your equality constraints or enable presolve."
elif ((status == 2) and is_feasible):
raise ValueError(message)
return (status, message)
| [
"[email protected]"
] | |
83865e2461c7fdd4cb466554d9f685060a332d9a | 345b37bd2d062f4d020b3c974854e016e727afd7 | /black.py | 24c57ca4aee1d54ae862e8e879a4d27f9577f029 | [
"MIT"
] | permissive | AfolabiOlaoluwa/black | 04d458de4aa841e12bdc1e0b577fcee09392915e | 4c086b137e8869166282765a8242808785605278 | refs/heads/master | 2021-04-06T02:26:23.245318 | 2018-03-14T21:38:33 | 2018-03-14T21:38:33 | 125,282,966 | 1 | 1 | MIT | 2018-03-14T22:43:03 | 2018-03-14T22:43:02 | null | UTF-8 | Python | false | false | 47,591 | py | #!/usr/bin/env python3
import asyncio
from asyncio.base_events import BaseEventLoop
from concurrent.futures import Executor, ProcessPoolExecutor
from functools import partial
import keyword
import os
from pathlib import Path
import tokenize
from typing import (
Dict, Generic, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union
)
from attr import attrib, dataclass, Factory
import click
# lib2to3 fork
from blib2to3.pytree import Node, Leaf, type_repr
from blib2to3 import pygram, pytree
from blib2to3.pgen2 import driver, token
from blib2to3.pgen2.parse import ParseError
__version__ = "18.3a0"
DEFAULT_LINE_LENGTH = 88
# types
syms = pygram.python_symbols
FileContent = str
Encoding = str
Depth = int
NodeType = int
LeafID = int
Priority = int
LN = Union[Leaf, Node]
out = partial(click.secho, bold=True, err=True)
err = partial(click.secho, fg='red', err=True)
class NothingChanged(UserWarning):
"""Raised by `format_file` when the reformatted code is the same as source."""
class CannotSplit(Exception):
"""A readable split that fits the allotted line length is impossible.
Raised by `left_hand_split()` and `right_hand_split()`.
"""
@click.command()
@click.option(
'-l',
'--line-length',
type=int,
default=DEFAULT_LINE_LENGTH,
help='How many character per line to allow.',
show_default=True,
)
@click.option(
'--fast/--safe',
is_flag=True,
help='If --fast given, skip temporary sanity checks. [default: --safe]',
)
@click.version_option(version=__version__)
@click.argument(
'src',
nargs=-1,
type=click.Path(exists=True, file_okay=True, dir_okay=True, readable=True),
)
@click.pass_context
def main(ctx: click.Context, line_length: int, fast: bool, src: List[str]) -> None:
"""The uncompromising code formatter."""
sources: List[Path] = []
for s in src:
p = Path(s)
if p.is_dir():
sources.extend(gen_python_files_in_dir(p))
elif p.is_file():
# if a file was explicitly given, we don't care about its extension
sources.append(p)
else:
err(f'invalid path: {s}')
if len(sources) == 0:
ctx.exit(0)
elif len(sources) == 1:
p = sources[0]
report = Report()
try:
changed = format_file_in_place(p, line_length=line_length, fast=fast)
report.done(p, changed)
except Exception as exc:
report.failed(p, str(exc))
ctx.exit(report.return_code)
else:
loop = asyncio.get_event_loop()
executor = ProcessPoolExecutor(max_workers=os.cpu_count())
return_code = 1
try:
return_code = loop.run_until_complete(
schedule_formatting(sources, line_length, fast, loop, executor)
)
finally:
loop.close()
ctx.exit(return_code)
async def schedule_formatting(
sources: List[Path],
line_length: int,
fast: bool,
loop: BaseEventLoop,
executor: Executor,
) -> int:
tasks = {
src: loop.run_in_executor(
executor, format_file_in_place, src, line_length, fast
)
for src in sources
}
await asyncio.wait(tasks.values())
cancelled = []
report = Report()
for src, task in tasks.items():
if not task.done():
report.failed(src, 'timed out, cancelling')
task.cancel()
cancelled.append(task)
elif task.exception():
report.failed(src, str(task.exception()))
else:
report.done(src, task.result())
if cancelled:
await asyncio.wait(cancelled, timeout=2)
out('All done! ✨ 🍰 ✨')
click.echo(str(report))
return report.return_code
def format_file_in_place(src: Path, line_length: int, fast: bool) -> bool:
"""Format the file and rewrite if changed. Return True if changed."""
try:
contents, encoding = format_file(src, line_length=line_length, fast=fast)
except NothingChanged:
return False
with open(src, "w", encoding=encoding) as f:
f.write(contents)
return True
def format_file(
src: Path, line_length: int, fast: bool
) -> Tuple[FileContent, Encoding]:
"""Reformats a file and returns its contents and encoding."""
with tokenize.open(src) as src_buffer:
src_contents = src_buffer.read()
if src_contents.strip() == '':
raise NothingChanged(src)
dst_contents = format_str(src_contents, line_length=line_length)
if src_contents == dst_contents:
raise NothingChanged(src)
if not fast:
assert_equivalent(src_contents, dst_contents)
assert_stable(src_contents, dst_contents, line_length=line_length)
return dst_contents, src_buffer.encoding
def format_str(src_contents: str, line_length: int) -> FileContent:
"""Reformats a string and returns new contents."""
src_node = lib2to3_parse(src_contents)
dst_contents = ""
comments: List[Line] = []
lines = LineGenerator()
elt = EmptyLineTracker()
empty_line = Line()
after = 0
for current_line in lines.visit(src_node):
for _ in range(after):
dst_contents += str(empty_line)
before, after = elt.maybe_empty_lines(current_line)
for _ in range(before):
dst_contents += str(empty_line)
if not current_line.is_comment:
for comment in comments:
dst_contents += str(comment)
comments = []
for line in split_line(current_line, line_length=line_length):
dst_contents += str(line)
else:
comments.append(current_line)
for comment in comments:
dst_contents += str(comment)
return dst_contents
def lib2to3_parse(src_txt: str) -> Node:
"""Given a string with source, return the lib2to3 Node."""
grammar = pygram.python_grammar_no_print_statement
drv = driver.Driver(grammar, pytree.convert)
if src_txt[-1] != '\n':
nl = '\r\n' if '\r\n' in src_txt[:1024] else '\n'
src_txt += nl
try:
result = drv.parse_string(src_txt, True)
except ParseError as pe:
lineno, column = pe.context[1]
lines = src_txt.splitlines()
try:
faulty_line = lines[lineno - 1]
except IndexError:
faulty_line = "<line number missing in source>"
raise ValueError(f"Cannot parse: {lineno}:{column}: {faulty_line}") from None
if isinstance(result, Leaf):
result = Node(syms.file_input, [result])
return result
def lib2to3_unparse(node: Node) -> str:
"""Given a lib2to3 node, return its string representation."""
code = str(node)
return code
T = TypeVar('T')
class Visitor(Generic[T]):
"""Basic lib2to3 visitor that yields things on visiting."""
def visit(self, node: LN) -> Iterator[T]:
if node.type < 256:
name = token.tok_name[node.type]
else:
name = type_repr(node.type)
yield from getattr(self, f'visit_{name}', self.visit_default)(node)
def visit_default(self, node: LN) -> Iterator[T]:
if isinstance(node, Node):
for child in node.children:
yield from self.visit(child)
@dataclass
class DebugVisitor(Visitor[T]):
tree_depth: int = attrib(default=0)
def visit_default(self, node: LN) -> Iterator[T]:
indent = ' ' * (2 * self.tree_depth)
if isinstance(node, Node):
_type = type_repr(node.type)
out(f'{indent}{_type}', fg='yellow')
self.tree_depth += 1
for child in node.children:
yield from self.visit(child)
self.tree_depth -= 1
out(f'{indent}/{_type}', fg='yellow', bold=False)
else:
_type = token.tok_name.get(node.type, str(node.type))
out(f'{indent}{_type}', fg='blue', nl=False)
if node.prefix:
# We don't have to handle prefixes for `Node` objects since
# that delegates to the first child anyway.
out(f' {node.prefix!r}', fg='green', bold=False, nl=False)
out(f' {node.value!r}', fg='blue', bold=False)
KEYWORDS = set(keyword.kwlist)
WHITESPACE = {token.DEDENT, token.INDENT, token.NEWLINE}
FLOW_CONTROL = {'return', 'raise', 'break', 'continue'}
STATEMENT = {
syms.if_stmt,
syms.while_stmt,
syms.for_stmt,
syms.try_stmt,
syms.except_clause,
syms.with_stmt,
syms.funcdef,
syms.classdef,
}
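# Fake token type for comments that must be emitted on their own line
# (see generate_comments(); regular trailing comments keep token.COMMENT).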
STANDALONE_COMMENT = 153
LOGIC_OPERATORS = {'and', 'or'}
COMPARATORS = {
token.LESS,
token.GREATER,
token.EQEQUAL,
token.NOTEQUAL,
token.LESSEQUAL,
token.GREATEREQUAL,
}
MATH_OPERATORS = {
token.PLUS,
token.MINUS,
token.STAR,
token.SLASH,
token.VBAR,
token.AMPER,
token.PERCENT,
token.CIRCUMFLEX,
token.LEFTSHIFT,
token.RIGHTSHIFT,
token.DOUBLESTAR,
token.DOUBLESLASH,
}
COMPREHENSION_PRIORITY = 20
COMMA_PRIORITY = 10
LOGIC_PRIORITY = 5
STRING_PRIORITY = 4
COMPARATOR_PRIORITY = 3
MATH_PRIORITY = 1
@dataclass
class BracketTracker:
depth: int = attrib(default=0)
bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = attrib(default=Factory(dict))
delimiters: Dict[LeafID, Priority] = attrib(default=Factory(dict))
previous: Optional[Leaf] = attrib(default=None)
def mark(self, leaf: Leaf) -> None:
if leaf.type == token.COMMENT:
return
if leaf.type in CLOSING_BRACKETS:
self.depth -= 1
opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
leaf.opening_bracket = opening_bracket # type: ignore
leaf.bracket_depth = self.depth # type: ignore
if self.depth == 0:
delim = is_delimiter(leaf)
if delim:
self.delimiters[id(leaf)] = delim
elif self.previous is not None:
if leaf.type == token.STRING and self.previous.type == token.STRING:
self.delimiters[id(self.previous)] = STRING_PRIORITY
elif (
leaf.type == token.NAME and
leaf.value == 'for' and
leaf.parent and
leaf.parent.type in {syms.comp_for, syms.old_comp_for}
):
self.delimiters[id(self.previous)] = COMPREHENSION_PRIORITY
elif (
leaf.type == token.NAME and
leaf.value == 'if' and
leaf.parent and
leaf.parent.type in {syms.comp_if, syms.old_comp_if}
):
self.delimiters[id(self.previous)] = COMPREHENSION_PRIORITY
if leaf.type in OPENING_BRACKETS:
self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
self.depth += 1
self.previous = leaf
def any_open_brackets(self) -> bool:
"""Returns True if there is an yet unmatched open bracket on the line."""
return bool(self.bracket_match)
def max_priority(self, exclude: Iterable[LeafID] = ()) -> int:
"""Returns the highest priority of a delimiter found on the line.
Values are consistent with what `is_delimiter()` returns.
"""
return max(v for k, v in self.delimiters.items() if k not in exclude)
@dataclass
class Line:
depth: int = attrib(default=0)
leaves: List[Leaf] = attrib(default=Factory(list))
comments: Dict[LeafID, Leaf] = attrib(default=Factory(dict))
bracket_tracker: BracketTracker = attrib(default=Factory(BracketTracker))
inside_brackets: bool = attrib(default=False)
def append(self, leaf: Leaf, preformatted: bool = False) -> None:
has_value = leaf.value.strip()
if not has_value:
return
if self.leaves and not preformatted:
# Note: at this point leaf.prefix should be empty except for
# imports, for which we only preserve newlines.
leaf.prefix += whitespace(leaf)
if self.inside_brackets or not preformatted:
self.bracket_tracker.mark(leaf)
self.maybe_remove_trailing_comma(leaf)
if self.maybe_adapt_standalone_comment(leaf):
return
if not self.append_comment(leaf):
self.leaves.append(leaf)
@property
def is_comment(self) -> bool:
return bool(self) and self.leaves[0].type == STANDALONE_COMMENT
@property
def is_decorator(self) -> bool:
return bool(self) and self.leaves[0].type == token.AT
@property
def is_import(self) -> bool:
return bool(self) and is_import(self.leaves[0])
@property
def is_class(self) -> bool:
return (
bool(self) and
self.leaves[0].type == token.NAME and
self.leaves[0].value == 'class'
)
@property
def is_def(self) -> bool:
"""Also returns True for async defs."""
try:
first_leaf = self.leaves[0]
except IndexError:
return False
try:
second_leaf: Optional[Leaf] = self.leaves[1]
except IndexError:
second_leaf = None
return (
(first_leaf.type == token.NAME and first_leaf.value == 'def') or
(
first_leaf.type == token.NAME and
first_leaf.value == 'async' and
second_leaf is not None and
second_leaf.type == token.NAME and
second_leaf.value == 'def'
)
)
@property
def is_flow_control(self) -> bool:
return (
bool(self) and
self.leaves[0].type == token.NAME and
self.leaves[0].value in FLOW_CONTROL
)
@property
def is_yield(self) -> bool:
return (
bool(self) and
self.leaves[0].type == token.NAME and
self.leaves[0].value == 'yield'
)
def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
if not (
self.leaves and
self.leaves[-1].type == token.COMMA and
closing.type in CLOSING_BRACKETS
):
return False
if closing.type == token.RSQB or closing.type == token.RBRACE:
self.leaves.pop()
return True
# For parens let's check if it's safe to remove the comma. If the
# trailing one is the only one, we might mistakenly change a tuple
# into a different type by removing the comma.
depth = closing.bracket_depth + 1 # type: ignore
commas = 0
opening = closing.opening_bracket # type: ignore
for _opening_index, leaf in enumerate(self.leaves):
if leaf is opening:
break
else:
return False
for leaf in self.leaves[_opening_index + 1:]:
if leaf is closing:
break
bracket_depth = leaf.bracket_depth # type: ignore
if bracket_depth == depth and leaf.type == token.COMMA:
commas += 1
if commas > 1:
self.leaves.pop()
return True
return False
def maybe_adapt_standalone_comment(self, comment: Leaf) -> bool:
"""Hack a standalone comment to act as a trailing comment for line splitting.
If this line has brackets and a standalone `comment`, we need to adapt
it to be able to still reformat the line.
This is not perfect, the line to which the standalone comment gets
appended will appear "too long" when splitting.
"""
if not (
comment.type == STANDALONE_COMMENT and
self.bracket_tracker.any_open_brackets()
):
return False
comment.type = token.COMMENT
comment.prefix = '\n' + ' ' * (self.depth + 1)
return self.append_comment(comment)
def append_comment(self, comment: Leaf) -> bool:
if comment.type != token.COMMENT:
return False
try:
after = id(self.last_non_delimiter())
except LookupError:
comment.type = STANDALONE_COMMENT
comment.prefix = ''
return False
else:
if after in self.comments:
self.comments[after].value += str(comment)
else:
self.comments[after] = comment
return True
def last_non_delimiter(self) -> Leaf:
for i in range(len(self.leaves)):
last = self.leaves[-i - 1]
if not is_delimiter(last):
return last
raise LookupError("No non-delimiters found")
def __str__(self) -> str:
if not self:
return '\n'
indent = ' ' * self.depth
leaves = iter(self.leaves)
first = next(leaves)
res = f'{first.prefix}{indent}{first.value}'
for leaf in leaves:
res += str(leaf)
for comment in self.comments.values():
res += str(comment)
return res + '\n'
def __bool__(self) -> bool:
return bool(self.leaves or self.comments)
@dataclass
class EmptyLineTracker:
"""Provides a stateful method that returns the number of potential extra
empty lines needed before and after the currently processed line.
Note: this tracker works on lines that haven't been split yet.
"""
previous_line: Optional[Line] = attrib(default=None)
previous_after: int = attrib(default=0)
previous_defs: List[int] = attrib(default=Factory(list))
def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
"""Returns the number of extra empty lines before and after the `current_line`.
This is for separating `def`, `async def` and `class` with extra empty lines
(two on module-level), as well as providing an extra empty line after flow
control keywords to make them more prominent.
"""
before, after = self._maybe_empty_lines(current_line)
self.previous_after = after
self.previous_line = current_line
return before, after
def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
before = 0
depth = current_line.depth
while self.previous_defs and self.previous_defs[-1] >= depth:
self.previous_defs.pop()
before = (1 if depth else 2) - self.previous_after
is_decorator = current_line.is_decorator
if is_decorator or current_line.is_def or current_line.is_class:
if not is_decorator:
self.previous_defs.append(depth)
if self.previous_line is None:
# Don't insert empty lines before the first line in the file.
return 0, 0
if self.previous_line and self.previous_line.is_decorator:
# Don't insert empty lines between decorators.
return 0, 0
newlines = 2
if current_line.depth:
newlines -= 1
newlines -= self.previous_after
return newlines, 0
if current_line.is_flow_control:
return before, 1
if (
self.previous_line and
self.previous_line.is_import and
not current_line.is_import and
depth == self.previous_line.depth
):
return (before or 1), 0
if (
self.previous_line and
self.previous_line.is_yield and
(not current_line.is_yield or depth != self.previous_line.depth)
):
return (before or 1), 0
return before, 0
@dataclass
class LineGenerator(Visitor[Line]):
"""Generates reformatted Line objects. Empty lines are not emitted.
Note: destroys the tree it's visiting by mutating prefixes of its leaves
in ways that will no longer stringify to valid Python code on the tree.
"""
current_line: Line = attrib(default=Factory(Line))
standalone_comments: List[Leaf] = attrib(default=Factory(list))
def line(self, indent: int = 0) -> Iterator[Line]:
"""Generate a line.
If the line is empty, only emit if it makes sense.
If the line is too long, split it first and then generate.
If any lines were generated, set up a new current_line.
"""
if not self.current_line:
self.current_line.depth += indent
return # Line is empty, don't emit. Creating a new one unnecessary.
complete_line = self.current_line
self.current_line = Line(depth=complete_line.depth + indent)
yield complete_line
def visit_default(self, node: LN) -> Iterator[Line]:
if isinstance(node, Leaf):
for comment in generate_comments(node):
if self.current_line.bracket_tracker.any_open_brackets():
# any comment within brackets is subject to splitting
self.current_line.append(comment)
elif comment.type == token.COMMENT:
# regular trailing comment
self.current_line.append(comment)
yield from self.line()
else:
# regular standalone comment, to be processed later (see
# docstring in `generate_comments()`
self.standalone_comments.append(comment)
normalize_prefix(node)
if node.type not in WHITESPACE:
for comment in self.standalone_comments:
yield from self.line()
self.current_line.append(comment)
yield from self.line()
self.standalone_comments = []
self.current_line.append(node)
yield from super().visit_default(node)
def visit_suite(self, node: Node) -> Iterator[Line]:
"""Body of a statement after a colon."""
children = iter(node.children)
# Process newline before indenting. It might contain an inline
# comment that should go right after the colon.
newline = next(children)
yield from self.visit(newline)
yield from self.line(+1)
for child in children:
yield from self.visit(child)
yield from self.line(-1)
def visit_stmt(self, node: Node, keywords: Set[str]) -> Iterator[Line]:
"""Visit a statement.
The relevant Python language keywords for this statement are NAME leaves
within it.
"""
for child in node.children:
if child.type == token.NAME and child.value in keywords: # type: ignore
yield from self.line()
yield from self.visit(child)
def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
"""A statement without nested statements."""
is_suite_like = node.parent and node.parent.type in STATEMENT
if is_suite_like:
yield from self.line(+1)
yield from self.visit_default(node)
yield from self.line(-1)
else:
yield from self.line()
yield from self.visit_default(node)
def visit_async_stmt(self, node: Node) -> Iterator[Line]:
yield from self.line()
children = iter(node.children)
for child in children:
yield from self.visit(child)
if child.type == token.NAME and child.value == 'async': # type: ignore
break
internal_stmt = next(children)
for child in internal_stmt.children:
yield from self.visit(child)
def visit_decorators(self, node: Node) -> Iterator[Line]:
for child in node.children:
yield from self.line()
yield from self.visit(child)
def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
yield from self.line()
def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
yield from self.visit_default(leaf)
yield from self.line()
def __attrs_post_init__(self) -> None:
"""You are in a twisty little maze of passages."""
v = self.visit_stmt
self.visit_if_stmt = partial(v, keywords={'if', 'else', 'elif'})
self.visit_while_stmt = partial(v, keywords={'while', 'else'})
self.visit_for_stmt = partial(v, keywords={'for', 'else'})
self.visit_try_stmt = partial(v, keywords={'try', 'except', 'else', 'finally'})
self.visit_except_clause = partial(v, keywords={'except'})
self.visit_funcdef = partial(v, keywords={'def'})
self.visit_with_stmt = partial(v, keywords={'with'})
self.visit_classdef = partial(v, keywords={'class'})
self.visit_async_funcdef = self.visit_async_stmt
self.visit_decorated = self.visit_decorators
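# Opening bracket token types mapped to their closing counterparts.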
BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
OPENING_BRACKETS = set(BRACKET.keys())
CLOSING_BRACKETS = set(BRACKET.values())
BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
def whitespace(leaf: Leaf) -> str:
"""Return whitespace prefix if needed for the given `leaf`."""
NO = ''
SPACE = ' '
DOUBLESPACE = ' '
t = leaf.type
p = leaf.parent
if t == token.COLON:
return NO
if t == token.COMMA:
return NO
if t == token.RPAR:
return NO
if t == token.COMMENT:
return DOUBLESPACE
if t == STANDALONE_COMMENT:
return NO
assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
if p.type in {syms.parameters, syms.arglist}:
# untyped function signatures or calls
if t == token.RPAR:
return NO
prev = leaf.prev_sibling
if not prev or prev.type != token.COMMA:
return NO
if p.type == syms.varargslist:
# lambdas
if t == token.RPAR:
return NO
prev = leaf.prev_sibling
if prev and prev.type != token.COMMA:
return NO
elif p.type == syms.typedargslist:
# typed function signatures
prev = leaf.prev_sibling
if not prev:
return NO
if t == token.EQUAL:
if prev.type != syms.tname:
return NO
elif prev.type == token.EQUAL:
# A bit hacky: if the equal sign has whitespace, it means we
# previously found it's a typed argument. So, we're using that, too.
return prev.prefix
elif prev.type != token.COMMA:
return NO
elif p.type == syms.tname:
# type names
prev = leaf.prev_sibling
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type != token.COMMA:
return NO
elif p.type == syms.trailer:
# attributes and calls
if t == token.LPAR or t == token.RPAR:
return NO
prev = leaf.prev_sibling
if not prev:
if t == token.DOT:
prevp = preceding_leaf(p)
if not prevp or prevp.type != token.NUMBER:
return NO
elif t == token.LSQB:
return NO
elif prev.type != token.COMMA:
return NO
elif p.type == syms.argument:
# single argument
if t == token.EQUAL:
return NO
prev = leaf.prev_sibling
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type == token.LPAR:
return NO
elif prev.type == token.EQUAL or prev.type == token.DOUBLESTAR:
return NO
elif p.type == syms.decorator:
# decorators
return NO
elif p.type == syms.dotted_name:
prev = leaf.prev_sibling
if prev:
return NO
prevp = preceding_leaf(p)
if not prevp or prevp.type == token.AT:
return NO
elif p.type == syms.classdef:
if t == token.LPAR:
return NO
prev = leaf.prev_sibling
if prev and prev.type == token.LPAR:
return NO
elif p.type == syms.subscript:
# indexing
if t == token.COLON:
return NO
prev = leaf.prev_sibling
if not prev or prev.type == token.COLON:
return NO
elif p.type in {
syms.test,
syms.not_test,
syms.xor_expr,
syms.or_test,
syms.and_test,
syms.arith_expr,
syms.shift_expr,
syms.yield_expr,
syms.term,
syms.power,
syms.comparison,
}:
# various arithmetic and logic expressions
prev = leaf.prev_sibling
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type in OPENING_BRACKETS:
return NO
if prevp.type == token.EQUAL:
if prevp.parent and prevp.parent.type in {
syms.varargslist, syms.parameters, syms.arglist, syms.argument
}:
return NO
return SPACE
elif p.type == syms.atom:
if t in CLOSING_BRACKETS:
return NO
prev = leaf.prev_sibling
if not prev:
prevp = preceding_leaf(p)
if not prevp:
return NO
if prevp.type in OPENING_BRACKETS:
return NO
if prevp.type == token.EQUAL:
if prevp.parent and prevp.parent.type in {
syms.varargslist, syms.parameters, syms.arglist, syms.argument
}:
return NO
if prevp.type == token.DOUBLESTAR:
if prevp.parent and prevp.parent.type in {
syms.varargslist, syms.parameters, syms.arglist, syms.dictsetmaker
}:
return NO
elif prev.type in OPENING_BRACKETS:
return NO
elif t == token.DOT:
# dots, but not the first one.
return NO
elif (
p.type == syms.listmaker or
p.type == syms.testlist_gexp or
p.type == syms.subscriptlist
):
# list interior, including unpacking
prev = leaf.prev_sibling
if not prev:
return NO
elif p.type == syms.dictsetmaker:
# dict and set interior, including unpacking
prev = leaf.prev_sibling
if not prev:
return NO
if prev.type == token.DOUBLESTAR:
return NO
elif p.type == syms.factor or p.type == syms.star_expr:
# unary ops
prev = leaf.prev_sibling
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type in OPENING_BRACKETS:
return NO
prevp_parent = prevp.parent
assert prevp_parent is not None
if prevp.type == token.COLON and prevp_parent.type in {
syms.subscript, syms.sliceop
}:
return NO
elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
return NO
elif t == token.NAME or t == token.NUMBER:
return NO
elif p.type == syms.import_from and t == token.NAME:
prev = leaf.prev_sibling
if prev and prev.type == token.DOT:
return NO
elif p.type == syms.sliceop:
return NO
return SPACE
def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
"""Returns the first leaf that precedes `node`, if any."""
while node:
res = node.prev_sibling
if res:
if isinstance(res, Leaf):
return res
try:
return list(res.leaves())[-1]
except IndexError:
return None
node = node.parent
return None
def is_delimiter(leaf: Leaf) -> int:
"""Returns the priority of the `leaf` delimiter. Returns 0 if not delimiter.
Higher numbers are higher priority.
"""
if leaf.type == token.COMMA:
return COMMA_PRIORITY
if leaf.type == token.NAME and leaf.value in LOGIC_OPERATORS:
return LOGIC_PRIORITY
if leaf.type in COMPARATORS:
return COMPARATOR_PRIORITY
if (
leaf.type in MATH_OPERATORS and
leaf.parent and
leaf.parent.type not in {syms.factor, syms.star_expr}
):
return MATH_PRIORITY
return 0
def generate_comments(leaf: Leaf) -> Iterator[Leaf]:
"""Cleans the prefix of the `leaf` and generates comments from it, if any.
Comments in lib2to3 are shoved into the whitespace prefix. This happens
in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
move because it does away with modifying the grammar to include all the
possible places in which comments can be placed.
The sad consequence for us though is that comments don't "belong" anywhere.
This is why this function generates simple parentless Leaf objects for
comments. We simply don't know what the correct parent should be.
No matter though, we can live without this. We really only need to
differentiate between inline and standalone comments. The latter don't
share the line with any code.
Inline comments are emitted as regular token.COMMENT leaves. Standalone
are emitted with a fake STANDALONE_COMMENT token identifier.
"""
if not leaf.prefix:
return
if '#' not in leaf.prefix:
return
before_comment, content = leaf.prefix.split('#', 1)
content = content.rstrip()
if content and (content[0] not in {' ', '!', '#'}):
content = ' ' + content
is_standalone_comment = (
'\n' in before_comment or '\n' in content or leaf.type == token.DEDENT
)
if not is_standalone_comment:
# simple trailing comment
yield Leaf(token.COMMENT, value='#' + content)
return
for line in ('#' + content).split('\n'):
line = line.lstrip()
if not line.startswith('#'):
continue
yield Leaf(STANDALONE_COMMENT, line)
def split_line(line: Line, line_length: int, inner: bool = False) -> Iterator[Line]:
"""Splits a `line` into potentially many lines.
They should fit in the allotted `line_length` but might not be able to.
`inner` signifies that there were a pair of brackets somewhere around the
current `line`, possibly transitively. This means we can fallback to splitting
by delimiters if the LHS/RHS don't yield any results.
"""
line_str = str(line).strip('\n')
if len(line_str) <= line_length and '\n' not in line_str:
yield line
return
if line.is_def:
split_funcs = [left_hand_split]
elif line.inside_brackets:
split_funcs = [delimiter_split]
if '\n' not in line_str:
# Only attempt RHS if we don't have multiline strings or comments
# on this line.
split_funcs.append(right_hand_split)
else:
split_funcs = [right_hand_split]
for split_func in split_funcs:
# We are accumulating lines in `result` because we might want to abort
# mission and return the original line in the end, or attempt a different
# split altogether.
result: List[Line] = []
try:
for l in split_func(line):
if str(l).strip('\n') == line_str:
raise CannotSplit("Split function returned an unchanged result")
result.extend(split_line(l, line_length=line_length, inner=True))
except CannotSplit as cs:
continue
else:
yield from result
break
else:
yield line
def left_hand_split(line: Line) -> Iterator[Line]:
"""Split line into many lines, starting with the first matching bracket pair.
Note: this usually looks weird, only use this for function definitions.
Prefer RHS otherwise.
"""
head = Line(depth=line.depth)
body = Line(depth=line.depth + 1, inside_brackets=True)
tail = Line(depth=line.depth)
tail_leaves: List[Leaf] = []
body_leaves: List[Leaf] = []
head_leaves: List[Leaf] = []
current_leaves = head_leaves
matching_bracket = None
for leaf in line.leaves:
if (
current_leaves is body_leaves and
leaf.type in CLOSING_BRACKETS and
leaf.opening_bracket is matching_bracket # type: ignore
):
current_leaves = tail_leaves
current_leaves.append(leaf)
if current_leaves is head_leaves:
if leaf.type in OPENING_BRACKETS:
matching_bracket = leaf
current_leaves = body_leaves
# Since body is a new indent level, remove spurious leading whitespace.
if body_leaves:
normalize_prefix(body_leaves[0])
# Build the new lines.
for result, leaves in (
(head, head_leaves), (body, body_leaves), (tail, tail_leaves)
):
for leaf in leaves:
result.append(leaf, preformatted=True)
comment_after = line.comments.get(id(leaf))
if comment_after:
result.append(comment_after, preformatted=True)
# Check if the split succeeded.
tail_len = len(str(tail))
if not body:
if tail_len == 0:
raise CannotSplit("Splitting brackets produced the same line")
elif tail_len < 3:
raise CannotSplit(
f"Splitting brackets on an empty body to save "
f"{tail_len} characters is not worth it"
)
for result in (head, body, tail):
if result:
yield result
def right_hand_split(line: Line) -> Iterator[Line]:
"""Split line into many lines, starting with the last matching bracket pair."""
head = Line(depth=line.depth)
body = Line(depth=line.depth + 1, inside_brackets=True)
tail = Line(depth=line.depth)
tail_leaves: List[Leaf] = []
body_leaves: List[Leaf] = []
head_leaves: List[Leaf] = []
current_leaves = tail_leaves
opening_bracket = None
for leaf in reversed(line.leaves):
if current_leaves is body_leaves:
if leaf is opening_bracket:
current_leaves = head_leaves
current_leaves.append(leaf)
if current_leaves is tail_leaves:
if leaf.type in CLOSING_BRACKETS:
opening_bracket = leaf.opening_bracket # type: ignore
current_leaves = body_leaves
tail_leaves.reverse()
body_leaves.reverse()
head_leaves.reverse()
# Since body is a new indent level, remove spurious leading whitespace.
if body_leaves:
normalize_prefix(body_leaves[0])
# Build the new lines.
for result, leaves in (
(head, head_leaves), (body, body_leaves), (tail, tail_leaves)
):
for leaf in leaves:
result.append(leaf, preformatted=True)
comment_after = line.comments.get(id(leaf))
if comment_after:
result.append(comment_after, preformatted=True)
# Check if the split succeeded.
tail_len = len(str(tail).strip('\n'))
if not body:
if tail_len == 0:
raise CannotSplit("Splitting brackets produced the same line")
elif tail_len < 3:
raise CannotSplit(
f"Splitting brackets on an empty body to save "
f"{tail_len} characters is not worth it"
)
for result in (head, body, tail):
if result:
yield result
def delimiter_split(line: Line) -> Iterator[Line]:
"""Split according to delimiters of the highest priority.
This kind of split doesn't increase indentation.
"""
try:
last_leaf = line.leaves[-1]
except IndexError:
raise CannotSplit("Line empty")
delimiters = line.bracket_tracker.delimiters
try:
delimiter_priority = line.bracket_tracker.max_priority(exclude={id(last_leaf)})
except ValueError:
raise CannotSplit("No delimiters found")
current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
for leaf in line.leaves:
current_line.append(leaf, preformatted=True)
comment_after = line.comments.get(id(leaf))
if comment_after:
current_line.append(comment_after, preformatted=True)
leaf_priority = delimiters.get(id(leaf))
if leaf_priority == delimiter_priority:
normalize_prefix(current_line.leaves[0])
yield current_line
current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
if current_line:
if (
delimiter_priority == COMMA_PRIORITY and
current_line.leaves[-1].type != token.COMMA
):
current_line.append(Leaf(token.COMMA, ','))
normalize_prefix(current_line.leaves[0])
yield current_line
def is_import(leaf: Leaf) -> bool:
"""Returns True if the given leaf starts an import statement."""
p = leaf.parent
t = leaf.type
v = leaf.value
return bool(
t == token.NAME and
(
(v == 'import' and p and p.type == syms.import_name) or
(v == 'from' and p and p.type == syms.import_from)
)
)
def normalize_prefix(leaf: Leaf) -> None:
"""Leave existing extra newlines for imports. Remove everything else."""
if is_import(leaf):
spl = leaf.prefix.split('#', 1)
nl_count = spl[0].count('\n')
if len(spl) > 1:
# Skip one newline since it was for a standalone comment.
nl_count -= 1
leaf.prefix = '\n' * nl_count
return
leaf.prefix = ''
PYTHON_EXTENSIONS = {'.py'}
BLACKLISTED_DIRECTORIES = {
'build', 'buck-out', 'dist', '_build', '.git', '.hg', '.mypy_cache', '.tox', '.venv'
}
def gen_python_files_in_dir(path: Path) -> Iterator[Path]:
for child in path.iterdir():
if child.is_dir():
if child.name in BLACKLISTED_DIRECTORIES:
continue
yield from gen_python_files_in_dir(child)
elif child.suffix in PYTHON_EXTENSIONS:
yield child
@dataclass
class Report:
"""Provides a reformatting counter."""
change_count: int = attrib(default=0)
same_count: int = attrib(default=0)
failure_count: int = attrib(default=0)
def done(self, src: Path, changed: bool) -> None:
"""Increment the counter for successful reformatting. Write out a message."""
if changed:
out(f'reformatted {src}')
self.change_count += 1
else:
out(f'{src} already well formatted, good job.', bold=False)
self.same_count += 1
def failed(self, src: Path, message: str) -> None:
"""Increment the counter for failed reformatting. Write out a message."""
err(f'error: cannot format {src}: {message}')
self.failure_count += 1
@property
def return_code(self) -> int:
"""Which return code should the app use considering the current state."""
return 1 if self.failure_count else 0
def __str__(self) -> str:
"""A color report of the current state.
Use `click.unstyle` to remove colors.
"""
report = []
if self.change_count:
s = 's' if self.change_count > 1 else ''
report.append(
click.style(f'{self.change_count} file{s} reformatted', bold=True)
)
if self.same_count:
s = 's' if self.same_count > 1 else ''
report.append(f'{self.same_count} file{s} left unchanged')
if self.failure_count:
s = 's' if self.failure_count > 1 else ''
report.append(
click.style(
f'{self.failure_count} file{s} failed to reformat', fg='red'
)
)
return ', '.join(report) + '.'
def assert_equivalent(src: str, dst: str) -> None:
"""Raises AssertionError if `src` and `dst` aren't equivalent.
This is a temporary sanity check until Black becomes stable.
"""
import ast
import traceback
def _v(node: ast.AST, depth: int = 0) -> Iterator[str]:
"""Simple visitor generating strings to compare ASTs by content."""
yield f"{' ' * depth}{node.__class__.__name__}("
for field in sorted(node._fields):
try:
value = getattr(node, field)
except AttributeError:
continue
yield f"{' ' * (depth+1)}{field}="
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
yield from _v(item, depth + 2)
elif isinstance(value, ast.AST):
yield from _v(value, depth + 2)
else:
yield f"{' ' * (depth+2)}{value!r}, # {value.__class__.__name__}"
yield f"{' ' * depth}) # /{node.__class__.__name__}"
try:
src_ast = ast.parse(src)
except Exception as exc:
raise AssertionError(f"cannot parse source: {exc}") from None
try:
dst_ast = ast.parse(dst)
except Exception as exc:
log = dump_to_file(''.join(traceback.format_tb(exc.__traceback__)), dst)
raise AssertionError(
f"INTERNAL ERROR: Black produced invalid code: {exc}. "
f"Please report a bug on https://github.com/ambv/black/issues. "
f"This invalid output might be helpful: {log}",
) from None
src_ast_str = '\n'.join(_v(src_ast))
dst_ast_str = '\n'.join(_v(dst_ast))
if src_ast_str != dst_ast_str:
log = dump_to_file(diff(src_ast_str, dst_ast_str, 'src', 'dst'))
raise AssertionError(
f"INTERNAL ERROR: Black produced code that is not equivalent to "
f"the source. "
f"Please report a bug on https://github.com/ambv/black/issues. "
f"This diff might be helpful: {log}",
) from None
def assert_stable(src: str, dst: str, line_length: int) -> None:
"""Raises AssertionError if `dst` reformats differently the second time.
This is a temporary sanity check until Black becomes stable.
"""
newdst = format_str(dst, line_length=line_length)
if dst != newdst:
log = dump_to_file(
diff(src, dst, 'source', 'first pass'),
diff(dst, newdst, 'first pass', 'second pass'),
)
raise AssertionError(
f"INTERNAL ERROR: Black produced different code on the second pass "
f"of the formatter. "
f"Please report a bug on https://github.com/ambv/black/issues. "
f"This diff might be helpful: {log}",
) from None
def dump_to_file(*output: str) -> str:
"""Dumps `output` to a temporary file. Returns path to the file."""
import tempfile
with tempfile.NamedTemporaryFile(
mode='w', prefix='blk_', suffix='.log', delete=False
) as f:
for lines in output:
f.write(lines)
f.write('\n')
return f.name
def diff(a: str, b: str, a_name: str, b_name: str) -> str:
"""Returns a udiff string between strings `a` and `b`."""
import difflib
a_lines = [line + '\n' for line in a.split('\n')]
b_lines = [line + '\n' for line in b.split('\n')]
return ''.join(
difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5)
)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c2dc614ebb35d37b1f02d60a7a2b4379aa756714 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/eventgrid/v20200601/list_domain_shared_access_keys.py | 1f55623514778ec7b92005163dffba4572484403 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 2,637 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListDomainSharedAccessKeysResult',
'AwaitableListDomainSharedAccessKeysResult',
'list_domain_shared_access_keys',
]
@pulumi.output_type
class ListDomainSharedAccessKeysResult:
"""
Shared access keys of the Domain.
"""
def __init__(__self__, key1=None, key2=None):
if key1 and not isinstance(key1, str):
raise TypeError("Expected argument 'key1' to be a str")
pulumi.set(__self__, "key1", key1)
if key2 and not isinstance(key2, str):
raise TypeError("Expected argument 'key2' to be a str")
pulumi.set(__self__, "key2", key2)
@property
@pulumi.getter
def key1(self) -> Optional[str]:
"""
Shared access key1 for the domain.
"""
return pulumi.get(self, "key1")
@property
@pulumi.getter
def key2(self) -> Optional[str]:
"""
Shared access key2 for the domain.
"""
return pulumi.get(self, "key2")
class AwaitableListDomainSharedAccessKeysResult(ListDomainSharedAccessKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDomainSharedAccessKeysResult(
key1=self.key1,
key2=self.key2)
def list_domain_shared_access_keys(domain_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDomainSharedAccessKeysResult:
"""
Use this data source to access information about an existing resource.
:param str domain_name: Name of the domain.
:param str resource_group_name: The name of the resource group within the user's subscription.
"""
__args__ = dict()
__args__['domainName'] = domain_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/v20200601:listDomainSharedAccessKeys', __args__, opts=opts, typ=ListDomainSharedAccessKeysResult).value
return AwaitableListDomainSharedAccessKeysResult(
key1=__ret__.key1,
key2=__ret__.key2)
| [
"[email protected]"
] | |
782df01ee7388692ea2870c9a5f8b636234f32e9 | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/isosurface/colorbar/title/font/_color.py | 3d45a80274dd4a6a4d2f5e5279d0ca5accc9ccbe | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 497 | py | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='isosurface.colorbar.title.font',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| [
"[email protected]"
] | |
1e877888ec765a400293dfc038262acb74aba999 | 3baad9ca9756a8dbe6463df6e7f535aa2e0bffa3 | /{{ cookiecutter.site_name }}/{{ cookiecutter.main_module }}.py | 31d3b2b7913d472088c2dc695f0841b9d91b3e82 | [
"MIT"
] | permissive | brettcannon/python-azure-web-app-cookiecutter | 7fcaece747e7cef6d584c236aad4b842b63fa2f0 | e7a3fbc3a724b7bbde43eb5904881d2e0cc07c42 | refs/heads/master | 2023-07-12T07:10:22.594048 | 2017-02-27T20:00:29 | 2017-02-27T20:00:29 | 63,901,465 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | {% if cookiecutter.site_type == "socket" %}
"""An example HTTP server using sockets on Azure Web Apps."""
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import os
import sys
class PythonVersionHandler(BaseHTTPRequestHandler):
def do_GET(self):
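        # Answer every GET request with the running interpreter's sys.version.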
charset = "utf-8"
self.send_response(200)
self.send_header("Content-type", "text/plain; charset={}".format(charset))
self.send_header("Content-Length", len(sys.version))
self.end_headers()
self.wfile.write(sys.version.encode(charset))
if __name__ == "__main__":
server_address = "127.0.0.1", int(os.environ.get("PORT", 5555))
server = HTTPServer(server_address, PythonVersionHandler)
server.serve_forever()
{% else %}
"""An example WSGI server on Azure Web Apps."""
import sys
def wsgi_app(environ, start_response):
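    # Minimal WSGI callable: reply with the Python version as plain text.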
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
yield sys.version.encode()
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('localhost', 5555, wsgi_app)
httpd.serve_forever()
{% endif %}
| [
"[email protected]"
] | |
8cd013a5cfbea88a36682c33babb0f3b7dae5129 | b0c39c21ea63904d3e3c610a06c1e11b0a0c80d9 | /setup.py | 3998246ca1e02d4c827786524e5a89b7b902ab42 | [
"Apache-2.0"
] | permissive | kevenli/FeedIn | d9893d6f7c29d818460da875d5abcb5b9f25b958 | 9b45ba9090d279834ac59887a24154e6ac7f4593 | refs/heads/master | 2021-01-23T00:48:30.404336 | 2015-05-26T06:33:05 | 2015-05-26T06:33:05 | 27,056,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | from distutils.core import setup
from setuptools import find_packages
setup(name='FeedIn',
version='0.1',
author='Keven Li',
author_email='[email protected]',
url='https://github.com/kevenli/FeedIn',
download_url='https://github.com/kevenli/FeedIn',
description='Web data fetching engine.',
long_description='A web data fetching engine which can be used in \
easy configuration and has multiple build-in modules.',
packages=find_packages(exclude=('tests', 'tests.*')),
provides=['feedin'],
keywords='web data python fetching',
license='Apache License, Version 2.0',
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'lxml',
'BeautifulSoup',
],
) | [
"[email protected]"
] | |
a89d0a7db49b9c97787f5713a000415bb2870f84 | a97db7d2f2e6de010db9bb70e4f85b76637ccfe6 | leetcode/743-Network-Delay-Time.py | 89a0689140f2f23f05b225a027399d92382c2f3c | [] | no_license | dongxiaohe/Algorithm-DataStructure | 34547ea0d474464676ffffadda26a92c50bff29f | a9881ac5b35642760ae78233973b1608686730d0 | refs/heads/master | 2020-05-24T20:53:45.689748 | 2019-07-19T03:46:35 | 2019-07-19T03:46:35 | 187,463,938 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | import collections
import heapq


class Solution(object):
def networkDelayTime(self, times, N, K):
routes, seen, minHeap = collections.defaultdict(list), {}, []
for u, v, w in times:
routes[u].append([v, w])
heapq.heappush(minHeap, [0, K])
while minHeap:
time, tmpNode = heapq.heappop(minHeap)
if tmpNode not in seen:
seen[tmpNode] = time
for v, w in routes[tmpNode]:
heapq.heappush(minHeap, [time + w, v])
return max(seen.values()) if N == len(seen) else -1
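

if __name__ == '__main__':
    # Example from the problem statement; expected answer is 2.
    print(Solution().networkDelayTime([[2, 1, 1], [2, 3, 1], [3, 4, 1]], 4, 2))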
| [
"[email protected]"
] | |
37c24b3960134c61b5a8710012b9ad3ebf8a62fe | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Python/Scripts/Auto py to exe/build/lib/auto_py_to_exe/dialogs.py | 08ced7a66201b6e9c57607cc3cabb9a7329be462 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:17632a1084b74f79b082631a021c864a01bee63a94b1fb5768945e30f05a405b
size 2899
| [
"[email protected]"
] | |
7461b94a60fcbe15ed116a2853262476e06aaafd | c06d18ac5b87b3b82fc486454c422b119d6c1ee9 | /src/demo/_tensorflow/linear/linear.py | 70f197e8d2ad5074603c813b803127c0355fe803 | [
"MIT"
] | permissive | tangermi/nlp | b3a4c9612e6049463bf12bc9abb7aff06a084ace | aa36b8b20e8c91807be73a252ff7799789514302 | refs/heads/master | 2022-12-09T12:33:15.009413 | 2020-04-03T04:03:24 | 2020-04-03T04:03:24 | 252,056,010 | 0 | 0 | null | 2022-12-08T07:26:55 | 2020-04-01T02:55:05 | Jupyter Notebook | UTF-8 | Python | false | false | 1,092 | py | # -*- coding: utf-8 -*-
import tensorflow as tf
class Linear(tf.keras.Model):
def __init__(self):
super().__init__()
self.dense = tf.keras.layers.Dense(
units=1,
activation=None,
kernel_initializer=tf.zeros_initializer(),
bias_initializer=tf.zeros_initializer()
)
def call(self, input):
output = self.dense(input)
return output
if __name__ == '__main__':
X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
model = Linear()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
for i in range(100):
with tf.GradientTape() as tape:
            y_pred = model(X)  # call the model as y_pred = model(X) instead of writing y_pred = a * X + b explicitly
loss = tf.reduce_mean(tf.square(y_pred - y))
        grads = tape.gradient(loss, model.variables)  # model.variables directly returns all of the model's variables
optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
print(model.variables)
| [
"[email protected]"
] | |
f68f3963d1b07205e946987a8cdae6983f09b17b | b32fa26f60e71311a51055122a21fc908d4e9566 | /0x04-python-more_data_structures/3-common_elements.py | 4d0ebf389400a9e00ea67c424e45d465d8bc12a8 | [] | no_license | felipeserna/holbertonschool-higher_level_programming | 3ac4fdc91bf70477285994a1d41a72cd6987a277 | 9529bcdd50834569e25f1e0407922b3703807d45 | refs/heads/master | 2023-06-30T04:34:49.806549 | 2021-08-04T02:42:35 | 2021-08-04T02:42:35 | 259,475,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | #!/usr/bin/python3
def common_elements(set_1, set_2):
    """Return the set of elements common to both input sets."""
    return set_1 & set_2
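

if __name__ == "__main__":
    # Illustrative check, not part of the original task.
    set_1 = {"Python", "C", "Javascript"}
    set_2 = {"Bash", "C", "Ruby", "Python"}
    print(sorted(common_elements(set_1, set_2)))  # ['C', 'Python']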
| [
"[email protected]"
] | |
237b5db6e779a7de6c8b385bcac3bf982604e07e | 931aa9c6a44f86e86440c17de62801b26b66fce8 | /constance/LV/getLineUnbalanceAndLosses.py | f92c4871027b8e1d87960321b14354a1e8ea4bb7 | [] | no_license | Constancellc/epg-psopt | 3f1b4a9f9dcaabacf0c7d2a5dbc10947ac0e0510 | 59bdc7951bbbc850e63e813ee635474012a873a4 | refs/heads/master | 2021-06-08T11:33:57.467689 | 2020-04-01T13:19:18 | 2020-04-01T13:19:18 | 96,895,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,785 | py | import csv
import random
import copy
import numpy as np
import matplotlib.pyplot as plt
from lv_optimization_new import LVTestFeeder
import pickle
#outfile = '../../../Documents/simulation_results/LV/voltages.csv'
stem = '../../../Documents/ccModels/eulv/'
alpha = 0.328684513701
g = open(stem+'lnsYprims.pkl','rb')
data = pickle.load(g)
g.close()
# first get phases
lds = np.load('../../../Documents/ccModels/loadBuses/eulvLptloadBusesCc-24.npy')
lds = lds.flatten()[0]
phase = []
for i in range(len(lds)):
bus = lds['load'+str(i+1)]
if bus[-1] == '1':
phase.append('A')
elif bus[-1] == '2':
phase.append('B')
elif bus[-1] == '3':
phase.append('C')
# data is a dictionary where the key is the line number and it points to
# [bus a, bus b, Yprim]
# so we need to build up a dictionary of the voltages
a = np.load(stem+'eulvLptaCc060.npy')
My = np.load(stem+'eulvLptMyCc060.npy')
v0 = np.load(stem+'eulvLptV0Cc060.npy')
Y = np.load(stem+'eulvLptYbusCc060.npy')
Y = Y.flatten()[0]
Y = Y.conj()
YNodeOrder = np.load(stem+'eulvNmtYNodeOrderCc060.npy')
buses = []
for node in YNodeOrder:
buses = buses+[node.split('.')[0]]
def get_losses(Vtot):
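    # Per-line real power loss: S = V * conj(I) summed over both terminals.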
losses = {}
for line in data:
data0 = data[line]
bus1 = data0[0]
bus2 = data0[1]
Yprim = data0[2]
idx1 = [i for i, x in enumerate(buses) if x == bus1]
idx2 = [i for i, x in enumerate(buses) if x == bus2]
Vidx = Vtot[idx1+idx2]
Iphs = Yprim.dot(Vidx)
Sinj = Vidx*(Iphs.conj())
Sloss = sum(Sinj)
losses[line] = [bus1,bus2,Sloss.real]
return losses
def get_unbalance(Vtot):
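    # Fortescue (symmetrical-component) decomposition of the sending-end phase
    # currents; stores |I_zero|, |I_positive|, |I_negative| for each line.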
unbalance = {}
a = complex(-0.5,0.866)
A = np.array([[complex(1,0),complex(1,0),complex(1,0)],
[complex(1,0),a,a*a],
[complex(1,0),a*a,a]])
A = A*0.333
for line in data:
data0 = data[line]
bus1 = data0[0]
bus2 = data0[1]
Yprim = data0[2]
idx1 = [i for i, x in enumerate(buses) if x == bus1]
idx2 = [i for i, x in enumerate(buses) if x == bus2]
Vidx = Vtot[idx1+idx2]
Iphs = Yprim.dot(Vidx)
Is = np.matmul(A,Iphs[:3])
unbalance[line] = [bus1,bus2,abs(Is[0]),abs(Is[1]),abs(Is[2])]
return unbalance
fdr = LVTestFeeder('manc_models/1',1)
fdr.set_households_NR('../../../Documents/netrev/TC2a/03-Dec-2013.csv')
fdr.set_evs_MEA('../../../Documents/My_Electric_Avenue_Technical_Data/'+
'constance/ST1charges/')
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_no_evs = {}
ub_no_evs = {}
print(fdr.predict_losses())
for t in voltages:
ls = get_losses(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_no_evs:
losses_no_evs[l] = 0
ub_no_evs[l] = [0]*3
losses_no_evs[l] += ls[l][2]
for i in range(3):
ub_no_evs[l][i] += ub[l][2+i]
fdr.uncontrolled()
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_unc = {}
ub_unc = {}
print(fdr.predict_losses())
for t in voltages:
    ls = get_losses(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_unc:
losses_unc[l] = 0
ub_unc[l] = [0]*3
losses_unc[l] += ls[l][2]
for i in range(3):
ub_unc[l][i] += ub[l][2+i]
fdr.load_flatten()
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_lf = {}
ub_lf = {}
print(fdr.predict_losses())
for t in voltages:
    ls = get_losses(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_lf:
losses_lf[l] = 0
ub_lf[l] = [0]*3
losses_lf[l] += ls[l][2]
for i in range(3):
ub_lf[l][i] += ub[l][2+i]
fdr.loss_minimise()
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_lm = {}
ub_lm = {}
print(fdr.predict_losses())
for t in voltages:
    ls = get_losses(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_lm:
losses_lm[l] = 0
ub_lm[l] = [0]*3
losses_lm[l] += ls[l][2]
for i in range(3):
ub_lm[l][i] += ub[l][2+i]
fdr.balance_phase2(phase)
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_p = {}
ub_p = {}
print(fdr.predict_losses())
for t in voltages:
    ls = get_losses(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_p:
losses_p[l] = 0
ub_p[l] = [0]*3
losses_p[l] += ls[l][2]
for i in range(3):
ub_p[l][i] += ub[l][2+i]
for i in range(3):
with open('lv test/branch_'+str(i)+'.csv','w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['line','no evs','unc','lf','lm','p'])
for l in losses_unc:
writer.writerow([l,ub_no_evs[l][i],ub_unc[l][i],ub_lf[l][i],
ub_lm[l][i],ub_p[l][i]])
with open('lv test/branch_losses.csv','w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['line','no evs','unc','lf','lm','p'])
for l in losses_unc:
writer.writerow([l,losses_no_evs[l],losses_unc[l],losses_lf[l],
losses_lm[l],losses_p[l]])
'''
busV = {}
for i in range(907):
busV[i+1] = [complex(0,0)]*3
for i in range(3):
busV[1][i] = v0[i]
for i in range(len(voltages)):
bn = int(i/3)+2
pn = i%3
busV[bn][pn] = voltages[i]
lineI = {}
for l in data:
b1 = data[l][0]
b2 = data[l][1]
Yp = data[l][2]
v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
i = np.matmul(Yp,v_)[:3]
iT = 0
for ii in range(3):
iT += abs(i[ii]/1000)
lineI[l] = iT
with open('lv test/no_evs.csv','w') as csvfile:
writer = csv.writer(csvfile)
for l in lineI:
writer.writerow([l,lineI[l]])
busV = {}
for i in range(907):
busV[i+1] = [complex(0,0)]*3
for i in range(3):
busV[1][i] = v0[i]
for i in range(len(voltages)):
bn = int(i/3)+2
pn = i%3
busV[bn][pn] = voltages[i]
lineI = {}
for l in data:
b1 = data[l][0]
b2 = data[l][1]
Yp = data[l][2]
v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
i = np.matmul(Yp,v_)[:3]
iT = 0
for ii in range(3):
iT += abs(i[ii]/1000)
lineI[l] = iT
with open('lv test/uncontrolled.csv','w') as csvfile:
writer = csv.writer(csvfile)
for l in lineI:
writer.writerow([l,lineI[l]])
busV = {}
for i in range(907):
busV[i+1] = [complex(0,0)]*3
for i in range(3):
busV[1][i] = v0[i]
for i in range(len(voltages)):
bn = int(i/3)+2
pn = i%3
busV[bn][pn] = voltages[i]
lineI = {}
for l in data:
b1 = data[l][0]
b2 = data[l][1]
Yp = data[l][2]
v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
i = np.matmul(Yp,v_)[:3]
iT = 0
for ii in range(3):
iT += abs(i[ii]/1000)
lineI[l] = iT
with open('lv test/lf.csv','w') as csvfile:
writer = csv.writer(csvfile)
for l in lineI:
writer.writerow([l,lineI[l]])
busV = {}
for i in range(907):
busV[i+1] = [complex(0,0)]*3
for i in range(3):
busV[1][i] = v0[i]
for i in range(len(voltages)):
bn = int(i/3)+2
pn = i%3
busV[bn][pn] = voltages[i]
lineI = {}
for l in data:
b1 = data[l][0]
b2 = data[l][1]
Yp = data[l][2]
v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
i = np.matmul(Yp,v_)[:3]
iT = 0
for ii in range(3):
iT += abs(i[ii]/1000)
lineI[l] = iT
with open('lv test/lm.csv','w') as csvfile:
writer = csv.writer(csvfile)
for l in lineI:
writer.writerow([l,lineI[l]])
# now I need to work out the line flows from the current injections
'''
| [
"[email protected]"
] | |
9c541ff8948b8d049f61e4e3e61cfa30a9bb0056 | 33170e7fc26b6af2ab61b67aa520c307bbd0e118 | /py/trash/947_predict_0228-4.py | 09ef21e955ea5f5f8ebc8ba007660cc1fa85d498 | [
"MIT"
] | permissive | alaskaw/Microsoft-Malware-Prediction | 26e56adb803184328d1a8f5a3423d5edda7fc400 | 103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80 | refs/heads/master | 2020-04-28T21:22:06.403542 | 2019-03-14T04:36:01 | 2019-03-14T04:36:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,407 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 16:52:33 2019
@author: kazuki.onodera
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from sklearn.externals import joblib
from sklearn.metrics import roc_auc_score
import utils , utils_cat
utils.start(__file__)
#==============================================================================
SUBMIT_FILE_PATH = '../output/0228-4.csv.gz'
COMMENT = 'nejumi + f009 f014 top50(f019)'
EXE_SUBMIT = True
SEED = np.random.randint(9999)
print('SEED:', SEED)
param = {
'boosting_type': 'gbdt',
'class_weight': None,
'colsample_bytree': 0.71,
'learning_rate': 0.05,
'max_depth': -1,
'min_child_samples': 10,
'min_child_weight': 5,
'min_split_gain': 0,
# 'n_estimators': n_estimators,
'n_jobs': -1,
'num_leaves': 64,
'objective': 'binary',
# 'random_state': seed,
'reg_alpha': 0,
'reg_lambda': 0,
'subsample': 0.71,
'subsample_for_bin': 50000,
'subsample_freq': 1,
'max_bin': 255,
'metric': 'auc',
'nthread': -1,
'verbose': -1,
# 'seed': seed,
# 'device': 'gpu',
# 'gpu_use_dp': False
}
NROUND = 19999
NFOLD = 5
VERBOSE_EVAL = 100
ESR = 100
col_drop = [
'Census_SystemVolumeTotalCapacity',
]
USE_PREF_f019 = ['f019']
feature_f019 = pd.read_csv('LOG/imp_f019.csv').head(50).feature.tolist()
USE_PREF_all = ['f009', 'f014']
RESULT_DICT = {}
RESULT_DICT['file'] = SUBMIT_FILE_PATH
# =============================================================================
# def
# =============================================================================
def get_files(search:str, prefs:list):
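    # Glob for feature files and keep only those whose name contains one of
    # the given prefixes (e.g. 'f009').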
files = sorted(glob(search))
# USE_PREF
li = []
for i in files:
for j in prefs:
if j in i:
li.append(i)
break
files = li
[print(i,f) for i,f in enumerate(files)]
return files
# =============================================================================
# load
# =============================================================================
files_tr_f019 = get_files('../data/train_f*.f', USE_PREF_f019)
X_train_f019 = pd.concat([
pd.read_feather(f) for f in tqdm(files_tr_f019, mininterval=30)
], axis=1)[feature_f019]
files_tr_all = get_files('../data/train_f*.f', USE_PREF_all)
X_train_all = pd.concat([
pd.read_feather(f) for f in tqdm(files_tr_all, mininterval=30)
], axis=1)
X_train = pd.concat([X_train_f019, X_train_all, joblib.load('../external/X_train_nejumi.pkl.gz')],
axis=1)
del X_train_f019, X_train_all; gc.collect()
y_train = utils.load_target()['HasDetections']
# drop
if len(col_drop) > 0:
X_train.drop(col_drop, axis=1, inplace=True)
if X_train.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')
print('no dup :) ')
print(f'X_train.shape {X_train.shape}')
gc.collect()
CAT = list( set(X_train.columns)&set(utils_cat.ALL))
print(f'CAT: {CAT}')
COL = X_train.columns.tolist()
RESULT_DICT['feature size'] = len(COL)
RESULT_DICT['category feature size'] = len(CAT)
# =============================================================================
# all sample
# =============================================================================
dtrain = lgb.Dataset(X_train, y_train.values,
categorical_feature=CAT,
free_raw_data=False)
gc.collect()
#models = []
#for i in range(LOOP):
# param['seed'] = np.random.randint(9999)
# model = lgb.train(params=param, train_set=dtrain,
# num_boost_round=NROUND,
# )
# model.save_model(f'../data/lgb{i}.model')
# models.append(model)
# CV
param['seed'] = np.random.randint(9999)
ret, models = lgb.cv(param, dtrain, NROUND,
nfold=NFOLD,
stratified=True, shuffle=True,
feval=ex.eval_auc,
early_stopping_rounds=ESR,
verbose_eval=VERBOSE_EVAL,
categorical_feature=CAT,
seed=SEED)
for i, model in enumerate(models):
model.save_model(f'../data/lgb{i}.model')
#models = []
#for i in range(LOOP):
# model = lgb.Booster(model_file=f'../data/lgb{i}.model')
# models.append(model)
imp = ex.getImp(models)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
utils.savefig_imp(imp, f'LOG/imp_{__file__}.png', x='total')
RESULT_DICT['nfold'] = NFOLD
RESULT_DICT['seed'] = SEED
RESULT_DICT['eta'] = param['learning_rate']
RESULT_DICT['NROUND'] = NROUND
RESULT_DICT['train AUC'] = ret['auc-mean'][-1]
del dtrain, X_train, y_train; gc.collect()
# =============================================================================
# test
# =============================================================================
files_te = get_files('../data/test_f*.f', USE_PREF_f019+USE_PREF_all)
X_test = pd.concat([
pd.read_feather(f) for f in tqdm(files_te, mininterval=30)
]+[joblib.load('../external/X_test_nejumi.pkl.gz')], axis=1)[COL]
gc.collect()
if X_test.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X_test.columns[X_test.columns.duplicated()] }')
print('no dup :) ')
print(f'X_test.shape {X_test.shape}')
y_pred = pd.Series(0, index=X_test.index)
for model in tqdm(models):
y_pred += pd.Series(model.predict(X_test)).rank()
y_pred /= y_pred.max()
sub = pd.read_csv('../input/sample_submission.csv.zip')
sub['HasDetections'] = y_pred.values
print('corr with best')
sub_best = pd.read_csv(utils.SUB_BEST)
print('with mybest:', sub['HasDetections'].corr( sub_best['HasDetections'], method='spearman') )
sub_best['HasDetections'] = np.load(utils.SUB_nejumi)
print('with nejumi:', sub['HasDetections'].corr( sub_best['HasDetections'], method='spearman') )
print("""
# =============================================================================
# write down these info to benchmark.xlsx
# =============================================================================
""")
[print(f'{k:<25}: {RESULT_DICT[k]}') for k in RESULT_DICT]
print("""
# =============================================================================
""")
# save
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
#utils.to_pkl_gzip(sub[['HasDetections']], SUBMIT_FILE_PATH.replace('.csv.gz', f'_{SEED}.pkl'))
# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
print('submit')
utils.submit(SUBMIT_FILE_PATH, COMMENT)
#==============================================================================
utils.end(__file__)
#utils.stop_instance()
| [
"[email protected]"
] | |
ef3126368dbc5fb7408a2d35f7fc575b6e8fb814 | 5aee5e9274aad752f4fc1940030e9844ef8be17d | /HeavyIonsAnalysis/JetAnalysis/python/jets/akPu7CaloJetSequence_pPb_jec_cff.py | d5e8f0b11759a74be3f22036f437b49b4dd08852 | [] | no_license | jiansunpurdue/5316_dmesonreco_hiforest | 1fb65af11ea673646efe1b25bd49e88de9bf3b44 | a02224ad63160d91aab00ed2f92d60a52f0fd348 | refs/heads/master | 2021-01-22T02:53:43.471273 | 2014-04-26T16:10:12 | 2014-04-26T16:10:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,574 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
akPu7Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPu7CaloJets"),
matched = cms.InputTag("ak7HiGenJetsCleaned")
)
akPu7Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPu7CaloJets"),
matched = cms.InputTag("genParticles")
)
akPu7Calocorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPu7CaloJets"),
payload = "AKPu7Calo_HI"
)
akPu7CalopatJets = patJets.clone(jetSource = cms.InputTag("akPu7CaloJets"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu7Calocorr")),
genJetMatch = cms.InputTag("akPu7Calomatch"),
genPartonMatch = cms.InputTag("akPu7Caloparton"),
jetIDMap = cms.InputTag("akPu7CaloJetID"),
addBTagInfo = False,
addTagInfos = False,
addDiscriminators = False,
addAssociatedTracks = False,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
embedCaloTowers = False,
embedPFCandidates = False
)
akPu7CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu7CalopatJets"),
genjetTag = 'ak7HiGenJetsCleaned',
rParam = 0.7,
matchJets = cms.untracked.bool(False),
matchTag = 'akPu7PFpatJets',
pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator")
)
akPu7CaloJetSequence_mc = cms.Sequence(
akPu7Calomatch
*
akPu7Caloparton
*
akPu7Calocorr
*
akPu7CalopatJets
*
akPu7CaloJetAnalyzer
)
akPu7CaloJetSequence_data = cms.Sequence(akPu7Calocorr
*
akPu7CalopatJets
*
akPu7CaloJetAnalyzer
)
akPu7CaloJetSequence_jec = akPu7CaloJetSequence_mc
akPu7CaloJetSequence_mix = akPu7CaloJetSequence_mc
akPu7CaloJetSequence = cms.Sequence(akPu7CaloJetSequence_jec)
akPu7CaloJetAnalyzer.genPtMin = cms.untracked.double(1)
| [
"[email protected]"
] | |
bc4dde6205e2dc08c3f1b2c7b8d97523b58c76b8 | 8b00e2b136636841b38eb182196e56f4721a1e4c | /trio/_core/_exceptions.py | 45f21d389ae8d6f15662d6ff796adfea373bad80 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | xyicheng/trio | 77c8c1e08e3aa4effe8cf04e879720ccfcdb7d33 | fa091e2e91d196c2a57b122589a166949ea03103 | refs/heads/master | 2021-01-23T00:05:59.618483 | 2017-03-16T04:25:05 | 2017-03-16T04:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | import attr
# Re-exported
__all__ = [
"TrioInternalError", "RunFinishedError", "WouldBlock",
"Cancelled", "PartialResult",
]
class TrioInternalError(Exception):
"""Raised by :func:`run` if we encounter a bug in trio, or (possibly) a
misuse of one of the low-level :mod:`trio.hazmat` APIs.
This should never happen! If you get this error, please file a bug.
Unfortunately, if you get this error it also means that all bets are off –
trio doesn't know what is going on and its normal invariants may be void.
(For example, we might have "lost track" of a task. Or lost track of all
tasks.) Again, though, this shouldn't happen.
"""
pass
TrioInternalError.__module__ = "trio"
class RunFinishedError(RuntimeError):
"""Raised by ``run_in_trio_thread`` and similar functions if the
corresponding call to :func:`trio.run` has already finished.
"""
pass
RunFinishedError.__module__ = "trio"
class WouldBlock(Exception):
"""Raised by ``X_nowait`` functions if ``X`` would block.
"""
pass
WouldBlock.__module__ = "trio"
class Cancelled(BaseException):
"""Raised by blocking calls if the surrounding scope has been cancelled.
You should let this exception propagate, to be caught by the relevant
cancel scope. To remind you of this, it inherits from
:exc:`BaseException`, like :exc:`KeyboardInterrupt` and
:exc:`SystemExit`.
.. note::
In the US it's also common to see this word spelled "canceled", with
only one "l". This is a `recent
<https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=5&smoothing=3&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__
and `US-specific
<https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=18&smoothing=3&share=&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__
innovation, and even in the US both forms are still commonly used. So
for consistency with the rest of the world and with "cancellation"
(which always has two "l"s), trio uses the two "l" spelling
everywhere.
"""
_scope = None
Cancelled.__module__ = "trio"
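
# Illustrative only (uses trio's public API, e.g. trio.move_on_after):
#
#     with trio.move_on_after(1):   # a cancel scope with a one-second timeout
#         await trio.sleep(10)      # the blocked call raises Cancelled here
#     # the scope catches its own Cancelled and execution resumes normally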
@attr.s(slots=True, frozen=True)
class PartialResult:
# XX
bytes_sent = attr.ib()
| [
"[email protected]"
] | |
f9c568a46854f97c14938d17f5845aa1f9cf72f9 | 915ea8bcabf4da0833d241050ef226100f7bd233 | /SDKs/Python/test/test_contract_item.py | d3f8d89ca8fd4f3b3678876eb22038d67bad2eb9 | [
"BSD-2-Clause"
] | permissive | parserrr/API-Examples | 03c3855e2aea8588330ba6a42d48a71eb4599616 | 0af039afc104316f1722ee2ec6d2881abd3fbc07 | refs/heads/master | 2020-07-10T22:17:24.906233 | 2019-08-26T03:06:21 | 2019-08-26T03:06:21 | 204,382,917 | 0 | 0 | null | 2019-08-26T02:48:16 | 2019-08-26T02:48:15 | null | UTF-8 | Python | false | false | 922 | py | # coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.contract_item import ContractItem # noqa: E501
from swagger_client.rest import ApiException
class TestContractItem(unittest.TestCase):
"""ContractItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testContractItem(self):
"""Test ContractItem"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.contract_item.ContractItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
537ecd9ff7dea52514e94a67ec8488f4a88abd28 | 10f1f4ce92c83d34de1531e8e891f2a074b3fefd | /graph/gcn_utils/feeder.py | 9b012bf3355a26228cac9c53bbd94c997bfe56d8 | [
"MIT"
] | permissive | sourabhyadav/test_track | d88c4d35753d2b21e3881fc10233bf7bbb1e2cec | d2b4813aaf45dd35db5de3036eda114ef14d5022 | refs/heads/master | 2021-01-06T12:38:56.883549 | 2020-02-05T07:08:46 | 2020-02-05T07:08:46 | 241,328,706 | 1 | 0 | MIT | 2020-02-18T10:06:14 | 2020-02-18T10:06:13 | null | UTF-8 | Python | false | false | 2,751 | py | '''
Author: Guanghan Ning
E-mail: [email protected]
October 24th, 2018
Feeder of Siamese Graph Convolutional Networks for Pose Tracking
Code partially borrowed from:
https://github.com/yysijie/st-gcn/blob/master/feeder/feeder.py
'''
# sys
import os
import sys
import numpy as np
import random
import pickle
import json
# torch
import torch
import torch.nn as nn
from torchvision import datasets, transforms
# operation
from . import tools
class Feeder(torch.utils.data.Dataset):
""" Feeder of PoseTrack Dataset
Arguments:
data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M)
num_person_in: The number of people the feeder can observe in the input sequence
num_person_out: The number of people the feeder in the output sequence
debug: If true, only use the first 100 samples
"""
def __init__(self,
data_path,
data_neg_path,
ignore_empty_sample=True,
debug=False):
self.debug = debug
self.data_path = data_path
self.neg_data_path = data_neg_path
self.ignore_empty_sample = ignore_empty_sample
self.load_data()
def load_data(self):
with open(self.data_path, 'rb') as handle:
self.graph_pos_pair_list_all = pickle.load(handle)
with open(self.neg_data_path, 'rb') as handle:
self.graph_neg_pair_list_all = pickle.load(handle)
# output data shape (N, C, T, V, M)
self.N = min(len(self.graph_pos_pair_list_all) , len(self.graph_neg_pair_list_all)) #sample
self.C = 2 #channel
self.T = 1 #frame
self.V = 15 #joint
self.M = 1 #person
def __len__(self):
return self.N
def __iter__(self):
return self
def __getitem__(self, index):
# randomly add negative samples
random_num = random.uniform(0, 1)
if random_num > 0.5:
#if False:
# output shape (C, T, V, M)
# get data
sample_graph_pair = self.graph_pos_pair_list_all[index]
label = 1 # a pair should match
else:
sample_graph_pair = self.graph_neg_pair_list_all[index]
label = 0 # a pair does not match
data_numpy_pair = []
for siamese_id in range(2):
# fill data_numpy
data_numpy = np.zeros((self.C, self.T, self.V, 1))
pose = sample_graph_pair[:][siamese_id]
data_numpy[0, 0, :, 0] = [x[0] for x in pose]
data_numpy[1, 0, :, 0] = [x[1] for x in pose]
data_numpy_pair.append(data_numpy)
return data_numpy_pair[0], data_numpy_pair[1], label
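
# Illustrative usage (run as part of the package, since this module uses
# relative imports; the .pkl paths below are hypothetical):
#
#     dataset = Feeder('graph_pairs_pos.pkl', 'graph_pairs_neg.pkl')
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)
#     graph_a, graph_b, label = next(iter(loader))
#     # graph_a, graph_b: float tensors of shape (8, 2, 1, 15, 1); label: (8,)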
| [
"[email protected]"
] | |
05a2d22595769aabb8ba1288219cbc5896aff69b | 233f97c6f360d478bf975016dd9e9c2be4a64adb | practices_loop-master/sum_user_quit.py | 5474fe795104649ed224413b4b7e015287da17e6 | [] | no_license | NEHAISRANI/Python_Programs | dee9e05ac174a4fd4dd3ae5e96079e10205e18f9 | aa108a56a0b357ca43129e59377ac35609919667 | refs/heads/master | 2020-03-27T00:07:12.449280 | 2019-03-19T12:57:03 | 2019-03-19T12:57:03 | 145,595,379 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | # If the user enters a number such as 4, sum all numbers from 1 to it (1+2+3+4).
# If the user enters "quit", the program exits without printing.
user = raw_input("enter your number")
if user != "quit":
    user = int(user)
    total = 0
    index = 1
    while index <= user:
        total = total + index
        index = index + 1
    print total
| [
"[email protected]"
] | |
ae4c1c1b0df6cf9a31d0f6d154fe645dd8e7fe8e | fd5c2d6e8a334977cda58d4513eb3385b431a13a | /extract_census_doc.py | a1445f608f735d677f398b8b2b123c44cf91d16e | [
"MIT"
] | permissive | censusreporter/census-api | 817c616b06f6b1c70c7b3737f82f45a80544c44d | c8d2c04c7be19cdee1000001772adda541710a80 | refs/heads/master | 2023-07-28T06:17:26.572796 | 2023-07-05T20:37:03 | 2023-07-05T20:37:03 | 9,879,953 | 146 | 52 | MIT | 2022-07-11T07:16:19 | 2013-05-06T05:24:57 | Python | UTF-8 | Python | false | false | 7,414 | py | #!/bin/python
import psycopg2
import psycopg2.extras
import json
from collections import OrderedDict
conn = psycopg2.connect(database='postgres')
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
state = 'IL'
logrecno = '89' # Evanston city, IL
def sum(data, *columns):
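    # NB: shadows the builtin sum(). Adds the named columns of a row dict,
    # treating None as missing, e.g. sum({'a': 1, 'b': None}, 'a', 'b') -> 1.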
def reduce_fn(x, y):
if x and y:
return x + y
elif x and not y:
return x
elif y and not x:
return y
else:
return None
return reduce(reduce_fn, map(lambda col: data[col], columns))
def maybe_int(i):
return int(i) if i else i
doc = dict(population=dict(), geography=dict(), education=dict())
cur.execute("SELECT * FROM acs2010_1yr.geoheader WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['geography'] = dict(name=data['name'],
stusab=data['stusab'],
sumlevel=data['sumlevel'])
cur.execute("SELECT * FROM acs2010_1yr.B01002 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['population']['median_age'] = dict(total=maybe_int(data['b010020001']),
male=maybe_int(data['b010020002']),
female=maybe_int(data['b010020003']))
cur.execute("SELECT * FROM acs2010_1yr.B01003 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['population']['total'] = maybe_int(data['b010030001'])
cur.execute("SELECT * FROM acs2010_1yr.B01001 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['population']['gender'] = OrderedDict([
('0-9', dict(male=maybe_int(sum(data, 'b010010003', 'b010010004')),
female=maybe_int(sum(data, 'b010010027', 'b010010028')))),
('10-19', dict(male=maybe_int(sum(data, 'b010010005', 'b010010006', 'b010010007')),
female=maybe_int(sum(data, 'b010010029', 'b010010030', 'b010010031')))),
('20-29', dict(male=maybe_int(sum(data, 'b010010008', 'b010010009', 'b010010010', 'b010010011')),
female=maybe_int(sum(data, 'b010010032', 'b010010033', 'b010010034', 'b010010035')))),
('30-39', dict(male=maybe_int(sum(data, 'b010010012', 'b010010013')),
female=maybe_int(sum(data, 'b010010036', 'b010010037')))),
('40-49', dict(male=maybe_int(sum(data, 'b010010014', 'b010010015')),
female=maybe_int(sum(data, 'b010010038', 'b010010039')))),
('50-59', dict(male=maybe_int(sum(data, 'b010010016', 'b010010017')),
female=maybe_int(sum(data, 'b010010040', 'b010010041')))),
('60-69', dict(male=maybe_int(sum(data, 'b010010018', 'b010010019', 'b010010020', 'b010010021')),
female=maybe_int(sum(data, 'b010010042', 'b010010043', 'b010010044', 'b010010045')))),
('70-79', dict(male=maybe_int(sum(data, 'b010010022', 'b010010023')),
female=maybe_int(sum(data, 'b010010046', 'b010010047')))),
('80+', dict(male=maybe_int(sum(data, 'b010010024', 'b010010025')),
female=maybe_int(sum(data, 'b010010048', 'b010010049'))))
])
cur.execute("SELECT * FROM acs2010_1yr.B15001 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['education']['attainment'] = OrderedDict([
('<9th Grade', maybe_int(sum(data, 'b150010004', 'b150010012', 'b150010020', 'b150010028', 'b150010036', 'b150010045', 'b150010053', 'b150010061', 'b150010069', 'b150010077'))),
('9th-12th Grade (No Diploma)', maybe_int(sum(data, 'b150010005', 'b150010013', 'b150010021', 'b150010029', 'b150010037', 'b150010046', 'b150010054', 'b150010062', 'b150010070', 'b150010078'))),
('High School Grad/GED/Alt', maybe_int(sum(data, 'b150010006', 'b150010014', 'b150010022', 'b150010030', 'b150010038', 'b150010047', 'b150010055', 'b150010063', 'b150010071', 'b150010079'))),
('Some College (No Degree)', maybe_int(sum(data, 'b150010007', 'b150010015', 'b150010023', 'b150010031', 'b150010039', 'b150010048', 'b150010056', 'b150010064', 'b150010072', 'b150010080'))),
('Associate Degree', maybe_int(sum(data, 'b150010008', 'b150010016', 'b150010024', 'b150010032', 'b150010040', 'b150010049', 'b150010057', 'b150010065', 'b150010073', 'b150010081'))),
('Bachelor Degree', maybe_int(sum(data, 'b150010009', 'b150010017', 'b150010025', 'b150010033', 'b150010041', 'b150010050', 'b150010058', 'b150010066', 'b150010074', 'b150010082'))),
('Graduate or Professional Degree', maybe_int(sum(data, 'b150010010', 'b150010018', 'b150010026', 'b150010034', 'b150010042', 'b150010051', 'b150010059', 'b150010067', 'b150010075', 'b150010083')))
])
cur.execute("SELECT * FROM acs2010_1yr.C16001 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['language'] = OrderedDict([
('English Only', maybe_int(data['c160010002'])),
('Spanish', maybe_int(data['c160010003'])),
('French', maybe_int(data['c160010004'])),
('German', maybe_int(data['c160010005'])),
('Slavic', maybe_int(data['c160010006'])),
('Other Indo-European', maybe_int(data['c160010007'])),
('Korean', maybe_int(data['c160010008'])),
('Chinese', maybe_int(data['c160010009'])),
('Vietnamese', maybe_int(data['c160010010'])),
('Tagalong', maybe_int(data['c160010011'])),
('Other Asian', maybe_int(data['c160010012'])),
('Other & Unspecified', maybe_int(data['c160010013']))
])
cur.execute("SELECT * FROM acs2010_1yr.B27010 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['insurance'] = OrderedDict([
('No Insurance', maybe_int(sum(data, 'b270100017', 'b270100033', 'b270100050', 'b270100053'))),
('Employer Only', maybe_int(sum(data, 'b270100004', 'b270100020', 'b270100036', 'b270100054'))),
('Direct-Purchase Only', maybe_int(sum(data, 'b270100005', 'b270100021', 'b270100037', 'b270100055'))),
('Medicare Only', maybe_int(sum(data, 'b270100006', 'b270100022', 'b270100038' ))),
('Medicaid/Means-Tested Only', maybe_int(sum(data, 'b270100007', 'b270100023', 'b270100039' ))),
('Tricare/Military Only', maybe_int(sum(data, 'b270100008', 'b270100024', 'b270100040', 'b270100056'))),
('VA Health Care Only', maybe_int(sum(data, 'b270100009', 'b270100025', 'b270100041', 'b270100057'))),
('Employer+Direct Purchase', maybe_int(sum(data, 'b270100011', 'b270100027', 'b270100043', 'b270100058'))),
('Employer+Medicare', maybe_int(sum(data, 'b270100012', 'b270100028', 'b270100044', 'b270100059'))),
('Direct+Medicare', maybe_int(sum(data, 'b270100045', 'b270100060'))),
('Medicare+Medicaid', maybe_int(sum(data, 'b270100013', 'b270100029', 'b270100046', 'b270100061'))),
('Other Private-Only', maybe_int(sum(data, 'b270100014', 'b270100030', 'b270100047', 'b270100062'))),
('Other Public-Only', maybe_int(sum(data, 'b270100015', 'b270100031', 'b270100048', 'b270100064'))),
('Other', maybe_int(sum(data, 'b270100016', 'b270100032', 'b270100049', 'b270100065')))
])
print json.dumps(doc, indent=2)
| [
"[email protected]"
] | |
bc23b9d69210017a402610181ac43e53d89e6aa2 | a5a489f8a268e3d13286fa7ca000d3f26d10d263 | /Basic_Stat/hypothesis_test.py | 812bfd5a97f557194fda9a43b59d377e9ed321c2 | [] | no_license | jusui/Data_Science | cd36e1b9d675be7b5deb98a6034ce57339f09b41 | 7d2ffea15532e35ea64597b3d6f53752a1d4322e | refs/heads/master | 2021-09-24T06:38:55.588645 | 2018-10-04T14:50:08 | 2018-10-04T14:50:08 | 110,791,573 | 0 | 0 | null | 2018-08-04T15:44:22 | 2017-11-15T06:13:28 | Jupyter Notebook | UTF-8 | Python | false | false | 33 | py | # coding:utf-8
import numpy as np | [
"[email protected]"
] | |
e90dcd78bc4629be7d9cf48e3f2d6f93f21ae201 | cdcbe6ea97dd870357998b17f0cdedec0636781d | /extra_apps/xadmin/views/dashboard.py | a6b43c1b335ff64a990a360ffc76b0dbe1ea8264 | [] | no_license | supermanfeng/eduplatform | 8815fad056ac9d1206f219220453f9f7e7382128 | a4288c7af7f4dd980a3f4f2e337899cdf9d15b43 | refs/heads/master | 2022-12-09T23:04:57.239321 | 2018-04-10T11:11:11 | 2018-04-10T11:11:11 | 128,721,691 | 1 | 0 | null | 2022-12-08T00:51:44 | 2018-04-09T05:57:55 | Python | UTF-8 | Python | false | false | 23,644 | py | from django import forms
from django.apps import apps
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template.context_processors import csrf
from django.db.models.base import ModelBase
from django.forms.forms import DeclarativeFieldsMetaclass
from django.forms.utils import flatatt
from django.template import loader
from django.http import Http404
from django.test.client import RequestFactory
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.http import urlencode, urlquote
from django.views.decorators.cache import never_cache
from xadmin import widgets as exwidgets
from xadmin.layout import FormHelper
from xadmin.models import UserSettings, UserWidget
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views.base import CommAdminView, ModelAdminView, filter_hook, csrf_protect_m
from xadmin.views.edit import CreateAdminView
from xadmin.views.list import ListAdminView
from xadmin.util import unquote
import copy
class WidgetTypeSelect(forms.Widget):
def __init__(self, widgets, attrs=None):
super(WidgetTypeSelect, self).__init__(attrs)
self._widgets = widgets
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
final_attrs['class'] = 'nav nav-pills nav-stacked'
output = [u'<ul%s>' % flatatt(final_attrs)]
options = self.render_options(force_unicode(value), final_attrs['id'])
if options:
output.append(options)
output.append(u'</ul>')
output.append('<input type="hidden" id="%s_input" name="%s" value="%s"/>' %
(final_attrs['id'], name, force_unicode(value)))
return mark_safe(u'\n'.join(output))
def render_option(self, selected_choice, widget, id):
if widget.widget_type == selected_choice:
selected_html = u' class="active"'
else:
selected_html = ''
return (u'<li%s><a onclick="' +
'javascript:$(this).parent().parent().find(\'>li\').removeClass(\'active\');$(this).parent().addClass(\'active\');' +
'$(\'#%s_input\').attr(\'value\', \'%s\')' % (id, widget.widget_type) +
'"><h4><i class="%s"></i> %s</h4><p>%s</p></a></li>') % (
selected_html,
widget.widget_icon,
widget.widget_title or widget.widget_type,
widget.description)
def render_options(self, selected_choice, id):
# Normalize to strings.
output = []
for widget in self._widgets:
output.append(self.render_option(selected_choice, widget, id))
return u'\n'.join(output)
class UserWidgetAdmin(object):
model_icon = 'fa fa-dashboard'
list_display = ('widget_type', 'page_id', 'user')
list_filter = ['user', 'widget_type', 'page_id']
list_display_links = ('widget_type',)
user_fields = ['user']
hidden_menu = True
wizard_form_list = (
(_(u"Widget Type"), ('page_id', 'widget_type')),
(_(u"Widget Params"), {'callback':
"get_widget_params_form", 'convert': "convert_widget_params"})
)
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'widget_type':
widgets = widget_manager.get_widgets(self.request.GET.get('page_id', ''))
form_widget = WidgetTypeSelect(widgets)
return forms.ChoiceField(choices=[(w.widget_type, w.description) for w in widgets],
widget=form_widget, label=_('Widget Type'))
if 'page_id' in self.request.GET and db_field.name == 'page_id':
kwargs['widget'] = forms.HiddenInput
field = super(
UserWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
return field
def get_widget_params_form(self, wizard):
data = wizard.get_cleaned_data_for_step(wizard.steps.first)
widget_type = data['widget_type']
widget = widget_manager.get(widget_type)
fields = copy.deepcopy(widget.base_fields)
if 'id' in fields:
del fields['id']
return DeclarativeFieldsMetaclass("WidgetParamsForm", (forms.Form,), fields)
def convert_widget_params(self, wizard, cleaned_data, form):
widget = UserWidget()
value = dict([(f.name, f.value()) for f in form])
widget.set_value(value)
cleaned_data['value'] = widget.value
cleaned_data['user'] = self.user
def get_list_display(self):
list_display = super(UserWidgetAdmin, self).get_list_display()
if not self.user.is_superuser:
list_display.remove('user')
return list_display
def queryset(self):
if self.user.is_superuser:
return super(UserWidgetAdmin, self).queryset()
return UserWidget.objects.filter(user=self.user)
def update_dashboard(self, obj):
try:
portal_pos = UserSettings.objects.get(
user=obj.user, key="dashboard:%s:pos" % obj.page_id)
except UserSettings.DoesNotExist:
return
pos = [[w for w in col.split(',') if w != str(
obj.id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
def delete_model(self):
self.update_dashboard(self.obj)
super(UserWidgetAdmin, self).delete_model()
def delete_models(self, queryset):
for obj in queryset:
self.update_dashboard(obj)
super(UserWidgetAdmin, self).delete_models(queryset)
site.register(UserWidget, UserWidgetAdmin)
class WidgetManager(object):
_widgets = None
def __init__(self):
self._widgets = {}
def register(self, widget_class):
self._widgets[widget_class.widget_type] = widget_class
return widget_class
def get(self, name):
return self._widgets[name]
def get_widgets(self, page_id):
return self._widgets.values()
widget_manager = WidgetManager()
class WidgetDataError(Exception):
def __init__(self, widget, errors):
super(WidgetDataError, self).__init__(str(errors))
self.widget = widget
self.errors = errors
class BaseWidget(forms.Form):
template = 'xadmin/widgets/base.html'
description = 'Base Widget, don\'t use it.'
widget_title = None
widget_icon = 'fa fa-plus-square'
widget_type = 'base'
base_title = None
id = forms.IntegerField(label=_('Widget ID'), widget=forms.HiddenInput)
title = forms.CharField(label=_('Widget Title'), required=False, widget=exwidgets.AdminTextInputWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
self.admin_site = dashboard.admin_site
self.request = dashboard.request
self.user = dashboard.request.user
self.convert(data)
super(BaseWidget, self).__init__(data)
if not self.is_valid():
raise WidgetDataError(self, self.errors.as_text())
self.setup()
def setup(self):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
self.helper = helper
self.id = self.cleaned_data['id']
self.title = self.cleaned_data['title'] or self.base_title
if not (self.user.is_superuser or self.has_perm()):
raise PermissionDenied
@property
def widget(self):
context = {'widget_id': self.id, 'widget_title': self.title, 'widget_icon': self.widget_icon,
'widget_type': self.widget_type, 'form': self, 'widget': self}
context.update(csrf(self.request))
self.context(context)
return loader.render_to_string(self.template, context)
def context(self, context):
pass
def convert(self, data):
pass
def has_perm(self):
return False
def save(self):
value = dict([(f.name, f.value()) for f in self])
user_widget = UserWidget.objects.get(id=self.id)
user_widget.set_value(value)
user_widget.save()
def static(self, path):
return self.dashboard.static(path)
def vendor(self, *tags):
return self.dashboard.vendor(*tags)
def media(self):
return forms.Media()
@widget_manager.register
class HtmlWidget(BaseWidget):
widget_type = 'html'
widget_icon = 'fa fa-file-o'
description = _(
u'Html Content Widget, can write any html content in widget.')
content = forms.CharField(label=_(
'Html Content'), widget=exwidgets.AdminTextareaWidget, required=False)
def has_perm(self):
return True
def context(self, context):
context['content'] = self.cleaned_data['content']
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
def __iter__(self):
from xadmin import site as g_admin_site
for m, ma in g_admin_site._registry.items():
yield ('%s.%s' % (m._meta.app_label, m._meta.model_name),
m._meta.verbose_name)
class ModelChoiceField(forms.ChoiceField):
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, *args, **kwargs):
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
forms.Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.widget.choices = self.choices
def __deepcopy__(self, memo):
result = forms.Field.__deepcopy__(self, memo)
return result
def _get_choices(self):
return ModelChoiceIterator(self)
choices = property(_get_choices, forms.ChoiceField._set_choices)
def to_python(self, value):
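        # Accept either a model class or an 'app_label.model_name' string.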
if isinstance(value, ModelBase):
return value
app_label, model_name = value.lower().split('.')
return apps.get_model(app_label, model_name)
def prepare_value(self, value):
if isinstance(value, ModelBase):
value = '%s.%s' % (value._meta.app_label, value._meta.model_name)
return value
def valid_value(self, value):
value = self.prepare_value(value)
for k, v in self.choices:
if value == smart_unicode(k):
return True
return False
class ModelBaseWidget(BaseWidget):
app_label = None
model_name = None
model_perm = 'change'
model = ModelChoiceField(label=_(u'Target Model'), widget=exwidgets.AdminSelectWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
super(ModelBaseWidget, self).__init__(dashboard, data)
def setup(self):
self.model = self.cleaned_data['model']
self.app_label = self.model._meta.app_label
self.model_name = self.model._meta.model_name
super(ModelBaseWidget, self).setup()
def has_perm(self):
return self.dashboard.has_model_perm(self.model, self.model_perm)
def filte_choices_model(self, model, modeladmin):
return self.dashboard.has_model_perm(model, self.model_perm)
def model_admin_url(self, name, *args, **kwargs):
return reverse(
"%s:%s_%s_%s" % (self.admin_site.app_name, self.app_label,
self.model_name, name), args=args, kwargs=kwargs)
class PartialBaseWidget(BaseWidget):
def get_view_class(self, view_class, model=None, **opts):
admin_class = self.admin_site._registry.get(model) if model else None
return self.admin_site.get_view_class(view_class, admin_class, **opts)
def get_factory(self):
return RequestFactory()
def setup_request(self, request):
request.user = self.user
request.session = self.request.session
return request
def make_get_request(self, path, data={}, **extra):
req = self.get_factory().get(path, data, **extra)
return self.setup_request(req)
def make_post_request(self, path, data={}, **extra):
req = self.get_factory().post(path, data, **extra)
return self.setup_request(req)
@widget_manager.register
class QuickBtnWidget(BaseWidget):
widget_type = 'qbutton'
description = _(u'Quick button Widget, quickly open any page.')
template = "xadmin/widgets/qbutton.html"
base_title = _(u"Quick Buttons")
widget_icon = 'fa fa-caret-square-o-right'
def convert(self, data):
self.q_btns = data.pop('btns', [])
def get_model(self, model_or_label):
if isinstance(model_or_label, ModelBase):
return model_or_label
else:
return apps.get_model(*model_or_label.lower().split('.'))
def context(self, context):
btns = []
for b in self.q_btns:
btn = {}
if 'model' in b:
model = self.get_model(b['model'])
if not self.user.has_perm("%s.view_%s" % (model._meta.app_label, model._meta.model_name)):
continue
btn['url'] = reverse("%s:%s_%s_%s" % (self.admin_site.app_name, model._meta.app_label,
model._meta.model_name, b.get('view', 'changelist')))
btn['title'] = model._meta.verbose_name
btn['icon'] = self.dashboard.get_model_icon(model)
else:
try:
btn['url'] = reverse(b['url'])
except NoReverseMatch:
btn['url'] = b['url']
if 'title' in b:
btn['title'] = b['title']
if 'icon' in b:
btn['icon'] = b['icon']
btns.append(btn)
context.update({'btns': btns})
def has_perm(self):
return True
@widget_manager.register
class ListWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'list'
description = _(u'Any Objects list Widget.')
template = "xadmin/widgets/list.html"
model_perm = 'view'
widget_icon = 'fa fa-align-justify'
def convert(self, data):
self.list_params = data.pop('params', {})
self.list_count = data.pop('count', 10)
def setup(self):
super(ListWidget, self).setup()
if not self.title:
self.title = self.model._meta.verbose_name_plural
req = self.make_get_request("", self.list_params)
self.list_view = self.get_view_class(ListAdminView, self.model)(req)
if self.list_count:
self.list_view.list_per_page = self.list_count
def context(self, context):
list_view = self.list_view
list_view.make_result_list()
base_fields = list_view.base_list_display
if len(base_fields) > 5:
base_fields = base_fields[0:5]
context['result_headers'] = [c for c in list_view.result_headers(
).cells if c.field_name in base_fields]
context['results'] = [[o for i, o in
enumerate(filter(lambda c: c.field_name in base_fields, r.cells))]
for r in list_view.results()]
context['result_count'] = list_view.result_count
context['page_url'] = self.model_admin_url('changelist') + "?" + urlencode(self.list_params)
@widget_manager.register
class AddFormWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'addform'
description = _(u'Add any model object Widget.')
template = "xadmin/widgets/addform.html"
model_perm = 'add'
widget_icon = 'fa fa-plus'
def setup(self):
super(AddFormWidget, self).setup()
if self.title is None:
self.title = _('Add %s') % self.model._meta.verbose_name
req = self.make_get_request("")
self.add_view = self.get_view_class(
CreateAdminView, self.model, list_per_page=10)(req)
self.add_view.instance_forms()
def context(self, context):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
context.update({
'addform': self.add_view.form_obj,
'addhelper': helper,
'addurl': self.add_view.model_admin_url('add'),
'model': self.model
})
def media(self):
return self.add_view.media + self.add_view.form_obj.media + self.vendor('xadmin.plugin.quick-form.js')
class Dashboard(CommAdminView):
widget_customiz = True
widgets = []
title = _(u"Dashboard")
icon = None
def get_page_id(self):
return self.request.path
def get_portal_key(self):
return "dashboard:%s:pos" % self.get_page_id()
@filter_hook
def get_widget(self, widget_or_id, data=None):
try:
if isinstance(widget_or_id, UserWidget):
widget = widget_or_id
else:
widget = UserWidget.objects.get(user=self.user, page_id=self.get_page_id(), id=widget_or_id)
wid = widget_manager.get(widget.widget_type)
class widget_with_perm(wid):
def context(self, context):
super(widget_with_perm, self).context(context)
context.update({'has_change_permission': self.request.user.has_perm('xadmin.change_userwidget')})
wid_instance = widget_with_perm(self, data or widget.get_value())
return wid_instance
except UserWidget.DoesNotExist:
return None
@filter_hook
def get_init_widget(self):
portal = []
widgets = self.widgets
for col in widgets:
portal_col = []
for opts in col:
try:
widget = UserWidget(user=self.user, page_id=self.get_page_id(), widget_type=opts['type'])
widget.set_value(opts)
widget.save()
portal_col.append(self.get_widget(widget))
except (PermissionDenied, WidgetDataError):
widget.delete()
continue
portal.append(portal_col)
UserSettings(
user=self.user, key="dashboard:%s:pos" % self.get_page_id(),
value='|'.join([','.join([str(w.id) for w in col]) for col in portal])).save()
return portal
@filter_hook
def get_widgets(self):
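        # The saved layout is 'id,id|id,...': '|' separates columns and ','
        # separates widget ids within a column.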
if self.widget_customiz:
portal_pos = UserSettings.objects.filter(
user=self.user, key=self.get_portal_key())
if len(portal_pos):
portal_pos = portal_pos[0].value
widgets = []
if portal_pos:
user_widgets = dict(
[(uw.id, uw) for uw in UserWidget.objects.filter(user=self.user, page_id=self.get_page_id())])
for col in portal_pos.split('|'):
ws = []
for wid in col.split(','):
try:
widget = user_widgets.get(int(wid))
if widget:
ws.append(self.get_widget(widget))
except Exception, e:
import logging
logging.error(e, exc_info=True)
widgets.append(ws)
return widgets
return self.get_init_widget()
@filter_hook
def get_title(self):
return self.title
@filter_hook
def get_context(self):
new_context = {
'title': self.get_title(),
'icon': self.icon,
'portal_key': self.get_portal_key(),
'columns': [('col-sm-%d' % int(12 / len(self.widgets)), ws) for ws in self.widgets],
'has_add_widget_permission': self.has_model_perm(UserWidget, 'add') and self.widget_customiz,
'add_widget_url': self.get_admin_url(
'%s_%s_add' % (UserWidget._meta.app_label, UserWidget._meta.model_name)) +
"?user=%s&page_id=%s&_redirect=%s" % (
self.user.id, self.get_page_id(), urlquote(self.request.get_full_path()))
}
context = super(Dashboard, self).get_context()
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response('xadmin/views/dashboard.html', self.get_context())
@csrf_protect_m
def post(self, request, *args, **kwargs):
if 'id' in request.POST:
widget_id = request.POST['id']
if request.POST.get('_delete', None) != 'on':
widget = self.get_widget(widget_id, request.POST.copy())
widget.save()
else:
try:
widget = UserWidget.objects.get(
user=self.user, page_id=self.get_page_id(), id=widget_id)
widget.delete()
try:
portal_pos = UserSettings.objects.get(user=self.user,
key="dashboard:%s:pos" % self.get_page_id())
pos = [[w for w in col.split(',') if w != str(
widget_id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
except Exception:
pass
except UserWidget.DoesNotExist:
pass
return self.get(request)
@filter_hook
def get_media(self):
media = super(Dashboard, self).get_media() + \
self.vendor('xadmin.page.dashboard.js', 'xadmin.page.dashboard.css')
if self.widget_customiz:
media = media + self.vendor('xadmin.plugin.portal.js')
for ws in self.widgets:
for widget in ws:
media = media + widget.media()
return media
class ModelDashboard(Dashboard, ModelAdminView):
title = _(u"%s Dashboard")
def get_page_id(self):
return 'model:%s/%s' % self.model_info
@filter_hook
def get_title(self):
return self.title % force_unicode(self.obj)
def init_request(self, object_id, *args, **kwargs):
self.obj = self.get_object(unquote(object_id))
if not self.has_view_permission(self.obj):
raise PermissionDenied
if self.obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_unicode(self.opts.verbose_name), 'key': escape(object_id)})
@filter_hook
def get_context(self):
new_context = {
'has_change_permission': self.has_change_permission(self.obj),
'object': self.obj,
}
context = Dashboard.get_context(self)
context.update(ModelAdminView.get_context(self))
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
| [
"[email protected]"
] | |
470cfb5e9ae74a30f9f96b586becfb3043effda3 | 233f97c6f360d478bf975016dd9e9c2be4a64adb | /guvi_4_3_8.py | 5474fe795104649ed224413b4b7e015287da17e6 | [] | no_license | unknownboyy/GUVI | 3dbd1bb2bc6b3db52f5f79491accd6c56a2dec45 | d757dd473c4f5eef526a516cf64a1757eb235869 | refs/heads/master | 2020-03-27T00:07:12.449280 | 2019-03-19T12:57:03 | 2019-03-19T12:57:03 | 145,595,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | print("guvi_4_3_8.py") | [
"[email protected]"
] | |
5d565e7d89b2cf7e44965b839844bcc6a47e0e56 | ecbbc5cf8b49de00dd956386ea7cf31951aecbf8 | /src/KalmanFilter.py | d0005ea5d794108215ebbe567191ff497c0fe45c | [] | no_license | connorlee77/ardrone_stateestimation | 9e49339c6d916a146a709acc4adf947453c9d626 | 253722cf1940fd368bc10dcd90be0c0113bb4339 | refs/heads/master | 2021-01-10T13:13:57.845898 | 2016-03-18T08:53:18 | 2016-03-18T08:53:18 | 53,226,979 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | import numpy as np
import matplotlib.pyplot as plt
import rospy
class KalmanFilter:
def __init__(self, A, P, R, Q, H, B, dimension):
self.A = A
self.P = P
self.x_k = 0
self.kalmanGain = 0
self.R = R #constant
self.Q = Q #constant
self.H = H
self.B = B
self.dimensions = dimension
def predictState(self, u_k):
self.x_k = np.add(
np.dot(self.A, self.x_k),
np.dot(self.B, u_k))
self.P = np.add(np.dot(
np.dot(self.A, self.P),
np.transpose(self.A)), self.Q)
def getKalmanGain(self):
first = np.dot(self.P, np.transpose(self.H))
second = np.linalg.inv(
np.add(
np.dot(
np.dot(self.H, self.P),
np.transpose(self.H)),
self.R))
self.kalmanGain = np.dot(first, second)
def update(self, z_k):
residual = np.subtract(
z_k,
np.dot(
self.H,
self.x_k))
self.x_k = np.add(self.x_k, np.dot(self.kalmanGain, residual))
self.P = np.dot(
np.subtract(
np.identity(self.dimensions),
np.dot(
self.kalmanGain,
self.H)),
self.P)
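# A minimal usage sketch (an addition, not part of the original node): a 1-D
# constant-position model that measures the state directly. Every matrix value
# below is an illustrative assumption, not taken from any ROS configuration.
if __name__ == '__main__':
    kf = KalmanFilter(
        A=np.array([[1.0]]),   # state transition: position stays put
        P=np.array([[1.0]]),   # initial state covariance
        R=np.array([[0.1]]),   # measurement noise covariance
        Q=np.array([[0.01]]),  # process noise covariance
        H=np.array([[1.0]]),   # measurement model: the state is observed directly
        B=np.array([[0.0]]),   # no control input
        dimension=1)
    for z in [1.1, 0.9, 1.05]:
        kf.predictState(np.array([[0.0]]))  # predict with zero control input
        kf.getKalmanGain()
        kf.update(np.array([[z]]))          # fold in the new measurement
    print(kf.x_k)                           # filtered state estimate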
| [
"[email protected]"
] | |
aa0a9e73022a1268c8dc56985d5d5848748aa64e | 3fe272eea1c91cc5719704265eab49534176ff0d | /scripts/item/consume_2439898.py | fdc636b193089e8c5f0e75eb0dac9c8a17c50c85 | [
"MIT"
] | permissive | Bratah123/v203.4 | e72be4843828def05592298df44b081515b7ca68 | 9cd3f31fb2ef251de2c5968c75aeebae9c66d37a | refs/heads/master | 2023-02-15T06:15:51.770849 | 2021-01-06T05:45:59 | 2021-01-06T05:45:59 | 316,366,462 | 1 | 0 | MIT | 2020-12-18T17:01:25 | 2020-11-27T00:50:26 | Java | UTF-8 | Python | false | false | 217 | py | # Created by MechAviv
# Valentine Damage Skin | (2439898)
if sm.addDamageSkin(2439898):
sm.chat("'Valentine Damage Skin' Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem() | [
"[email protected]"
] | |
acc0cbbbbef590f361a5a6744807f18458d0e078 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/130/usersdata/228/34476/submittedfiles/al8.py | 99d23561646b83280774cd80f4ab4ad83803ccaf | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | # -*- coding: utf-8 -*-
n = int(input('digite um valor:'))  # fixed: the original line was missing its closing parenthesis
nfat=1
for i in range(2,n+1):
    nfat = nfat*i  # n factorial accumulates by multiplication; the original "+ i" computed a sum instead
print(nfat)
| [
"[email protected]"
] | |
8e9f1d89a0a10175a73f79346baaea3a012c4479 | 3a5ea75a5039207104fd478fb69ac4664c3c3a46 | /vega/algorithms/nas/modnas/estim/dist_backend/base.py | 1725fd222057fa4b91024747947592087e159828 | [
"MIT"
] | permissive | fmsnew/vega | e3df25efa6af46073c441f41da4f2fdc4929fec5 | 8e0af84a57eca5745fe2db3d13075393838036bb | refs/heads/master | 2023-06-10T04:47:11.661814 | 2021-06-26T07:45:30 | 2021-06-26T07:45:30 | 285,174,199 | 0 | 0 | MIT | 2020-08-11T14:19:09 | 2020-08-05T03:59:49 | Python | UTF-8 | Python | false | false | 1,712 | py | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Distributed remote client and server."""
import threading
class RemoteBase():
"""Distributed remote client class."""
def __init__(self):
super().__init__()
self.on_done = None
self.on_failed = None
def call(self, func, *args, on_done=None, on_failed=None, **kwargs):
"""Call function on remote client with callbacks."""
self.on_done = on_done
self.on_failed = on_failed
self.th_rpc = threading.Thread(target=self.rpc, args=(func,) + args, kwargs=kwargs)
self.th_rpc.start()
def close(self):
"""Close the remote client."""
raise NotImplementedError
def rpc(self, func, *args, **kwargs):
"""Call function on remote client."""
raise NotImplementedError
def on_rpc_done(self, ret):
"""Invoke callback when remote call finishes."""
self.ret = ret
self.on_done(ret)
def on_rpc_failed(self, ret):
"""Invoke callback when remote call fails."""
self.on_failed(ret)
class WorkerBase():
"""Distributed remote worker (server) class."""
def run(self, estim):
"""Run worker."""
raise NotImplementedError
def close(self):
"""Close worker."""
raise NotImplementedError
| [
"[email protected]"
] | |
682039f30aaa220caa90f937bbaf5bd7075dd986 | fad752f7e4ae9c9fae7a472634a712249fb6f83f | /sato/cli.py | 9697a09e053b96555f2b63cdabb75bc724fcc61c | [
"Apache-2.0"
] | permissive | VIDA-NYU/sato | 895da0de833681335ec5122c4487555d2285f351 | 8fb51787b36114df13f54c1acd11df12a66ad3e4 | refs/heads/master | 2021-07-13T16:55:53.621521 | 2020-11-26T01:01:07 | 2020-11-26T01:01:07 | 225,955,500 | 0 | 0 | Apache-2.0 | 2019-12-04T20:56:16 | 2019-12-04T20:56:15 | null | UTF-8 | Python | false | false | 2,252 | py | import click
import pandas as pd
from sato.predict import evaluate
@click.command('predict')
@click.option(
'-n', '--count',
default=1000,
help='Sample size'
)
@click.argument(
'src',
nargs=-1,
type=click.Path(file_okay=True, dir_okay=False, exists=True)
)
def run_predict(count, src):
"""Predict column types for CSV file(s)."""
for filename in src:
# This is a very basic attempt to determine the file compression and
        # delimiter from the suffix. Currently, the following four options are
        # recognized: '.csv', '.csv.gz', '.tsv', '.tsv.gz'. Files ending with
        # '.gz' are assumed to be compressed by 'gzip'; all other files are
        # considered uncompressed. The delimiter for '.csv' files is ',' and
# for '.tsv' files the delimiter is '\t'.
if filename.endswith('.csv'):
compression = None
delimiter = ','
elif filename.endswith('.csv.gz'):
compression = 'gzip'
delimiter = ','
elif filename.endswith('.tsv'):
compression = None
delimiter = '\t'
elif filename.endswith('.tsv.gz'):
compression = 'gzip'
delimiter = '\t'
else:
raise ValueError('unrecognized file format')
try:
df = pd.read_csv(
filename,
delimiter=delimiter,
compression=compression,
low_memory=False
)
rows = df.shape[0]
print('\n{}'.format(filename))
print('{}'.format('-' * len(filename)))
if rows == 0:
# Skip empty files.
continue
if rows > count:
# Take sample for large files.
df = df.sample(n=count, random_state=1)
# Evaluate data frame to get predicted coluumn labels.
labels = evaluate(df)
for i in range(len(df.columns)):
print('%s: %s' % (df.columns[i], labels[i]))
except Exception as ex:
print('error {}'.format(ex))
@click.group()
def cli(): # pragma: no cover
"""Command line interface for SATO."""
pass
cli.add_command(run_predict)
| [
"[email protected]"
] | |
f531d8e47a46f16095ff0a4522cfedaf5eca3518 | b8688a6c1824335808182768c3349624722abba6 | /uamqp/constants.py | 987bcaef27fd21d840f5b9e8ca36ca97fd73228c | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | gdooper/azure-uamqp-python | 65d64e19190921c16cc65947ddcb01f686cd4277 | 8a71c86c7598b439afea28f216a97437b3ebaaed | refs/heads/master | 2020-03-30T00:33:55.710726 | 2018-05-29T16:06:34 | 2018-05-29T16:06:34 | 150,530,862 | 0 | 0 | MIT | 2018-09-27T04:57:31 | 2018-09-27T04:57:31 | null | UTF-8 | Python | false | false | 3,876 | py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from enum import Enum
from uamqp import c_uamqp
DEFAULT_AMQPS_PORT = 5671
AUTH_EXPIRATION_SECS = c_uamqp.AUTH_EXPIRATION_SECS
AUTH_REFRESH_SECS = c_uamqp.AUTH_REFRESH_SECS
STRING_FILTER = b"apache.org:selector-filter:string"
OPERATION = b"operation"
READ_OPERATION = b"READ"
MGMT_TARGET = b"$management"
MESSAGE_SEND_RETRIES = 3
BATCH_MESSAGE_FORMAT = c_uamqp.AMQP_BATCH_MESSAGE_FORMAT
MAX_FRAME_SIZE_BYTES = c_uamqp.MAX_FRAME_SIZE_BYTES
MAX_MESSAGE_LENGTH_BYTES = c_uamqp.MAX_MESSAGE_LENGTH_BYTES
class MessageState(Enum):
WaitingToBeSent = 0
WaitingForAck = 1
Complete = 2
Failed = 3
DONE_STATES = (MessageState.Complete, MessageState.Failed)
class MessageReceiverState(Enum):
Idle = c_uamqp.MESSAGE_RECEIVER_STATE_IDLE
Opening = c_uamqp.MESSAGE_RECEIVER_STATE_OPENING
Open = c_uamqp.MESSAGE_RECEIVER_STATE_OPEN
Closing = c_uamqp.MESSAGE_RECEIVER_STATE_CLOSING
Error = c_uamqp.MESSAGE_RECEIVER_STATE_ERROR
class MessageSendResult(Enum):
Ok = c_uamqp.MESSAGE_SEND_OK
Error = c_uamqp.MESSAGE_SEND_ERROR
Timeout = c_uamqp.MESSAGE_SEND_TIMEOUT
Cancelled = c_uamqp.MESSAGE_SEND_CANCELLED
class MessageSenderState(Enum):
Idle = c_uamqp.MESSAGE_SENDER_STATE_IDLE
Opening = c_uamqp.MESSAGE_SENDER_STATE_OPENING
Open = c_uamqp.MESSAGE_SENDER_STATE_OPEN
Closing = c_uamqp.MESSAGE_SENDER_STATE_CLOSING
Error = c_uamqp.MESSAGE_SENDER_STATE_ERROR
class ManagementLinkState(Enum):
Ok = c_uamqp.AMQP_MANAGEMENT_OPEN_OK
Error = c_uamqp.AMQP_MANAGEMENT_OPEN_ERROR
Cancelled = c_uamqp.AMQP_MANAGEMENT_OPEN_CANCELLED
class ManagementOperationResult(Enum):
Ok = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_OK
Error = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_ERROR
BadStatus = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_FAILED_BAD_STATUS
Closed = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_INSTANCE_CLOSED
class Role(Enum):
Sender = c_uamqp.ROLE_SENDER
Receiver = c_uamqp.ROLE_RECEIVER
class SenderSettleMode(Enum):
Unsettled = c_uamqp.SENDER_SETTLE_MODE_UNSETTLED
Settled = c_uamqp.SENDER_SETTLE_MODE_SETTLED
Mixed = c_uamqp.SENDER_SETTLE_MODE_MIXED
class ReceiverSettleMode(Enum):
PeekLock = c_uamqp.RECEIVER_SETTLE_MODE_PEEKLOCK
ReceiveAndDelete = c_uamqp.RECEIVER_SETTLE_MODE_RECEIVEANDDELETE
class CBSOperationResult(Enum):
Ok = c_uamqp.CBS_OPERATION_RESULT_OK
Error = c_uamqp.CBS_OPERATION_RESULT_CBS_ERROR
Failed = c_uamqp.CBS_OPERATION_RESULT_OPERATION_FAILED
Closed = c_uamqp.CBS_OPERATION_RESULT_INSTANCE_CLOSED
class CBSOpenState(Enum):
Ok = c_uamqp.CBS_OPEN_COMPLETE_OK
Error = c_uamqp.CBS_OPEN_COMPLETE_ERROR
Cancelled = c_uamqp.CBS_OPEN_COMPLETE_CANCELLED
class CBSAuthStatus(Enum):
Ok = c_uamqp.AUTH_STATUS_OK
Idle = c_uamqp.AUTH_STATUS_IDLE
InProgress = c_uamqp.AUTH_STATUS_IN_PROGRESS
Timeout = c_uamqp.AUTH_STATUS_TIMEOUT
RefreshRequired = c_uamqp.AUTH_STATUS_REFRESH_REQUIRED
Expired = c_uamqp.AUTH_STATUS_EXPIRED
Error = c_uamqp.AUTH_STATUS_ERROR
Failure = c_uamqp.AUTH_STATUS_FAILURE
class MgmtExecuteResult(Enum):
Ok = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_OK
Error = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_ERROR
Failed = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_FAILED_BAD_STATUS
Closed = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_INSTANCE_CLOSED
class MgmtOpenStatus(Enum):
Ok = c_uamqp.AMQP_MANAGEMENT_OPEN_OK
Error = c_uamqp.AMQP_MANAGEMENT_OPEN_ERROR
Cancelled = c_uamqp.AMQP_MANAGEMENT_OPEN_CANCELLED
| [
"[email protected]"
] | |
b61e50e76ad27bc63647d402ed7b18c3b7bc2aae | 9d1701a88644663277342f3a12d9795cd55a259c | /CSC148/07 Sorting/runtime.py | 6d1020dee852cd090d7eccdd33874dd33c64eccf | [] | no_license | xxcocoymlxx/Study-Notes | cb05c0e438b0c47b069d6a4c30dd13ab97e4ee6d | c7437d387dc2b9a8039c60d8786373899c2e28bd | refs/heads/master | 2023-01-13T06:09:11.005038 | 2020-05-19T19:37:45 | 2020-05-19T19:37:45 | 252,774,764 | 2 | 0 | null | 2022-12-22T15:29:26 | 2020-04-03T15:44:44 | Jupyter Notebook | UTF-8 | Python | false | false | 3,989 | py | VIDEO:
https://www.youtube.com/watch?v=6Ol2JbwoJp0
NOTES ON THE PDF:
def max_segment_sum(L):
'''(list of int) -> int
Return maximum segment sum of L.
'''
max_so_far = 0
for lower in range(len(L)):
for upper in range(lower, len(L)):
sum = 0
for i in range(lower, upper + 1):
sum = sum + L[i]
max_so_far = max(max_so_far, sum)
return max_so_far
What is the running time of this algorithm? We want an answer in terms of n, not clock time
I want you to find the statement that executes most often; count the number of times that it runs
Statement that runs most often is one in the inner-most loop.
sum = sum + L[i]
Now let's upper-bound the number of times that this statement runs
lower loop runs n times.
Upper loop runs at most n times for each iteration of the lower loop
i loop runs at most n iterations for each iteration of the upper loop.
Now we can upper-bound the total number of times that the inner-most statement runs.
At most n*n*n = n^3
So we have an n^3 algorithm.
More precise: 2+2n^2+n^3 steps
Is it worth it? Or should we just stick to n^3
Prove that 2+2n^2+n^3 is O(n^3).
This means that we have to show 2+2n^2+n^3 is eventually <= kn^3 for some k > 0.
2+2n^2+n^3
<= 2n^3+2n^2+n^3
= 3n^3+2n^2
<= 3n^3+2n^3
= 5n^3
This is our proof that 2+2n^2+n^3 is O(n^3).
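A quick numeric sanity check of that bound (k = 5 works for every n >= 1):
for n in [1, 2, 10, 100, 1000]:
    assert 2 + 2*n**2 + n**3 <= 5 * n**3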
----------
We know that the segment-sum code is O(n^3).
Is the code O(n^4) too? Yes
Is it O(n^5)? Yes
Is it O(2^n)? Yes
Is it O(n^2)? No
Big oh is an upper bound. If you make it worse (e.g. n^3 to n^4), it's just a worse upper bound. Still technically correct though.
But I want the most accurate bound; lowest upper bound.
----------
I'd like the big oh runtime for the following function.
O(1), O(log n), O(n), O(n log n), O(n^2), O(n^3), ... O(2^n)...
-I want the worst-case upper bound
def bigoh1(n):
sum = 0
for i in range(100, n):
sum = sum+1
print(sum)
It's O(n). It takes something like n-100 steps, which you can prove is O(n)!
----------
Let's do an ordering of best (fastest) to worst (slowest) algorithm efficiencies:
The best one is O(1). Constant-time algorithm
No matter how big your input, your runtime does not increase.
Example:
def f(n):
print('hello world')
-Return the first element of a list.
-Return the maximum of two characters.
Between constant and linear is O(log n)
Example: binary search
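A sketch of binary search (mine, not from the lecture), to see where the log
comes from -- every comparison halves the remaining range:
def binary_search(L, target):
    lo, hi = 0, len(L) - 1
    while lo <= hi:
        mid = (lo + hi) // 2      # middle of the current range
        if L[mid] == target:
            return mid
        elif L[mid] < target:
            lo = mid + 1          # discard the left half
        else:
            hi = mid - 1          # discard the right half
    return -1                     # target not in L
The range shrinks n -> n/2 -> n/4 -> ... so there are at most about log2(n) iterations.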
Getting worse...
O(n), linear algorithm.
-Printing all elements in a list
-finding the maximum element in a list
A little bit worse is O(n log n)
Examples: quicksort (on average), mergesort
Slower is O(n^2): bubble sort, insertion sort, selection sort
Slower is O(n^3): maximum segment sum code
Slower is O(n^4), O(n^5)...
...
Eventually you get so bad that you can't even use them in practice
O(2^n). As n increases by 1, you double the amount of time you take
Even worse...
O(n!). Like the permutation approach to finding all anagrams
O(n^n)
Huge difference between O(n^k) polynomials and O(k^n) exponential functions.
O(n^2) and O(2^n): very different.
O(n^2) is computable for reasonable-sized input; O(2^n) is not.
----------
I'd like the big oh runtime for each of these functions.
e.g. O(1), O(log n), O(n), O(n log n), O(n^2), O(n^3), ... O(2^n)...
-I want the worst-case upper bound
def bigoh1(n):
sum = 0
for i in range(100, n):
sum = sum+1
print(sum)
O(n)
def bigoh2(n):
sum = 0
for i in range(1, n // 2):
sum = sum + 1
for j in range(1, n * n):
sum = sum + 1
print(sum)
First loop is n steps, second is n^2 steps.
n+n^2 = o(n^2)
def bigoh3(n):
sum = 0
if n % 2 == 0:
for j in range(1, n * n):
sum = sum + 1
else:
for k in range(5, n + 1):
sum = sum + k
print(sum)
If n is even, we do n^2 work. If n is odd, we do n work.
Remember that we want the worst-case.
O(n^2)
def bigoh4(m, n):
sum = 0
for i in range(1, n + 1):
for j in range(1, m + 1):
sum = sum + 1
print(sum)
O(n*m)
Not O(n^2). Not O(m^2).
| [
"[email protected]"
] | |
dd7a3ac6d291dc2db98817190f8813c458576953 | 66dd570bf5945dcbd183ed3c0cf897c0359cbccd | /python/python语法/pyexercise/Exercise03_09.py | 4560a8df9de30b98aa5d9640c98b118b4dc4a3be | [] | no_license | SamJ2018/LeetCode | 302cc97626220521c8847d30b99858e63fa509f3 | 784bd0b1491050bbd80f5a0e2420467b63152d8f | refs/heads/master | 2021-06-19T10:30:37.381542 | 2021-02-06T16:15:01 | 2021-02-06T16:15:01 | 178,962,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | # Obtain input
name = input("Enter employee's name: ")
hours = eval(input("Enter number of hours worked in a week: "))
payRate = eval(input("Enter hourly pay rate: "))
fedTaxWithholdingRate = eval(input("Enter federal tax withholding rate: "))
stateTaxWithholdingRate = eval(input("Enter state tax withholding rate: "))
grossPay = hours * payRate
fedTaxWithholding = grossPay * fedTaxWithholdingRate
stateTaxWithholding = grossPay * stateTaxWithholdingRate
totalDeduction = fedTaxWithholding + stateTaxWithholding
netPay = grossPay - totalDeduction
# Obtain output
out = "Employee Name: " + name + "\n\n"
out += "Hours Worked: " + str(hours) + '\n'
out += "Pay Rate: $" + str(payRate) + '\n'
out += "Gross Pay: $" + str(grossPay) + '\n'
out += "Deductions:\n"
out += "  State Withholding (" + str(stateTaxWithholdingRate * 100) + "%):" + \
       " $" + str(int(stateTaxWithholding * 100) / 100.0) + '\n'
out += " State Withholding (" + str(stateTaxWithholdingRate * 100) + "%):" + \
" $" + str(int(stateTaxWithholding * 100) / 100.0) + '\n';
out += " Total Deduction:" + " $" + \
str(int(totalDeduction * 100) / 100.0) + '\n'
out += "Net Pay:" + " $" + str(int(netPay * 100) / 100.0)
print(out)
| [
"[email protected]"
] | |
e3ede7d4acdd774e7b8621e60be2e1b12dc0f0e1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02845/s251805975.py | a8e1b9dedbc87deeb6d7dd5ca8fac2fa7aa26e80 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
def main():
n = int(readline())
a = list(map(int, readline().split()))
cnt = [0] * 3
ans = 1
for x in a:
p = cnt.count(x)
if p == 0:
return print(0)
ans *= p
ans %= MOD
cnt[cnt.index(x)] += 1
print(ans)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d41c69e29c794cbabb1c2e1f208a21b4bf0f2f48 | 0e8b6f94467c25dd2440f7e2ea1519244e689620 | /MarlinJobs/CalibrationConfigFiles/Stage27Config_5x5_30x30.py | 3435a6f9a3f6a73455fa0470d23dcbb790425599 | [] | no_license | StevenGreen1/HighEnergyPhotonAnalysis | 97a661eaca2efd00472f1969855c724c9d505369 | 8a82ac57f56aad5bdbe99d4a5afb771592bc1725 | refs/heads/master | 2021-01-10T14:08:50.550184 | 2015-10-12T12:43:47 | 2015-10-12T12:43:47 | 43,491,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # Calibration config file for testing
# Digitisation Constants - ECal
CalibrECal = 42.4326603502
# Digitisation Constants ILDCaloDigi - HCal
CalibrHCalBarrel = 49.057884929
CalibrHCalEndcap = 54.1136311832
CalibrHCalOther = 29.2180288685
# Digitisation Constants NewLDCCaloDigi - HCal
CalibrHCal = -1
# Digitisation Constants - Muon Chamber
CalibrMuon = 56.7
# MIP Peak position in directed corrected SimCaloHit energy distributions
# used for realistic ECal and HCal digitisation options
CalibrECalMIP = -1
CalibrHCalMIP = 0.0004925
# MIP Peak position in directed corrected CaloHit energy distributions
# used for MIP definition in PandoraPFA
ECalToMIPCalibration = 158.73
HCalToMIPCalibration = 40.8163
MuonToMIPCalibration = 10.101
# EM and Had Scale Settings
ECalToEMGeVCalibration = 1.00062269867
HCalToEMGeVCalibration = 1.00062269867
ECalToHadGeVCalibration = 1.08773337955
HCalToHadGeVCalibration = 1.04823493932
# Pandora Threshold Cuts
ECalMIPThresholdPandora = 0.5
HCalMIPThresholdPandora = 0.3
# Hadronic Energy Truncation in HCal PandoraPFA
MaxHCalHitHadronicEnergy = 1000000.0
# Timing ECal
ECalBarrelTimeWindowMax = 1000000.0
ECalEndcapTimeWindowMax = 1000000.0
# Timing HCal
HCalBarrelTimeWindowMax = 1000000.0
HCalEndcapTimeWindowMax = 1000000.0
| [
"[email protected]"
] | |
8eff0f0a7ccda0cc6e4779d87cd907c9f72549f8 | f04fb8bb48e38f14a25f1efec4d30be20d62388c | /哈希表/204. 计数质数.py | 2bd3e79467b7525a3d7e1a7e82f4074be703fff9 | [] | no_license | SimmonsChen/LeetCode | d8ef5a8e29f770da1e97d295d7123780dd37e914 | 690b685048c8e89d26047b6bc48b5f9af7d59cbb | refs/heads/master | 2023-09-03T01:16:52.828520 | 2021-11-19T06:37:19 | 2021-11-19T06:37:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | """
Count the prime numbers strictly less than the non-negative integer n.
Example 1:
Input: n = 10
Output: 4
Explanation: There are 4 primes less than 10, namely 2, 3, 5 and 7.
"""
from math import sqrt
class Solution(object):
    # The task: count the primes in [2, n), i.e. those strictly less than n
def countPrimes(self, n):
"""
:type n: int
:rtype: int
"""
if n < 2:
return 0
        # Sieve of Eratosthenes: assume every number is prime until a smaller prime crosses it off
isPrim = [True] * n
isPrim[0] = False
res = 0
for i in range(2, n):
if isPrim[i]:
res += 1
for j in range(i * i, n, i):
isPrim[j] = False
return res
if __name__ == '__main__':
s = Solution()
print(s.countPrimes(10))
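    # Quick cross-check against a naive primality test (an illustrative
    # addition, not part of the original solution).
    def naive_count(m):
        def is_prime(k):
            return k > 1 and all(k % d for d in range(2, int(k ** 0.5) + 1))
        return sum(1 for k in range(2, m) if is_prime(k))
    assert all(s.countPrimes(m) == naive_count(m) for m in range(50))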
| [
"[email protected]"
] | |
6ffe2a06880751514bb23ef6b2258b10f8257c43 | 14d7f5f83b6f84871ff6ebfa0af4c17b7115a33f | /remote_sensing/MODIS_data_test_v3.py | 1f15cb363abab3ce4c3e8caedc88d88198bb5e8d | [] | no_license | tonychangmsu/Python_Scripts | 8ca7bc841c94dcab36743bce190357ac2b1698a5 | 036f498b1fc68953d90aac15f0a5ea2f2f72423b | refs/heads/master | 2016-09-11T14:32:17.133399 | 2016-03-28T16:34:40 | 2016-03-28T16:34:40 | 10,370,475 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,468 | py | #Title: MODIS_data_test.py
#Author: Tony Chang
#Abstract: Test for opening MODIS data and examining the various bands
#Creation Date: 04/14/2015
#Modified Dates: 01/20/2016, 01/26/2016, 01/28/2016, 01/29/2016, 02/01/2016
#local directory : K:\\NASA_data\\scripts
import numpy as np
import matplotlib.pyplot as plt
import os
os.chdir("K:\\NASA_data\\scripts")
import time
import MODIS_acquire as moda
import MODIS_tassel_cap as tas
import MODIS_process as mproc
import tiff_write as tw
#MODIS file name as
# 7 char (product name .)
# 8 char (A YYYYDDD .)
# 6 char (h XX v YY .) #tile index
# 3 char (collection version .) #typically 005
# 14 char (julian date of production YYYYDDDHHMMSS)
if __name__ == "__main__":
start = time.time()
#since we have the date, let's try to get all the data from that date together.
htile = 9
vtile = 4
factor = 0.0001
year = 2000
#we would iterate through the year
begin_year = 2000
end_year = 2015
wd = 'G:\\NASA_remote_data\\MOD09A1'
mod_list, mod_dates = moda.mod_file_search(wd, year, True)
#then iterate through theses list values
scene = 0
mod_data, dnames = moda.mod_acquire_by_file(mod_list[scene]) #this is the full dataset
band_query = 1
#get the files needed
files_to_mosaic = moda.mod_date_dataset_list(wd, mod_dates[scene])
nonproj_mosaics = mproc.mosaic_files(files_to_mosaic, reproj = False)
reproj_mosaics = mproc.mosaic_files(files_to_mosaic, reproj = True, method = 0)
#inspect the cloud effects on the nonproj and reproj mosaics
#looks like it comes from band 5! 1230-1250, ,Leaf/Canopy Differences
#not much can be done about that if this is prevalent. In the mean time, we should just implement
#the processing and use the QC to fix the problem
#at this point we would like to transform the data. Then we can apply the reprojection
#need to be careful here, do we reproject before transform or after? before...
transformed = tas.tassel_cap_transform(nonproj_mosaics[:7]) #don't want to include the qc data
#check out the tasseled_cap again. getting some striping for some reason.
tw.tiff_write_gdal(transformed[0], 'K:\\NASA_data\\test\\test_clip.tif')
tw.tiff_write(out, x_size, y_size, cell_size, ymax, xmin, 'K:\\NASA_data\\test\\test_clip.tif')
#tas_array = moda.datasets_to_array(transformed, False)
#find the bounding box by the netCDF from TOPOWX
#GYE coordinates
xmin = -112.39583333837999 #112 23 45
xmax = -108.19583334006 #108 11 45
ymin = 42.279166659379996 #42 16 45
ymax = 46.195833324479999 #46 11 45
aoa = [xmin, xmax, ymin, ymax]
clip = mproc.clip_wgs84_scene(aoa, transformed[0])
#some problems with the reprojection process?
#NO..getting some strange stripe artifacts from the tasselled cap, but could be inherant in the MOD09 data itself...
#all this works now. So now perform this for all the MODIS data and store it in a netCDF4 file that
#is continuous for each year.
#write the file to check it out
tw.tiff_write(clip, np.shape(clip)[1], np.shape(clip)[0], cell_size, ymax, xmin, 'K:\\NASA_data\\test\\', 'test_clip.tif')
#now just write this function for netCDF4
#then save to a netCDF4 file
#then repeat for all the data.
end = time.time()
print('run time :%s'%(end-start)) #takes about 25-30 seconds
'''
mproc.plot_refl(mod_array)
#plot all the reflectances
#see which is faster
import time
start = time.time()
b,g,w = tas.tassel_cap_transform(mod_array)
end = time.time()
mproc.plot_tassel_cap(b,g,w)
'''
| [
"[email protected]"
] | |
6c88d27d3b37ee3630d08d1654d8b7b2c1a7f640 | dce7ca1ebab403bf7c23b77368ee26a2dd4475b6 | /tests/test_cos.py | cd57475224ee19e74c5d9fa421f172e8a7f9fb4b | [] | no_license | qcymkxyc/Graduate | 3b7e89b3f44141d9fd011c15690f902674a9e979 | 2afedacaaa3a0f4d9bbc13596d967ec8808d43d6 | refs/heads/master | 2022-12-10T12:32:37.326653 | 2018-11-10T07:49:13 | 2018-11-10T07:49:16 | 148,103,320 | 0 | 0 | null | 2022-12-08T01:14:09 | 2018-09-10T05:25:40 | Python | UTF-8 | Python | false | false | 317 | py | import unittest
from app.util import cos
class COSTestCase(unittest.TestCase):
"""
    Tencent Cloud COS tests
"""
def test_cos_upload(self):
"""
        Test uploading a binary file to Tencent Cloud COS
"""
cos.upload_binary_file(b"abcde","login_success.txt")
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
d20be627a406e2379a3cd53a20a70ac4b5852db4 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902Combine/284-tideGauge.py | 255f5e1573a5a697bd3fef71c7b6f3022772b778 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 284
y = 285
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
tg_name = str(tg)+"_"+tg_name;
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"[email protected]"
] | |
5833e03ed33a8ec7549369840b1fa07513ad8d85 | 4cb40963ebc95a9e4cdd5725ac4ae882594a363d | /tests/influence/_core/test_tracin_self_influence.py | 0f327ce3fbc6230024bf4d2190c00f2750105f8c | [
"BSD-3-Clause"
] | permissive | NarineK/captum-1 | 59592277aed8c97dd8effed4af953676381d50c8 | a08883f1ba3abc96ace06b11883893419b187d09 | refs/heads/master | 2022-12-23T22:39:50.502939 | 2022-08-01T16:30:43 | 2022-08-01T16:30:43 | 215,140,394 | 1 | 0 | null | 2019-10-14T20:36:19 | 2019-10-14T20:36:19 | null | UTF-8 | Python | false | false | 5,906 | py | import tempfile
from typing import Callable
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import TracInCPFast
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInSelfInfluence(BaseTest):
@parameterized.expand(
[
(reduction, constructor, unpack_inputs)
for unpack_inputs in [True, False]
for (reduction, constructor) in [
("none", DataInfluenceConstructor(TracInCP)),
(
"sum",
DataInfluenceConstructor(
TracInCP,
name="TracInCPFastRandProjTests",
sample_wise_grads_per_batch=True,
),
),
("sum", DataInfluenceConstructor(TracInCPFast)),
("mean", DataInfluenceConstructor(TracInCPFast)),
]
],
name_func=build_test_name_func(),
)
def test_tracin_self_influence(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=False)
# compute tracin_scores of training data on training data
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# calculate influence scores, using the training data as the test batch
train_scores = tracin.influence(
train_dataset.samples,
train_dataset.labels,
k=None,
unpack_inputs=unpack_inputs,
)
# calculate self_tracin_scores
self_tracin_scores = tracin.self_influence(
DataLoader(train_dataset, batch_size=batch_size),
outer_loop_by_checkpoints=False,
)
# check that self_tracin scores equals the diagonal of influence scores
assertTensorAlmostEqual(
self,
torch.diagonal(train_scores),
self_tracin_scores,
delta=0.01,
mode="max",
)
# check that setting `outer_loop_by_checkpoints=False` and
# `outer_loop_by_checkpoints=True` gives the same self influence scores
self_tracin_scores_by_checkpoints = tracin.self_influence(
DataLoader(train_dataset, batch_size=batch_size),
outer_loop_by_checkpoints=True,
)
assertTensorAlmostEqual(
self,
self_tracin_scores_by_checkpoints,
self_tracin_scores,
delta=0.01,
mode="max",
)
@parameterized.expand(
[
(reduction, constructor, unpack_inputs)
for unpack_inputs in [True, False]
for (reduction, constructor) in [
("none", DataInfluenceConstructor(TracInCP)),
(
"sum",
DataInfluenceConstructor(
TracInCP,
sample_wise_grads_per_batch=True,
),
),
("sum", DataInfluenceConstructor(TracInCPFast)),
("mean", DataInfluenceConstructor(TracInCPFast)),
]
],
name_func=build_test_name_func(),
)
def test_tracin_self_influence_dataloader_vs_single_batch(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
# tests that the result of calling the public method `self_influence` for a
# DataLoader of batches is the same as when the batches are collated into a
# single batch
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=False)
# create a single batch representing the entire dataset
single_batch = next(
iter(DataLoader(train_dataset, batch_size=len(train_dataset)))
)
# create a dataloader that yields batches from the dataset
dataloader = DataLoader(train_dataset, batch_size=5)
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# compute self influence using `self_influence` when passing in a single
# batch
single_batch_self_influence = tracin.self_influence(single_batch)
# compute self influence using `self_influence` when passing in a
# dataloader with the same examples
dataloader_self_influence = tracin.self_influence(dataloader)
# the two self influences should be equal
assertTensorAlmostEqual(
self,
single_batch_self_influence,
dataloader_self_influence,
delta=0.01, # due to numerical issues, we can't set this to 0.0
mode="max",
)
| [
"[email protected]"
] | |
c6ae34b2b23ff9afcccd235018498cdb235efb99 | 6f0e74cdc81f78ffc5dbc1b2db1cef8cbec950c4 | /aws_interface/cloud/logic/delete_function_test.py | 7a62e2c7c9241aa10726b393c1fa616aa7aa066f | [
"Apache-2.0"
] | permissive | hubaimaster/aws-interface | 125b3a362582b004a16ccd5743d7bdff69777db5 | 5823a4b45ffb3f7b59567057855ef7b5c4c4308d | refs/heads/master | 2023-01-19T15:43:38.352149 | 2023-01-12T01:38:00 | 2023-01-12T01:38:00 | 149,847,881 | 57 | 10 | Apache-2.0 | 2023-01-12T01:39:49 | 2018-09-22T05:17:43 | JavaScript | UTF-8 | Python | false | false | 742 | py |
from cloud.permission import Permission, NeedPermission
# Define the input output format of the function.
# This information is used when creating the *SDK*.
info = {
'input_format': {
'test_name': 'str',
},
'output_format': {
'success': 'bool',
}
}
@NeedPermission(Permission.Run.Logic.delete_function_test)
def do(data, resource):
partition = 'logic-function-test'
body = {}
params = data['params']
test_name = params.get('test_name')
items, _ = resource.db_query(partition, [{'option': None, 'field': 'test_name', 'value': test_name, 'condition': 'eq'}])
for item in items:
success = resource.db_delete_item(item['id'])
body['success'] = success
return body
| [
"[email protected]"
] | |
6291a6042041500296cbde2708740f0bf984e374 | 0bb3bc8eea74d316377bb1f88a8600162d83d98a | /test_demo/dianping_food_top100.py | ddf32f2ecd1973f9a3ea2ec62336876b0d284b9a | [] | no_license | WangYongjun1990/spider | 10a1f03c26a083b8a1b5e25a9180f69d50994d73 | f13d756790a19d1465624f6c8b1f0ecb87870f51 | refs/heads/master | 2020-03-08T09:16:08.748865 | 2018-04-16T01:54:26 | 2018-04-16T01:54:26 | 128,042,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | # -*- coding:utf-8 -*-
"""
File Name: `test_dianping_top100`.py
Version:
Description: Scrape the 100 top-rated restaurants in Nanjing; source page: http://www.dianping.com/shoplist/search/5_10_0_score
Author: wangyongjun
Date: 2018/4/13 11:45
"""
import requests
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
}
def dianping_food_top100():
url = 'http://www.dianping.com/mylist/ajax/shoprank?cityId=5&shopType=10&rankType=score&categoryId=0'
try:
r = requests.get(url, headers=headers, timeout=10, proxies=None, verify=False)
# print r.text
except Exception as e:
print e
shop_list = r.json().get('shopBeans')
print shop_list
print type(shop_list), len(shop_list)
for shop_dict in shop_list:
print shop_dict['shopName'], shop_dict['score1'], shop_dict['score2'], shop_dict['score3'], shop_dict['avgPrice']
if __name__ == "__main__":
dianping_food_top100() | [
"[email protected]"
] | |
4a1fc4dc9297f3161f4f30e0492a815011a04b8c | 747012e5b750cdc67748798c09b3ce1eb819568f | /strategy/migrations/0002_auto_20170703_1645.py | 3a98d12dd70048ac2070500f701c0c01dc044e67 | [
"MIT"
] | permissive | moshthepitt/probsc | da30c3829d5b8bf42804950320f006c78d2b94aa | 9b8cab206bb1c41238e36bd77f5e0573df4d8e2d | refs/heads/master | 2020-06-06T11:46:05.573933 | 2018-01-10T20:42:51 | 2018-01-10T20:42:51 | 192,730,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-03 13:45
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('strategy', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='objective',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='children', to='strategy.Objective', verbose_name='Contributes to'),
),
]
| [
"[email protected]"
] | |
0f4787e023609643731531af8d73e021450dd660 | ca4e57a6861f1e24d1521bf5b775aee3b6db7725 | /bitonic.py | 47601639559708727cbcf4862e71d39937310f86 | [] | no_license | mathi98/madhu | e296a477f3684a596c74a228c9ce867f1f60c3f8 | cae2adb19ccf7c7f12212d694cd0d09614cd5d81 | refs/heads/master | 2020-05-23T01:06:54.830389 | 2019-06-28T14:13:07 | 2019-06-28T14:13:07 | 186,582,298 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | k=int(input())
a=[int(x) for x in input().split()]
print(max(a))
| [
"[email protected]"
] | |
9fb6a68ceb3cf80621af5ba80af61427c4540b14 | e1450725c9637e15709064aaa48bc4e053a213d5 | /tests/test_funcptrdecl.py | a4d3a4d89874a4fe3280f0584e431cc6717bed5d | [] | no_license | gotoc/PyCParser-1 | 9d4e4c40a8c24923a689b1a0e3ebd4f07528d75b | b00cdd67a688792c0bc49b383a36199c50cc5cf2 | refs/heads/master | 2021-01-20T10:54:25.196102 | 2014-09-11T12:27:29 | 2014-09-11T12:27:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py | import sys
sys.path += [".."]
from pprint import pprint
from cparser import *
import test
testcode = """
int16_t (*f)();
int16_t (*g)(char a, void*);
int (*h);
// ISO/IEC 9899:TC3 : C99 standard
int fx(void), *fip(), (*pfi)(); // example 1, page 120
int (*apfi[3])(int *x, int *y); // example 2, page 120
int (*fpfi(int (*)(long), int))(int, ...); // example 3, page 120
"""
state = test.parse(testcode)
f = state.vars["f"]
g = state.vars["g"]
assert f.name == "f"
assert isinstance(f.type, CFuncPointerDecl)
assert f.type.type == CStdIntType("int16_t")
assert f.type.args == []
assert isinstance(g.type, CFuncPointerDecl)
gargs = g.type.args
assert isinstance(gargs, list)
assert len(gargs) == 2
assert isinstance(gargs[0], CFuncArgDecl)
assert gargs[0].name == "a"
assert gargs[0].type == CBuiltinType(("char",))
assert gargs[1].name is None
assert gargs[1].type == CBuiltinType(("void","*"))
h = state.vars["h"]
assert h.type == CPointerType(CBuiltinType(("int",)))
fx = state.funcs["fx"] # fx is a function `int (void)`
assert fx.type == CBuiltinType(("int",))
assert fx.args == []
fip = state.funcs["fip"] # fip is a function `int* (void)`
assert fip.type == CPointerType(CBuiltinType(("int",)))
assert fip.args == []
pfi = state.vars["pfi"] # pfi is a function-ptr to `int ()`
assert isinstance(pfi.type, CFuncPointerDecl)
assert pfi.type.type == CBuiltinType(("int",))
assert pfi.type.args == []
apfi = state.vars["apfi"] # apfi is an array of three function-ptrs `int (int*,int*)`
# ...
fpfi = state.funcs["fpfi"] # function which returns a func-ptr
# the function has the parameters `int(*)(long), int`
# the func-ptr func returns `int`
# the func-ptr func has the parameters `int, ...`
| [
"[email protected]"
] | |
a25245a35cacaea636067ccaec32d3b7094f710e | e5c9fc4dc73536e75cf4ab119bbc642c28d44591 | /src/leetcodepython/math/hamming_distance_461.py | 6ee39b31c590979bec6f64edd79227ce8fd40f94 | [
"MIT"
] | permissive | zhangyu345293721/leetcode | 0a22034ac313e3c09e8defd2d351257ec9f285d0 | 50f35eef6a0ad63173efed10df3c835b1dceaa3f | refs/heads/master | 2023-09-01T06:03:18.231266 | 2023-08-31T15:23:03 | 2023-08-31T15:23:03 | 163,050,773 | 101 | 29 | null | 2020-12-09T06:26:35 | 2018-12-25T05:58:16 | Java | UTF-8 | Python | false | false | 1,473 | py | # encoding='utf-8'
'''
/**
 * This is the solution of problem No. 461 in LeetCode;
 * the website of the problem is as follows:
 * https://leetcode-cn.com/problems/hamming-distance/
 * <p>
 * The description of the problem is as follows:
 * ==========================================================================================================
 * The Hamming distance between two integers is the number of bit positions
 * at which the corresponding bits differ.
 * <p>
 * Given two integers x and y, compute the Hamming distance between them.
 * <p>
 * Note:
 * 0 ≤ x, y < 2^31.
 * <p>
 * Example:
 * <p>
 * Input: x = 1, y = 4
 * <p>
 * Output: 2
 * <p>
 * Explanation:
 * 1 (0 0 0 1)
 * 4 (0 1 0 0)
 * ↑ ↑
 * <p>
 * The arrows above mark the bit positions where the corresponding bits differ.
 * <p>
 * Source: LeetCode (leetcode-cn.com)
* ==========================================================================================================
*
* @author zhangyu ([email protected])
*/'''
class Solution:
def hamming_distance(self, x: int, y: int) -> int:
'''
        Hamming distance between two integers
        Args:
            x: first integer
            y: second integer
        Returns:
            the number of bit positions where x and y differ
'''
c = x ^ y
res = 0
while c > 0:
res += (c & 1)
c = c >> 1
return res
if __name__ == '__main__':
x = 1
y = 4
solution = Solution()
res = solution.hamming_distance(x, y)
print(res)
assert res == 2
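    # For comparison (a standard-library sketch, not part of the original
    # solution): bin(x ^ y) is the XOR written as a binary string, so counting
    # its '1' characters counts the differing bit positions.
    assert bin(x ^ y).count('1') == res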
| [
"[email protected]"
] | |
3637a41ea27d8219504f33dd65eda2ea0971739d | dd256415176fc8ab4b63ce06d616c153dffb729f | /aditya-works-feature-python_programming (1)/aditya-works-feature-python_programming/Assigment_5_01-Aug-2019/Assigment_5_5.py | 24aa63c26add06b9baeb2c0235963e5db861b091 | [] | no_license | adityapatel329/python_works | 6d9c6b4a64cccbe2717231a7cfd07cb350553df3 | 6cb8b2e7f691401b1d2b980f6d1def848b0a71eb | refs/heads/master | 2020-07-24T17:15:39.839826 | 2019-09-12T07:53:28 | 2019-09-12T07:53:28 | 207,993,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | def accept():
name = input("Enter your string : ")
val= []
for i in name:
val.append(ord(i))
print(sum(val)/len(val))
accept()
| [
"[email protected]"
] | |
599f4edbf8bbbcf5be1ba76d41791b9964071018 | 35a6f5a26ea97ebed8ab34619a8eec51719d2cc0 | /Python_Basic/17 文件操作/5 seek函数.py | 115eb71e6b1003cafcc78f9afeea357211ceaa76 | [] | no_license | PandaCoding2020/pythonProject | c3644eda22d993b3b866564384ed10441786e6c5 | 26f8a1e7fbe22bab7542d441014edb595da39625 | refs/heads/master | 2023-02-25T14:52:13.542434 | 2021-02-03T13:42:41 | 2021-02-03T13:42:41 | 331,318,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | """
Syntax: file_object.seek(offset, whence) -- whence: 0 = start of file, 1 = current position, 2 = end of file
Goals:
1. 'r' mode -- move the read pointer: change where reading starts, or move it to the end (after which nothing can be read)
2. 'a' mode -- move the read pointer so that the data can actually be read
"""
# 1.1 Change where reading starts
# f.seek(2, 0)
# 1.2 Move the file pointer to the end (nothing left to read)
# f.seek(0, 2)
# f = open('test.txt', 'r+')
# f.seek(2, 0)
# con = f.read()
# print(con)
#
# f.close()
# 2. In 'a+' mode the pointer starts at the end of the file; seek back to the start so the data can be read
f = open('test.txt', 'a+')
f.seek(0, 0)
con = f.read()
print(con)
f.close()
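# Extra note (an addition, not part of the original lesson): in Python 3,
# text-mode files only allow seeks relative to the start (or seek(0, 2) to
# jump to the end); relative or negative offsets require binary mode.
# Assumes test.txt exists and holds at least two bytes:
with open('test.txt', 'rb') as fb:
    fb.seek(-2, 2)  # two bytes before the end of the file
    print(fb.read())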
| [
"[email protected]"
] | |
ca0312e44c689d8a119737d9102edca66c6d0e32 | 757433be241afbff1c138d77daf13397f858aef3 | /scorpio/urls.py | 166247c53f8b21e7f1bf3184baad8bf10b8db329 | [
"MIT"
] | permissive | RockefellerArchiveCenter/scorpio | 1f9d152bb440bb98c007f652fa644602e3b8b483 | f308cac3880ba9008d3aadfdc66a4062d4d27492 | refs/heads/base | 2023-08-20T22:34:32.085492 | 2023-08-07T17:00:58 | 2023-08-07T17:00:58 | 215,400,734 | 0 | 1 | MIT | 2023-09-08T21:09:13 | 2019-10-15T21:33:10 | Python | UTF-8 | Python | false | false | 1,601 | py | """scorpio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from asterism.views import PingView
from django.contrib import admin
from django.urls import include, re_path
from rest_framework.schemas import get_schema_view
from indexer.views import (IndexAddView, IndexDeleteView, IndexResetView,
IndexRunViewSet)
from .routers import ScorpioRouter
router = ScorpioRouter()
router.register(r'index-runs', IndexRunViewSet, 'indexrun')
schema_view = get_schema_view(
title="Scorpio API",
description="Endpoints for Scorpio microservice application."
)
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'^index/add/', IndexAddView.as_view(), name='index-add'),
re_path(r'^index/delete/', IndexDeleteView.as_view(), name='index-delete'),
re_path(r'^index/reset/', IndexResetView.as_view(), name='index-reset'),
re_path(r'^status/', PingView.as_view(), name='ping'),
re_path(r'^schema/', schema_view, name='schema'),
re_path(r'^', include(router.urls)),
]
| [
"[email protected]"
] | |
f428c560237217ad3f5dd49edbabd5734a5b4eff | 0a679896fbe96a8a0a59ad9f4f55edb4aa044a93 | /Duplicate File Handler/task/handler.py | 040a40e81fc3f6eef361f3690d7a85ad20d01559 | [] | no_license | TogrulAga/Duplicate-File-Handler | 5b7bd9c9508ae3ee96751bc3e56ebaccc44c46f9 | 66fef381572c0e6697330463b0b720c2dbca82e6 | refs/heads/master | 2023-06-30T07:07:24.524591 | 2021-08-06T15:47:00 | 2021-08-06T15:47:00 | 393,424,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,500 | py | import os
import argparse
import hashlib
class FileHandler:
def __init__(self, directory):
self.directory = directory
self.file_format = None
self.sorting_option = None
self.files_dict = dict()
self.dict_items = None
self.numbered_dict = dict()
self.get_format()
self.get_sorting_option()
self.walk_dir()
self.list_same_sized_files()
self.check_duplicates()
self.delete_files()
def get_format(self):
self.file_format = input("Enter file format:\n")
def get_sorting_option(self):
print("Size sorting options:")
print("1. Descending")
print("2. Ascending\n")
while True:
self.sorting_option = int(input("Enter a sorting option:\n"))
print()
if self.sorting_option not in (1, 2):
print("\nWrong option\n")
else:
break
def walk_dir(self):
for root, directories, filenames in os.walk(self.directory):
for file in filenames:
if self.file_format != "":
if self.file_format != os.path.splitext(file)[-1].split(".")[-1]:
continue
file_path = os.path.join(root, file)
file_size = os.path.getsize(file_path)
if file_size in self.files_dict.keys():
self.files_dict[file_size].append(file_path)
else:
self.files_dict[file_size] = [file_path]
def list_same_sized_files(self):
if self.sorting_option == 1:
dict_items = list(reversed(sorted(self.files_dict.items())))
elif self.sorting_option == 2:
dict_items = sorted(self.files_dict.items())
for size, files in dict_items:
print(f"{size} bytes")
for file in files:
print(file)
print()
self.dict_items = dict_items
def check_duplicates(self):
while True:
answer = input("Check for duplicates?\n")
if answer not in ("yes", "no"):
continue
else:
break
if answer == "no":
return
else:
n_duplicate = 1
for size, files in self.dict_items:
print(f"\n{size} bytes")
hash_dict = dict()
for file in files:
hash_maker = hashlib.md5()
with open(file, "rb") as f:
hash_maker.update(f.read())
if hash_maker.hexdigest() not in hash_dict.keys():
hash_dict[hash_maker.hexdigest()] = [file]
else:
hash_dict[hash_maker.hexdigest()].append(file)
for key, values in hash_dict.items():
if len(values) > 1:
print(f"Hash: {key}")
for value in values:
print(f"{n_duplicate}. {value}")
self.numbered_dict[n_duplicate] = value
n_duplicate += 1
def delete_files(self):
while True:
answer = input("Delete files?\n")
if answer not in ("yes", "no"):
continue
else:
break
if answer == "no":
return
else:
while True:
answer = input("Enter file numbers to delete:\n")
try:
files_to_delete = list(map(int, answer.split()))
if len(files_to_delete) == 0:
raise ValueError
if any(n not in self.numbered_dict.keys() for n in files_to_delete):
raise ValueError
break
except ValueError:
print("\nWrong format\n")
freed_space = 0
for file in files_to_delete:
freed_space += os.path.getsize(self.numbered_dict[file])
os.remove(self.numbered_dict[file])
print(f"Total freed up space: {freed_space} bytes")
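# A possible refinement (a sketch, not used by the class above): hash large
# files in fixed-size chunks so a whole file never has to fit in memory.
def md5_in_chunks(path, chunk_size=1 << 20):
    hash_maker = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            hash_maker.update(chunk)
    return hash_maker.hexdigest()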
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument("directory").required = False  # hack: clearing required makes the positional optional, so a missing directory is reported below instead of raising
args = parser.parse_args()
if args.directory is None:
print("Directory is not specified")
file_handler = FileHandler(args.directory)
| [
"[email protected]"
] | |
6c36391267af20d2d0df7f255c2d1d4f98c496d0 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003650.py | 2809c442b3ba17c08e9f9aa9bc7b006e27b8a3e8 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,946 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher38258(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.2.2.2.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher38258._instance is None:
CommutativeMatcher38258._instance = CommutativeMatcher38258()
return CommutativeMatcher38258._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 38257
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.2.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 38259
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp2 = subjects.popleft()
subjects3 = deque(tmp2._args)
# State 38260
if len(subjects3) >= 1:
tmp4 = subjects3.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp4)
except ValueError:
pass
else:
pass
# State 38261
if len(subjects3) >= 1:
tmp6 = subjects3.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.2', tmp6)
except ValueError:
pass
else:
pass
# State 38262
if len(subjects3) == 0:
pass
# State 38263
if len(subjects) == 0:
pass
# 0: x**j*f
yield 0, subst3
subjects3.appendleft(tmp6)
subjects3.appendleft(tmp4)
subjects.appendleft(tmp2)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp8 = subjects.popleft()
associative1 = tmp8
associative_type1 = type(tmp8)
subjects9 = deque(tmp8._args)
matcher = CommutativeMatcher38265.get()
tmp10 = subjects9
subjects9 = []
for s in tmp10:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp10, subst0):
pass
if pattern_index == 0:
pass
# State 38270
if len(subjects) == 0:
pass
# 0: x**j*f
yield 0, subst1
subjects.appendleft(tmp8)
return
yield
from .generated_part003651 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"[email protected]"
] | |
0e3f366f9b2f023474aa0f26b034f046a6e738bd | 4ade37d929b07b1eea07337b9cc843661a66e6d0 | /trails/feeds/nothink.py | f40ae15122ffc7c0e6f962eac4765945bd5dded1 | [
"MIT"
] | permissive | Dm2333/maltrail | bade5c99583b99f4ad1128aef295e95c977d82b1 | 2f32e0c3ff65544fc07ad3787d4d9b210f975b85 | refs/heads/master | 2021-04-12T10:44:25.125653 | 2018-03-20T11:50:40 | 2018-03-20T11:50:40 | 126,193,051 | 1 | 0 | MIT | 2018-03-21T14:40:05 | 2018-03-21T14:40:03 | Python | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/env python
"""
Copyright (c) 2014-2018 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "http://www.nothink.org/blacklist/blacklist_malware_irc.txt"
__check__ = "Malware IRC"
__info__ = "potential malware site"
__reference__ = "nothink.org"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line] = (__info__, __reference__)
return retval
| [
"[email protected]"
] | |
a4df1412e80429c8ca05612b28da392af78863c4 | 06fa1aefc051ee0d6c325afef13dfcc14e52c6e4 | /ulmo/runs/SSL/MODIS/v2/ssl_modis_v2.py | 26d4899ae5c17be07d9a144ad971c07e06b78342 | [] | no_license | cxzhangqi/ulmo | 98e7b2783720b13f0e31a8bdf6ae70ab2a217bc7 | c1c570e75332243b8a2a16a8d6c68544e1ba02cd | refs/heads/main | 2023-06-30T19:15:04.279986 | 2021-07-31T19:45:51 | 2021-07-31T19:45:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,898 | py | """ Module for Ulmo analysis on VIIRS 2013"""
import os
import numpy as np
import time
import h5py
from tqdm.auto import trange
import argparse
import h5py
import umap
from ulmo import io as ulmo_io
from ulmo.utils import catalog as cat_utils
from ulmo.ssl import analysis as ssl_analysis
from ulmo.ssl.util import adjust_learning_rate
from ulmo.ssl.util import set_optimizer, save_model
from ulmo.ssl import latents_extraction
from ulmo.ssl.train_util import Params, option_preprocess
from ulmo.ssl.train_util import modis_loader, set_model
from ulmo.ssl.train_util import train_model
from IPython import embed
def parse_option():
"""
This is a function used to parse the arguments in the training.
Returns:
args: (dict) dictionary of the arguments.
"""
parser = argparse.ArgumentParser("argument for training.")
parser.add_argument("--opt_path", type=str, help="path of 'opt.json' file.")
parser.add_argument("--func_flag", type=str, help="flag of the function to be execute: 'train' or 'evaluate' or 'umap'.")
# JFH Should the default now be true with the new definition.
parser.add_argument('--debug', default=False, action='store_true',
help='Debug?')
args = parser.parse_args()
return args
def ssl_v2_umap(debug=False, orig=False):
"""Run a UMAP analysis on all the MODIS L2 data
Args:
debug (bool, optional): [description]. Defaults to False.
orig (bool, optional): [description]. Defaults to False.
"""
# Load table
tbl_file = 's3://modis-l2/Tables/MODIS_L2_std.parquet'
modis_tbl = ulmo_io.load_main_table(tbl_file)
modis_tbl['U0'] = 0.
modis_tbl['U1'] = 0.
# Train the UMAP
# Split
train = modis_tbl.pp_type == 1
valid = modis_tbl.pp_type == 0
y2010 = modis_tbl.pp_file == 's3://modis-l2/PreProc/MODIS_R2019_2010_95clear_128x128_preproc_std.h5'
valid_tbl = modis_tbl[valid & y2010].copy()
# Latents file (subject to move)
if debug:
latents_train_file = 's3://modis-l2/SSL_MODIS_R2019_2010_latents_v2/modis_R2019_2010_latents_last_v2.h5'
else:
latents_train_file = 's3://modis-l2/SSL/SSL_v2_2012/latents/MODIS_R2019_2010_95clear_128x128_latents_std.h5'
# Load em in
basefile = os.path.basename(latents_train_file)
if not os.path.isfile(basefile):
print("Downloading latents (this is *much* faster than s3 access)...")
ulmo_io.download_file_from_s3(basefile, latents_train_file)
print("Done")
hf = h5py.File(basefile, 'r')
latents_train = hf['modis_latents_v2_train'][:]
latents_valid = hf['modis_latents_v2_valid'][:]
print("Latents loaded")
# Check
assert latents_valid.shape[0] == len(valid_tbl)
print("Running UMAP..")
reducer_umap = umap.UMAP()
latents_mapping = reducer_umap.fit(latents_train)
print("Done..")
# Loop on em all
latent_files = ulmo_io.list_of_bucket_files('modis-l2',
prefix='SSL/SSL_v2_2012/latents/')
for latents_file in latent_files:
basefile = os.path.basename(latents_file)
year = int(basefile[12:16])
# Download?
if not os.path.isfile(basefile):
print(f"Downloading {latents_file} (this is *much* faster than s3 access)...")
ulmo_io.download_file_from_s3(basefile, latents_train_file)
print("Done")
# Load and apply
hf = h5py.File(basefile, 'r')
'''
if 'train' in hf.keys():
latents_train = hf['train'][:]
train_embedding = latents_mapping.transform(latents_train)
'''
# THIS LINE IS WRONG. FIX WHEN THE REST IS FIXED
latents_valid = hf['train'][:]
valid_embedding = latents_mapping.transform(latents_valid)
# Save to table
embed(header='118 of ssl modis 2012')
yidx = modis_tbl.pp_file == f's3://modis-l2/PreProc/MODIS_R2019_{year}_95clear_128x128_preproc_std.h5'
valid_idx = valid & yidx
modis_tbl.loc[valid_idx, 'U0'] = valid_embedding[:,0]
modis_tbl.loc[valid_idx, 'U1'] = valid_embedding[:,1]
'''
train_idx = train & yidx
if np.sum(train_idx) > 0:
modis_tbl.loc[train_idx, 'U0'] = train_embedding[:,0]
modis_tbl.loc[train_idx, 'U1'] = train_embedding[:,1]
'''
hf.close()
# Clean up
os.remove(basefile)
# Vet
assert cat_utils.vet_main_table(valid_tbl, cut_prefix='modis_')
def main_train(opt_path: str):
"""Train the model
Args:
opt_path (str): Path + filename of options file
"""
# loading parameters json file
opt = Params(opt_path)
opt = option_preprocess(opt)
# build data loader
train_loader = modis_loader(opt)
# build model and criterion
model, criterion = set_model(opt, cuda_use=opt.cuda_use)
# build optimizer
optimizer = set_optimizer(opt, model)
# training routine
for epoch in trange(1, opt.epochs + 1):
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
time1 = time.time()
loss = train_model(train_loader, model, criterion,
optimizer, epoch, opt, cuda_use=opt.cuda_use)
time2 = time.time()
print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
if epoch % opt.save_freq == 0:
# Save locally
save_file = 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch)
save_model(model, optimizer, opt, epoch, save_file)
# Save to s3
s3_file = os.path.join(
opt.s3_outdir, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
ulmo_io.upload_file_to_s3(save_file, s3_file)
# save the last model local
save_file = 'last.pth'
save_model(model, optimizer, opt, opt.epochs, save_file)
# Save to s3
s3_file = os.path.join(opt.s3_outdir, 'last.pth')
ulmo_io.upload_file_to_s3(save_file, s3_file)
def main_evaluate(opt_path, model_file,
preproc='_std', debug=False):
"""
This function is used to obtain the latents of the trained models
for all of MODIS
Args:
opt_path: (str) option file path.
model_file: (str) s3 filename
preproc: (str, optional)
"""
opt = option_preprocess(Params(opt_path))
model_base = os.path.basename(model_file)
ulmo_io.download_file_from_s3(model_base, model_file)
# Data files
all_pp_files = ulmo_io.list_of_bucket_files(
'modis-l2', 'PreProc')
pp_files = []
for ifile in all_pp_files:
if preproc in ifile:
pp_files.append(ifile)
# Loop on files
key_train, key_valid = "train", "valid"
if debug:
pp_files = pp_files[0:1]
for ifile in pp_files:
print("Working on ifile")
data_file = os.path.basename(ifile)
if not os.path.isfile(data_file):
ulmo_io.download_file_from_s3(data_file,
f's3://modis-l2/PreProc/{data_file}')
# Read
with h5py.File(data_file, 'r') as file:
if 'train' in file.keys():
train=True
else:
train=False
# Setup
latents_file = data_file.replace('_preproc', '_latents')
latents_path = os.path.join(opt.latents_folder, latents_file)
if train:
print("Starting train evaluation")
latents_extraction.model_latents_extract(opt, data_file,
'train', model_base, latents_file, key_train)
print("Extraction of Latents of train set is done.")
print("Starting valid evaluation")
latents_extraction.model_latents_extract(opt, data_file,
'valid', model_base, latents_file, key_valid)
print("Extraction of Latents of valid set is done.")
# Push to s3
print("Uploading to s3..")
ulmo_io.upload_file_to_s3(latents_file, latents_path)
# Remove data file
os.remove(data_file)
print(f'{data_file} removed')
if __name__ == "__main__":
# get the argument of training.
args = parse_option()
# run the 'main_train()' function.
if args.func_flag == 'train':
print("Training Starts.")
main_train(args.opt_path)
print("Training Ends.")
# run the "main_evaluate()" function.
if args.func_flag == 'evaluate':
print("Evaluation Starts.")
main_evaluate(args.opt_path,
's3://modis-l2/SSL/SSL_v2_2012/last.pth',
debug=args.debug)
print("Evaluation Ends.")
# run the umap
if args.func_flag == 'umap':
print("UMAP Starts.")
ssl_v2_umap(debug=args.debug)
print("UMAP Ends.")
| [
"[email protected]"
] | |
dd7f146df693ac042cde1345a5080c70862c344e | 222a7d69a78f1350772c9c8bfb0b36c640e5cd6e | /MarlinJobs/CalibrationConfigFiles/Stage59Config_5x5_30x30.py | 2b94d6d91472c95d504b20257b87d7e3b5afb347 | [] | no_license | StevenGreen1/JERDetailed | 2a8cb30ec32781791ba163e5125bcdb87239e9a4 | 27ed19dc0930570f16019b2c7820ae715dd0ec57 | refs/heads/master | 2021-01-17T06:55:11.384992 | 2016-08-10T14:41:38 | 2016-08-10T14:41:38 | 44,620,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | # Calibration config file for testing
# Digitisation Constants - ECal
CalibrECal = 42.3662496409
# Digitisation Constants - HCal
CalibrHCalBarrel = 50.3504586994
CalibrHCalEndcap = 55.6419000329
CalibrHCALOther = 30.5873671511
# Digitisation Constants - Muon Chamber
CalibrMuon = 56.7
# MIP Peak position in directed corrected SimCaloHit energy distributions
# used for realistic ECal and HCal digitisation options
CalibrECalMIP = 0.0001475
CalibrHCalMIP = 0.0004925
# MIP Peak position in directed corrected CaloHit energy distributions
# used for MIP definition in PandoraPFA
ECalToMIPCalibration = 153.846
HCalToMIPCalibration = 36.1011
MuonToMIPCalibration = 10.101
# EM and Had Scale Settings
ECalToEMGeVCalibration = 1.00215973193
HCalToEMGeVCalibration = 1.00215973193
ECalToHadGeVCalibration = 1.12219237098
HCalToHadGeVCalibration = 1.05372579725
# Pandora Threshold Cuts
ECalMIPThresholdPandora = 0.5
HCalMIPThresholdPandora = 0.3
# Hadronic Energy Truncation in HCal PandoraPFA
MaxHCalHitHadronicEnergy = 1000000.0
# Timing ECal
ECalBarrelTimeWindowMax = 300.0
ECalEndcapTimeWindowMax = 300.0
# Timing HCal
HCalBarrelTimeWindowMax = 300.0
HCalEndcapTimeWindowMax = 300.0
| [
"[email protected]"
] | |
2a62f1bef54bfd2cb7615ca2e9e0483f7ca9fd76 | 5ab2ccf70fddd30ea88155f2a5adb0711bf3dc9a | /Chap10/factorsingles.py | 5d413a283dcbbe5de549074b7b5cbee0eafea399 | [] | no_license | jdukosse/LOI_Python_course-SourceCode | 32d66fd79344e9ab9412a6da373f2093b39cad92 | bf13907dacf5b6e95f84885896c8f478dd208011 | refs/heads/master | 2020-12-05T23:27:53.862508 | 2020-01-24T13:42:28 | 2020-01-24T13:42:28 | 232,276,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | n = int(input("Please enter a positive integer: "))
factors = [x for x in range(1, n + 1) if n % x == 0]
print("Factors of", n, ":", factors)
| [
"[email protected]"
] | |
9461f02ac4fdcbf48b760055e18b17a595c5d8e0 | 5451997d7b691679fd213d6473b21f184a5c9402 | /pymaze/wsgi.py | 4aff83a8a210e68f9e6d3d976da790c63895747e | [
"MIT"
] | permissive | TerryHowe/pymaze | 9ba54c7d328abf94f6709593795a587f28be752b | a5b7e90b5019a5f99a7f80317796ace72ca0754f | refs/heads/master | 2022-05-01T07:39:17.896430 | 2022-04-23T10:41:48 | 2022-04-23T10:41:48 | 89,522,507 | 1 | 0 | MIT | 2022-04-23T10:41:49 | 2017-04-26T20:13:13 | Python | UTF-8 | Python | false | false | 390 | py | """
WSGI config for pymaze project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pymaze.settings")
application = get_wsgi_application()
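# Any WSGI server can then serve this module; a typical (illustrative)
# deployment command would be: gunicorn pymaze.wsgi:application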
| [
"[email protected]"
] | |
b8d7a99ad4e5d9a13b4ce30cd3d4f23c799f5ddd | 6e928e1651713f945c980bca6d6c02ac5dce249a | /task1/3.py | 7cd2b6493d849db45fc01607283f0cb988c3dd8e | [] | no_license | Akzhan12/pp2 | 97334158b442383df32583ee6c0b9cab92a3ef45 | 56e33fd9119955ea8349172bf3f2cc5fbd814142 | refs/heads/main | 2023-06-28T08:30:11.068397 | 2021-07-29T08:34:43 | 2021-07-29T08:34:43 | 337,359,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | a = list(map(int, input().strip().split()))
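# Reads whitespace-separated integers and prints them in reverse order,
# e.g. input "1 2 3 4" -> output "4 3 2 1".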
print(*a[::-1]) | [
"[email protected]"
] | |
5eff0169132197e41737862349d9ad181777010a | fe8f7febac1ff93b829256cdfd0be69e94498c76 | /python/fluent_python/code/clockdeco_param.py | 4700886d4acf8383701a414070e3f4635df7f784 | [] | no_license | bioShaun/notebook | c438eba1d29b736704c3f5325faf15ad61a1e9d5 | ce5f477a78554ed0d4ea5344057c19e32eb6c2b8 | refs/heads/master | 2020-03-26T16:16:06.458545 | 2018-08-23T00:54:53 | 2018-08-23T00:54:53 | 145,090,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | import time
import functools
DEFAULT_FMT = '[{elapsed:0.8f}s] {name}({args}) -> {result}'
def clock(fmt=DEFAULT_FMT):
def decorate(func):
def clocked(*_args, **kwargs):
t0 = time.time()
_result = func(*_args, **kwargs)
elapsed = time.time() - t0
name = func.__name__
arg_lst = []
if _args:
arg_lst.append(', '.join(repr(arg) for arg in _args))
if kwargs:
pairs = ['%s=%r' % (k, w)
                         for k, w in sorted(kwargs.items())]
arg_lst.append(', '.join(pairs))
args = ', '.join(arg_lst)
result = repr(_result)
print(fmt.format(**locals()))
return _result
return clocked
return decorate
if __name__ == '__main__':
@clock()
def snooze(seconds):
time.sleep(seconds)
for i in range(3):
snooze(.123)
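    # Expected output: three lines shaped by DEFAULT_FMT, e.g.
    # [0.12419817s] snooze(0.123) -> None   (elapsed times vary per run)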
| [
"[email protected]"
] | |
e7e3c115506553ab1cbc5ca31ff9c0144325dd24 | 16e266cf50a712ed29a4097e34504aac0281e6cb | /Functions/venv/lib/python3.6/site-packages/_TFL/_SDG/_C/Macro.py | 75f2950512e90bf9922859188d30c81a9164101c | [
"BSD-3-Clause"
] | permissive | felix-ogutu/PYTHON-PROJECTS | 9dd4fdcfff6957830587b64c5da3b5c3ade3a27e | 8c1297dbda495078509d06a46f47dc7ee60b6d4e | refs/heads/master | 2023-06-05T04:41:36.727376 | 2021-06-25T20:36:52 | 2021-06-25T20:36:52 | 380,348,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,540 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2007 TTTech Computertechnik AG. All rights reserved
# Schönbrunnerstraße 7, A--1040 Wien, Austria. [email protected]
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL.SDG.C.Macro
#
# Purpose
# C-macro definitions
#
# Revision Dates
# 11-Aug-2004 (MG) Creation
# 12-Aug-2004 (MG) `Macro_Block.children_group_names` added
# 12-Aug-2004 (MG) Convert the `args` paremeter from `None` to `""` and
# from `""` to `None` for backward compatibility
# 12-Aug-2004 (MG) `description` added to formats
# 13-Aug-2004 (CT) `Macro.c_format` simplified
# (`%(name)s` instead of `%(::.name:)s`)
# 24-Aug-2004 (CT) Spurious space after macro name removed from `h_format`
# and `c_format`
# 24-Aug-2004 (MG) `Macro_Block.children_group_names` removed
# 7-Oct-2004 (CED) `Define_Constant` added
# 8-Feb-2005 (CED) `apidoc_tex_format` defined here and necessary changes
# made
# 9-Feb-2005 (MBM/CED) formal changes to `apidoc_tex_format`
# 22-Feb-2005 (MBM) Removed <> from index entry
# 24-Feb-2005 (MBM) Changed index entry structure
# 9-Aug-2005 (CT) Call to `tex_quoted` added
# 30-Oct-2006 (CED) `Preprocessor_Error` added
# 9-Mar-2007 (CED) Accepting integer as value of `Define_Constant`
# 17-Apr-2007 (CED) `Define_Constant` improved to print parantheses around
# `value`
# 23-Jul-2007 (CED) Activated absolute_import
# 06-Aug-2007 (CED) Future import removed again
# 26-Feb-2012 (MG) `__future__` imports added
# ««revision-date»»···
#--
from __future__ import absolute_import, division, print_function, unicode_literals
from _TFL import TFL
import _TFL._SDG._C.Node
import _TFL._SDG._C.Statement
import _TFL.tex_quoted
import textwrap
class _Macro_ (TFL.SDG.C.Node) :
"""Base class of all preprocessor commands (defines, if, ifdef, ...)"""
cgi = None
def _update_scope (self, scope) :
### why do we need this ???? MGL, 11-Aug-2004
self.scope = scope
for c in self.children :
c._update_scope (scope)
# end def _update_scope
# end class _Macro_
class Macro (_Macro_, TFL.SDG.Leaf) :
"""C-macro defintion"""
init_arg_defaults = dict \
( name_len = 0
, scope = TFL.SDG.C.C
, args = None
, lines = None
)
front_args = ("name", "args")
rest_args = "lines"
m_head = ""
h_format = c_format = """
#%(m_head)s%(name)s%(:head=(¡tail=):.args:)s %(:sep_eol= \\:.lines:)s
>%(::*description:)s
"""
def __init__ (self, * args, ** kw) :
self.__super.__init__ (* args, ** kw)
if self.args is None :
self.args = ""
elif self.args == "" :
self.args = None
# end def __init__
# end class Macro
class Define (Macro) :
"""A C-macro #define stament"""
m_head = "define "
init_arg_defaults = dict \
( def_file = "unknown"
, explanation = ""
)
_apidoc_head = \
"""%(::@_name_comment:)-{output_width - indent_anchor}s
\\hypertarget{%(name)s}{}
\\subsubsection{\\texttt{%(name)s}}
\\index{FT-COM API>\\texttt{%(name)s}}
\\ttindex{%(name)s}
\\begin{description}
>\\item %(::*description:)s \\\\
>\\item \\textbf{File:} \\\\ \\texttt{%(def_file)s} \\\\
"""
_apidoc_tail = \
""">%(::>@_explanation:)-{output_width - indent_anchor}s
\\end{description}
>
"""
_apidoc_middle = \
""">\\item \\textbf{Function declaration:} \\\\
>>\\texttt{%(name)s (%(args)s)} \\\\
"""
apidoc_tex_format = "".join \
( [ _apidoc_head
, _apidoc_middle
, _apidoc_tail
]
)
def _name_comment (self, ** kw) :
format_prec = int (kw ["format_prec"])
result = \
( "%% --- %s %s"
% ( self.name
, "-" * ( format_prec - len (self.name) - 7
)
)
)
return [result]
# end def _name_comment
def _explanation (self, ** kw) :
if not self.explanation :
yield ""
return
yield "\\item \\textbf{Description:} \\\\"
format_prec = max (int (kw ["format_prec"]), 4)
wrapper = textwrap.TextWrapper (width = format_prec)
for l in wrapper.wrap (TFL.tex_quoted (self.explanation)) :
yield l
# end def _explanation
# end class Define
class Define_Constant (Define) :
"""A C-macro #define stament, defining a constant value"""
init_arg_defaults = dict \
( name_len = 0
, scope = TFL.SDG.C.C
, name = None
, value = None
)
front_args = ("name", "value")
h_format = c_format = """
#%(m_head)s%(name)s %(:head=(¡tail=):.value:)s
>%(::*description:)s
"""
_apidoc_middle = \
""">\\item \\textbf{Value:} %(value)s
"""
apidoc_tex_format = "".join \
( [ Define._apidoc_head
, _apidoc_middle
, Define._apidoc_tail
]
)
_autoconvert = dict \
( value = lambda s, k, v : str (v)
)
# end class Define_Constant
class Macro_Block (_Macro_, TFL.SDG.C.Stmt_Group) :
"""Block of macro definitions"""
Ancestor = TFL.SDG.C.Stmt_Group
# end class Macro_Block
class Preprocessor_Error (_Macro_) :
"""A C preprocessor error statement"""
m_head = "error "
init_arg_defaults = dict \
( scope = TFL.SDG.C.HC
, error_msg = ""
)
front_args = ("error_msg", )
h_format = c_format = """
#%(m_head) s%(error_msg)s
"""
# end class Preprocessor_Error
if __name__ != "__main__" :
TFL.SDG.C._Export ("*", "_Macro_")
### __END__ TFL.SDG.C.Macro | [
"[email protected]"
] | |
f7aae61ca9fb68f5eef8a568456a9cbeba341313 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4461294.3.spec | e94005b8acdac761a61712abf74af485df3afd67 | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,999 | spec | {
"id": "mgm4461294.3",
"metadata": {
"mgm4461294.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 3202869,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 47,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/100.preprocess.removed.fna.gz"
},
"150.dereplication.info": {
"compression": null,
"description": null,
"size": 778,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/150.dereplication.info"
},
"150.dereplication.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 3202872,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/150.dereplication.passed.fna.gz"
},
"150.dereplication.passed.fna.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/150.dereplication.passed.fna.stats"
},
"150.dereplication.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 50,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/150.dereplication.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 4895,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 3202865,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 1830,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/299.screen.passed.fna.stats"
},
"350.genecalling.coding.faa.gz": {
"compression": "gzip",
"description": null,
"size": 2108466,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.coding.faa.gz"
},
"350.genecalling.coding.faa.stats": {
"compression": null,
"description": null,
"size": 122,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.coding.faa.stats"
},
"350.genecalling.coding.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2985893,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.coding.fna.gz"
},
"350.genecalling.coding.fna.stats": {
"compression": null,
"description": null,
"size": 315,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.coding.fna.stats"
},
"350.genecalling.info": {
"compression": null,
"description": null,
"size": 714,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/350.genecalling.info"
},
"425.usearch.rna.fna.gz": {
"compression": "gzip",
"description": null,
"size": 354214,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/425.usearch.rna.fna.gz"
},
"425.usearch.rna.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/425.usearch.rna.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 354917,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 2758,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 46,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 49129,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 17935,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 14881,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 135878,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.sims.gz"
},
"450.rna.sims.info": {
"compression": null,
"description": null,
"size": 1376,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/450.rna.sims.info"
},
"550.cluster.aa90.faa.gz": {
"compression": "gzip",
"description": null,
"size": 2053494,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.faa.gz"
},
"550.cluster.aa90.faa.stats": {
"compression": null,
"description": null,
"size": 122,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.faa.stats"
},
"550.cluster.aa90.info": {
"compression": null,
"description": null,
"size": 1080,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.info"
},
"550.cluster.aa90.mapping": {
"compression": null,
"description": null,
"size": 39670,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.mapping"
},
"550.cluster.aa90.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/550.cluster.aa90.mapping.stats"
},
"640.loadAWE.info": {
"compression": null,
"description": null,
"size": 114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/640.loadAWE.info"
},
"650.superblat.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 5002524,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.expand.lca.gz"
},
"650.superblat.expand.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 2572055,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.expand.ontology.gz"
},
"650.superblat.expand.protein.gz": {
"compression": "gzip",
"description": null,
"size": 5348131,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.expand.protein.gz"
},
"650.superblat.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 2224457,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.sims.filter.gz"
},
"650.superblat.sims.gz": {
"compression": "gzip",
"description": null,
"size": 11207948,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.sims.gz"
},
"650.superblat.sims.info": {
"compression": null,
"description": null,
"size": 1343,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/650.superblat.sims.info"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 3596622,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 43897,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 1592364,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 2357826,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 1763366,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 18935197,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 894,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 120,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 157,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 113,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 800,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 2749,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 61,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 8883,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 13688,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 4837,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 1033,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 23134,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 87,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 48044,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4461294.3/file/999.done.species.stats"
}
},
"id": "mgm4461294.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4461294.3"
}
},
"raw": {
"mgm4461294.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4461294.3"
}
}
} | [
"[email protected]"
] | |
03c89f87bc946fe9d2a1f054e5f392aa88cc88c2 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /carbon/common/script/net/httpAuth.py | 4e0d808e60ebe4b4b14cadffc1f8dc510f115517 | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,364 | py | #Embedded file name: carbon/common/script/net\httpAuth.py
import base
import cherrypy
import httpJinja
import macho
import blue
import const
import base64
from datetime import datetime
SESSION_KEY = '_cp_username'
AUTH_LOGIN_URL = '/auth/login'
DEFAULT_URL = '/default.py'
def CreateSession(username, password):
session = base.CreateSession()
session.esps = ESPSession(None, session.sid)
session.esps.contents['username'] = username
session.esps.contents['password'] = password
return session
def EndSession():
cherrypy.session.delete()
cherrypy.lib.sessions.expire()
def CheckCredentials(username, password):
sess = CreateSession(username, password)
if macho.mode == 'client':
cherrypy.session['machoSession'] = sess
return
auth = base.GetServiceSession('cherry').ConnectToAnyService('authentication')
sptype = const.userConnectTypeServerPages
try:
sessstuff, _ = auth.Login(sess.sid, username, password, None, sptype, cherrypy.request.remote.ip)
except UserError:
return u'Incorrect username or password'
except Exception:
return u'Incorrect username or password'
session = CreateSession(username, password)
sessstuff['role'] |= sess.role
for otherSession in base.FindSessions('userid', [sessstuff['userid']]):
otherSession.LogSessionHistory('Usurped by user %s via HTTP using local authentication' % username)
base.CloseSession(otherSession)
cherrypy.session['machoSession'] = sess
sess.SetAttributes(sessstuff)
def CheckAuth(*args, **kwargs):
assets = cherrypy.request.config.get('tools.staticdir.dir')
cherrypy.request.beginTime = datetime.now()
if assets not in cherrypy.request.path_info:
conditions = cherrypy.request.config.get('auth.require', None)
if conditions is not None:
pathInfo = cherrypy.request.path_info
if len(cherrypy.request.query_string):
pathInfo = '%s?%s' % (pathInfo, cherrypy.request.query_string)
if pathInfo in [AUTH_LOGIN_URL, DEFAULT_URL]:
authLogin = AUTH_LOGIN_URL
else:
authLogin = '%s?from_page=%s' % (AUTH_LOGIN_URL, base64.urlsafe_b64encode(pathInfo))
username = cherrypy.session.get(SESSION_KEY)
if username:
cherrypy.request.login = username
for condition in conditions:
if not condition():
raise cherrypy.HTTPRedirect(authLogin)
else:
raise cherrypy.HTTPRedirect(authLogin)
cherrypy.tools.auth = cherrypy.Tool('before_handler', CheckAuth)
def Require(*conditions):
def decorate(f):
if not hasattr(f, '_cp_config'):
f._cp_config = dict()
if 'auth.require' not in f._cp_config:
f._cp_config['auth.require'] = []
f._cp_config['auth.require'].extend(conditions)
return f
return decorate
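# Example usage with the condition helpers below (handler class is
# hypothetical):
#     class Root(object):
#         @cherrypy.expose
#         @Require(NameIs('admin'))
#         def settings(self):
#             ...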
def MemberOf(groupName):
def check():
return cherrypy.request.login == 'joe' and groupName == 'admin'
    return check
def NameIs(required_username):
return lambda : required_username == cherrypy.request.login
def AnyOf(*conditions):
def check():
for condition in conditions:
if condition():
return True
return False
    return check
def AllOf(*conditions):
def check():
for condition in conditions:
if not condition():
return False
return True
return check
class ESPSession:
def __init__(self, owner, sid):
self.codePage = 0
self.contents = {}
self.LCID = 0
self.sessionID = sid
self.timeout = 20
self.authenticated = 0
self.username = ''
self.password = ''
self.owner = owner
self.flatkokudeig = blue.os.GetWallclockTimeNow()
self.remappings = {}
class AuthController(object):
__guid__ = 'httpAuth.AuthController'
def on_login(self, username):
"""Called on successful login"""
pass
def on_logout(self, username):
"""Called on logout"""
pass
def get_loginform(self, username, msg = None, from_page = '/'):
sp = cherrypy.sm.GetService('SP')
try:
background_color = sp.Color()
except Exception:
background_color = sp.Color()
return {'msg': msg,
'style': 'background-color: %s; color: black' % background_color,
'sp': sp.Title(),
'server': cherrypy.prefs.clusterName,
'generate_time': datetime.now() - cherrypy.request.beginTime,
'username': 'sp' if prefs.clusterMode == 'LOCAL' else ''}
@cherrypy.expose
@cherrypy.tools.jinja(template='AuthController_login.html')
def login(self, username = None, password = None, from_page = '/'):
if username is None or password is None:
return self.get_loginform('', from_page=from_page)
error_msg = CheckCredentials(username, password)
if error_msg:
return self.get_loginform(username, error_msg, from_page)
cherrypy.session.regenerate()
cherrypy.session[SESSION_KEY] = cherrypy.request.login = username
self.on_login(username)
if from_page != '/':
from_page = base64.urlsafe_b64decode(str(from_page))
raise cherrypy.HTTPRedirect(from_page or '/')
@cherrypy.expose
def logout(self, from_page = '/'):
sess = cherrypy.session
username = sess.get(SESSION_KEY, None)
sess[SESSION_KEY] = None
if username:
cherrypy.request.login = None
self.on_logout(username)
if 'machoSession' in cherrypy.session:
sess = cherrypy.session['machoSession']
            sess.LogSessionHistory('Web session closed by logging out %s' % str(sess.userid))
base.CloseSession(sess)
EndSession()
raise cherrypy.HTTPRedirect(from_page or '/')
exports = {'httpAuth.CreateSession': CreateSession,
'httpAuth.EndSession': EndSession,
'httpAuth.CheckCredentials': CheckCredentials,
'httpAuth.CheckAuth': CheckAuth,
'httpAuth.Require': Require,
'httpAuth.MemberOf': MemberOf,
'httpAuth.NameIs': NameIs,
'httpAuth.AnyOf': AnyOf,
'httpAuth.AllOf': AllOf}
| [
"[email protected]"
] | |
027954b13256b665ac1641929f4678fcdca3ee95 | a1657a0c5c8f3f8b51b98074293e2f2e9b16e6f4 | /libs/pipeline_model/tensorflow/core/framework/function_pb2.py | ed2c403e475d8930c09fb9f953cc005855abe240 | [
"Apache-2.0"
] | permissive | PipelineAI/pipeline | e8067636f5844dea0653aef84bd894ca2e700fc6 | 0f26e3eaad727c1d10950f592fe1949ece8153aa | refs/heads/master | 2023-01-07T15:27:33.741088 | 2022-10-25T23:01:51 | 2022-10-25T23:01:51 | 38,730,494 | 2,596 | 512 | Apache-2.0 | 2020-01-30T23:00:08 | 2015-07-08T03:49:23 | Jsonnet | UTF-8 | Python | false | true | 12,092 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/function.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import attr_value_pb2 as tensorflow_dot_core_dot_framework_dot_attr__value__pb2
from tensorflow.core.framework import node_def_pb2 as tensorflow_dot_core_dot_framework_dot_node__def__pb2
from tensorflow.core.framework import op_def_pb2 as tensorflow_dot_core_dot_framework_dot_op__def__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/function.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n(tensorflow/core/framework/function.proto\x12\ntensorflow\x1a*tensorflow/core/framework/attr_value.proto\x1a(tensorflow/core/framework/node_def.proto\x1a&tensorflow/core/framework/op_def.proto\"j\n\x12\x46unctionDefLibrary\x12)\n\x08\x66unction\x18\x01 \x03(\x0b\x32\x17.tensorflow.FunctionDef\x12)\n\x08gradient\x18\x02 \x03(\x0b\x32\x17.tensorflow.GradientDef\"\xaa\x02\n\x0b\x46unctionDef\x12$\n\tsignature\x18\x01 \x01(\x0b\x32\x11.tensorflow.OpDef\x12/\n\x04\x61ttr\x18\x05 \x03(\x0b\x32!.tensorflow.FunctionDef.AttrEntry\x12%\n\x08node_def\x18\x03 \x03(\x0b\x32\x13.tensorflow.NodeDef\x12-\n\x03ret\x18\x04 \x03(\x0b\x32 .tensorflow.FunctionDef.RetEntry\x1a\x42\n\tAttrEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.tensorflow.AttrValue:\x02\x38\x01\x1a*\n\x08RetEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\";\n\x0bGradientDef\x12\x15\n\rfunction_name\x18\x01 \x01(\t\x12\x15\n\rgradient_func\x18\x02 \x01(\tB/\n\x18org.tensorflow.frameworkB\x0e\x46unctionProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_attr__value__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_node__def__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_op__def__pb2.DESCRIPTOR,])
_FUNCTIONDEFLIBRARY = _descriptor.Descriptor(
name='FunctionDefLibrary',
full_name='tensorflow.FunctionDefLibrary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='function', full_name='tensorflow.FunctionDefLibrary.function', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gradient', full_name='tensorflow.FunctionDefLibrary.gradient', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=182,
serialized_end=288,
)
_FUNCTIONDEF_ATTRENTRY = _descriptor.Descriptor(
name='AttrEntry',
full_name='tensorflow.FunctionDef.AttrEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.FunctionDef.AttrEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.FunctionDef.AttrEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=479,
serialized_end=545,
)
_FUNCTIONDEF_RETENTRY = _descriptor.Descriptor(
name='RetEntry',
full_name='tensorflow.FunctionDef.RetEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.FunctionDef.RetEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.FunctionDef.RetEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=589,
)
_FUNCTIONDEF = _descriptor.Descriptor(
name='FunctionDef',
full_name='tensorflow.FunctionDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='signature', full_name='tensorflow.FunctionDef.signature', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attr', full_name='tensorflow.FunctionDef.attr', index=1,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='node_def', full_name='tensorflow.FunctionDef.node_def', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ret', full_name='tensorflow.FunctionDef.ret', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_FUNCTIONDEF_ATTRENTRY, _FUNCTIONDEF_RETENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=291,
serialized_end=589,
)
_GRADIENTDEF = _descriptor.Descriptor(
name='GradientDef',
full_name='tensorflow.GradientDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='function_name', full_name='tensorflow.GradientDef.function_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gradient_func', full_name='tensorflow.GradientDef.gradient_func', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=591,
serialized_end=650,
)
_FUNCTIONDEFLIBRARY.fields_by_name['function'].message_type = _FUNCTIONDEF
_FUNCTIONDEFLIBRARY.fields_by_name['gradient'].message_type = _GRADIENTDEF
_FUNCTIONDEF_ATTRENTRY.fields_by_name['value'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE
_FUNCTIONDEF_ATTRENTRY.containing_type = _FUNCTIONDEF
_FUNCTIONDEF_RETENTRY.containing_type = _FUNCTIONDEF
_FUNCTIONDEF.fields_by_name['signature'].message_type = tensorflow_dot_core_dot_framework_dot_op__def__pb2._OPDEF
_FUNCTIONDEF.fields_by_name['attr'].message_type = _FUNCTIONDEF_ATTRENTRY
_FUNCTIONDEF.fields_by_name['node_def'].message_type = tensorflow_dot_core_dot_framework_dot_node__def__pb2._NODEDEF
_FUNCTIONDEF.fields_by_name['ret'].message_type = _FUNCTIONDEF_RETENTRY
DESCRIPTOR.message_types_by_name['FunctionDefLibrary'] = _FUNCTIONDEFLIBRARY
DESCRIPTOR.message_types_by_name['FunctionDef'] = _FUNCTIONDEF
DESCRIPTOR.message_types_by_name['GradientDef'] = _GRADIENTDEF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FunctionDefLibrary = _reflection.GeneratedProtocolMessageType('FunctionDefLibrary', (_message.Message,), dict(
DESCRIPTOR = _FUNCTIONDEFLIBRARY,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.FunctionDefLibrary)
))
_sym_db.RegisterMessage(FunctionDefLibrary)
FunctionDef = _reflection.GeneratedProtocolMessageType('FunctionDef', (_message.Message,), dict(
AttrEntry = _reflection.GeneratedProtocolMessageType('AttrEntry', (_message.Message,), dict(
DESCRIPTOR = _FUNCTIONDEF_ATTRENTRY,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.FunctionDef.AttrEntry)
))
,
RetEntry = _reflection.GeneratedProtocolMessageType('RetEntry', (_message.Message,), dict(
DESCRIPTOR = _FUNCTIONDEF_RETENTRY,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.FunctionDef.RetEntry)
))
,
DESCRIPTOR = _FUNCTIONDEF,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.FunctionDef)
))
_sym_db.RegisterMessage(FunctionDef)
_sym_db.RegisterMessage(FunctionDef.AttrEntry)
_sym_db.RegisterMessage(FunctionDef.RetEntry)
GradientDef = _reflection.GeneratedProtocolMessageType('GradientDef', (_message.Message,), dict(
DESCRIPTOR = _GRADIENTDEF,
__module__ = 'tensorflow.core.framework.function_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GradientDef)
))
_sym_db.RegisterMessage(GradientDef)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\016FunctionProtosP\001\370\001\001'))
_FUNCTIONDEF_ATTRENTRY.has_options = True
_FUNCTIONDEF_ATTRENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_FUNCTIONDEF_RETENTRY.has_options = True
_FUNCTIONDEF_RETENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
d1832ec2bedb704f090af6d27a3a27a0abf67623 | 8bb4060c4a41d1ef1b31c59fb8b9bc375e3e2ba4 | /setup.py | c26e6e1cb822af51c1da20528c39ff488e7edd81 | [] | no_license | hanxianzhai/distribution | a6c5f96bb954e7e18bae0d6a7ac6976fae59d332 | 628f670f4ed39478007e3402a77653f6596d0529 | refs/heads/master | 2021-04-01T06:21:29.086943 | 2020-03-18T03:55:28 | 2020-03-18T03:55:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import config
from init import app
if __name__ == '__main__':
app.run(
host='0.0.0.0',
port=config.app_conf["server"]["port"],
debug=False
)
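# Assumes init.py exposes a Flask-style `app` object and that config.py holds
# something shaped like (hypothetical): app_conf = {"server": {"port": 8080}}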
| [
"[email protected]"
] | |
6a405e8f55909b6ed9222b949bef9230edd24b17 | abfa0fcab2bc9a9c3cccbc3a8142cdd4b2a66ee9 | /698-Partition to K Equal Sum Subsets.py | 8aceeaa11fdcd8709c3a984236173baf0a4fbd70 | [] | no_license | JinnieJJ/leetcode | 20e8ccf3f8919028c53e0f0db86bcc2fbc7b6272 | 26c6ee936cdc1914dc3598c5dc74df64fa7960a1 | refs/heads/master | 2021-04-15T09:18:08.450426 | 2021-03-06T01:53:27 | 2021-03-06T01:53:27 | 126,275,814 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | class Solution:
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
sums = [0] * k
        subsum, rem = divmod(sum(nums), k)
        if rem:
            return False
nums.sort(reverse=True)
l = len(nums)
def walk(i):
if i == l:
return len(set(sums)) == 1
for j in range(k):
sums[j] += nums[i]
if sums[j] <= subsum and walk(i+1):
return True
sums[j] -= nums[i]
if sums[j] == 0:
break
return False
return walk(0)
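# Example (LeetCode sample): canPartitionKSubsets([4, 3, 2, 3, 5, 2, 1], 4)
# returns True -- e.g. subsets (5), (1, 4), (2, 3), (2, 3) each sum to 5.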
| [
"[email protected]"
] | |
1f0f69d04585b8216b8268a4c3dc0e5868618db7 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/1289. Minimum Falling Path Sum II/solution2.py | e9ebe9c9ba9a53af13d879fb8d254dac546a99d0 | [
"MIT"
] | permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | class Solution:
def minFallingPathSum(self, arr: List[List[int]]) -> int:
m = len(arr)
n = len(arr[0])
@lru_cache(None)
def count(i: int, j: int) -> int:
if i >= m: return 0
m1 = m2 = inf
k1 = k2 = 0
for k in range(n):
if j == k: continue
if arr[i][k] < m1:
m2 = m1
m1 = arr[i][k]
k2 = k1
k1 = k
elif arr[i][k] < m2:
m2 = arr[i][k]
k2 = k
return min(m1 + count(i + 1, k1), m2 + count(i + 1, k2))
return count(0, -1)
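# Outside the LeetCode harness this needs: from functools import lru_cache;
# from math import inf; from typing import List.
# Example (LeetCode sample): minFallingPathSum([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# returns 13, e.g. the falling path 1 -> 5 -> 7 (no repeated columns).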
| [
"[email protected]"
] | |
7a79bff67cf9d6148338e6e1465395f08c394acb | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/module_utils/facts/hardware/aix.py | 442f4a95486811ff2e1f40a4627017bb23de131b | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 9,947 | py | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
class AIXHardware(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
vgs_facts = self.get_vgs_facts()
mount_facts = self.get_mount_facts()
devices_facts = self.get_device_facts()
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(vgs_facts)
hardware_facts.update(mount_facts)
hardware_facts.update(devices_facts)
return hardware_facts
def get_cpu_facts(self):
cpu_facts = {}
cpu_facts['processor'] = []
rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.splitlines():
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
cpu_facts['processor_count'] = int(i)
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
cpu_facts['processor'] = data[1]
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
if out:
data = out.split(' ')
cpu_facts['processor_cores'] = int(data[1])
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
for line in out.splitlines():
data = line.split()
if 'memory pages' in line:
pagecount = int(data[0])
if 'free pages' in line:
freecount = int(data[0])
memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
# Get swapinfo. swapinfo output looks like:
        # Device          1M-blocks     Used    Avail Capacity
        # /dev/ada0p3        314368        0   314368     0%
#
rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.splitlines()
data = lines[1].split()
swaptotal_mb = int(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
memory_facts['swaptotal_mb'] = swaptotal_mb
memory_facts['swapfree_mb'] = int(swaptotal_mb * (100 - percused) / 100)
return memory_facts
def get_dmi_facts(self):
dmi_facts = {}
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
dmi_facts['firmware_version'] = data[1].strip('IBM,')
lsconf_path = self.module.get_bin_path("lsconf")
if lsconf_path:
rc, out, err = self.module.run_command(lsconf_path)
if rc == 0 and out:
for line in out.splitlines():
data = line.split(':')
if 'Machine Serial Number' in line:
dmi_facts['product_serial'] = data[1].strip()
if 'LPAR Info' in line:
dmi_facts['lpar_info'] = data[1].strip()
if 'System Model' in line:
dmi_facts['product_name'] = data[1].strip()
return dmi_facts
def get_vgs_facts(self):
"""
Get vg and pv Facts
rootvg:
        PV_NAME           PV STATE      TOTAL PPs     FREE PPs      FREE DISTRIBUTION
        hdisk0            active            546           0         00..00..00..00..00
        hdisk1            active            546         113         00..00..00..21..92
        realsyncvg:
        PV_NAME           PV STATE      TOTAL PPs     FREE PPs      FREE DISTRIBUTION
        hdisk74           active           1999           6         00..00..00..00..06
        testvg:
        PV_NAME           PV STATE      TOTAL PPs     FREE PPs      FREE DISTRIBUTION
        hdisk105          active            999         838         200..39..199..200..200
        hdisk106          active            999         599         200..00..00..199..200
"""
vgs_facts = {}
lsvg_path = self.module.get_bin_path("lsvg")
xargs_path = self.module.get_bin_path("xargs")
cmd = "%s -o | %s %s -p" % (lsvg_path, xargs_path, lsvg_path)
if lsvg_path and xargs_path:
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc == 0 and out:
vgs_facts['vgs'] = {}
for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
vgs_facts['vgs'][m.group(1)] = []
pp_size = 0
cmd = "%s %s" % (lsvg_path, m.group(1))
rc, out, err = self.module.run_command(cmd)
if rc == 0 and out:
pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)', out).group(1)
for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*', m.group(0)):
pv_info = {'pv_name': n.group(1),
'pv_state': n.group(2),
'total_pps': n.group(3),
'free_pps': n.group(4),
'pp_size': pp_size
}
vgs_facts['vgs'][m.group(1)].append(pv_info)
return vgs_facts
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
# AIX does not have mtab but mount command is only source of info (or to use
# api calls to get same info)
mount_path = self.module.get_bin_path('mount')
rc, mount_out, err = self.module.run_command(mount_path)
if mount_out:
for line in mount_out.split('\n'):
fields = line.split()
if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
if re.match('^/', fields[0]):
# normal mount
mount_facts['mounts'].append({'mount': fields[1],
'device': fields[0],
'fstype': fields[2],
'options': fields[6],
'time': '%s %s %s' % (fields[3], fields[4], fields[5])})
else:
# nfs or cifs based mount
# in case of nfs if no mount options are provided on command line
# add into fields empty string...
if len(fields) < 8:
fields.append("")
mount_facts['mounts'].append({'mount': fields[2],
'device': '%s:%s' % (fields[0], fields[1]),
'fstype': fields[3],
'options': fields[7],
'time': '%s %s %s' % (fields[4], fields[5], fields[6])})
return mount_facts
def get_device_facts(self):
device_facts = {}
device_facts['devices'] = {}
lsdev_cmd = self.module.get_bin_path('lsdev', True)
lsattr_cmd = self.module.get_bin_path('lsattr', True)
rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
for line in out_lsdev.splitlines():
field = line.split()
device_attrs = {}
device_name = field[0]
device_state = field[1]
device_type = field[2:]
lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
for attr in out_lsattr.splitlines():
attr_fields = attr.split()
attr_name = attr_fields[0]
attr_parameter = attr_fields[1]
device_attrs[attr_name] = attr_parameter
device_facts['devices'][device_name] = {
'state': device_state,
'type': ' '.join(device_type),
'attributes': device_attrs
}
return device_facts
class AIXHardwareCollector(HardwareCollector):
_platform = 'AIX'
_fact_class = AIXHardware
| [
"[email protected]"
] | |
2efe378579a32f494f6942fa0ac13a700a233957 | cffee94b843fff699f68eaae972ed829858fbb0d | /typings/mediafile/mutagen/mp3/__init__.pyi | da26b2285df4dd3b5373082919fadc979a486824 | [
"MIT"
] | permissive | Josef-Friedrich/phrydy | 3b5fae00d3d7210821dc9037d00f9432e1df3c2d | c6e17e8b9e24678ec7672bff031d0370bfa8b6f8 | refs/heads/main | 2023-08-25T12:11:47.333984 | 2023-08-08T14:50:08 | 2023-08-08T14:50:08 | 66,490,323 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,255 | pyi | """
This type stub file was generated by pyright.
"""
from __future__ import division
from functools import partial
from io import BytesIO
from mutagen._util import BitReader, cdata, iterbytes
"""
http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header
http://wiki.hydrogenaud.io/index.php?title=MP3
"""
class LAMEError(Exception): ...
class LAMEHeader:
"""http://gabriel.mp3-tech.org/mp3infotag.html"""
vbr_method = ...
lowpass_filter = ...
quality = ...
vbr_quality = ...
track_peak = ...
track_gain_origin = ...
track_gain_adjustment = ...
album_gain_origin = ...
album_gain_adjustment = ...
encoding_flags = ...
ath_type = ...
bitrate = ...
encoder_delay_start = ...
encoder_padding_end = ...
source_sample_frequency_enum = ...
unwise_setting_used = ...
stereo_mode = ...
noise_shaping = ...
mp3_gain = ...
surround_info = ...
preset_used = ...
music_length = ...
music_crc = ...
header_crc = ...
def __init__(self, xing, fileobj) -> None:
"""Raises LAMEError if parsing fails"""
...
def guess_settings(self, major, minor):
"""Gives a guess about the encoder settings used. Returns an empty
string if unknown.
The guess is mostly correct in case the file was encoded with
the default options (-V --preset --alt-preset --abr -b etc) and no
other fancy options.
Args:
major (int)
minor (int)
Returns:
text
"""
...
@classmethod
def parse_version(cls, fileobj):
"""Returns a version string and True if a LAMEHeader follows.
The passed file object will be positioned right before the
lame header if True.
Raises LAMEError if there is no lame version info.
"""
...
class XingHeaderError(Exception): ...
class XingHeaderFlags:
FRAMES = ...
BYTES = ...
TOC = ...
VBR_SCALE = ...
class XingHeader:
frames = ...
bytes = ...
toc = ...
vbr_scale = ...
lame_header = ...
lame_version = ...
lame_version_desc = ...
is_info = ...
def __init__(self, fileobj) -> None:
"""Parses the Xing header or raises XingHeaderError.
The file position after this returns is undefined.
"""
...
def get_encoder_settings(self): # -> Literal['']:
"""Returns the guessed encoder settings"""
...
@classmethod
def get_offset(cls, info): # -> Literal[36, 21, 13]:
"""Calculate the offset to the Xing header from the start of the
MPEG header including sync based on the MPEG header's content.
"""
...
class VBRIHeaderError(Exception): ...
class VBRIHeader:
version = ...
quality = ...
bytes = ...
frames = ...
toc_scale_factor = ...
toc_frames = ...
toc = ...
def __init__(self, fileobj) -> None:
"""Reads the VBRI header or raises VBRIHeaderError.
The file position is undefined after this returns
"""
...
@classmethod
def get_offset(cls, info): # -> Literal[36]:
"""Offset in bytes from the start of the MPEG header including sync"""
...
| [
"[email protected]"
] | |
14ecb79893f2a150fcc1e6200c9e85886e0f7225 | e282226e8fda085f4c64c044327eceb3388e94ce | /mainapp/api/urls.py | 1b3871642a15056f10650c9fb8bffcec8a5d906f | [] | no_license | Pavlenkovv/REST-API | 2bf36f40104a51f2735ce3dd3eebcf274061a1a2 | 352d0bd24e88fdb793e658c5b6eaffa97b56062c | refs/heads/main | 2023-03-15T22:45:50.121953 | 2021-03-07T07:56:31 | 2021-03-07T07:56:31 | 344,887,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .api_views import AuthorViewSet, NewsPostViewSet, CommentViewSet
router = DefaultRouter()
router.register(r"newsposts", NewsPostViewSet, basename="user")
router.register(r"author", AuthorViewSet)
router.register(r"comment", CommentViewSet)
urlpatterns = [path("api/", include(router.urls))]
urlpatterns += router.urls
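# Note: the router's routes are exposed twice here, once under "api/" and once
# at the root via the line above; drop one registration if a single prefix is
# intended.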
| [
"[email protected]"
] | |
1b41395082d1617e92cb4539c977d7f616a594fc | ecd630f54fefa0a8a4937ac5c6724f9a3bb215c3 | /projeto/avalista/migrations/0022_auto_20200910_1230.py | 8922215b9bc4a928404f7c8043839ce3aebed4a8 | [] | no_license | israelwerther/Esctop_Israel_Estoque | 49968751464a38c473298ed876da7641efedf8de | d6ab3e502f2a97a0d3036351e59c2faa267c0efd | refs/heads/master | 2023-01-07T20:21:38.381593 | 2020-11-12T17:35:14 | 2020-11-12T17:35:14 | 258,642,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # Generated by Django 3.0.7 on 2020-09-10 12:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('avalista', '0021_avalista_fiador_n_operacao'),
]
operations = [
migrations.AlterField(
model_name='avalista',
name='fiador_agencia',
field=models.CharField(blank=True, max_length=15, null=True, verbose_name='Nº agência'),
),
migrations.AlterField(
model_name='avalista',
name='fiador_conta',
field=models.CharField(blank=True, max_length=15, null=True, verbose_name='Nº conta'),
),
]
| [
"[email protected]"
] | |
6fcd77974cc305566c9496941a87ef64cb688e50 | 66fda6586a902f8043b1f5e9532699babc7b591a | /lib_openshift/models/v1_deployment_trigger_image_change_params.py | cdb5495ce392554744c8473da2b748a72362bdae | [
"Apache-2.0"
] | permissive | chouseknecht/lib_openshift | 86eff74b4659f05dfbab1f07d2d7f42b21e2252d | 02b0e4348631e088e72a982a55c214b30a4ab9d9 | refs/heads/master | 2020-12-11T05:23:17.081794 | 2016-07-28T20:15:39 | 2016-07-28T20:15:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,799 | py | # coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1DeploymentTriggerImageChangeParams(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
def __init__(self, automatic=None, container_names=None, _from=None, last_triggered_image=None):
"""
V1DeploymentTriggerImageChangeParams - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'automatic': 'bool',
'container_names': 'list[str]',
'_from': 'V1ObjectReference',
'last_triggered_image': 'str'
}
self.attribute_map = {
'automatic': 'automatic',
'container_names': 'containerNames',
'_from': 'from',
'last_triggered_image': 'lastTriggeredImage'
}
self._automatic = automatic
self._container_names = container_names
self.__from = _from
self._last_triggered_image = last_triggered_image
@property
def automatic(self):
"""
Gets the automatic of this V1DeploymentTriggerImageChangeParams.
Automatic means that the detection of a new tag value should result in a new deployment.
:return: The automatic of this V1DeploymentTriggerImageChangeParams.
:rtype: bool
"""
return self._automatic
@automatic.setter
def automatic(self, automatic):
"""
Sets the automatic of this V1DeploymentTriggerImageChangeParams.
Automatic means that the detection of a new tag value should result in a new deployment.
:param automatic: The automatic of this V1DeploymentTriggerImageChangeParams.
:type: bool
"""
self._automatic = automatic
@property
def container_names(self):
"""
Gets the container_names of this V1DeploymentTriggerImageChangeParams.
ContainerNames is used to restrict tag updates to the specified set of container names in a pod.
:return: The container_names of this V1DeploymentTriggerImageChangeParams.
:rtype: list[str]
"""
return self._container_names
@container_names.setter
def container_names(self, container_names):
"""
Sets the container_names of this V1DeploymentTriggerImageChangeParams.
ContainerNames is used to restrict tag updates to the specified set of container names in a pod.
:param container_names: The container_names of this V1DeploymentTriggerImageChangeParams.
:type: list[str]
"""
self._container_names = container_names
@property
def _from(self):
"""
Gets the _from of this V1DeploymentTriggerImageChangeParams.
From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.
:return: The _from of this V1DeploymentTriggerImageChangeParams.
:rtype: V1ObjectReference
"""
return self.__from
@_from.setter
def _from(self, _from):
"""
Sets the _from of this V1DeploymentTriggerImageChangeParams.
From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.
:param _from: The _from of this V1DeploymentTriggerImageChangeParams.
:type: V1ObjectReference
"""
self.__from = _from
@property
def last_triggered_image(self):
"""
Gets the last_triggered_image of this V1DeploymentTriggerImageChangeParams.
LastTriggeredImage is the last image to be triggered.
:return: The last_triggered_image of this V1DeploymentTriggerImageChangeParams.
:rtype: str
"""
return self._last_triggered_image
@last_triggered_image.setter
def last_triggered_image(self, last_triggered_image):
"""
Sets the last_triggered_image of this V1DeploymentTriggerImageChangeParams.
LastTriggeredImage is the last image to be triggered.
:param last_triggered_image: The last_triggered_image of this V1DeploymentTriggerImageChangeParams.
:type: str
"""
self._last_triggered_image = last_triggered_image
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
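# Minimal usage sketch (field values are illustrative, not from the spec):
#   params = V1DeploymentTriggerImageChangeParams(automatic=True,
#                                                 container_names=['app'])
#   print(params.to_str())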
| [
"[email protected]"
] | |
f9242da26ab0e85261149acc3935789753a44160 | 0cafca9e27e70aa47b3774a13a537f45410f13f7 | /idb/ipc/push.py | c7f6d1ab8f6e77317e6d081e0655d31ebf0c16a5 | [
"MIT"
] | permissive | fakeNetflix/facebook-repo-idb | 18b67ca6cfa0edd3fa7b9c4940fec6c3f0ccfa73 | eb4ed5a7dc4a14b224a22e833294d7366fe4725e | refs/heads/master | 2023-01-05T13:19:40.755318 | 2019-08-16T15:23:45 | 2019-08-16T15:25:00 | 203,098,477 | 1 | 0 | MIT | 2023-01-04T07:33:09 | 2019-08-19T04:31:16 | Objective-C | UTF-8 | Python | false | false | 1,039 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from idb.common.stream import stream_map
from idb.common.tar import generate_tar
from idb.grpc.idb_pb2 import Payload, PushRequest, PushResponse
from idb.grpc.stream import Stream, drain_to_stream
from idb.grpc.types import CompanionClient
async def daemon(
client: CompanionClient, stream: Stream[PushResponse, PushRequest]
) -> None:
async with client.stub.push.open() as companion:
await companion.send_message(await stream.recv_message())
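        # Interpretation (not an upstream comment): a local companion can read
        # the listed paths directly, so requests pass through unchanged, while
        # a remote companion receives the files as a streamed tar archive.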
if client.is_local:
generator = stream
else:
paths = [request.payload.file_path async for request in stream]
generator = stream_map(
generate_tar(paths=paths),
lambda chunk: PushRequest(payload=Payload(data=chunk)),
)
response = await drain_to_stream(
stream=companion, generator=generator, logger=client.logger
)
await stream.send_message(response)
| [
"[email protected]"
] | |
181269644d8602fc2dcb673b30857f2da8b2b11f | 6deafbf6257a5c30f084c3678712235c2c31a686 | /Toolz/sqlmap/tamper/least.py | 53a8a6aadefe283a268fd3ad7a0c5fd1f51f2a67 | [
"Unlicense",
"LicenseRef-scancode-generic-cla",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-other-permissive"
] | permissive | thezakman/CTF-Heaven | 53fcb4a72afa821ad05d8cc3b309fb388f958163 | 4b52a2178922f1502ab00fa8fc156d35e1dc653f | refs/heads/master | 2023-04-05T18:20:54.680378 | 2023-03-21T13:47:45 | 2023-03-21T13:47:45 | 167,290,879 | 182 | 24 | Unlicense | 2022-11-29T21:41:30 | 2019-01-24T02:44:24 | Python | UTF-8 | Python | false | false | 1,126 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import re
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.HIGHEST
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces greater than operator ('>') with 'LEAST' counterpart
Tested against:
* MySQL 4, 5.0 and 5.5
* Oracle 10g
* PostgreSQL 8.3, 8.4, 9.0
Notes:
* Useful to bypass weak and bespoke web application firewalls that
filter the greater than character
* The LEAST clause is a widespread SQL command. Hence, this
tamper script should work against majority of databases
>>> tamper('1 AND A > B')
'1 AND LEAST(A,B+1)=B+1'
"""
retVal = payload
if payload:
match = re.search(r"(?i)(\b(AND|OR)\b\s+)([^>]+?)\s*>\s*(\w+|'[^']+')", payload)
if match:
_ = "%sLEAST(%s,%s+1)=%s+1" % (match.group(1), match.group(3), match.group(4), match.group(4))
retVal = retVal.replace(match.group(0), _)
return retVal
| [
"[email protected]"
] | |
788ecb8dfd993ef9d68c1c979145bef4be44c7a1 | 516dea668ccdc13397fd140f9474939fa2d7ac10 | /enterprisebanking/middlewares.py | ad1d6a91a6ff2f6a7afebb8c4d5c122ae4ea0f71 | [] | no_license | daniel-kanchev/enterprisebanking | 08f1162647a0820aafa5a939e64c1cceb7844977 | bdb7bc4676419d7dcfe47ca8e817774ad031b585 | refs/heads/main | 2023-04-09T19:29:30.892047 | 2021-04-07T08:10:15 | 2021-04-07T08:10:15 | 355,463,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,670 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class enterprisebankingSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, or item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class enterprisebankingDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
a8691c22467753872cc6ea65d244c12c491dc815 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_nationality.py | 4e1dcbd9aa26fd3af3fbdc1264cb9f070b10fdb7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py |
#calss header
class _NATIONALITY():
def __init__(self,):
self.name = "NATIONALITY"
self.definitions = [u'the official right to belong to a particular country: ', u'a group of people of the same race, religion, traditions, etc.: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
8fb5e452de9da869a55ccca9cd00839bdadeeeab | 3bfa43cd86d1fb3780f594c181debc65708af2b8 | /algorithms/sort/heap_sort.py | 0f1953ff4b5ac7e3fd902dd4f15744131c3cc8bf | [] | no_license | ninjaboynaru/my-python-demo | 2fdb6e75c88e07519d91ee8b0e650fed4a2f9a1d | d679a06a72e6dc18aed95c7e79e25de87e9c18c2 | refs/heads/master | 2022-11-06T14:05:14.848259 | 2020-06-21T20:10:05 | 2020-06-21T20:10:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py |
"""
<https://docs.python.org/3/library/heapq.html>
<https://www.youtube.com/watch?v=AEAmgbls8TM&feature=youtu.be>
Steps:
1. Put every item in the list into a heap
2. Each step get the smallest item from the heap, put the smallest into
a new list
3. Repeat until the heap is empty
```python
from heapq import heappush, heappop
# This is the simple version using the heapq module
def heap_sort(lst):
h = []
for val in lst:
heappush(h, val)
return [heappop(h) for i in range(len(h))]
```
There is also an in-place heap sort.
Steps:
1. Heapification (bottom-up heapify the array)
   1. Sink nodes in reverse order, sink(k)
   2. After sinking, the tree rooted at position k is guaranteed to be a heap
2. Delete the head of the heap: swap the last item into the root, shrink the
   heap by one, and sink(0)
Time complexity: O(N log(N))
Space complexity: O(1) auxiliary (the recursive sink adds O(log N) stack)
The definition of sink(k):
Steps:
1. If the k-th item is smaller than one of its children, swap it with that
   child; the children of the k-th item are at (2*k+1) and (2*k+2).
   (if the item is smaller than both children, swap with the larger one)
2. Repeat this until the end of the heap array.
Example:
3, 0, 1, 7, 9, 2
Heapifiy:
9
7 2
3 0 1
Delete head of heap, and sink(0):
7
3 2
1 0
Delete head of heap, and sink(0):
3
1 2
0
Delete head of heap, and sink(0):
2
1 0
Delete head of heap, and sink(0):
1
0
Delete head of heap, and sink(0):
0
"""
def heap_sort(lst):
    def sink(start, end):
        """ MaxHeap sink.
        If lst[start] is smaller than the larger of its children, swap them
        and keep sinking until the heap property holds.
        """
        left = 2*start + 1
        right = 2*start + 2
        if left > end:
            return
        if right > end or lst[left] > lst[right]:
            swap_pos = left
        else:
            swap_pos = right
        # Swap only when the child outranks the parent; swapping
        # unconditionally would push large values below smaller ones and
        # break the sort on inputs such as [5, 4, 3].
        if lst[swap_pos] > lst[start]:
            lst[start], lst[swap_pos] = lst[swap_pos], lst[start]
            sink(swap_pos, end)
# Bottom-up heapify the array
for k in range(len(lst)-1, -1, -1):
sink(k, len(lst)-1)
# print(lst)
    # Repeatedly delete the head of the heap: swap the last item into the
    # root, shrink the heap by one, and sink(0)
for end in range(len(lst) - 1, 0, -1):
first = lst[0]
lst[0] = lst[end]
lst[end] = first
sink(0, end-1)
# print(lst)
if __name__ == "__main__":
lst = [3, 0, 1, 7, 9, 2]
heap_sort(lst)
print(lst)
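    # Additional sanity check on a descending input (illustrative extra test,
    # not part of the original example); it stresses the conditional swap in
    # sink(), which must leave an already-valid parent in place.
    lst2 = [5, 4, 3]
    heap_sort(lst2)
    assert lst2 == [3, 4, 5], lst2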
| [
"[email protected]"
] | |
265d01952ab7506e909f20767daaeac5d52864e4 | 4ce2cff60ddbb9a3b6fc2850187c86f866091b13 | /tfrecords/src/wai/tfrecords/object_detection/dataset_tools/create_oid_tf_record.py | 271fd0aac175d399dda9b528a9a311145f48cfc1 | [
"MIT",
"Apache-2.0"
] | permissive | 8176135/tensorflow | 18cb8a0432ab2a0ea5bacd03309e647f39cb9dd0 | 2c3b4b1d66a80537f3e277d75ec1d4b43e894bf1 | refs/heads/master | 2020-11-26T05:00:56.213093 | 2019-12-19T08:13:44 | 2019-12-19T08:13:44 | 228,970,478 | 0 | 0 | null | 2019-12-19T03:51:38 | 2019-12-19T03:51:37 | null | UTF-8 | Python | false | false | 5,240 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates TFRecords of Open Images dataset for object detection.
Example usage:
python object_detection/dataset_tools/create_oid_tf_record.py \
--input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \
--input_image_label_annotations_csv=/path/to/input/annotations-label.csv \
--input_images_directory=/path/to/input/image_pixels_directory \
--input_label_map=/path/to/input/labels_bbox_545.labelmap \
--output_tf_record_path_prefix=/path/to/output/prefix.tfrecord
CSVs with bounding box annotations and image metadata (including the image URLs)
can be downloaded from the Open Images GitHub repository:
https://github.com/openimages/dataset
This script will include every image found in the input_images_directory in the
output TFRecord, even if the image has no corresponding bounding box annotations
in the input_annotations_csv. If input_image_label_annotations_csv is specified,
it will add image-level labels as well. Note that the information of whether a
label is positivelly or negativelly verified is NOT added to tfrecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import pandas as pd
import tensorflow as tf
from wai.tfrecords.object_detection.dataset_tools import oid_tfrecord_creation
from wai.tfrecords.object_detection.dataset_tools import tf_record_creation_util
from wai.tfrecords.object_detection.utils import label_map_util
tf.flags.DEFINE_string('input_box_annotations_csv', None,
'Path to CSV containing image bounding box annotations')
tf.flags.DEFINE_string('input_images_directory', None,
'Directory containing the image pixels '
'downloaded from the OpenImages GitHub repository.')
tf.flags.DEFINE_string('input_image_label_annotations_csv', None,
'Path to CSV containing image-level labels annotations')
tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')
tf.flags.DEFINE_string(
'output_tf_record_path_prefix', None,
'Path to the output TFRecord. The shard index and the number of shards '
'will be appended for each output shard.')
tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards')
FLAGS = tf.flags.FLAGS
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
required_flags = [
'input_box_annotations_csv', 'input_images_directory', 'input_label_map',
'output_tf_record_path_prefix'
]
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)
if FLAGS.input_image_label_annotations_csv:
all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)
all_label_annotations.rename(
columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
else:
all_label_annotations = None
all_images = tf.gfile.Glob(
os.path.join(FLAGS.input_images_directory, '*.jpg'))
all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
all_annotations = pd.concat(
[all_box_annotations, all_image_ids, all_label_annotations])
tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
FLAGS.num_shards)
for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
counter)
image_id, image_annotations = image_data
# In OID image file names are formed by appending ".jpg" to the image ID.
image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
with tf.gfile.Open(image_path) as image_file:
encoded_image = image_file.read()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
image_annotations, label_map, encoded_image)
if tf_example:
shard_idx = int(image_id, 16) % FLAGS.num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
] | |
8da1f2b67b46206e3835fdfee41f7365ac844f46 | 577f03954ec69ed82eaea32c62c8eba9ba6a01c1 | /py/testdir_ec2_only/test_parse_covtype20x_s3.py | d6207e11b1f8763b5cd9fdd1466e72b472d7c03f | [
"Apache-2.0"
] | permissive | ledell/h2o | 21032d784a1a4bb3fe8b67c9299f49c25da8146e | 34e271760b70fe6f384e106d84f18c7f0adb8210 | refs/heads/master | 2020-02-26T13:53:01.395087 | 2014-12-29T04:14:29 | 2014-12-29T04:14:29 | 24,823,632 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | import unittest, sys, random, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
print "Will build clouds with incrementing heap sizes and import folder/parse"
@classmethod
def tearDownClass(cls):
# the node state is gone when we tear down the cloud, so pass the ignore here also.
h2o.tear_down_cloud(sandboxIgnoreErrors=True)
def test_parse_covtype20x_loop_s3(self):
bucket = 'home-0xdiag-datasets'
importFolderPath = "standard"
csvFilename = "covtype20x.data"
csvPathname = importFolderPath + "/" + csvFilename
timeoutSecs = 500
trialMax = 3
for tryHeap in [4,12]:
print "\n", tryHeap,"GB heap, 1 jvm per host, import folder,", \
"then parse 'covtype20x.data'"
h2o.init(java_heap_GB=tryHeap)
# don't raise exception if we find something bad in h2o stdout/stderr?
h2o.nodes[0].sandboxIgnoreErrors = True
for trial in range(trialMax):
hex_key = csvFilename + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='s3', hex_key=hex_key,
timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=60)
elapsed = time.time() - start
print "parse result:", parseResult['destination_key']
print "Trial #", trial, "completed in", elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
removeKeyResult = h2o.nodes[0].remove_key(key=hex_key)
h2o.tear_down_cloud()
# sticky ports? wait a bit.
time.sleep(5)
if __name__ == '__main__':
h2o.unit_main()
| [
"[email protected]"
] | |
b2c6540ba4582aa077ad54bbf8c43422c96bc68e | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/comp/trnsmtderrpktshist1d.py | 132265e9aed2a406e03e9466df4b0697c29e891b | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,008 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class TrnsmtdErrPktsHist1d(Mo):
"""
A class that represents historical statistics for transmitted error packets in a 1 day sampling interval. This class updates every hour.
"""
meta = StatsClassMeta("cobra.model.comp.TrnsmtdErrPktsHist1d", "transmitted error packets")
counter = CounterMeta("error", CounterCategory.COUNTER, "packets", "transmitted error packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "errorCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "errorPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "errorMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "errorMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "errorAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "errorSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "errorThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "errorTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "errorRate"
meta._counters.append(counter)
counter = CounterMeta("drop", CounterCategory.COUNTER, "packets", "transmitted dropped packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "dropCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "dropPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "dropMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "dropMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "dropAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "dropSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "dropThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "dropTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "dropRate"
meta._counters.append(counter)
meta.moClassName = "compTrnsmtdErrPktsHist1d"
meta.rnFormat = "HDcompTrnsmtdErrPkts1d-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical transmitted error packets stats in 1 day"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.comp.Hv")
meta.parentClasses.add("cobra.model.comp.HpNic")
meta.parentClasses.add("cobra.model.comp.VNic")
meta.parentClasses.add("cobra.model.comp.Vm")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.comp.TrnsmtdErrPktsHist")
meta.rnPrefixes = [
('HDcompTrnsmtdErrPkts1d-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "dropAvg", "dropAvg", 7749, PropCategory.IMPLICIT_AVG)
prop.label = "transmitted dropped packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropAvg", prop)
prop = PropMeta("str", "dropCum", "dropCum", 7745, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "transmitted dropped packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("dropCum", prop)
prop = PropMeta("str", "dropMax", "dropMax", 7748, PropCategory.IMPLICIT_MAX)
prop.label = "transmitted dropped packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropMax", prop)
prop = PropMeta("str", "dropMin", "dropMin", 7747, PropCategory.IMPLICIT_MIN)
prop.label = "transmitted dropped packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropMin", prop)
prop = PropMeta("str", "dropPer", "dropPer", 7746, PropCategory.IMPLICIT_PERIODIC)
prop.label = "transmitted dropped packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPer", prop)
prop = PropMeta("str", "dropRate", "dropRate", 7753, PropCategory.IMPLICIT_RATE)
prop.label = "transmitted dropped packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("dropRate", prop)
prop = PropMeta("str", "dropSpct", "dropSpct", 7750, PropCategory.IMPLICIT_SUSPECT)
prop.label = "transmitted dropped packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("dropSpct", prop)
prop = PropMeta("str", "dropThr", "dropThr", 7751, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "transmitted dropped packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("dropThr", prop)
prop = PropMeta("str", "dropTr", "dropTr", 7752, PropCategory.IMPLICIT_TREND)
prop.label = "transmitted dropped packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("dropTr", prop)
prop = PropMeta("str", "errorAvg", "errorAvg", 7770, PropCategory.IMPLICIT_AVG)
prop.label = "transmitted error packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("errorAvg", prop)
prop = PropMeta("str", "errorCum", "errorCum", 7766, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "transmitted error packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("errorCum", prop)
prop = PropMeta("str", "errorMax", "errorMax", 7769, PropCategory.IMPLICIT_MAX)
prop.label = "transmitted error packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("errorMax", prop)
prop = PropMeta("str", "errorMin", "errorMin", 7768, PropCategory.IMPLICIT_MIN)
prop.label = "transmitted error packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("errorMin", prop)
prop = PropMeta("str", "errorPer", "errorPer", 7767, PropCategory.IMPLICIT_PERIODIC)
prop.label = "transmitted error packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("errorPer", prop)
prop = PropMeta("str", "errorRate", "errorRate", 7774, PropCategory.IMPLICIT_RATE)
prop.label = "transmitted error packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("errorRate", prop)
prop = PropMeta("str", "errorSpct", "errorSpct", 7771, PropCategory.IMPLICIT_SUSPECT)
prop.label = "transmitted error packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("errorSpct", prop)
prop = PropMeta("str", "errorThr", "errorThr", 7772, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "transmitted error packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("errorThr", prop)
prop = PropMeta("str", "errorTr", "errorTr", 7773, PropCategory.IMPLICIT_TREND)
prop.label = "transmitted error packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("errorTr", prop)
prop = PropMeta("str", "index", "index", 5957, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
5d0a2f7e05ee7c3731f9b7550e0d5d9f8625cb88 | 78c08cd3ef66836b44373280a333c040ccb99605 | /ostap/fitting/tests/test_fitting_convolution.py | 3f980fbf093211f18849b15254d2f25697d8e7a7 | [
"BSD-3-Clause"
] | permissive | Pro100Tema/ostap | 11ccbc546068e65aacac5ddd646c7550086140a7 | 1765304fce43714e1f51dfe03be0daa5aa5d490f | refs/heads/master | 2023-02-24T08:46:07.532663 | 2020-01-27T13:46:30 | 2020-01-27T13:46:30 | 200,378,716 | 0 | 0 | BSD-3-Clause | 2019-08-03T13:28:08 | 2019-08-03T13:28:07 | null | UTF-8 | Python | false | false | 3,426 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developers.
# =============================================================================
# @file test_fitting_convolution.py
# Test module for ostap/fitting/convolution.py
# =============================================================================
""" Test module for ostap/fitting/convolution.py
"""
# =============================================================================
__author__ = "Ostap developers"
__all__ = () ## nothing to import
# =============================================================================
import ROOT, random
import ostap.fitting.roofit
import ostap.fitting.models as Models
from ostap.core.core import cpp, VE, dsID
from ostap.logger.utils import rooSilent
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ or '__builtin__' == __name__ :
logger = getLogger ( 'test_fitting_convolution' )
else :
logger = getLogger ( __name__ )
# =============================================================================
## make
x = ROOT.RooRealVar ( 'x', 'test' , 1 , 10 )
models = set()
# =============================================================================
## Asymmetric Laplace
# =============================================================================
def test_laplace():
logger.info ('Test Asymmetric Laplace shape' )
laplace = Models.AsymmetricLaplace_pdf ( name = 'AL',
xvar = x ,
mean = 5 ,
slope = 1 )
from ostap.fitting.convolution import Convolution_pdf
## constant resolution
laplace_1 = Convolution_pdf ( name = 'L1' , pdf = laplace, resolution = 0.75 )
## resolution PDF
from ostap.fitting.resolution import ResoApo2
rAp = ResoApo2 ( 'A' , x , 0.75 )
## resolution as PDF
laplace_2 = Convolution_pdf ( name = 'L2' , pdf = laplace, resolution = rAp )
laplace.draw( silent = True )
laplace_1.draw( silent = True )
laplace_2.draw()
models.add ( laplace )
models.add ( laplace_1 )
models.add ( laplace_2 )
# =============================================================================
## check that everything is serializable
# =============================================================================
def test_db() :
logger.info('Saving all objects into DBASE')
import ostap.io.zipshelve as DBASE
from ostap.utils.timing import timing
with timing( name = 'Save everything to DBASE'), DBASE.tmpdb() as db :
db['models' ] = models
db.ls()
# =============================================================================
if '__main__' == __name__ :
test_laplace () ## Laplace-function + background
    ## check finally that everything is serializable:
test_db ()
# =============================================================================
# The END
# =============================================================================
| [
"[email protected]"
] | |
f66e5ca5bccba463ba1c7ea0e178e85c4982a93f | 3e5ecad4d2f681f2f4f749109cc99deea1209ea4 | /Dacon/solar1/test04_solar9.py | 0f9e499e4f86263fff68de5a667aeda9b729cb92 | [] | no_license | SunghoonSeok/Study | f41ede390079037b2090e6df20e5fb38f2e59b8f | 50f02b9c9bac904cd4f6923b41efabe524ff3d8a | refs/heads/master | 2023-06-18T06:47:55.545323 | 2021-07-05T00:47:55 | 2021-07-05T00:47:55 | 324,866,762 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,798 | py | # 7일의 데이터로 2일의 target값 구하기
# 시간별로 데이터를 나눠서 훈련
import numpy as np
import pandas as pd
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Input, LSTM, Dropout, Conv1D, Flatten, MaxPooling1D, GRU, SimpleRNN
from tensorflow.keras.backend import mean, maximum
# Define the helper functions we need
# Add a GHI (global horizontal irradiance) column
def Add_features(data):
data['cos'] = np.cos(np.pi/2 - np.abs(data['Hour']%12 - 6)/6*np.pi/2)
data.insert(1,'GHI',data['DNI']*data['cos']+data['DHI'])
data.drop(['cos'], axis= 1, inplace = True)
return data
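# The cos column above approximates cos(solar zenith) from the hour of day,
# so GHI ~ DNI*cos(zenith) + DHI (the standard irradiance decomposition).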
# Function that slices the data into sliding windows of a given size (days)
def split_x(data, size):
x = []
for i in range(len(data)-size+1):
subset = data[i : (i+size)]
x.append([item for item in subset])
print(type(x))
return np.array(x)
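# e.g. split_x([0, 1, 2, 3, 4], 3) -> array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])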
# Helper for the quantile (pinball) loss
def quantile_loss(q, y_true, y_pred):
err = (y_true - y_pred)
return K.mean(K.maximum(q*err, (q-1)*err), axis=-1)
quantiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
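# Pinball-loss sanity check with toy numbers: for q=0.3 and err = y_true-y_pred = -2,
# the loss is max(0.3*(-2), (0.3-1)*(-2)) = 1.4, so low quantiles penalise
# over-prediction more heavily.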
# Use only 7 of the data columns
def preprocess_data(data):
data = Add_features(data)
temp = data.copy()
temp = temp[['GHI', 'DHI', 'DNI', 'WS', 'RH', 'T','TARGET']]
return temp.iloc[:, :]
# Model; uses Conv1D layers
def DaconModel():
model = Sequential()
model.add(Conv1D(256,2, padding='same', input_shape=(7, 7),activation='relu'))
model.add(Conv1D(128,2, padding='same',activation='relu'))
model.add(Conv1D(64,2, padding='same',activation='relu'))
model.add(Conv1D(32,2, padding='same',activation='relu'))
model.add(Flatten())
model.add(Dense(64,activation='relu'))
model.add(Dense(32,activation='relu'))
model.add(Dense(16,activation='relu'))
model.add(Dense(8,activation='relu'))
model.add(Dense(1))
return model
# Import optimizers
from tensorflow.keras.optimizers import Adam, Adadelta, Adamax, Adagrad
from tensorflow.keras.optimizers import RMSprop, SGD, Nadam
# Compile-and-train function; the Adam optimizer is created per call so the
# learning rate restarts at 0.002 and is then reduced on plateau
# Keep in mind: if the optimizer were created outside the for loop, the
# learning rate would never be reset between quantiles
# In total 48 (hours) * 9 (quantiles) * 2 (Day7, Day8) checkpoint models are created
def only_compile(a, x_train, y_train, x_val, y_val):
for q in quantiles:
        print('Day'+str(i)+' '+str(q)+' is running.')
model = DaconModel()
optimizer = Adam(lr=0.002)
model.compile(loss = lambda y_true,y_pred: quantile_loss(q,y_true,y_pred), optimizer = optimizer, metrics = [lambda y,y_pred: quantile_loss(q,y,y_pred)])
filepath = f'c:/data/test/solar/checkpoint/solar_checkpoint5_time{i}-{a}-{q}.hdf5'
cp = ModelCheckpoint(filepath, save_best_only=True, monitor = 'val_loss')
model.fit(x_train,y_train,epochs = epochs, batch_size = bs, validation_data = (x_val,y_val),callbacks = [es,lr,cp])
return
# 1. 데이터
train = pd.read_csv('c:/data/test/solar/train/train.csv')
sub = pd.read_csv('c:/data/test/solar/sample_submission.csv')
# Save the data as an .npy file
data = train.values
print(data.shape)
np.save('c:/data/test/solar/train.npy', arr=data)
data =np.load('c:/data/test/solar/train.npy')
# Group the data by hour using a transpose
data = data.reshape(1095, 48, 9)
data = np.transpose(data, axes=(1,0,2))
print(data.shape)
data = data.reshape(48*1095,9)
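# Rows are now hour-major: the 1095 days of hour 0 come first, then hour 1,
# and so on, which is what the per-hour slicing below relies on.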
df = train.copy()
df.loc[:,:] = data
df.to_csv('c:/data/test/solar/train_trans.csv', index=False)
# Build a separate model for each hour
train_trans = pd.read_csv('c:/data/test/solar/train_trans.csv')
train_data = preprocess_data(train_trans) # (52560,7)
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
es = EarlyStopping(monitor = 'val_loss', patience = 15)
lr = ReduceLROnPlateau(monitor = 'val_loss', patience = 5, factor = 0.5, verbose = 1)
# Create checkpoints per hour, quantile, and Day7/Day8 in the for loops below
for i in range(48):
train_sort = train_data[1095*(i):1095*(i+1)]
train_sort = np.array(train_sort)
y = train_sort[7:,-1] #(1088,)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(train_sort)
train_sort = scaler.transform(train_sort)
x = split_x(train_sort, 7)
x = x[:-2,:] #(1087,7,7)
y1 = y[:-1] #(1087,)
y2 = y[1:] #(1087,)
from sklearn.model_selection import train_test_split
x_train, x_val, y1_train, y1_val, y2_train, y2_val = train_test_split(x, y1, y2, train_size=0.8, shuffle=True, random_state=32)
epochs = 1000
bs = 32
only_compile(0, x_train, y1_train, x_val, y1_val)
only_compile(1, x_train, y2_train, x_val, y2_val)
| [
"[email protected]"
] | |
f1c1d1272813db29b692fe04bc813b6a679526fc | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/20dbcc2a-5cc5-11e4-af55-00155d01fe08.py | b39c4aee05264d664cba5c47aa38bafddd842eb2 | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | #!/usr/bin/python
################################################################################
# 20dbcc2a-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# [email protected]
# [email protected]
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20dbcc2a-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Auditpol Value
enabled = cli.get_auditpol(r'Special Logon', 'Success')
# Output Lines
self.output = [r'Special Logon', ('Success=' + str(enabled))]
if enabled:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.set_auditpol(r'Special Logon', 'Success', True)
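# Hypothetical driver sketch (the real harness supplies the `cli` object):
#   finding = Finding()
#   if not finding.check(cli):
#       finding.fix(cli)
#       finding.check(cli)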
| [
"[email protected]"
] | |
390ee336f83088e3f9b8609b7c854dfa3f4ea232 | 2e5e990955957cf04367ef6eedd62e6add7ccdc7 | /oms_cms/backend/api/v2/social_networks/serializers.py | 24a77bc22571a871c6dfb51890fd85f061a40858 | [
"BSD-3-Clause"
] | permissive | RomanYarovoi/oms_cms | 3dfcd19ff03b351dc754f73f4a0d8a9986cf28ec | 49c6789242d7a35e81f4f208c04b18fb79249be7 | refs/heads/master | 2021-07-06T18:49:51.021820 | 2020-10-15T05:52:55 | 2020-10-15T05:52:55 | 196,556,814 | 0 | 0 | BSD-3-Clause | 2020-10-15T05:52:57 | 2019-07-12T10:07:29 | JavaScript | UTF-8 | Python | false | false | 312 | py | from rest_framework import serializers
from oms_cms.backend.social_networks.models import SocialNetworks
class SocialNetworksSerializer(serializers.ModelSerializer):
"""Сериализация социальных сетей"""
class Meta:
model = SocialNetworks
fields = '__all__'
| [
"[email protected]"
] | |
2cac3d08334c146dd3333f471c8ee1fa6546c71d | bc9c1a4da0d5bbf8d4721ee7ca5163f488e88a57 | /research/urls.py | fe0aeb667e57278015b49196ad14403f92bec46d | [] | no_license | mit-teaching-systems-lab/newelk | 77f43666f3c70be4c31fdfc6d4a6e9c629c71656 | a2e6665bfcf9e2ea12fde45319027ee4a848f93c | refs/heads/master | 2022-12-13T20:50:17.632513 | 2019-10-03T19:02:01 | 2019-10-03T19:02:01 | 132,154,880 | 0 | 4 | null | 2022-12-08T01:26:56 | 2018-05-04T15:04:20 | Python | UTF-8 | Python | false | false | 222 | py | from django.urls import path
from . import views
urlpatterns = [
# path('chatlogs/', views.streaming_chat_csv),
# path('answerlogs/', views.streaming_answers_view),
path("feedback/", views.toggle_feedback)
]
| [
"[email protected]"
] | |
068c3a2719668d0fbb119a48641c6c1176aefbd9 | 7b4cbaa1e7bab897e34acba06f73ac17760d394a | /sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_synchronization_status.py | 222415631e7bf3e8af75e91992c1a625ab626c3d | [
"Apache-2.0"
] | permissive | nHurD/argo | 0fab7f56179c848ad8a77a9f8981cb62b4a71d09 | f4a65b11a184f7429d0615a6fa65bc2cea4cc425 | refs/heads/master | 2023-01-13T04:39:54.793473 | 2022-12-18T04:48:37 | 2022-12-18T04:48:37 | 227,931,854 | 0 | 2 | Apache-2.0 | 2019-12-13T22:24:19 | 2019-12-13T22:24:18 | null | UTF-8 | Python | false | false | 12,163 | py | """
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.io_argoproj_workflow_v1alpha1_mutex_status import IoArgoprojWorkflowV1alpha1MutexStatus
from argo_workflows.model.io_argoproj_workflow_v1alpha1_semaphore_status import IoArgoprojWorkflowV1alpha1SemaphoreStatus
globals()['IoArgoprojWorkflowV1alpha1MutexStatus'] = IoArgoprojWorkflowV1alpha1MutexStatus
globals()['IoArgoprojWorkflowV1alpha1SemaphoreStatus'] = IoArgoprojWorkflowV1alpha1SemaphoreStatus
class IoArgoprojWorkflowV1alpha1SynchronizationStatus(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'mutex': (IoArgoprojWorkflowV1alpha1MutexStatus,), # noqa: E501
'semaphore': (IoArgoprojWorkflowV1alpha1SemaphoreStatus,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'mutex': 'mutex', # noqa: E501
'semaphore': 'semaphore', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""IoArgoprojWorkflowV1alpha1SynchronizationStatus - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
mutex (IoArgoprojWorkflowV1alpha1MutexStatus): [optional] # noqa: E501
semaphore (IoArgoprojWorkflowV1alpha1SemaphoreStatus): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""IoArgoprojWorkflowV1alpha1SynchronizationStatus - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
mutex (IoArgoprojWorkflowV1alpha1MutexStatus): [optional] # noqa: E501
semaphore (IoArgoprojWorkflowV1alpha1SemaphoreStatus): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"[email protected]"
] | |
64ad76f77783d4b8a4cb1b9d87b673ea62470bf1 | f566dfc5ce189d30696b9bf8b7e8bf9b1ef45614 | /Example/DQN_SimpleMaze/DoubleDQN_SimpleMazeTwoD.py | a8615b896bcd6023b12a714b7533a963e26b7691 | [] | no_license | yangyutu/DeepReinforcementLearning-PyTorch | 3dac4ad67fa3a6301d65ca5c63532f2a278e21d7 | 7af59cb883e24429d42a228584cfc96c42f6d35b | refs/heads/master | 2022-08-16T13:46:30.748383 | 2022-07-30T05:47:47 | 2022-07-30T05:47:47 | 169,829,723 | 12 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,382 | py |
from Agents.DQN.DQN import DQNAgent
from Agents.Core.MLPNet import MultiLayerNetRegression
import json
from torch import optim
from copy import deepcopy
from Env.CustomEnv.SimpleMazeTwoD import SimpleMazeTwoD
import numpy as np
import matplotlib.pyplot as plt
import torch
torch.manual_seed(1)
def plotPolicy(policy, nbActions):
idx, idy = np.where(policy >=0)
action = policy[idx,idy]
plt.scatter(idx, idy, c = action, marker='s', s = 10)
# for i in range(nbActions):
# idx, idy = np.where(policy == i)
# plt.plot(idx,idy, )
# first construct the neutral network
config = dict()
mapName = 'map.txt'
config['trainStep'] = 1000
config['epsThreshold'] = 0.1
config['targetNetUpdateStep'] = 100
config['memoryCapacity'] = 2000
config['trainBatchSize'] = 32
config['gamma'] = 0.9
config['learningRate'] = 0.003
config['netGradClip'] = 1
config['logFlag'] = True
config['logFileName'] = 'SimpleMazeLog/DoubleQtraj' + mapName
config['logFrequency'] = 50
config['netUpdateOption'] = 'doubleQ'
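# Note on 'doubleQ' (descriptive comment, added for clarity): with a double-Q
# update the policy net selects the next action and the target net evaluates it,
#   y = r + gamma * Q_target(s', argmax_a Q_policy(s', a))
# which reduces the overestimation bias of the vanilla DQN target.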
env = SimpleMazeTwoD(mapName)
N_S = env.stateDim
N_A = env.nbActions
netParameter = dict()
netParameter['n_feature'] = N_S
netParameter['n_hidden'] = [100]
netParameter['n_output'] = N_A
policyNet = MultiLayerNetRegression(netParameter['n_feature'],
netParameter['n_hidden'],
netParameter['n_output'])
print(policyNet.state_dict())
targetNet = deepcopy(policyNet)
optimizer = optim.Adam(policyNet.parameters(), lr=config['learningRate'])
agent = DQNAgent(policyNet, targetNet, env, optimizer, torch.nn.MSELoss(), N_S, N_A, config=config)
policy = deepcopy(env.map)
for i in range(policy.shape[0]):
for j in range(policy.shape[1]):
if env.map[i, j] == 0:
policy[i, j] = -1
else:
policy[i, j] = agent.getPolicy(np.array([i, j]))
np.savetxt('DoubleQSimpleMazePolicyBeforeTrain' + mapName + '.txt', policy, fmt='%d', delimiter='\t')
plotPolicy(policy, N_A)
agent.train()
policy = deepcopy(env.map)
for i in range(policy.shape[0]):
for j in range(policy.shape[1]):
if env.map[i, j] == 0:
policy[i, j] = -1
else:
policy[i, j] = agent.getPolicy(np.array([i, j]))
np.savetxt('DoubleQSimpleMazePolicyAfterTrain' + mapName +'.txt', policy, fmt='%d', delimiter='\t')
plt.figure('policy after training')   # separate figure so the two policy plots do not overlay
plotPolicy(policy, N_A)
plt.show()  # without this the scatter plots are never displayed in a plain script run
"[email protected]"
] | |
5a8b3968a4cc55cdc7a8bc045270be33a8d29f1b | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/AlphaPowerSystem-MIB.py | c83f2dc059508d3a6ad59c5621b516f5335d4221 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 87,886 | py | #
# PySNMP MIB module AlphaPowerSystem-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AlphaPowerSystem-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:33:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
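# NOTE (descriptive comment, added for clarity): `mibBuilder` is not imported
# below; pysnmp injects it into the module namespace when it loads this
# compiled MIB, so the file is not meant to be run standalone.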
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, MibIdentifier, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ObjectIdentity, Unsigned32, enterprises, ModuleIdentity, Counter32, Counter64, IpAddress, TimeTicks, Integer32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "MibIdentifier", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ObjectIdentity", "Unsigned32", "enterprises", "ModuleIdentity", "Counter32", "Counter64", "IpAddress", "TimeTicks", "Integer32", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
alpha = ModuleIdentity((1, 3, 6, 1, 4, 1, 7309))
if mibBuilder.loadTexts: alpha.setLastUpdated('201102220000Z')
if mibBuilder.loadTexts: alpha.setOrganization('Alpha Technologies')
if mibBuilder.loadTexts: alpha.setContactInfo('Alpha Technologies 7700 Riverfront Gate Burnaby, BC V5J 5M4 Canada Tel: 1-604-436-5900 Fax: 1-604-436-1233')
if mibBuilder.loadTexts: alpha.setDescription('This MIB defines the information block(s) available in system controllers as defined by the following list: - dcPwrSysDevice: Cordex series of Controllers')
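# Illustrative sketch (added; not part of the generated MIB): how a manager
# could read dcPwrSysChargeVolts with the pysnmp HLAPI. Host, port and
# community string are assumptions, and this compiled MIB must be on the
# pysnmp MIB search path for the symbolic name to resolve.
def _example_read_charge_volts(host='192.0.2.10', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    error_indication, error_status, error_index, var_binds = next(getCmd(
        SnmpEngine(), CommunityData(community, mpModel=1),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('AlphaPowerSystem-MIB', 'dcPwrSysChargeVolts', 0))))
    if error_indication:
        raise RuntimeError(error_indication)
    return var_binds[0][1]  # fixed-point integer; divide by 100 for Volts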
dcpower = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4))
dcPwrSysDevice = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1))
dcPwrSysVariable = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 1))
dcPwrSysString = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2))
dcPwrSysTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3))
dcPwrSysOutputsTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4))
dcPwrSysRelayTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 1))
dcPwrSysAnalogOpTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 2))
dcPwrSysAlrmsTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5))
dcPwrSysRectAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 1))
dcPwrSysDigAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 2))
dcPwrSysCurrAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 3))
dcPwrSysVoltAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 4))
dcPwrSysBattAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 5))
dcPwrSysTempAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 6))
dcPwrSysCustomAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 7))
dcPwrSysMiscAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 8))
dcPwrSysCtrlAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 9))
dcPwrSysAdioAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 10))
dcPwrSysConvAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 11))
dcPwrSysInvAlrmTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 12))
dcPwrSysInputsTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6))
dcPwrSysDigIpTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 1))
dcPwrSysCntrlrIpTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 2))
dcPwrSysRectIpTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 3))
dcPwrSysCustomIpTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 4))
dcPwrSysConvIpTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 5))
dcPwrSysTimerIpTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 6))
dcPwrSysCounterIpTbl = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 7))
dcPwrExternalControls = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 8))
dcPwrVarbindNameReference = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 9))
dcPwrSysChargeVolts = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysChargeVolts.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysChargeVolts.setDescription('This value indicates the present battery voltage. The integer represents a two-digit fixed-decimal value (integer = actual voltage * 100) in Volts.')
dcPwrSysDischargeVolts = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDischargeVolts.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDischargeVolts.setDescription('This value indicates the present load voltage. The integer represents a two-digit fixed-decimal value (integer = actual voltage * 100) in Volts.')
dcPwrSysChargeAmps = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysChargeAmps.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysChargeAmps.setDescription('This value indicates the present battery current. The integer represents a two-digit fixed-decimal value (integer = actual current * 100) in Amps.')
dcPwrSysDischargeAmps = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDischargeAmps.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDischargeAmps.setDescription('This value indicates the present load current. The integer represents a two-digit fixed-decimal value (integer = actual current * 100) in Amps.')
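# Illustrative helper (added; not part of the generated MIB): the four
# measurement scalars above are fixed-point integers scaled by 100.
def _example_decode_fixed_point(raw_value):
    # e.g. a reported 5425 decodes to 54.25 (Volts or Amps)
    return raw_value / 100.0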
dcPwrSysMajorAlarm = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysMajorAlarm.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMajorAlarm.setDescription('Major alarm status of the power system.')
dcPwrSysMinorAlarm = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysMinorAlarm.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMinorAlarm.setDescription('Minor alarm status of the power system.')
dcPwrSysSiteName = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSiteName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSiteName.setDescription('Site Name')
dcPwrSysSiteCity = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSiteCity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSiteCity.setDescription('Site City')
dcPwrSysSiteRegion = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSiteRegion.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSiteRegion.setDescription('Site Region')
dcPwrSysSiteCountry = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSiteCountry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSiteCountry.setDescription('Site Country')
dcPwrSysContactName = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysContactName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysContactName.setDescription('Contact Name')
dcPwrSysPhoneNumber = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysPhoneNumber.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysPhoneNumber.setDescription('Phone Number')
dcPwrSysSiteNumber = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSiteNumber.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSiteNumber.setDescription('Site Number')
dcPwrSysSystemType = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSystemType.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSystemType.setDescription('The type of system being monitored by the agent.')
dcPwrSysSystemSerial = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSystemSerial.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSystemSerial.setDescription('The serial number of the monitored system.')
dcPwrSysSystemNumber = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSystemNumber.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSystemNumber.setDescription('The number of the monitored system.')
dcPwrSysSoftwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSoftwareVersion.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSoftwareVersion.setDescription('The version of software running on the monitored system.')
dcPwrSysSoftwareTimestamp = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 2, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysSoftwareTimestamp.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysSoftwareTimestamp.setDescription('The time stamp of the software running on the monitored system.')
dcPwrSysRelayCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRelayCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRelayCount.setDescription('Number of relay variables in system controller relay table.')
dcPwrSysRelayTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 1, 2), )
if mibBuilder.loadTexts: dcPwrSysRelayTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRelayTable.setDescription('A table of DC power system controller rectifier relay output variables.')
dcPwrSysRelayEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 1, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysRelayIndex"))
if mibBuilder.loadTexts: dcPwrSysRelayEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRelayEntry.setDescription('An entry into the DC power system controller relay output group.')
dcPwrSysRelayIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRelayIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRelayIndex.setDescription('The index of the relay variable in the power system controller relay output group.')
dcPwrSysRelayName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRelayName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRelayName.setDescription('The description of the relay variable as reported by the DC power system controller relay output group.')
dcPwrSysRelayIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRelayIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRelayIntegerValue.setDescription('The integer value of the relay variable as reported by the DC power system controller relay output group.')
dcPwrSysRelayStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRelayStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRelayStringValue.setDescription('The string value of the relay variable as reported by the DC power system controller relay output group.')
dcPwrSysRelaySeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRelaySeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRelaySeverity.setDescription('The integer value of the relay severity level of this variable as reported by the DC power system controller relay output group.')
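# Illustrative sketch (added; not part of the generated MIB): walking the
# relay table columns with the pysnmp HLAPI. Host and community string are
# assumptions, as is the availability of this compiled MIB to pysnmp.
def _example_walk_relay_table(host='192.0.2.10', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    rows = []
    for (error_indication, error_status, error_index, var_binds) in nextCmd(
            SnmpEngine(), CommunityData(community, mpModel=1),
            UdpTransportTarget((host, 161)), ContextData(),
            ObjectType(ObjectIdentity('AlphaPowerSystem-MIB', 'dcPwrSysRelayName')),
            ObjectType(ObjectIdentity('AlphaPowerSystem-MIB', 'dcPwrSysRelayStringValue')),
            lexicographicMode=False):  # stop at the end of the table
        if error_indication or error_status:
            break
        rows.append(tuple(str(vb[1]) for vb in var_binds))
    return rows  # list of (relay name, relay string value) pairs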
dcPwrSysAnalogOpCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAnalogOpCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAnalogOpCount.setDescription('Number of analog output variables in system controller analog output table.')
dcPwrSysAnalogOpTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 2, 2), )
if mibBuilder.loadTexts: dcPwrSysAnalogOpTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAnalogOpTable.setDescription('A table of DC power system controller analog output variables.')
dcPwrSysAnalogOpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 2, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysAnalogOpIndex"))
if mibBuilder.loadTexts: dcPwrSysAnalogOpEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAnalogOpEntry.setDescription('An entry into the DC power system controller analog output group.')
dcPwrSysAnalogOpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAnalogOpIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAnalogOpIndex.setDescription('The index of the analog variable in the power system controller analog output group.')
dcPwrSysAnalogOpName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAnalogOpName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAnalogOpName.setDescription('The description of the analog variable as reported by the DC power system controller analog output group.')
dcPwrSysAnalogOpIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAnalogOpIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAnalogOpIntegerValue.setDescription('The integer value of the analog variable as reported by the DC power system controller analog output group.')
dcPwrSysAnalogOpStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 2, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAnalogOpStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAnalogOpStringValue.setDescription('The string value of the analog variable as reported by the DC power system controller analog output group.')
dcPwrSysAnalogOpSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 4, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAnalogOpSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAnalogOpSeverity.setDescription('The integer value of the analog severity level of this variable as reported by the DC power system controller analog output group.')
dcPwrSysRectAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectAlrmCount.setDescription('Number of rectifier alarm variables in system controller alarm table.')
dcPwrSysRectAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 1, 2), )
if mibBuilder.loadTexts: dcPwrSysRectAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectAlrmTable.setDescription('A table of DC power system controller rectifier alarm variables.')
dcPwrSysRectAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 1, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysRectAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysRectAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectAlrmEntry.setDescription('An entry into the DC power system controller rectifier alarm group.')
dcPwrSysRectAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectAlrmIndex.setDescription('The index of the alarm variable in the DC power system controller table rectifier alarm group.')
dcPwrSysRectAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectAlrmName.setDescription('The description of the alarm variable as reported by the DC power system controller rectifier alarm group.')
dcPwrSysRectAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the DC power system controller rectifier alarm group.')
dcPwrSysRectAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectAlrmStringValue.setDescription('The string value of the alarm variable as reported by the DC power system controller rectifier alarm group.')
dcPwrSysRectAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectAlrmSeverity.setDescription('The integer value of the alarm severity level of this variable as reported by the DC power system controller rectifier alarm group.')
dcPwrSysDigAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigAlrmCount.setDescription('Number of digital alarm variables in system controller alarm table.')
dcPwrSysDigAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 2, 2), )
if mibBuilder.loadTexts: dcPwrSysDigAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigAlrmTable.setDescription('A table of DC power system controller digital alarm variables.')
dcPwrSysDigAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 2, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysDigAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysDigAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigAlrmEntry.setDescription('An entry into the DC power system controller digital alarm group.')
dcPwrSysDigAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigAlrmIndex.setDescription('The index of the alarm variable in the DC power system controller table digital alarm group.')
dcPwrSysDigAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigAlrmName.setDescription('The description of the alarm variable as reported by the DC power system controller digital alarm group.')
dcPwrSysDigAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the DC power system controller digital alarm group.')
dcPwrSysDigAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 2, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigAlrmStringValue.setDescription('The string value of the alarm variable as reported by the DC power system controller digital alarm group.')
dcPwrSysDigAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigAlrmSeverity.setDescription('The integer value of the alarm severity level of this variable as reported by the DC power system controller digital alarm group.')
dcPwrSysCurrAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCurrAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCurrAlrmCount.setDescription('Number of current alarm variables in system controller alarm table.')
dcPwrSysCurrAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 3, 2), )
if mibBuilder.loadTexts: dcPwrSysCurrAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCurrAlrmTable.setDescription('A table of DC power system controller current alarm variables.')
dcPwrSysCurrAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 3, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysCurrAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysCurrAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCurrAlrmEntry.setDescription('An entry into the DC power system controller current alarm group.')
dcPwrSysCurrAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCurrAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCurrAlrmIndex.setDescription('The index of the alarm variable in the DC power system controller table current alarm group.')
dcPwrSysCurrAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 3, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCurrAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCurrAlrmName.setDescription('The description of the alarm variable as reported by the DC power system controller current alarm group.')
dcPwrSysCurrAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCurrAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCurrAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the DC power system controller current alarm group.')
dcPwrSysCurrAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 3, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCurrAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCurrAlrmStringValue.setDescription('The string value of the alarm variable as reported by the DC power system controller current alarm group.')
dcPwrSysCurrAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 3, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCurrAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCurrAlrmSeverity.setDescription('The integer value of the alarm severity level of this variable as reported by the DC power system controller current alarm group.')
dcPwrSysVoltAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 4, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysVoltAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysVoltAlrmCount.setDescription('Number of voltage alarm variables in system controller alarm table.')
dcPwrSysVoltAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 4, 2), )
if mibBuilder.loadTexts: dcPwrSysVoltAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysVoltAlrmTable.setDescription('A table of DC power system controller voltage alarm variables.')
dcPwrSysVoltAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 4, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysVoltAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysVoltAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysVoltAlrmEntry.setDescription('An entry into the DC power system controller voltage alarm group.')
dcPwrSysVoltAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysVoltAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysVoltAlrmIndex.setDescription('The index of the alarm variable in the DC power system controller table voltage alarm group.')
dcPwrSysVoltAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 4, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysVoltAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysVoltAlrmName.setDescription('The description of the alarm variable as reported by the DC power system controller voltage alarm group.')
dcPwrSysVoltAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 4, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysVoltAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysVoltAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the DC power system controller voltage alarm group.')
dcPwrSysVoltAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 4, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysVoltAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysVoltAlrmStringValue.setDescription('The string value of the alarm variable as reported by the DC power system controller voltage alarm group.')
dcPwrSysVoltAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 4, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysVoltAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysVoltAlrmSeverity.setDescription('The integer value of the alarm severity level of this variable as reported by the DC power system controller voltage alarm group.')
dcPwrSysBattAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 5, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysBattAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysBattAlrmCount.setDescription('Number of battery alarm variables in system controller alarm table.')
dcPwrSysBattAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 5, 2), )
if mibBuilder.loadTexts: dcPwrSysBattAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysBattAlrmTable.setDescription('A table of DC power system controller battery alarm variables.')
dcPwrSysBattAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 5, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysBattAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysBattAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysBattAlrmEntry.setDescription('An entry into the DC power system controller battery alarm group.')
dcPwrSysBattAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysBattAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysBattAlrmIndex.setDescription('The index of the alarm variable in the DC power system controller table battery alarm group.')
dcPwrSysBattAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 5, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysBattAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysBattAlrmName.setDescription('The description of the alarm variable as reported by the DC power system controller battery alarm group.')
dcPwrSysBattAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysBattAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysBattAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the DC power system controller battery alarm group.')
dcPwrSysBattAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 5, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysBattAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysBattAlrmStringValue.setDescription('The string value of the alarm variable as reported by the DC power system controller battery alarm group.')
dcPwrSysBattAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 5, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysBattAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysBattAlrmSeverity.setDescription('The integer value of the alarm severity level of this variable as reported by the DC power system controller battery alarm group.')
dcPwrSysTempAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 6, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTempAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTempAlrmCount.setDescription('Number of temperature alarm variables in system controller alarm table.')
dcPwrSysTempAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 6, 2), )
if mibBuilder.loadTexts: dcPwrSysTempAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTempAlrmTable.setDescription('A table of DC power system controller temperature alarm variables.')
dcPwrSysTempAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 6, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysTempAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysTempAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTempAlrmEntry.setDescription('An entry into the DC power system controller temperature alarm group.')
dcPwrSysTempAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTempAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTempAlrmIndex.setDescription('The index of the alarm variable in the DC power system controller table temperature alarm group.')
dcPwrSysTempAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 6, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTempAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTempAlrmName.setDescription('The description of the alarm variable as reported by the DC power system controller temperature alarm group.')
dcPwrSysTempAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTempAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTempAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the DC power system controller temperature alarm group.')
dcPwrSysTempAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 6, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTempAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTempAlrmStringValue.setDescription('The string value of the alarm variable as reported by the DC power system controller temperature alarm group.')
dcPwrSysTempAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 6, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTempAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTempAlrmSeverity.setDescription('The integer value of the alarm severity level of this variable as reported by the DC power system controller temperature alarm group.')
dcPwrSysCustomAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 7, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomAlrmCount.setDescription('Number of custom alarm variables in system controller alarm table.')
dcPwrSysCustomAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 7, 2), )
if mibBuilder.loadTexts: dcPwrSysCustomAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomAlrmTable.setDescription('A table of DC power system controller custom alarm variables.')
dcPwrSysCustomAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 7, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysCustomAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysCustomAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomAlrmEntry.setDescription('An entry into the DC power system controller custom alarm group.')
dcPwrSysCustomAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomAlrmIndex.setDescription('The index of the alarm variable in the DC power system controller table custom alarm group.')
dcPwrSysCustomAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 7, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomAlrmName.setDescription('The description of the alarm variable as reported by the DC power system controller custom alarm group.')
dcPwrSysCustomAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the DC power system controller custom alarm group.')
dcPwrSysCustomAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 7, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomAlrmStringValue.setDescription('The string value of the alarm variable as reported by the DC power system controller custom alarm group.')
dcPwrSysCustomAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 7, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomAlrmSeverity.setDescription('The integer value of the alarm severity level of this variable as reported by the DC power system controller custom alarm group.')
dcPwrSysMiscAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 8, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysMiscAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMiscAlrmCount.setDescription('Number of misc alarm variables in system controller alarm table.')
dcPwrSysMiscAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 8, 2), )
if mibBuilder.loadTexts: dcPwrSysMiscAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMiscAlrmTable.setDescription('A table of DC power system controller misc alarm variables.')
dcPwrSysMiscAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 8, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysMiscAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysMiscAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMiscAlrmEntry.setDescription('An entry into the DC power system controller misc alarm group.')
dcPwrSysMiscAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 8, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysMiscAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMiscAlrmIndex.setDescription('The index of the alarm variable in the DC power system controller table misc alarm group.')
dcPwrSysMiscAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 8, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysMiscAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMiscAlrmName.setDescription('The description of the alarm variable as reported by the DC power system controller misc alarm group.')
dcPwrSysMiscAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 8, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysMiscAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMiscAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the DC power system controller misc alarm group.')
dcPwrSysMiscAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 8, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysMiscAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMiscAlrmStringValue.setDescription('The string value of the alarm variable as reported by the DC power system controller misc alarm group.')
dcPwrSysMiscAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 8, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysMiscAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMiscAlrmSeverity.setDescription('The integer value of the alarm severity level of this variable as reported by the DC power system controller misc alarm group.')
dcPwrSysCtrlAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 9, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmCount.setDescription('The number of control alarm variables.')
dcPwrSysCtrlAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 9, 2), )
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmTable.setDescription('A table of control alarm variables.')
dcPwrSysCtrlAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 9, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysCtrlAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmEntry.setDescription('An entry of the control alarm group')
dcPwrSysCtrlAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 9, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmIndex.setDescription('The index of the alarm variable in the control alarm group.')
dcPwrSysCtrlAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 9, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmName.setDescription('The description of the alarm variable as reported by the control alarm group.')
dcPwrSysCtrlAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 9, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the control alarm group.')
dcPwrSysCtrlAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 9, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmStringValue.setDescription('The string value of the alarm variable as reported by the control alarm group.')
dcPwrSysCtrlAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 9, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCtrlAlrmSeverity.setDescription('The integer value of alarm severity level of the extra variable as reported by the control alarm group.')
dcPwrSysAdioAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 10, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAdioAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAdioAlrmCount.setDescription('Number of Adio alarm variables in the system controller Adio alarm table.')
dcPwrSysAdioAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 10, 2), )
if mibBuilder.loadTexts: dcPwrSysAdioAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAdioAlrmTable.setDescription('A table of Adio alarm variables.')
dcPwrSysAdioAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 10, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysAdioAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysAdioAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAdioAlrmEntry.setDescription('An entry into the Adio alarm group.')
dcPwrSysAdioAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 10, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAdioAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAdioAlrmIndex.setDescription('The index of the alarm variable in the table Adio alarm group.')
dcPwrSysAdioAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 10, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAdioAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAdioAlrmName.setDescription('The description of the alarm variable as reported by the Adio alarm group.')
dcPwrSysAdioAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 10, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAdioAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAdioAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the Adio alarm group.')
dcPwrSysAdioAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 10, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAdioAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAdioAlrmStringValue.setDescription('The string value of the alarm variable as reported by the Adio alarm group.')
dcPwrSysAdioAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 10, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAdioAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAdioAlrmSeverity.setDescription('The integer value of alarm severity level of the extra variable as reported by the Adio alarm group.')
dcPwrSysConvAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 11, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvAlrmCount.setDescription('Number of Converter alarm variables in system controller alarm table.')
dcPwrSysConvAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 11, 2), )
if mibBuilder.loadTexts: dcPwrSysConvAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvAlrmTable.setDescription('A table of Converter alarm variables.')
dcPwrSysConvAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 11, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysConvAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysConvAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvAlrmEntry.setDescription('An entry into the Converter alarm group.')
dcPwrSysConvAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 11, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvAlrmIndex.setDescription('The index of the alarm variable in the DC power system controller table Converter alarm group.')
dcPwrSysConvAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 11, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvAlrmName.setDescription('The description of the alarm variable as reported by the Converter alarm group.')
dcPwrSysConvAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 11, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the Converter alarm group.')
dcPwrSysConvAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 11, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvAlrmStringValue.setDescription('The string value of the alarm variable as reported by the Converter alarm group.')
dcPwrSysConvAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 11, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvAlrmSeverity.setDescription('The integer value of alarm severity level of the extra variable as reported by the Converter alarm group.')
dcPwrSysInvAlrmCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 12, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysInvAlrmCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysInvAlrmCount.setDescription('Number of Inverter alarm variables in the system controller alarm table.')
dcPwrSysInvAlrmTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 12, 2), )
if mibBuilder.loadTexts: dcPwrSysInvAlrmTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysInvAlrmTable.setDescription('A table of power system controller Inverter alarm variables.')
dcPwrSysInvAlrmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 12, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysInvAlrmIndex"))
if mibBuilder.loadTexts: dcPwrSysInvAlrmEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysInvAlrmEntry.setDescription('An entry into the power system controller Inverter alarm group.')
dcPwrSysInvAlrmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 12, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysInvAlrmIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysInvAlrmIndex.setDescription('The index of the alarm variable in the Inverter alarm group.')
dcPwrSysInvAlrmName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 12, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysInvAlrmName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysInvAlrmName.setDescription('The description of the alarm variable as reported by the Inverter alarm group.')
dcPwrSysInvAlrmIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 12, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysInvAlrmIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysInvAlrmIntegerValue.setDescription('The integer value of the alarm variable as reported by the Inverter alarm group.')
dcPwrSysInvAlrmStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 12, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysInvAlrmStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysInvAlrmStringValue.setDescription('The string value of the alarm variable as reported by the Inverter alarm group.')
dcPwrSysInvAlrmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 5, 12, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysInvAlrmSeverity.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysInvAlrmSeverity.setDescription('The integer value of alarm severity level of the extra variable as reported by the Inverter alarm group.')
dcPwrSysDigIpCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigIpCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigIpCount.setDescription('Number of digital input variables in system controller digital input table.')
dcPwrSysDigIpTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 1, 2), )
if mibBuilder.loadTexts: dcPwrSysDigIpTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigIpTable.setDescription('A table of digital input variables.')
dcPwrSysDigIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 1, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysDigIpIndex"))
if mibBuilder.loadTexts: dcPwrSysDigIpEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigIpEntry.setDescription('An entry into the digital input group.')
dcPwrSysDigIpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigIpIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigIpIndex.setDescription('The index of the digital input variable in the table digital input group.')
dcPwrSysDigIpName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigIpName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigIpName.setDescription('The description of the digital input variable as reported by the digital input group.')
dcPwrSysDigIpIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigIpIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigIpIntegerValue.setDescription('The integer value of the digital input variable as reported by the digital input group.')
dcPwrSysDigIpStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysDigIpStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysDigIpStringValue.setDescription('The string value of the digital input variable as reported by the digital input group.')
dcPwrSysCntrlrIpCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCntrlrIpCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCntrlrIpCount.setDescription('Number of controller input variables in system controller controller input table.')
dcPwrSysCntrlrIpTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 2, 2), )
if mibBuilder.loadTexts: dcPwrSysCntrlrIpTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCntrlrIpTable.setDescription('A table of controller input variables.')
dcPwrSysCntrlrIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 2, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysCntrlrIpIndex"))
if mibBuilder.loadTexts: dcPwrSysCntrlrIpEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCntrlrIpEntry.setDescription('An entry into the controller input group.')
dcPwrSysCntrlrIpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCntrlrIpIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCntrlrIpIndex.setDescription('The index of the controller input variable in the table controller input group.')
dcPwrSysCntrlrIpName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCntrlrIpName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCntrlrIpName.setDescription('The description of the controller input variable as reported by the controller input group.')
dcPwrSysCntrlrIpIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCntrlrIpIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCntrlrIpIntegerValue.setDescription('The integer value of the controller input variable as reported by the controller input group.')
dcPwrSysCntrlrIpStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 2, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCntrlrIpStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCntrlrIpStringValue.setDescription('The string value of the controller input variable as reported by the controller input group.')
dcPwrSysRectIpCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectIpCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectIpCount.setDescription('Number of rectifier input variables in system controller rectifier input table.')
dcPwrSysRectIpTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 3, 2), )
if mibBuilder.loadTexts: dcPwrSysRectIpTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectIpTable.setDescription('A table of rectifier input variables.')
dcPwrSysRectIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 3, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysRectIpIndex"))
if mibBuilder.loadTexts: dcPwrSysRectIpEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectIpEntry.setDescription('An entry into the rectifier input group.')
dcPwrSysRectIpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectIpIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectIpIndex.setDescription('The index of the rectifier input variable in the table rectifier input group.')
dcPwrSysRectIpName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 3, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectIpName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectIpName.setDescription('The description of the rectifier input variable as reported by the rectifier input group.')
dcPwrSysRectIpIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectIpIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectIpIntegerValue.setDescription('The integer value of the rectifier input variable as reported by the rectifier input group.')
dcPwrSysRectIpStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 3, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysRectIpStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRectIpStringValue.setDescription('The string value of the rectifier input variable as reported by the rectifier input group.')
dcPwrSysCustomIpCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 4, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomIpCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomIpCount.setDescription('Number of custom input variables in system controller custom input table.')
dcPwrSysCustomIpTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 4, 2), )
if mibBuilder.loadTexts: dcPwrSysCustomIpTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomIpTable.setDescription('A table of digital custom variables.')
dcPwrSysCustomIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 4, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysCustomIpIndex"))
if mibBuilder.loadTexts: dcPwrSysCustomIpEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomIpEntry.setDescription('An entry into the custom input group.')
dcPwrSysCustomIpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomIpIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomIpIndex.setDescription('The index of the custom input variable in the table custom input group.')
dcPwrSysCustomIpName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 4, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomIpName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomIpName.setDescription('The description of the custom input variable as reported by the custom input group.')
dcPwrSysgCustomIpIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 4, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dcPwrSysgCustomIpIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysgCustomIpIntegerValue.setDescription('The integer value of the custom input variable as reported by the custom input group.')
dcPwrSysCustomIpStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 4, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCustomIpStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCustomIpStringValue.setDescription('The string value of the custom input variable as reported by the custom input group.')
dcPwrSysConvIpCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 5, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvIpCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvIpCount.setDescription('Number of Converter input variables in system controller Converter input table.')
dcPwrSysConvIpTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 5, 2), )
if mibBuilder.loadTexts: dcPwrSysConvIpTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvIpTable.setDescription('A table of Converter input variables.')
dcPwrSysConvIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 5, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysConvIpIndex"))
if mibBuilder.loadTexts: dcPwrSysConvIpEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvIpEntry.setDescription('An entry into the Converter input group.')
dcPwrSysConvIpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvIpIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvIpIndex.setDescription('The index of the Converter input variable in the table Converter input group.')
dcPwrSysConvIpName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 5, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvIpName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvIpName.setDescription('The description of the Converter input variable as reported by the Converter input group.')
dcPwrSysConvIpIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvIpIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvIpIntegerValue.setDescription('The integer value of the Converter input variable as reported by the Converter input group.')
dcPwrSysConvIpStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 5, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysConvIpStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysConvIpStringValue.setDescription('The string value of the Converter input variable as reported by the Converter input group.')
dcPwrSysTimerIpCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 6, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTimerIpCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTimerIpCount.setDescription('Number of Timer input variables in system controller Timer input table.')
dcPwrSysTimerIpTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 6, 2), )
if mibBuilder.loadTexts: dcPwrSysTimerIpTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTimerIpTable.setDescription('A table of Timer input variables')
dcPwrSysTimerIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 6, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysTimerIpIndex"))
if mibBuilder.loadTexts: dcPwrSysTimerIpEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTimerIpEntry.setDescription('An entry into the Timer input group')
dcPwrSysTimerIpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTimerIpIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTimerIpIndex.setDescription('The index of the Timer input variable in the table Timer input group.')
dcPwrSysTimerIpName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 6, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTimerIpName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTimerIpName.setDescription('The description of the Timer input variable as reported by the Timer input group.')
dcPwrSysTimerIpIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTimerIpIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTimerIpIntegerValue.setDescription('The integer value of the Timer input variable as reported by the Timer input group.')
dcPwrSysTimerIpStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 6, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTimerIpStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTimerIpStringValue.setDescription('The string value of the Timer input variable as reported by the Timer input group.')
dcPwrSysCounterIpCount = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 7, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCounterIpCount.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCounterIpCount.setDescription('Number of Counter input variables in system controller Counter input table.')
dcPwrSysCounterIpTable = MibTable((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 7, 2), )
if mibBuilder.loadTexts: dcPwrSysCounterIpTable.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCounterIpTable.setDescription('A table of Counter input variables.')
dcPwrSysCounterIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 7, 2, 1), ).setIndexNames((0, "AlphaPowerSystem-MIB", "dcPwrSysCounterIpIndex"))
if mibBuilder.loadTexts: dcPwrSysCounterIpEntry.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCounterIpEntry.setDescription('An entry into the Counter input group.')
dcPwrSysCounterIpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCounterIpIndex.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCounterIpIndex.setDescription('The index of the Counter input variable in the table Counter input group.')
dcPwrSysCounterIpName = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 7, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCounterIpName.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCounterIpName.setDescription('The description of the Counter input variable as reported by the Counter input group.')
dcPwrSysCounterIpIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1000000000, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCounterIpIntegerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCounterIpIntegerValue.setDescription('The integer value of the Counter input variable as reported by the Counter input group.')
dcPwrSysCounterIpStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 7309, 4, 1, 6, 7, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysCounterIpStringValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysCounterIpStringValue.setDescription('The string value of the Counter input variable as reported by the Counter input group.')
dcPwrSysTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0))
dcPwrSysAlarmActiveTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 1)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmStringValue"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmIndex"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmSeverity"), ("AlphaPowerSystem-MIB", "dcPwrSysSiteName"), ("AlphaPowerSystem-MIB", "dcPwrSysTimeStamp"), ("AlphaPowerSystem-MIB", "dcPwrSysAlarmTriggerValue"))
if mibBuilder.loadTexts: dcPwrSysAlarmActiveTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAlarmActiveTrap.setDescription('A trap issued when one of the alarms on the system becomes active.')
dcPwrSysAlarmClearedTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 2)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmStringValue"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmIndex"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmSeverity"), ("AlphaPowerSystem-MIB", "dcPwrSysSiteName"), ("AlphaPowerSystem-MIB", "dcPwrSysAlarmTriggerValue"))
if mibBuilder.loadTexts: dcPwrSysAlarmClearedTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAlarmClearedTrap.setDescription('A trap issued when one of the active alarms on the system is cleared.')
dcPwrSysRelayTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 3)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysRelayIntegerValue"), ("AlphaPowerSystem-MIB", "dcPwrSysRelayStringValue"), ("AlphaPowerSystem-MIB", "dcPwrSysRelayIndex"), ("AlphaPowerSystem-MIB", "dcPwrSysRelaySeverity"), ("AlphaPowerSystem-MIB", "dcPwrSysSiteName"))
if mibBuilder.loadTexts: dcPwrSysRelayTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysRelayTrap.setDescription('A trap issued from a change in state in one of the relays on the DC power system controller.')
dcPwrSysComOKTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 4)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysSiteName"))
if mibBuilder.loadTexts: dcPwrSysComOKTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysComOKTrap.setDescription('A trap to indicate that communications with a DC power system controller have been established.')
dcPwrSysComErrTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 5)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysSiteName"))
if mibBuilder.loadTexts: dcPwrSysComErrTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysComErrTrap.setDescription('A trap to indicate that communications with a DC power system controller have been lost.')
dcPwrSysAgentStartupTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 6)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysSiteName"))
if mibBuilder.loadTexts: dcPwrSysAgentStartupTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAgentStartupTrap.setDescription('A trap to indicate that the agent software has started up.')
dcPwrSysAgentShutdownTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 7)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysSiteName"))
if mibBuilder.loadTexts: dcPwrSysAgentShutdownTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAgentShutdownTrap.setDescription('A trap to indicate that the agent software has shut down.')
dcPwrSysMajorAlarmActiveTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 8)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmStringValue"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmIndex"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmSeverity"), ("AlphaPowerSystem-MIB", "dcPwrSysSiteName"))
if mibBuilder.loadTexts: dcPwrSysMajorAlarmActiveTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMajorAlarmActiveTrap.setDescription('A trap issued as a summary of DC power system status. It is sent when the system goes into Major Alarm.')
dcPwrSysMajorAlarmClearedTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 9)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmStringValue"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmIndex"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmSeverity"), ("AlphaPowerSystem-MIB", "dcPwrSysSiteName"))
if mibBuilder.loadTexts: dcPwrSysMajorAlarmClearedTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMajorAlarmClearedTrap.setDescription('A trap issued as a summary of DC power system status. It is sent when the system comes out of Major Alarm.')
dcPwrSysMinorAlarmActiveTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 10)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmStringValue"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmIndex"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmSeverity"), ("AlphaPowerSystem-MIB", "dcPwrSysSiteName"))
if mibBuilder.loadTexts: dcPwrSysMinorAlarmActiveTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMinorAlarmActiveTrap.setDescription('A trap issued as a summary of DC power system status. It is sent when the system goes into Minor Alarm.')
dcPwrSysMinorAlarmClearedTrap = NotificationType((1, 3, 6, 1, 4, 1, 7309, 4, 1, 3, 0, 11)).setObjects(("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmStringValue"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmIndex"), ("AlphaPowerSystem-MIB", "dcPwrSysRectAlrmSeverity"), ("AlphaPowerSystem-MIB", "dcPwrSysSiteName"))
if mibBuilder.loadTexts: dcPwrSysMinorAlarmClearedTrap.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysMinorAlarmClearedTrap.setDescription('A trap issued as a summary of DC power system status. It is sent when the system comes out of Minor Alarm.')
dcPwrSysResyncAlarms = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 8, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dcPwrSysResyncAlarms.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysResyncAlarms.setDescription('Send/Resend all active alarms that were previously sent through SNMP notification.')
dcPwrSysAlarmTriggerValue = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 9, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysAlarmTriggerValue.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysAlarmTriggerValue.setDescription('The value of the variable that triggered the most recent alarm notification.')
dcPwrSysTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 7309, 4, 1, 9, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dcPwrSysTimeStamp.setStatus('current')
if mibBuilder.loadTexts: dcPwrSysTimeStamp.setDescription('The time stamp of the most recent alarm event as reported by the DC power system controller.')
mibBuilder.exportSymbols("AlphaPowerSystem-MIB", dcPwrSysAnalogOpTable=dcPwrSysAnalogOpTable, dcPwrSysDigAlrmStringValue=dcPwrSysDigAlrmStringValue, dcPwrSysCntrlrIpTable=dcPwrSysCntrlrIpTable, dcPwrSysDigAlrmCount=dcPwrSysDigAlrmCount, dcPwrSysAdioAlrmEntry=dcPwrSysAdioAlrmEntry, dcPwrSysTempAlrmTbl=dcPwrSysTempAlrmTbl, dcPwrSysMiscAlrmName=dcPwrSysMiscAlrmName, dcPwrSysConvAlrmStringValue=dcPwrSysConvAlrmStringValue, dcPwrSysSiteRegion=dcPwrSysSiteRegion, dcPwrSysRelayTable=dcPwrSysRelayTable, dcPwrSysTempAlrmIntegerValue=dcPwrSysTempAlrmIntegerValue, dcPwrSysCustomAlrmIntegerValue=dcPwrSysCustomAlrmIntegerValue, dcPwrSysVariable=dcPwrSysVariable, dcPwrSysCtrlAlrmCount=dcPwrSysCtrlAlrmCount, dcPwrSysRectAlrmSeverity=dcPwrSysRectAlrmSeverity, dcPwrSysResyncAlarms=dcPwrSysResyncAlarms, dcPwrSysRelayIndex=dcPwrSysRelayIndex, dcPwrSysMiscAlrmStringValue=dcPwrSysMiscAlrmStringValue, PYSNMP_MODULE_ID=alpha, dcPwrSysCustomAlrmEntry=dcPwrSysCustomAlrmEntry, dcPwrSysCntrlrIpTbl=dcPwrSysCntrlrIpTbl, dcPwrSysSoftwareVersion=dcPwrSysSoftwareVersion, dcPwrSysInvAlrmIntegerValue=dcPwrSysInvAlrmIntegerValue, dcPwrExternalControls=dcPwrExternalControls, dcPwrSysSystemNumber=dcPwrSysSystemNumber, dcPwrSysSiteNumber=dcPwrSysSiteNumber, dcPwrSysMajorAlarmClearedTrap=dcPwrSysMajorAlarmClearedTrap, dcPwrSysCtrlAlrmSeverity=dcPwrSysCtrlAlrmSeverity, dcPwrSysChargeAmps=dcPwrSysChargeAmps, dcPwrSysDigAlrmEntry=dcPwrSysDigAlrmEntry, dcPwrSysConvIpCount=dcPwrSysConvIpCount, dcPwrSysVoltAlrmIndex=dcPwrSysVoltAlrmIndex, dcPwrSysTempAlrmEntry=dcPwrSysTempAlrmEntry, dcPwrSysConvIpIndex=dcPwrSysConvIpIndex, dcPwrSysSystemSerial=dcPwrSysSystemSerial, dcPwrSysDischargeAmps=dcPwrSysDischargeAmps, dcPwrSysString=dcPwrSysString, dcPwrSysRectAlrmTable=dcPwrSysRectAlrmTable, dcPwrSysCustomAlrmName=dcPwrSysCustomAlrmName, dcPwrSysAgentShutdownTrap=dcPwrSysAgentShutdownTrap, dcPwrSysBattAlrmStringValue=dcPwrSysBattAlrmStringValue, dcPwrSysInvAlrmIndex=dcPwrSysInvAlrmIndex, dcPwrSysCounterIpCount=dcPwrSysCounterIpCount, dcPwrSysTimerIpIntegerValue=dcPwrSysTimerIpIntegerValue, dcPwrSysRelayTrap=dcPwrSysRelayTrap, dcPwrSysCustomAlrmIndex=dcPwrSysCustomAlrmIndex, dcPwrSysMiscAlrmSeverity=dcPwrSysMiscAlrmSeverity, dcPwrSysCounterIpTable=dcPwrSysCounterIpTable, dcPwrSysMiscAlrmIndex=dcPwrSysMiscAlrmIndex, dcPwrSysCounterIpEntry=dcPwrSysCounterIpEntry, dcPwrSysComOKTrap=dcPwrSysComOKTrap, dcPwrSysAnalogOpIndex=dcPwrSysAnalogOpIndex, dcPwrSysDigAlrmTable=dcPwrSysDigAlrmTable, dcPwrSysDigAlrmIndex=dcPwrSysDigAlrmIndex, dcPwrSysSiteCountry=dcPwrSysSiteCountry, dcPwrSysCurrAlrmStringValue=dcPwrSysCurrAlrmStringValue, dcPwrSysAdioAlrmIndex=dcPwrSysAdioAlrmIndex, dcPwrSysCustomIpTable=dcPwrSysCustomIpTable, dcPwrSysTimerIpName=dcPwrSysTimerIpName, dcPwrSysTimerIpStringValue=dcPwrSysTimerIpStringValue, dcPwrSysVoltAlrmIntegerValue=dcPwrSysVoltAlrmIntegerValue, dcPwrSysBattAlrmName=dcPwrSysBattAlrmName, dcPwrSysAdioAlrmSeverity=dcPwrSysAdioAlrmSeverity, dcPwrSysCntrlrIpStringValue=dcPwrSysCntrlrIpStringValue, dcPwrSysConvAlrmTbl=dcPwrSysConvAlrmTbl, dcPwrSysConvIpName=dcPwrSysConvIpName, dcPwrSysCntrlrIpCount=dcPwrSysCntrlrIpCount, dcPwrSysRectIpEntry=dcPwrSysRectIpEntry, dcPwrSysInvAlrmTable=dcPwrSysInvAlrmTable, dcPwrSysTimerIpIndex=dcPwrSysTimerIpIndex, dcPwrSysCounterIpIntegerValue=dcPwrSysCounterIpIntegerValue, dcPwrSysRectAlrmIntegerValue=dcPwrSysRectAlrmIntegerValue, dcPwrSysTempAlrmSeverity=dcPwrSysTempAlrmSeverity, dcPwrSysDigIpTbl=dcPwrSysDigIpTbl, dcPwrSysCtrlAlrmIndex=dcPwrSysCtrlAlrmIndex, 
dcPwrSysCntrlrIpName=dcPwrSysCntrlrIpName, dcPwrSysCustomIpCount=dcPwrSysCustomIpCount, dcPwrSysAlarmActiveTrap=dcPwrSysAlarmActiveTrap, dcPwrSysMinorAlarmClearedTrap=dcPwrSysMinorAlarmClearedTrap, dcPwrSysOutputsTbl=dcPwrSysOutputsTbl, dcPwrSysConvAlrmName=dcPwrSysConvAlrmName, dcPwrSysRectAlrmStringValue=dcPwrSysRectAlrmStringValue, dcPwrSysDigIpCount=dcPwrSysDigIpCount, dcPwrSysRectAlrmTbl=dcPwrSysRectAlrmTbl, dcPwrSysChargeVolts=dcPwrSysChargeVolts, dcPwrSysTrap=dcPwrSysTrap, dcPwrSysDigAlrmIntegerValue=dcPwrSysDigAlrmIntegerValue, dcPwrSysConvIpTbl=dcPwrSysConvIpTbl, dcPwrSysDigIpIndex=dcPwrSysDigIpIndex, dcPwrSysgCustomIpIntegerValue=dcPwrSysgCustomIpIntegerValue, dcPwrSysAdioAlrmName=dcPwrSysAdioAlrmName, dcPwrSysComErrTrap=dcPwrSysComErrTrap, dcPwrSysConvAlrmIndex=dcPwrSysConvAlrmIndex, dcPwrSysTempAlrmStringValue=dcPwrSysTempAlrmStringValue, dcPwrSysCntrlrIpIntegerValue=dcPwrSysCntrlrIpIntegerValue, dcPwrSysRectIpTable=dcPwrSysRectIpTable, dcPwrSysDigAlrmName=dcPwrSysDigAlrmName, dcPwrSysConvIpTable=dcPwrSysConvIpTable, dcPwrSysMiscAlrmEntry=dcPwrSysMiscAlrmEntry, dcPwrSysDevice=dcPwrSysDevice, dcPwrSysVoltAlrmStringValue=dcPwrSysVoltAlrmStringValue, dcPwrSysRectAlrmName=dcPwrSysRectAlrmName, dcPwrSysTimerIpEntry=dcPwrSysTimerIpEntry, dcPwrSysSystemType=dcPwrSysSystemType, dcPwrSysCtrlAlrmTable=dcPwrSysCtrlAlrmTable, dcPwrSysConvIpEntry=dcPwrSysConvIpEntry, dcPwrSysSiteCity=dcPwrSysSiteCity, dcPwrSysAnalogOpIntegerValue=dcPwrSysAnalogOpIntegerValue, dcPwrSysCtrlAlrmStringValue=dcPwrSysCtrlAlrmStringValue, dcPwrSysAnalogOpSeverity=dcPwrSysAnalogOpSeverity, dcPwrSysInvAlrmStringValue=dcPwrSysInvAlrmStringValue, dcPwrSysInvAlrmTbl=dcPwrSysInvAlrmTbl, dcPwrSysRectIpCount=dcPwrSysRectIpCount, dcPwrSysConvIpIntegerValue=dcPwrSysConvIpIntegerValue, dcPwrSysVoltAlrmCount=dcPwrSysVoltAlrmCount, dcPwrSysRectIpIndex=dcPwrSysRectIpIndex, dcPwrSysRectIpName=dcPwrSysRectIpName, dcPwrSysDigIpStringValue=dcPwrSysDigIpStringValue, dcPwrSysRectIpIntegerValue=dcPwrSysRectIpIntegerValue, dcPwrSysRelayStringValue=dcPwrSysRelayStringValue, dcPwrSysCustomIpEntry=dcPwrSysCustomIpEntry, dcPwrSysRectAlrmIndex=dcPwrSysRectAlrmIndex, dcPwrSysCurrAlrmTbl=dcPwrSysCurrAlrmTbl, dcPwrSysMiscAlrmCount=dcPwrSysMiscAlrmCount, dcPwrSysBattAlrmIndex=dcPwrSysBattAlrmIndex, dcPwrSysMinorAlarm=dcPwrSysMinorAlarm, dcPwrSysSoftwareTimestamp=dcPwrSysSoftwareTimestamp, dcPwrSysAdioAlrmTbl=dcPwrSysAdioAlrmTbl, dcPwrSysAdioAlrmIntegerValue=dcPwrSysAdioAlrmIntegerValue, alpha=alpha, dcPwrSysCurrAlrmIntegerValue=dcPwrSysCurrAlrmIntegerValue, dcPwrSysBattAlrmTable=dcPwrSysBattAlrmTable, dcPwrSysAlarmTriggerValue=dcPwrSysAlarmTriggerValue, dcPwrSysCurrAlrmCount=dcPwrSysCurrAlrmCount, dcPwrSysCurrAlrmEntry=dcPwrSysCurrAlrmEntry, dcPwrSysBattAlrmEntry=dcPwrSysBattAlrmEntry, dcPwrSysAdioAlrmStringValue=dcPwrSysAdioAlrmStringValue, dcPwrSysInvAlrmEntry=dcPwrSysInvAlrmEntry, dcPwrSysVoltAlrmSeverity=dcPwrSysVoltAlrmSeverity, dcPwrSysCounterIpName=dcPwrSysCounterIpName, dcPwrSysCurrAlrmSeverity=dcPwrSysCurrAlrmSeverity, dcPwrSysCntrlrIpEntry=dcPwrSysCntrlrIpEntry, dcPwrSysDigAlrmSeverity=dcPwrSysDigAlrmSeverity, dcPwrSysTimeStamp=dcPwrSysTimeStamp, dcPwrSysCustomAlrmTbl=dcPwrSysCustomAlrmTbl, dcPwrSysVoltAlrmTable=dcPwrSysVoltAlrmTable, dcPwrSysConvAlrmEntry=dcPwrSysConvAlrmEntry, dcPwrSysVoltAlrmEntry=dcPwrSysVoltAlrmEntry, dcPwrSysAnalogOpStringValue=dcPwrSysAnalogOpStringValue, dcPwrSysRelayTbl=dcPwrSysRelayTbl, dcPwrSysDischargeVolts=dcPwrSysDischargeVolts, dcPwrSysVoltAlrmName=dcPwrSysVoltAlrmName, 
dcPwrSysConvIpStringValue=dcPwrSysConvIpStringValue, dcPwrSysCtrlAlrmEntry=dcPwrSysCtrlAlrmEntry, dcPwrSysBattAlrmTbl=dcPwrSysBattAlrmTbl, dcPwrSysInputsTbl=dcPwrSysInputsTbl, dcPwrSysRectAlrmEntry=dcPwrSysRectAlrmEntry, dcPwrSysAgentStartupTrap=dcPwrSysAgentStartupTrap, dcPwrSysMajorAlarmActiveTrap=dcPwrSysMajorAlarmActiveTrap, dcPwrSysBattAlrmCount=dcPwrSysBattAlrmCount, dcPwrVarbindNameReference=dcPwrVarbindNameReference, dcPwrSysCustomAlrmCount=dcPwrSysCustomAlrmCount, dcPwrSysBattAlrmIntegerValue=dcPwrSysBattAlrmIntegerValue, dcPwrSysInvAlrmCount=dcPwrSysInvAlrmCount, dcPwrSysTempAlrmName=dcPwrSysTempAlrmName, dcpower=dcpower, dcPwrSysCustomAlrmSeverity=dcPwrSysCustomAlrmSeverity, dcPwrSysTempAlrmTable=dcPwrSysTempAlrmTable, dcPwrSysRectIpTbl=dcPwrSysRectIpTbl, dcPwrSysMajorAlarm=dcPwrSysMajorAlarm, dcPwrSysCustomAlrmStringValue=dcPwrSysCustomAlrmStringValue, dcPwrSysCurrAlrmIndex=dcPwrSysCurrAlrmIndex, dcPwrSysConvAlrmIntegerValue=dcPwrSysConvAlrmIntegerValue, dcPwrSysInvAlrmSeverity=dcPwrSysInvAlrmSeverity, dcPwrSysTimerIpTbl=dcPwrSysTimerIpTbl, dcPwrSysDigIpIntegerValue=dcPwrSysDigIpIntegerValue, dcPwrSysRelayIntegerValue=dcPwrSysRelayIntegerValue, dcPwrSysAlrmsTbl=dcPwrSysAlrmsTbl, dcPwrSysRelayEntry=dcPwrSysRelayEntry, dcPwrSysCurrAlrmName=dcPwrSysCurrAlrmName, dcPwrSysCtrlAlrmIntegerValue=dcPwrSysCtrlAlrmIntegerValue, dcPwrSysTimerIpTable=dcPwrSysTimerIpTable, dcPwrSysCustomIpStringValue=dcPwrSysCustomIpStringValue, dcPwrSysConvAlrmSeverity=dcPwrSysConvAlrmSeverity, dcPwrSysAdioAlrmTable=dcPwrSysAdioAlrmTable, dcPwrSysDigAlrmTbl=dcPwrSysDigAlrmTbl, dcPwrSysAdioAlrmCount=dcPwrSysAdioAlrmCount, dcPwrSysAnalogOpName=dcPwrSysAnalogOpName, dcPwrSysCustomIpTbl=dcPwrSysCustomIpTbl, dcPwrSysCounterIpStringValue=dcPwrSysCounterIpStringValue, dcPwrSysMiscAlrmIntegerValue=dcPwrSysMiscAlrmIntegerValue, dcPwrSysRelayCount=dcPwrSysRelayCount, dcPwrSysRectIpStringValue=dcPwrSysRectIpStringValue, dcPwrSysDigIpEntry=dcPwrSysDigIpEntry, dcPwrSysAnalogOpEntry=dcPwrSysAnalogOpEntry, dcPwrSysBattAlrmSeverity=dcPwrSysBattAlrmSeverity, dcPwrSysMiscAlrmTable=dcPwrSysMiscAlrmTable, dcPwrSysRelayName=dcPwrSysRelayName, dcPwrSysAnalogOpCount=dcPwrSysAnalogOpCount, dcPwrSysCounterIpIndex=dcPwrSysCounterIpIndex, dcPwrSysInvAlrmName=dcPwrSysInvAlrmName, dcPwrSysConvAlrmCount=dcPwrSysConvAlrmCount, dcPwrSysCurrAlrmTable=dcPwrSysCurrAlrmTable, dcPwrSysVoltAlrmTbl=dcPwrSysVoltAlrmTbl, dcPwrSysAnalogOpTbl=dcPwrSysAnalogOpTbl, dcPwrSysMiscAlrmTbl=dcPwrSysMiscAlrmTbl, dcPwrSysContactName=dcPwrSysContactName, dcPwrSysTempAlrmCount=dcPwrSysTempAlrmCount, dcPwrSysTraps=dcPwrSysTraps, dcPwrSysCounterIpTbl=dcPwrSysCounterIpTbl, dcPwrSysConvAlrmTable=dcPwrSysConvAlrmTable, dcPwrSysCustomIpIndex=dcPwrSysCustomIpIndex, dcPwrSysSiteName=dcPwrSysSiteName, dcPwrSysRelaySeverity=dcPwrSysRelaySeverity, dcPwrSysCtrlAlrmTbl=dcPwrSysCtrlAlrmTbl, dcPwrSysDigIpName=dcPwrSysDigIpName, dcPwrSysCntrlrIpIndex=dcPwrSysCntrlrIpIndex, dcPwrSysDigIpTable=dcPwrSysDigIpTable, dcPwrSysCustomIpName=dcPwrSysCustomIpName, dcPwrSysRectAlrmCount=dcPwrSysRectAlrmCount, dcPwrSysCtrlAlrmName=dcPwrSysCtrlAlrmName, dcPwrSysTempAlrmIndex=dcPwrSysTempAlrmIndex, dcPwrSysMinorAlarmActiveTrap=dcPwrSysMinorAlarmActiveTrap, dcPwrSysCustomAlrmTable=dcPwrSysCustomAlrmTable, dcPwrSysTimerIpCount=dcPwrSysTimerIpCount, dcPwrSysPhoneNumber=dcPwrSysPhoneNumber, dcPwrSysAlarmClearedTrap=dcPwrSysAlarmClearedTrap)
| [
"[email protected]"
] | |
f92d14e56e3f2106526540e9015138bc89fc3d77 | c12008fee6b319ccc683956d0a171a00e12debb0 | /everyday/e191020.py | 53e6428caf621fada6c4bfabfffe7d54a1250dd8 | [] | no_license | yrnana/algorithm | 70c7b34c82b15598494103bdb49b4aefc7c53548 | 783e4f9a45baf8d6b5900e442d32c2b6f73487d0 | refs/heads/master | 2022-04-13T23:50:53.914225 | 2020-04-01T12:41:14 | 2020-04-01T12:41:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | def solution(arr):
l = len(arr)
n = 0
for i in range(l):
if arr[i] != 0:
swap(arr, i, n)
n += 1
return arr
def swap(arr, i, j):
tmp = arr[i]
arr[i] = arr[j]
arr[j] = tmp
print(solution([0, 5, 0, 3, -1]))  # [5, 3, -1, 0, 0]
print(solution([3, 0, 3]))  # [3, 3, 0]
| [
"[email protected]"
] | |
bf880139591dc7c773d8e6bf7be78b1c793a73ef | 364b36d699d0a6b5ddeb43ecc6f1123fde4eb051 | /_downloads_1ed/fig_poisson_continuous.py | 686b96403de5b92c73a2308049b03cfd324a149b | [] | no_license | astroML/astroml.github.com | eae3bfd93ee2f8bc8b5129e98dadf815310ee0ca | 70f96d04dfabcd5528978b69c217d3a9a8bc370b | refs/heads/master | 2022-02-27T15:31:29.560052 | 2022-02-08T21:00:35 | 2022-02-08T21:00:35 | 5,871,703 | 2 | 5 | null | 2022-02-08T21:00:36 | 2012-09-19T12:55:23 | HTML | UTF-8 | Python | false | false | 3,102 | py | """
Unbinned Poisson Data
---------------------
Figure 5.14
Regression of unbinned data. The distribution of N = 500 data points is shown
in the left panel; the true pdf is shown by the solid curve. Note that although
the data are binned in the left panel for visualization purposes, the analysis
is performed on the unbinned data. The right panel shows the likelihood for the
slope a (eq. 5.88) for three different sample sizes. The input value is
indicated by the vertical dotted line.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.stats.random import linear
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def linprob_logL(x, a, xmin, xmax):
    """Log-likelihood of slope ``a`` for the linear pdf
    p(x) = a * (x - mu) + 1 / W on [xmin, xmax] (eq. 5.88)."""
    x = x.ravel()
    a = a.reshape(a.shape + (1,))  # broadcast each trial slope over the data
    mu = 0.5 * (xmin + xmax)
    W = (xmax - xmin)
    return np.sum(np.log(a * (x - mu) + 1. / W), -1)
#----------------------------------------------------------------------
# Draw the data from the linear distribution
np.random.seed(0)
N = 500
a_true = 0.01
xmin = 0.0
xmax = 10.0
lin_dist = linear(xmin, xmax, a_true)
data = lin_dist.rvs(N)
x = np.linspace(xmin - 1, xmax + 1, 1000)
px = lin_dist.pdf(x)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(left=0.12, right=0.95, wspace=0.28,
bottom=0.15, top=0.9)
# left panel: plot the model and a histogram of the data
ax1 = fig.add_subplot(121)
# ``normed`` was removed in matplotlib 3.x; ``density`` is the equivalent.
ax1.hist(data, bins=np.linspace(0, 10, 11), density=True,
         histtype='stepfilled', fc='gray', alpha=0.5)
ax1.plot(x, px, '-k')
ax1.set_xlim(-1, 11)
ax1.set_ylim(0, 0.18)
ax1.set_xlabel('$x$')
ax1.set_ylabel('$p(x)$')
# right panel: construct and plot the likelihood
ax2 = fig.add_subplot(122)
ax2.xaxis.set_major_locator(plt.MultipleLocator(0.01))
a = np.linspace(-0.01, 0.02, 1000)
Npts = (500, 100, 20)
styles = ('-k', '--b', '-.g')
for n, s in zip(Npts, styles):
logL = linprob_logL(data[:n], a, xmin, xmax)
logL = np.exp(logL - logL.max())
logL /= logL.sum() * (a[1] - a[0])
ax2.plot(a, logL, s, label=r'$\rm %i\ pts$' % n)
ax2.legend(loc=2, prop=dict(size=8))
ax2.set_xlim(-0.011, 0.02)
ax2.set_xlabel('$a$')
ax2.set_ylabel('$p(a)$')
# vertical line: in newer matplotlib versions, use ax.vlines([a_true])
ylim = ax2.get_ylim()
ax2.plot([a_true, a_true], ylim, ':k', lw=1)
ax2.set_ylim(ylim)
plt.show()
| [
"[email protected]"
] | |
6f7629ccc4c0086b3e895b41224590449279acb5 | 60ec81571533bbfda62ed3b383c3ae984af005a8 | /recipes_exam/recipes_exam/urls.py | c7848c3dcbca30149e5701b86d5f782f2068f718 | [] | no_license | fingerman/django-projects | f35f4a39810b0db6294bfe689c30ad62947839b9 | ba3606abf7d77025ff08ffaffb64110ea2f4f92c | refs/heads/master | 2023-02-27T05:18:53.600833 | 2021-01-31T21:07:25 | 2021-01-31T21:07:25 | 334,756,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | """recipes_exam URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('app.urls')),
]
| [
"[email protected]"
] | |
6c16e2c8f646a76de7c95d1bce0bd8207155521e | 5d0dd50d7f7bf55126834292140ed66306e59f10 | /MIGRATE/msgpack_to_sql.py | 4ce966fdef93c6b79fcabe824ec1177b571c63de | [] | no_license | JellyWX/tracker-bot | 32d2c8666a7c6ca0835aa94695be4ccd7fc37bb5 | b0909c4883b0ee6e0300a163e94ea0d69dffa062 | refs/heads/master | 2021-05-02T16:14:11.638292 | 2018-04-26T19:47:50 | 2018-04-26T19:47:50 | 120,670,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import msgpack
import sqlite3
with open('../DATA/USER_DATA', 'rb') as f:
data = msgpack.unpack(f, encoding='utf8')
connection = sqlite3.connect('../DATA/data.db')
cursor = connection.cursor()
for user, values in data.items():
    # Table names cannot be bound as SQL parameters, so the user id is
    # interpolated directly; this assumes the ids are plain numeric strings.
    command = '''CREATE TABLE u{user} (
game VARCHAR(50),
time INT
)
'''.format(user=user)
cursor.execute(command)
for game, time in values.items():
command = '''INSERT INTO u{user} (game, time)
VALUES (?, ?);'''.format(user=user)
cursor.execute(command, (game, time))
connection.commit()
connection.close()
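
# Optional sanity check (a sketch): re-open the database and confirm one
# migrated table round-trips. The user id used here is just whichever key
# comes first in the unpacked data.
#
#     with sqlite3.connect('../DATA/data.db') as check:
#         user = next(iter(data))
#         print(list(check.execute('SELECT game, time FROM u{}'.format(user))))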
| [
"[email protected]"
] | |
d7919c38e0ac4b378ccf1771060a7670a3744ca6 | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/freestyle/styles/apriori_density.py | 1de2c4c033457e302c229c3c7014b55c0b8010d7 | [
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause",
"Unlicense"
] | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 1,743 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : apriori_density.py
# Author : Stephane Grabli
# Date : 04/08/2005
#  Purpose  : Draws lines having a high a priori density
from freestyle.chainingiterators import ChainPredicateIterator
from freestyle.predicates import (
AndUP1D,
NotUP1D,
QuantitativeInvisibilityUP1D,
TrueBP1D,
TrueUP1D,
pyHighViewMapDensityUP1D,
)
from freestyle.shaders import (
ConstantColorShader,
ConstantThicknessShader,
)
from freestyle.types import Operators
Operators.select(AndUP1D(QuantitativeInvisibilityUP1D(0), pyHighViewMapDensityUP1D(0.1,5)))
bpred = TrueBP1D()
upred = AndUP1D(QuantitativeInvisibilityUP1D(0), pyHighViewMapDensityUP1D(0.0007,5))
Operators.bidirectional_chain(ChainPredicateIterator(upred, bpred), NotUP1D(QuantitativeInvisibilityUP1D(0)))
shaders_list = [
ConstantThicknessShader(2),
ConstantColorShader(0.0, 0.0, 0.0, 1.0)
]
Operators.create(TrueUP1D(), shaders_list)
| [
"[email protected]"
] | |
b341d8e48eb23d4b830ebca10113720caf32a3d5 | d9f85e88424c03072b2939e1e0681c4a28595c91 | /matrixstats.py | 029f8de3b6299f5014b614f677c049cae10ec691 | [
"BSD-2-Clause"
] | permissive | Cadair/chat_stats | 1d826377c95462d555a3d5df1a840f1b9f32c9b3 | c34648d80b67f8e66a9a8adcad92147644ad7923 | refs/heads/master | 2022-09-05T21:26:13.611506 | 2022-08-05T10:22:33 | 2022-08-05T10:22:33 | 160,004,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,373 | py | """
This file contains a set of helpers for analysing the history of groups of
matrix rooms.
"""
import unicodedata
import re
import datetime
from urllib.parse import quote
from collections import defaultdict
from matrix_client.errors import MatrixRequestError
import pandas as pd
import numpy as np
__all__ = ['get_rooms_in_space', 'calculate_active_senders', 'get_display_names',
           'load_messages', 'get_len_key', 'flatten_dicts',
           'filter_events_by_messages', 'print_sorted_value', 'print_sorted_len',
           'get_rooms_in_community', 'events_to_dataframe',
           'get_all_messages_for_room', 'get_all_events']
def slugify(value, allow_unicode=False):
"""
Taken from https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value.lower())
return re.sub(r'[-\s]+', '-', value).strip('-_')
def get_all_messages_for_room(api, room_id, stop_time=None):
"""
Use the matrix ``/messages`` API to back-paginate through the whole history
of a room.
This will probably not work unless your homeserver has all the events
locally.
"""
token = ""
messages = []
try:
token = api.get_room_messages(room_id, token, "b")['end']
except MatrixRequestError:
print("Can't get messages for room...")
return messages
for i in range(100):
try:
m1 = api.get_room_messages(room_id, token, "b", limit=5000)
except MatrixRequestError:
break
if not m1['chunk']:
break
token = m1['end']
        if stop_time is not None:
            # origin_server_ts is in milliseconds since the epoch; accept
            # either a pandas Timestamp / datetime or a raw millisecond value.
            if hasattr(stop_time, "timestamp"):
                stop_time = int(stop_time.timestamp() * 1000)
            times = [e['origin_server_ts'] for e in m1['chunk']]
            stopping = np.less(times, stop_time).nonzero()[0]
            # If the vast majority of this chunk predates stop_time, keep it
            # and stop paginating.
            if len(stopping) > (len(times) / 1.1):
                messages += m1['chunk']
                return messages
messages += m1['chunk']
return messages
def events_to_dataframe(list_o_json):
"""
    Given a list of JSON events, extract the interesting info into a pandas
    DataFrame indexed by ``origin_server_ts``.
"""
extract_keys = ("origin_server_ts", "sender",
"event_id", "type", "content")
df = defaultdict(list)
df["body"] = []
for event in list_o_json:
if "body" in event['content']:
df["body"].append(event['content']['body'])
else:
df['body'].append(None)
for k in extract_keys:
v = event[k]
df[k].append(v)
df["origin_server_ts"] = [datetime.datetime.fromtimestamp(ts/1000) for ts in df['origin_server_ts']]
return pd.DataFrame(df).set_index("origin_server_ts")
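
# A minimal usage sketch combining the two helpers above; it assumes ``api``
# is an authenticated ``matrix_client.api.MatrixHttpApi`` and the room id is
# illustrative:
#
#     raw = get_all_messages_for_room(api, "!abcdef:example.org")
#     df = events_to_dataframe(raw)
#     df["sender"].value_counts().head()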
def get_all_events(api, rooms, cache=None, refresh_cache=False, stop_time=None):
"""
Get all events in rooms.
    If ``cache`` is a filename it will be loaded with `pandas.HDFStore`;
    if ``refresh_cache`` is True every room is re-fetched from the server
    and the cache file is rewritten.
"""
# key = slugify(key).replace("-", "_")
if cache and not refresh_cache:
store = pd.HDFStore(cache)
cache = {key[1:]: store.get(key) for key in store.keys()}
missing_keys = rooms.keys() - cache.keys()
for key in missing_keys:
print(f"fetching events for {key}")
cache[key] = events_to_dataframe(get_all_messages_for_room(api, rooms[key], stop_time=stop_time))
store[key] = cache[key]
for key in cache.keys() - rooms.keys():
cache.pop(key)
store.close()
return cache
else:
messages = {}
with pd.HDFStore(cache) as store:
for key, id in rooms.items():
print(f"fetching events for {key}")
df = events_to_dataframe(get_all_messages_for_room(api, id, stop_time=stop_time))
messages[key] = df
store.put(key, df)
return messages
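# Usage sketch (the cache filename and room mapping are placeholders):
#
#   rooms = {"python": "!someroomid:matrix.org"}
#   events = get_all_events(api, rooms, cache="events.h5")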
def get_rooms_in_community(api, community):
    """
    Get a mapping of canonical alias (localpart) to room id for all rooms in a
    community.
    """
    rooms = api._send("GET", "/groups/{}/rooms".format(quote(community)))
ids = {}
for room in rooms['chunk']:
ca = room.get('canonical_alias')
if not ca:
continue
name = ca.split(":")[0][1:]
name = name.replace("-", "_")
ids[name] = room['room_id']
return ids
def get_rooms_in_space(api, space, recursive=False):
"""
Get a mapping of name to room id for all rooms in a
space.
    If recursive is true then rooms from all subspaces will be listed
    (note: recursion is not implemented yet; subspaces are currently skipped).
"""
space_roomid = space
if space.startswith("#"):
space_roomid = api.get_room_id(space)
room_create = api._send("GET", f"/rooms/{quote(space_roomid)}/state/m.room.create")
if room_create["type"] != "m.space":
raise TypeError("Room is not a space")
room_state = api._send("GET", f"/rooms/{quote(space_roomid)}/state")
ids = {}
for event in room_state:
if event["type"] != "m.space.child":
continue
room_id = event["state_key"]
        # Use a distinct name so we don't shadow the parent space's state list.
        child_state = api._send("GET", f"/rooms/{quote(room_id)}/state")
        create = [ev for ev in child_state if ev["type"] == "m.room.create"][0]
        if create["content"].get("type") == "m.space":
            continue
        name = [ev for ev in child_state if ev["type"] == "m.room.name"]
if not name:
print(f"Room {room_id} has no name, skipping")
continue
name = name[0]["content"]["name"]
ids[name] = room_id
return ids
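# Usage sketch (the space alias is a placeholder):
#
#   ids = get_rooms_in_space(api, "#myspace:matrix.org")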
def get_room_aliases_in_community(api, community):
rooms = api._send("GET", "/groups/{}/rooms".format(quote(community)))
ids = {}
for room in rooms['chunk']:
ca = room.get('canonical_alias')
if not ca:
continue
name = ca.split(":")[0][1:]
name = name.replace("-", "_")
ids[name] = ca
return ids
def print_sorted_len(adict, reverse=True):
for k in sorted(adict, key=lambda k: len(adict[k]), reverse=reverse):
m = adict[k]
print(f"{k}: {len(m)}")
def print_sorted_value(adict, reverse=True):
for k in sorted(adict, key=adict.__getitem__, reverse=reverse):
m = adict[k]
print(f"{k}: {m}")
def filter_events_by_messages(events, ignore_github=False):
"""
Filter events so that only "m.room.message" events are kept.
events should be a dict of room events as returned by ``get_all_events``.
"""
messages = {k: v[v['type'] == "m.room.message"] for k, v in events.items()}
if ignore_github:
messages = {k: v[v['sender'] != "@_neb_github_=40_cadair=3amatrix.org:matrix.org"] for k, v in messages.items()}
return messages
def flatten_dicts(dicts):
"""
Flatten all the dicts, but assume there are no key conflicts.
"""
out = {}
for adict in dicts.values():
for key, value in adict.items():
out[key] = value
return out
def get_display_names(api, senders, template=None):
display_names = []
for s in senders:
        is_matrix_id = True
        if s == "@Cadair:matrix.org":
            s = "@cadair:cadair.com"
        if template is not None and ":" not in s:
            # Bare (bridged) sender names are expanded via the template.
            s = template.format(s=s)
            is_matrix_id = False
        try:
            dn = api.get_display_name(s)
        except Exception:
            dn = s
        if is_matrix_id:
            # Mark native matrix users with an asterisk.
            dn += "*"
display_names.append(dn)
return display_names
def load_messages(api, ids, refresh_cache=False,
stop_time=None, ignore_github=False,
ignore_rooms=None):
# Get all the messages in all the rooms
events = {group: get_all_events(api, cids, cache=f"{group}_messages.h5",
refresh_cache=refresh_cache,
stop_time=stop_time)
for group, cids in ids.items()}
if not ignore_rooms:
ignore_rooms = []
    events = {group: {k: v for k, v in gevents.items() if not v.empty and k not in ignore_rooms}
              for group, gevents in events.items()}
# Filter by actual messages
messages = {group: filter_events_by_messages(gevents) for group, gevents in events.items()}
for gmessages in messages.values():
for m in gmessages.values():
m.loc[:, 'usender'] = [a.split(":")[0][1:].split("_")[-1] if "slack" in a else a for a in m['sender']]
# Add a message length column
for gmessages in messages.values():
for group, df in gmessages.items():
x = df['body'].apply(lambda x: len(x) if x else 0)
df.loc[:, 'body_len'] = x
return events, messages
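# Usage sketch tying the helpers together (the community id is a placeholder):
#
#   ids = {"mygroup": get_rooms_in_community(api, "+mygroup:matrix.org")}
#   events, messages = load_messages(api, ids)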
def get_len_key(adict, reverse=True):
n_messages = {}
for k in sorted(adict, key=lambda k: len(adict[k]), reverse=reverse):
m = adict[k]
n_messages[k] = len(m)
return n_messages
def calculate_active_senders(api, all_messages, top_n=20, template=None):
"""
Return a top_n long df group of number of messages and average length.
"""
groupbys = {group: am.groupby("usender") for group, am in all_messages.items()}
active_senders = {group: pd.DataFrame(groupby.count()['body'].sort_values(ascending=False))
for group, groupby in groupbys.items()}
    for group, df in active_senders.items():
        df.columns = ['number_of_messages']
        # Average only the message-length column, not every numeric column.
        df['mean_body_len'] = groupbys[group]['body_len'].mean()
        df['median_body_len'] = groupbys[group]['body_len'].median()
    for group, df in active_senders.items():
        # Positional slicing: the index holds sender names, not integers.
        df = df.iloc[:top_n].copy()
        df['display_name'] = get_display_names(api, df.index, template=template)
        df = df.reset_index()
        df = df.set_index("display_name")
        active_senders[group] = df
return active_senders
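# Usage sketch (assumes per-group messages were flattened first):
#
#   flat = {g: pd.concat(m.values()) for g, m in messages.items()}
#   top = calculate_active_senders(api, flat, top_n=10)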
| [
"[email protected]"
] | |
5ecff5ad5fe3286e9a8e813f3c9de2d599229c34 | 781116645c0d60de13596aac81a76c791ed0c18a | /kivy_garden/flower/__init__.py | 6793aaafcc1aa355b42b381f1800e9714707bb6e | [
"MIT"
] | permissive | matham/flower | 503dae3446110da05ecd2a527b3459f7e1bcadb3 | e7c71346563573197ae304ceb343bff14e54a5cd | refs/heads/master | 2020-05-24T22:33:43.761720 | 2019-05-19T08:56:14 | 2019-05-19T08:56:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | """
Demo flower
============
Defines the Kivy garden :class:`FlowerLabel` class which is the widget provided
by the demo flower.
"""
from kivy.uix.label import Label
__all__ = ('FlowerLabel', )
__version__ = '0.1.0.dev0'
class FlowerLabel(Label):
def __init__(self, **kwargs):
super(FlowerLabel, self).__init__(**kwargs, text='Demo flower')
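# Hypothetical usage sketch (the app name is a placeholder):
#
#   from kivy.app import App
#
#   class DemoApp(App):
#       def build(self):
#           return FlowerLabel()
#
#   DemoApp().run()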
| [
"[email protected]"
] | |
9d95173045444ddceac7aaebc34b8f75adf12995 | fff26da96c4b324cdbc0315c3fdf1fe2ccbf6bf0 | /.history/test_celegans_corrected_weights_20210615130634.py | a875acee9236154c606750101651e4d37fd22fd9 | [] | no_license | izzortsi/spreading-activation-networks | ebcd38477a4d4c6139a82b0dd7da3d79a0e3f741 | f2cf0bf519af746f148fa7a4ea4d78d16ba6af87 | refs/heads/dev | 2023-06-28T03:49:34.265268 | 2021-06-15T18:07:51 | 2021-06-15T18:07:51 | 376,718,907 | 0 | 0 | null | 2021-06-15T18:07:51 | 2021-06-14T06:01:52 | Python | UTF-8 | Python | false | false | 3,390 | py | # %%
import graph_tool.all as gt
import numpy as np
import numpy.random as npr
# import matplotlib.colors as mplc
from matplotlib import cm
import matplotlib.colors as mplc
import os, sys
from gi.repository import Gtk, Gdk, GdkPixbuf, GObject, GLib
from plot_functions import *
# %%
def init_elegans_net():
g = gt.collection.data["celegansneural"]
g.ep.weight = g.new_ep("double")
norm_eweights = minmax(g.ep.value.a)
g.ep.weight.a = norm_eweights
del g.ep["value"]
del g.gp["description"]
del g.gp["readme"]
del g.vp["label"]
g.vp.state = g.new_vertex_property("int")
g.vp.activation = g.new_vertex_property("float")
n_vertices = g.num_vertices()
n_edges = g.num_edges()
activations = npr.normal(size=n_vertices)
activations = minmax(activations)
g.vp.state.a = np.full(n_vertices, 0)
g.vp.activation.a = activations
return g
# %%
def init_graph(g):
treemap = gt.min_spanning_tree(g)
gmst = gt.GraphView(g, efilt=treemap)
gtclos = gt.transitive_closure(gmst)
return {"g": g, "gmst": gmst, "gtc": gtclos}
def minmax(a):
a = (a - np.min(a))
return a/np.max(a)
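# e.g. minmax(np.array([2., 4., 6.])) -> array([0. , 0.5, 1. ])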
# %%
"""
def set_graph(type="gtc")
type being either the original graph "g", the MST of it
"gmst" or the transitive closure of the MST "gtc". Defaults
to "gtc".
"""
def set_graph(type="gtc"):
g = init_elegans_net()
graphs = init_graph(g)
g = graphs["g"]
gmst = graphs["gmst"]
gtc = graphs["gtc"]
return g, gmst, gtc
# %%
# %%
####DYNAMICS PARAMETERS
SPIKE_THRESHOLD = 0.90
POTENTIAL_LOSS = 0.8
MAX_COUNT = 600
#OFFSCREEN = True
OFFSCREEN = sys.argv[1] == "offscreen" if len(sys.argv) > 1 else False
# %%
g, gmst, gtc = set_graph()
# %%
g = gmst
# %%
set(map(tuple, gtc.get_all_edges(151)))  # quick look at the edges around vertex 151
# %%
count = 0
# %%
def update_state():
global count, g
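    # One step of the dynamics: pick the most active vertex (the "spiker")
    # and spread its activation to its out-neighbours, scaled by edge weight.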
spiker_activation = np.max(g.vp.activation.a)
spiker = gt.find_vertex(g, g.vp.activation, spiker_activation)[0]
nbs = g.get_out_neighbors(spiker)
nbsize = len(nbs)
if nbsize != 0:
spread_val = spiker_activation/nbsize
for nb in nbs:
w = g.ep.weight[g.edge(spiker, nb)]
g.vp.activation[nb] += spread_val*w
g.vp.activation[spiker] -= spread_val*w
    else:
        # Sink vertex (no out-neighbours): activation simply accumulates.
        pass
        #if g.vp.activation[nb] >= SPIKE_THRESHOLD:
win.graph.regenerate_surface()
win.graph.queue_draw()
if OFFSCREEN:
pixbuf = win.get_pixbuf()
pixbuf.savev(r'./frames/san%06d.png' % count, 'png', [], [])
count += 1
if count >= MAX_COUNT:
sys.exit(0)
return True
# %%
pos = gt.sfdp_layout(g)
PLOT_PARAMS = plot_params(g, None)
if OFFSCREEN and not os.path.exists("./frames"):
os.mkdir("./frames")
# This creates a GTK+ window with the initial graph layout
if not OFFSCREEN:
win = gt.GraphWindow(g,
pos,
geometry=(720, 720),
vertex_shape="circle",
**PLOT_PARAMS,
)
else:
win = Gtk.OffscreenWindow()
win.set_default_size(720, 720)
win.graph = gt.GraphWidget(g,
pos,
vertex_shape="circle",
**PLOT_PARAMS,
)
win.add(win.graph)
# %%
cid = GLib.idle_add(update_state)
win.connect("delete_event", Gtk.main_quit)
win.show_all()
Gtk.main()
# %%
# %%
| [
"[email protected]"
] | |
25b61e304b936c5e84ffe57f9d196cca268179ff | 63b864deda44120067eff632bbb4969ef56dd573 | /object_detection/ssd/Config.py | f444dc728514a6492170e0eaf1c5d65542716889 | [] | no_license | lizhe960118/Deep-Learning | d134592c327decc1db12cbe19d9a1c85a5056086 | 7d2c4f3a0512ce4bd2f86c9f455da9866d16dc3b | refs/heads/master | 2021-10-29T06:15:04.749917 | 2019-07-19T15:27:25 | 2019-07-19T15:27:25 | 152,355,392 | 5 | 2 | null | 2021-10-12T22:19:33 | 2018-10-10T03:06:44 | Jupyter Notebook | UTF-8 | Python | false | false | 481 | py | import os.path as osp
sk = [ 15, 30, 60, 111, 162, 213, 264 ]
feature_map = [ 38, 19, 10, 5, 3, 1 ]
steps = [ 8, 16, 32, 64, 100, 300 ]
image_size = 300
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
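# Note (assumed, following the standard SSD300 prior-box construction with
# 4/6/6/6/4/4 boxes per location for the aspect ratios above):
#   38^2*4 + 19^2*6 + 10^2*6 + 5^2*6 + 3^2*4 + 1^2*4 = 8732 default boxes.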
MEANS = (104, 117, 123)
batch_size = 2
data_load_number_worker = 0
lr = 1e-3
momentum = 0.9
weight_decacy = 5e-4
gamma = 0.1
VOC_ROOT = osp.join('./', "VOCdevkit/")
dataset_root = VOC_ROOT
use_cuda = True
lr_steps = (80000, 100000, 120000)
max_iter = 120000
class_num = 21 | [
"[email protected]"
] | |
2235add0ce48477a2a58d68f369f8cd3ba1fbf2b | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/swaggeraemosgi/model/com_adobe_granite_frags_impl_check_http_header_flag_properties.py | b32110895772ddda09288d935ee3f1e98dbd4215 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 7,658 | py | """
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0-pre.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from swaggeraemosgi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from swaggeraemosgi.model.config_node_property_string import ConfigNodePropertyString
globals()['ConfigNodePropertyString'] = ConfigNodePropertyString
class ComAdobeGraniteFragsImplCheckHttpHeaderFlagProperties(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'feature_name': (ConfigNodePropertyString,), # noqa: E501
'feature_description': (ConfigNodePropertyString,), # noqa: E501
'http_header_name': (ConfigNodePropertyString,), # noqa: E501
'http_header_valuepattern': (ConfigNodePropertyString,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'feature_name': 'feature.name', # noqa: E501
'feature_description': 'feature.description', # noqa: E501
'http_header_name': 'http.header.name', # noqa: E501
'http_header_valuepattern': 'http.header.valuepattern', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ComAdobeGraniteFragsImplCheckHttpHeaderFlagProperties - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
feature_name (ConfigNodePropertyString): [optional] # noqa: E501
feature_description (ConfigNodePropertyString): [optional] # noqa: E501
http_header_name (ConfigNodePropertyString): [optional] # noqa: E501
http_header_valuepattern (ConfigNodePropertyString): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
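# Hypothetical construction sketch (kwargs mirror attribute_map above; the
# ConfigNodePropertyString arguments are placeholders -- see its own module
# for the real constructor signature):
#
#   props = ComAdobeGraniteFragsImplCheckHttpHeaderFlagProperties(
#       http_header_name=ConfigNodePropertyString(...),
#       http_header_valuepattern=ConfigNodePropertyString(...),
#   )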
| [
"[email protected]"
] | |
c714879ab292decf242cb272a4d05560414fb170 | 72d010d00355fc977a291c29eb18aeb385b8a9b0 | /LV2_LX2_LC2_LD2/ParamMap.py | 12d64819be32886c056b2489f3ffb2779ffe3981 | [] | no_license | maratbakirov/AbletonLive10_MIDIRemoteScripts | bf0749c5c4cce8e83b23f14f671e52752702539d | ed1174d9959b20ed05fb099f0461bbc006bfbb79 | refs/heads/master | 2021-06-16T19:58:34.038163 | 2021-05-09T11:46:46 | 2021-05-09T11:46:46 | 203,174,328 | 0 | 0 | null | 2019-08-19T13:04:23 | 2019-08-19T13:04:22 | null | UTF-8 | Python | false | false | 2,876 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/LV2_LX2_LC2_LD2/ParamMap.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
import Live
class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
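# Note: this Callable wrapper is the old-style, pre-`staticmethod` Python 2
# idiom for class-level functions (this file is decompiled Python 2 code).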
class ParamMap:
u"""Class to help with device mapping"""
__module__ = __name__
def __init__(self, parent):
ParamMap.realinit(self, parent)
def realinit(self, parent):
self.parent = parent
self.params_with_listener = []
self.param_callbacks = []
def log(self, string):
self.parent.log(string)
def logfmt(self, fmt, *args):
args2 = []
for i in range(0, len(args)):
args2 += [args[i].__str__()]
        msg = fmt % tuple(args2)  # avoid shadowing the builtin `str`
        return self.log(msg)
def param_add_callback(self, script_handle, midi_map_handle, param, min, max, cc, channel):
callback = lambda : self.on_param_value_changed(param, min, max, cc, channel)
param.add_value_listener(callback)
self.params_with_listener += [param]
self.param_callbacks += [callback]
ParamMap.forward_cc(script_handle, midi_map_handle, channel, cc)
def receive_midi_note(self, channel, status, note_no, note_vel):
pass
def receive_midi_cc(self, chan, cc_no, cc_value):
pass
def forward_cc(script_handle, midi_map_handle, chan, cc):
Live.MidiMap.forward_midi_cc(script_handle, midi_map_handle, chan, cc)
forward_cc = Callable(forward_cc)
def forward_note(script_handle, midi_map_handle, chan, note):
Live.MidiMap.forward_midi_note(script_handle, midi_map_handle, chan, note)
forward_note = Callable(forward_note)
def map_with_feedback(midi_map_handle, channel, cc, parameter, mode):
feedback_rule = Live.MidiMap.CCFeedbackRule()
feedback_rule.channel = channel
feedback_rule.cc_value_map = tuple()
feedback_rule.delay_in_ms = -1.0
feedback_rule.cc_no = cc
Live.MidiMap.map_midi_cc_with_feedback_map(midi_map_handle, parameter, channel, cc, mode, feedback_rule, False)
Live.MidiMap.send_feedback_for_parameter(midi_map_handle, parameter)
map_with_feedback = Callable(map_with_feedback)
def on_param_value_changed(self, param, min, max, cc, channel):
pass
def remove_mappings(self):
for i in range(0, len(self.params_with_listener)):
param = self.params_with_listener[i]
callback = self.param_callbacks[i]
try:
if param.value_has_listener(callback):
param.remove_value_listener(callback)
except:
continue
self.params_with_listener = []
self.param_callbacks = []
| [
"[email protected]"
] | |
bf61729fa718b439998532f367204e3cf8b93cf6 | 35fe9e62ab96038705c3bd09147f17ca1225a84e | /a10_ansible/library/a10_ipv6_neighbor_static.py | 9c058e6fee3024c46ed849ab350ff96c39149478 | [] | no_license | bmeidell/a10-ansible | 6f55fb4bcc6ab683ebe1aabf5d0d1080bf848668 | 25fdde8d83946dadf1d5b9cebd28bc49b75be94d | refs/heads/master | 2020-03-19T08:40:57.863038 | 2018-03-27T18:25:40 | 2018-03-27T18:25:40 | 136,226,910 | 0 | 0 | null | 2018-06-05T19:45:36 | 2018-06-05T19:45:36 | null | UTF-8 | Python | false | false | 6,211 | py | #!/usr/bin/python
REQUIRED_NOT_SET = (False, "One of ({}) must be set.")
REQUIRED_MUTEX = (False, "Only one of ({}) can be set.")
REQUIRED_VALID = (True, "")
DOCUMENTATION = """
module: a10_static
description:
-
author: A10 Networks 2018
version_added: 1.8
options:
    ipv6_addr:
description:
- IPV6 address
mac:
description:
- MAC Address
ethernet:
description:
- Ethernet port (Port Value)
trunk:
description:
- Trunk group
tunnel:
description:
- Tunnel interface
vlan:
description:
- VLAN ID
uuid:
description:
- uuid of the object
"""
EXAMPLES = """
"""
ANSIBLE_METADATA = """
"""
# Hacky way of having access to object properties for evaluation
AVAILABLE_PROPERTIES = {"ethernet","ipv6_addr","mac","trunk","tunnel","uuid","vlan",}
# our imports go at the top so we fail fast.
from a10_ansible.axapi_http import client_factory
from a10_ansible import errors as a10_ex
def get_default_argspec():
return dict(
a10_host=dict(type='str', required=True),
a10_username=dict(type='str', required=True),
a10_password=dict(type='str', required=True, no_log=True),
state=dict(type='str', default="present", choices=["present", "absent"])
)
def get_argspec():
rv = get_default_argspec()
rv.update(dict(
ethernet=dict(
type='str'
),
ipv6_addr=dict(
type='str' , required=True
),
mac=dict(
type='str'
),
trunk=dict(
type='str'
),
tunnel=dict(
type='str'
),
uuid=dict(
type='str'
),
vlan=dict(
type='str' , required=True
),
))
return rv
def new_url(module):
"""Return the URL for creating a resource"""
# To create the URL, we need to take the format string and return it with no params
url_base = "/axapi/v3/ipv6/neighbor/static/{ipv6-addr}+{vlan}"
f_dict = {}
f_dict["ipv6-addr"] = ""
f_dict["vlan"] = ""
return url_base.format(**f_dict)
def existing_url(module):
"""Return the URL for an existing resource"""
# Build the format dictionary
url_base = "/axapi/v3/ipv6/neighbor/static/{ipv6-addr}+{vlan}"
f_dict = {}
f_dict["ipv6-addr"] = module.params["ipv6-addr"]
f_dict["vlan"] = module.params["vlan"]
return url_base.format(**f_dict)
def build_envelope(title, data):
return {
title: data
}
def build_json(title, module):
rv = {}
for x in AVAILABLE_PROPERTIES:
v = module.params.get(x)
if v:
rx = x.replace("_", "-")
rv[rx] = module.params[x]
return build_envelope(title, rv)
def validate(params):
# Ensure that params contains all the keys.
requires_one_of = sorted([])
present_keys = sorted([x for x in requires_one_of if params.get(x)])
errors = []
marg = []
if not len(requires_one_of):
return REQUIRED_VALID
if len(present_keys) == 0:
rc,msg = REQUIRED_NOT_SET
marg = requires_one_of
elif requires_one_of == present_keys:
rc,msg = REQUIRED_MUTEX
marg = present_keys
else:
rc,msg = REQUIRED_VALID
if not rc:
errors.append(msg.format(", ".join(marg)))
return rc,errors
def exists(module):
try:
module.client.get(existing_url(module))
return True
except a10_ex.NotFound:
return False
def create(module, result):
payload = build_json("static", module)
try:
post_result = module.client.post(new_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.Exists:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def delete(module, result):
try:
module.client.delete(existing_url(module))
result["changed"] = True
except a10_ex.NotFound:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def update(module, result):
payload = build_json("static", module)
try:
post_result = module.client.put(existing_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def present(module, result):
if not exists(module):
return create(module, result)
else:
return update(module, result)
def absent(module, result):
return delete(module, result)
def run_command(module):
run_errors = []
result = dict(
changed=False,
original_message="",
message=""
)
state = module.params["state"]
a10_host = module.params["a10_host"]
a10_username = module.params["a10_username"]
a10_password = module.params["a10_password"]
# TODO(remove hardcoded port #)
a10_port = 443
a10_protocol = "https"
valid, validation_errors = validate(module.params)
    # Use extend: map() is lazy on Python 3, so the original append never ran.
    run_errors.extend(validation_errors)
if not valid:
result["messages"] = "Validation failure"
err_msg = "\n".join(run_errors)
module.fail_json(msg=err_msg, **result)
module.client = client_factory(a10_host, a10_port, a10_protocol, a10_username, a10_password)
if state == 'present':
result = present(module, result)
elif state == 'absent':
result = absent(module, result)
return result
def main():
module = AnsibleModule(argument_spec=get_argspec())
result = run_command(module)
module.exit_json(**result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main() | [
"[email protected]"
] |