blob_id stringlengths 40–40 | directory_id stringlengths 40–40 | path stringlengths 3–616 | content_id stringlengths 40–40 | detected_licenses sequencelengths 0–112 | license_type stringclasses 2 values | repo_name stringlengths 5–115 | snapshot_id stringlengths 40–40 | revision_id stringlengths 40–40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46 – 2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32 – 2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32 – 2023-09-06 01:08:06 | github_id int64 4.92k–681M ⌀ | star_events_count int64 0–209k | fork_events_count int64 0–110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3–10.2M | extension stringclasses 188 values | content stringlengths 3–10.2M | authors sequencelengths 1–1 | author_id stringlengths 1–132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8f0559a6949b29f1325ea7e4b0952a514e72b342 | 2a17e6a5d78849469b2094ec11f8a51e86475128 | /DIU_HS/settings.py | 36d64d8a3fb733268c23cd3ad16ffc365d0de70c | [] | no_license | maxhasan882/DIU_HS | fbe25b5d22dded5171b7bd9c31a75c16f03a7f8a | cbffe3b3799e46afe492064ecb45b617e8ff536b | refs/heads/master | 2020-07-29T07:54:07.332060 | 2019-09-20T09:12:00 | 2019-09-20T09:12:00 | 209,721,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,506 | py |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '-qqcz^f-332ox2t)s(b$d&slmg^c+q@m!--w*7_%w_pckp(gdq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'rest_framework',
'dblayer',
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DIU_HS.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DIU_HS.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'diuhsDB',
'USER': 'postgres',
'PASSWORD': 'mithu1996',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
INTERNAL_IPS = [
'127.0.0.1',
] | [
"[email protected]"
] | |
d6a2692395d973722c538c781b8fecfa4e62647b | c53fcab99e84ccfe6d9f1455e7471892fbd6661e | /kubeface/commands/copy.py | 63edb408d96fc9594aa6ee83d89f860d51b393cf | [
"Apache-2.0"
] | permissive | proj4spes/kubeface | 3af558ae05f1fd89b2d93e81ce479094ef3f4b8f | 443d7432e6d2f8e4d20b6326e98fabeec7ad68b6 | refs/heads/master | 2021-04-03T02:53:10.284569 | 2017-06-22T19:40:30 | 2017-06-22T19:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | '''
Copy files, including support for google storage buckets.
'''
import sys
import argparse
import logging
from .. import storage
from ..common import configure_logging
from .. import serialization
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("source")
parser.add_argument("destination")
parser.add_argument(
"--no-error",
action="store_true",
default=False,
help="")
parser.add_argument(
"--quiet",
action="store_true",
default=False,
help="")
parser.add_argument(
"--verbose",
action="store_true",
default=False,
help="")
parser.add_argument(
"--print-deserialized",
action="store_true",
default=False,
help="")
def run(argv=sys.argv[1:]):
args = parser.parse_args(argv)
configure_logging(args)
logging.info("Reading: %s" % args.source)
input_handle = storage.get(args.source)
if args.print_deserialized:
deserialized = serialization.load(input_handle)
input_handle.seek(0)
print(deserialized)
if args.destination == "-":
print(input_handle.read())
else:
logging.info("Writing: %s" % args.destination)
storage.put(args.destination, input_handle)
logging.info("Completed.")
| [
"[email protected]"
] | |
4209e5499b98a104adc9693ae8356a5bc01c7ae4 | 30cf02eb3c15da89db2e6efd3d405e92d0c8df36 | /src/pyobo/sources/gwascentral_study.py | 0f00a9432c87ccdffe5a7ed6c2fc786e107b0af4 | [
"MIT"
] | permissive | shunsunsun/pyobo | f53e5e6a4bb0b3ea135312cd8a54c905a52bd754 | 407c8f15873eb84cb5351ccc6e6ae0e8e3add22a | refs/heads/master | 2023-04-04T01:13:16.456853 | 2021-04-05T15:57:33 | 2021-04-05T15:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,162 | py | # -*- coding: utf-8 -*-
"""Converter for GWAS Central."""
import logging
import tarfile
from typing import Iterable
from xml.etree import ElementTree
from pyobo.struct import Obo, Reference, Term, has_part
from pyobo.utils.path import ensure_path
logger = logging.getLogger(__name__)
VERSION = 'jan2021'
URL = f'http://www.gwascentral.org/docs/GC_{VERSION}.tar.gz'
PREFIX = 'gwascentral.study'
def get_obo():
"""Get GWAS Central Studies as OBO."""
return Obo(
ontology=PREFIX,
name='GWAS Central Study',
iter_terms=iterate_terms,
iter_terms_kwargs=dict(version=VERSION),
data_version=VERSION,
typedefs=[has_part],
auto_generated_by=f'bio2obo:{PREFIX}',
)
def _get_term_from_tree(tree: ElementTree.ElementTree) -> Term:
name = tree.find('name').text
description = tree.find('description').text
if description:
description = description.strip().replace('\n', ' ')
identifier = tree.find('identifier').text
term = Term(
reference=Reference(PREFIX, identifier, name),
definition=description,
)
for experiment in tree.findall('experiments'):
experiment_name = experiment.find('name').text
experiment_id = experiment.find('identifier').text
term.append_relationship(has_part, Reference(
'gwascentral.experiment',
identifier=experiment_id,
name=experiment_name,
))
return term
def iterate_terms(version: str) -> Iterable[Term]:
"""Iterate over GWAS Central Study terms."""
path = ensure_path(PREFIX, url=URL, version=version)
with tarfile.open(path) as tar_file:
for tar_info in tar_file:
if not tar_info.path.endswith('.xml'):
continue
with tar_file.extractfile(tar_info) as file:
try:
tree = ElementTree.parse(file)
except ElementTree.ParseError:
logger.warning('malformed XML in %s', tar_info.path)
continue
yield _get_term_from_tree(tree)
if __name__ == '__main__':
get_obo().write_default()
| [
"[email protected]"
] | |
e9c5f30f1bc8ea3b6321a8daf805d87181566bb1 | e9ee565cfff9e6b2a1ea6f73368f4a8948274795 | /src/pybel/repository.py | 8fc43dbcd8c9585d14524792df384d3f7421bf00 | [
"MIT"
] | permissive | pybel/pybel | 7e79530b454e23ae48486a5c0e3207744b7fa139 | ed66f013a77f9cbc513892b0dad1025b8f68bb46 | refs/heads/master | 2022-08-26T18:41:25.724850 | 2022-02-11T12:22:35 | 2022-02-11T12:22:35 | 68,376,693 | 133 | 40 | MIT | 2022-02-11T12:11:24 | 2016-09-16T12:09:49 | Python | UTF-8 | Python | false | false | 18,905 | py | # -*- coding: utf-8 -*-
"""Utilities for BEL repositories."""
import json
import logging
import os
import sys
import time
from dataclasses import dataclass, field
from itertools import chain
from typing import Any, Iterable, Mapping, Optional, Set, TextIO, Tuple, Union
import click
import pandas as pd
from tqdm.autonotebook import tqdm
from .cli import (
connection_option,
host_option,
password_option,
user_option,
verbose_option,
)
from .constants import CITATION
from .io import from_bel_script, to_bel_commons, to_indra_statements
from .io.api import dump, load
from .manager import Manager
from .manager.citation_utils import enrich_pubmed_citations
from .struct import BELGraph
from .struct.operations import union
from .version import get_version
__all__ = [
"BELMetadata",
"BELRepository",
"append_click_group",
]
logger = logging.getLogger(__name__)
private_option = click.option("--private", is_flag=True)
OUTPUT_KWARGS = {
"nodelink.json": dict(indent=2, sort_keys=True),
"cx.json": dict(indent=2, sort_keys=True),
"jgif.json": dict(indent=2, sort_keys=True),
}
@dataclass
class BELMetadata:
"""A container for BEL document metadata."""
name: Optional[str] = None
version: Optional[str] = None
description: Optional[str] = None
authors: Optional[str] = None
contact: Optional[str] = None
license: Optional[str] = None
copyright: Optional[str] = None
disclaimer: Optional[str] = None
def new(self) -> BELGraph:
"""Generate a new BEL graph with the given metadata."""
graph = BELGraph()
self.update(graph)
return graph
def update(self, graph: BELGraph) -> None:
"""Update the BEL graph's metadata."""
if self.name:
graph.name = self.name
if self.version:
graph.version = self.version
if self.authors:
graph.authors = self.authors
if self.description:
graph.description = self.description
if self.contact:
graph.contact = self.contact
if self.license:
graph.licenses = self.license
if self.copyright:
graph.copyright = self.copyright
if self.disclaimer:
graph.disclaimer = self.disclaimer
@dataclass
class BELRepository:
"""A container for a BEL repository."""
directory: str
output_directory: Optional[str] = None
bel_cache_name: str = "_cache.bel"
metadata: Optional[BELMetadata] = None
formats: Tuple[str, ...] = ("pickle", "nodelink.json")
#: Must include {file_name} and {extension}
cache_fmt: str = "{file_name}.{extension}"
global_summary_ext: str = "summary.tsv"
warnings_ext: str = "warnings.tsv"
#: Arguments passed to :func:`pybel.from_path` during compilation
from_path_kwargs: Mapping[str, Any] = field(default_factory=dict)
#: The location where the summary DataFrame will be output as a TSV.
bel_summary_path: str = field(init=False)
def __post_init__(self) -> None: # noqa: D105
if self.output_directory is None:
self.output_directory = self.directory
self.bel_summary_path = self._build_cache_ext_path(
root=self.output_directory,
file_name=self.bel_cache_name,
extension=self.global_summary_ext.lstrip("."),
)
def _get_global_cache_path_by_extension(self, extension: str) -> str:
return self._build_cache_ext_path(self.output_directory, self.bel_cache_name, extension)
def _build_warnings_path(self, root: str, file_name: str) -> str:
return self._build_cache_ext_path(root, file_name, self.warnings_ext.lstrip("."))
def _build_summary_path(self, root: str, file_name: str) -> str:
return self._build_cache_ext_path(root, file_name, "summary.json")
def _build_cache_ext_path(self, root: str, file_name: str, extension: str) -> str:
return os.path.join(
root,
self.cache_fmt.format(file_name=file_name, extension=extension.lstrip(".")),
)
def walk(self) -> Iterable[Tuple[str, Iterable[str], Iterable[str]]]:
"""Recursively walk this directory."""
return os.walk(self.directory)
def iterate_bel(self) -> Iterable[Tuple[str, str]]:
"""Yield all paths to BEL documents."""
for root, _dirs, file_names in self.walk():
for file_name in sorted(file_names):
if not file_name.startswith("_") and file_name.endswith(".bel"):
yield root, file_name
def clear_global_cache(self) -> None:
"""Clear the global cache."""
self._remove_root_file_name(self.output_directory, self.bel_cache_name)
def clear_local_caches(self) -> None:
"""Clear all caches of BEL documents in the repository."""
for root, file_name in self.iterate_bel():
self._remove_root_file_name(root, file_name)
def clear_local_warned(self) -> None:
"""Clear caches for BEL documents with errors."""
for root, file_name in self.iterate_bel():
if self._has_warnings(root, file_name):
self._remove_root_file_name(root, file_name)
def _has_warnings(self, root: str, file_name: str) -> bool:
return os.path.exists(self._build_warnings_path(root, file_name))
def _remove_root_file_name(self, root: str, file_name: str) -> None:
for _, path in self._iterate_extension_path(root, file_name):
if os.path.exists(path):
os.remove(path)
def _iterate_extension_path(self, root: str, file_name: str) -> Iterable[Tuple[str, str]]:
for extension in self.formats:
yield extension, self._build_cache_ext_path(root, file_name, extension)
def _import_local(self, root: str, file_name: str) -> Optional[BELGraph]:
for _, path in self._iterate_extension_path(root, file_name):
if os.path.exists(path):
return load(path)
return None
def _import_global(self) -> Optional[BELGraph]:
return self._import_local(self.output_directory, self.bel_cache_name)
def _export_local(self, graph: BELGraph, root: str, file_name: str) -> None:
for extension, path in self._iterate_extension_path(root, file_name):
kwargs = OUTPUT_KWARGS.get(extension, {})
dump(graph, path, **kwargs)
with open(self._build_summary_path(root, file_name), "w") as file:
json.dump(graph.summarize.dict(), file, indent=2)
if graph.warnings:
logger.info(f" - {graph.number_of_warnings()} warnings")
warnings_path = self._build_warnings_path(root, file_name)
warnings_df = pd.DataFrame(
[
(
exc.line_number,
exc.position,
exc.line,
exc.__class__.__name__,
str(exc),
)
for _, exc, _ in graph.warnings
],
columns=["Line Number", "Position", "Line", "Error", "Message"],
)
warnings_df.to_csv(warnings_path, sep="\t", index=False)
def _export_global(self, graph: BELGraph) -> None:
self._export_local(graph, self.output_directory, self.bel_cache_name)
def get_graph(
self,
manager: Optional[Manager] = None,
use_cached: bool = True,
use_tqdm: bool = False,
tqdm_kwargs: Optional[Mapping[str, Any]] = None,
from_path_kwargs: Optional[Mapping[str, Any]] = None,
) -> BELGraph:
"""Get a combine graph."""
if use_cached:
graph = self._import_global()
if graph is not None:
return graph
graphs = self.get_graphs(
manager=manager,
use_tqdm=use_tqdm,
tqdm_kwargs=tqdm_kwargs,
from_path_kwargs=from_path_kwargs,
)
graph = union(graphs.values())
if self.metadata is not None:
self.metadata.update(graph)
self._get_summary_df_from_graphs(graphs)
self._export_global(graph)
return graph
def get_indra_statements(self, **kwargs):
"""Get INDRA statements for all graphs.
:rtype: List[indra.statements.Statement]
"""
return list(chain.from_iterable(to_indra_statements(graph) for graph in self.get_graphs(**kwargs).values()))
def get_graphs(
self,
manager: Optional[Manager] = None,
use_cached: bool = True,
use_tqdm: bool = False,
tqdm_kwargs: Optional[Mapping[str, Any]] = None,
from_path_kwargs: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, BELGraph]:
"""Get a mapping of all graphs' paths to their compiled BEL graphs."""
if manager is None:
manager = Manager()
paths = self.iterate_bel()
if use_tqdm:
paths = tqdm(list(paths), **(tqdm_kwargs or {}))
rv = {}
for root, file_name in paths:
path = os.path.join(root, file_name)
if use_cached:
graph = self._import_local(root, file_name)
if graph is not None:
rv[path] = graph
continue
_from_path_kwargs = from_path_kwargs or {}
_from_path_kwargs.update(self.from_path_kwargs)
try:
graph = rv[path] = from_bel_script(path, manager=manager, **_from_path_kwargs)
graph.path = os.path.relpath(os.path.join(root, file_name), self.directory)
except Exception as exc:
logger.warning(f"problem with {path}: {exc}")
continue
enrich_pubmed_citations(graph=graph, manager=manager)
self._export_local(graph, root, file_name)
return rv
def get_summary_df(
self,
manager: Optional[Manager] = None,
use_cached: bool = False,
use_tqdm: bool = False,
tqdm_kwargs: Optional[Mapping[str, Any]] = None,
from_path_kwargs: Optional[Mapping[str, Any]] = None,
save: Union[bool, str, TextIO] = True,
) -> pd.DataFrame:
"""Get a pandas DataFrame summarizing the contents of all graphs in the repository."""
graphs = self.get_graphs(
manager=manager,
use_cached=use_cached,
use_tqdm=use_tqdm,
tqdm_kwargs=tqdm_kwargs,
from_path_kwargs=from_path_kwargs,
)
return self._get_summary_df_from_graphs(graphs, save=save)
def _get_summary_df_from_graphs(self, graphs, save: Union[str, bool, TextIO] = True):
summary_dicts = {
os.path.relpath(path, self.directory): graph.summarize.dict() for path, graph in graphs.items()
}
df = pd.DataFrame.from_dict(summary_dicts, orient="index")
if isinstance(save, str):
df.to_csv(save, sep="\t")
elif save:
df.to_csv(self.bel_summary_path, sep="\t")
return df
def build_cli(self): # noqa: D202
"""Build a command line interface."""
@click.group(help=f"Tools for the BEL repository at {self.directory} using PyBEL v{get_version()}")
@click.pass_context
def main(ctx):
"""Group the commands."""
ctx.obj = self
append_click_group(main)
return main
def get_extensions(self, root: str, file_name: str) -> Set[str]:
"""Get all compiled files for the given BEL."""
# TODO check that this is a valid BEL path!
return {extension for extension, path in self._iterate_extension_path(root, file_name) if os.path.exists(path)}
def _get_global_caches(self):
return self.get_extensions(self.output_directory, self.bel_cache_name)
def _iterate_citations(self, **kwargs) -> Iterable[Tuple[str, str]]:
"""List all citations in documents in this repository."""
for _, _, data in self.get_graph(**kwargs).edges(data=True):
citation = data.get(CITATION)
if citation is not None:
yield citation.namespace, citation.identifier
def _write_caches(bel_repository: BELRepository, root: str, file_name: str):
extensions = ", ".join(sorted(bel_repository.get_extensions(root, file_name)))
has_warnings = os.path.exists(bel_repository._build_warnings_path(root, file_name))
try:
with open(bel_repository._build_summary_path(root, file_name)) as file:
summary = json.load(file)
except FileNotFoundError:
summary = None
if extensions and has_warnings:
s = click.style("✘️ ", fg="red")
elif extensions and not has_warnings:
s = click.style("✔︎ ", fg="green")
else:
s = click.style("? ", fg="yellow", bold=True)
path = os.path.join(root, file_name)
s += os.path.relpath(path, bel_repository.directory)
if extensions:
s += click.style(f" ({extensions})", fg="green")
if summary:
s += click.style(
f' ({summary["Number of Nodes"]} nodes, {summary["Number of Edges"]} edges)',
fg="blue",
)
click.echo(s)
def append_click_group(group: click.Group) -> None: # noqa: D202, C901
"""Append a :py:class:`click.Group`."""
@group.command()
@click.pass_obj
def ls(bel_repository: BELRepository):
"""List the contents of the repository."""
global_caches = bel_repository._get_global_caches()
if global_caches:
click.secho("Global Cache", fg="red", bold=True)
_write_caches(
bel_repository,
bel_repository.output_directory,
bel_repository.bel_cache_name,
)
click.secho("Local Caches", fg="red", bold=True)
for root, file_name in bel_repository.iterate_bel():
_write_caches(bel_repository, root, file_name)
@group.command()
@click.pass_obj
def citations(repository: BELRepository):
"""List citations in the repository."""
for database, reference in sorted(set(repository._iterate_citations(use_tqdm=True)), key=lambda x: int(x[1])):
click.echo(f"{database}\t{reference}")
@group.command()
@host_option
@user_option
@password_option
@click.option("-s", "--sleep", type=int, default=3, help="Seconds to sleep between sending")
@private_option
@click.pass_obj
def upload_separate(
repository: BELRepository,
host: str,
user: str,
password: str,
sleep: int,
private: bool,
):
"""Upload all to BEL Commons."""
it = tqdm(repository.get_graphs().items())
for name, graph in it:
res = to_bel_commons(graph, host=host, user=user, password=password, public=not private)
res_json = res.json()
task_id = res_json.get("task_id")
if task_id is not None:
it.write(f"task:{task_id} - {name}")
it.write(f'see: {host.rstrip("/")}/api/task/{task_id}')
time.sleep(sleep)
else:
it.write(f"problem with {name}: {res_json}")
@group.command()
@host_option
@user_option
@password_option
@private_option
@click.pass_obj
def upload_combine(repository: BELRepository, host: str, user: str, password: str, private: bool):
"""Upload the combine graph."""
graph = repository.get_graph()
res = to_bel_commons(graph, host=host, user=user, password=password, public=not private)
res_json = res.json()
task_id = res_json.get("task_id")
if task_id is not None:
click.echo(f"task:{task_id} - {graph}")
click.echo(f'see: {host.rstrip("/")}/api/task/{task_id}')
else:
click.echo(f"problem with {graph.name}: {res_json}")
@group.command()
@click.confirmation_option()
@click.pass_obj
def uncache(bel_repository: BELRepository):
"""Clear the cached data for the repository."""
bel_repository.clear_global_cache()
bel_repository.clear_local_caches()
@group.command()
@click.confirmation_option()
@click.pass_obj
def uncache_global(bel_repository: BELRepository):
"""Clear the cached data for the repository."""
bel_repository.clear_global_cache()
@group.command()
@click.confirmation_option()
@click.pass_obj
def uncache_local(bel_repository: BELRepository):
"""Clear the cached data for the repository."""
bel_repository.clear_local_caches()
@group.command()
@click.confirmation_option()
@click.pass_obj
def uncache_warned(bel_repository: BELRepository):
"""Clear the cached data for the documents that have warnings."""
bel_repository.clear_local_warned()
@group.command()
@connection_option
@click.option("-r", "--reload", is_flag=True)
@click.option("--no-tqdm", is_flag=True)
@verbose_option
@click.pass_obj
def compile(bel_repository: BELRepository, connection: str, reload: bool, no_tqdm: bool):
"""Summarize the repository."""
if reload:
bel_repository.clear_global_cache()
bel_repository.clear_local_caches()
manager = Manager(connection=connection)
graph = bel_repository.get_graph(
manager=manager,
use_cached=(not reload),
use_tqdm=(not no_tqdm),
tqdm_kwargs=dict(
desc="Loading BEL",
leave=False,
),
from_path_kwargs=dict(
use_tqdm=(not no_tqdm),
tqdm_kwargs=dict(
leave=False,
),
),
)
click.echo(graph.summarize.str())
@group.command()
@click.argument("file", type=click.File("w"))
@click.pass_obj
def html(bel_repository: BELRepository, file: TextIO):
"""Output an HTML summary."""
graph = bel_repository.get_graph()
try:
from pybel_tools.assembler.html import to_html_file
except ImportError:
click.secho("pybel_tools.assembler.html is not available", fg="red")
sys.exit(1)
else:
to_html_file(graph, file)
@click.group()
@click.version_option()
@click.option(
"-d",
"--directory",
default=os.getcwd(),
type=click.Path(file_okay=False, dir_okay=True, exists=True),
help="Defaults to current working directory",
)
@click.pass_context
def main(ctx, directory: str):
"""Command line interface for bel-repository."""
ctx.obj = BELRepository(directory=directory)
append_click_group(main)
if __name__ == "__main__":
main()
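# Usage sketch (illustrative):
#   repo = BELRepository(directory="path/to/bel/files")
#   graph = repo.get_graph(use_tqdm=True)  # compiles, caches, and merges all *.bel files
#   repo.get_summary_df(save=True)         # writes the per-document summary TSV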
| [
"[email protected]"
] | |
4a4aedbcae688967b5b85e60e73a727908c934a5 | 4fc016459e4c78680c61488c771eb6b7eb20d5fe | /Python-Algorithms-DataStructure/src/leet/104_MaximumDepthofBinaryTree.py | f6cf6d7dcb193f73f277665a5f23cbafd59f85b0 | [] | no_license | coremedy/Python-Algorithms-DataStructure | 7c318de68fd9694377a0a4369d8dbeb49e1e17aa | 3873502679a5def6af4be03028542f07d059d1a9 | refs/heads/master | 2021-01-25T07:34:17.714241 | 2015-11-05T10:17:40 | 2015-11-05T10:17:40 | 27,949,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | '''
Created on 2015-08-02
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @return {integer}
def maxDepth(self, root):
if root is None:
return 0
return self.DFS(0, root)
def DFS(self, depth, node):
if node is None:
return depth
return max(self.DFS(depth + 1, node.left), self.DFS(depth + 1, node.right))
if __name__ == '__main__':
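    # Tiny smoke test (illustrative; assumes the TreeNode class sketched above):
    #   root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
    #   print(Solution().maxDepth(root))  # -> 2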
pass | [
"[email protected]"
] | |
7526e1a07f83c8b237e6f892e95f0b2f235bb4b0 | 8fb5319079f3d9a5524a4fa44dc9fdeb4e578a33 | /Contours/counting_coins.py | 3d0b8461f126be4dabeaf660096bdf9d2180144c | [] | no_license | KhairulIzwan/Python-OpenCV-Basics | 1dc414a07d25b2800f3a6f4eb7edf375e891b92b | 2bcf3536c9d5225188dce7c081600459a7b1ebb0 | refs/heads/main | 2023-04-26T17:37:10.838035 | 2021-05-23T03:11:36 | 2021-05-23T03:11:36 | 369,949,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | #!/usr/bin/env python
import numpy as np
import argparse
import cv2
# use argparse to handle parsing our command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
help = "Path to the image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (11, 11), 0)
cv2.imshow("Image", image)
edged = cv2.Canny(blurred, 30, 150)
cv2.imshow("Edges", edged)
_, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
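# NOTE: the 3-tuple return above matches the OpenCV 3.x API; on OpenCV 2.x and
# 4.x, findContours returns just (contours, hierarchy).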
print "I count %d coins in this image" % (len(cnts))
coins = image.copy()
cv2.drawContours(coins, cnts, -1, (0, 255, 0), 2)
cv2.imshow("Coins", coins)
cv2.waitKey(0)
for (i, c) in enumerate(cnts):
(x, y, w, h) = cv2.boundingRect(c)
# print "Coin #%d" % (i + 1)
coinBar = "Coin #%d" % (i + 1)
coin = image[y:y + h, x:x + w]
cv2.imshow(coinBar, coin)
mask = np.zeros(image.shape[:2], dtype = "uint8")
((centerX, centerY), radius) = cv2.minEnclosingCircle(c)
cv2.circle(mask, (int(centerX), int(centerY)), int(radius), 255, -1)
mask = mask[y:y + h, x:x + w]
cv2.imshow("Masked Coin", cv2.bitwise_and(coin, coin, mask=mask))
cv2.waitKey(0)
cv2.destroyWindow(coinBar)
| [
"[email protected]"
] | |
d04a3cc08125307c425fc4a3bbdbde890ec4fcda | b5aa43c8db450c3bcacc8f28897eab684a8032a1 | /data/games/missiles/states/level_fail.py | 1968f989a08e959a762da5f55ffd79a9dd9aa27b | [] | no_license | iminurnamez/Python_Arcade_Collab | 29a74cf2a6264969de9bae3c4a6ed23d6282e793 | 67702414ed30addd1bf46339bb458df34ed88f2a | refs/heads/master | 2021-04-15T07:32:18.573004 | 2018-05-13T14:29:19 | 2018-05-13T14:29:19 | 126,644,972 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | import pygame as pg
from data.components.state_machine import _State
from data.core.tools import scaled_mouse_pos
from data.core import constants as prog_constants
from data.components.labels import Label, ButtonGroup
from data.components.special_buttons import NeonButton
from .. import constants
class LevelFail(_State):
def __init__(self, controller):
super(LevelFail, self).__init__(controller)
def startup(self, persistent):
self.persist = persistent
font = prog_constants.FONTS["Fixedsys500c"]
sr = constants.SCREEN_RECT
color = constants.LOW_LIGHT_GREEN
level_num = self.persist["player"].level_num
self.labels = [
Label(font, 48, "Level {} Failed".format(level_num), color,
{"midtop": (sr.centerx, 5)}),
Label(font, 32, "All your cities are", color,
{"midbottom": (sr.centerx, 200)}),
Label(font, 32, "belong to dust", color,
{"midtop": (sr.centerx, 200)})]
self.buttons = ButtonGroup()
NeonButton((373, 630), "OK", 32, self.to_high_scores,
None, self.buttons)
def to_high_scores(self, *args):
self.persist["player"].clear_save()
self.done = True
self.next = "HIGH_SCORES"
def get_event(self, event, scale):
self.buttons.get_event(event)
def update(self, surface, keys, current_time, dt, scale):
self.buttons.update(scaled_mouse_pos(scale))
self.draw(surface)
def draw(self, surface):
surface.fill(constants.BACKGROUND_BASE)
for label in self.labels:
label.draw(surface)
self.buttons.draw(surface) | [
"[email protected]"
] | |
8ded148e045cac0a80cad6edcc563d60c127de9e | 07996c7f93e7b200146cd314520100cf99d003bd | /raw data/40_tos_with_paragraph/code/crawlParagraph/venv/bin/pip | 78b57aad8638f50daca262ddf547c190564af8a6 | [] | no_license | tjuyanghw/data_policy_analyzer | 31ae683128ca5241fa8f0cb67e2f1132820c2d02 | 010a44ff024bd6d97b21f409f6c62f969e1fdc55 | refs/heads/master | 2022-07-02T19:23:14.141170 | 2020-05-13T16:24:11 | 2020-05-13T16:24:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | #!/Users/xiaoyue/scrapyenv/maliciousLibrarySpider/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | ||
b29a1e598f2e2fc26af73f214978b0998c04b461 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/NISS/common_shamil_v3/hr_violation_punishment/__openerp__.py | 719dad2e987819e3bcda88edba479c93f4c3fcff | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2013-2014 NCTR (<http://www.nctr.sd>).
#
##############################################################################
{
'name': 'Employee Violation and Punishment',
'version': '1.1',
'author': 'NCTR',
'category': 'Human Resources',
'website': 'http://www.nctr.sd',
'summary': 'Employee Violation and Punishment',
'description': """
Employee Violation and Punishment
=================================
""",
'images' : ['images/violations.png'],
'depends': ['hr_payroll_custom'],
'data': [
'security/ir.model.access.csv',
'hr_violation_punishment_view.xml',
'report/hr_report.xml',
'hr_violations_punishment_workflow.xml',
'wizard/emp_violations_punishments.xml',
],
'demo': [],
'test': [],
'installable': True,
'application': True,
'auto_install': False,
'css': [ ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
26726fa6fa874f79a109c4fc897e9f4671bd5ca7 | 439386f9097632d44d31d1f599df76ec2820d072 | /性能项目/统一大厅常规checklist/1601/DFQP/src/uilib/exchange_page.py | 100b9e01ac952280dbe97665969e45d263c46165 | [] | no_license | YiFeng0755/testcase | 33693f0940a6497aa40e2e51a0535c9eb6c12b29 | edc19480c3e94cbcbf004aa9d20099ec6d1b9304 | refs/heads/master | 2020-04-28T04:34:28.232022 | 2019-03-11T11:13:25 | 2019-03-11T11:13:25 | 146,287,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#Author: MindyZhang
'''
Prize redemption scene
'''
from appiumcenter.element import Element
class Exchange_Page(Element):
pass | [
"[email protected]"
] | |
a5a75d90679c6ca3fd506ea8dfbafd949dc61360 | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/lib/googlecloudsdk/core/updater/release_notes.py | 977fe1c29e08b001c9d41029efce76a4f5bf998e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 7,835 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities for comparing RELEASE_NOTES between Cloud SDK versions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core.document_renderers import render_document
from googlecloudsdk.core.updater import installers
from googlecloudsdk.core.util import encoding
from six.moves import StringIO
class ReleaseNotes(object):
"""Represents a parsed RELEASE_NOTES file.
The file should have the general structure of:
# Google Cloud SDK - Release Notes
Copyright 2014-2015 Google Inc. All rights reserved.
## 0.9.78 (2015/09/16)
* Note
* Note 2
## 0.9.77 (2015/09/09)
* Note 3
"""
# This regex matches each version section in the release notes file.
# It uses lookaheads and lookbehinds to be able to ensure double newlines
# without consuming them (because they are needed as part of the match of the
# next version section. This translates to a line starting with '##' preceded
# by a blank line that has a version string and description. It then consumes
# all lines until it hits a newline that is not followed by a blank line and
# another line starting with '##"
_VERSION_SPLIT_REGEX = (
r'(?<=\n)\n## +(?P<version>\S+).*\n(?:\n.*(?!\n\n## ))+.')
MAX_DIFF = 15
@classmethod
def FromURL(cls, url, command_path=None):
"""Parses release notes from the given URL.
Any error in downloading or parsing release notes is logged and swallowed
and None is returned.
Args:
url: str, The URL to download and parse.
command_path: str, The command that is calling this for instrumenting
the user agent for the download.
Returns:
ReleaseNotes, the parsed release notes or None if an error occurred.
"""
try:
response = installers.ComponentInstaller.MakeRequest(url, command_path)
if not response:
return None
code = response.getcode()
if code and code != 200:
return None
text = response.read()
text = encoding.Decode(text)
return cls(text)
# pylint: disable=broad-except, We don't want any failure to download or
# parse the release notes to block an update. Returning None here will
# print a generic message of where the user can go to view the release
# notes online.
except Exception:
log.debug('Failed to download [{url}]'.format(url=url), exc_info=True)
return None
def __init__(self, text):
"""Parse the release notes from the given text.
Args:
text: str, The text of the release notes to parse.
Returns:
ReleaseNotes, the parsed release notes.
"""
self._text = text.replace('\r\n', '\n')
versions = []
for m in re.finditer(ReleaseNotes._VERSION_SPLIT_REGEX, self._text):
versions.append((m.group('version'), m.group().strip()))
# [(version string, full version text including header), ...]
self._versions = versions
def GetVersionText(self, version):
"""Gets the release notes text for the given version.
Args:
version: str, The version to get the release notes for.
Returns:
str, The release notes or None if the version does not exist.
"""
index = self._GetVersionIndex(version)
if index is None:
return None
return self._versions[index][1]
def _GetVersionIndex(self, version):
"""Gets the index of the given version in the list of parsed versions.
Args:
version: str, The version to get the index for.
Returns:
int, The index of the given version or None if not found.
"""
for i, (v, _) in enumerate(self._versions):
if v == version:
return i
return None
def Diff(self, start_version, end_version):
"""Creates a diff of the release notes between the two versions.
The release notes are returned in reversed order (most recent first).
Args:
start_version: str, The version at which to start the diff. This should
be the later of the two versions. The diff will start with this version
and go backwards in time until end_version is hit. If None, the diff
will start at the most recent entry.
end_version: str, The version at which to stop the diff. This should be
the version you are currently on. The diff is accumulated until this
version it hit. This version is not included in the diff. If None,
the diff will include through the end of all release notes.
Returns:
[(version, text)], The list of release notes in the diff from most recent
to least recent. Each item is a tuple of the version string and the
release notes text for that version. Returns None if either of the
versions are not present in the release notes.
"""
if start_version:
start_index = self._GetVersionIndex(start_version)
if start_index is None:
return None
else:
start_index = 0
if end_version:
end_index = self._GetVersionIndex(end_version)
if end_index is None:
return None
else:
end_index = len(self._versions)
return self._versions[start_index:end_index]
def PrintReleaseNotesDiff(release_notes_url, current_version, latest_version):
"""Prints the release notes diff based on your current version.
If any of the arguments are None, a generic message will be printed telling
the user to go to the web to view the release notes. If the release_notes_url
is also None, it will print the developers site page for the SDK.
Args:
release_notes_url: str, The URL to download the latest release notes from.
current_version: str, The current version of the SDK you have installed.
latest_version: str, The version you are about to update to.
"""
if release_notes_url and current_version and latest_version:
notes = ReleaseNotes.FromURL(release_notes_url)
if notes:
release_notes_diff = notes.Diff(latest_version, current_version)
else:
release_notes_diff = None
else:
release_notes_diff = None
if not release_notes_diff:
# We failed to print the release notes. Send people to a nice web page with
# the release notes.
log.status.write(
'For the latest full release notes, please visit:\n {0}\n\n'.format(
config.INSTALLATION_CONFIG.release_notes_url))
return
if len(release_notes_diff) > ReleaseNotes.MAX_DIFF:
log.status.Print("""\
A lot has changed since your last upgrade. For the latest full release notes,
please visit:
{0}
""".format(config.INSTALLATION_CONFIG.release_notes_url))
return
log.status.Print("""\
The following release notes are new in this upgrade.
Please read carefully for information about new features, breaking changes,
and bugs fixed. The latest full release notes can be viewed at:
{0}
""".format(config.INSTALLATION_CONFIG.release_notes_url))
full_text = StringIO()
for _, text in release_notes_diff:
full_text.write(text)
full_text.write('\n')
full_text.seek(0)
render_document.RenderDocument('text', full_text, log.status)
log.status.Print()
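# Usage sketch (illustrative):
#   notes = ReleaseNotes.FromURL('https://example.com/RELEASE_NOTES')
#   if notes:
#     for version, text in notes.Diff('0.9.78', '0.9.77') or []:
#       print(version)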
| [
"[email protected]"
] | |
5ae862e9d518c2f20efcded062ee983747e72c04 | 4778bb52672e5bfd3bc227fd46bd3e2262146788 | /check_pickle_data.py | 77936b0793afdc053427dfe6f921049916de9d4f | [] | no_license | vyshor/NTU_timetable_generator | cf5d2914a52d41ca1087259fafe215d3298cfd3d | e7223fd98da718232af85e960bddc9e88ee02e5d | refs/heads/master | 2021-06-02T09:12:44.419674 | 2021-05-20T14:25:04 | 2021-05-20T14:25:04 | 135,579,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | import pandas as pd
import pickle
import os.path
if os.path.isfile('database.p'):
with open('database.p', 'rb') as f:
store = pickle.load(f)
print(store.keys())
print([x for x in store.keys() if 'CZ' in x])
print(store['HE9091']) | [
"[email protected]"
] | |
9d2088838424734104abac49d03bc31bad104416 | ca48bab2e2ffca8bb351050791f3b94bccc886b9 | /final report/interpreter2.py | 95afe2a50ddc908c1b87ab7b75229f75451ed525 | [] | no_license | haaksmash/QUI | ff394205bd3c3c089d23c0de66bcc4de6bc4e65b | f1cc2b3e999bebc7811598bde0f3ffddba216e65 | refs/heads/master | 2020-04-06T03:35:15.499196 | 2011-12-10T09:21:55 | 2011-12-10T09:21:55 | 2,872,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | $ python -i FileModel.py
>>> f = FileModel()
>>> f.size = "really big"
Traceback (most recent call last):
...
...
fields.ValidationError: Could not convert to int: really big
>>> f.size = 100
>>> f.size
100
>>> | [
"[email protected]"
] | |
21c63146676fd30217432916e59f7094633339a4 | 1a9852fe468f18e1ac3042c09286ccda000a4135 | /Specialist Certificate in Data Analytics Essentials/DataCamp/05-Working_with_Dates_and_Times/e23_march_29_throughout_a_decade.py | f8a0f897b1922372263e9afbb7bb4c04be5da9a8 | [] | no_license | sarmabhamidipati/UCD | 452b2f1e166c1079ec06d78e473730e141f706b2 | 101ca3152207e2fe67cca118923896551d5fee1c | refs/heads/master | 2023-08-14T15:41:24.312859 | 2021-09-22T17:33:01 | 2021-09-22T17:33:01 | 386,592,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | """
For example, in the United Kingdom, as of the time this lesson was written, Daylight Saving begins on the last Sunday
in March. Let's look at the UTC offset for March 29, at midnight, for the years 2000 to 2010.
Using tz, set the timezone for dt to be 'Europe/London'.
Within the for loop:
Use the .replace() method to change the year for dt to be y.
Call .isoformat() on the result to observe the results.
"""
# Import datetime and tz
from datetime import datetime
from dateutil import tz
# Create starting date
dt = datetime(2000, 3, 29, tzinfo=tz.gettz('Europe/London'))
# Loop over the dates, replacing the year, and print the ISO timestamp
for y in range(2000, 2011):
print(dt.replace(year=y).isoformat())
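# Expected pattern (a sketch): the printed offset flips between +00:00 and
# +01:00 across the years, since March 29 falls before or after the last
# Sunday in March (when UK Daylight Saving begins) depending on the year.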
| [
"[email protected]"
] | |
fa469309fe18cbe3e77032ace895be4cfa02963f | aa7049506e929693941436f93e22b13ff3122650 | /clubs/migrations/0002_club_club_picture.py | 95e50125132ee8e4eda71cd2d4fd2b4b1f9cfa77 | [] | no_license | austinbrovick/bellevue_college_hackathon | 24aa5f1ef64c4a4b85dd50e1f6dd628be15f3817 | 2ad9fa6c5ea79e8a34d55df8e21838aeb8fd044f | refs/heads/master | 2021-05-31T16:08:32.770057 | 2016-05-21T16:54:46 | 2016-05-21T16:54:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-20 07:24
from __future__ import unicode_literals
import clubs.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clubs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='club',
name='club_picture',
field=models.ImageField(blank=True, null=True, upload_to=clubs.models.upload_location),
),
]
| [
"[email protected]"
] | |
9475e978727f421d6640b6c19aa2463bef419be8 | e9e717e8dd8d05ccf39170492721559076312a50 | /{{ cookiecutter.repo_name }}/src/transform.py | 37973295657a02ee98c67018a41f22b4433f8016 | [
"MIT"
] | permissive | alexkyllo/workbench-py | bf9ca182eb86ddfb828887ee459a63212373c79d | c0f56450a416fda6905b2f8ee087d414bcc0dd95 | refs/heads/master | 2022-12-08T12:02:01.038914 | 2020-09-04T05:28:33 | 2020-09-04T05:28:33 | 291,903,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | """transform.py
fit a transformer on test data to transform
test and training data.
"""
import os
import logging
import dotenv
import click
import joblib
from sklearn import preprocessing, impute, pipeline, compose
@click.command()
@click.argument("input_file", type=click.Path(exists=True))
@click.argument("output_file", type=click.Path)
@click.option("pipeline_file", type=click.Path)
@click.option("--fit/--no-fit", default=False, help="Fit the transformer")
def transform(input_file, output_file, pipeline_file, fit):
"""
Transform INPUT_FILE to OUTPUT_FILE using serialized PIPELINE_FILE.
If --fit specified, a pipeline is created, fitted on the data,
and written to PIPELINE_FILE.
Otherwise, a pipeline is read from PIPELINE_FILE and used to transform
the data only.
"""
logger = logging.getLogger(__name__)
logger.info("Reading %s", input_file)
    if fit:
        # Create the pipeline and save it to pipeline_file. The original leaves
        # the pipeline construction unspecified; the steps below are an assumed
        # minimal sketch (impute missing values, then standard-scale). Loading
        # the data and calling pipe.fit() on it is still left as a stub here.
        pipe = pipeline.Pipeline([
            ("impute", impute.SimpleImputer()),
            ("scale", preprocessing.StandardScaler()),
        ])
        joblib.dump(pipe, pipeline_file)
    else:
        # read and deserialize the previously fitted pipeline from pipeline_file
        pipe = joblib.load(pipeline_file)
def main():
log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
dotenv.load_dotenv(dotenv.find_dotenv())
transform()
if __name__ == "__main__":
main()
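# Example CLI usage (illustrative):
#   python transform.py train.csv train_out.csv --pipeline-file pipe.joblib --fit
#   python transform.py test.csv test_out.csv --pipeline-file pipe.joblib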
| [
"[email protected]"
] | |
4240df00eb5010e26f95c087f229324170c9f756 | 18a6b272d4c55b24d9c179ae1e58959674e53afe | /tf_rl/examples/Sutton_RL_Intro/ch4_DP/value_iteration.py | 60e7e8461ec89060fd9007c1bb8e4dffbb0be478 | [
"MIT"
] | permissive | Rowing0914/TF2_RL | 6cce916f409b3d4ef2a5a40a0611908f20d08b2c | c1b7f9b376cbecf01deb17f76f8e761035ed336a | refs/heads/master | 2022-12-10T09:58:57.456415 | 2021-05-23T02:43:21 | 2021-05-23T02:43:21 | 233,476,950 | 9 | 1 | MIT | 2022-12-08T07:02:42 | 2020-01-12T23:53:48 | Python | UTF-8 | Python | false | false | 854 | py | # Following the algo in section 4.4 Value Iteration
from policy_evaluation import Policy_Evaluation
import sys
import numpy as np
if "../" not in sys.path:
sys.path.append("../")
from utils.envs.grid_world import GridworldEnv
def Value_Iteration(env, policy, state_value, gamma, theta):
    # Evaluate the current policy, then improve it greedily with a one-step
    # lookahead on the value function (assumes the gym-style env.P[s][a]
    # lists of (prob, next_state, reward, done) transitions).
    state_value = Policy_Evaluation(env, policy, state_value, gamma, theta).flatten()
    for s in range(env.nS):
        action_values = np.zeros(env.nA)
        for a in range(env.nA):
            for prob, next_state, reward, done in env.P[s][a]:
                action_values[a] += prob * (reward + gamma * state_value[next_state])
        policy[s] = np.eye(env.nA)[np.argmax(action_values)]
    return policy, state_value
if __name__ == '__main__':
env = GridworldEnv()
state_value = np.zeros(env.nS)
policy = np.ones([env.nS, env.nA]) / env.nA
gamma = 1
theta = 0.00001
print("===== Training Started =====")
    policy, state_value = Value_Iteration(env, policy, state_value, gamma, theta)
print("===== Training Finished =====")
print(policy)
print(state_value)
| [
"[email protected]"
] | |
ae317d3819b06f5de71f3da6f88fc4df21141864 | b593247a2bf162819eea6820b6a25c7a659d2f76 | /Unit 07 Lists and Functions/01 Lists and Functions/1 List Recap/4-Removing elements from lists.py | 8f667b9e1e80ddb0e19190278409ab25d8eb16c0 | [] | no_license | Angelpacman/codecademy-py3 | d4d727857a8894fec5dd3d78c00f3f25f31979dc | 729d232a8732e53bdf0131246b043354ed933614 | refs/heads/master | 2020-03-28T02:50:31.431167 | 2019-01-26T01:07:01 | 2019-01-26T01:07:01 | 147,601,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | n = [1, 3, 5]
# Removes the first occurrence of the value 1 from the list,
# NOT the item at index 1
n.remove(1)
# Another possible solution; removes the item at the given index:
del(n[0])
# Another possible solution; removes the item at the given index and returns it to you:
n.pop(0)
# Note: run one after another, the three calls above empty the list, so this prints []
print(n)
| [
"[email protected]"
] | |
6cf0154e33520dc042d50a3f03c9ef013abaeca8 | 1e5c6f4b08d9470fce248cf39e6dccce40e90a41 | /codes/11/vpython_mouse.py | 1dfa5be57b729a6fc2531903cb36ec3f2576e212 | [] | no_license | misaiya99/scipybook2 | 1529cfb7f800df2ef7ce024a86281af16e343a37 | 734ba177b4705cc25da695d42a8cbada7cd22bd9 | refs/heads/master | 2020-03-10T21:26:23.595494 | 2017-08-25T09:48:07 | 2017-08-25T09:48:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # -*- coding: utf-8 -*-
from visual import *
text = label(pos=(0, -2, 0))
sphere(pos=(0,2,0))
box(pos = (2, 0, 0))
ray = arrow(pos=(0,0,0), color=(1,0,0))
while True:
rate(30)
texts = []
for attrname in ["pos", "pick", "pickpos", "camera", "ray"]:
texts.append("%s=%s" % (attrname, getattr(scene.mouse, attrname)))
texts.append("project=%s" %
scene.mouse.project(normal=scene.forward, point=scene.center))
text.text = "\n".join(texts)
ray.axis = scene.mouse.ray
if scene.mouse.events > 0:
event = scene.mouse.getevent()
print(("press=%s, click=%s, drag=%s, drop=%s, release=%s" % (
event.press, event.click, event.drag, event.drop, event.release
)))
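# Note: this targets the classic VPython ("visual" module) API; in VPython 7
# the import is `from vpython import *` and mouse input is handled with
# scene.bind('mousedown', callback) instead of polling scene.mouse.events.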
| [
"[email protected]"
] | |
4bcd800c07e4277b3973998a8b7011e197ab5888 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/UpdateTableThemeRequest.py | 4d6f11d6dc5e74a7089425fa39c49b429212d945 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,746 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class UpdateTableThemeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'UpdateTableTheme')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_ThemeId(self):
return self.get_query_params().get('ThemeId')
def set_ThemeId(self,ThemeId):
self.add_query_param('ThemeId',ThemeId)
def get_ProjectId(self):
return self.get_query_params().get('ProjectId')
def set_ProjectId(self,ProjectId):
self.add_query_param('ProjectId',ProjectId) | [
"[email protected]"
] | |
4d53d7f73ebb9720864f89da0c2327cfa136e2c2 | 54ddb3f38cd09ac25213a7eb8743376fe778fee8 | /topic_05_data_structure/practice/zip_1_common.py | 4236c882354be343b387f643ccd4d9be6d9b4296 | [] | no_license | ryndovaira/leveluppythonlevel1_300321 | dbfd4ee41485870097ee490f652751776ccbd7ab | 0877226e6fdb8945531775c42193a90ddb9c8a8b | refs/heads/master | 2023-06-06T07:44:15.157913 | 2021-06-18T11:53:35 | 2021-06-18T11:53:35 | 376,595,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | """
Function zip_common.
Takes 3 arguments: a list, a string, and a tuple.
Returns a list (list) with triples of values from each argument.
NOTE: for the string, one element = one character
(The checks are performed in exactly this order:)
If something other than a list is passed, return the string 'First arg must be list!'.
If something other than a str is passed, return the string 'Second arg must be str!'.
If something other than a tuple is passed, return the string 'Third arg must be tuple!'.
If the list is empty, return the string 'Empty list!'.
If the str is empty, return the string 'Empty str!'.
If the tuple is empty, return the string 'Empty tuple!'.
If the list, str and tuple differ in size, truncate to the shortest (standard zip).
"""
def zip_common(my_list, my_str, my_tuple):
if type(my_list) != list:
return 'First arg must be list!'
if type(my_str) != str:
return 'Second arg must be str!'
if type(my_tuple) != tuple:
return 'Third arg must be tuple!'
if len(my_list) == 0:
return 'Empty list!'
if len(my_str) == 0:
return 'Empty str!'
if len(my_tuple) == 0:
return 'Empty tuple!'
return list(zip(my_list, my_str, my_tuple))
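# Usage sketch (illustrative):
#   zip_common([1, 2, 3], "ab", (True, False))  # -> [(1, 'a', True), (2, 'b', False)]
#   zip_common("nope", "ab", (1,))              # -> 'First arg must be list!'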
| [
"[email protected]"
] | |
ada60d2e8fc354bdf8b960331d4a2c3dd3495c84 | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/shared/actions/__init__.py | dd15a0c04bdd65ad5d78e61ce0b7c7d736212659 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 9,956 | py | # 2017.08.29 21:49:32 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/shared/actions/__init__.py
import BigWorld
from adisp import process
from debug_utils import LOG_DEBUG, LOG_ERROR
from gui.Scaleform.Waiting import Waiting
from gui.Scaleform.framework import ViewTypes
from gui.app_loader import g_appLoader
from gui.prb_control.settings import FUNCTIONAL_FLAG
from gui.shared import g_eventBus, EVENT_BUS_SCOPE
from gui.shared.actions.chains import ActionsChain
from gui.shared.events import LoginEventEx, GUICommonEvent
from helpers import dependency
from predefined_hosts import g_preDefinedHosts, getHostURL
from skeletons.connection_mgr import IConnectionManager
from skeletons.gui.lobby_context import ILobbyContext
from skeletons.gui.login_manager import ILoginManager
__all__ = ('LeavePrbModalEntity', 'DisconnectFromPeriphery', 'ConnectToPeriphery', 'PrbInvitesInit', 'ActionsChain')
class Action(object):
def __init__(self):
super(Action, self).__init__()
self._completed = False
self._running = False
def invoke(self):
pass
def isInstantaneous(self):
return True
def isRunning(self):
return self._running
def isCompleted(self):
return self._completed
CONNECT_TO_PERIPHERY_DELAY = 2.0
class LeavePrbModalEntity(Action):
def __init__(self):
super(LeavePrbModalEntity, self).__init__()
self._running = False
def invoke(self):
from gui.prb_control.dispatcher import g_prbLoader
dispatcher = g_prbLoader.getDispatcher()
if dispatcher:
state = dispatcher.getFunctionalState()
if state.hasModalEntity:
factory = dispatcher.getControlFactories().get(state.ctrlTypeID)
if factory:
ctx = factory.createLeaveCtx(flags=FUNCTIONAL_FLAG.SWITCH)
if ctx:
self._running = True
self.__doLeave(dispatcher, ctx)
else:
LOG_ERROR('Leave modal entity. Can not create leave ctx', state)
else:
LOG_ERROR('Leave modal entity. Factory is not found', state)
else:
LOG_DEBUG('Leave modal entity. Player has not prebattle')
self._completed = True
def isInstantaneous(self):
return False
@process
def __doLeave(self, dispatcher, ctx):
self._completed = yield dispatcher.leave(ctx)
if self._completed:
LOG_DEBUG('Leave modal entity. Player left prebattle.')
else:
LOG_DEBUG('Leave modal entity. Action was failed.')
self._running = False
class SelectPrb(Action):
def __init__(self, prbAction):
super(SelectPrb, self).__init__()
self._running = False
self._prbAction = prbAction
def invoke(self):
from gui.prb_control.dispatcher import g_prbLoader
dispatcher = g_prbLoader.getDispatcher()
if dispatcher:
self._running = True
self.__doSelect(dispatcher)
def isInstantaneous(self):
return False
@process
def __doSelect(self, dispatcher):
self._completed = yield dispatcher.doSelectAction(self._prbAction)
if self._completed:
LOG_DEBUG('Select prebattle entity. Player has joined prebattle.')
else:
LOG_DEBUG('Select prebattle entity. Action was failed.')
self._running = False
class DisconnectFromPeriphery(Action):
connectionMgr = dependency.descriptor(IConnectionManager)
def __init__(self):
super(DisconnectFromPeriphery, self).__init__()
def isInstantaneous(self):
return False
def invoke(self):
self._running = True
g_appLoader.goToLoginByRQ()
def isRunning(self):
app = g_appLoader.getApp()
if app:
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
view = app.containerManager.getView(ViewTypes.DEFAULT)
if view and view.settings.alias == VIEW_ALIAS.LOGIN and view.isCreated() and self.connectionMgr.isDisconnected():
LOG_DEBUG('Disconnect action. Player came to login')
self._completed = True
self._running = False
return self._running
class ConnectToPeriphery(Action):
loginManager = dependency.descriptor(ILoginManager)
lobbyContext = dependency.descriptor(ILobbyContext)
connectionMgr = dependency.descriptor(IConnectionManager)
def __init__(self, peripheryID):
super(ConnectToPeriphery, self).__init__()
self.__host = g_preDefinedHosts.periphery(peripheryID)
self.__endTime = None
self.__credentials = self.lobbyContext.getCredentials()
return
def isInstantaneous(self):
return False
def isRunning(self):
if self.__endTime and self.__endTime <= BigWorld.time():
self.__endTime = None
self.__doConnect()
return super(ConnectToPeriphery, self).isRunning()
def invoke(self):
if self.__host and self.__credentials:
if len(self.__credentials) < 2:
self._completed = False
LOG_ERROR('Connect action. Login info is invalid')
return
login, token2 = self.__credentials
if not login or not token2:
self._completed = False
LOG_ERROR('Connect action. Login info is invalid')
return
self._running = True
self.__endTime = BigWorld.time() + CONNECT_TO_PERIPHERY_DELAY
Waiting.show('login')
else:
LOG_ERROR('Connect action. Login info is invalid')
self._completed = False
self._running = False
def __doConnect(self):
login, token2 = self.__credentials
self.__addHandlers()
self.loginManager.initiateRelogin(login, token2, getHostURL(self.__host, token2))
def __addHandlers(self):
g_eventBus.addListener(LoginEventEx.ON_LOGIN_QUEUE_CLOSED, self.__onLoginQueueClosed, scope=EVENT_BUS_SCOPE.LOBBY)
self.connectionMgr.onConnected += self.__onConnected
self.connectionMgr.onRejected += self.__onRejected
def __removeHandlers(self):
g_eventBus.removeListener(LoginEventEx.ON_LOGIN_QUEUE_CLOSED, self.__onLoginQueueClosed, scope=EVENT_BUS_SCOPE.LOBBY)
self.connectionMgr.onConnected -= self.__onConnected
self.connectionMgr.onRejected -= self.__onRejected
def __onConnected(self):
self.__removeHandlers()
self._completed = True
self._running = False
def __onRejected(self, status, responseData):
self.__removeHandlers()
self._completed = False
self._running = False
def __onLoginQueueClosed(self, _):
self.__removeHandlers()
self._completed = False
self._running = False
        LOG_DEBUG('Connect action. Player exited the login queue')
class PrbInvitesInit(Action):
def __init__(self):
super(PrbInvitesInit, self).__init__()
def isInstantaneous(self):
return False
def invoke(self):
from gui.prb_control.dispatcher import g_prbLoader
invitesManager = g_prbLoader.getInvitesManager()
if invitesManager:
if invitesManager.isInited():
                LOG_DEBUG('Invites init action. List of invites is built')
self._completed = True
else:
self._running = True
invitesManager.onInvitesListInited += self.__onInvitesListInited
else:
LOG_ERROR('Invites init action. Invites manager not found')
self._completed = False
def __onInvitesListInited(self):
from gui.prb_control.dispatcher import g_prbLoader
invitesManager = g_prbLoader.getInvitesManager()
if invitesManager:
            LOG_DEBUG('Invites init action. List of invites is built')
invitesManager.onInvitesListInited -= self.__onInvitesListInited
else:
LOG_ERROR('Invites manager not found')
self._completed = True
self._running = False
class WaitFlagActivation(Action):
def __init__(self):
super(WaitFlagActivation, self).__init__()
self._isActive = False
def activate(self):
LOG_DEBUG('Flag is activated')
self._isActive = True
def inactivate(self):
LOG_DEBUG('Flag is inactivated')
self._isActive = False
def invoke(self):
if not self._isActive:
self._running = True
else:
self._completed = True
def isRunning(self):
if self._isActive:
self._running = False
self._completed = True
return self._running
def isInstantaneous(self):
return False
class OnLobbyInitedAction(Action):
def __init__(self, onInited = None):
super(OnLobbyInitedAction, self).__init__()
self.__isLobbyInited = False
self.__onInited = onInited
g_eventBus.addListener(GUICommonEvent.LOBBY_VIEW_LOADED, self.__onLobbyInited)
def invoke(self):
self._running = True
self._completed = False
if self.__isLobbyInited:
onInited = self.__onInited
if onInited and callable(onInited):
onInited()
self._completed = True
self._running = False
def __onLobbyInited(self, _):
self.__isLobbyInited = True
g_eventBus.removeListener(GUICommonEvent.LOBBY_VIEW_LOADED, self.__onLobbyInited)
self.invoke()
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\shared\actions\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:49:33 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
6856e89aa1d898a889e5af7dae23b5576017b49c | 292cec77b5003a2f80360d0aee77556d12d990f7 | /typings/filetype/types/video.pyi | 9b61193428fe1cf6d3aee36815c69fc32f0d96e0 | [
"Apache-2.0"
] | permissive | yubozhao/BentoML | 194a6ec804cc1c6dbe7930c49948b6707cbc3c5f | d4bb5cbb90f9a8ad162a417103433b9c33b39c84 | refs/heads/master | 2022-12-17T00:18:55.555897 | 2022-12-06T00:11:39 | 2022-12-06T00:11:39 | 178,978,385 | 3 | 0 | Apache-2.0 | 2020-12-01T18:17:15 | 2019-04-02T01:53:53 | Python | UTF-8 | Python | false | false | 2,296 | pyi | """
This type stub file was generated by pyright.
"""
from .base import Type
from .isobmff import IsoBmff
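# (Added note) Illustrative usage of the runtime library these stubs describe;
# "sample.mp4" is a hypothetical path:
#   import filetype
#   kind = filetype.guess("sample.mp4")
#   if kind is not None:
#       print(kind.mime, kind.extension)  # e.g. "video/mp4", "mp4"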
class Mp4(IsoBmff):
"""
Implements the MP4 video type matcher.
"""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf): # -> bool:
...
class M4v(Type):
"""
Implements the M4V video type matcher.
"""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf): # -> Literal[False]:
...
class Mkv(Type):
"""
Implements the MKV video type matcher.
"""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf):
...
class Webm(Type):
"""
Implements the WebM video type matcher.
"""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf):
...
class Mov(IsoBmff):
"""
Implements the MOV video type matcher.
"""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf): # -> Literal[False]:
...
class Avi(Type):
"""
Implements the AVI video type matcher.
"""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf): # -> Literal[False]:
...
class Wmv(Type):
"""
Implements the WMV video type matcher.
"""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf): # -> Literal[False]:
...
class Flv(Type):
"""
Implements the FLV video type matcher.
"""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf): # -> Literal[False]:
...
class Mpeg(Type):
"""
Implements the MPEG video type matcher.
"""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf): # -> Literal[False]:
...
class M3gp(Type):
"""Implements the 3gp image type matcher."""
MIME = ...
EXTENSION = ...
def __init__(self) -> None:
...
def match(self, buf):
...
| [
"[email protected]"
] | |
9b750390731edd5a1a683067240907563877df45 | 7a66ff970580297ba50b0d4bdd0406352071c05a | /Pyscience/3. numpy.py | 5662327a3911c27438e44e19446518f84358e67d | [] | no_license | zero-big/Python-Basic | 1ab3da9d09983d937b410ca9ec1741424ebaa3ae | 5cd2eaa822aedb46a79283a6007b900a3c9665c8 | refs/heads/master | 2023-08-03T13:10:22.556732 | 2021-09-24T11:35:50 | 2021-09-24T11:35:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | py | import numpy as np
# 1. Creating arrays: array
b = np.array([2, 4, 6, 8])
print(b) # [2 4 6 8]
# ndim : 랭크를 반환
print(b.ndim) # 1
# size : 배열에 있는 값의 총 개수 반환
print(b.size) # 4
# shape : 각 랭크에 있는 값의 개수 반환
print(b.shape) # (4,)
a = np.arange(10)
print(a) # [0 1 2 3 4 5 6 7 8 9]
print(a.ndim) # 1
print(a.shape) # (10,)
print(a.size) # 10
a = np.arange(7, 11)
print(a) # [ 7 8 9 10]
f = np.arange(2.0, 9.8, 0.3)
print(f)
# [2. 2.3 2.6 2.9 3.2 3.5 3.8 4.1 4.4 4.7 5. 5.3 5.6 5.9 6.2 6.5 6.8 7.1
# 7.4 7.7 8. 8.3 8.6 8.9 9.2 9.5 9.8]
g = np.arange(10, 4, -1.5, dtype=np.float64)  # np.float is deprecated (removed in NumPy 1.24)
print(g) # [10. 8.5 7. 5.5]
a = np.zeros((3,))
print(a) # [0. 0. 0.]
print(a.ndim) # 1
print(a.shape) # (3,)
print(a.size) # 3
b = np.zeros((2, 4))
print(b)
# [[0. 0. 0. 0.]
# [0. 0. 0. 0.]]
print(b.ndim) # 2
print(b.shape) # (2, 4)
print(b.size) # 8
k = np.ones((3, 5))
print(k)
# [[1. 1. 1. 1. 1.]
# [1. 1. 1. 1. 1.]
# [1. 1. 1. 1. 1.]]
m = np.random.random((3, 5))
print(m)
# [[0.92144665 0.79460743 0.98429623 0.5172086 0.0727177 ]
# [0.3467992 0.07082806 0.06713763 0.92576145 0.37867405]
# [0.57972622 0.02252859 0.66872603 0.70532502 0.7316084 ]]
a = np.arange(10)
a = a.reshape(2, 5)
print(a)
# [[0 1 2 3 4]
# [5 6 7 8 9]]
print(a.ndim) # 2
print(a.shape) # (2, 5)
print(a.size) # 10
a = a.reshape(5, 2)
print(a)
# [[0 1]
# [2 3]
# [4 5]
# [6 7]
# [8 9]]
print(a.ndim) # 2
print(a.shape) # (5, 2)
print(a.size) # 10
a.shape = (2, 5)
print(a)
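# (Added example) reshape can infer one dimension when passed -1:
c = np.arange(12).reshape(3, -1)  # the -1 is inferred as 4
print(c.shape)  # (3, 4)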
# Array operations
from numpy import *
a = arange(4)
a *= 3
print(a) # [0 3 6 9]
plain_list = list(range(4))
print(plain_list) # [0, 1, 2, 3]
plain_list = [num*3 for num in plain_list]
print(plain_list) # [0, 3, 6, 9]
a = zeros((2, 5)) + 17.0
print(a)
# [[17. 17. 17. 17. 17.]
# [17. 17. 17. 17. 17.]]
# @ : matrix multiplication
a = np.array([[1,2], [3,4]])
b = a @ a
print(b)
# [[ 7 10]
# [15 22]]
# Linear algebra
# 4x + 5y = 20
# x + 2y = 13
coefficients = np.array([ [4,5], [1,2]])
dependents = np.array([20, 13])
answer = np.linalg.solve(coefficients, dependents)
print(answer)
# [-8.33333333 10.66666667]
print(4 * answer[0] + 5 * answer[1] ) # 20.0
print(1 * answer[0] + 2 * answer[1] ) # 13.0
product = np.dot(coefficients, answer)
print(product) # [20. 13.]
print(np.allclose(product, dependents)) # True
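# (Added example) the same system solved via the matrix inverse; np.linalg.solve
# is preferred numerically, this is only for illustration:
answer2 = np.linalg.inv(coefficients) @ dependents
print(np.allclose(answer, answer2))  # True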
| [
"[email protected]"
] | |
60da30781917abab3957aa8014520618378468ed | 9905901a2beae3ff4885fbc29842b3c34546ffd7 | /nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/responder/responderpolicy.py | c615cd853ff5b01cace69af2e386a58b6b117f46 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | culbertm/NSttyPython | f354ebb3dbf445884dbddb474b34eb9246261c19 | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | refs/heads/master | 2020-04-22T17:07:39.654614 | 2019-02-13T19:07:23 | 2019-02-13T19:07:23 | 170,530,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,054 | py | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class responderpolicy(base_resource) :
""" Configuration for responder policy resource. """
def __init__(self) :
self._name = None
self._rule = None
self._action = None
self._undefaction = None
self._comment = None
self._logaction = None
self._appflowaction = None
self._newname = None
self._hits = None
self._undefhits = None
self._builtin = None
self.___count = None
@property
def name(self) :
r"""Name for the responder policy.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Can be changed after the responder policy is added.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my responder policy" or 'my responder policy').
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for the responder policy.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Can be changed after the responder policy is added.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my responder policy" or 'my responder policy').
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rule(self) :
r"""Default syntax expression that the policy uses to determine whether to respond to the specified request.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
r"""Default syntax expression that the policy uses to determine whether to respond to the specified request.
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def action(self) :
r"""Name of the responder action to perform if the request matches this responder policy. There are also some built-in actions which can be used. These are:
* NOOP - Send the request to the protected server instead of responding to it.
* RESET - Reset the client connection by closing it. The client program, such as a browser, will handle this and may inform the user. The client may then resend the request if desired.
* DROP - Drop the request without sending a response to the user.
"""
try :
return self._action
except Exception as e:
raise e
@action.setter
def action(self, action) :
r"""Name of the responder action to perform if the request matches this responder policy. There are also some built-in actions which can be used. These are:
* NOOP - Send the request to the protected server instead of responding to it.
* RESET - Reset the client connection by closing it. The client program, such as a browser, will handle this and may inform the user. The client may then resend the request if desired.
* DROP - Drop the request without sending a response to the user.
"""
try :
self._action = action
except Exception as e:
raise e
@property
def undefaction(self) :
r"""Action to perform if the result of policy evaluation is undefined (UNDEF). An UNDEF event indicates an internal error condition. Only the above built-in actions can be used.
"""
try :
return self._undefaction
except Exception as e:
raise e
@undefaction.setter
def undefaction(self, undefaction) :
r"""Action to perform if the result of policy evaluation is undefined (UNDEF). An UNDEF event indicates an internal error condition. Only the above built-in actions can be used.
"""
try :
self._undefaction = undefaction
except Exception as e:
raise e
@property
def comment(self) :
r"""Any type of information about this responder policy.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
r"""Any type of information about this responder policy.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def logaction(self) :
r"""Name of the messagelog action to use for requests that match this policy.
"""
try :
return self._logaction
except Exception as e:
raise e
@logaction.setter
def logaction(self, logaction) :
r"""Name of the messagelog action to use for requests that match this policy.
"""
try :
self._logaction = logaction
except Exception as e:
raise e
@property
def appflowaction(self) :
r"""AppFlow action to invoke for requests that match this policy.
"""
try :
return self._appflowaction
except Exception as e:
raise e
@appflowaction.setter
def appflowaction(self, appflowaction) :
r"""AppFlow action to invoke for requests that match this policy.
"""
try :
self._appflowaction = appflowaction
except Exception as e:
raise e
@property
def newname(self) :
r"""New name for the responder policy. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) hash (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my responder policy" or 'my responder policy').<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
r"""New name for the responder policy. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) hash (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my responder policy" or 'my responder policy').<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
@property
def hits(self) :
r"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def undefhits(self) :
r"""Number of policy UNDEF hits.
"""
try :
return self._undefhits
except Exception as e:
raise e
@property
def builtin(self) :
r"""Flag to determine if responder policy is built-in or not.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
"""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(responderpolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.responderpolicy
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
r""" Use this API to add responderpolicy.
"""
try :
if type(resource) is not list :
addresource = responderpolicy()
addresource.name = resource.name
addresource.rule = resource.rule
addresource.action = resource.action
addresource.undefaction = resource.undefaction
addresource.comment = resource.comment
addresource.logaction = resource.logaction
addresource.appflowaction = resource.appflowaction
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].rule = resource[i].rule
addresources[i].action = resource[i].action
addresources[i].undefaction = resource[i].undefaction
addresources[i].comment = resource[i].comment
addresources[i].logaction = resource[i].logaction
addresources[i].appflowaction = resource[i].appflowaction
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete responderpolicy.
"""
try :
if type(resource) is not list :
deleteresource = responderpolicy()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
r""" Use this API to update responderpolicy.
"""
try :
if type(resource) is not list :
updateresource = responderpolicy()
updateresource.name = resource.name
updateresource.rule = resource.rule
updateresource.action = resource.action
updateresource.undefaction = resource.undefaction
updateresource.comment = resource.comment
updateresource.logaction = resource.logaction
updateresource.appflowaction = resource.appflowaction
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].rule = resource[i].rule
updateresources[i].action = resource[i].action
updateresources[i].undefaction = resource[i].undefaction
updateresources[i].comment = resource[i].comment
updateresources[i].logaction = resource[i].logaction
updateresources[i].appflowaction = resource[i].appflowaction
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
r""" Use this API to unset the properties of responderpolicy resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = responderpolicy()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ responderpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, new_name) :
r""" Use this API to rename a responderpolicy resource.
"""
try :
renameresource = responderpolicy()
if type(resource) == cls :
renameresource.name = resource.name
else :
renameresource.name = resource
return renameresource.rename_resource(client,new_name)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the responderpolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = responderpolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = responderpolicy()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [responderpolicy() for _ in range(len(name))]
obj = [responderpolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = responderpolicy()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of responderpolicy resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = responderpolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the responderpolicy resources configured on NetScaler.
"""
try :
obj = responderpolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of responderpolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = responderpolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
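# (Added sketch) Typical usage of this autogenerated wrapper; the nitro_client
# object, policy name, and rule expression below are hypothetical:
#   policy = responderpolicy()
#   policy.name = "block_path_policy"
#   policy.rule = 'HTTP.REQ.URL.CONTAINS("/blocked")'
#   policy.action = "RESET"
#   responderpolicy.add(nitro_client, policy)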
class responderpolicy_response(base_response) :
def __init__(self, length=1) :
self.responderpolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.responderpolicy = [responderpolicy() for _ in range(length)]
| [
"[email protected]"
] | |
4bc2b97cfdf5ecd84e54794669f4b1629022175a | d3efc82dfa61fb82e47c82d52c838b38b076084c | /utils/insertOrder.py | 85952c6096e1e1cff45f6714581d1c7d9b599c2b | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from QueryStkPriceQty import *
import time
a = []
i = 0
def insertOrder(order_client_id):
case_goal = {
'case_ID': 'ATC-103-19',
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
stkparm = QueryStkPriceQty('999999', '2', '0', '2', '0', 'B', case_goal['期望状态'], Api)
wt_reqs = {
'business_type':Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
'price': stkparm['涨停价'],
'quantity': 200
}
wt_reqs['order_client_id'] = order_client_id
Api.trade.InsertOrder(wt_reqs)
# Order pagination query
def test_orderpage():
    def pagedate(data, req_count, order_sequence, query_reference, request_id, is_last):
        #print data,is_last
        global i
        for k in data.keys():
            if 'order_cancel_xtp_id' in k:
                i += 1
        a.append(i)
    Api.trade.setQueryOrderByPageHandle(pagedate)
    Api.trade.QueryOrdersByPage({'req_count':13,'reference':198})
    time.sleep(0.5)
    rs = a[-1]
    assert rs == 3  # was self.assertEqual, but this is a module-level function, not a TestCase method
# Trade pagination query
def test_tradepage():
def pagedate(data, req_count, trade_sequence, query_reference, request_id, is_last):
print data,is_last
Api.trade.setQueryTradeByPageHandle(pagedate)
Api.trade.QueryTradesByPage({'req_count':10,'reference':0})
time.sleep(0.5)
if __name__ == '__main__':
'''
for i in range(100):
order_client_id = i+1
#print order_client_id
Api.trade.Login()
insertOrder(order_client_id)
'''
#test_orderpage()
test_tradepage()
| [
"[email protected]"
] | |
9f80c09591aba84706bbcb6441e4cdc592592d31 | a425842a51deab915fc4319b3226cef3f49e53ea | /build/extriPACK/industrial_calibration/rgbd_depth_correction/catkin_generated/pkg.installspace.context.pc.py | 52826087801e42a5ebac29a053ae0e8b3d52ccb0 | [] | no_license | Sinchiguano/Part-Localization-For-Robotic-Arm | 1458204e52f34354cbd0e8e1bff1dfaf6caefe1c | ebc1ed19da171ff4b5a52a3a031ae3049b0b9eb8 | refs/heads/master | 2021-10-08T19:49:53.455680 | 2018-12-16T20:03:04 | 2018-12-16T20:03:04 | 155,774,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/usr/include;/usr/local/include;/usr/include/eigen3".split(';') if "/usr/include;/usr/local/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "cv_bridge;geometry_msgs;industrial_extrinsic_cal;message_filters;nodelet;pcl_conversions;pcl_msgs;pcl_ros;pluginlib;roscpp;sensor_msgs;std_srvs;target_finder;tf;tf_conversions".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so;/usr/local/lib/libceres.a".split(';') if "/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so;/usr/local/lib/libceres.a" != "" else []
PROJECT_NAME = "rgbd_depth_correction"
PROJECT_SPACE_DIR = "/home/casch/yumi_ws/install"
PROJECT_VERSION = "0.1.0"
| [
"[email protected]"
] | |
1ec1082c420c57632e1d8fbdbff3c24e3f426d14 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /yubinbai/pcuva-problems/UVa 11262 - Weird Fence/EdmondsKarp.py | a3cd3aa934f2d8f5f06832cbe4c94dceea41b641 | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | '''
Created on 2013-6-24
@author: Yubin Bai
'''
from _collections import deque
INF = 1 << 32
def edmondsKarp(graph, s, t):
def augmentPath(v, minEdge):
if (v == s): # managed to get back to source
f[0] = minEdge # minEdge of the path
return
elif (v in p): # augment if there is a path
# we need AdjMat for fast lookup here
augmentPath(p[v], min(minEdge, graph[p[v]][v]))
graph[p[v]][v] -= f[0] # forward edges -> decrease
graph[v][p[v]] += f[0] # backward edges -> increase
p = {} # parent map to reconstruct path
f = [0] # global variables, use list as mutable
max_flow = 0
while True: # this will be run max O(VE) times
f[0] = 0
q = deque()
dist = {s: 0} # O(E) BFS and record path p
q.append(s)
while q:
u = q.popleft() # queue: layer by layer!
if (u == t):
break # modification 1: reach sink t, stop BFS
for v in graph[u]: # for each neighbors of u
# modification 2: also check AdjMat as edges may disappear
if graph[u][v] > 0 and v not in dist:
dist[v] = dist[u] + 1 # then v is reachable from u
q.append(v) # enqueue v for next steps
p[v] = u # modification 3: parent of v->first is u
augmentPath(t, INF) # path augmentation in O(V)
if (f[0] == 0):
break # seems that we cannot pass any more flow
max_flow += f[0]
return max_flow
if __name__ == '__main__':
graph = {1: {1: 0, 2: 0, 3: 70, 4: 30}, 3: {1: 0, 2: 25, 3: 0, 4: 5},
4: {1: 0, 2: 70, 3: 0, 4: 0}, 2: {1: 0, 2: 0, 3: 0, 4: 0}}
max_flow = edmondsKarp(graph, 1, 2)
print("Max flow = %d\n" % max_flow)
| [
"[email protected]"
] | |
2b0ced7fa82699bf40379314a33e83ddcdf35160 | 7e9c0243c48bbf0ddca9779ef03fc13bb9ac0496 | /candle.py | ef7ed0eabce07b078b04bab06a40c9c69cbbb75e | [] | no_license | suchismitarout/tt | c47f1f59659d2678392e2f0c3aaee8cfaa147ff4 | 54a5b625a82dab854b679050d67e340e74d71edd | refs/heads/master | 2020-09-16T20:25:34.146741 | 2019-11-25T06:52:07 | 2019-11-25T06:52:07 | 223,880,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | def birthdayCakeCandles(ar):
max_ele = ar[0]
count = 0
for i in range(len(ar)):
if ar[i] > max_ele:
max_ele = ar[i]
for j in ar:
if j == max_ele:
count +=1
return count
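# (Added note) equivalent one-liner: ar.count(max(ar))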
candle = birthdayCakeCandles([44,53,31,27,77,60,66,77,26,36])
print(candle) | [
"[email protected]"
] | |
b6e1772e50d34d8983fa946e90fc6fe835b3c834 | 7bdaa27c14549db8ecd13d055cfbd43bbfd69d9a | /book_rest_api/book_rest_api/urls.py | 9bc7c2b763cded18056ef8acfb50445b2ee24dcd | [] | no_license | Ksieciu/Book-Rest-API | 860158266e55a36a57f9cd0d0f7c99233dc53a4c | 9fb5e13c86e6fb5c07fb84dded78dd906986600a | refs/heads/main | 2023-02-17T20:12:02.356460 | 2021-01-04T10:42:14 | 2021-01-04T10:42:14 | 306,314,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | """book_rest_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .views import books_list_redirect_view
urlpatterns = [
path('', books_list_redirect_view),
path('admin/', admin.site.urls),
path('api/', include('books.api.urls')),
]
| [
"[email protected]"
] | |
36c872660985e40574ed8cd4d854c3dbefe2099d | d001abba19711d678f2ba09dfbd5c84357be6bb0 | /src/contest/codeforces/339div2/B/main.py | 9cbb98053c0a27ee12cd1b6a0d3180ed2d02074c | [] | no_license | cormoran/CompetitiveProgramming | 89f8b3ceda97985d32b8cd91056b49abeb243e6f | fa0e479ab299f53984fa7541d088c10c447fb6e4 | refs/heads/master | 2020-04-17T19:59:49.724498 | 2020-03-28T15:46:26 | 2020-03-28T15:46:26 | 65,995,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | #!/usr/bin/env python3
n = int(input())
a = list(map(int,input().split()))
ans = 1
for i in a:
ans *= i
print(ans)
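# (Added note) on Python 3.8+, math.prod(a) computes the same product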
| [
"[email protected]"
] | |
3630fd00235c8d64e6fa8c41cb6b0031acc8d051 | 996967405d3ee07e011ee0f0404d03b6d04d3492 | /dataloader/get_coco/select_image.py | e4e45d53306e8d53996618fd3de1138d855286eb | [] | no_license | wyyy04/MyRepository | 797936fc757a2eee4793d5b1b47ebf8b57216ab8 | 91f1a7ff969e91d9649b96796c5827c9910a8183 | refs/heads/main | 2023-02-22T09:56:21.926013 | 2021-01-27T15:34:00 | 2021-01-27T15:34:00 | 315,524,193 | 0 | 0 | null | 2020-11-24T07:30:05 | 2020-11-24T05:05:28 | null | UTF-8 | Python | false | false | 504 | py | from readtxt import loadDataset
import os
import shutil
# Select from the COCO training set all images used for training and testing in motivations_clean
rdir = 'D:\download\\train2014'  # source directory
odir = 'D:\data'  # target directory
data = loadDataset()
data = data[:,0]
print(data)
for im_name in data:
    print(im_name)  # file name
    r = os.path.join(rdir, im_name)
    o = os.path.join(odir, im_name)  # full paths of the source & target files
    print(r, o)
    shutil.copy(r, o)  # copy the file to the target path; use shutil.move to move instead
| [
"[email protected]"
] | |
5418cdc24aab8459761ed1b04e45bfb5f5f36b94 | 327befeb9bbb8aee75c24c5ef78d859f35428ebd | /src/python/grpcio/grpc/framework/crust/_calls.py | bff940d74710da3a6be3a2c099cf8e661f51f910 | [
"BSD-3-Clause"
] | permissive | CharaD7/grpc | 33b64f8eabf09014b1bc739b77809aed7a058633 | 062ad488881839d2637b7a191ade5b87346b4597 | refs/heads/master | 2023-07-08T19:36:00.065815 | 2016-01-04T17:28:15 | 2016-01-04T17:28:15 | 49,012,756 | 1 | 0 | BSD-3-Clause | 2023-07-06T01:32:59 | 2016-01-04T17:39:11 | C | UTF-8 | Python | false | false | 8,621 | py | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility functions for invoking RPCs."""
from grpc.framework.crust import _control
from grpc.framework.interfaces.base import utilities
from grpc.framework.interfaces.face import face
_ITERATOR_EXCEPTION_LOG_MESSAGE = 'Exception iterating over requests!'
_EMPTY_COMPLETION = utilities.completion(None, None, None)
def _invoke(
end, group, method, timeout, protocol_options, initial_metadata, payload,
complete):
rendezvous = _control.Rendezvous(None, None)
subscription = utilities.full_subscription(
rendezvous, _control.protocol_receiver(rendezvous))
operation_context, operator = end.operate(
group, method, subscription, timeout, protocol_options=protocol_options,
initial_metadata=initial_metadata, payload=payload,
completion=_EMPTY_COMPLETION if complete else None)
rendezvous.set_operator_and_context(operator, operation_context)
outcome = operation_context.add_termination_callback(rendezvous.set_outcome)
if outcome is not None:
rendezvous.set_outcome(outcome)
return rendezvous, operation_context, outcome
def _event_return_unary(
receiver, abortion_callback, rendezvous, operation_context, outcome, pool):
if outcome is None:
def in_pool():
abortion = rendezvous.add_abortion_callback(abortion_callback)
if abortion is None:
try:
receiver.initial_metadata(rendezvous.initial_metadata())
receiver.response(next(rendezvous))
receiver.complete(
rendezvous.terminal_metadata(), rendezvous.code(),
rendezvous.details())
except face.AbortionError:
pass
else:
abortion_callback(abortion)
pool.submit(_control.pool_wrap(in_pool, operation_context))
return rendezvous
def _event_return_stream(
receiver, abortion_callback, rendezvous, operation_context, outcome, pool):
if outcome is None:
def in_pool():
abortion = rendezvous.add_abortion_callback(abortion_callback)
if abortion is None:
try:
receiver.initial_metadata(rendezvous.initial_metadata())
for response in rendezvous:
receiver.response(response)
receiver.complete(
rendezvous.terminal_metadata(), rendezvous.code(),
rendezvous.details())
except face.AbortionError:
pass
else:
abortion_callback(abortion)
pool.submit(_control.pool_wrap(in_pool, operation_context))
return rendezvous
def blocking_unary_unary(
end, group, method, timeout, with_call, protocol_options, initial_metadata,
payload):
"""Services in a blocking fashion a unary-unary servicer method."""
rendezvous, unused_operation_context, unused_outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, payload,
True)
if with_call:
return next(rendezvous), rendezvous
else:
return next(rendezvous)
def future_unary_unary(
end, group, method, timeout, protocol_options, initial_metadata, payload):
"""Services a value-in value-out servicer method by returning a Future."""
rendezvous, unused_operation_context, unused_outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, payload,
True)
return rendezvous
def inline_unary_stream(
end, group, method, timeout, protocol_options, initial_metadata, payload):
"""Services a value-in stream-out servicer method."""
rendezvous, unused_operation_context, unused_outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, payload,
True)
return rendezvous
def blocking_stream_unary(
end, group, method, timeout, with_call, protocol_options, initial_metadata,
payload_iterator, pool):
"""Services in a blocking fashion a stream-in value-out servicer method."""
rendezvous, operation_context, outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, None,
False)
if outcome is None:
def in_pool():
for payload in payload_iterator:
rendezvous.consume(payload)
rendezvous.terminate()
pool.submit(_control.pool_wrap(in_pool, operation_context))
if with_call:
return next(rendezvous), rendezvous
else:
return next(rendezvous)
else:
if with_call:
return next(rendezvous), rendezvous
else:
return next(rendezvous)
def future_stream_unary(
end, group, method, timeout, protocol_options, initial_metadata,
payload_iterator, pool):
"""Services a stream-in value-out servicer method by returning a Future."""
rendezvous, operation_context, outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, None,
False)
if outcome is None:
def in_pool():
for payload in payload_iterator:
rendezvous.consume(payload)
rendezvous.terminate()
pool.submit(_control.pool_wrap(in_pool, operation_context))
return rendezvous
def inline_stream_stream(
end, group, method, timeout, protocol_options, initial_metadata,
payload_iterator, pool):
"""Services a stream-in stream-out servicer method."""
rendezvous, operation_context, outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, None,
False)
if outcome is None:
def in_pool():
for payload in payload_iterator:
rendezvous.consume(payload)
rendezvous.terminate()
pool.submit(_control.pool_wrap(in_pool, operation_context))
return rendezvous
def event_unary_unary(
end, group, method, timeout, protocol_options, initial_metadata, payload,
receiver, abortion_callback, pool):
rendezvous, operation_context, outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, payload,
True)
return _event_return_unary(
receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
def event_unary_stream(
end, group, method, timeout, protocol_options, initial_metadata, payload,
receiver, abortion_callback, pool):
rendezvous, operation_context, outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, payload,
True)
return _event_return_stream(
receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
def event_stream_unary(
end, group, method, timeout, protocol_options, initial_metadata, receiver,
abortion_callback, pool):
rendezvous, operation_context, outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, None,
False)
return _event_return_unary(
receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
def event_stream_stream(
end, group, method, timeout, protocol_options, initial_metadata, receiver,
abortion_callback, pool):
rendezvous, operation_context, outcome = _invoke(
end, group, method, timeout, protocol_options, initial_metadata, None,
False)
return _event_return_stream(
receiver, abortion_callback, rendezvous, operation_context, outcome, pool)
| [
"[email protected]"
] | |
99322f6227003e759ed09c79ea1c5a528744cc15 | d54e1b89dbd0ec5baa6a018464a419e718c1beac | /Python from others/字典/wk_05_字符串的定义.py | 3baed27ffc0bce82ef9cf4051ead9b8ab46134e4 | [] | no_license | cjx1996/vscode_Pythoncode | eda438279b7318e6cb73211e26107c7e1587fdfb | f269ebf7ed80091b22334c48839af2a205a15549 | refs/heads/master | 2021-01-03T19:16:18.103858 | 2020-05-07T13:51:31 | 2020-05-07T13:51:31 | 240,205,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | string = "hello world"
for c in string:
print(c) | [
"[email protected]"
] | |
716119ca0680e969a5c9b15d2f93c196e377873b | 7b4e9342d42be2b55af5dc23a8abedd672d68e99 | /MobileApps/libs/flows/web/jweb/eventing_plugin.py | 83d4c6d1bad5eef4658ff26f41ebc08452999a87 | [] | no_license | Amal548/QAMA | af5bb335c92a90b461f1ee9a3870435d83d46802 | b5230c51d3bc7bb04b3448d1a1fe5a076d8898d5 | refs/heads/master | 2023-07-12T09:17:04.624677 | 2021-08-06T08:01:11 | 2021-08-06T08:01:11 | 389,595,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | from MobileApps.libs.flows.web.jweb.jweb_flow import JwebFlow
import json
class EventingPlugin(JwebFlow):
flow_name = "eventing_plugin"
########################################################################################################################
# #
# ACTION FLOWS #
# #
########################################################################################################################
def select_eventing_dispatch_open(self):
"""
clicks the eventing dispatch open item
:return:
"""
self.driver.click("eventing_dispatch_open_item")
    def select_eventing_dispatch_close(self):
        """
        clicks the eventing dispatch close item
        :return:
        """
        self.driver.click("eventing_dispatch_close_item")
def select_eventing_plugin_test(self):
"""
clicks the eventing plugin test button
:return:
"""
self.driver.swipe(direction="up")
self.driver.click("eventing_test_button")
def eventing_test_result(self):
"""
:return: eventing test result text
"""
return self.driver.wait_for_object("eventing_test_result_txt").text
def add_listener_multiple_event_results(self):
"""
:return: add multiple event result text
"""
return self.driver.wait_for_object("multiple_event_result_text").text
def add_listener_event_result(self):
"""
:return: add listener test result
"""
return json.loads(self.driver.get_attribute(obj_name="add_listener_test_result_txt", attribute="value"))
def add_listener_test_result(self):
"""
:return: add listener test result text
"""
self.driver.swipe(direction="down")
return self.driver.wait_for_object("add_listener_test_result_text").text
def select_add_listener_pop_up_close_btn(self):
"""
clicks the add listener pop up close btn
:return:
"""
self.driver.click("add_listener_pop_up_close_btn")
def get_add_listener_pop_up_toast_text(self):
"""
:return: main and sub text found from the toast pop up notification
"""
pop_up_toast_text = {}
pop_up_toast_text['main_text'] = self.driver.wait_for_object("pop_up_toast_text", index=0).text
pop_up_toast_text['sub_text'] = self.driver.wait_for_object("pop_up_toast_text", index=1).text
return pop_up_toast_text
def select_add_listener_test_btn(self):
"""
clicks the add listener test btn
:return:
"""
self.driver.click("eventing_add_listener_btn")
def enter_add_listener_event(self, option):
"""
sends name of event listener in Eventing.addListener() tab
:param option:
:return:
"""
self.driver.send_keys("eventing_native_element_listener_field", option)
def enter_name_field(self,option):
"""
sends the name field
:param option:
:return:
"""
self.driver.send_keys("eventing_name_field", option)
def enter_data_field(self,option):
"""
sends the data field
:param option:
:return:
"""
self.driver.send_keys("eventing_data_field", option)
def select_jarvis_event_option_test(self):
"""
clicks the send jarvis event test btn
:return:
"""
self.driver.click("eventing_send_jarvis_test_btn")
def jarvis_event_option_test_result(self):
"""
:return: text after clicking jarvis event option test btn
"""
return self.driver.find_object("eventing_jarvis_options_test_result").text | [
"[email protected]"
] | |
5effb4f8168c2ae2b22c3d5bdf47fbc2371234a7 | 08c7f146d82da572731f6ad0fd7d96bd4553f3d8 | /backend/wispy_bread_26347/settings.py | 440dca6d8ada9cc66236256b5fe96e07ed38d97b | [] | no_license | crowdbotics-apps/wispy-bread-26347 | 9c7b081b280e709f6eb5dccd3d38e7be306c18a8 | 04532cb6c4ac227bd104c2210e9997cdc5ff530d | refs/heads/master | 2023-05-01T09:20:01.995863 | 2021-05-07T19:06:03 | 2021-05-07T19:06:03 | 365,329,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,117 | py | """
Django settings for wispy_bread_26347 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wispy_bread_26347.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wispy_bread_26347.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
9902ebd2e00cc805ec5bdc9703e6ca797ea372dc | 41ede4fd3bfba1bff0166bca7aee80dcf21434c6 | /suvari/gtk2chain/reverses/xcb-util/actions.py | 25adb86a956a71e443321f8a2ef6661d3e2d6833 | [] | no_license | pisilinux/playground | a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c | e4e12fff8a847ba210befc8db7e2af8556c3adf7 | refs/heads/master | 2022-08-12T23:03:27.609506 | 2022-08-11T18:28:19 | 2022-08-11T18:28:19 | 8,429,459 | 16 | 22 | null | 2022-08-11T18:28:20 | 2013-02-26T09:37:11 | Python | UTF-8 | Python | false | false | 572 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.autoreconf("-vif")
autotools.configure("--disable-static \
--with-pic")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("README")
| [
"[email protected]"
] | |
878a8d6f13a4d962da19b20180204a0a90f19306 | 74c368b2511fd62cb4f71db64bd728d0354d7191 | /refinenet/datasets.py | 6ea166c7a35a6e2ea5c30236b9881e9fa3bc3e65 | [] | no_license | nocotan/RefineNet | 318e8867eca263127e573323f0225934adcf77b8 | 05e5a465807016b913f1f2d58a14c0fdad72beed | refs/heads/master | 2021-04-03T06:07:40.295234 | 2018-03-20T14:48:44 | 2018-03-20T14:48:44 | 124,654,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | # -*- coding: utf-8 -*-
import os
import random
import cv2
import numpy as np
import PIL.Image
from chainer.dataset import dataset_mixin
class ImageDataset(dataset_mixin.DatasetMixin):
def __init__(self, data_dir, data_list, crop_size=(300, 300)):
self.data_dir = data_dir
self.data_list = os.path.join(self.data_dir, data_list)
self.crop_size = crop_size
self.crop_h = self.crop_size[0]
self.crop_w = self.crop_size[1]
self.img_ids = [i_id.strip() for i_id in open(self.data_list)]
self.files = []
for name in self.img_ids:
img_file = os.path.join(self.data_dir, "images/%s.jpg" % name)
label_file = os.path.join(self.data_dir, "labels/%s.png" % name)
self.files.append({
"image": img_file,
"label": label_file,
"name": name,
})
def __len__(self):
return len(self.files)
def generate_scale_label(self, image, label):
f_scale = 0.5 + random.randint(0, 11) / 10.0
image = cv2.resize(image, None, fx=f_scale, fy=f_scale,
interpolation=cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale,
interpolation=cv2.INTER_NEAREST)
return image, label
def get_example(self, i):
datafiles = self.files[i]
image = cv2.imread(datafiles["image"], cv2.IMREAD_COLOR)
label = np.asarray(PIL.Image.open(datafiles["label"]), dtype=np.int32)
image, label = self.generate_scale_label(image, label)
image = np.asarray(image, np.int32)
image -= (128, 128, 128)
img_h, img_w = label.shape
pad_h = max(self.crop_size[0] - img_h, 0)
pad_w = max(self.crop_size[1] - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(255,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image.transpose((2, 0, 1))
flip = np.random.choice(2) * 2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy()
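# Illustrative usage (hypothetical paths; the loader expects
# <data_dir>/images/<name>.jpg and <data_dir>/labels/<name>.png, with one
# image id per line in the data_list file):
#   ds = ImageDataset('data/voc', 'train.txt', crop_size=(300, 300))
#   image, label = ds.get_example(0)
#   # image: float32 (3, 300, 300); label: float32 (300, 300)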
| [
"[email protected]"
] | |
9935830816782ca4bbe14f5537a51ca72ff16bc6 | b109001ec3ca8aa4b2cfc4d4520d8644c58ad5e0 | /navigation/Mappers.py | e6b134df0a24b3ea97c7ed69c07d70c972f65cf3 | [] | no_license | Chandanpanda/navigation-benchmark | b3e25e3672150413299a3d2566ad601156317acf | d83431d6648ac1147f53056ed32ce2caae4f702d | refs/heads/master | 2021-10-24T04:42:56.436909 | 2019-01-31T12:43:48 | 2019-01-31T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,626 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import ceil,floor
import math
from .Reprojection import getMapSizeInCells, project2dPClIntoWorldMap, ReprojectLocal2Global
def DepthToLocal3D(depth, fx, fy, cx, cy):
r"""Projects depth map to 3d point cloud
with origin in the camera focus
"""
device = depth.device
h,w = depth.squeeze().size()
npts = h*w
x = torch.linspace(0, w-1, w).to(device)
y = torch.linspace(0, h-1, h).to(device)
xv, yv = torch.meshgrid([x, y])
dfl = depth.t().flatten()
return torch.cat([(dfl *(xv.flatten() - cx) / fx).unsqueeze(-1), #x
(dfl *(yv.flatten() - cy) / fy).unsqueeze(-1), #y
dfl.unsqueeze(-1)], dim = 1) #z
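# Shape sketch (fx, fy, cx, cy are camera intrinsics supplied by the caller):
#   depth: (H, W) tensor -> DepthToLocal3D(depth, fx, fy, cx, cy): (H*W, 3)
# Each row is an (x, y, z) point in the camera frame; z is the raw depth.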
def pointCloud2ObstaclesNonDifferentiable(pts3D,
map_size = 40,
cell_size = 0.2):
r"""Counts number of 3d points in 2d map cell
height is sum-pooled.
"""
device = pts3D.device
map_size_in_cells = getMapSizeInCells(map_size,cell_size) - 1
init_map = torch.zeros((map_size_in_cells,map_size_in_cells), device = device)
if len(pts3D) <= 1:
return init_map
num_pts,dim = pts3D.size()
pts2D = torch.cat([pts3D[:,2:3],pts3D[:,0:1]], dim = 1)
data_idxs = torch.round(project2dPClIntoWorldMap(pts2D, map_size, cell_size))
if len(data_idxs) > 10:
u, counts = np.unique(data_idxs.detach().cpu().numpy(), axis=0, return_counts = True)
init_map[u[:,0],u[:,1] ] = torch.from_numpy(counts).to(dtype=torch.float32, device=device)
return init_map
class DirectDepthMapper(nn.Module):
r"""Estimates obstacle map given the depth image
ToDo: replace numpy histogram counting with differentiable
pytorch soft count like in
https://papers.nips.cc/paper/7545-unsupervised-learning-of-shape-and-pose-with-differentiable-point-clouds.pdf
"""
def __init__(self,
#fx = 0,
#fy = 0,
#cx = 0,
#cy = 0,
camera_height = 0,
near_th = 0.1, far_th = 4.0, h_min = 0.0, h_max = 1.0,
map_size = 40, map_cell_size = 0.1,
device = torch.device('cpu'),
**kwargs):
super(DirectDepthMapper, self).__init__()
self.device = device
#self.fx = fx
#self.fy = fy
#self.cx = cx
#self.cy = cy
self.near_th = near_th
self.far_th = far_th
self.h_min_th = h_min
self.h_max_th = h_max
self.camera_height = camera_height
self.map_size_meters = map_size
self.map_cell_size = map_cell_size
return
def forward(self, depth, pose = torch.eye(4).float()):
self.device = depth.device
        # Works for FOV = 45 degrees in minos/sensors.yml; adjust if the FOV changes
self.fx = float(depth.size(1))# / 2.0
self.fy = float(depth.size(0))# / 2.0
self.cx = int(self.fx)//2 - 1
self.cy = int(self.fy)//2 - 1
pose = pose.to(self.device)
local_3d_pcl = DepthToLocal3D(depth, self.fx, self.fy, self.cx, self.cy)
idxs = (torch.abs(local_3d_pcl[:,2]) < self.far_th) * (torch.abs(local_3d_pcl[:,2]) >= self.near_th)
survived_points = local_3d_pcl[idxs]
if len(survived_points) < 20:
map_size_in_cells = getMapSizeInCells(self.map_size_meters,self.map_cell_size) - 1
init_map = torch.zeros((map_size_in_cells,map_size_in_cells), device = self.device)
return init_map
global_3d_pcl = ReprojectLocal2Global(survived_points, pose)[:,:3]
#Because originally y looks down and from agent camera height
global_3d_pcl[:,1] = -global_3d_pcl[:,1] + self.camera_height
idxs = (global_3d_pcl[:,1] > self.h_min_th) * (global_3d_pcl[:,1] < self.h_max_th)
global_3d_pcl = global_3d_pcl[idxs]
obstacle_map = pointCloud2ObstaclesNonDifferentiable(
global_3d_pcl,
self.map_size_meters,
self.map_cell_size)
return obstacle_map
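# Illustrative usage of DirectDepthMapper (values here are assumptions, not
# project settings; run from within the package so the relative import of
# .Reprojection resolves):
#   mapper = DirectDepthMapper(camera_height=1.0)
#   obstacle_map = mapper(torch.rand(120, 160) * 3.0)  # (H, W) depth in metres
#   # obstacle_map is square, getMapSizeInCells(40, 0.1) - 1 cells per side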
class SparseDepthMapper(nn.Module):
r"""Estimates obstacle map given the 3d points from ORBSLAM
Does not work well.
"""
def __init__(self,
fx = 0,
fy = 0,
cx = 0,
cy = 0,
camera_height = 0,
near_th = 0.1, far_th = 4.0, h_min = 0.0, h_max = 1.0,
map_size = 40, map_cell_size = 0.1,
device = torch.device('cpu'),
**kwargs):
super(SparseDepthMapper, self).__init__()
self.device = device
self.fx = fx
self.fy = fy
self.cx = cx
self.cy = cy
self.near_th = near_th
self.far_th = far_th
self.h_min_th = h_min
self.h_max_th = h_max
self.camera_height = camera_height
self.map_size_meters = map_size
self.map_cell_size = map_cell_size
return
def forward(self, sparse_depth, pose = torch.eye(4).float()):
global_3d_pcl = sparse_depth
#Because originally y looks down and from agent camera height
global_3d_pcl[:,1] = -global_3d_pcl[:,1]# + self.camera_height
idxs = (global_3d_pcl[:,1] > self.h_min_th) * (global_3d_pcl[:,1] < self.h_max_th)
global_3d_pcl = global_3d_pcl[idxs]
obstacle_map = pointCloud2ObstaclesNonDifferentiable(
global_3d_pcl,
self.map_size_meters,
self.map_cell_size)
return obstacle_map | [
"[email protected]"
] | |
d399b2d3a8ff12446dacbf96a4e46f7b8f5d2e92 | 52555a17cdb6058565696585c978c9012b0bfad7 | /examples/synthetic/park2_4/park2_4_mf.py | b8d64549b8c4f770d2f4fd70d7fcabdc1ba4bee4 | [
"MIT"
] | permissive | kirthevasank/dragonfly | 8685d6aff272bd262d9b47c455fc1f1dc77a42aa | 8e09d5ba602d14922455bf09bdd4ca0fa09ef3ee | refs/heads/master | 2020-05-02T00:38:35.252889 | 2019-05-17T03:40:23 | 2019-05-17T03:40:23 | 177,675,339 | 3 | 0 | MIT | 2019-05-06T04:07:41 | 2019-03-25T22:39:37 | Python | UTF-8 | Python | false | false | 489 | py | """
Parkd function with multi-fidelity.
-- [email protected]
"""
# pylint: disable=invalid-name
from park2_4 import park2_4_z
# Write a function like this called 'obj'.
def park2_4_mf(z, x):
""" Computes the Parkd function. """
return park2_4_z(z[0], x)
def objective(z, x):
""" Objective. """
return park2_4_mf(z, x)
def cost(z):
""" Cost function. """
return 0.05 + 0.95 * z[0]**1.5
def main(z, x):
""" main function. """
return park2_4_mf(z, x), cost(z)
| [
"[email protected]"
] | |
50b6850399802b4c26d8204b660c997e56c67b3b | b4e4cd7eae81f27b006fc28f79631db3e894572f | /Budgetsystem/Budgetsystem/urls (2021_07_03 17_35_37 UTC).py | fa7ccfa6a1aed7eb645c59bc59ef56cb1e81632c | [] | no_license | mariachacko93/recipe-budget-bill | effe369c7a873b7e59e4e22cacb7e247fb44bfa7 | 6ad231febe9f3c837536067a9ddd096a2ae6a2bf | refs/heads/master | 2023-06-18T06:41:02.536377 | 2021-07-14T05:54:39 | 2021-07-14T05:54:39 | 385,829,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | """Budgetsystem URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path("budget/",include("budget.urls")),
]
| [
"[email protected]"
] | |
1a2a244f5a7ffd2c4a3c4534e593dc75e9823e55 | 49b827bb587d50c5092837749a7d5b88c024e854 | /experiments/ACOSlite/HDF5_to_GeoJSON.py | 722867db0ee2e86786f2b64806e22f0365deda70 | [] | no_license | SpaceAppsXploration/oco-2-data-network | 7d836bf77cf79a5aac1cd22b02c75af316432b56 | 7d1fd709c7c219c83b7ea9f8075f7df46b460f23 | refs/heads/master | 2020-12-11T05:43:45.979066 | 2015-07-18T08:56:29 | 2015-07-18T08:56:29 | 34,137,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 15:23:47 2015
@author: jacopo
"""
import json
from pprint import pprint
import h5py
#
# TO DOs
#
# 1. Add the reference to Sensors ontology
# ACOS LITE file in the same directory
f = h5py.File('ACOSv3.4r02_L3_20100101_000000_20130515_000000.h5', libver='earliest')
xco2 = f['xco2']
lon = f['lon']
lat = f['lat']
lon_bnds = f['lon_bnds']
lat_bnds = f['lat_bnds']
xco2_set = xco2[0,0,0,:]
geo = {"type" : "FeatureCollection",
"features" : [
{
"type" : "Feature",
"geometry" : {"type": "Point",
"coordinates" : [lat[0], lon[0]]
}
},
{
"type" : "Feature",
"geometry" : {
"type" : "polygon",
"coordinates" : [
[
lon_bnds[0,0],
lat_bnds[0,0]
],
[
lon_bnds[0,0],
lat_bnds[0,1]
],
[
lon_bnds[0,1],
lat_bnds[0,0]
],
[
lon_bnds[0,1],
lat_bnds[0,1]
]
]
},
"properties": {
"xco2" : xco2_set[12]
}
}
]
}
#with open('geo.json', 'w') as outfile:
#json.dump(geo, outfile)
# print a JSON with the quantity of xco2 for the given geometry
print(json.dumps(geo, indent=4))
| [
"[email protected]"
] | |
ff20f97e522dad036e7df019b8c4e0a5caae626a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_unguents.py | 87d4634aa61496578132ed4c4606ab4ff28ddf79 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._unguent import _UNGUENT
#calss header
class _UNGUENTS(_UNGUENT, ):
def __init__(self,):
_UNGUENT.__init__(self)
self.name = "UNGUENTS"
self.specie = 'nouns'
self.basic = "unguent"
self.jsondata = {}
| [
"[email protected]"
] | |
58b2baef07663c5e82c8e96e9e9e199a40108943 | af685f9625dc3fc1892171df396ed46155caa092 | /WORC/resources/fastr_tools/worc/bin/FeatureConverter_tool.py | 84635983ccc0a62c9a1aa63c19be4a548ed16b53 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | MStarmans91/WORC | b66d7de70e2f3acab5100a3431855216b31bd7b1 | f267b3d05c8193939aa4f43e47c6e24f9307864e | refs/heads/master | 2023-08-17T14:02:29.566811 | 2023-08-15T08:58:42 | 2023-08-15T08:58:42 | 92,295,542 | 65 | 20 | NOASSERTION | 2023-08-15T08:58:44 | 2017-05-24T13:31:31 | Python | UTF-8 | Python | false | false | 2,404 | py | #!/usr/bin/env python
# Copyright 2017-2020 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from WORC.featureprocessing.FeatureConverter import FeatureConverter
def main():
parser = argparse.ArgumentParser(description='Radiomics classification')
parser.add_argument('-feat_in', '--feat_in', metavar='feat_in',
nargs='+', dest='feat_in', type=str, required=True,
help='Patient features input of first modality (HDF)')
parser.add_argument('-toolbox', '--toolbox', metavar='toolbox', nargs='+',
dest='toolbox', type=str, required=True,
help='Toolbox used for feature calculation')
parser.add_argument('-cf', '--conf', metavar='config', nargs='+',
dest='cf', type=str, required=True,
help='Configuration')
parser.add_argument('-feat_out', '--feat_out', metavar='feat_out',
nargs='+', dest='feat_out', type=str, required=True,
default=None,
                        help='Converted patient features output (HDF)')
args = parser.parse_args()
# Convert several input arguments from lists to strings
if type(args.feat_in) is list:
args.feat_in = ''.join(args.feat_in)
if type(args.toolbox) is list:
args.toolbox = ''.join(args.toolbox)
if type(args.cf) is list:
args.cf = ''.join(args.cf)
if type(args.feat_out) is list:
args.feat_out = ''.join(args.feat_out)
# Run converter
FeatureConverter(feat_in=args.feat_in,
toolbox=args.toolbox,
config=args.cf,
feat_out=args.feat_out)
if __name__ == '__main__':
main()
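# Example invocation (hypothetical file names; the toolbox value is a
# placeholder — the flags mirror the arguments defined above):
#   python FeatureConverter_tool.py -feat_in feats.hdf5 -toolbox PREDICT \
#       -cf config.ini -feat_out feats_converted.hdf5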
| [
"[email protected]"
] | |
90f4696555e174ef011d383417a37633f1b0867b | 54d2887e3c910f68366bd0aab3c692d54245e22a | /abc/abc_042_125/abc089/a.py | 0311aa625dd7cd2c28dad38a37460155d16513a9 | [] | no_license | Kevinrobot34/atcoder | 7aec367fd2c6b589e9d583dae7b3c7520ce9fa12 | 482ea508f098f81e4f19522fe518dd22c781aca9 | refs/heads/master | 2022-07-10T23:44:45.290022 | 2022-06-29T11:30:26 | 2022-06-29T11:30:26 | 158,081,477 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | n = int(input())
print(n // 3)
| [
"[email protected]"
] | |
4b68733a5da1facd4daa9d36b3eafb06d1b7bea2 | 79a484e91a8df432a0ded93806a1e8237df7c253 | /umibukela/migrations/0020_auto_20170124_1443.py | 03d19703ba05730c59fd74bd2588eed73576e207 | [
"MIT"
] | permissive | OpenUpSA/umibukela | 7ba14397ad543154d3a32ebfd84e89aa07f7011e | 34c1a29a429b88c2f574e9120cfe93ba524633da | refs/heads/master | 2023-07-26T19:45:12.531887 | 2023-07-10T15:53:07 | 2023-07-10T15:53:07 | 47,106,932 | 0 | 0 | MIT | 2023-02-02T01:36:59 | 2015-11-30T09:03:27 | Python | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('umibukela', '0019_auto_20170124_1252'),
]
operations = [
migrations.AlterField(
model_name='cycleresultset',
name='monitors',
field=models.ManyToManyField(help_text=b"Only monitors for the current partner are shown. If you update the Partner you'll have to save and edit this Cycle Result Set again to see the available monitors.", to='umibukela.Monitor', blank=True),
),
]
| [
"[email protected]"
] | |
d44ba106ea8aff1d8cf7dd57c7ddf30bbbeb3023 | aebacedc43afabf8ce54bb25f4cbe040441dcba4 | /appscripts/appscripts-acer-120311/prefcns13.py | 18829a38e63f5364d6b331c5b7b1cc4b9e340e4e | [] | no_license | swanandgore/rappertk | 84e968447597494645ac0c9868358fc6a194197a | d1a5d5e0d096dfc23237e29bfd983183ca1e2fbd | refs/heads/master | 2020-05-17T07:59:43.613762 | 2014-08-20T12:13:56 | 2014-08-20T12:13:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,302 | py |
import os, shutil, re
import geometry
from xray import cif2mtz, uniqueify, sfall, mtz2hkl, cns_generate, cns_anneal, sgCCP4toCNS, fft, omitmap, mapman
from procrun import proc_run_exitOnError as execCmd
from xcheck import XrayScorer, XrayRanker
from data import sgtable
from evalCAtrace import comparePhiPsiOmegaChi
from pdbr import protein, isAAres
import prot2res
from pref import removeZeroLines
from pref import fixCNSop
from data import sgtable , long2shortHM
from scplacement import SCplacement
from loopbuild import Multiloop
import prepareChain
from stump import getCRYST , getRESO
ccp4args = {
0: [{"reftype":"restrained", "wa":0.20, "breftype":"ISOTROPIC", "ncyc":20}], #on native
1: [{"reftype":"unrestrained", "wa":0.75, "breftype":"OVER", "ncyc":20, "assignBfac":[20,30]}, #on catrace
{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":40}], #on catrace
2: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
3: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
4: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
5: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
6: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[60,90]}],
7: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
8: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
9: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
10: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
11: [{"reftype":"restrained", "wa":0.75, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
12: [{"reftype":"restrained", "wa":0.50, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
13: [{"reftype":"restrained", "wa":0.20, "breftype":"ISOTROPIC", "ncyc":20, "assignBfac":[15,25]}],
14: [{"reftype":"restrained", "wa":0.20, "breftype":"ISOTROPIC", "ncyc":40, "assignBfac":[ 5, 6]}],
}
cnsArgs = {}
for cycle in range(20) : cnsArgs[cycle] = {} ; cnsArgs[cycle]["num_cycles"] = 2 ; cnsArgs[cycle]["temperature"] = 5000
#cnsArgs[0]["wa"] = -1 ; cnsArgs[0]["num_cycles"] = 1 ; cnsArgs[0]["temperature"] = 50
#cnsArgs[1]["wa"] = -1 ; cnsArgs[1]["num_cycles"] = 1 ; cnsArgs[1]["temperature"] = 50
def cnsRefinement(mtzin,pdbin, mtzout,pdbout, a,b,c,alpha,beta,gamma,sg,reso, cnsArgs,cycle, extraTOPfile=None, extraPARfile=None) :
mtz2hkl(mtzin, "cns.hkl")
cns_generate(pdbin, "generate.mtf", "generate.pdb", extraTOPfile, extraPARfile, "generate.log")
removeZeroLines("generate.pdb") ## ???
wa = -1 ; harmCA = None
if cnsArgs[cycle].has_key("harmCA") and cnsArgs[cycle]["harmCA"] != None : harmCA = 1
cns_anneal(a, b, c, alpha, beta, gamma, sgCCP4toCNS[sg], reso,
"cns.hkl", "generate.mtf", "generate.pdb", extraPARfile, "anneal%d.log"%cycle, wa, cnsArgs[cycle]["num_cycles"], cnsArgs[cycle]["temperature"], harmCA)
removeZeroLines("anneal.pdb") ## ???
fixCNSop("anneal.pdb")
os.rename("anneal.pdb", pdbout)
sfall(pdbout, "rfree.mtz", mtzout, reso)
mapman("anneal_2fofc.map", mtzout+"2fofc.map")
mapman("anneal_fc.map", mtzout+"fc.map")
#moleman(pdbout)
def main() :
import optparse ; parser = optparse.OptionParser()
parser.add_option("--dir-xyzout", action='store', type='string', dest='dir_xyzout', help='to create all the files during refinement. it shdnt be already present.')
parser.add_option("--xyzin", action='store', type='string', dest='pdbfile', help='starting pdb containing a model of pdb-ligand complex')
parser.add_option("--hklin", action='store', type='string', dest='sf', help='structure factors file')
parser.add_option("--a", action='store', type='float', dest='a', help='cell dimension a')
parser.add_option("--b", action='store', type='float', dest='b', help='cell dimension b')
parser.add_option("--c", action='store', type='float', dest='c', help='cell dimension c')
parser.add_option("--alpha", action='store', type='float', dest='alpha', help='cell angle alpha')
parser.add_option("--beta", action='store', type='float', dest='beta', help='cell angle beta')
parser.add_option("--gamma", action='store', type='float', dest='gamma', help='cell angle gamma')
parser.add_option("--sg", action='store', type='string', dest='sg', help='cell spacegroup, in CCP4 notation')
parser.add_option("--resolution", action='store', type='float', dest='resolution', help='resolution of the data')
parser.add_option("--use-ca-restraints", action='store', dest='caRes', help='[True/False], Apply positional restraints on the C-alpha atoms',default="True")
parser.add_option("--use-sc-restraints", action='store', dest='scRes',type= 'string', help='[True/False], Apply positional restraints on the centroid of the sidechain atoms',default="True",)
parser.add_option("--ca-restraint-radius", action='store', type='float', dest='caRad', help='radius of spherical restraint on CA position', default=1)
parser.add_option("--sc-centroid-restraint-radius", action='store', type='float', dest='scRad', help='radius of spherical restraint on sidechain centroid', default=2)
parser.add_option("--sidechain-vdw-reduction", action='store', type='float', dest='scReduction', help='factor to reduce effective vdw dist in case of sidechains', default= 0.75)
parser.add_option("--population-size", action='store', type='int', dest='popsize', help='population size for PopulationStrategy', default=100)
parser.add_option("--verbose", action='store', type='int', dest='verbose', help='0 means least verbosity etc.', default=0)
parser.add_option("--backtrack", action='store', type='string', dest='backtrack', help='use backtracking version of PopulationStrategy. eg 4X5 will set backtrack numsteps and stepsize to 4,5 respectively. not used by default.', default=None)
parser.add_option("--rotamerlib", action='store', type='string', dest='rotLib', help='[PRL/SCL1.0/SCL0.5/SCL0.2] Name of rotamer library to use when building side chains ', default='SCL1.0')
parser.add_option("--add-sidechains", action='store', type='string', dest='addsc', help='Build missing side chains ', default='False')
parser.add_option("--use-given-rotamer", action='store', type='string', dest='userot', help='Use given rotamer', default='False')
parser.add_option("--randomize", action='store', type='int', dest='randomize', help='seed for randomizing', default=None)
parser.add_option("--mconly", action='store', type='string', dest='mconly', help='[True/False] Build mainchain only', default="False")
parser.add_option("--sconly", action='store', type='string', dest='sconly', help='[True/False] Build side chains only, can only be used when MAP/MTZ file is given. See web page for further details', default="False")
parser.add_option("--opsax", action='store', type='string', dest='opsax', help='[True/False] Reassign side chains with OPSAX, will only be used when MTZ or MAP file is given', default="True")
parser.add_option("--attempts", action='store', type='int', dest='natt', help='Number of attempts made to build section', default=5)
parser.add_option("--cacaCutoff", action='store', type='float', dest='cacaCutoff', help='Minimum distance ( angstrom ) between adjacent Calpha atoms in order to detect a chain-break', default=5.)
################# Electron density parameters ####################################
parser.add_option("--FP", action='store', type='string', dest='f1label', help='Column label for FP in MTZ file', default=None)
parser.add_option("--SIGFP", action='store', type='string', dest='sigf1label', help='Column label for sigFP in MTZ file', default=None)
parser.add_option("--FC", action='store', type='string', dest='f2label', help='Column label for FC in MTZ file', default=None)
parser.add_option("--PHIC", action='store', type='string', dest='phiclabel', help='Column label for PHIC in MTZ file', default=None)
parser.add_option("--use-FreeR", action='store', type='string', dest='usefreer', help='[True/False] Use FreeR set ? ', default="False")
parser.add_option("--FreeR", action='store', type='string', dest='freeRlabel', help='Column label for FreeR in MTZ file', default=None)
parser.add_option("--n", action='store', type='int', dest='n', help='Value of n for difference map calculations nFo-(n-1)Fc', default=2)
############# Residues to be modelled ####################################
parser.add_option("--rebuild-poor-regions-only", action='store', type='string', dest='poorOnly', help='[True/False] Rebuild regions ofinput structure with poor fit to an electron density map. Residues to be rebuilt are identified using a real space correlation coefficientscore, the cut-off for which is set using --poor-fit-threshold.', default="False")
parser.add_option("--poor-fit-threshold", action='store', type='float', dest='poorThreshold', help='Correlation coefficient threshold to identify poor fitting regions', default=0.9)
parser.add_option("--loopseq", action='store', type='string', dest='loopres', help='Amino acid sequence for loop to be built', default=None)
parser.add_option("--use-loopclosure-restraints", action='store', type='string', dest='closure', help='Use geometric restraints to ensure closure of loop with anchor residues', default= "True")
parser.add_option("--start", action='store', type='int', dest='start', help='Residue number to start building from ', default=None)
parser.add_option("--stop", action='store', type='int', dest='stop', help='Residue number to stop building at', default=None)
parser.add_option("--chainid", action='store', type='string', dest='chainid', help='Chain ID of section to be built.', default=None)
parser.add_option("--modelN2C", action='store', type='string', dest='modelN2C', help='[True/False] Model fragment without loop closure restraints. Used in conjunction with --start, --stop, --chainid. Requires --use-ca-restraints True ', default="False")
    ######### Output parameters #############################################
parser.add_option("--models-get-native-bfactors", action='store', type='string', dest='nativeBfac', help='[True/False] Assign B-factors of remodelled atoms to original values', default="False")
parser.add_option("--default-mainchain-b-factor", action='store', type='float', dest='mcBfac', help='The value of B-factor assigned to the newly built main chain atoms', default=20.)
parser.add_option("--default-sidechain-b-factor", action='store', type='float', dest='scBfac', help='The value of B-factor assigned to the newly built side chain atoms', default=30.)
    ### Electron density parameters #########################################
parser.add_option("--minimum-sig", action='store', type='float', dest='minXSig', help='Minimum sigma ', default=0.25)
parser.add_option("--maximum-sig", action='store', type='float', dest='maxXSig', help='Maximum sigma ', default=2.0)
########## Optional restraints ##########################################
parser.add_option("--make-ed-optional", action='store', type='string', dest='edOpt', help='[True/False] If False, then the mainchain will be unconditionally forced to lie in positive density. If True then positive density restraint on the mainchain will be made optional.This is useful when tracing through a structure with regions in very poor (non-existent) density', default= "False")
parser.add_option("--make-all-restraints-optional", action='store', type='string', dest='allOpt', help='[True / False ] If True, then all restraints will be made optional', default="False")
(options, args) = parser.parse_args()
if not os.path.isdir(options.dir_xyzout) : os.mkdir(options.dir_xyzout)
shutil.copyfile(options.pdbfile, "%s/0.model0.pdb" % options.dir_xyzout)
shutil.copyfile(options.sf, "%s/rfree.mtz" % options.dir_xyzout)
os.chdir(options.dir_xyzout)
if (options.a == None or options.b == None or options.c == None or options.alpha == None or options.beta == None or options.gamma == None) :
options.a,options.b,options.c,options.alpha , options.beta , options.gamma,d1 = getCRYST(options.pdbfile)
if (options.a == None or options.b == None or options.c == None or options.alpha== None or options.beta==None or options.gamma == None ):
print "CRYST card cannot be read from coordinate file. Please input cell paramater a, b , c , alpha, beta , gamma = ",options.a , options.b , options.c , options.alpha , options.beta , options.gamma
import sys ; sys.exit()
if options.sg == None :
d1,d2,d3,d4 , d5 , d6, options.sg = getCRYST(options.pdbfile)
if options.sg == None :
print "Please input space group " , options.sg ; import sys ; sys.exit()
ss = ""
for sg1 in options.sg:
if sg1 in ["\n","\t","\s"]:
continue
else :
ss = ss+sg1
options.sg = ss
if options.sg in long2shortHM.keys():
shortsg = long2shortHM[options.sg]
options.sg = shortsg
if options.sg not in sgtable.keys():
print "Check --sg , Not recognised [%s][%d]"%( options.sg, len(options.sg))
import sys ; sys.exit()
if options.resolution == None :
options.resolution = getRESO(options.pdbfile)
if (options.resolution == None):
print "Please input resolution " , options.resolution
import sys ; sys.exit()
numRefCycles = 20 ; startCycle = 0
for cycle in range(startCycle, numRefCycles) :
if cycle > 5 : userot = 1
else : userot = 0
xscorecutoff = options.poorThreshold
if options.sconly != 'True':
if cycle == 15 : options.scRad *= 2
#if cycle < 10 : xscorecutoff = 0.8
#else : xscorecutoff = 0.9
#if cycle == 0 :
# scvdwr = .75 ; options.popsize = 500
modelIn = "0.model%d.pdb" % cycle
cnsout = "cns%d.pdb" % cycle
rtkmodel = "model%d.pdb" % (cycle+1) # rappertk model to be generated in this cycle
if options.f2label != None and options.phiclabel != None and cycle == 0 :
shutil.copyfile("rfree.mtz", "phased.mtz")
else :
sfall(modelIn, "rfree.mtz", "phased.mtz")
phasedmtz = "phased%d.mtz" % cycle # phase the str factors with current model
#cnsphasedmtz = "phased%d.mtz" % cycle # phase the str factors with current model
if not os.path.isfile(cnsout) :
cnsRefinement("phased.mtz", modelIn, phasedmtz, cnsout,
options.a, options.b, options.c, options.alpha, options.beta, options.gamma, options.sg, options.resolution,
cnsArgs, cycle)
from pref13 import main as prefRapperMain
#sfall(cnsout, phasedmtz , cnsphasedmtz)
prefRapperMain(cnsout,rtkmodel,options.dir_xyzout,None,phasedmtz,options.caRes,options.scRes,options.caRad,options.scRad,options.scReduction,options.popsize,options.verbose,options.backtrack,options.rotLib,1,options.mconly,options.sconly,options.opsax,options.natt,options.cacaCutoff,options.a,options.b,options.c,options.alpha,options.beta,options.gamma,options.sg,options.resolution,options.f1label,options.sigf1label,"FC","PHIC",options.usefreer,options.freeRlabel,options.n,options.poorOnly,xscorecutoff,options.loopres,options.start,options.stop,options.chainid,options.modelN2C,options.nativeBfac,options.mcBfac,options.scBfac,options.minXSig,options.maxXSig,options.edOpt,options.allOpt,options.closure,options.addsc,options.userot,"cns")
# prefRapperMain(cnsout,rtkmodel,options.dir_xyzout,None,phasedmtz,options.caRes,options.scRes,options.caRad,options.scRad,scvdwr,popsize,options.verbose,options.backtrack,rotlib, 1 , "False", "False" , "True" , 5 , 5.0 ,options.a,options.b,options.c,options.alpha,options.beta,options.gamma,options.sg,options.resolution,"FP","SIGFP",None,None,"True","FreeR_flag",2,"True",xscoreCutoff,None,None,None,None,"False","False",20.0,30.0,0.25,2.0,"False","False")
if __name__ == "__main__" :
main()
import sys ; sys.exit(0)
from scplacement import SCplacement
import prepareChain
scPrepC, useGivenRot, useDEE = prepareChain.PrepareChain("PRL"), 1, 1
badresids = ["VAL 85 ", "ASP 86 ", "TYR 68 ", "TYR 90 ",],
SCplacement("premodel2.pdb", 0.5, "mmm.pdb", "dotfile", useDEE, "phased1.mtz2fofc.map", "FP", "FC", "PHIC", "2F1-F2", 0, 5, None, useGivenRot,
badresids, scPrepC).run()
import sys ; sys.exit(0)
replaceWaters("model1.pdb", "rtk0.map")
| [
"[email protected]"
] | |
2087f66359a6383aadf0b06ec31295815bc2ae13 | 2c8ed67a9e54b98a9b432f5a66287e4523497d65 | /python/hsreplay/elements.py | 26ca5f8871e7e77da22c26d41bcde04d629b64d6 | [
"MIT",
"Python-2.0",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | EvilNuff/HSReplay | 79915a87df182d3af3c4a7ed8fb3f9e84135e106 | 26fd02cbfbff7f5a6fec0573d227d3e1aff417bd | refs/heads/master | 2021-01-12T08:00:48.065962 | 2016-10-14T22:03:48 | 2016-10-14T22:03:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,857 | py | from dateutil.parser import parse as parse_timestamp
from hearthstone.hslog import packets
from .utils import ElementTree
def node_for_tagname(tag):
for k, v in globals().items():
if k.endswith("Node") and v.tagname == tag:
return v
raise ValueError("No matching node for tag %r" % (tag))
class Node(object):
attributes = ()
tagname = None
def __init__(self, *args):
self._attributes = {}
self.nodes = []
for k, arg in zip(("ts", ) + self.attributes, args):
setattr(self, k, arg)
def __repr__(self):
return "<%s>" % (self.__class__.__name__)
@classmethod
def from_xml(cls, xml):
if xml.tag != cls.tagname:
raise ValueError("%s.from_xml() called with %r, not %r" % (
cls.__name__, xml.tag, cls.tagname
))
ts = xml.attrib.get("ts")
if ts:
ts = parse_timestamp(ts)
ret = cls(ts)
for element in xml:
ecls = node_for_tagname(element.tag)
node = ecls.from_xml(element)
for attrname in ecls.attributes:
setattr(node, attrname, element.attrib.get(attrname))
ret.nodes.append(node)
return ret
def append(self, node):
self.nodes.append(node)
def xml(self):
element = ElementTree.Element(self.tagname)
for node in self.nodes:
element.append(node.xml())
for attr in self.attributes:
attrib = getattr(self, attr, None)
if attrib is not None:
if isinstance(attrib, bool):
attrib = str(attrib).lower()
elif isinstance(attrib, int):
# Check for enums
attrib = str(int(attrib))
element.attrib[attr] = attrib
if self.timestamp and self.ts:
element.attrib["ts"] = self.ts.isoformat()
for k, v in self._attributes.items():
element.attrib[k] = v
return element
class GameNode(Node):
tagname = "Game"
attributes = ("id", "reconnecting")
timestamp = True
packet_class = packets.PacketTree
@property
def players(self):
return self.nodes[1:3]
def export(self):
tree = self.packet_class(self.ts)
create_game = self.nodes[0].export()
for player in self.players:
create_game.players.append(player.export())
tree.packets.append(create_game)
for node in self.nodes[3:]:
tree.packets.append(node.export())
return tree
class GameEntityNode(Node):
tagname = "GameEntity"
attributes = ("id", )
timestamp = False
packet_class = packets.CreateGame
def export(self):
packet = self.packet_class(self.ts, int(self.id))
for node in self.nodes:
packet.tags.append(node.export())
return packet
class PlayerNode(Node):
tagname = "Player"
attributes = (
"id", "playerID", "accountHi", "accountLo", "name",
"rank", "legendRank", "cardback"
)
timestamp = False
packet_class = packets.CreateGame.Player
def export(self):
packet = self.packet_class(
self.ts, int(self.id), int(self.playerID),
int(self.accountHi), int(self.accountLo)
)
packet.name = self.name
for node in self.nodes:
if node.tagname == "Tag":
packet.tags.append(node.export())
return packet
def xml(self):
ret = super(PlayerNode, self).xml()
deck = getattr(self, "deck", None)
if deck is not None:
element = ElementTree.Element("Deck")
ret.append(element)
for card in deck:
e = ElementTree.Element("Card")
e.attrib["id"] = card
element.append(e)
return ret
class DeckNode(Node):
tagname = "Deck"
attributes = ()
timestamp = False
packet_class = None
class CardNode(Node):
tagname = "Card"
attributes = ("id", "premium")
timestamp = False
packet_class = None
class FullEntityNode(Node):
tagname = "FullEntity"
attributes = ("id", "cardID")
timestamp = False
packet_class = packets.FullEntity
def export(self):
packet = self.packet_class(self.ts, int(self.id), self.cardID)
for node in self.nodes:
packet.tags.append(node.export())
return packet
class ShowEntityNode(Node):
tagname = "ShowEntity"
attributes = ("entity", "cardID")
timestamp = False
packet_class = packets.ShowEntity
def export(self):
packet = self.packet_class(self.ts, int(self.entity), self.cardID)
for node in self.nodes:
packet.tags.append(node.export())
return packet
class BlockNode(Node):
tagname = "Block"
attributes = ("entity", "type", "index", "target")
timestamp = True
packet_class = packets.Block
def export(self):
index = int(self.index) if self.index is not None else -1
packet = self.packet_class(
self.ts, int(self.entity or 0), int(self.type), index,
None, None, int(self.target or 0)
)
for node in self.nodes:
packet.packets.append(node.export())
packet.ended = True
return packet
class MetaDataNode(Node):
tagname = "MetaData"
attributes = ("meta", "data", "info")
timestamp = False
packet_class = packets.MetaData
def export(self):
packet = self.packet_class(
self.ts, int(self.meta), int(self.data or 0), int(self.info)
)
for node in self.nodes:
packet.info.append(node.export())
return packet
class MetaDataInfoNode(Node):
tagname = "Info"
attributes = ("index", "entity")
timestamp = False
def export(self):
return int(self.entity)
class TagNode(Node):
tagname = "Tag"
attributes = ("tag", "value")
timestamp = False
def export(self):
return (int(self.tag), int(self.value))
class TagChangeNode(Node):
tagname = "TagChange"
attributes = ("entity", "tag", "value")
timestamp = False
packet_class = packets.TagChange
def export(self):
return self.packet_class(self.ts, int(self.entity), int(self.tag), int(self.value))
class HideEntityNode(Node):
tagname = "HideEntity"
attributes = ("entity", "zone")
timestamp = True
packet_class = packets.HideEntity
def export(self):
return self.packet_class(self.ts, int(self.entity), int(self.zone))
class ChangeEntityNode(Node):
tagname = "ChangeEntity"
attributes = ("entity", "cardID")
timestamp = True
packet_class = packets.ChangeEntity
def export(self):
packet = self.packet_class(self.ts, int(self.entity), self.cardID)
for node in self.nodes:
packet.tags.append(node.export())
return packet
##
# Choices
class ChoicesNode(Node):
tagname = "Choices"
attributes = ("entity", "id", "taskList", "type", "min", "max", "source")
timestamp = True
packet_class = packets.Choices
def export(self):
taskList = int(self.taskList) if self.taskList else None
packet = self.packet_class(
self.ts, int(self.entity or 0), int(self.id), taskList,
int(self.type), int(self.min), int(self.max)
)
packet.source = self.source
for node in self.nodes:
packet.choices.append(node.export())
return packet
class ChoiceNode(Node):
tagname = "Choice"
attributes = ("index", "entity")
timestamp = False
def export(self):
return int(self.entity)
class ChosenEntitiesNode(Node):
tagname = "ChosenEntities"
attributes = ("entity", "id")
timestamp = True
packet_class = packets.ChosenEntities
def export(self):
packet = self.packet_class(self.ts, int(self.entity), int(self.id))
for node in self.nodes:
packet.choices.append(node.export())
return packet
class SendChoicesNode(Node):
tagname = "SendChoices"
attributes = ("id", "type")
timestamp = True
packet_class = packets.SendChoices
def export(self):
packet = self.packet_class(self.ts, int(self.id), int(self.type))
for node in self.nodes:
packet.choices.append(node.export())
return packet
##
# Options
class OptionsNode(Node):
tagname = "Options"
attributes = ("id", )
timestamp = True
packet_class = packets.Options
def export(self):
packet = self.packet_class(self.ts, int(self.id))
for i, node in enumerate(self.nodes):
packet.options.append(node.export(i))
return packet
class OptionNode(Node):
tagname = "Option"
attributes = ("index", "entity", "type")
timestamp = False
packet_class = packets.Option
def export(self, id):
optype = "option"
packet = self.packet_class(self.ts, int(self.entity or 0), id, int(self.type), optype)
for i, node in enumerate(self.nodes):
packet.options.append(node.export(i))
return packet
class SubOptionNode(Node):
tagname = "SubOption"
attributes = ("index", "entity")
timestamp = False
packet_class = packets.Option
def export(self, id):
optype = "subOption"
type = None
packet = self.packet_class(self.ts, int(self.entity), id, type, optype)
for i, node in enumerate(self.nodes):
packet.options.append(node.export(i))
return packet
class OptionTargetNode(Node):
tagname = "Target"
attributes = ("index", "entity")
timestamp = False
packet_class = packets.Option
def export(self, id):
optype = "target"
type = None
return self.packet_class(self.ts, int(self.entity), id, type, optype)
class SendOptionNode(Node):
tagname = "SendOption"
attributes = ("option", "subOption", "target", "position")
timestamp = True
packet_class = packets.SendOption
def export(self):
return self.packet_class(
self.ts, int(self.option), int(self.subOption), int(self.target), int(self.position)
)
| [
"[email protected]"
] | |
d21050a17e15ff92bccfbce4604ba90af3d3d95f | 56818903f60b5e7b88645f88badc92bfa5d2c65f | /automlcli/settings.py | 05d100770da7b6b2f4c87b22a2dd400e38345549 | [
"MIT"
] | permissive | altescy/automlcli | 23e82ad957ac8cbeb43d734741dd8dfb9b24b0ff | ec57ac57df5d9d9f8a7ef79bb7a96a86801f32f4 | refs/heads/main | 2023-04-29T03:57:06.181052 | 2021-05-23T12:19:34 | 2021-05-23T12:19:34 | 341,651,976 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from pathlib import Path
# colt settings
DEFAULT_COLT_SETTING = {
"typekey": "type",
}
# automlcli directory settings
AUTOMLCLI_ROOT = Path.home() / ".automlcli"
# plugin settings
LOCAL_PLUGINS_FILENAME = ".automlcli_plugins"
GLOBAL_PLUGINS_FILENAME = AUTOMLCLI_ROOT / "plugins"
| [
"[email protected]"
] | |
0f904e64473e0a25754c0b977e1599a61fcaaa7b | 660e35c822423685aea19d038daa8356722dc744 | /account_statement_ofx/tests/__init__.py | eef3074bc7837bf7d59e074cce70d4916358feba | [] | no_license | saifkazi/tryton_modules | a05cb4a90ae2c46ba39d60d2005ffc18ce5e44bb | 94bd3a4e3fd86556725cdff33b314274dcb20afd | refs/heads/main | 2023-05-05T12:20:02.059236 | 2021-05-19T10:46:37 | 2021-05-19T10:46:37 | 368,768,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
try:
from trytond.modules.account_statement_ofx.tests.test_account_statement_ofx import suite # noqa: E501
except ImportError:
from .test_account_statement_ofx import suite
__all__ = ['suite']
| [
"[email protected]"
] | |
fd8ac21a8d9b8432a25e4625bc8ff3e90e64da60 | 64cad428fb95a4815f83a90ee44144e1b4b44766 | /env/bin/django-admin.py | 3a80150dc43fbf285f554927972b5e4eddee0a13 | [] | no_license | virginiah894/Api | 5ddcd0eca325d2967d9bbb634ff5bc89d68f6e24 | 96392c7c20d0e25dc2b751a44a3cd379531fafc4 | refs/heads/master | 2022-11-11T10:14:41.153391 | 2020-07-04T14:40:58 | 2020-07-04T14:40:58 | 277,127,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | #!/home/access/Documents/perry projects/Django-APIs/env/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
3a7cafd0b8495001f094ed73028cc04915094f23 | b96f7c01df9417aaf4408e794b1edcc501921c6f | /pirates/coderedemption/CodeRedemptionUD.py | ebf302c6eaa53f6a706c9ea2ff6aa52f277ff69f | [] | no_license | Puggyblue999/PiratesOfTheCarribeanOnline | 492b5feec3dace921026ab1ec64603c208869a62 | 5c7eff12c3821d337404be0face368a5a899fff1 | refs/heads/master | 2021-01-22T15:10:54.858772 | 2015-06-25T20:30:11 | 2015-06-25T20:30:11 | 38,146,060 | 4 | 4 | null | 2015-07-01T18:58:11 | 2015-06-27T04:01:44 | Python | UTF-8 | Python | false | false | 126 | py | from direct.distributed.DistributedObjectUD import DistributedObjectUD
class CodeRedemptionUD(DistributedObjectUD):
pass
| [
"[email protected]"
] | |
062d01992b4ff6403439725111428e675235023b | ca12492b8fe66e34d7152a5118a573175b0a176f | /backend/wallet/migrations/0001_initial.py | 06c04d1d09b8fdac41184f9f6cca8bc684953e59 | [] | no_license | crowdbotics-apps/asile-mobile-22968 | 3d02c0de123ba1b13d79a098ea7eb543658d5f8f | c5005ad17c262f87bdd8eefb89145ee75fdca168 | refs/heads/master | 2023-01-24T17:16:53.239439 | 2020-11-25T08:42:33 | 2020-11-25T08:42:33 | 315,842,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,944 | py | # Generated by Django 2.2.17 on 2020-11-25 05:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('task_profile', '0001_initial'),
('task', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CustomerWallet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.FloatField()),
('expiration_date', models.DateTimeField()),
('last_transaction', models.DateTimeField()),
('customer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerwallet_customer', to='task_profile.CustomerProfile')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_token', models.CharField(max_length=255)),
('payment_account', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('wallet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paymentmethod_wallet', to='wallet.CustomerWallet')),
],
),
migrations.CreateModel(
name='TaskerWallet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.FloatField(max_length=254)),
('expiration_date', models.DateTimeField()),
('last_transaction', models.DateTimeField()),
('tasker', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerwallet_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='TaskerPaymentAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_token', models.CharField(max_length=255)),
('payment_account', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('wallet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taskerpaymentaccount_wallet', to='wallet.TaskerWallet')),
],
),
migrations.CreateModel(
name='PaymentTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.FloatField()),
('tip', models.FloatField()),
('tracking_id', models.CharField(max_length=50)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_customer', to='task_profile.CustomerProfile')),
('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_payment_method', to='wallet.PaymentMethod')),
('tasker', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_tasker', to='task_profile.TaskerProfile')),
('transaction', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_transaction', to='task.TaskTransaction')),
],
),
]
| [
"[email protected]"
] | |
86aad0348b322a2f956b6383ab4d9264b7a71afd | 0ebec1e899789ae2597c01bae7ca2c3382c4266d | /session5/a_customising_plots.py | 0a6ea8df7506befcbf9f9e859b1a2d01d340e160 | [
"Apache-2.0"
] | permissive | TugdualSarazin/MACT20.21_Digital_tools_Big_Data_part_1 | 02fda6b401bcdad2a240de00960ff0dbc61fc94d | b43b9f50ec42bb413c2c3a090cf11f9886676c58 | refs/heads/main | 2023-01-13T20:51:44.000981 | 2020-11-09T12:25:11 | 2020-11-09T12:25:11 | 313,076,622 | 0 | 0 | Apache-2.0 | 2020-11-15T16:44:29 | 2020-11-15T16:44:28 | null | UTF-8 | Python | false | false | 2,890 | py | # encoding: utf-8
##################################################
# This script shows how to use the pandas and matplotlib libraries to produce different kinds of plots
# It also combines data from two sources and creates multiple plots
# Find extra documentation about data frames here:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.scatter.html
##################################################
#
##################################################
# Author: Diego Pajarito
# Copyright: Copyright 2020, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: [email protected]
# Status: development
##################################################
# We need to import pandas library as well as the plot library matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# We read the file for population data and gross domestic product
amb_mplts = pd.read_csv('../data/catalunya/AMB_municipalities_min.csv')
lu_mplts = pd.read_csv('../data/luxembourg/population.csv', skiprows=[2,3])
# First, we filter data for a single country, mind the way to select only columns having numeric data
pop_cat = amb_mplts['population']
area_cat = amb_mplts['area']
pop_lu = lu_mplts[['Year', '2020']]
pop_lu.columns = ['canton', 'population']
pop_lu_1821 = lu_mplts[['Year', '1821']]
pop_lu_1821.columns = ['canton', 'population']
# Plots allow basic configuration of visual features. Here some of the most common
colors = np.random.rand(len(pop_cat))
plt.scatter(x=pop_cat, y=area_cat, c=colors)
plt.show()
# Charts can also use lines to represent patterns from different subsets
for value in lu_mplts['Year']:
a_pop = lu_mplts[lu_mplts['Year'] == value]
a_pop = a_pop.iloc[0, 1:15]
plt.plot(a_pop)
plt.show()
# Try customising the axis ticks
#plt.xticks(np.arange(0, 2020, 100))
plt.yticks(np.arange(0,175000, 50000))
# There are different ways to represent data density,
# this 2d histogram shows population and area distribution
plt.hist2d(pop_cat, area_cat)
plt.show()
# We can create the arrangement for multiple plots and compare the differences in patterns
fig, axs = plt.subplots(2, 2, sharex=False, sharey=False)
axs[0, 0].scatter(x=pop_cat, y=area_cat, c=colors)
axs[1, 0].hist2d(pop_cat, area_cat, bins=20)
axs[0, 1].scatter(x=pop_lu['population'], y=pop_lu_1821['population'])
axs[1, 1].hist2d(x=pop_lu['population'], y=pop_lu_1821['population'], bins=20)
plt.show()
# We can create the arrangement for multiple plots and compare the differences in patterns
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
axs[0].scatter(x=pop_lu['population'], y=pop_lu_1821['population'])
axs[1].hist2d(x=pop_lu['population'], y=pop_lu_1821['population'], bins=20)
plt.show()
| [
"[email protected]"
] | |
8c6977a6a88267049f29f0ab21620a01356f8d36 | 39cb67781018e23428312610ded87c5d384bb690 | /swinger.py | 23a441d49c82499b30ed56afe259a80e11ef8692 | [] | no_license | yi75798/Swinger | afd8e528cc1bcce3a4db83ce54def54372619717 | b158c4f358fbebe655627969231cf1f0276cf708 | refs/heads/master | 2022-02-25T14:10:25.104740 | 2019-10-25T07:24:12 | 2019-10-25T07:24:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,314 | py | # -*- coding: utf-8 -*-
import nltk, json, pickle
import itertools
from random import shuffle
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist
import sklearn
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
def bag_of_words(words):
return dict([(word, True) for word in words])
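# Illustrative output (tokens are made up):
#   bag_of_words(['good', 'plot']) -> {'good': True, 'plot': True}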
def bigram(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
bigram_finder = BigramCollocationFinder.from_words(words) #把文本变成双词搭配的形式
bigrams = bigram_finder.nbest(score_fn, n) #使用了卡方统计的方法,选择排名前1000的双词
return bag_of_words(bigrams)
def bigram_words(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
return bag_of_words(words + bigrams) #所有词和(信息量大的)双词搭配一起作为特征
def create_word_scores():
posWords = json.load(open('p.json','r'))
negWords = json.load(open('n.json','r'))
posWords = list(itertools.chain(*posWords)) #把多维数组解链成一维数组
negWords = list(itertools.chain(*negWords)) #同理
word_fd = FreqDist() #可统计所有词的词频
cond_word_fd = ConditionalFreqDist() #可统计积极文本中的词频和消极文本中的词频
for word in posWords:
word_fd[word] += 1
cond_word_fd['pos'][word] += 1
for word in negWords:
word_fd[word] += 1
cond_word_fd['neg'][word] += 1
    pos_word_count = cond_word_fd['pos'].N() # number of words in positive texts
    neg_word_count = cond_word_fd['neg'].N() # number of words in negative texts
total_word_count = pos_word_count + neg_word_count
word_scores = {}
for word, freq in word_fd.items():
        pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count) # chi-square statistic for the positive class; mutual information etc. would also work
        neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count) # likewise for the negative class
        word_scores[word] = pos_score + neg_score # a word's informativeness is the sum of its positive and negative chi-square statistics
    return word_scores # maps each word to its informativeness
def create_word_bigram_scores():
posdata = json.load(open('p.json','r'))
negdata = json.load(open('n.json','r'))
posWords = list(itertools.chain(*posdata))
negWords = list(itertools.chain(*negdata))
    pos_bigram_finder = BigramCollocationFinder.from_words(posWords)
    neg_bigram_finder = BigramCollocationFinder.from_words(negWords)
    posBigrams = pos_bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000) # top-5000 bigrams per class by chi-square
    negBigrams = neg_bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000)
pos = posWords + posBigrams #词和双词搭配
neg = negWords + negBigrams
word_fd = FreqDist()
cond_word_fd = ConditionalFreqDist()
for word in pos:
word_fd[word] += 1
cond_word_fd['pos'][word] += 1
for word in neg:
word_fd[word] += 1
cond_word_fd['neg'][word] += 1
pos_word_count = cond_word_fd['pos'].N()
neg_word_count = cond_word_fd['neg'].N()
total_word_count = pos_word_count + neg_word_count
word_scores = {}
for word, freq in word_fd.items():
pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count)
neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count)
word_scores[word] = pos_score + neg_score
return word_scores
def find_best_words(word_scores, number):
    best_vals = sorted(word_scores.items(), key=lambda x: -x[1])[:number] # sort words by informativeness, descending; number is the feature dimensionality and can be tuned
best_words = set([w for w, s in best_vals])
return best_words
def score(classifier, name):
classifier = SklearnClassifier(classifier) #在nltk 中使用scikit-learn 的接口
classifier.train(train) #训练分类器
pickle.dump(classifier, open(name + '.pickle','wb'))
pred = classifier.classify_many(test) #对开发测试集的数据进行分类,给出预测的标签
return accuracy_score(tag_test, pred) #对比分类预测结果和人工标注的正确结果,给出分类器准确度
def best_word_features(words):
return dict([(word, True) for word in words if word in best_words])
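# Typical wiring of these helpers (sketch; the feature dimensionality 1500
# is an arbitrary example value, and best_words must be assigned at module
# level before best_word_features is used):
#   word_scores = create_word_bigram_scores()
#   best_words = find_best_words(word_scores, 1500)
#   feats = best_word_features(['good', 'plot', 'the'])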
def pos_features(feature_extraction_method):
posFeatures = []
for i in pos:
        posWords = [feature_extraction_method(i),'pos'] # label positive texts with "pos"
posFeatures.append(posWords)
return posFeatures
def neg_features(feature_extraction_method):
negFeatures = []
for j in neg:
        negWords = [feature_extraction_method(j),'neg'] # label negative texts with "neg"
negFeatures.append(negWords)
return negFeatures
pos_review = json.load(open('p.json','r'))
neg_review = json.load(open('n.json','r'))
word_scores_1 = create_word_scores()
word_scores_2 = create_word_bigram_scores()
shuffle(pos_review) #把积极文本的排列随机化
pos = pos_review
neg = neg_review
posFeatures = pos_features(bag_of_words) # use all words as features
negFeatures = neg_features(bag_of_words)
train = posFeatures+negFeatures
# train = posFeatures[174:]+negFeatures[174:]
# devtest = posFeatures[124:174]+negFeatures[124:174]
test = posFeatures+negFeatures
test, tag_test = zip(*test)
# dev, tag_dev = zip(*devtest) #把开发测试集(已经经过特征化和赋予标签了)分为数据和标签
print('BernoulliNB`s accuracy is %f' %score(BernoulliNB(), 'BernoulliNB'))
print('MultinomiaNB`s accuracy is %f' %score(MultinomialNB(), 'MultinomialNB'))
print('LogisticRegression`s accuracy is %f' %score(LogisticRegression(), 'LogisticRegression'))
print('SVC`s accuracy is %f' %score(SVC(), 'SVC'))
print('LinearSVC`s accuracy is %f' %score(LinearSVC(), 'LinearSVC'))
print('NuSVC`s accuracy is %f' %score(NuSVC(), 'NuSVC')) | [
"[email protected]"
] | |
90f01e806124c7ca87d8fa588c9283d06b53bfcb | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2485/60623/234199.py | d1fc22fa3226c63bdda6a1c2a234b5d3b02955ce | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | # 给定一个单词数组,按排序顺序(计数的递增顺序)一起打印所有字符相同组的计数。
# 例如,如果给定的数组是{“ cat”,“ dog”,“ tac”,“ god”,“ act”},则分组的字谜是“(dog,god)(cat,tac,act)”。因此输出为2 3
size=int(input())
a=0
while a<size:
    b=input()  # not used
strList=input().split()
i=0
while i<len(strList):
l=list(strList[i])
        # list.sort() sorts the list in place, whereas sorted() returns a new sorted sequence and leaves the original unchanged
l.sort()
s="".join(l)
strList[i]=s
i=i+1
strList.sort()
j=0
k=1
myList=[]
while j<len(strList):
if j==len(strList)-1:
break
if(strList[j]==strList[j+1]):
k=k+1
else:
myList.append(k)
k=1
j=j+1
myList.append(k)
myList.sort()
m=0
while m<len(myList):
if m!=len(myList)-1:
print(""+myList[m]+" ", end='')
else:
print(myList[m])
m=m+1
a=a+1 | [
"[email protected]"
] | |
69464c3b9cc44fc360e52b78b6397ca102998b16 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=43/sched.py | 96e86f88d4ec9f87aad6a16a3dbd922eb335bcd5 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | -S 1 -X RUN -Q 0 -L 2 132 400
-S 0 -X RUN -Q 0 -L 2 104 300
-S 0 -X RUN -Q 0 -L 2 93 300
-S 0 -X RUN -Q 0 -L 2 56 300
-S 2 -X RUN -Q 1 -L 1 50 400
-S 2 -X RUN -Q 1 -L 1 46 300
-S 2 -X RUN -Q 1 -L 1 45 150
-S 2 -X RUN -Q 1 -L 1 42 125
-S 3 -X RUN -Q 2 -L 1 35 175
-S 3 -X RUN -Q 2 -L 1 34 125
-S 3 -X RUN -Q 2 -L 1 32 200
-S 3 -X RUN -Q 2 -L 1 28 125
-S 4 -X RUN -Q 3 -L 1 24 125
-S 4 -X RUN -Q 3 -L 1 21 125
-S 4 -X RUN -Q 3 -L 1 20 175
-S 4 -X RUN -Q 3 -L 1 8 100
| [
"[email protected]"
] | |
ea531889bf01ff9b71405fc6ad2e84ec1a764813 | ba8f5d23d9878a25b30a32cf16e8833f93b25853 | /source_py2/python_toolbox/nifty_collections/emitting_weak_key_default_dict.py | 46c4c7701214a78895301bc8c7a7931a9b878581 | [
"MIT"
] | permissive | nastako/python_toolbox | af520cbec1468c8e0aae0b3b1c467ca5623af45b | 9713fd728608818630ee409ac6a6fdaf863af31b | refs/heads/master | 2020-12-11T09:07:19.681161 | 2015-01-16T21:26:37 | 2015-01-16T21:26:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,427 | py | # Copyright 2009-2015 Ram Rachum.
# This program is distributed under the MIT license.
'''
Defines the `EmittingWeakKeyDefaultDict` class.
See its documentation for more details.
'''
from .weak_key_default_dict import WeakKeyDefaultDict
class EmittingWeakKeyDefaultDict(WeakKeyDefaultDict):
'''
A key that references keys weakly, has a default factory, and emits.
This is a combination of `weakref.WeakKeyDictionary` and
`collections.defaultdict`, which emits every time it's modified.
The keys are referenced weakly, so if there are no more references to the
key, it gets removed from this dict.
If a "default factory" is supplied, when a key is attempted that doesn't
exist the default factory will be called to create its new value.
Every time that a change is made, like a key is added or removed or gets
its value changed, we do `.emitter.emit()`.
'''
def __init__(self, emitter, *args, **kwargs):
super(EmittingWeakKeyDefaultDict, self).__init__(*args, **kwargs)
self.emitter = emitter
def set_emitter(self, emitter):
'''Set the emitter that will be emitted every time a change is made.'''
self.emitter = emitter
def __setitem__(self, key, value):
result = \
super(EmittingWeakKeyDefaultDict, self).__setitem__(key, value)
if self.emitter:
self.emitter.emit()
return result
def __delitem__(self, key):
result = super(EmittingWeakKeyDefaultDict, self).__delitem__(key)
if self.emitter:
self.emitter.emit()
return result
def pop(self, key, *args):
""" D.pop(k[,d]) -> v, remove specified key and return the
corresponding value. If key is not found, d is returned if given,
otherwise KeyError is raised """
result = super(EmittingWeakKeyDefaultDict, self).pop(key, *args)
if self.emitter:
self.emitter.emit()
return result
def popitem(self):
""" D.popitem() -> (k, v), remove and return some (key, value)
pair as a 2-tuple; but raise KeyError if D is empty """
result = super(EmittingWeakKeyDefaultDict, self).popitem()
if self.emitter:
self.emitter.emit()
return result
def clear(self):
""" D.clear() -> None. Remove all items from D. """
result = super(EmittingWeakKeyDefaultDict, self).clear()
if self.emitter:
self.emitter.emit()
return result
def __repr__(self):
return '%s(%s, %s, %s)' % (
type(self).__name__,
self.emitter,
self.default_factory,
dict(self)
)
def __reduce__(self):
"""
__reduce__ must return a 5-tuple as follows:
- factory function
- tuple of args for the factory function
- additional state (here None)
- sequence iterator (here None)
- dictionary iterator (yielding successive (key, value) pairs
This API is used by pickle.py and copy.py.
"""
if self.default_factory:
parameters = (self.emitter, self.default_factory)
else: # not self.default_factory
parameters = (self.emitter)
return (type(self), parameters, None, None, self.iteritems()) | [
"[email protected]"
] | |
f6204b97311c0a68ac6fb78da25a8bf916595d58 | 69e318f2b60175108bc74ee669bfe16287a71cb6 | /plugins/modules/fortios_system_dns.py | db4b40e5be9e400dcaf07963bd254f700f9a60db | [] | no_license | chillancezen/ansible-galaxy-fortios-collection | 5268a5fd97fb4594772349b8d89cb818ec54b3bd | 66a331cd4493d1b0f49798d5c2cd6ef5aeba84d3 | refs/heads/master | 2022-04-09T19:20:59.073193 | 2020-03-26T07:17:09 | 2020-03-26T07:17:09 | 250,185,374 | 0 | 0 | null | 2020-03-26T07:06:16 | 2020-03-26T07:06:16 | null | UTF-8 | Python | false | false | 11,555 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_dns
short_description: Configure DNS in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and dns category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
system_dns:
description:
- Configure DNS.
default: null
type: dict
suboptions:
cache_notfound_responses:
description:
- Enable/disable response from the DNS server when a record is not in cache.
type: str
choices:
- disable
- enable
dns_cache_limit:
description:
- Maximum number of records in the DNS cache.
type: int
dns_cache_ttl:
description:
- Duration in seconds that the DNS cache retains information.
type: int
domain:
description:
- Search suffix list for hostname lookup.
type: list
suboptions:
domain:
description:
- DNS search domain list separated by space (maximum 8 domains)
required: true
type: str
ip6_primary:
description:
- Primary DNS server IPv6 address.
type: str
ip6_secondary:
description:
- Secondary DNS server IPv6 address.
type: str
primary:
description:
- Primary DNS server IP address.
type: str
retry:
description:
- Number of times to retry (0 - 5).
type: int
secondary:
description:
- Secondary DNS server IP address.
type: str
source_ip:
description:
- IP address used by the DNS server as its source IP.
type: str
timeout:
description:
- DNS query timeout interval in seconds (1 - 10).
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure DNS.
fortios_system_dns:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_dns:
cache_notfound_responses: "disable"
dns_cache_limit: "4"
dns_cache_ttl: "5"
domain:
-
domain: "<your_own_value>"
ip6_primary: "<your_own_value>"
ip6_secondary: "<your_own_value>"
primary: "<your_own_value>"
retry: "11"
secondary: "<your_own_value>"
source_ip: "84.230.14.43"
timeout: "14"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_dns_data(json):
option_list = ['cache_notfound_responses', 'dns_cache_limit', 'dns_cache_ttl',
'domain', 'ip6_primary', 'ip6_secondary',
'primary', 'retry', 'secondary',
'source_ip', 'timeout']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def system_dns(data, fos):
vdom = data['vdom']
system_dns_data = data['system_dns']
filtered_data = underscore_to_hyphen(filter_system_dns_data(system_dns_data))
return fos.set('system',
'dns',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_dns']:
resp = system_dns(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"system_dns": {
"required": False, "type": "dict", "default": None,
"options": {
"cache_notfound_responses": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"dns_cache_limit": {"required": False, "type": "int"},
"dns_cache_ttl": {"required": False, "type": "int"},
"domain": {"required": False, "type": "list",
"options": {
"domain": {"required": True, "type": "str"}
}},
"ip6_primary": {"required": False, "type": "str"},
"ip6_secondary": {"required": False, "type": "str"},
"primary": {"required": False, "type": "str"},
"retry": {"required": False, "type": "int"},
"secondary": {"required": False, "type": "str"},
"source_ip": {"required": False, "type": "str"},
"timeout": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
150c5fd8c3bd60bd78428844d15da7c125771b39 | 3937f340aafed20f7b3bb9e36a62d0dfe6ca985d | /CrispyProject/WebApp/forms.py | 02e71ff1a3beb904dbd38af4006ac6e1f687b0b3 | [] | no_license | sunnywralph/Django7AM | 8f6f7e52847882d35ee7f7c4c263c5e90c79b6da | ffef6c6e5ab5231416ca743ebae299622eab9791 | refs/heads/master | 2022-04-25T15:04:45.733504 | 2020-05-05T14:53:57 | 2020-05-05T14:53:57 | 261,496,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from django import forms
from WebApp.models import Person
# Fields with Validation
class EmpForm(forms.ModelForm):
class Meta:
model = Person
fields = '__all__'
| [
"[email protected]"
] | |
4335e43e879c0ef68bff953743aa51e096e7bc6b | abfa70e1da5b4ba8e465cdc046fa36e81386744a | /base_ml/10.5.Iris_RandomForest_Enum.py | cb324f72bde8cb4ca167d5b6c13a703a16f9b8bc | [] | no_license | superman666ai/crazy_project | f850819ff2287e345b67500111733bafa5629d1f | 99dcba0fe246ecaf3f556f747d44731a04231921 | refs/heads/master | 2020-05-15T09:32:56.523875 | 2019-05-16T00:57:23 | 2019-05-16T00:57:23 | 182,179,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.ensemble import RandomForestClassifier
def iris_type(s):
it = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
return it[s]
# 'sepal length', 'sepal width', 'petal length', 'petal width'
iris_feature = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'
if __name__ == "__main__":
mpl.rcParams['font.sans-serif'] = [u'SimHei'] # 黑体 FangSong/KaiTi
mpl.rcParams['axes.unicode_minus'] = False
path = '../data/8.iris.data' # 数据文件路径
data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type}, encoding="utf-8")
x_prime, y = np.split(data, (4,), axis=1)
feature_pairs = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
plt.figure(figsize=(10, 9), facecolor='#FFFFFF')
for i, pair in enumerate(feature_pairs):
# 准备数据
x = x_prime[:, pair]
# 随机森林
clf = RandomForestClassifier(n_estimators=200, criterion='entropy', max_depth=4)
rf_clf = clf.fit(x, y.ravel())
# 画图
N, M = 500, 500 # 横纵各采样多少个值
x1_min, x1_max = x[:, 0].min(), x[:, 0].max() # 第0列的范围
x2_min, x2_max = x[:, 1].min(), x[:, 1].max() # 第1列的范围
t1 = np.linspace(x1_min, x1_max, N)
t2 = np.linspace(x2_min, x2_max, M)
x1, x2 = np.meshgrid(t1, t2) # 生成网格采样点
x_test = np.stack((x1.flat, x2.flat), axis=1) # 测试点
# 训练集上的预测结果
y_hat = rf_clf.predict(x)
y = y.reshape(-1)
c = np.count_nonzero(y_hat == y) # 统计预测正确的个数
# print '特征: ', iris_feature[pair[0]], ' + ', iris_feature[pair[1]],
# print '\t预测正确数目:', c,
# print '\t准确率: %.2f%%' % (100 * float(c) / float(len(y)))
# 显示
cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
y_hat = rf_clf.predict(x_test) # 预测值
y_hat = y_hat.reshape(x1.shape) # 使之与输入的形状相同
plt.subplot(2, 3, i+1)
plt.pcolormesh(x1, x2, y_hat, cmap=cm_light) # 预测值
plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', cmap=cm_dark) # 样本
plt.xlabel(iris_feature[pair[0]], fontsize=14)
plt.ylabel(iris_feature[pair[1]], fontsize=14)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.grid()
plt.tight_layout(2.5)
plt.subplots_adjust(top=0.92)
plt.suptitle(u'随机森林对鸢尾花数据的两特征组合的分类结果', fontsize=18)
plt.show()
| [
"[email protected]"
] | |
9dd940b5933f26f0c5f47d581160b7ba22f31fb6 | bea2e5924a62b76a767b3eb915abb3f95a225926 | /tensorflow_privacy/privacy/dp_query/dp_query.py | 480241019dde4b557a7fb073ff3ecc44002afc1b | [
"Apache-2.0",
"MIT"
] | permissive | tensorflow/privacy | 741ddc106e9b73384a1356bf915dc8f7f97ce768 | c92610e37aa340932ed2d963813e0890035a22bc | refs/heads/master | 2023-09-03T20:42:21.040653 | 2023-08-30T19:53:38 | 2023-08-30T19:54:08 | 162,747,292 | 1,881 | 493 | Apache-2.0 | 2023-09-14T19:55:15 | 2018-12-21T18:46:46 | Python | UTF-8 | Python | false | false | 12,074 | py | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface for differentially private query mechanisms.
The DPQuery class abstracts the differential privacy mechanism needed by DP-SGD.
The nomenclature is not specific to machine learning, but rather comes from
the differential privacy literature. Therefore, instead of talking about
examples, minibatches, and gradients, the code talks about records, samples and
queries. For more detail, please see the paper here:
https://arxiv.org/pdf/1812.06210.pdf
A common usage paradigm for this class is centralized DP-SGD training on a
fixed set of training examples, which we call "standard DP-SGD training."
In such training, SGD applies as usual by computing gradient updates from a set
of training examples that form a minibatch. However, each minibatch is broken
up into disjoint "microbatches." The gradient of each microbatch is computed
and clipped to a maximum norm, with the "records" for all such clipped gradients
forming a "sample" that constitutes the entire minibatch. Subsequently, that
sample can be "queried" to get an averaged, noised gradient update that can be
applied to model parameters.
In order to prevent inaccurate accounting of privacy parameters, the only
means of inspecting the gradients and updates of SGD training is via the use
of the below interfaces, and through the accumulation and querying of a
"sample state" abstraction. Thus, accessing data is indirect on purpose.
The DPQuery class also allows the use of a global state that may change between
samples. In the common situation where the privacy mechanism remains unchanged
throughout the entire training process, the global state is usually None.
"""
import abc
import collections
import tensorflow as tf
class DPQuery(metaclass=abc.ABCMeta):
"""Interface for differentially private query mechanisms.
Differential privacy is achieved by processing records to bound sensitivity,
accumulating the processed records (usually by summing them) and then
adding noise to the aggregated result. The process can be repeated to compose
applications of the same mechanism, possibly with different parameters.
The DPQuery interface specifies a functional approach to this process. A
global state maintains state that persists across applications of the
mechanism. For each application, the following steps are performed:
1. Use the global state to derive parameters to use for the next sample of
records.
2. Initialize a sample state that will accumulate processed records.
3. For each record:
a. Process the record.
b. Accumulate the record into the sample state.
4. Get the result of the mechanism, possibly updating the global state to use
in the next application.
5. Derive metrics from the global state.
Here is an example using the GaussianSumQuery. Assume there is some function
records_for_round(round) that returns an iterable of records to use on some
round.
```
dp_query = tensorflow_privacy.GaussianSumQuery(
l2_norm_clip=1.0, stddev=1.0)
global_state = dp_query.initial_global_state()
for round in range(num_rounds):
sample_params = dp_query.derive_sample_params(global_state)
sample_state = dp_query.initial_sample_state()
for record in records_for_round(round):
sample_state = dp_query.accumulate_record(
sample_params, sample_state, record)
result, global_state = dp_query.get_noised_result(
sample_state, global_state)
metrics = dp_query.derive_metrics(global_state)
# Do something with result and metrics...
```
"""
def initial_global_state(self):
"""Returns the initial global state for the DPQuery.
The global state contains any state information that changes across
repeated applications of the mechanism. The default implementation returns
just an empty tuple for implementing classes that do not have any persistent
state.
This object must be processable via tf.nest.map_structure.
Returns:
The global state.
"""
return ()
def derive_sample_params(self, global_state):
"""Given the global state, derives parameters to use for the next sample.
For example, if the mechanism needs to clip records to bound the norm,
the clipping norm should be part of the sample params. In a distributed
context, this is the part of the state that would be sent to the workers
so they can process records.
Args:
global_state: The current global state.
Returns:
Parameters to use to process records in the next sample.
"""
del global_state # unused.
return ()
@abc.abstractmethod
def initial_sample_state(self, template=None):
"""Returns an initial state to use for the next sample.
For typical `DPQuery` classes that are aggregated by summation, this should
return a nested structure of zero tensors of the appropriate shapes, to
which processed records will be aggregated.
Args:
template: A nested structure of tensors, TensorSpecs, or numpy arrays used
as a template to create the initial sample state. It is assumed that the
leaves of the structure are python scalars or some type that has
properties `shape` and `dtype`.
Returns: An initial sample state.
"""
pass
def preprocess_record(self, params, record):
"""Preprocesses a single record.
This preprocessing is applied to one client's record, e.g. selecting vectors
and clipping them to a fixed L2 norm. This method can be executed in a
separate TF session, or even on a different machine, so it should not depend
on any TF inputs other than those provided as input arguments. In
particular, implementations should avoid accessing any TF tensors or
variables that are stored in self.
Args:
params: The parameters for the sample. In standard DP-SGD training, the
clipping norm for the sample's microbatch gradients (i.e., a maximum
norm magnitude to which each gradient is clipped)
record: The record to be processed. In standard DP-SGD training, the
gradient computed for the examples in one microbatch, which may be the
gradient for just one example (for size 1 microbatches).
Returns:
A structure of tensors to be aggregated.
"""
del params # unused.
return record
@abc.abstractmethod
def accumulate_preprocessed_record(self, sample_state, preprocessed_record):
"""Accumulates a single preprocessed record into the sample state.
This method is intended to only do simple aggregation, typically just a sum.
In the future, we might remove this method and replace it with a way to
declaratively specify the type of aggregation required.
Args:
sample_state: The current sample state. In standard DP-SGD training, the
accumulated sum of previous clipped microbatch gradients.
preprocessed_record: The preprocessed record to accumulate.
Returns:
The updated sample state.
"""
pass
def accumulate_record(self, params, sample_state, record):
"""Accumulates a single record into the sample state.
This is a helper method that simply delegates to `preprocess_record` and
`accumulate_preprocessed_record` for the common case when both of those
functions run on a single device. Typically this will be a simple sum.
Args:
params: The parameters for the sample. In standard DP-SGD training, the
clipping norm for the sample's microbatch gradients (i.e., a maximum
norm magnitude to which each gradient is clipped)
sample_state: The current sample state. In standard DP-SGD training, the
accumulated sum of previous clipped microbatch gradients.
record: The record to accumulate. In standard DP-SGD training, the
gradient computed for the examples in one microbatch, which may be the
gradient for just one example (for size 1 microbatches).
Returns:
The updated sample state. In standard DP-SGD training, the set of
previous microbatch gradients with the addition of the record argument.
"""
preprocessed_record = self.preprocess_record(params, record)
return self.accumulate_preprocessed_record(sample_state,
preprocessed_record)
@abc.abstractmethod
def merge_sample_states(self, sample_state_1, sample_state_2):
"""Merges two sample states into a single state.
This can be useful if aggregation is performed hierarchically, where
multiple sample states are used to accumulate records and then
hierarchically merged into the final accumulated state. Typically this will
be a simple sum.
Args:
sample_state_1: The first sample state to merge.
sample_state_2: The second sample state to merge.
Returns:
The merged sample state.
"""
pass
@abc.abstractmethod
def get_noised_result(self, sample_state, global_state):
"""Gets the query result after all records of sample have been accumulated.
The global state can also be updated for use in the next application of the
DP mechanism.
Args:
sample_state: The sample state after all records have been accumulated. In
standard DP-SGD training, the accumulated sum of clipped microbatch
gradients (in the special case of microbatches of size 1, the clipped
per-example gradients).
global_state: The global state, storing long-term privacy bookkeeping.
Returns:
A tuple `(result, new_global_state, event)` where:
* `result` is the result of the query,
* `new_global_state` is the updated global state, and
* `event` is the `DpEvent` that occurred.
In standard DP-SGD training, the result is a gradient update comprising a
noised average of the clipped gradients in the sample state---with the
noise and averaging performed in a manner that guarantees differential
privacy.
"""
pass
def derive_metrics(self, global_state):
"""Derives metric information from the current global state.
Any metrics returned should be derived only from privatized quantities.
Args:
global_state: The global state from which to derive metrics.
Returns:
A `collections.OrderedDict` mapping string metric names to tensor values.
"""
del global_state
return collections.OrderedDict()
def _zeros_like(arg):
"""A `zeros_like` function that also works for `tf.TensorSpec`s."""
try:
arg = tf.convert_to_tensor(value=arg)
except (TypeError, ValueError):
pass
return tf.zeros(arg.shape, arg.dtype)
def _safe_add(x, y):
"""Adds x and y but if y is None, simply returns x."""
return x if y is None else tf.add(x, y)
class SumAggregationDPQuery(DPQuery):
"""Base class for DPQueries that aggregate via sum."""
def initial_sample_state(self, template=None):
"""Implements `tensorflow_privacy.DPQuery.initial_sample_state`."""
return tf.nest.map_structure(_zeros_like, template)
def accumulate_preprocessed_record(self, sample_state, preprocessed_record):
"""Implements `tensorflow_privacy.DPQuery.accumulate_preprocessed_record`.
"""
return tf.nest.map_structure(_safe_add, sample_state, preprocessed_record)
def merge_sample_states(self, sample_state_1, sample_state_2):
"""Implements `tensorflow_privacy.DPQuery.merge_sample_states`."""
return tf.nest.map_structure(tf.add, sample_state_1, sample_state_2)
| [
"[email protected]"
] | |
0e5cd2b71cfca2920b63884ab1b03dedd57aecaa | 11763b1150a3a05db89c13dcd6152f8fcca87eaa | /designs/linear/homomorphic/latticebased/qtpiepublickey3.py | 19c3c2cc331daaa31305c3217bbc670ba8c7c944 | [] | no_license | acad2/crypto | 343c32fa25aaec73e169290579fc3d02c4b226f6 | cb283df4101fcd618a0478a0018273f00d0734ae | refs/heads/master | 2021-08-19T06:36:26.068033 | 2017-11-25T00:41:03 | 2017-11-25T00:41:03 | 113,048,326 | 2 | 0 | null | 2017-12-04T13:49:02 | 2017-12-04T13:49:01 | null | UTF-8 | Python | false | false | 2,559 | py | raise NotImplementedError("q needs to be randomized")
from math import log
from crypto.utilities import random_integer, modular_inverse, big_prime, modular_subtraction
N = 90539821999601667010016498433538092350601848065509335050382778168697877622963864208930434463149476126948597274673237394102007067278620641565896411613073030816577188842779580374266789048335983054644275218968175557708746520394332802669663905398219996016670100164984335380923506018480655093350503827781686978776229638642089304344631494761269485972746732373941020070672786206415658964116130730308165771888427795803742667890483359830546442752189681755577087465203943328026696639053982199960166701001649843353809235060184806550933505038277816869787762296386420893043446314947612694859727467323739410200706727862064156589641161307303081657718884277958037426678904833598305464427521896817555770874652039433280266966390539821999601667010016498433538092350601848065509335050382778168697877622963864208930434463149476126948597274673237394102007067278620641565896411613073030816577188842779580374266789048335983054644275218968175557708746520394332802669663
#1 + 33 + 32 = 66
# prq + e
# q + pirie
#65 + 97 + 32 =
#pq1 + e1 * (pq2 + e2)
#pq1(pq2 + e2) + e1(pq2 + e2)
#ppq1q2 + pq1e2 + pq2e1 + e1e2
# prrq1q2 + rq1e2 + rq2e1 + e1e2
#pq1 + e1 + pq2 + e2
#p(q1 + q2) + e1 + e2
def generate_pi(pi_size=65, n=N):
pi = random_integer(pi_size)
assert log(n, 2) - log(pi, 2) > 256, log(n, 2) - log(pi, 2)
return pi
def generate_pq(private_key, q_size=32, n=N):
p = modular_inverse(private_key, n)
q = random_integer(q_size)
pq = (p * q) % n
assert log(n, 2) - log(pq, 2) < 256
assert log(n, 2) - log(modular_inverse(pq, n), 2) < 256, (log(n, 2), log(n - modular_inverse(pq, n), 2))
return pq, q
def generate_keypair():
pi = generate_pi()
pq, q = generate_pq(pi)
public_key = pq
private_key = (pi, q)
return public_key, private_key
def encrypt(q, public_key, e_size=32, n=N):
assert n == N
e = random_integer(e_size)
return ((public_key * q) + e) % n
def decrypt(ciphertext, private_key, n=N, operation_count=1):
pi, r = private_key
pie_q = (pi * ciphertext) % n
q = pie_q % pi
return q / (r ** operation_count)
def test_encrypt_decrypt():
from unittesting import test_asymmetric_encrypt_decrypt
test_asymmetric_encrypt_decrypt("qtpiepublickey3", generate_keypair, encrypt, decrypt, iterations=10000)
if __name__ == "__main__":
test_encrypt_decrypt()
| [
"[email protected]"
] | |
c731e200e23ca2544520bae18655637937d939d8 | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/storage-blob-preview/azext_storage_blob_preview/vendored_sdks/azure_mgmt_storage/v2021_01_01/aio/operations/_usages_operations.py | 4fb31d3652c09b18a8730846a6ade439f9280d2f | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 5,262 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""UsagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_location(
self,
location: str,
**kwargs
) -> AsyncIterable["_models.UsageListResult"]:
"""Gets the current usage count and the limit for the resources of the location under the
subscription.
:param location: The location of the Azure Storage resource.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_01_01.models.UsageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_location.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages'} # type: ignore
| [
"[email protected]"
] | |
346257657882bc451e07c22b1ab4eab2cfa406a0 | a1f014188a5567b701efbf5f296f88a572304689 | /python_code/飞机大战/全屏和调整窗口大小,图像变换/py_查看系统分辨率.py | 31b238c74e8003ecdc86b11b19ae5064b2bdea21 | [] | no_license | YL-python/yl_python_code | 6cbecf96fd37cc74e108939d6898a92b3539ec2a | d286c73b6d109cd996c7fd79d67d90c47afb31d3 | refs/heads/master | 2022-11-17T07:42:54.021558 | 2020-01-07T08:43:08 | 2020-01-07T08:43:08 | 219,503,414 | 0 | 1 | null | 2022-10-21T19:00:35 | 2019-11-04T13:04:35 | Python | UTF-8 | Python | false | false | 59 | py | import pygame
pygame.init()
pygame.display.list_modes()
| [
"[email protected]"
] | |
eeaf201358b733d340ba20b8541a19ccc863938e | 8e7279bc3de368e85129b8e59f12cbdbd8621da1 | /myenv/bin/gifmaker.py | b0dd02f4131ba154bff4296b4730f87a960d0ce0 | [] | no_license | banziha104/dstagram2 | 34f5dca6deb9c19c03fa523d6e4b1c97f60e14d4 | 12dbecb4a727fe67faffc1b2208bd4b5152a8672 | refs/heads/master | 2021-07-09T23:51:17.262219 | 2017-10-10T11:18:45 | 2017-10-10T11:18:45 | 105,170,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | #!/Users/iyeongjun/Downloads/dstagram_2nd/myenv/bin/python3.6
#
# The Python Imaging Library
# $Id$
#
# convert sequence format to GIF animation
#
# history:
# 97-01-03 fl created
#
# Copyright (c) Secret Labs AB 1997. All rights reserved.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import Image
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print("GIFMAKER -- create GIF animations")
print("Usage: gifmaker infile outfile")
sys.exit(1)
im = Image.open(sys.argv[1])
im.save(sys.argv[2], save_all=True)
| [
"[email protected]"
] | |
d9c94007b05b243ba95ace0dae93928d09561f45 | bf0800eee5a43f600ab3ebd99d3486846d9f4834 | /blog/views.py | aa024c57c30b7e613d9d778655bff923cef2a3e5 | [] | no_license | wonsik1012/my-first-blog | 6de17de4bd13a9d36650ad2070b07190461dbd3e | e0124e41b8dabf20e23af5d969e55a2238841dba | refs/heads/master | 2020-07-03T10:07:38.321239 | 2019-08-13T11:48:21 | 2019-08-13T11:48:21 | 201,874,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,924 | py | from django.shortcuts import render
from django.utils import timezone
from .models import Post
from django.shortcuts import render, redirect, render_to_response
from django.http import HttpResponse
from django.template.loader import get_template
from django.template.context import RequestContext
import folium
from IPython.display import HTML, display
import numpy as np
import osmnx as ox
import networkx as nx
from sklearn.neighbors import KDTree
import folium
import folium.plugins
import pandas as pd
from folium import plugins
import requests
import googlemaps
import numbers
import math
from geopy.geocoders import Nominatim
def show_map(request):
G = ox.graph_from_place('종로구')
a = ox.elevation.add_node_elevations(G, 'AIzaSyBQYn4uBzdjr1ULXYqfn_z7lUWoIXYQB1Q', max_locations_per_batch=350, pause_duration=0.02)
b =ox.elevation.add_edge_grades(G, add_absolute=True)
nodes,edge = ox.graph_to_gdfs(b)
edge.head()
gmaps_key = "AIzaSyBQYn4uBzdjr1ULXYqfn_z7lUWoIXYQB1Q"
gmaps = googlemaps.Client(key=gmaps_key)
geolocator = Nominatim()
class GeoUtil:
"""
Geographical Utils
"""
@staticmethod
def degree2radius(degree):
return degree * (math.pi/180)
@staticmethod
def get_harversion_distance(x1, y1, x2, y2, round_decimal_digits=5):
if x1 is None or y1 is None or x2 is None or y2 is None:
return None
assert isinstance(x1, numbers.Number) and -180 <= x1 and x1 <= 180
assert isinstance(y1, numbers.Number) and -90 <= y1 and y1 <= 90
assert isinstance(x2, numbers.Number) and -180 <= x2 and x2 <= 180
assert isinstance(y2, numbers.Number) and -90 <= y2 and y2 <= 90
R = 6371 # 지구의 반경(단위: km)
dLon = GeoUtil.degree2radius(x2-x1)
dLat = GeoUtil.degree2radius(y2-y1)
a = math.sin(dLat/2) * math.sin(dLat/2) \
+ (math.cos(GeoUtil.degree2radius(y1)) \
*math.cos(GeoUtil.degree2radius(y2)) \
*math.sin(dLon/2) * math.sin(dLon/2))
b = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
return round(R * b, round_decimal_digits)
def seeshortestway(x1,x2):
#loc1 = ox.geocode(x1)
#loc2 = ox.geocode(x2)
place1=gmaps.geocode(x1)
lat1=place1[0]['geometry']['location']['lat']
lng1=place1[0]['geometry']['location']['lng']
place2=gmaps.geocode(x2)
lat2=place2[0]['geometry']['location']['lat']
lng2=place2[0]['geometry']['location']['lng']
loc1=(lat1,lng1)
loc2=(lat2,lng2)
#KD트리를 이용하면 최단거리를 쉽고 효율적으로 찾아준다.
tree = KDTree(nodes[['y', 'x']], metric='euclidean')
loc1_idx = tree.query([loc1], k=1, return_distance=False)[0]
loc2_idx = tree.query([loc2], k=1, return_distance=False)[0]
closest_node_to_loc1 = nodes.iloc[loc1_idx].index.values[0]
closest_node_to_loc2 = nodes.iloc[loc2_idx].index.values[0]
route = nx.shortest_path(G, closest_node_to_loc1,closest_node_to_loc2, weight='length')
onlygodoroute = nx.shortest_path(G, closest_node_to_loc1,closest_node_to_loc2, weight='grade_abs')
impedanceroute = nx.shortest_path(G, closest_node_to_loc1,closest_node_to_loc2, weight='impedance')
#distance=nx.shortest_path_length(G, closest_node_to_loc1,closest_node_to_loc2)
graderoute = []
impedance = []
for i in range(len(onlygodoroute)):
lng = G.node[onlygodoroute[i]]['x']
lat = G.node[onlygodoroute[i]]['y']
b = [lat,lng]
graderoute.append(b)
for i in range(len(impedanceroute)):
lng = G.node[impedanceroute[i]]['x']
lat = G.node[impedanceroute[i]]['y']
b = [lat,lng]
impedance.append(b)
m = ox.plot_route_folium(G, route, route_color='navy',tiles='stamen toner')
antpath = plugins.AntPath(locations=graderoute,color='purple')
antpath.add_to(m)
antpath = plugins.AntPath(locations=impedance,color='red')
antpath.add_to(m)
#folium.PolyLine(graderoute, color="purple", weight=4).add_to(m)
#folium.PolyLine(impedance, color="red", weight=4).add_to(m)
kw = {
'prefix': 'fa',
'color': 'green',
'icon': 'arrow-up'
}
ka = {
'prefix': 'fa',
'color': 'blue',
'icon': 'arrow-up'
}
icon1 = folium.Icon(angle=45, **kw)
folium.Marker(location=loc1, icon=icon1,popup=x1, tooltip='출발').add_to(m)
icon2 = folium.Icon(angle=180, **ka)
folium.Marker(location=loc2, icon=icon2, popup=x2,tooltip='도착').add_to(m)
#lium.Marker(location=loc1,
# icon=folium.Icon(color='red'), popup=x1, tooltip='출발').add_to(m)
#folium.Marker(location=loc2,
#icon=folium.Icon(color='blue'),popup=x2, tooltip='도착').add_to(m)
dobo=4
add = []
for i in range(len(route)-1):
lng1 = G.node[route[i]]['x']
lat1 = G.node[route[i]]['y']
lng2 = G.node[route[i+1]]['x']
lat2 = G.node[route[i+1]]['y']
result =GeoUtil.get_harversion_distance(lng1,lat1,lng2,lat2)
add.append(result)
noroundkm = sum(add)
km = round(noroundkm,1)
noroundminute = (km/dobo)*60
minute = round(noroundminute,1)
print('거리는',km,'KM 이며, ','시간은', minute,'분 걸립니다.')
return m
m=seeshortestway('안국역 3호선', '북촌생활사박물관')
a = m.save("blog/templates/blog/map.html")
context = {'my_map': m}
return render(request, 'blog/map.html', context)
| [
"[email protected]"
] | |
64f4aecdc4ba0856009744c04d0a8cef73e58ae7 | 77db6591c5884204d6016bfa89b33691bac38813 | /load.py | b2a9c8a3983643329620f4d7f7cd949b5ccd27f0 | [] | no_license | jbukoski/iltf-signal-webmap-suite | 4fc0aafa977e911a1071872f7adbaf2e7d0da37c | b8374e9cfcc80501a8f632721a7cb9b76e668f6b | refs/heads/master | 2021-03-27T11:20:37.174667 | 2020-12-31T18:03:20 | 2020-12-31T18:03:20 | 79,853,039 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | import os
from django.contrib.gis.utils import LayerMapping
from . import models
*_shp = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', '', '*.shp'))
def run(verbose=True):
*_lm = LayerMapping(
models.*, *_shp, *_mapping,
transform=False, encoding='iso-8859-1'
)
*_lm.save(strict=True, verbose=verbose)
| [
"[email protected]"
] | |
7e9e19a672cc628c6de3bc8e5c6bc27e7e767af9 | e0ecbc6352c442370e90238ae7dd71f4fb78cfa9 | /visual_question_answer/model.py | 76ddc06e1dbeaa8d56d4f2b52510d2d2034316d4 | [] | no_license | xiaogang00/Deep-learning-on-Tensorflow | 22a13b4f9f4db5934b710fdd887d9e88d22ef72d | 7f9cd740bf330ae2fc7ae77636ded068e2aa405c | refs/heads/master | 2021-01-17T14:34:14.490989 | 2017-10-27T03:48:14 | 2017-10-27T03:48:14 | 84,092,564 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,652 | py | import math
import os
import tensorflow as tf
import numpy as np
from base_model import *
from utils.nn import *
from episodic_memory import *
class QuestionAnswerer(BaseModel):
def build(self):
""" Build the model. """
self.build_cnn()
self.build_rnn()
def build_cnn(self):
""" Build the CNN. """
print("Building the CNN part...")
        if self.cnn_model == 'vgg16':
            self.build_vgg16()
        elif self.cnn_model == 'resnet50':
            self.build_resnet50()
        elif self.cnn_model == 'resnet101':
            self.build_resnet101()
        else:
            self.build_resnet152()
print("CNN part built.")
def build_vgg16(self):
""" Build the VGG16 net. """
bn = self.params.batch_norm
imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)
is_train = tf.placeholder(tf.bool)
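        # `imgs` is a batch of RGB inputs (self.img_shape is presumably
        # something like [224, 224, 3]); `is_train` switches batch norm between
        # batch statistics and the moving averages.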
conv1_1_feats = convolution(imgs, 3, 3, 64, 1, 1, 'conv1_1')
conv1_1_feats = batch_norm(conv1_1_feats, 'bn1_1', is_train, bn, 'relu')
conv1_2_feats = convolution(conv1_1_feats, 3, 3, 64, 1, 1, 'conv1_2')
conv1_2_feats = batch_norm(conv1_2_feats, 'bn1_2', is_train, bn, 'relu')
pool1_feats = max_pool(conv1_2_feats, 2, 2, 2, 2, 'pool1')
conv2_1_feats = convolution(pool1_feats, 3, 3, 128, 1, 1, 'conv2_1')
conv2_1_feats = batch_norm(conv2_1_feats, 'bn2_1', is_train, bn, 'relu')
conv2_2_feats = convolution(conv2_1_feats, 3, 3, 128, 1, 1, 'conv2_2')
conv2_2_feats = batch_norm(conv2_2_feats, 'bn2_2', is_train, bn, 'relu')
pool2_feats = max_pool(conv2_2_feats, 2, 2, 2, 2, 'pool2')
conv3_1_feats = convolution(pool2_feats, 3, 3, 256, 1, 1, 'conv3_1')
conv3_1_feats = batch_norm(conv3_1_feats, 'bn3_1', is_train, bn, 'relu')
conv3_2_feats = convolution(conv3_1_feats, 3, 3, 256, 1, 1, 'conv3_2')
conv3_2_feats = batch_norm(conv3_2_feats, 'bn3_2', is_train, bn, 'relu')
conv3_3_feats = convolution(conv3_2_feats, 3, 3, 256, 1, 1, 'conv3_3')
conv3_3_feats = batch_norm(conv3_3_feats, 'bn3_3', is_train, bn, 'relu')
pool3_feats = max_pool(conv3_3_feats, 2, 2, 2, 2, 'pool3')
conv4_1_feats = convolution(pool3_feats, 3, 3, 512, 1, 1, 'conv4_1')
conv4_1_feats = batch_norm(conv4_1_feats, 'bn4_1', is_train, bn, 'relu')
conv4_2_feats = convolution(conv4_1_feats, 3, 3, 512, 1, 1, 'conv4_2')
conv4_2_feats = batch_norm(conv4_2_feats, 'bn4_2', is_train, bn, 'relu')
conv4_3_feats = convolution(conv4_2_feats, 3, 3, 512, 1, 1, 'conv4_3')
conv4_3_feats = batch_norm(conv4_3_feats, 'bn4_3', is_train, bn, 'relu')
pool4_feats = max_pool(conv4_3_feats, 2, 2, 2, 2, 'pool4')
conv5_1_feats = convolution(pool4_feats, 3, 3, 512, 1, 1, 'conv5_1')
conv5_1_feats = batch_norm(conv5_1_feats, 'bn5_1', is_train, bn, 'relu')
conv5_2_feats = convolution(conv5_1_feats, 3, 3, 512, 1, 1, 'conv5_2')
conv5_2_feats = batch_norm(conv5_2_feats, 'bn5_2', is_train, bn, 'relu')
conv5_3_feats = convolution(conv5_2_feats, 3, 3, 512, 1, 1, 'conv5_3')
conv5_3_feats = batch_norm(conv5_3_feats, 'bn5_3', is_train, bn, 'relu')
self.permutation = self.get_permutation(14, 14)
conv5_3_feats.set_shape([self.batch_size, 14, 14, 512])
conv5_3_feats_flat = self.flatten_feats(conv5_3_feats, 512)
self.conv_feats = conv5_3_feats_flat
self.conv_feat_shape = [196, 512]
self.imgs = imgs
self.is_train = is_train
def basic_block(self, input_feats, name1, name2, is_train, bn, c, s=2):
""" A basic block of ResNets. """
branch1_feats = convolution_no_bias(input_feats, 1, 1, 4*c, s, s, name1+'_branch1')
branch1_feats = batch_norm(branch1_feats, name2+'_branch1', is_train, bn, None)
branch2a_feats = convolution_no_bias(input_feats, 1, 1, c, s, s, name1+'_branch2a')
branch2a_feats = batch_norm(branch2a_feats, name2+'_branch2a', is_train, bn, 'relu')
branch2b_feats = convolution_no_bias(branch2a_feats, 3, 3, c, 1, 1, name1+'_branch2b')
branch2b_feats = batch_norm(branch2b_feats, name2+'_branch2b', is_train, bn, 'relu')
branch2c_feats = convolution_no_bias(branch2b_feats, 1, 1, 4*c, 1, 1, name1+'_branch2c')
branch2c_feats = batch_norm(branch2c_feats, name2+'_branch2c', is_train, bn, None)
output_feats = branch1_feats + branch2c_feats
output_feats = nonlinear(output_feats, 'relu')
return output_feats
def basic_block2(self, input_feats, name1, name2, is_train, bn, c):
""" Another basic block of ResNets. """
branch2a_feats = convolution_no_bias(input_feats, 1, 1, c, 1, 1, name1+'_branch2a')
branch2a_feats = batch_norm(branch2a_feats, name2+'_branch2a', is_train, bn, 'relu')
branch2b_feats = convolution_no_bias(branch2a_feats, 3, 3, c, 1, 1, name1+'_branch2b')
branch2b_feats = batch_norm(branch2b_feats, name2+'_branch2b', is_train, bn, 'relu')
branch2c_feats = convolution_no_bias(branch2b_feats, 1, 1, 4*c, 1, 1, name1+'_branch2c')
branch2c_feats = batch_norm(branch2c_feats, name2+'_branch2c', is_train, bn, None)
output_feats = input_feats + branch2c_feats
output_feats = nonlinear(output_feats, 'relu')
return output_feats
def build_resnet50(self):
""" Build the ResNet50 net. """
bn = self.params.batch_norm
imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)
is_train = tf.placeholder(tf.bool)
conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')
conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, bn, 'relu')
pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')
res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, bn, 64, 1)
res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, bn, 64)
res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, bn, 64)
res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, bn, 128)
res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, bn, 128)
res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, bn, 128)
res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, bn, 128)
res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, bn, 256)
res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, bn, 256)
res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, bn, 256)
res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, bn, 256)
res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, bn, 256)
res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, bn, 256)
res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, bn, 512)
res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, bn, 512)
res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, bn, 512)
self.permutation = self.get_permutation(7, 7)
res5c_feats.set_shape([self.batch_size, 7, 7, 2048])
res5c_feats_flat = self.flatten_feats(res5c_feats, 2048)
self.conv_feats = res5c_feats_flat
self.conv_feat_shape = [49, 2048]
self.imgs = imgs
self.is_train = is_train
def build_resnet101(self):
""" Build the ResNet101 net. """
bn = self.params.batch_norm
imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)
is_train = tf.placeholder(tf.bool)
conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')
conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, bn, 'relu')
pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')
res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, bn, 64, 1)
res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, bn, 64)
res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, bn, 64)
res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, bn, 128)
temp = res3a_feats
for i in range(1, 4):
temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, bn, 128)
res3b3_feats = temp
res4a_feats = self.basic_block(res3b3_feats, 'res4a', 'bn4a', is_train, bn, 256)
temp = res4a_feats
for i in range(1, 23):
temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, bn, 256)
res4b22_feats = temp
res5a_feats = self.basic_block(res4b22_feats, 'res5a', 'bn5a', is_train, bn, 512)
res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, bn, 512)
res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, bn, 512)
self.permutation = self.get_permutation(7, 7)
res5c_feats.set_shape([self.batch_size, 7, 7, 2048])
res5c_feats_flat = self.flatten_feats(res5c_feats, 2048)
self.conv_feats = res5c_feats_flat
self.conv_feat_shape = [49, 2048]
self.imgs = imgs
self.is_train = is_train
def build_resnet152(self):
""" Build the ResNet152 net. """
bn = self.params.batch_norm
imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)
is_train = tf.placeholder(tf.bool)
conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')
conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, bn, 'relu')
pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')
res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, bn, 64, 1)
res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, bn, 64)
res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, bn, 64)
res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, bn, 128)
temp = res3a_feats
for i in range(1, 8):
temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, bn, 128)
res3b7_feats = temp
res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, bn, 256)
temp = res4a_feats
for i in range(1, 36):
temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, bn, 256)
res4b35_feats = temp
res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, bn, 512)
res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, bn, 512)
res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, bn, 512)
self.permutation = self.get_permutation(7, 7)
res5c_feats.set_shape([self.batch_size, 7, 7, 2048])
res5c_feats_flat = self.flatten_feats(res5c_feats, 2048)
self.conv_feats = res5c_feats_flat
self.conv_feat_shape = [49, 2048]
        self.imgs = imgs
self.is_train = is_train
def get_permutation(self, height, width):
""" Get the permutation corresponding to a snake-like walk as decribed by the paper. Used to flatten the convolutional feats. """
permutation = np.zeros(height*width, np.int32)
for i in range(height):
for j in range(width):
permutation[i*width+j] = i*width+j if i%2==0 else (i+1)*width-j-1
return permutation
def flatten_feats(self, feats, channels):
""" Flatten the feats. """
temp1 = tf.reshape(feats, [self.batch_size, -1, channels])
temp1 = tf.transpose(temp1, [1, 0, 2])
temp2 = tf.gather(temp1, self.permutation)
temp2 = tf.transpose(temp2, [1, 0, 2])
return temp2
def build_rnn(self):
""" Build the RNN. """
print("Building the RNN part...")
params = self.params
bn = params.batch_norm
is_train = self.is_train
batch_size = self.batch_size
dim_hidden = params.dim_hidden
dim_embed = params.dim_embed
max_ques_len = params.max_ques_len
num_facts = self.conv_feat_shape[0]
dim_fact = self.conv_feat_shape[1]
num_words = self.word_table.num_words
self.word_weight = np.exp(-np.array(self.word_table.word_freq)*self.class_balancing_factor)
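        # Exponential class balancing: a word with frequency f gets loss weight
        # exp(-f * class_balancing_factor), so frequent answers are
        # down-weighted while rare ones keep a weight close to 1.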
if not self.train_cnn:
facts = tf.placeholder(tf.float32, [batch_size, num_facts, dim_fact])
else:
facts = self.conv_feats
questions = tf.placeholder(tf.int32, [batch_size, max_ques_len])
question_lens = tf.placeholder(tf.int32, [batch_size])
answers = tf.placeholder(tf.int32, [batch_size])
answer_weights = tf.placeholder(tf.float32, [batch_size])
gru = tf.nn.rnn_cell.GRUCell(dim_hidden)
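        # One GRUCell object is reused for the question encoder, the input
        # fusion layer and (optionally) the memory update; each use sits in its
        # own variable scope, so the weights are created per scope, not shared.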
# Initialize the word embedding
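        # Stack the (presumably pre-trained) word vectors row-by-row in word
        # index order; with fix_embed_weight the table is a frozen constant,
        # otherwise it becomes a trainable variable initialised from it.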
idx2vec = np.array([self.word_table.word2vec[self.word_table.idx2word[i]] for i in range(num_words)])
if params.fix_embed_weight:
emb_w = tf.convert_to_tensor(idx2vec, tf.float32)
else:
emb_w = weight('emb_w', [num_words, dim_embed], init_val=idx2vec, group_id=1)
# Encode the questions
with tf.variable_scope('Question'):
word_list = tf.unpack(questions, axis=1)
ques_embed = [tf.nn.embedding_lookup(emb_w, word) for word in word_list]
ques_embed = tf.transpose(tf.pack(ques_embed), [1, 0, 2])
all_states, final_state = tf.nn.dynamic_rnn(gru, ques_embed, dtype=tf.float32)
question_enc = []
for k in range(batch_size):
current_ques_enc = tf.slice(all_states, [k, question_lens[k]-1, 0], [1, 1, dim_hidden])
question_enc.append(tf.squeeze(current_ques_enc))
question_enc = tf.pack(question_enc)
#ques_enc = final_state
# Encode the facts
with tf.name_scope('InputFusion'):
with tf.variable_scope('Forward'):
forward_states, _ = tf.nn.dynamic_rnn(gru, facts, dtype=tf.float32)
with tf.variable_scope('Backward'):
reversed_facts = tf.reverse(facts, [False, True, False])
backward_states, _ = tf.nn.dynamic_rnn(gru, reversed_facts, dtype=tf.float32)
backward_states = tf.reverse(backward_states, [False, True, False])
facts_enc = forward_states + backward_states
# Episodic Memory Update
with tf.variable_scope('EpisodicMemory'):
episode = EpisodicMemory(dim_hidden, num_facts, question_enc, facts_enc, params.attention, is_train, bn)
memory = tf.identity(question_enc)
# Tied memory weights
if params.tie_memory_weight:
with tf.variable_scope('Layer') as scope:
for t in range(params.memory_step):
if params.memory_update == 'gru':
memory = gru(episode.new_fact(memory), memory)[0]
else:
fact = episode.new_fact(memory)
expanded_memory = tf.concat(1, [memory, fact, question_enc])
memory = fully_connected(expanded_memory, dim_hidden, 'EM_fc', group_id=1)
memory = batch_norm(memory, 'EM_bn', is_train, bn, 'relu')
scope.reuse_variables()
# Untied memory weights
else:
for t in range(params.memory_step):
with tf.variable_scope('Layer%d' %t) as scope:
if params.memory_update == 'gru':
memory = gru(episode.new_fact(memory), memory)[0]
else:
fact = episode.new_fact(memory)
expanded_memory = tf.concat(1, [memory, fact, question_enc])
memory = fully_connected(expanded_memory, dim_hidden, 'EM_fc', group_id=1)
memory = batch_norm(memory, 'EM_bn', is_train, bn, 'relu')
memory = dropout(memory, 0.5, is_train)
# Compute the result
with tf.variable_scope('Result'):
expanded_memory = tf.concat(1, [memory, question_enc])
logits = fully_connected(expanded_memory, num_words, 'dec', group_id=1)
results = tf.argmax(logits, 1)
all_probs = tf.nn.softmax(logits)
probs = tf.reduce_max(all_probs, 1)
# Compute the loss
with tf.variable_scope('Loss'):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, answers)
loss0 = cross_entropy * answer_weights
loss0 = tf.reduce_sum(loss0) / tf.reduce_sum(answer_weights)
if self.train_cnn:
loss1 = params.weight_decay * (tf.add_n(tf.get_collection('l2_0')) + tf.add_n(tf.get_collection('l2_1')))
else:
loss1 = params.weight_decay * tf.add_n(tf.get_collection('l2_1'))
loss = loss0 + loss1
# Build the solver
if params.solver == 'adam':
solver = tf.train.AdamOptimizer(params.learning_rate)
elif params.solver == 'momentum':
solver = tf.train.MomentumOptimizer(params.learning_rate, params.momentum)
elif params.solver == 'rmsprop':
solver = tf.train.RMSPropOptimizer(params.learning_rate, params.decay, params.momentum)
else:
solver = tf.train.GradientDescentOptimizer(params.learning_rate)
tvars = tf.trainable_variables()
gs, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 3.0)
opt_op = solver.apply_gradients(zip(gs, tvars), global_step=self.global_step)
self.facts = facts
self.questions = questions
self.question_lens = question_lens
self.answers = answers
self.answer_weights = answer_weights
self.loss = loss
self.loss0 = loss0
self.loss1 = loss1
self.opt_op = opt_op
self.results = results
self.probs = probs
print("RNN part built.")
def get_feed_dict(self, batch, is_train, feats=None):
""" Get the feed dictionary for the current batch. """
if is_train:
# training phase
img_files, questions, question_lens, answers = batch
imgs = self.img_loader.load_imgs(img_files)
answer_weights = self.word_weight[answers]
if self.train_cnn:
return {self.imgs: imgs, self.questions: questions, self.question_lens: question_lens, self.answers: answers, self.answer_weights: answer_weights, self.is_train: is_train}
else:
return {self.facts: feats, self.questions: questions, self.question_lens: question_lens, self.answers: answers, self.answer_weights: answer_weights, self.is_train: is_train}
else:
# testing or validation phase
img_files, questions, question_lens = batch
imgs = self.img_loader.load_imgs(img_files)
if self.train_cnn:
return {self.imgs: imgs, self.questions: questions, self.question_lens: question_lens, self.is_train: is_train}
else:
return {self.facts: feats, self.questions: questions, self.question_lens: question_lens, self.is_train: is_train}
| [
"[email protected]"
] | |
29e16f0faaa4866bc0815c2235ece255f754032e | d5eb2fe5d49b581562ae2bc660d08ca80a03d331 | /PythonSandbox/src/misc/num_digits_in_integer.py | 6757434647672cd9e95d213417d05eed2cbab5ac | [] | no_license | mcxu/code-sandbox | fd5aa2e593057901d281a0e74db8957777b06cf3 | a785231582bda8578f79982e2dcddd2f2ab559b4 | refs/heads/master | 2023-07-10T02:07:24.180947 | 2023-07-08T03:31:48 | 2023-07-08T03:31:48 | 130,493,607 | 4 | 2 | null | 2023-01-15T22:53:29 | 2018-04-21T16:49:40 | Python | UTF-8 | Python | false | false | 130 | py | '''
Given an integer n, return a map showing the counts of each single digit in n.
Condition: Do not convert n into string.
'''
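# A sketch solution for the prompt above: peel off digits with n % 10 so no
# string conversion is needed. Treating 0 as a single digit and handling
# negatives via abs() are assumptions; the prompt pins down neither.
def digit_counts(n):
    counts = {}
    n = abs(n)
    if n == 0:
        return {0: 1}
    while n > 0:
        digit = n % 10  # least-significant digit
        counts[digit] = counts.get(digit, 0) + 1
        n //= 10
    return counts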
| [
"[email protected]"
] | |
5ea5fe910e1ef86b506005a39e879e50f77d83f4 | d532b85841b459c61d88d380e88dd08d29836d43 | /solutions/473_matchsticks_to_square.py | 99245aad8635cdcb9f58acde68ea9d0399c61f3b | [
"MIT"
] | permissive | YiqunPeng/leetcode_pro | ad942468df5506de9dc48a4019933f658e2a3121 | 4a508a982b125a3a90ea893ae70863df7c99cc70 | refs/heads/master | 2022-05-15T09:32:02.699180 | 2022-05-14T16:32:17 | 2022-05-14T16:32:17 | 182,453,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | from typing import List
class Solution:
def makesquare(self, matchsticks: List[int]) -> bool:
l = sum(matchsticks)
if l % 4 != 0:
return False
size = l // 4
matchsticks.sort()
return self._dfs(matchsticks, len(matchsticks)-1, [size] * 4)
def _dfs(self, ms, pos, sizes):
if pos == -1:
return sum(sizes) == 0
for i in range(4):
if sizes[i] < ms[pos]:
continue
sizes[i] -= ms[pos]
if self._dfs(ms, pos - 1, sizes):
return True
sizes[i] += ms[pos]
return False
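# Usage sketch: the ascending sort plus filling from the largest stick first
# prunes the DFS quickly.
#   Solution().makesquare([1, 1, 2, 2, 2])  # -> True (sides 2 | 2 | 2 | 1+1)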
| [
"[email protected]"
] | |
85f970aac1289aa71773cf2f9f5fee61ae7a289f | a939e018333a9ecd26ddc618f99835b7eb381686 | /mapred_parser/user_merger/.svn/text-base/reducer.py.svn-base | c71b89a519663ca1e57f7a5f17e75be85bb0ab96 | [] | no_license | cash2one/crawl_youtube | bff5ba254001c2f31f770e55a4aca39bc54e45ee | 0dc40186a1d89da2b00f29d4f4edfdc5470eb4fc | refs/heads/master | 2021-01-16T22:30:17.800282 | 2016-02-18T11:50:09 | 2016-02-18T11:50:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,709 | #!/usr/bin/python
# coding=utf-8
import os
import sys
import time
import base64
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../')
from le_crawler.proto.video.ttypes import OriginalUser
from le_crawler.common.utils import str2mediavideo, thrift2str
user_merge_field = set(['user_name', 'url', 'portrait_url', 'play_num', 'fans_num'])
class MergeItem:
def __init__(self):
self.reset('')
def reset(self, user_url=None):
self._data = []
self._user_url = user_url
self._url = None # used only in which length of self._data is 1
self._user = None
def get_user_url(self):
return self._user_url
def add_item(self, user_url, out_type, url, data_base64):
is_out_video = out_type == 'video'
self._data.append((data_base64, is_out_video))
self._url = url
def _merge_user(self, datas):
new_user = OriginalUser()
for k, v in new_user.__dict__.iteritems():
if k not in user_merge_field or v:
continue
for data in datas:
old_v = getattr(data[0].user, k)
if old_v:
setattr(new_user, k, old_v)
if k != 'url':
for item in user_merge_field:
old_v = getattr(data[0].user, item)
if not getattr(new_user, item) and old_v:
setattr(new_user, item, old_v)
break
new_user.update_time = int(time.time())
self._user = new_user
def _print_video(self, datas):
for data in datas:
data[0].user = self._user
video_str = thrift2str(data[0])
if not video_str:
sys.stderr.write('ERROR: failed in thrift2str. %s\n' % data[0].url)
continue
video_base64 = base64.b64encode(video_str)
if not video_base64:
sys.stderr.write('ERROR: failed in base64 encode. %s\n' % data[0].url)
continue
print 'unique' + '\t' + data[0].url + '\t' + str(self._user_url) + '\t' + video_base64
if data[1]:
print 'video' + '\t' + data[0].url + '\t' + str(self._user_url) + '\t' + video_base64
def print_item(self):
if not self._data:
return
if len(self._data) == 1:
print 'unique' + '\t' + self._url + '\t' + str(self._user_url) + '\t' + self._data[0][0]
if self._data[0][1]:
print 'video' + '\t' + self._url + '\t' + str(self._user_url) + '\t' + self._data[0][0]
return
for idx, data_group in enumerate(self._data):
try:
data = str2mediavideo(base64.b64decode(data_group[0]))
      except:
        sys.stderr.write('ERROR: failed in base64 decode. %s\n' % self._user_url)
        data = None  # mark as undecodable; filtered out below by `if item[0]`
      self._data[idx] = (data, data_group[1])
self._data = [item for item in self._data if item[0]]
self._data.sort(cmp=lambda x, y: (y[0].user.update_time or 0) - (x[0].user.update_time or 0))
self._merge_user(self._data)
self._print_video(self._data)
def main():
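  # Input contract (Hadoop-streaming reducer): one record per line,
  # tab-separated as <user_url>\t<url>\t<out_type>\t<base64 thrift video>,
  # pre-sorted by user_url so all records of one user arrive consecutively.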
merge_item = MergeItem()
while 1:
line = sys.stdin.readline()
if not line:
break
line_data = line.strip().split('\t', 3)
if len(line_data) != 4:
sys.stderr.write(str(len(line_data)) + ' ' + str(line_data) + '\n')
continue
user_url, url, out_type, data_base64 = line_data
if user_url == 'None':
print 'unique' + '\t' + url + '\t' + user_url + '\t' + data_base64
if out_type == 'video':
print 'video' + '\t' + url + '\t' + user_url + '\t' + data_base64
continue
if user_url == merge_item.get_user_url():
merge_item.add_item(user_url, out_type, url, data_base64)
else:
merge_item.print_item()
merge_item.reset(user_url)
merge_item.add_item(user_url, out_type, url, data_base64)
merge_item.print_item()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | ||
3f2cee0071989d5dddcf5e06d71d0c53ccf74a79 | 19ddab74600f71700a6b693281d0180d5271f295 | /leetcode/119_杨辉三角2.py | 5ca9f3c4a447aa5bf40bb8293558abdaa26cfa73 | [] | no_license | zhulf0804/Coding.Python | 4d55a430da1a8077c81feba65c13ac654aaf094a | 46ab03e23d15ebd5434ef4dd5ae99130000b00a5 | refs/heads/master | 2022-09-14T18:40:59.880941 | 2022-08-20T08:25:51 | 2022-08-20T08:25:51 | 213,113,482 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from typing import List
class Solution:
def getRow(self, rowIndex: int) -> List[int]:
if rowIndex == 0:
return [1]
if rowIndex == 1:
return [1, 1]
pre = [1, 1]
for i in range(2, rowIndex+1):
cur = [1] * (i + 1)
for j in range(1, i):
cur[j] = pre[j] + pre[j - 1]
pre = cur
return cur
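    def getRowInPlace(self, rowIndex: int) -> List[int]:
        # Alternative sketch using a single row of extra space: update from
        # right to left so row[j - 1] still holds the previous row's value
        # when it is read.
        row = [1] * (rowIndex + 1)
        for i in range(2, rowIndex + 1):
            for j in range(i - 1, 0, -1):
                row[j] += row[j - 1]
        return row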
rowIndex = 3
s = Solution()
print(s.getRow(rowIndex))  # -> [1, 3, 3, 1]
"[email protected]"
] | |
48c1e248c0c54b9df4c45b1abc82c3c75f4870a9 | ae65873c3584cef7139066b224daad04410af6d2 | /MySQL.py | a042fc4c7860d159e362459d73edbfefca29ad33 | [] | no_license | rajatkashyap/Python | 2240c7472d07803c460c7a55d570e20694b694f9 | f74c85c65b0e209a5f7ab25b653d42835222faaf | refs/heads/master | 2022-06-25T19:20:52.847498 | 2022-06-08T14:40:45 | 2022-06-08T14:40:45 | 145,714,257 | 0 | 0 | null | 2022-04-25T00:18:37 | 2018-08-22T13:39:14 | Python | UTF-8 | Python | false | false | 324 | py | from mysql.connector import (connection)
cnx = connection.MySQLConnection(user='root', password='rajat', host='127.0.0.1', database='db')
cursor = cnx.cursor()
query = "select * from jobs"
cursor.execute(query)
for (city_id, city_name, country_id, x) in cursor:
    print city_id, city_name, country_id
cursor.close()
cnx.close()
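# Sketch of the same query with a bound parameter: %s placeholders let the
# driver escape values instead of building SQL by string formatting. The
# country_id column in the WHERE clause is an assumption about the jobs table.
# cursor.execute("select * from jobs where country_id = %s", (some_id,))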
| [
"[email protected]"
] | |
b2d051a597b6cfb783aa64205d665a477b8ac166 | ee561aa019a80f621007f82bdb21fe6ed8b6278f | /devel/turtlebot3-melodic-devel/turtlebot3_example/cmake/turtlebot3_example-genmsg-context.py | 09017c533fb2e6caa64d1dea49cebe24fae06a11 | [] | no_license | allanwhledu/agv_edu_prj | 4fb5fbf14cf0a14edd57ee9bd87903dc25d4d4f2 | 643a8a96ca7027529332f25208350de78c07e33d | refs/heads/master | 2020-09-23T23:32:54.430035 | 2019-12-04T07:47:55 | 2019-12-04T07:47:55 | 225,613,426 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3Action.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3ActionGoal.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3ActionResult.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3ActionFeedback.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3Goal.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3Result.msg;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg/Turtlebot3Feedback.msg"
services_str = ""
pkg_name = "turtlebot3_example"
dependencies_str = "std_msgs;geometry_msgs;actionlib_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "turtlebot3_example;/home/sjtuwhl/ROBOTLAB_WS/devel/share/turtlebot3_example/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg;actionlib_msgs;/opt/ros/melodic/share/actionlib_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = 'TRUE' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"[email protected]"
] | |
040a421fd08c22b88661d55b4267176f9cdbbae8 | 161d43b73438c4423949f4d4898e44f015691a0e | /GaussianProcess/__init__.py | 41d152d0ef74cf3acec2895b134b155ca3abafde | [] | no_license | apetri/CFHTLens_analysis | a44f754114a6a6129088f0771cc558baed987462 | b19343b43b54870f7950bcd9ea76bbe829448c44 | refs/heads/master | 2020-05-21T22:06:24.551906 | 2017-12-14T16:17:08 | 2017-12-14T16:17:08 | 16,521,933 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | from .interpolation import *
__version__ = "0.1"
"[email protected]"
] | |
9427dd2eb8619763631b53850f3d848d5866e9e7 | eacff46eda2c6b509449979a16002b96d4645d8e | /Collections-a-installer/community-general-2.4.0/plugins/modules/monitoring/airbrake_deployment.py | 3e7938bfba10ac8e1d2080f7ed8ae71ed9589628 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | d-amien-b/simple-getwordpress | 5e6d4d15d5f87124ab591e46b63fec552998fdc3 | da90d515a0aa837b633d50db4d91d22b031c04a2 | refs/heads/master | 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 | MIT | 2021-03-31T16:16:45 | 2021-03-26T07:30:00 | HTML | UTF-8 | Python | false | false | 6,696 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: airbrake_deployment
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Patrick Humpal (@phumpal)"
short_description: Notify airbrake about app deployments
description:
- Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
- Parameter I(token) has been deprecated for community.general 0.2.0. Please remove entry.
options:
project_id:
description:
- Airbrake PROJECT_ID
required: false
type: str
version_added: '0.2.0'
project_key:
description:
- Airbrake PROJECT_KEY.
required: false
type: str
version_added: '0.2.0'
environment:
description:
- The airbrake environment name, typically 'production', 'staging', etc.
required: true
type: str
user:
description:
- The username of the person doing the deployment
required: false
type: str
repo:
description:
- URL of the project repository
required: false
type: str
revision:
description:
- A hash, number, tag, or other identifier showing what revision from version control was deployed
required: false
type: str
version:
description:
- A string identifying what version was deployed
required: false
type: str
version_added: '1.0.0'
url:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
default: "https://api.airbrake.io/api/v4/projects/"
type: str
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
type: bool
token:
description:
- This parameter (API token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
required: false
type: str
requirements: []
'''
EXAMPLES = '''
- name: Notify airbrake about an app deployment
community.general.airbrake_deployment:
project_id: '12345'
project_key: 'AAAAAA'
environment: staging
user: ansible
revision: '4.2'
- name: Notify airbrake about an app deployment, using git hash as revision
community.general.airbrake_deployment:
project_id: '12345'
project_key: 'AAAAAA'
environment: staging
user: ansible
revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
version: '0.2.0'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=False, no_log=True, type='str'),
project_id=dict(required=False, no_log=True, type='str'),
project_key=dict(required=False, no_log=True, type='str'),
environment=dict(required=True, type='str'),
user=dict(required=False, type='str'),
repo=dict(required=False, type='str'),
revision=dict(required=False, type='str'),
version=dict(required=False, type='str'),
url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True,
required_together=[('project_id', 'project_key')],
mutually_exclusive=[('project_id', 'token')],
)
# Build list of params
params = {}
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True)
if module.params["token"]:
# v2 API documented at https://airbrake.io/docs/legacy-xml-api/#tracking-deploys
if module.params["environment"]:
params["deploy[rails_env]"] = module.params["environment"]
if module.params["user"]:
params["deploy[local_username]"] = module.params["user"]
if module.params["repo"]:
params["deploy[scm_repository]"] = module.params["repo"]
if module.params["revision"]:
params["deploy[scm_revision]"] = module.params["revision"]
# version not supported in v2 API; omit
module.deprecate("Parameter 'token' is deprecated since community.general 0.2.0. Please remove "
"it and use 'project_id' and 'project_key' instead",
version='3.0.0', collection_name='community.general') # was Ansible 2.14
params["api_key"] = module.params["token"]
# Allow sending to Airbrake compliant v2 APIs
if module.params["url"] == 'https://api.airbrake.io/api/v4/projects/':
url = 'https://api.airbrake.io/deploys.txt'
else:
url = module.params["url"]
# Send the data to airbrake
data = urlencode(params)
response, info = fetch_url(module, url, data=data)
if module.params["project_id"] and module.params["project_key"]:
# v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
if module.params["environment"]:
params["environment"] = module.params["environment"]
if module.params["user"]:
params["username"] = module.params["user"]
if module.params["repo"]:
params["repository"] = module.params["repo"]
if module.params["revision"]:
params["revision"] = module.params["revision"]
if module.params["version"]:
params["version"] = module.params["version"]
# Build deploy url
url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
json_body = module.jsonify(params)
# Build header
headers = {'Content-Type': 'application/json'}
# Notify Airbrake of deploy
response, info = fetch_url(module, url, data=json_body,
headers=headers, method='POST')
if info['status'] == 200 or info['status'] == 201:
module.exit_json(changed=True)
else:
module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e23d8667131d425957a2d499281cc4a48a30ab3c | 3ea7233cca492f36130e6e2da253409e90c97526 | /netneurotools/freesurfer.py | 0b73400b3235e911137fbdea37eb01f861ed03fb | [
"BSD-3-Clause"
] | permissive | giuliabaracc/netneurotools | b26aa43ec9a34bb2ce3da43a734c955edd375327 | 8532cc136261b6b70e40a63070a968a9b2519c3a | refs/heads/master | 2020-12-20T10:14:57.141250 | 2020-01-24T16:21:54 | 2020-01-24T16:21:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,795 | py | # -*- coding: utf-8 -*-
"""
Functions for working with FreeSurfer data and parcellations
"""
import os
import os.path as op
import warnings
from nibabel.freesurfer import read_annot, read_geometry
import numpy as np
from scipy.ndimage.measurements import _stats, labeled_comprehension
from scipy.spatial.distance import cdist
from .datasets import fetch_fsaverage
from .stats import gen_spinsamples
from .utils import check_fs_subjid, run
def apply_prob_atlas(subject_id, gcs, hemi, *, orig='white', annot=None,
ctab=None, subjects_dir=None, use_cache=True,
quiet=False):
"""
Creates an annotation file for `subject_id` by applying atlas in `gcs`
Runs subprocess calling FreeSurfer's "mris_ca_label" function; as such,
    FreeSurfer must be installed and accessible on the local system path.
Parameters
----------
subject_id : str
FreeSurfer subject ID
gcs : str
Filepath to .gcs file containing classifier array
hemi : {'lh', 'rh'}
Hemisphere corresponding to `gcs` file
orig : str, optional
Original surface to which to apply classifer. Default: 'white'
annot : str, optional
Path to output annotation file to generate. If set to None, the name is
created from the provided `hemi` and `gcs`. If provided as a
relative path, it is assumed to stem from `subjects_dir`/`subject_id`.
Default: None
ctab : str, optional
Path to colortable corresponding to `gcs`. Default: None
subjects_dir : str, optional
Path to FreeSurfer subject directory. If not set, will inherit from
the environmental variable $SUBJECTS_DIR. Default: None
use_cache : bool, optional
Whether to check for existence of `annot` in directory specified by
`{subjects_dir}/{subject_id}/label' and use that, if it exists. If
False, will create a new annot file. Default: True
quiet : bool, optional
Whether to restrict status messages. Default: False
Returns
-------
annot : str
Path to generated annotation file
"""
cmd = 'mris_ca_label {opts}{subject_id} {hemi} {hemi}.sphere.reg ' \
'{gcs} {annot}'
if hemi not in ['rh', 'lh']:
raise ValueError('Provided hemisphere designation `hemi` must be one '
'of \'rh\' or \'lh\'. Provided: {}'.format(hemi))
if not op.isfile(gcs):
raise ValueError('Cannot find specified `gcs` file {}.'.format(gcs))
subject_id, subjects_dir = check_fs_subjid(subject_id, subjects_dir)
# add all the options together, as specified
opts = ''
if ctab is not None and op.isfile(ctab):
opts += '-t {} '.format(ctab)
if orig is not None:
opts += '-orig {} '.format(orig)
if subjects_dir is not None:
opts += '-sdir {} '.format(subjects_dir)
else:
subjects_dir = os.environ['SUBJECTS_DIR']
# generate output filename
if annot is None:
base = '{}.{}.annot'.format(hemi, gcs[:-4])
annot = op.join(subjects_dir, subject_id, 'label', base)
else:
# if not a full path, assume relative from subjects_dir/subject_id
if not annot.startswith(op.abspath(os.sep)):
annot = op.join(subjects_dir, subject_id, annot)
# if annotation file doesn't exist or we explicitly want to make a new one
if not op.isfile(annot) or not use_cache:
run(cmd.format(opts=opts, subject_id=subject_id, hemi=hemi,
gcs=gcs, annot=annot),
quiet=quiet)
return annot
def _decode_list(vals):
""" List decoder
"""
return [l.decode() if hasattr(l, 'decode') else l for l in vals]
def find_parcel_centroids(*, lhannot, rhannot, version='fsaverage',
surf='sphere', drop=None):
"""
Returns vertex coords corresponding to centroids of parcels in annotations
Note that using any other `surf` besides the default of 'sphere' may result
in centroids that are not directly within the parcels themselves due to
sulcal folding patterns.
Parameters
----------
{lh,rh}annot : str
Path to .annot file containing labels of parcels on the {left,right}
hemisphere. These must be specified as keyword arguments to avoid
accidental order switching.
version : str, optional
Specifies which version of `fsaverage` provided annotation files
correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
surf : str, optional
Specifies which surface projection of fsaverage to use for finding
parcel centroids. Default: 'sphere'
drop : list, optional
Specifies regions in {lh,rh}annot for which the parcel centroid should
not be calculated. If not specified, centroids for 'unknown' and
'corpuscallosum' are not calculated. Default: None
Returns
-------
centroids : (N, 3) numpy.ndarray
xyz coordinates of vertices closest to the centroid of each parcel
defined in `lhannot` and `rhannot`
hemiid : (N,) numpy.ndarray
Array denoting hemisphere designation of coordinates in `centroids`,
where `hemiid=0` denotes the left and `hemiid=1` the right hemisphere
"""
if drop is None:
drop = [
'unknown', 'corpuscallosum', # default FreeSurfer
'Background+FreeSurfer_Defined_Medial_Wall' # common alternative
]
drop = _decode_list(drop)
surfaces = fetch_fsaverage(version)[surf]
centroids, hemiid = [], []
for n, (annot, surf) in enumerate(zip([lhannot, rhannot], surfaces)):
vertices, faces = read_geometry(surf)
labels, ctab, names = read_annot(annot)
names = _decode_list(names)
for lab in np.unique(labels):
if names[lab] in drop:
continue
coords = np.atleast_2d(vertices[labels == lab].mean(axis=0))
roi = vertices[np.argmin(cdist(vertices, coords), axis=0)[0]]
centroids.append(roi)
hemiid.append(n)
return np.row_stack(centroids), np.asarray(hemiid)
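# Usage sketch (the annotation paths are placeholders, not shipped files; any
# .annot pair aligned to the chosen fsaverage version works):
#   centroids, hemiid = find_parcel_centroids(lhannot='lh.aparc.annot',
#                                             rhannot='rh.aparc.annot')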
def parcels_to_vertices(data, *, lhannot, rhannot, drop=None):
"""
Projects parcellated `data` to vertices defined in annotation files
Assigns np.nan to all ROIs in `drop`
Parameters
----------
data : (N,) numpy.ndarray
Parcellated data to be projected to vertices. Parcels should be ordered
by [left, right] hemisphere; ordering within hemisphere should
correspond to the provided annotation files.
{lh,rh}annot : str
Path to .annot file containing labels of parcels on the {left,right}
hemisphere. These must be specified as keyword arguments to avoid
accidental order switching.
drop : list, optional
Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
will be inserted in place of the these regions in the returned data. If
not specified, 'unknown' and 'corpuscallosum' are assumed to not be
present. Default: None
    Returns
    -------
projected : numpy.ndarray
Vertex-level data
"""
if drop is None:
drop = [
'unknown', 'corpuscallosum', # default FreeSurfer
'Background+FreeSurfer_Defined_Medial_Wall' # common alternative
]
drop = _decode_list(drop)
data = np.vstack(data)
# check this so we're not unduly surprised by anything...
n_vert = expected = 0
for a in [lhannot, rhannot]:
vn, _, names = read_annot(a)
n_vert += len(vn)
names = _decode_list(names)
expected += len(names) - len(set(drop) & set(names))
if expected != len(data):
raise ValueError('Number of parcels in provided annotation files '
'differs from size of parcellated data array.\n'
' EXPECTED: {} parcels\n'
' RECEIVED: {} parcels'
.format(expected, len(data)))
projected = np.zeros((n_vert, data.shape[-1]), dtype=data.dtype)
start = end = n_vert = 0
for annot in [lhannot, rhannot]:
# read files and update end index for `data`
labels, ctab, names = read_annot(annot)
names = _decode_list(names)
todrop = set(names) & set(drop)
end += len(names) - len(todrop) # unknown and corpuscallosum
# get indices of unknown and corpuscallosum and insert NaN values
inds = sorted([names.index(f) for f in todrop])
inds = [f - n for n, f in enumerate(inds)]
currdata = np.insert(data[start:end], inds, np.nan, axis=0)
# project to vertices and store
projected[n_vert:n_vert + len(labels), :] = currdata[labels]
start = end
n_vert += len(labels)
return np.squeeze(projected)
def vertices_to_parcels(data, *, lhannot, rhannot, drop=None):
"""
Reduces vertex-level `data` to parcels defined in annotation files
Takes average of vertices within each parcel, excluding np.nan values
(i.e., np.nanmean). Assigns np.nan to parcels for which all vertices are
np.nan.
Parameters
----------
data : (N,) numpy.ndarray
Vertex-level data to be reduced to parcels
{lh,rh}annot : str
Path to .annot file containing labels to parcels on the {left,right}
hemisphere
drop : list, optional
Specifies regions in {lh,rh}annot that should be removed from the
parcellated version of `data`. If not specified, 'unknown' and
'corpuscallosum' will be removed. Default: None
    Returns
    -------
reduced : numpy.ndarray
Parcellated `data`, without regions specified in `drop`
"""
if drop is None:
drop = [
'unknown', 'corpuscallosum', # default FreeSurfer
'Background+FreeSurfer_Defined_Medial_Wall' # common alternative
]
drop = _decode_list(drop)
data = np.vstack(data)
n_parc = expected = 0
for a in [lhannot, rhannot]:
vn, _, names = read_annot(a)
expected += len(vn)
names = _decode_list(names)
n_parc += len(names) - len(set(drop) & set(names))
if expected != len(data):
raise ValueError('Number of vertices in provided annotation files '
'differs from size of vertex-level data array.\n'
' EXPECTED: {} vertices\n'
' RECEIVED: {} vertices'
.format(expected, len(data)))
reduced = np.zeros((n_parc, data.shape[-1]), dtype=data.dtype)
start = end = n_parc = 0
for annot in [lhannot, rhannot]:
# read files and update end index for `data`
labels, ctab, names = read_annot(annot)
names = _decode_list(names)
indices = np.unique(labels)
end += len(labels)
for idx in range(data.shape[-1]):
# get average of vertex-level data within parcels
# set all NaN values to 0 before calling `_stats` because we are
# returning sums, so the 0 values won't impact the sums (if we left
# the NaNs then all parcels with even one NaN entry would be NaN)
currdata = np.squeeze(data[start:end, idx])
isna = np.isnan(currdata)
counts, sums = _stats(np.nan_to_num(currdata), labels, indices)
# however, we do need to account for the NaN values in the counts
# so that our means are similar to what we'd get from e.g.,
# np.nanmean here, our "sums" are the counts of NaN values in our
# parcels
_, nacounts = _stats(isna, labels, indices)
counts = (np.asanyarray(counts, dtype=float)
- np.asanyarray(nacounts, dtype=float))
with np.errstate(divide='ignore', invalid='ignore'):
currdata = sums / counts
# get indices of unkown and corpuscallosum and delete from parcels
inds = sorted([names.index(f) for f in set(drop) & set(names)])
currdata = np.delete(currdata, inds)
# store parcellated data
reduced[n_parc:n_parc + len(names) - len(inds), idx] = currdata
start = end
n_parc += len(names) - len(inds)
return np.squeeze(reduced)
def _get_fsaverage_coords(version='fsaverage', surface='sphere'):
"""
Gets vertex coordinates for specified `surface` of fsaverage `version`
Parameters
----------
version : str, optional
One of {'fsaverage', 'fsaverage3', 'fsaverage4', 'fsaverage5',
'fsaverage6'}. Default: 'fsaverage'
surface : str, optional
Surface for which to return vertex coordinates. Default: 'sphere'
Returns
-------
coords : (N, 3) numpy.ndarray
xyz coordinates of vertices for {left,right} hemisphere
hemiid : (N,) numpy.ndarray
Array denoting hemisphere designation of entries in `coords`, where
`hemiid=0` denotes the left and `hemiid=1` the right hemisphere
"""
# get coordinates and hemisphere designation for spin generation
lhsphere, rhsphere = fetch_fsaverage(version)[surface]
coords, hemi = [], []
for n, sphere in enumerate([lhsphere, rhsphere]):
coords.append(read_geometry(sphere)[0])
hemi.append(np.ones(len(coords[-1])) * n)
return np.row_stack(coords), np.hstack(hemi)
def spin_data(data, *, lhannot, rhannot, version='fsaverage', n_rotate=1000,
spins=None, drop=None, seed=None, verbose=False,
return_cost=False):
"""
Projects parcellated `data` to surface, rotates, and re-parcellates
Projection to the surface uses `{lh,rh}annot` files. Rotation uses vertex
coordinates from the specified fsaverage `version` and relies on
:func:`netneurotools.stats.gen_spinsamples`. Re-parcellated data will not
be exactly identical to original values due to re-averaging process.
Parcels subsumed by regions in `drop` will be listed as NaN.
Parameters
----------
data : (N,) numpy.ndarray
Parcellated data to be rotated. Parcels should be ordered by [left,
right] hemisphere; ordering within hemisphere should correspond to the
provided `{lh,rh}annot` annotation files.
{lh,rh}annot : str
Path to .annot file containing labels to parcels on the {left,right}
hemisphere
version : str, optional
Specifies which version of `fsaverage` provided annotation files
correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
n_rotate : int, optional
Number of rotations to generate. Default: 1000
spins : array_like, optional
Pre-computed spins to use instead of generating them on the fly. If not
provided will use other provided parameters to create them. Default:
None
drop : list, optional
Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
will be inserted in place of the these regions in the returned data. If
not specified, 'unknown' and 'corpuscallosum' are assumed to not be
present. Default: None
seed : {int, np.random.RandomState instance, None}, optional
Seed for random number generation. Default: None
verbose : bool, optional
Whether to print occasional status messages. Default: False
return_cost : bool, optional
Whether to return cost array (specified as Euclidean distance) for each
coordinate for each rotation. Currently this option is not supported if
        pre-computed `spins` are provided. Default: False
Returns
-------
rotated : (N, `n_rotate`) numpy.ndarray
        Rotated `data`
cost : (N, `n_rotate`,) numpy.ndarray
Cost (specified as Euclidean distance) of re-assigning each coordinate
for every rotation in `spinsamples`. Only provided if `return_cost` is
True.
"""
if drop is None:
drop = [
'unknown', 'corpuscallosum', # default FreeSurfer
'Background+FreeSurfer_Defined_Medial_Wall' # common alternative
]
# get coordinates and hemisphere designation for spin generation
vertices = parcels_to_vertices(data, lhannot=lhannot, rhannot=rhannot,
drop=drop)
if spins is None:
coords, hemiid = _get_fsaverage_coords(version, 'sphere')
if len(vertices) != len(coords):
raise ValueError('Provided annotation files have a different '
'number of vertices than the specified fsaverage '
'surface.\n ANNOTATION: {} vertices\n '
'FSAVERAGE: {} vertices'
.format(len(vertices), len(coords)))
spins, cost = gen_spinsamples(coords, hemiid, n_rotate=n_rotate,
seed=seed, verbose=verbose)
else:
spins = np.asarray(spins, dtype='int32')
if len(spins) != len(vertices):
raise ValueError('Provided `spins` array has a different number '
'of vertices than the provided annotation files.'
'\n ANNOTATION: {} vertices\n SPINS: '
'{} vertices\n'
.format(len(vertices), len(spins)))
if spins.shape[-1] != n_rotate:
warnings.warn('Shape of provided `spins` array does not match '
'number of rotations requested with `n_rotate`. '
'Ignoring specified `n_rotate` parameter and using '
'all provided `spins`.')
n_rotate = spins.shape[-1]
if return_cost:
raise ValueError('Cannot `return_cost` when `spins` are provided.')
spun = np.zeros(data.shape + (n_rotate,))
for n in range(n_rotate):
if verbose:
msg = f'Reducing vertices to parcels: {n:>5}/{n_rotate}'
print(msg, end='\b' * len(msg), flush=True)
spun[..., n] = vertices_to_parcels(vertices[spins[:, n]],
lhannot=lhannot, rhannot=rhannot,
drop=drop)
if verbose:
print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)
if return_cost:
return spun, cost
return spun
def spin_parcels(*, lhannot, rhannot, version='fsaverage', n_rotate=1000,
drop=None, seed=None, return_cost=False, **kwargs):
"""
Rotates parcels in `{lh,rh}annot` and re-assigns based on maximum overlap
Vertex labels are rotated with :func:`netneurotools.stats.gen_spinsamples`
and a new label is assigned to each *parcel* based on the region maximally
overlapping with its boundaries.
Parameters
----------
{lh,rh}annot : str
Path to .annot file containing labels to parcels on the {left,right}
hemisphere
version : str, optional
Specifies which version of `fsaverage` provided annotation files
correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
n_rotate : int, optional
Number of rotations to generate. Default: 1000
drop : list, optional
Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
will be inserted in place of the these regions in the returned data. If
not specified, 'unknown' and 'corpuscallosum' are assumed to not be
present. Default: None
return_cost : bool, optional
Whether to return cost array (specified as Euclidean distance) for each
        coordinate for each rotation. Default: False
kwargs : key-value, optional
Key-value pairs passed to :func:`netneurotools.stats.gen_spinsamples`
Returns
-------
spinsamples : (N, `n_rotate`) numpy.ndarray
Resampling matrix to use in permuting data parcellated with labels from
{lh,rh}annot, where `N` is the number of parcels. Indices of -1
indicate that the parcel was completely encompassed by regions in
`drop` and should be ignored.
cost : (N, `n_rotate`,) numpy.ndarray
Cost (specified as Euclidean distance) of re-assigning each coordinate
for every rotation in `spinsamples`. Only provided if `return_cost` is
True.
"""
def overlap(vals):
""" Returns most common non-negative value in `vals`; -1 if all neg
"""
vals = np.asarray(vals)
vals, counts = np.unique(vals[vals > 0], return_counts=True)
try:
return vals[counts.argmax()]
except ValueError:
return -1
if drop is None:
drop = [
'unknown', 'corpuscallosum', # default FreeSurfer
'Background+FreeSurfer_Defined_Medial_Wall' # common alternative
]
drop = _decode_list(drop)
# get vertex-level labels (set drop labels to - values)
vertices, end = [], 0
for n, annot in enumerate([lhannot, rhannot]):
labels, ctab, names = read_annot(annot)
names = _decode_list(names)
todrop = set(names) & set(drop)
inds = [names.index(f) - n for n, f in enumerate(todrop)]
labs = np.arange(len(names) - len(inds)) + (end - (len(inds) * n))
insert = np.arange(-1, -(len(inds) + 1), -1)
vertices.append(np.insert(labs, inds, insert)[labels])
end += len(names)
vertices = np.hstack(vertices)
labels = np.unique(vertices)
mask = labels > -1
# get coordinates and hemisphere designation for spin generation
coords, hemiid = _get_fsaverage_coords(version, 'sphere')
if len(vertices) != len(coords):
raise ValueError('Provided annotation files have a different number '
'of vertices than the specified fsaverage surface.\n'
' ANNOTATION: {} vertices\n'
' FSAVERAGE: {} vertices'
.format(len(vertices), len(coords)))
# spin and assign regions based on max overlap
spins, cost = gen_spinsamples(coords, hemiid, n_rotate=n_rotate, **kwargs)
regions = np.zeros((len(labels[mask]), n_rotate), dtype='int32')
for n in range(n_rotate):
regions[:, n] = labeled_comprehension(vertices[spins[:, n]], vertices,
labels, overlap, int, -1)[mask]
if return_cost:
return regions, cost
return regions
| [
"[email protected]"
] | |
6b4427adecbd6d4a38872c33dcbca2e3d68aeb29 | e8ae11e5017507da59e2e92d423b6a1994490de4 | /env/lib/python2.7/site-packages/azure/batch/models/pool_delete_options.py | d959c796b779edb07a5117788f554dc19bb6cab6 | [] | no_license | teopeurt/ansible-ubuntu-server | 613d00cea28bc6531acf4a39aeeb9cd0baa2a391 | b5b6127d2ee9723c5088443efe2ffb8ae30cfea7 | refs/heads/master | 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 | Makefile | UTF-8 | Python | false | false | 3,192 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolDeleteOptions(Model):
"""Additional parameters for the Pool_Delete operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id identifier in the response.
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. If not specified, this
header will be automatically populated with the current system clock
time.
:type ocp_date: datetime
:param if_match: An ETag is specified. Specify this header to perform the
operation only if the resource's ETag is an exact match as specified.
:type if_match: str
:param if_none_match: An ETag is specified. Specify this header to
perform the operation only if the resource's ETag does not match the
specified ETag.
:type if_none_match: str
:param if_modified_since: Specify this header to perform the operation
only if the resource has been modified since the specified date/time.
:type if_modified_since: datetime
:param if_unmodified_since: Specify this header to perform the operation
only if the resource has not been modified since the specified date/time.
:type if_unmodified_since: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=None, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
| [
"[email protected]"
] | |
202131d751e30e0b6464079f63e290c45a89c07a | 6cdff1cccb229bd98c7b7fce0ad3df32e4f04557 | /tests/conftest.py | 4eb8621c176f7ad405450bd91027044cc1498eb9 | [] | no_license | MITLibraries/workflow | fb8cbdf809702318c8d7c64307da90c0acda28cc | 63a17c3021e2bc0e0b13d22246ce3f13295349ca | refs/heads/main | 2023-03-04T10:38:12.270942 | 2021-07-08T18:06:16 | 2021-07-08T18:06:16 | 211,862,997 | 2 | 1 | null | 2023-02-08T01:14:43 | 2019-09-30T13:12:20 | Python | UTF-8 | Python | false | false | 3,108 | py | from collections import namedtuple
import json
from unittest import mock
import boto3
from moto import mock_ecs, mock_ec2
from moto.ec2.utils import generate_instance_identity_document
import pytest
from manager.cluster import Cluster
@pytest.fixture(autouse=True)
def aws_credentials(monkeypatch):
monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'foo')
monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'correct horse battery staple')
monkeypatch.setenv('AWS_SESSION_TOKEN', 'baz')
monkeypatch.setenv('AWS_DEFAULT_REGION', 'us-east-1')
@pytest.fixture
def cluster():
"""Create the mock Airflow cluster.
moto doesn't support the Fargate launch type, so we have to pretend
like we're going to launch our containers in EC2. There's a little
hand waving to make this work. moto comes with some predefined images
that seem to work fine.
Also see the ``patch_cluster_config`` fixture below.
"""
C = namedtuple('C', ['name', 'scheduler', 'worker', 'web'])
cluster = C('airflow-test', 'airflow-test-scheduler',
'airflow-test-worker', 'airflow-test-web')
with mock_ecs(), mock_ec2():
ec2_client = boto3.client('ec2')
ec2 = boto3.resource('ec2')
ecs = boto3.client('ecs')
image = ec2_client.describe_images()['Images'][0]
instance = ec2.create_instances(ImageId=image['ImageId'], MinCount=1,
MaxCount=1)[0]
doc = json.dumps(generate_instance_identity_document(instance))
ecs.create_cluster(clusterName=cluster.name)
ecs.register_container_instance(cluster=cluster.name,
instanceIdentityDocument=doc)
for service in cluster[1:]:
ecs.register_task_definition(family=service,
containerDefinitions=[])
ecs.create_service(cluster=cluster.name,
serviceName=service,
desiredCount=1,
taskDefinition=f'{service}:1')
ecs.update_service(cluster=cluster.name,
service=cluster.worker,
desiredCount=3)
yield cluster
@pytest.fixture(autouse=True)
def patch_cluster_config():
"""Patch the private config method on Cluster.
moto does not add the networkConfiguration to the service description.
Rather than just patching the whole thing, this effectively provides a
runtime decorator on the ``Cluster.__get_config`` method to augment the
    response.
"""
def wraps(f):
def wrapped(*args, **kwargs):
network_config = {
'awsvpcConfiguration': {
'subnets': ['awesome-subnet', 'dumb-subnet']
}
}
res = f(*args, **kwargs)
[r.update(networkConfiguration=network_config) for r in res]
return res
return wrapped
func = wraps(Cluster._Cluster__get_config)
with mock.patch.object(Cluster, '_Cluster__get_config', func):
yield
| [
"[email protected]"
] | |
e16189f36956843b3dfa3909dccea36da75ad30e | 5de4aed3d9a9230404150d4c3c553ea05ac4e088 | /afm/logger.py | c872f1d55b593e4a85f55bd2fb43d16e0e878e5a | [] | no_license | UfSoft/afm | db4df3189095aa916b3a3f770d5366bb3e0a9b74 | 2e85c65389a10f7bed032956b0c603bbb2af2dac | refs/heads/master | 2021-01-19T13:25:08.121356 | 2009-10-29T15:24:49 | 2009-10-29T15:24:49 | 26,618,925 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | # -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8 et
# ==============================================================================
# Copyright © 2009 UfSoft.org - Pedro Algarvio <[email protected]>
#
# License: BSD - Please view the LICENSE file for additional information.
# ==============================================================================
import logging
from twisted.internet import defer
LoggingLoggerClass = logging.getLoggerClass()
class Logging(LoggingLoggerClass):
def __init__(self, logger_name='afm', level=logging.DEBUG):
LoggingLoggerClass.__init__(self, logger_name, level)
@defer.inlineCallbacks
def debug(self, msg, *args, **kwargs):
yield LoggingLoggerClass.debug(self, msg, *args, **kwargs)
@defer.inlineCallbacks
def info(self, msg, *args, **kwargs):
yield LoggingLoggerClass.info(self, msg, *args, **kwargs)
@defer.inlineCallbacks
def warning(self, msg, *args, **kwargs):
yield LoggingLoggerClass.warning(self, msg, *args, **kwargs)
warn = warning
@defer.inlineCallbacks
def error(self, msg, *args, **kwargs):
yield LoggingLoggerClass.error(self, msg, *args, **kwargs)
@defer.inlineCallbacks
def critical(self, msg, *args, **kwargs):
yield LoggingLoggerClass.critical(self, msg, *args, **kwargs)
@defer.inlineCallbacks
def exception(self, msg, *args, **kwargs):
yield LoggingLoggerClass.exception(self, msg, *args, **kwargs)
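# Usage sketch (assumed wiring, not part of this module): register the class
# before any loggers are created so logging.getLogger() returns
# deferred-friendly loggers.
#   logging.setLoggerClass(Logging)
#   log = logging.getLogger('afm')
#   log.info('service started')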
| [
"[email protected]"
] | |
05c686e57349070729fed3e90271c155029b76cb | 25ad906181ae94f3423a5330b06112faaf1d0059 | /python网络数据采集/my_爬虫_进阶_之路/scrapy框架/my_spiders/curl转python/test.py | 1063f2c551f1e27ed86e7b64a5c2a17899e79e4f | [] | no_license | liuhe3647/python | 5ee0aff3f2bbff864fdb86db0371d0a07745dc26 | 4368cab542f4d2b1ecc845ff996e8898a9aaaca6 | refs/heads/master | 2022-04-18T15:56:45.263684 | 2020-04-18T03:43:18 | 2020-04-18T03:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,293 | py | # coding:utf-8
'''
@author = super_fazai
@File : test.py
@connect : [email protected]
'''
from __future__ import unicode_literals
from ftfy import fix_text
from random import randint
from urllib.parse import (
urlparse,
parse_qsl,
urlencode,)
from fzutils.ip_pools import (
fz_ip_pool,
ip_proxy_pool,
sesame_ip_pool,
tri_ip_pool,
get_random_proxy_ip_from_ip_pool,)
from fzutils.spider.fz_aiohttp import AioHttp
from fzutils.spider.fz_driver import (
BaseDriver,
CHROME,
FIREFOX,)
from fzutils.spider.fz_phantomjs import CHROME_DRIVER_PATH
from fzutils.url_utils import unquote_plus
from fzutils.img_utils import save_img_through_url
from fzutils.spider.fz_driver import PHONE
from fzutils.common_utils import _print
from fzutils.data.excel_utils import read_info_from_excel_file
from fzutils.data.list_utils import list_remove_repeat_dict_plus
from fzutils.internet_utils import (
str_cookies_2_dict,
_get_url_contain_params,
tuple_or_list_params_2_dict_params,
driver_cookies_list_2_str,)
from fzutils.qrcode_utils import decode_qrcode
from fzutils.spider.fz_requests import (
PROXY_TYPE_HTTP,
PROXY_TYPE_HTTPS,)
from fzutils.spider.selector import *
from fzutils.spider.async_always import *
from fzutils.spider.selenium_always import *
FIREFOX_DRIVER_PATH = '/Users/afa/myFiles/tools/geckodriver'
# headers = {
# 'Accept-Encoding': 'br, gzip, deflate',
# 'Connection': 'keep-alive',
# 'Accept': '*/*',
# 'Host': 'alisitecdn.m.taobao.com',
# 'User-Agent': 'iPhone7,1(iOS/11.0) AliApp(TB/8.4.0) Weex/0.20.0 1242x2208',
# 'Accept-Language': 'zh-cn',
# }
#
# params = (
# ('pathInfo', 'shop/impression'),
# ('userId', '3012445016'),
# ('shopId', '380157209'),
# ('pageId', '0'),
# )
# url = 'https://alisitecdn.m.taobao.com/pagedata/shop/impression'
# body = Requests.get_url_body(
# url=url,
# headers=headers,
# params=params,
# cookies=None,
# ip_pool_type=tri_ip_pool)
# # print(body)
# data = json_2_dict(
# json_str=body,
# default_res={}).get('module', {})
# # pprint(data)
# # 服务电话的js
# # print(data.get('module', {}).get('moduleSpecs', {}).get('shop_base_info', {}).get('moduleCode', ''))
#
# def wash_ori_data(ori_data:dict):
# """
# 清洗原始data
# :return:
# """
# try:
# ori_data.pop('moduleSpecs')
# ori_data.pop('moduleList')
# except:
# pass
#
# return ori_data
#
# data = wash_ori_data(ori_data=data)
# pprint(data)
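# A generalized, live version of the wash_ori_data sketch in the commented
# block above. It is a minimal sketch: the default keys to drop
# ('moduleSpecs', 'moduleList') come from that Taobao shop experiment and
# may differ for other endpoints.
def wash_module_data(ori_data: dict, drop_keys=('moduleSpecs', 'moduleList')) -> dict:
    """Drop bulky sub-structures from the shop 'module' dict before use."""
    for key in drop_keys:
        # pop with a default so a missing key does not raise
        ori_data.pop(key, None)
    return ori_data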
# Wireshark capture filters (test devices on the local LAN):
#   iOS (192.168.3.2):   (ip.addr == 192.168.3.2 or ip.src == 192.168.3.2) and ssl
#   Meizu (192.168.3.4): (ip.addr == 192.168.3.4 or ip.src == 192.168.3.4) and (ssl or http)
# Charles: sample Taobao shop endpoints captured
# https://campaigncdn.m.taobao.com/moduledata/downgrade.htm?dataId=taobao
# https://alisitecdn.m.taobao.com/pagedata/shop/index?pathInfo=shop/index&userId=201249601&shopId=58640118&pageId=1860970
# https://alisitecdn.m.taobao.com/pagedata/shop/impression?pathInfo=shop/impression&userId=201249601&shopId=58640118&pageId=0
# Wireshark TLS decryption: start Chrome with an SSL key log file, then point
# Wireshark's TLS settings at the same file:
# $ sudo /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --ssl-key-log-file=/Users/afa/sslkeylog.log
# Android capture filter: (ip.addr == 192.168.3.4 or ip.src == 192.168.3.4) and ssl
# company_info: company/shop info scraped from a go2.cn merchant page
# headers = {
# 'Connection': 'keep-alive',
# 'Cache-Control': 'max-age=0',
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': get_random_pc_ua(),
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
# # 'Referer': 'http://z.go2.cn/product/oaamaeq.html',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# }
# url = 'http://diteni.go2.cn/'
# body = Requests.get_url_body(
# url=url,
# headers=headers,
# ip_pool_type=tri_ip_pool,)
# print(body)
#
# company_name_selector = {
# 'method': 'css',
# 'selector': 'a.merchant-title ::text'
# }
# company_name = parse_field(
# parser=company_name_selector,
# target_obj=body,
# )
# print(company_name)
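# A small helper built on the parse_field call above: try several candidate
# selectors and return the first non-empty hit. A sketch only: go2.cn shop
# pages do not all share one template, and any fallback selectors passed in
# are assumptions to be verified per page.
def parse_first_match(selectors: list, target_obj) -> str:
    for selector in selectors:
        res = parse_field(parser=selector, target_obj=target_obj)
        if res not in ('', None, []):
            return res
    return ''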
# Source: Baijiahao (百家号)
# Article list of a given Baidu author
# required: these cookies must be sent with the request
# cookies = {
# 'BAIDUID': '1666ADBB95B083DBB2DA29E9BEFCB50B:FG=1',
# 'BIDUPSID': '1666ADBB95B083DBB2DA29E9BEFCB50B',
# # 'PSTM': '1553750958',
# # 'locale': 'zh',
# }
#
# headers = {
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# 'User-Agent': get_random_phone_ua(),
# 'Accept': '*/*',
# # 'Referer': 'https://author.baidu.com/home?type=profile&action=profile&mthfr=box_share&context=%7B%22from%22%3A%22ugc_share%22%2C%22app_id%22%3A%221617808623102717%22%7D&from=singlemessage&isappinstalled=0',
# 'Connection': 'keep-alive',
# }
#
# params = (
# ('type', 'article'),
# ('tab', '2'),
#     ('uk', 'sCWQteHJevYiu1bvIiKrEw'), # not a fixed value; use the uk from the shared article link
# # ('ctime', '15564325069740'),
# ('num', '14'),
# # ('_', '1556502637335'),
# ('callback', 'jsonp2'),
# )
# url = 'https://author.baidu.com/list'
# body = Requests.get_url_body(
# url=url,
# headers=headers,
# params=params,
# cookies=cookies,
# ip_pool_type=tri_ip_pool,)
# # print(body)
#
# data = json_2_dict(
#     json_str=re.compile(r'\((.*)\)').findall(body)[0],
# )
# pprint(data)
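import re  # explicit import; the fzutils wildcard imports above may already provide it
# The Baidu author-list endpoint above and the Tencent getinfo endpoint
# below both wrap their JSON payload in a jsonp callback such as
# jsonp2({...}). This helper consolidates the repeated re.compile pattern
# from those blocks; it assumes a single, non-nested callback wrapper and
# returns {} when the body does not match.
def unwrap_jsonp(body: str) -> dict:
    res = re.compile(r'\((.*)\)', re.S).findall(body)
    if not res:
        return {}
    return json_2_dict(json_str=res[0], default_res={})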
# Video info API (Tencent Video h5vv getinfo)
# params = (
# ('callback', 'tvp_request_getinfo_callback_654434'),
# ('platform', '11001'),
# ('charge', '0'),
# ('otype', 'json'),
# ('ehost', 'http://post.mp.qq.com'),
# ('sphls', '0'),
# ('sb', '1'),
# ('nocache', '0'),
# # ('_rnd', '1557917186'),
# # ('guid', 'daf25a829d645f1196b61df6417e87bf'),
# ('appVer', 'V2.0Build9502'),
# ('vids', 'm0866r0q1xn'),
# ('defaultfmt', 'auto'),
# # ('_qv_rmt', 'AI5PT6eoA15978I5x='),
# # ('_qv_rmt2', 'Kt7fT8OE157116tsw='),
# ('sdtfrom', 'v3010'),
# ('_', '1557917186891'),
# )
# body = Requests.get_url_body(
# url='http://h5vv.video.qq.com/getinfo',
# headers=headers,
# params=params,
# ip_pool_type=tri_ip_pool,
# num_retries=5,)
# print(body)
# data = json_2_dict(
#     json_str=re.compile(r'\((.*)\)').findall(body)[0],
# default_res={})
# pprint(data)
# ** Migu Video: fetch video playback info by video id (contId)
# import requests
#
# headers = {
# 'Proxy-Connection': 'keep-alive',
# 'terminalId': 'h5',
# # 'X-UP-CLIENT-CHANNEL-ID': '0131_10010001005',
# 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
# 'Accept': 'application/json',
# # 'clientId': '36854075131aeac30ca17f1b54649196',
# 'userId': '',
# 'userToken': '',
# 'appId': 'miguvideo',
# 'SDKCEId': '',
# 'Origin': 'http://m.miguvideo.com',
# 'Referer': 'http://m.miguvideo.com/mgs/msite/prd/detail.html?cid=652525090',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# }
#
# params = (
# ('contId', '652525090'),
# ('rateType', '3'),
# # ('clientId', '36854075131aeac30ca17f1b54649196'),
# # ('channelId', '0131_10010001005'),
# )
#
# response = requests.get('http://webapi.miguvideo.com/gateway/playurl/v2/play/playurlh5', headers=headers, params=params, verify=False)
# print(response.text)
# Baidu app short-video discovery API; the full-screen video articles it returns can be crawled directly
# headers = {
# 'Host': 'mbd.baidu.com',
# 'Connection': 'keep-alive',
# 'Content-Length': '4557',
# 'X-BD-QUIC': '1',
# 'Content-Type': 'application/x-www-form-urlencoded',
# 'X-BDBoxApp-NetEngine': '3',
# 'User-Agent': get_random_phone_ua(), # 'Mozilla/5.0 (iPad; CPU OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 SP-engine/2.18.0'
# # 'X-Bd-Traceid': '644a9f61e6cc425e8df842d2cb926de9',
# 'Accept': '*/*',
# # 'X-TurboNet-Info': '2.13.2679.177',
# 'Accept-Encoding': 'gzip, deflate',
# }
#
# params = (
# ('action', 'feed'),
# ('cmd', '210'),
# # ('maid', '_a2S8_aq28_qa28qiPSOtj8Pvag3h2aajiXT8jukvNlza-uNzuB3uli6-u_KO-ifY0HJ8lukSugkuXa90ivhI_PSv8oIi2ihgCSaa_asS8_M82uazxqSC'),
# ('refresh', '1'),
# ('imgtype', 'webp'),
# ('cfrom', '1099a'),
# ('from', '1099a'),
# ('network', '1_0'),
# ('osbranch', 'i0'),
# ('osname', 'baiduboxapp'),
# ('service', 'bdbox'),
# # ('sid', '1027585_4-2600_6645-1027088_2-1027514_1-1027521_1-1027598_3-3081_8171-5238_7311-2696_6930-1027056_2-3057_8089-5618_8591-1027583_1-1027195_1-1027384_2-1027255_3-1027604_1-5456_8016-1026924_1-5306_7565-1027258_2-3270_8882-2946_7781-1027230_2-5524_8269-1027659_1-2929_7702-1027285_1-1027328_5-1027599_1-1472_3438-5579_8458-3037_8036-1027425_3-1027641_1-1027564_2-3000026_2-1027249_1-1027654_1-1027525_2-5529_8280-1027151_2-5566_8411-1027577_2-5562_8387-1027102_1-5571_8441-1027346_1-1021859_1-5409_7877-3039_8040-5586_8486-5546_8581-1027597_2-1027562_1-1027251_1-5525_8271-1021774_1-2512_6387-2859_7452-1027460_2-1027128_2-1027379_1-1027652_2-2939_7745-1027218_1-1027225_1-1026985_1'),
# ('sst', '0'),
# ('st', '0'),
# ('ua', '1668_2224_iphone_11.22.0.17_0'),
# ('uid', 'E4317D7927A4F423B2A894710C308D015F8D69D51OMTBGHBERB'),
# # ('ut', 'iPad7,3_13.3.1'),
# # ('zid', '9iAc0yzbau51GKO563M1gzHzaPoPDD_d8nXwjCKxdBLITCmV4uqwJmkYrkuarE6BQqUXF7INisVWgScgYhwZ0qQ'),
# )
#
# data = {
# # 'data': '{\n "upload_ids" : [\n {\n "clk" : 0,\n "id" : "sv_5653763656459563687",\n "show" : 0,\n "clk_ts" : 0,\n "show_ts" : 0\n },\n {\n "clk" : 0,\n "id" : "sv_3599925748637729943",\n "show" : 0,\n "clk_ts" : 0,\n "show_ts" : 0\n },\n {\n "clk" : 0,\n "id" : "sv_5250727945753531281",\n "show" : 0,\n "clk_ts" : 0,\n "show_ts" : 0\n },\n {\n "clk" : 0,\n "id" : "sv_4823468498756614746",\n "show" : 1,\n "clk_ts" : 0,\n "show_ts" : 1587165880\n },\n {\n "clk" : 0,\n "id" : "sv_4439062174156612467",\n "show" : 1,\n "clk_ts" : 0,\n "show_ts" : 1587165886\n },\n {\n "clk" : 0,\n "id" : "sv_5248424962721750237",\n "show" : 1,\n "clk_ts" : 0,\n "show_ts" : 1587165886\n },\n {\n "clk" : 0,\n "id" : "sv_4130330140644084020",\n "show" : 1,\n "clk_ts" : 0,\n "show_ts" : 1587165880\n },\n {\n "clk" : 0,\n "id" %3...'
# 'data': dumps({
# "upload_ids" : [
# {
# "clk" : 0,
# "id" : "sv_5653763656459563687",
# "show" : 0,
# "clk_ts" : 0,
# "show_ts" : 0
# },
# {
# "clk" : 0,
# "id" : "sv_3599925748637729943",
# "show" : 0,
# "clk_ts" : 0,
# "show_ts" : 0
# },
# {
# "clk" : 0,
# "id" : "sv_5250727945753531281",
# "show" : 0,
# "clk_ts" : 0,
# "show_ts" : 0
# },
# {
# "clk" : 0,
# "id" : "sv_4823468498756614746",
# "show" : 1,
# "clk_ts" : 0,
# "show_ts" : datetime_to_timestamp(get_shanghai_time()), # 1587165880
# },
# {
# "clk" : 0,
# "id" : "sv_4439062174156612467",
# "show" : 1,
# "clk_ts" : 0,
# "show_ts" : datetime_to_timestamp(get_shanghai_time())
# },
# {
# "clk" : 0,
# "id" : "sv_5248424962721750237",
# "show" : 1,
# "clk_ts" : 0,
# "show_ts" : datetime_to_timestamp(get_shanghai_time())
# },
# {
# "clk" : 0,
# "id" : "sv_4130330140644084020",
# "show" : 1,
# "clk_ts" : 0,
# "show_ts" : datetime_to_timestamp(get_shanghai_time())
# },
# ]})
# }
# body = Requests.get_url_body(
# method='post',
# url='https://mbd.baidu.com/searchbox',
# headers=headers,
# params=params,
# # cookies=cookies,
# data=data,
# ip_pool_type=tri_ip_pool,
# proxy_type=PROXY_TYPE_HTTPS,
# num_retries=6,)
# data = json_2_dict(
# json_str=body).get('data', {}).get('210', {}).get('itemlist', {}).get('items', [])
# # pprint(data)
#
# for item in data:
# try:
# _mode = item.get('data', {}).get('mode', '')
# assert _mode != ''
# title = item.get('data', {}).get('title', '')
# assert title != ''
# article_url = item.get('data', {}).get('videoInfo', {}).get('pageUrl', '')
# print('mode: {}, title: {}, article_url: {}'.format(_mode, title, article_url))
# except Exception:
# continue
# Feed list data (including videos) from the Baidu app's Jinhua local-news API
# Testing found that for the image-text articles in the returned data, pages opened via the prefetch_html field all have broken images (the images can only be loaded inside the Baidu app), so pass
# headers = {
# 'Host': 'mbd.baidu.com',
# 'Connection': 'keep-alive',
# # 'Content-Length': '601',
# 'X-BDBoxApp-NetEngine': '3',
# 'Accept': 'application/json, text/plain, */*',
# 'Content-Type': 'application/x-www-form-urlencoded',
# # 'X-Bd-Traceid': '16fe51d50af744aa9f405a6674a0ece3',
# # 'X-TurboNet-Info': '2.13.2679.177',
# 'User-Agent': get_random_phone_ua(), # 'BaiduBoxApp/11.22.0 iPad; CPU OS 13_3_1 like Mac OS X'
# 'Accept-Encoding': 'gzip, deflate',
# }
#
# params = (
# ('action', 'feed'),
# ('cmd', '206'),
# ('refresh', '0'),
# ('cfrom', '1099a'),
# ('from', '1099a'),
# ('network', '1_0'),
# ('osbranch', 'i0'),
# ('osname', 'baiduboxapp'),
# # ('puid', '_avrijOq2iAqAqqqB'),
# ('service', 'bdbox'),
# # ('sid', '5279_7493-5343_7673-1027255_3-1027249_1-3108_8246-1027599_1-5420_7915-5159_7064-5318_7602-5505_8213-2387_6070-5546_8581-3200_8608-5409_7877-1027056_2-3057_8089-1768_6301-2849_7423-1027525_2-3085_8180-3188_8547-5276_7485-5177_7115-5566_8411-5482_8122-1027088_2-5247_7339-2411_6133-5553_8355-5351_7695-3022_7980-5358_7713-2583_6589-1027151_2-2964_7829-5270_7472-2422_6166-3092_8204-5344_7676-5525_8271-5557_8366-1027564_2-5508_8414-5297_7538-1027652_2-5426_7932-5291_7522-5309_7573-5188_7161-2558_7271-1027384_2-2966_7835-5164_7078-5295_7533-5618_8591-1869_4509-5568_8429-1027604_1-1027379_1-1027654_1-5288_7517-3072_8145-3234_8756-5306_7565-2119_5266-1549_3643-2702_6941-5397_7837-5292_7525-5605_8537-5189_7164-3195_8561-2929_7702-1027562_1-5623_8610-5456_8016-3281_8984-5571_8441-2762_7136-5437_7972-5399_7843-1027251_1-1027195_1-5382_7800-3021_7978-3037_8036-5305_7560-1027102_1-1026985_1-1027583_1-5434_7961-5524_8269-2939_7745-5529_8280-2132_5301-5287_7515-1021859_1-1027577_2-2962_7825-1027346_1-2512_6387-1027128_2-5511_8234-5562_8387-1026924_1-1892_4570-5302_7555-1027460_2-5253_7382-5540_8312-5191_7167-2859_7452-5258_7413-5380_7796-3000026_2-1021774_1-5501_8201-2696_6930-5337_8416-5356_7706-1027230_2-5208_7208-3270_8882-3068_8126-2701_6939-1027218_1-5495_8181-5244_7333-3095_8211-3081_8171-2429_6181-2720_7764-1027225_1-3094_8208-5354_7701-3066_8262-2407_6127-1756_4144-1027425_3-5290_7521-5289_7518-3008_7953-1472_3438-3051_8075-571_1173-5488_8587-5260_7422-5196_7178-5326_7620-5514_8240-5539_8310-5586_8486-1027514_1-965_2041-1027258_2-5274_7482-5465_8048-2991_7919-5474_8088-5238_7311-2949_7792-5304_7558-1027521_1-3269_8880-5341_7661-5396_7836-2734_7019-5277_7487-1027659_1-5229_7291-2862_7464-3039_8040-1027328_5-1027641_1-1027597_2-2946_7781-2520_6890-1027285_1-5476_8091-3150_8396-5579_8458-3038_8037-3246_8805-5621_8606-2163_5390-1027585_4-2600_6645-5551_8343-5507_8218-5552_8352-1027598_3-5387_7815-2466_6272'),
# ('sst', '0'),
# ('st', '0'),
# ('ua', '1668_2224_iphone_11.22.0.17_0'),
# ('uid', 'E4317D7927A4F423B2A894710C308D015F8D69D51OMTBGHBERB'),
# ('ut', 'iPad7,3_13.3.1'),
# # ('zid', '9iAc0yzbau51GKO563M1gzHzaPoPDD_d8nXwjCKxdBLL_jVT_hAYpPuHPN7r33duZtuXxOapOpFhVJsy0VCBMVg'),
# )
#
# data = {
# # 'data': '{"direction":"auto","refresh_type":0,"bundleVersion":"2.80.57","source":"bdbox_feed_attentiontab","upload_ids":[],"info":{"location":"120.072277,28.962932,---"},"data":{"tab_id":"109999333","tab_name":"","is_sub":0,"last_update_time":0,"session_id":"1587166932496","click_id":"f7c2394b4a3a374e9565268449e1f8b7","refresh_index":1,"refresh_count":1,"refresh_state":4,"pre_render":0,"context":{}}}'
# 'data': dumps({
# 'bundleVersion': '2.80.57',
# 'data': {
# # 'click_id': 'f7c2394b4a3a374e9565268449e1f8b7',
# 'context': {},
# 'is_sub': 0,
# 'last_update_time': 0,
# 'pre_render': 0,
# 'refresh_count': 1,
# 'refresh_index': 1,
# 'refresh_state': 4,
# 'session_id': get_now_13_bit_timestamp(),
# 'tab_id': '109999333',
# 'tab_name': ''
# },
# 'direction': 'auto',
# 'info': {'location': '120.072277,28.962932,---'},
# 'refresh_type': 0,
# 'source': 'bdbox_feed_attentiontab',
# 'upload_ids': []
# })
# }
#
# body = Requests.get_url_body(
# method='post',
# url='https://mbd.baidu.com/searchbox',
# headers=headers,
# params=params,
# data=data,
# ip_pool_type=tri_ip_pool,
# proxy_type=PROXY_TYPE_HTTPS,
# num_retries=6,
# )
# assert body != ''
#
# data = json_2_dict(
# json_str=body,
# logger=None,).get('data', {}).get('206', {}).get('itemlist', {}).get('items', [])
# # pprint(data)
#
# for item in data:
# try:
# title = item.get('data', {}).get('title', '')
# assert title != ''
# _mode = item.get('data', {}).get('mode', '')
# assert _mode != ''
# if _mode == 'video':
# article_url = item.get('data', {}).get('videoInfo', {}).get('pageUrl', '')
# else:
#                 # Skip image-text articles, because their images can only be loaded inside the Baidu app
# # article_url = item.get('data', {}).get('prefetch_html', '')
# continue
# assert article_url != ''
#
# print('mode: {}, title: {}, article_url: {}'.format(_mode, title, article_url))
# except Exception as e:
# continue | [
"[email protected]"
] | |
bb6c3b64422418aee867dca1dbec924a6ffc67c5 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4455603.3.spec | 4b1c58937e8cd790c55d073e3e63567e8ee9b14f | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,310 | spec | {
"id": "mgm4455603.3",
"metadata": {
"mgm4455603.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 109524,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2247,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 307,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 448,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1143,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 99301,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 442,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 20019,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 119071,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 87329,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 37078,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 24398,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 246589,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 7961,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 4699,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 13553,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 197,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 18035,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 2435468,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 100,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 13,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 69,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 27,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 868,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 1230,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 413,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 198,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 26099,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 4741,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4455603.3/file/999.done.species.stats"
}
},
"id": "mgm4455603.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4455603.3"
}
},
"raw": {
"mgm4455603.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4455603.3"
}
}
} | [
"[email protected]"
] | |
7bf9961f9abe963c51fc315c0be7e3c57d39a529 | 1b8a99a4ff80da51dc81dd8354bf9bf1cbd25a8b | /2023/longest_nice_substring.py | a832858e6eafd4d321f1afb296fd2304b2ca0cb5 | [] | no_license | eronekogin/leetcode | ea639eebe0cd70af9eb4cba59bc68f636d7b3e0c | edb870f83f0c4568cce0cacec04ee70cf6b545bf | refs/heads/master | 2023-08-16T10:35:57.164176 | 2023-08-14T11:25:33 | 2023-08-14T11:25:33 | 163,679,450 | 0 | 0 | null | 2021-09-09T12:04:44 | 2018-12-31T15:33:06 | Python | UTF-8 | Python | false | false | 449 | py | """
https://leetcode.com/problems/longest-nice-substring/
"""
class Solution:
def longestNiceSubstring(self, s: str) -> str:
if not s:
return ''
ss = set(s)
        for i, c in enumerate(s):
            if c.swapcase() not in ss:
                # c has no opposite-case partner anywhere in s, so no nice
                # substring can contain it: split around c and recurse.
                left = self.longestNiceSubstring(s[:i])
                right = self.longestNiceSubstring(s[i + 1:])
                return max(left, right, key=len)
return s
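if __name__ == '__main__':
    # Minimal usage sketch (a hypothetical harness -- LeetCode normally
    # drives Solution itself); expected values are the problem's examples.
    sol = Solution()
    assert sol.longestNiceSubstring('YazaAay') == 'aAa'
    assert sol.longestNiceSubstring('Bb') == 'Bb'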
| [
"[email protected]"
] | |
1607a3e3331e20d9281ee04b374c3d4ea110cb01 | c2849586a8f376cf96fcbdc1c7e5bce6522398ca | /ch21/ex21-15.pybench2.py | a110d546ad7c35d5e88ae11bbd6ee12cc27e3857 | [] | no_license | freebz/Learning-Python | 0559d7691517b4acb0228d1cc76de3e93915fb27 | 7f577edb6249f4bbcac4f590908b385192dbf308 | refs/heads/master | 2020-09-23T01:48:24.009383 | 2019-12-02T12:26:40 | 2019-12-02T12:26:40 | 225,371,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | # pybench2.py
...
def runner(stmts, pythons=None, tracemd=False):
for (number, repeat, setup, stmt) in stmts:
if not pythons:
...
best = min(timeit.repeat(
setup=setup, stmt=stmt, number=number, repeat=repeat))
else:
setup = setup.replace('\t', ' ' * 4)
setup = ' '.join('-s "%s"' % line for line in setup.split('\n'))
...
for (ispy3, python) in pythons:
...
            cmd = ('%s -m timeit -n %s -r %s %s %s' %
                   (python, number, repeat, setup, args))
# pybench2_cases.py
import pybench2, sys
...
stmts = [ # (num,rep,setup,stmt)
(0, 0, "", "[x ** 2 for x in range(1000)]"),
(0, 0, "", "res=[]\nfor x in range(1000): res.append(x ** 2)")
(0, 0, "def f(x):\n\treturn x",
"[f(x) for x in 'spam' * 2500]"),
(0, 0, "def f(x):\n\treturn x",
"res=[]\nfor x in 'spam' * 2500:\n\tres.append(f(x))"),
(0, 0, "L = [1, 2, 3, 4, 5]", "for i in range(len(L)): L[i] += 1"),
(0, 0, "L = [1, 2, 3, 4, 5]", "i=0\nwhile i < len(L):\n\tL[i] += 1\n\ti += 1")]
...
pybench2.runner(stmts, pythons, tracemd)
| [
"[email protected]"
] | |
000fe5fe6d7a41642db55280e7a0463e118c759e | 80301f1cffc5afce13256e2ecab6323c5df00194 | /en.fc/py/E0001.py | fe101d453fb481e13f8ba0eeffa341ab4c59e54f | [] | no_license | ZhenjianYang/SoraVoiceScripts | c1ddf7c1bbcb933243754f9669bd6b75777c87b9 | 94a948090aba0f63b10b2c69dc845dc99c822fc4 | refs/heads/master | 2023-04-18T04:54:44.306652 | 2023-04-06T11:15:17 | 2023-04-06T11:15:17 | 103,167,541 | 43 | 11 | null | 2021-03-06T08:52:54 | 2017-09-11T17:36:55 | Python | UTF-8 | Python | false | false | 28,509 | py | from ED6ScenarioHelper import *
def main():
SetCodePage("ms932")
CreateScenaFile(
FileName = 'E0001 ._SN',
MapName = 'event',
Location = 'E0001.x',
MapIndex = 1,
MapDefaultBGM = "ed60010",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'General Morgan', # 9
'Royal Army Soldier A', # 10
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH02080 ._CH', # 00
'ED6_DT07/CH01300 ._CH', # 01
)
AddCharChipPat(
'ED6_DT07/CH02080P._CP', # 00
'ED6_DT07/CH01300P._CP', # 01
)
DeclNpc(
X = -7752,
Z = -2000,
Y = 4527,
Direction = 270,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -7116,
Z = -2000,
Y = -197,
Direction = 270,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
ScpFunction(
"Function_0_FA", # 00, 0
"Function_1_146", # 01, 1
"Function_2_147", # 02, 2
"Function_3_15D", # 03, 3
"Function_4_FF8", # 04, 4
"Function_5_163D", # 05, 5
)
def Function_0_FA(): pass
label("Function_0_FA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x64, 5)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x64, 4)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x64, 3)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x65, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_115")
OP_A2(0x328)
Event(0, 3)
label("loc_115")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x7F, 2)), scpexpr(EXPR_END)), "loc_123")
OP_A3(0x3FA)
Event(0, 5)
label("loc_123")
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x0), scpexpr(EXPR_END)),
(100, "loc_12F"),
(SWITCH_DEFAULT, "loc_145"),
)
label("loc_12F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x65, 0)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x65, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_142")
OP_A2(0x329)
Event(0, 4)
label("loc_142")
Jump("loc_145")
label("loc_145")
Return()
# Function_0_FA end
def Function_1_146(): pass
label("Function_1_146")
Return()
# Function_1_146 end
def Function_2_147(): pass
label("Function_2_147")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_15C")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_147")
label("loc_15C")
Return()
# Function_2_147 end
def Function_3_15D(): pass
label("Function_3_15D")
ClearMapFlags(0x1)
EventBegin(0x0)
OP_6D(1000, 5000, -3500, 0)
SetChrPos(0x101, 1000, 5000, -3590, 225)
SetChrPos(0x102, -360, 5000, -3840, 135)
SetChrPos(0x103, 730, 5000, -4940, 315)
ChrTalk( #0
0x101,
(
"#002FWe checked it over, but it looks\x01",
"like there's nobody inside...\x02",
)
)
CloseMessageWindow()
ChrTalk( #1
0x102,
(
"#012FThere's a high possibility the\x01",
"passengers were transferred\x01",
"to the sky bandits' airship.\x02\x03",
"And then to wherever their\x01",
"hideout is...\x02",
)
)
CloseMessageWindow()
ChrTalk( #2
0x101,
(
"#002FAgreed.\x02\x03",
"But this sucks... Right when\x01",
"I thought we had some clues,\x01",
"we're back to zero.\x02",
)
)
CloseMessageWindow()
ChrTalk( #3
0x103,
(
"#020FCome on, cheer up already.\x02\x03",
"It's not like every clue has\x01",
"completely vanished.\x02\x03",
"Why do you think the sky bandits\x01",
"hid the airliner in a place like this?\x02",
)
)
CloseMessageWindow()
ChrTalk( #4
0x101,
"#002FHuh...?\x02",
)
CloseMessageWindow()
ChrTalk( #5
0x103,
(
"#020FAs far as I can tell, the orbal energy in the ship\x01",
"has completely stopped.\x02\x03",
"Which means that the orbal engine was stripped\x01",
"from the aircraft.\x02\x03",
"Furthermore, the sky bandits made multiple trips\x01",
"to carry off a large amount of cargo.\x02\x03",
"Considering the time and risk involved, don't you\x01",
"think it would have been more effective just to\x01",
"take the entire airliner to their hideout?\x02",
)
)
CloseMessageWindow()
ChrTalk( #6
0x101,
(
"#000FYeah, that does seem a little\x01",
"odd that they didn't...\x02\x03",
"So, why'd they hide the airliner\x01",
"here then?\x02\x03",
"Umm, all I can think of is that\x01",
"they did it in order to...\x02",
)
)
CloseMessageWindow()
FadeToDark(300, 0, 100)
RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
0,
10,
10,
0,
(
"[Sort the cargo.]\x01", # 0
"[Move the hostages aboard their own aircraft.]\x01", # 1
"[Steal the orbal engine.]\x01", # 2
"[Keep clear of the Royal Army's search party.]\x01", # 3
"[Ditch the Linde, because their hideout is somewhere weird.]\x01", # 4
)
)
MenuEnd(0x0)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
OP_56(0x0)
FadeToBright(300, 0)
Switch(
(scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_END)),
(0, "loc_6DC"),
(1, "loc_79C"),
(2, "loc_852"),
(3, "loc_90E"),
(4, "loc_A06"),
(SWITCH_DEFAULT, "loc_A2E"),
)
label("loc_6DC")
ChrTalk( #7
0x103,
(
"#026FIt's true this may have been a\x01",
"good place to sort the cargo\x01",
"because of the space...\x02\x03",
"However, it doesn't account for\x01",
"the fact that they didn't take\x01",
"the airliner to their hideout.\x02",
)
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_79C")
ChrTalk( #8
0x103,
(
"#026FIt's true they would have needed to\x01",
"land in order to move the hostages...\x02\x03",
"However, it doesn't account for\x01",
"the fact that they didn't take\x01",
"the airliner to their hideout.\x02",
)
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_852")
ChrTalk( #9
0x103,
(
"#026FIt's true they would have needed\x01",
"to land in order to remove the\x01",
"orbal engine...\x02\x03",
"However, it doesn't account for\x01",
"the fact that they didn't take\x01",
"the airliner to their hideout.\x02",
)
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_90E")
ChrTalk( #10
0x103,
(
"#026FIt's true the airliner is rather\x01",
"large and easily seen...\x02\x03",
"And in that sense, it would seem highly\x01",
"likely that they would leave it in a\x01",
"different place than their hideout.\x02\x03",
"However, that alone couldn't be\x01",
"considered a decisive reason.\x02",
)
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_A06")
ChrTalk( #11
0x103,
"#020FYes, that's exactly right.\x02",
)
CloseMessageWindow()
Jump("loc_A2E")
label("loc_A2E")
ChrTalk( #12
0x103,
(
"#020FFrom my guess, I would imagine\x01",
"that their hideout is in a slightly\x01",
"peculiar place.\x02\x03",
"Maybe 10 to 15 arge in size...\x02\x03",
"In short, a peculiar place on which\x01",
"only a small aircraft like the sky\x01",
"bandits' airship could land.\x02",
)
)
CloseMessageWindow()
ChrTalk( #13
0x101,
"#000FInteresting...\x02",
)
CloseMessageWindow()
ChrTalk( #14
0x102,
(
"#012FHow about terrain covered with\x01",
"extreme differences in height,\x01",
"like mountains and ravines...?\x02\x03",
"That seems like a likely place\x01",
"for the sky bandits' hideout.\x02",
)
)
CloseMessageWindow()
ChrTalk( #15
0x103,
(
"#020FYes, that's what I've been\x01",
"thinking, too.\x02\x03",
"However, if that's the case...then we\x01",
"may be unable to do anything else.\x02\x03",
"There's the possibility that their\x01",
"hideout may be in a place we can't\x01",
"reach by foot.\x02",
)
)
CloseMessageWindow()
ChrTalk( #16
0x101,
"#002FTh-Then what CAN we do?\x02",
)
CloseMessageWindow()
ChrTalk( #17
0x103,
(
"#022FWell...\x02\x03",
"I hate to say it, but we may have\x01",
"to share our conclusions with the\x01",
"army and ask for their cooperation.\x02\x03",
"Because they're the ones with\x01",
"the patrol ships.\x02",
)
)
CloseMessageWindow()
ChrTalk( #18
0x101,
(
"#004FWhat...? Now you're trying to tell\x01",
"us we should go crawling back to\x01",
"the army and ask them for help?!\x02",
)
)
CloseMessageWindow()
ChrTalk( #19
0x102,
(
"#012FEither way, we still have to report\x01",
"to them about the airliner.\x02\x03",
"Personally speaking, I still think\x01",
"we should cooperate with the army,\x01",
"whatever their attitude may be.\x02\x03",
"Especially if that means bringing\x01",
"the hostages back safe and sound.\x02",
)
)
CloseMessageWindow()
ChrTalk( #20
0x101,
(
"#002FI guess you're right...\x02\x03",
"This isn't the time or place to be\x01",
"letting my personal feelings get\x01",
"the best of me.\x02",
)
)
CloseMessageWindow()
ChrTalk( #21
0x103,
(
"#020FFor the time being, let's get back\x01",
"to the guild and report our findings\x01",
"to Lugran.\x02\x03",
"We should be able to contact\x01",
"the Haken Gate if we use the\x01",
"orbal telephone.\x02",
)
)
CloseMessageWindow()
EventEnd(0x0)
Return()
# Function_3_15D end
def Function_4_FF8(): pass
label("Function_4_FF8")
EventBegin(0x0)
ClearChrFlags(0x9, 0x80)
ClearChrFlags(0x8, 0x80)
ChrTalk( #22
0x101,
(
"#004FHuh?!\x02\x03",
"Wh-What the heck?!\x02",
)
)
CloseMessageWindow()
ChrTalk( #23
0x102,
(
"#017FGreat... Now this was something\x01",
"I did not expect.\x02",
)
)
CloseMessageWindow()
ChrTalk( #24
0x103,
(
"#025FI wonder if we should be glad,\x01",
"since they've saved us the trouble of\x01",
"having to contact them...\x02",
)
)
CloseMessageWindow()
ChrTalk( #25
0x9,
(
"We have found a suspicious\x01",
"armed group!\x02",
)
)
CloseMessageWindow()
ChrTalk( #26
0x9,
"Put your hands in the air! All of you!\x02",
)
CloseMessageWindow()
ChrTalk( #27
0x9,
(
"What is this world coming to? A woman\x01",
"and two kids are the sky bandits...?\x01",
"Though the girl DOES look shifty...\x02",
)
)
CloseMessageWindow()
ChrTalk( #28
0x101,
(
"#009FH-Hey! I do not! And who are\x01",
"you calling sky bandits?!\x02\x03",
"Can't you see this shiny emblem\x01",
"on my chest?!\x02",
)
)
CloseMessageWindow()
NpcTalk( #29
0x8,
"Man's Voice",
"Hmph! The bracer emblem, huh...?\x02",
)
CloseMessageWindow()
NpcTalk( #30
0x8,
"Man's Voice",
(
"I hope you don't think for a moment\x01",
"something like that proves your\x01",
"innocence.\x02",
)
)
CloseMessageWindow()
ChrTalk( #31
0x101,
"#004FG-General Morgan?!\x02",
)
CloseMessageWindow()
ChrTalk( #32
0x102,
"#014FWhy are you here...?\x02",
)
CloseMessageWindow()
ChrTalk( #33
0x8,
(
"#160FAfter looking over the reports of my men, I\x01",
"found this place to have been insufficiently\x01",
"investigated, so I came to see for myself...\x02\x03",
"Who would have thought the lot of you\x01",
"were conspiring with the sky bandits?\x02",
)
)
CloseMessageWindow()
ChrTalk( #34
0x103,
(
"#022FMight I get you to stop with the\x01",
"accusations, General?\x02\x03",
"We happened to find this place\x01",
"one step ahead of your men.\x02",
)
)
CloseMessageWindow()
ChrTalk( #35
0x8,
(
"#160FIf that's the truth, then why don't\x01",
"you tell me where the sky bandits\x01",
"are?\x02\x03",
"Are the hostages inside that\x01",
"airliner?\x02",
)
)
CloseMessageWindow()
ChrTalk( #36
0x102,
(
"#012FWe almost had the sky bandits,\x01",
"but they managed to escape...\x02\x03",
"And there are no hostages to be\x01",
"found here.\x02",
)
)
CloseMessageWindow()
ChrTalk( #37
0x8,
(
"#160FHmph! It looks like the truth\x01",
"has come out...\x02\x03",
"Most likely, you notified the sky\x01",
"bandits to let them know we were\x01",
"coming!\x02",
)
)
CloseMessageWindow()
ChrTalk( #38
0x101,
(
"#005FW-Wait a minute here!\x01",
"How about you cut with the crap!\x02",
)
)
CloseMessageWindow()
ChrTalk( #39
0x8,
(
"#162FMy thoughts exactly!\x02\x03",
"All right, men!\x01",
"Take them into custody!\x02",
)
)
CloseMessageWindow()
OP_A2(0x3FB)
NewScene("ED6_DT01/T1410 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_4_FF8 end
def Function_5_163D(): pass
label("Function_5_163D")
OP_77(0x0, 0x0, 0x0, 0x0, 0x0)
ClearMapFlags(0x1)
EventBegin(0x0)
OP_6D(-2670, 5000, -10370, 0)
OP_6C(315000, 0)
OP_6B(2400, 0)
SetChrFlags(0x101, 0x80)
SetChrFlags(0x102, 0x80)
SetChrPos(0x104, 0, 5000, -10200, 180)
SetChrPos(0x103, 0, 5000, -1650, 180)
FadeToBright(2000, 0)
OP_0D()
SetMessageWindowPos(72, 320, 56, 3)
SetChrName("Man's Voice")
AnonymousTalk( #40
(
"\x07\x00And that's about the gist of\x01",
"the sky bandit incident that\x01",
"occurred in northern Liberl...\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #41
"\x07\x00...\x02",
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #42
(
"\x07\x00And to think that the bankrupt\x01",
"Capua family drifted all the way\x01",
"down here.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #43
(
"\x07\x00You might be contacted by Liberl\x01",
"regarding the incident, so deal\x01",
"with it as you see fit.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #44
"\x07\x00...\x02",
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #45
(
"\x07\x00Yeah, it turns out I wasn't able to\x01",
"meet him in the end. It seems like\x01",
"something else must have come up.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #46
(
"\x07\x00Also, the connection with the sky bandit\x01",
"incident is still unknown, but it's clear\x01",
"that another power is at work here.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #47
"\x07\x00...\x02",
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #48
(
"\x07\x00No, it's not like that at all. I've also\x01",
"become acquainted with an interesting\x01",
"bunch.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #49
(
"\x07\x00The food's great, and there are babes\x01",
"everywhere. This is unquestionably\x01",
"my kind of country.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #50
(
"\x07\x00Maybe I'll just take up permanent\x01",
"residence here while I'm at it.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #51
"\x07\x00...\x02",
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #52
(
"\x07\x00All right, all right.\x01",
"There's no need to throw a fit.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #53
(
"\x07\x00Anyway, see what else you can find out.\x01",
"Just don't get caught looking into things\x01",
"by the chancellor.\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #54
"\x07\x00I'll contact you again...my dear friend.\x02",
)
CloseMessageWindow()
OP_56(0x0)
OP_77(0xFF, 0xFF, 0xFF, 0x7D000, 0x0)
Sleep(1000)
ChrTalk( #55
0x104,
(
"#030FHa ha. I love messing with that guy.\x02\x03",
"He's just so stuffy and uptight\x01",
"that I can't help myself...\x02",
)
)
CloseMessageWindow()
NpcTalk( #56
0x103,
"Woman's Voice",
"A portable phone, huh...?\x02",
)
CloseMessageWindow()
NpcTalk( #57
0x103,
"Woman's Voice",
(
"Well, aren't you carrying around\x01",
"quite the nifty gadget?\x02",
)
)
CloseMessageWindow()
OP_62(0x104, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
TurnDirection(0x104, 0x103, 500)
def lambda_1BE2():
OP_6B(3000, 1000)
ExitThread()
QueueWorkItem(0x101, 1, lambda_1BE2)
OP_6D(-190, 5000, -6110, 1000)
Sleep(1000)
ChrTalk( #58
0x104,
"#030FSch-Schera...\x02",
)
CloseMessageWindow()
def lambda_1C20():
OP_6D(100, 5000, -9000, 2500)
ExitThread()
QueueWorkItem(0x101, 1, lambda_1C20)
OP_8E(0x103, 0x0, 0x1388, 0xFFFFE142, 0x7D0, 0x0)
ChrTalk( #59
0x103,
(
"#020FAnd the fact that you're carrying around\x01",
"an orbment that even the Zeiss Central\x01",
"Factory couldn't create, well...\x02\x03",
"How about you tell me who you really\x01",
"are?\x02",
)
)
CloseMessageWindow()
ChrTalk( #60
0x104,
(
"#030FCome on, Schera. Don't treat me like I'm\x01",
"some kind of stranger.\x02\x03",
"I'm Olivier Lenheim, the wandering bard\x01",
"and gifted musician you've come to adore.\x02\x03",
"But if you'd like to get to know me better,\x01",
"I'm sure we could arrange something...\x01",
"A little pillow talk, perhaps...?\x02",
)
)
CloseMessageWindow()
ChrTalk( #61
0x103,
(
"#020FHow about we skip the foreplay and go\x01",
"straight to the climax. Your cheap\x01",
"antics don't fool me, Olivier.\x02\x03",
"Or should I call you\x01",
"'Mr. Erebonian Operative'?\x02",
)
)
CloseMessageWindow()
ChrTalk( #62
0x104,
(
"#030F...\x02\x03",
"Heh. It looks like the title 'Silver Streak'\x01",
"isn't just for show.\x02\x03",
"So I guess you were pretending\x01",
"that you didn't notice in front\x01",
"of Estelle and Joshua, huh?\x02",
)
)
CloseMessageWindow()
ChrTalk( #63
0x103,
(
"#020FI don't want to worry those two\x01",
"any more than they already are.\x02\x03",
"So back to the subject at hand,\x01",
"why don't you start talking?\x02\x03",
"Who are you, and what are you\x01",
"doing in Liberl?\x02",
)
)
CloseMessageWindow()
ChrTalk( #64
0x104,
(
"#030FBefore that...I'm going to have to\x01",
"correct you on two points.\x02\x03",
"First off, these 'cheap antics', as\x01",
"you call them, are totally natural.\x02\x03",
"I'm not playacting or anything.\x01",
"That's just who I am.\x02",
)
)
CloseMessageWindow()
ChrTalk( #65
0x103,
(
"#020FOh, I'm sure.\x02\x03",
"So do you mean to tell me that you\x01",
"drank that wine without paying just\x01",
"because you felt like it?\x02\x03",
"And after that, being taken to the Haken\x01",
"Gate so you could gather information was\x01",
"all a part of the plan?\x02\x03",
"And you even set yourself up to run into\x01",
"us? I don't think so...\x02",
)
)
CloseMessageWindow()
ChrTalk( #66
0x104,
(
"#030FHeh... I'll leave that part\x01",
"up to your imagination.\x02\x03",
"The other thing I must correct you on\x01",
"is that this device is not an orbment.\x02\x03",
"It is an artifact which was unearthed\x01",
"in the Empire.\x02\x03",
"It can piggyback off any orbal communications\x01",
"system and its transmissions can be encrypted,\x01",
"so there's no worry about them being intercepted.\x02\x03",
"It comes in handy for a busy man such as myself.\x02",
)
)
CloseMessageWindow()
ChrTalk( #67
0x103,
(
"#020FAn artifact...like one of the sacred relics\x01",
"the Septian Church has stewardship over?\x02\x03",
"Now I'm all the more curious to\x01",
"know what you're after.\x02",
)
)
CloseMessageWindow()
ChrTalk( #68
0x104,
(
"#030FOh no, no, no, Schera.\x02\x03",
"You should never try to pry into\x01",
"the secrets of a mysterious beauty\x01",
"all at once.\x02",
)
)
CloseMessageWindow()
ChrTalk( #69
0x103,
(
"#020F...\x02\x03",
"How would you like to get to know a\x01",
"real woman? I'd be more than willing\x01",
"to show you with my whip.\x02",
)
)
CloseMessageWindow()
ChrTalk( #70
0x104,
(
"#030F...\x01",
"Schera...I don't see any humor\x01",
"in those eyes...\x02\x03",
"Well, jokes aside...\x02",
)
)
CloseMessageWindow()
ChrTalk( #71
0x103,
(
"#020FYou really should have just been\x01",
"straightforward from the beginning.\x02",
)
)
CloseMessageWindow()
ChrTalk( #72
0x104,
(
"#030FAs you have already figured out,\x01",
"my position is like that of an\x01",
"operative in the Empire.\x02\x03",
"But I have no intention of sabotaging\x01",
"anything or stealing classified\x01",
"information.\x02\x03",
"I merely came here to meet a certain\x01",
"someone.\x02",
)
)
CloseMessageWindow()
ChrTalk( #73
0x103,
"#020FA certain someone...?\x02",
)
CloseMessageWindow()
ChrTalk( #74
0x104,
(
"#030FYes, someone you know all too well.\x02\x03",
"The one lauded as the supreme swordsman\x01",
"and master strategist by the Royal Army.\x02\x03",
"The bracer with the special title belonging\x01",
"to but four people throughout the whole of\x01",
"the entire continent.\x02\x03",
"The Divine Blade--Cassius Bright\x01",
"is the one I seek.\x02",
)
)
CloseMessageWindow()
OP_A2(0x3FC)
NewScene("ED6_DT01/T1101 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_5_163D end
SaveToFile()
Try(main)
| [
"[email protected]"
] | |
e3a20c33463c6737ce0a9c7ef85e374de481845f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /wsCshmu5zkN5BfeAC_11.py | 16ee4c11004690dd94154fad7dd29ce965bcbb66 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | """
Create a function that takes a number `n` and checks if each digit is
divisible by the digit on its left. Return a boolean array depending on the
condition checks.
### Examples
divisible_by_left(73312) ➞ [False, False, True, False, True]
# no element left to 7 = False
# 3/7 = False
# 3/3 = True
# 1/3 = False
# 2/1 = True
divisible_by_left(1) ➞ [False]
divisible_by_left(635) ➞ [False, False, False]
### Notes
The array should always start with `False` as there is no digit to the left of
the first digit.
"""
def divisible_by_left(n):
nums = list(map(int, str(n)))
return [False] + [
False if not i else (j / i).is_integer()
for i, j in zip(nums, nums[1:])
]
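# Quick self-check mirroring the docstring examples above (a hypothetical
# harness; needs Python 3 for true division and float.is_integer()):
if __name__ == '__main__':
    assert divisible_by_left(73312) == [False, False, True, False, True]
    assert divisible_by_left(1) == [False]
    assert divisible_by_left(635) == [False, False, False]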
| [
"[email protected]"
] | |
7586f2806ece479ea1e2d474b53558d8c88144b2 | fdc3d2daf484e8b500368987930d85b833d43fd6 | /sandbox/python/spectrogram3.py | 07bb5de54e8d25c13ce1d5af9224dc0a0bb27ecc | [] | no_license | luiarthur/signal_processing | 9d61b368603b965ab526b9226a1114022e08463b | f6f00ce57b94bfa020ac494fcb4e83549d05c902 | refs/heads/master | 2021-01-01T19:47:45.076460 | 2017-08-12T15:50:18 | 2017-08-12T15:50:18 | 98,684,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | import os
import numpy as np
from scipy.io import wavfile
from scipy import signal
import matplotlib.pyplot as plt
from notes import pitch, piano_freq, freq_dict, bin_spec
HOME = os.path.expanduser('~')
### Read a wavfile
(fs, x) = wavfile.read(HOME+"/wav/embraceableYou.wav")
if x.ndim > 1: x = x[:,1]  # keep a single channel if the file is stereo
w_size = 4096
f, t, Zxx = signal.spectrogram(x, fs, nperseg=w_size, window=signal.get_window('blackman', Nx=w_size))
### Plot Spectrogram
### Spectrogram (traditional)
#Z = np.log(Zxx) - np.log(Zxx.max())
#plt.pcolormesh(t, f, Z, vmin=Z.min(), vmax=0, cmap=plt.cm.gist_heat)
### Spectrogram (peak frequency)
Z = np.exp( np.log(Zxx) - np.log(Zxx.max()) )
plt.pcolormesh(t, f, Z, vmin=.00001, vmax=.0005, cmap=plt.cm.gist_heat)
plt.title('STFT Magnitude')
plt.ylabel('Frequency [Hz]')
plt.ylim([0, 4200])
plt.xlabel('Time [sec]')
#plt.yticks(f, pitch(f+1E-6))
plt.show()
### Plot Spectrogram built-in
#Pxx, freqs, bins, im = plt.specgram(x, NFFT=w_size, Fs=fs, noverlap=100, cmap=plt.cm.gist_heat)
#plt.ylim([0, 4200])
#plt.show()
### Plot Spectrogram built-in (2)
#np.mean( np.exp(np.log(Pxx) - np.log(Pxx.max())) < .001 )
#plt.pcolormesh(bins, freqs, np.exp(np.log(Pxx) - np.log(Pxx.max())), cmap=plt.cm.gist_heat, vmin=.00001, vmax=.0001)
#plt.title('STFT Magnitude')
#plt.ylabel('Frequency [Hz]')
#plt.ylim([0, 4200])
#plt.xlabel('Time [sec]')
#plt.yticks(f, pitch(f))
#plt.show()
### Movie
from matplotlib.animation import FuncAnimation
#thresh = .0005
thresh = .5
fig, ax = plt.subplots()
ln, = plt.plot([], [], animated=True)
title = ax.text(.8, .95, '', transform = ax.transAxes, va='center')
#plt.xticks(np.log(piano_freq), pitch(piano_freq), rotation=90)
plt.xticks(np.log(f), pitch(f), rotation=90)
plt.axhline(y=thresh, color='grey')
def init():
#ax.set_ylim(0, 1.1)
#ax.set_ylim(0, .01)
#ax.set_ylim(0, 1.1)
ax.set_ylim(0, thresh*2)
ax.set_xlim(np.log(27.5), np.log(4186))
return [ln, title]
def update(i):
ydata = np.exp( np.log(Zxx[:,i]) - np.log(Zxx[:,i].max()) )
#ydata = np.exp( np.log(Zxx[:,i]) - np.log(Zxx.max()) )
#ydata = np.exp( np.log(Zxx[:,i]) - np.log(10000) )
#ydata = Zxx[:,i]
ln.set_data(np.log(f), ydata)
title.set_text("time: " + str(np.round(t[i],2)) + "s")
#print t[i], pitch(f[Zxx[:,i].argmax()])
return [title, ln]
delay = (t[1:] - t[:-1]).mean() * 1000  # mean spacing between STFT frames, in ms, used as the animation interval
ani = FuncAnimation(fig, update, frames=range(t.size),
init_func=init, blit=True, repeat=False, interval=delay)
plt.show()
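# Optional: persist the animation instead of just showing it. A sketch that
# assumes an ffmpeg binary is available on PATH; the filename and fps below
# are illustrative, not part of the original script.
# ani.save('spectrogram_movie.mp4', fps=1000.0 / delay)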
| [
"[email protected]"
] | |
4b9c499c4cf735c4bbb7e381d11f44e4a1d22ac8 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/byte/Schema+Instance/NISTXML-SV-IV-atomic-byte-maxExclusive-5-3.py | f378841d6d0ec8179638494024c1501e673c1b5e | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 259 | py | from output.models.nist_data.atomic.byte.schema_instance.nistschema_sv_iv_atomic_byte_max_exclusive_5_xsd.nistschema_sv_iv_atomic_byte_max_exclusive_5 import NistschemaSvIvAtomicByteMaxExclusive5
obj = NistschemaSvIvAtomicByteMaxExclusive5(
value=-73
)
| [
"[email protected]"
] | |
47a112ee16196e739b06cf29dc87bb9fe6694f87 | 18508cea9458b2879017b44e6f18520cd8cf4f6c | /UCMDBPython/src/eview_resources.py | ec0e87b018aafd0657874aaf533d7204eb5a8157 | [] | no_license | kvt11/dd-git | 7d4935962e06d835ad0023c4abb185876a5a9e77 | 49aafa7081b861c5f6d0e1753b425e78948116d0 | refs/heads/master | 2022-11-23T19:03:19.763423 | 2016-04-04T14:54:18 | 2016-04-04T14:54:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,215 | py | #
# Host Resource Discovery by Eview
#
# Created on Sep 20, 2010
#
# @author: kchhina
#
# CP8 - Initial version
# CP9 - Added DASD storage discovery for CP9 March 31, 2011 podom
# CP9 - Added FORMAT=SHORT and MAX=* to commands for defects to force the output of the commands podom
# CP10 - Changed script to support LONG format returned from Network commands on IPV6-enabled systems QCCR1H38586 podom
# CP10 - Fixed QCCR1H38525 - Duplicate Software CIs in Bulk
# CP10 - Fixed QCCR1H6397 - Empty Volume Group causes failure on Volume Group Discovery
# CP10 - Add Job Discovery
# CP10 - CUP 1 to fix urgent issue with Netlinks module being deprecated.
# CP12 - Discover CPU types of zIIP and zAAP processors and set the CPU type attribute
# CP15 - Change Jobs Discovery to not discover time-sharing users. TSU type was incorrectly added as a Job.
# CP15 - Change interface discovery to add LPAR name to Linkname. This will prevent duplicate interfaces if the MAC is not available. QCIM1H94721
import string, re, logger, modeling
import eview_lib
from appilog.common.system.types import ObjectStateHolder
from appilog.common.system.types.vectors import ObjectStateHolderVector
from eview_lib import isNotNull, isnumeric, isNull
from com.hp.ucmdb.discovery.library.common import CollectorsParameters
from string import upper, lower
from modeling import _CMDB_CLASS_MODEL
import eview_netutils
''' Variables '''
global Framework
PARAM_HOST_ID = 'hostId'
PARAM_LPAR_NAME = 'LparName'
global knownPortsConfigFile
TCP_PORT_TYPE_ENUM = 1
UDP_PORT_TYPE_ENUM = 2
_CMD_D_SYMBOLS = 'D SYMBOLS'
_CMD_D_M_CPU = 'D M=CPU'
_CMD_D_TCPIP = 'D TCPIP'
_CMD_TCPIP_NETSTAT_HOME = 'D TCPIP,%s,NETSTAT,HOME,FORMAT=LONG'
_CMD_D_SSI = 'D SSI'
_CMD_D_NET_MAJNODES = 'D NET,MAJNODES,MAX=*'
_CMD_D_ASM = 'D ASM'
_CMD_D_PROD_STATE = 'D PROD,STATE'
_CMD_D_PROD_REGISTERED = 'D PROD,REGISTERED'
_CMD_D_XCF_GRP = 'D XCF,GRP'
_CMD_D_XCF_GRP_ALL = 'D XCF,GRP,%s,ALL'
_CMD_D_TCPIP_NETSTAT_CONN = 'D TCPIP,%s,NETSTAT,CONN,FORMAT=LONG,MAX=*'
_CMD_D_TCPIP_NETSTAT_ROUTE = 'D TCPIP,%s,NETSTAT,ROUTE,FORMAT=LONG'
_CMD_D_TCPIP_NETSTAT_DEV = 'D TCPIP,%s,NETSTAT,DEV,FORMAT=LONG'
_CMD_D_TCPIP_NETSTAT_ARP = 'D TCPIP,%s,NETSTAT,ARP,FORMAT=LONG,MAX=*'
_CMD_I_DASD = '*'
# Classes
class DevLink:
devName = ''
devType = ''
linkName = ''
linkType = ''
linkStatus = ''
linkMac = ''
def __init__(self, devName, devType, linkName, linkType, linkStatus, linkMac):
self.devName = devName
self.devType = devType
self.linkName = linkName
self.linkType = linkType
self.linkStatus = linkStatus
self.linkMac = linkMac
''' Methods '''
def appendToList(originalList, newList):
tempList = []
if isNull(originalList):
originalList = tempList
for val in newList:
if isNotNull(val):
originalList.append(val)
return originalList
def appendToDictionary(originalDict, newDict):
dict = {}
if isNull(originalDict):
originalDict = dict
for (x, y) in newDict.items():
if isNotNull(y) and not originalDict.has_key(x):
originalDict[x] = y
return originalDict
def getCpuStatus(cpuStatusSymbol):
cpuStatus = ''
if isNotNull(cpuStatusSymbol):
#Spencer: CPU status could be a multi-character string
cpuStatusSymbol = cpuStatusSymbol[0]
if cpuStatusSymbol == '+':
cpuStatus = 'ONLINE'
elif cpuStatusSymbol == '-':
cpuStatus = 'OFFLINE'
elif cpuStatusSymbol == '.':
cpuStatus = 'DOES NOT EXIST'
elif cpuStatusSymbol == 'W':
cpuStatus = 'WLM-MANAGED'
elif cpuStatusSymbol == 'N':
cpuStatus = 'NOT AVAILABLE'
return cpuStatus
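# e.g. getCpuStatus('+') -> 'ONLINE', getCpuStatus('W') -> 'WLM-MANAGED';
# an empty or unrecognized status symbol falls through and returns ''.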
def processXcfGroups(xcfGroupsList):
list = []
for xcfGroupLists in xcfGroupsList:
for xcfGroup in xcfGroupLists:
# get the group name and the number of members ---------------------
match = re.match('(.*)\((\d*)\)', xcfGroup)
if match:
groupName = match.group(1)
memberCount = match.group(2)
list.append([groupName, memberCount])
return list
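# Illustrative input/output shape (group names and counts are examples only):
#   processXcfGroups([['SYSGRS(3)', 'SYSMCS(2)']])
#     -> [['SYSGRS', '3'], ['SYSMCS', '2']]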
''' EView Command Execution Methods '''
def ev2_getSymlistOutput(ls):
# process SYMLIST ----------------------------------------------------------
symbolsMap = {} # {name:value}
output = ls.evMvsCmd(_CMD_D_SYMBOLS)
if output.isSuccess() and len(output.cmdResponseList) > 0:
symbolsList = output.getValuesFromLineList('s', output.cmdResponseList, '&', '\.\s+=\s+"', '"')
for symbols in symbolsList:
if len(symbols) == 4:
symbolName = symbols[1]
symbolValue = symbols[2]
if isNotNull(symbolName) and isNotNull(symbolValue):
symbolsMap[symbolName] = symbolValue
return symbolsMap
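# The D SYMBOLS lines parsed above are expected to look like (illustrative
# values only, not from a live system):
#   &SYSNAME.   = "SYSA"
#   &SYSPLEX.   = "PLEX1"
# each yielding symbolsMap['SYSNAME'] = 'SYSA', and so on.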
def ev3_getCpulistOutput(ls):
cpuLists = [] # [CPU ID, CPU STATUS, CPU SERIAL, CPU RAW STATUS]
cpcSi = ''
cpcName = ''
cpcId = ''
lpName = ''
lpId = ''
output = ls.evMvsCmd(_CMD_D_M_CPU)
if output.isSuccess() and len(output.cmdResponseList) > 0:
# first search for CPUs ------------------------------------------------
headerColumns = ['ID', 'CPU', 'SERIAL']
tableBeginPattern = 'PROCESSOR STATUS'
tableEndPattern = 'CPC ND ='
firstColumnPaddingChar = ''
includeRegexPattern = ''
ignorePatterns = []
cpuTable = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
for i in range(1, len(cpuTable)):
# Spencer: Add the raw entry for the status to the cpuLists array
cpuLists.append([cpuTable[i][0], getCpuStatus(cpuTable[i][1]), cpuTable[i][2], cpuTable[i][1]])
# then search for CPC SI -----------------------------------------------
cpcSiList = output.getValuesFromLineList('s', output.cmdResponseList, 'CPC SI =')
if isNotNull(cpcSiList) and len(cpcSiList) > 0 and isNotNull(cpcSiList[0][1]):
cpcSi = cpcSiList[0][1]
# then search for CPC ID -----------------------------------------------
cpcIdList = output.getValuesFromLineList('s', output.cmdResponseList, 'CPC ID =')
if isNotNull(cpcIdList) and len(cpcIdList) > 0 and isNotNull(cpcIdList[0][1]):
cpcId = cpcIdList[0][1]
# then search for CPC Name ---------------------------------------------
cpcNameList = output.getValuesFromLineList('s', output.cmdResponseList, 'CPC NAME =')
if isNotNull(cpcNameList) and len(cpcNameList) > 0 and isNotNull(cpcNameList[0][1]):
cpcName = cpcNameList[0][1]
# finally search for LP NAME and LP ID ---------------------------------
lpList = output.getValuesFromLineList('s', output.cmdResponseList, 'LP NAME =', 'LP ID =')
if isNotNull(lpList) and len(lpList) > 0 and isNotNull(lpList[0][1]):
lpName = lpList[0][1]
if isNotNull(lpList) and len(lpList) > 0 and isNotNull(lpList[0][2]):
lpId = lpList[0][2]
return (cpuLists, cpcSi, cpcId, cpcName, lpId, lpName)
def ev4_getTcpStackNameOutput(ls):
# get the running TCP stacks -----------------------------------------
tcpStackList = []
output = ls.evMvsCmd(_CMD_D_TCPIP)
if output.isSuccess() and len(output.cmdResponseList) > 0:
headerColumns = ['COUNT', 'TCPIP NAME', 'VERSION', 'STATUS']
tableBeginPattern = 'EZAOP50I TCPIP STATUS REPORT'
tableEndPattern = 'END TCPIP STATUS REPORT'
firstColumnPaddingChar = ' '
includeRegexPattern = ''
ignorePatterns = ['------']
stacks = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
for i in range(1, len(stacks)):
if len(stacks[i]) == 4 and isNotNull(stacks[i][1]):
tcpStackList.append(stacks[i][1])
return tcpStackList
def ev5_getHomelistOutput(ls, tcpStack):
# process HOMELIST ---------------------------------------------------------
homeLists = [] # [ADDRESS, LINK, FLG]
homelistentry = []
complete = 0
output = ls.evMvsCmd(_CMD_TCPIP_NETSTAT_HOME % tcpStack)
if output.isSuccess() and len(output.cmdResponseList) > 0:
for i in range(len(output.cmdResponseList)):
retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'LINKNAME:')
if len(retVal) > 0 and isNotNull(retVal[1]):
linkname = retVal[1]
complete = 1
continue
retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'ADDRESS:')
if len(retVal) > 0 and isNotNull(retVal[1]):
address = retVal[1]
if eview_netutils._isValidIp (address):
complete = 1
else:
address = None
continue
retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'FLAGS:')
if len(retVal) > 0 and isNotNull(retVal[1]):
flags = retVal[1]
complete = 1
else:
flags = ' '
if complete:
homelistentry = [address, linkname, flags]
homeLists.append (homelistentry)
complete = 0
return homeLists
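# Illustrative NETSTAT HOME (FORMAT=LONG) fragment parsed above (the address
# and link name are examples only, not from a live system):
#   LINKNAME:   OSA2080LNK
#     ADDRESS:  10.1.1.15
#     FLAGS:    PRIMARY
# Each completed group becomes one [address, linkname, flags] entry.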
def ev6_getSsilistOutput(ls):
# process SSILIST ----------------------------------------------------------
ssiList = [] # [Name, Dynamic, Status, Commands]
output = ls.evMvsCmd(_CMD_D_SSI)
if output.isSuccess() and len(output.cmdResponseList) > 0:
# first get the subsystem names from alternate lines -------------------
ssiListOutput = output.getRegexedValuesFromList(output.cmdResponseList, '^SUBSYS=(.*)$')
# then get the subsystem parameters from alternate lines ---------------
ssiParamList = output.getValuesFromLineList('s', output.cmdResponseList, 'DYNAMIC=', 'STATUS=', 'COMMANDS=')
if len(ssiListOutput) == len(ssiParamList): # TODO change this condition to something more air tight
for i in range(len(ssiListOutput)):
if isNotNull(ssiListOutput[i][0]):
ssiList.append([ssiListOutput[i][0], ssiParamList[i][1], ssiParamList[i][2], ssiParamList[i][3]])
return ssiList
def ev7_getMajorNodesOutput(ls):
# process MAJOR NODES ------------------------------------------------------
majorNodesLists = [] # [Name, Type, Status]
output = ls.evMvsCmd(_CMD_D_NET_MAJNODES)
if output.isSuccess() and len(output.cmdResponseList) > 0:
majNodeList = output.getValuesFromLineList('s', output.cmdResponseList, '\S+\s(\S+)', 'TYPE =', ',')
for majNodes in majNodeList:
if len(majNodes) == 5:
majorNodesLists.append([majNodes[1], majNodes[3], majNodes[4]])
return majorNodesLists
def ev8_getPagelistOutput(ls):
# process PAGE LIST --------------------------------------------------------
pageLists = [] # [Type, Used, Status, Device, DSN_Name]
output = ls.evMvsCmd(_CMD_D_ASM)
if output.isSuccess() and len(output.cmdResponseList) > 0:
pageLists = output.getRegexedValuesFromList(output.cmdResponseList, '^(\S+)\s+(\d+)%\s+(\S+)\s+(\S+)\s+(\S+)$')
return pageLists
def ev9_getListProdOutput(ls):
# process LISTPROD ---------------------------------------------------------
prodLists = [] # [ID, name, feature, version, owner, state]
headerColumns = ['S', 'OWNER', 'NAME', 'FEATURE', 'VERSION', 'ID']
tableBeginPattern = 'IFA111I'
tableEndPattern = ''
firstColumnPaddingChar = ''
includeRegexPattern = ''
ignorePatterns = []
output = ls.evMvsCmd(_CMD_D_PROD_STATE)
if output.isSuccess() and len(output.cmdResponseList) > 0:
prods = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
for i in range(1, len(prods)):
if len(prods[i]) == 6:
if prods[i][0] != 'D':
prodLists.append([prods[i][5], prods[i][2], prods[i][3], prods[i][4], prods[i][1], 'STATE'])
output = ls.evMvsCmd(_CMD_D_PROD_REGISTERED)
if output.isSuccess() and len(output.cmdResponseList) > 0:
prods = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
for i in range(1, len(prods)):
if len(prods[i]) == 6:
prodLists.append([prods[i][5], prods[i][2], prods[i][3], prods[i][4], prods[i][1], 'REGISTERED'])
return prodLists
def ev10_getXcfGroupOutput(ls):
groups = []
output = ls.evMvsCmd(_CMD_D_XCF_GRP)
if output.isSuccess() and len(output.cmdResponseList) > 0:
# get the groups from the first line -----------------------------------
xcfGroupsList = output.getRegexedValuesFromList(output.cmdResponseList, ".*\s(\S+\(\d+\))\s+(\S+\(\d+\))\s+(\S+\(\d+\))$")
groups.extend(processXcfGroups(xcfGroupsList))
# get the set of three groups ------------------------------------------
xcfGroupsList = output.getRegexedValuesFromList(output.cmdResponseList, "^(\S+\(\d+\))\s+(\S+\(\d+\))\s+(\S+\(\d+\))$")
groups.extend(processXcfGroups(xcfGroupsList))
# get the set of two groups --------------------------------------------
xcfGroupsList = output.getRegexedValuesFromList(output.cmdResponseList, "^(\S+\(\d+\))\s+(\S+\(\d+\))$")
groups.extend(processXcfGroups(xcfGroupsList))
# get the set of single group ------------------------------------------
xcfGroupsList = output.getRegexedValuesFromList(output.cmdResponseList, "^(\S+\(\d+\))$")
groups.extend(processXcfGroups(xcfGroupsList))
return groups
def ev11_getXcfMemberOutput(ls, groupName, xcfGroupsDict):
output = ls.evMvsCmd(_CMD_D_XCF_GRP_ALL % groupName)
if output.isSuccess() and len(output.cmdResponseList) > 0:
headerColumns = ['MEMBER NAME:', 'SYSTEM:', 'JOB ID:', 'STATUS:']
tableBeginPattern = 'INFORMATION FOR GROUP'
tableEndPattern = 'FOR GROUP'
firstColumnPaddingChar = ''
includeRegexPattern = ''
ignorePatterns = []
prods = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
for i in range(1, len(prods)):
if len(prods[i]) == 4:
if xcfGroupsDict.has_key(groupName):
tempList = xcfGroupsDict[groupName]
tempList.append(prods[i])
xcfGroupsDict[groupName] = tempList
else:
xcfGroupsDict[groupName] = [prods[i]]
def ev12_getTcpConnOutput(ls, tcpProcName):
    connections = []
    connectionentry = []
    # initialise parse state up front so malformed NETSTAT output cannot
    # trigger a NameError before the USER ID summary line has been seen
    userid = conn = state = ''
    localsocket = foreignsocket = ''
output = ls.evMvsCmd(_CMD_D_TCPIP_NETSTAT_CONN % tcpProcName)
if output.isSuccess() and len(output.cmdResponseList) > 0:
#connectionentry = ['USER ID', 'CONN', 'LOCAL SOCKET', 'FOREIGN SOCKET', 'STATE']
for line in output.cmdResponseList:
if (re.search('EZD0101', line) or
re.search('USER ID', line)):
continue
m = re.search('LOCAL SOCKET:\s+(\S+)', line)
if (m):
localsocket = m.group(1)
continue
m = re.search('FOREIGN SOCKET:\s+(\S+)', line)
if (m):
foreignsocket = m.group(1)
                connectionentry = [userid, conn, localsocket, foreignsocket, state]
                connections.append(connectionentry)
continue
m = re.search('(\S+)\s+(\S+)\s+(\S+)', line)
if (m):
userid = m.group(1)
conn = m.group(2)
state = m.group(3)
return connections
def ev13_getTcpRouteOutput(ls, tcpProcName):
routes = []
output = ls.evMvsCmd(_CMD_D_TCPIP_NETSTAT_ROUTE % tcpProcName)
if output.isSuccess() and len(output.cmdResponseList) > 0:
headerColumns = ['DESTINATION', 'GATEWAY', 'FLAGS', 'REFCNT', 'INTERFACE']
tableBeginPattern = 'EZD0101I NETSTAT'
tableEndPattern = 'RECORDS DISPLAYED'
firstColumnPaddingChar = ''
includeRegexPattern = ''
ignorePatterns = ['IPV4']
routes = output.getTableValues(output.cmdResponseList, headerColumns, tableBeginPattern, tableEndPattern, firstColumnPaddingChar, includeRegexPattern, ignorePatterns)
#logger.debug ('Routes == ',routes)
return routes
def ev14_getTcpDevLinkOutput(ls, tcpProcName):
linkDevLinkDict = {} # {LINKNAME:DevLink Instance}
output = ls.evMvsCmd(_CMD_D_TCPIP_NETSTAT_DEV % tcpProcName)
if isNotNull(output) and output.isSuccess() and len(output.cmdResponseList) > 0:
for i in range(len(output.cmdResponseList)):
# get device names -------------------------------------------------
retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'DEVNAME:', 'DEVTYPE:')
if len(retVal) == 3:
# get link names -----------------------------------------------
j = i + 2
retVal1 = output.getValuesFromLine('s', output.cmdResponseList[j], 'LNKNAME:', 'LNKTYPE:', 'LNKSTATUS:')
if len(retVal1) == 4:
if isNotNull(retVal1[1]):
linkDevLinkDict[retVal1[1]] = DevLink(retVal[1], retVal[2], retVal1[1], retVal1[2], retVal1[3], '')
return linkDevLinkDict
def ev15_getArpCacheOutput(ls, tcpProcName):
ipMacDict = {} # {IP:[MAC, LINKNAME]}
output = ls.evMvsCmd(_CMD_D_TCPIP_NETSTAT_ARP % tcpProcName)
if isNotNull(output) and output.isSuccess() and len(output.cmdResponseList) > 0:
for i in range(len(output.cmdResponseList)):
retVal = output.getValuesFromLine('s', output.cmdResponseList[i], 'CACHE FOR ADDRESS')
if len(retVal) > 0 and isNotNull(retVal[1]):
j = i + 1 #MAC is on the next line
retVal1 = output.getValuesFromLine('s', output.cmdResponseList[j], 'INTERFACE:', 'ETHERNET:')
if len(retVal1) > 0 and isNotNull(retVal1[1]) and isNotNull(retVal1[2]):
ipMacDict[retVal[1]] = [retVal1[1], retVal1[2]]
return ipMacDict
''' OSHV Creation Methods '''
def osh_createSysplexOsh(lparOsh, symbolsMap):
str_name = 'name'
if _CMDB_CLASS_MODEL.version() < 9:
str_name = 'data_name'
_vector = ObjectStateHolderVector()
sysplexOsh = None
if symbolsMap.has_key('SYSPLEX'):
sysplexOsh = ObjectStateHolder('mainframe_sysplex')
sysplexOsh.setAttribute(str_name, symbolsMap['SYSPLEX'])
_vector.add(sysplexOsh)
str_membership = 'membership'
if _CMDB_CLASS_MODEL.version() < 9:
str_membership = 'member'
membershipOsh = modeling.createLinkOSH(str_membership, sysplexOsh, lparOsh)
_vector.add(lparOsh)
_vector.add(membershipOsh)
else:
logger.warn("No sysplex found")
return (_vector, sysplexOsh)
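# Illustrative call (hypothetical symbol map): a SYSPLEX symbol yields a
# mainframe_sysplex OSH plus the LPAR and a membership link in the vector:
#   (vec, sysplexOsh) = osh_createSysplexOsh(lparOsh, {'SYSPLEX': 'PLEX1'})
# Without the key the vector stays empty and sysplexOsh is returned as None.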
def osh_createMainframeCpcOsh(lparOsh, cpcSi, cpcId, cpcName, cpuLists):
str_name = 'name'
str_node_family = 'node_family'
str_discovered_model = 'discovered_model'
str_serial_number = 'serial_number'
if _CMDB_CLASS_MODEL.version() < 9:
str_name = 'data_name'
str_node_family = 'host_servertype'
str_discovered_model = 'host_model'
str_serial_number = 'host_serialnumber'
isComplete = 1
createMainframe = 0
_vector = ObjectStateHolderVector()
cpcOsh = ObjectStateHolder('mainframe') # Mainframe CPC
cpcOsh.setAttribute(str_name, cpcName) # CPC Name
cpcOsh.setBoolAttribute('host_iscomplete', isComplete)
cpcOsh.setAttribute('system_information', cpcSi) # CPC SI
if isNotNull(cpcSi):
cpcSiList = string.split(cpcSi, '.')
if len(cpcSiList) == 5:
cpcOsh.setAttribute(str_node_family, cpcSiList[0]) # CPC Type
cpcOsh.setAttribute(str_discovered_model, cpcSiList[1]) # CPC Model
if len(cpuLists) > 0:
if isNotNull(cpuLists[0][2]):
cpuSerial = cpuLists[0][2]
cpcSerial = cpcSiList[4]
if isNotNull(cpcSerial):
createMainframe = 1
cpcOsh.setAttribute(str_serial_number, cpcSerial) # CPC Serial
# set host_key as serial number ----------------------------
cpcOsh.setAttribute('host_key', cpcSerial)
if createMainframe:
str_membership = 'membership'
if _CMDB_CLASS_MODEL.version() < 9:
str_membership = 'member'
membershipOsh = modeling.createLinkOSH(str_membership, cpcOsh, lparOsh)
_vector.add(cpcOsh)
_vector.add(lparOsh)
_vector.add(membershipOsh)
return _vector
def osh_createCpuOsh(lparOsh, cpuLists):
_vector = ObjectStateHolderVector()
for cpu in cpuLists:
if isNotNull(cpu[0]) and isNotNull(cpu[1]) and cpu[1] == 'ONLINE':
cpuOsh = ObjectStateHolder('cpu')
if _CMDB_CLASS_MODEL.version() > 9:
cpuOsh.setAttribute('cpu_id', cpu[0])
cpuOsh.setAttribute('serial_number', cpu[2])
else:
cpuOsh.setAttribute('cpu_cid', cpu[0])
#Spencer: Add cpu type
cpu_type = ''
if (len(cpu[3]) >= 2):
if (cpu[3][1] == 'I'):
cpu_type = 'Ziip'
elif (cpu[3][1] == 'A'):
cpu_type = 'Zaap'
cpuOsh.setAttribute('cpu_type', cpu_type)
cpuOsh.setContainer(lparOsh)
_vector.add(cpuOsh)
return _vector
def osh_createIpOsh(lparOsh, homeLists):
_vector = ObjectStateHolderVector()
ipOshDict = {}
ipstoexclude = ['127.0.0.1']
if len(homeLists) > 0:
for home in homeLists:
if isNotNull(home[0]) and upper(home[0]) != 'ADDRESS' and home[0] not in ipstoexclude and eview_netutils._isValidIp(home[0]):
ipOsh = eview_netutils._buildIp(home[0])
containedOsh = modeling.createLinkOSH('contained', lparOsh, ipOsh)
_vector.add(lparOsh)
_vector.add(ipOsh)
_vector.add(containedOsh)
# add IP OSH to dictionary for later use -----------------------
ipOshDict[home[0]] = ipOsh
return (_vector, ipOshDict)
def osh_createSubsystemsOsh(lparOsh, ssiList):
str_name = 'name'
str_discovered_product_name = 'discovered_product_name'
if _CMDB_CLASS_MODEL.version() < 9:
str_name = 'data_name'
str_discovered_product_name = 'data_name' # duplicated on purpose
_vector = ObjectStateHolderVector()
if isNotNull(ssiList):
for ssi in ssiList:
if isNotNull(ssi[0]):
ssOsh = ObjectStateHolder('mainframe_subsystem')
ssOsh.setAttribute(str_name, ssi[0])
ssOsh.setAttribute(str_discovered_product_name, ssi[0])
# Is Dynamic ---------------------------------------------------
if isNotNull(ssi[1]) and upper(ssi[1]) == 'YES':
ssOsh.setBoolAttribute('is_dynamic', 1)
elif isNotNull(ssi[1]) and upper(ssi[1]) == 'NO':
ssOsh.setBoolAttribute('is_dynamic', 0)
# Is Active ----------------------------------------------------
if isNotNull(ssi[2]) and upper(ssi[2]) == 'ACTIVE':
ssOsh.setBoolAttribute('is_active', 1)
elif isNotNull(ssi[2]) and upper(ssi[2]) == 'INACTIVE':
ssOsh.setBoolAttribute('is_active', 0)
# Accepts commands ---------------------------------------------
if isNotNull(ssi[3]):
ssOsh.setAttribute('accepts_commands', ssi[3])
ssOsh.setContainer(lparOsh)
_vector.add(ssOsh)
return _vector
def osh_createMajorNodesOsh(lparOsh, majorNodesLists):
str_name = 'name'
if _CMDB_CLASS_MODEL.version() < 9:
str_name = 'data_name'
_vector = ObjectStateHolderVector()
if len(majorNodesLists) > 0:
for majNode in majorNodesLists:
if isNotNull(majNode[0]):
majOsh = ObjectStateHolder('mainframe_major_node')
majOsh.setAttribute(str_name, majNode[0])
if isNotNull(majNode[1]):
majOsh.setAttribute('type', majNode[1])
majOsh.setContainer(lparOsh)
_vector.add(majOsh)
return _vector
def osh_createPageOsh(lparOsh, pageLists):
str_name = 'name'
if _CMDB_CLASS_MODEL.version() < 9:
str_name = 'data_name'
_vector = ObjectStateHolderVector()
if len(pageLists) > 0:
for page in pageLists: # [Type, Used, Status, Device, DSN_Name]
if isNotNull(page[4]):
pageOsh = ObjectStateHolder('mainframe_page_dataset')
pageOsh.setAttribute(str_name, page[4]) # DSN Name
if isNotNull(page[0]):
pageOsh.setAttribute('type', page[0]) # Type
if isNotNull(page[1]) and isnumeric(page[1]):
pageOsh.setIntegerAttribute('used', int(page[1])) # Used
if isNotNull(page[2]):
pageOsh.setAttribute('status', page[2])
if isNotNull(page[3]):
pageOsh.setAttribute('device', page[3])
pageOsh.setContainer(lparOsh)
_vector.add(pageOsh)
return _vector
def osh_createSoftwareOsh(lparOsh, prodLists):
str_cit = 'installed_software'
str_name = 'name'
str_description = 'description'
str_version = 'version'
str_software_productid = 'software_productid'
str_discovered_vendor = 'discovered_vendor'
if _CMDB_CLASS_MODEL.version() < 9:
str_cit = 'software'
str_name = 'data_name'
str_description = 'data_description'
str_version = 'software_version'
str_software_productid = 'software_productid'
str_discovered_vendor = 'software_vendor'
_vector = ObjectStateHolderVector()
if len(prodLists) > 0:
for prod in prodLists: # [ID, name, feature, version, owner, registered]
swOsh = None
if isNotNull(prod[1]) and isNotNull(prod[2]):
swOsh = ObjectStateHolder(str_cit)
softwareName = ''
softwareDesc = ''
if upper(prod[1]) == upper(prod[2]):
swOsh.setAttribute(str_name, prod[1]) # Name
swOsh.setAttribute(str_description, prod[1]) # Name
else:
swOsh.setAttribute(str_name, '%s %s' % (prod[1], prod[2])) # Name Feature
swOsh.setAttribute(str_description, '%s %s' % (prod[1], prod[2])) # Name Feature
elif isNotNull(prod[2]):
swOsh = ObjectStateHolder(str_cit)
swOsh.setAttribute(str_name, prod[2]) # Feature
if isNotNull(swOsh):
if isNotNull(prod[3]) and prod[3] != '**.**.**' and prod[3] != '* .* .*':
swOsh.setAttribute(str_version, prod[3]) # Version
if isNotNull(prod[0]):
                    swOsh.setAttribute(str_software_productid, prod[0])     # Product ID
if isNotNull(prod[4]):
swOsh.setAttribute(str_discovered_vendor, prod[4]) # Owner
swOsh.setContainer(lparOsh)
_vector.add(swOsh)
return _vector
def getIpFromHomeList(homeLists, linkName = ''):
if isNotNull(homeLists) and len(homeLists) > 0:
firstAvailableIp = ''
for home in homeLists:
if isNotNull(home[0]) and upper(home[0]) != 'ADDRESS' and isNotNull(home[1]):
firstAvailableIp = home[0]
if isNotNull(linkName) and upper(home[1]) == upper(linkName):
return home[0]
elif isNull(linkName) and isNotNull(home[2]) and upper(home[2]) == 'P':
return home[0]
return firstAvailableIp
return ''
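# Illustrative calls (hypothetical data; rows follow the [ADDRESS, LINK, FLG]
# layout produced by ev5_getHomelistOutput, where FLG 'P' marks the primary):
#   getIpFromHomeList([['9.1.2.3', 'LNK1', ''], ['9.1.2.4', 'LNK2', 'P']])
#       returns '9.1.2.4'  -- the primary flag wins when no link is requested
#   getIpFromHomeList([['9.1.2.3', 'LNK1', ''], ['9.1.2.4', 'LNK2', 'P']], 'LNK1')
#       returns '9.1.2.3'  -- a matching link name takes precedence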
def getLinkFromHomeList(homeLists, ip):
if isNotNull(homeLists) and len(homeLists) > 0 and isNotNull(ip):
for home in homeLists:
if isNotNull(home[0]) and upper(home[0]) != 'ADDRESS' and isNotNull(home[1]) and home[0] == ip:
return home[1]
return ''
def osh_createDeviceAndLinkOsh(lparOsh, ipOshDict, lparName, linkDevLinkDict, ipMacDict, homeLists):
str_name = 'interface_name'
str_mac_address = 'mac_address'
if _CMDB_CLASS_MODEL.version() < 9:
str_name = 'data_name'
str_mac_address = 'interface_macaddr'
_vector = ObjectStateHolderVector()
for (linkName, j) in linkDevLinkDict.items():
# create interfaces ----------------------------------------------------
ifOsh = ObjectStateHolder('interface')
ifOsh.setAttribute(str_name, linkName)
ifOsh.setAttribute('data_note', j.linkType) ## ER: change attribute to link type
# default the mac address attribute to linkName-Lparname and update later if MAC found #CP15
        ifOsh.setAttribute(str_mac_address, '#%s-%s' % (linkName, lparName)) # if no MAC is found, #linkName-lparName remains the key #CP15
ifOsh.setContainer(lparOsh)
# link interfaces to IPs -----------------------------------------------
ipOsh = None
parentIp = getIpFromHomeList(homeLists, linkName)
if isNotNull(parentIp) and ipOshDict.has_key(parentIp):
ipOsh = ipOshDict[parentIp]
if isNotNull(ipMacDict) and ipMacDict.has_key(parentIp):
arpInfo = ipMacDict[parentIp]
if isNotNull(arpInfo) and len(arpInfo) == 2:
if isNotNull(arpInfo[0]) and upper(linkName) == upper(arpInfo[0]):
ifOsh.setAttribute(str_mac_address, arpInfo[1])
_vector.add(ifOsh)
if isNotNull(ipOsh):
parentLinkOsh = modeling.createLinkOSH('containment', ifOsh, ipOsh)
_vector.add(ipOsh)
_vector.add(parentLinkOsh)
# create devices (only for UCMDB 9.x) ----------------------------------
if _CMDB_CLASS_MODEL.version() >= 9:
devOsh = ObjectStateHolder('hardware_board')
devOsh.setAttribute('serial_number', j.devName) # serial number not available, use device name
devOsh.setAttribute('name', j.devName)
##devOsh.setAttribute('data_note', j.devType)
devOsh.setContainer(lparOsh)
_vector.add(devOsh)
return _vector
def _getIpPortFromSocket(socket, primaryIp):
ip = ''
port = ''
if isNotNull(socket):
socket = string.split(socket, "..")
if len(socket) == 2:
if isNotNull(socket[0]):
ip = socket[0]
if isNotNull(socket[1]):
port = socket[1]
if ip == '0.0.0.0': # use homelist primary IP
ip = primaryIp
if not eview_netutils._isValidIp (ip):
ip = None
return (ip, port)
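# Illustrative values (hypothetical): NETSTAT sockets arrive in "ip..port"
# form, and a 0.0.0.0 wildcard is swapped for the stack's primary IP:
#   _getIpPortFromSocket('9.42.44.33..1021', '9.1.2.3') -> ('9.42.44.33', '1021')
#   _getIpPortFromSocket('0.0.0.0..80', '9.1.2.3')      -> ('9.1.2.3', '80')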
def osh_createTcpConnectionsOsh(lparOsh, ipOshDict, connsList, knownPortsConfigFile, homeLists):
str_containment = 'containment'
if _CMDB_CLASS_MODEL.version() < 9:
str_containment = 'contained'
_vector = ObjectStateHolderVector()
ignoreLocalConnections = 0 ## ER: parameterize
primaryIp = getIpFromHomeList(homeLists)
for conn in connsList:
if upper(conn[0]) != 'USER ID':
id = conn[0]
localSocket = conn[2]
foreignSocket = conn[3]
state = conn[4]
srcAddr = ''
# split up the socket text into IP and port ------------------------
(dstAddr, dstPort) = _getIpPortFromSocket(localSocket, primaryIp)
if upper(state) == 'ESTBLSH':
(srcAddr, srcPort) = _getIpPortFromSocket(foreignSocket, primaryIp)
if ignoreLocalConnections and (srcAddr == dstAddr):
continue
if isNotNull(dstAddr) and eview_netutils._isValidIp(dstAddr):
# create destination (server) IP and Host --------------------------
dstIpOsh = eview_netutils._buildIp(dstAddr)
dstHostOsh = None
if isNotNull(lparOsh):
dstHostOsh = lparOsh
else:
dstHostOsh = modeling.createHostOSH(dstAddr)
dstContainedLinkOsh = modeling.createLinkOSH(str_containment, dstHostOsh, dstIpOsh)
_vector.add(dstIpOsh)
_vector.add(dstHostOsh)
_vector.add(dstContainedLinkOsh)
# create destination service address object ------------------------
portTypeEnum = TCP_PORT_TYPE_ENUM
portName = knownPortsConfigFile.getTcpPortName(int(dstPort))
if upper(state) == 'UDP':
portTypeEnum = UDP_PORT_TYPE_ENUM
portName = knownPortsConfigFile.getUdpPortName(int(dstPort))
if isNull(portName):
portName = dstPort
serverPortOsh = modeling.createServiceAddressOsh(dstHostOsh, dstAddr, int(dstPort), portTypeEnum, portName)
_vector.add(serverPortOsh)
if isNotNull(srcAddr) and eview_netutils._isValidIp(srcAddr):
# create source (client) IP and Host ---------------------------
srcIpOsh = eview_netutils._buildIp(srcAddr)
srcHostOsh = modeling.createHostOSH(srcAddr)
srcContainedLinkOsh = modeling.createLinkOSH(str_containment, srcHostOsh, srcIpOsh)
_vector.add(srcIpOsh)
_vector.add(srcHostOsh)
_vector.add(srcContainedLinkOsh)
# create client-server links -----------------------------------
_vector.add(_createClientServerLinkOsh(dstPort, serverPortOsh, portName, lower(state), srcIpOsh))
# create client server dependency links ------------------------
_vector.add(_createClientServerDependencyLinkOsh(dstHostOsh, dstPort, srcHostOsh, portName))
return _vector
def _createClientServerDependencyLinkOsh(serverHostOSH, serverPort, clientHostOsh, portName):
str_dependency = 'node_dependency'
if _CMDB_CLASS_MODEL.version() < 9:
str_dependency = 'dependency'
nodeDependencyLinkOsh = modeling.createLinkOSH(str_dependency, clientHostOsh, serverHostOSH)
nodeDependencyLinkOsh.setAttribute('dependency_name', serverPort)
nodeDependencyLinkOsh.setAttribute('dependency_source', portName)
return nodeDependencyLinkOsh
def _createClientServerLinkOsh(serverPort, serverPortOsh, portName, portType, clientIpOsh):
str_client_server = 'client_server'
str_name = 'name'
if _CMDB_CLASS_MODEL.version() < 9:
str_client_server = 'clientserver'
str_name = 'data_name'
csLinkOsh = modeling.createLinkOSH(str_client_server, clientIpOsh, serverPortOsh)
csLinkOsh.setStringAttribute('clientserver_protocol', portType)
csLinkOsh.setStringAttribute(str_name, portName)
csLinkOsh.setLongAttribute('clientserver_destport', int(serverPort))
return csLinkOsh
def osh_createXcfOsh(lparOsh, xcfGroupsDict, sysplexOsh, lparName):
str_name = 'name'
str_membership = 'membership'
str_containment = 'containment'
if _CMDB_CLASS_MODEL.version() < 9:
str_name = 'data_name'
str_membership = 'member'
str_containment = 'contained'
_vector = ObjectStateHolderVector()
if isNotNull(sysplexOsh):
if isNotNull(xcfGroupsDict):
for (groupName, membersList) in xcfGroupsDict.items():
# Create XCF Groups
xcfGroupOsh = ObjectStateHolder('mainframe_xcf_group')
xcfGroupOsh.setAttribute(str_name, groupName)
xcfGroupOsh.setContainer(sysplexOsh)
_vector.add(xcfGroupOsh)
# Make the LPAR member of XCF
if isNotNull(xcfGroupOsh):
memberLinkOsh = modeling.createLinkOSH(str_membership, xcfGroupOsh, lparOsh)
_vector.add(memberLinkOsh)
# Create XCF member for every group
if isNotNull(xcfGroupOsh) and isNotNull(membersList) and len(membersList) > 0:
for member in membersList:
if isNotNull(member[0]):
memberOsh = ObjectStateHolder('mainframe_xcf_member')
memberOsh.setAttribute(str_name, member[0])
memberOsh.setAttribute('job_id', member[2])
memberOsh.setAttribute('xcf_member_status', member[3])
memberOsh.setContainer(xcfGroupOsh)
_vector.add(memberOsh)
# If LPAR sysid matches member system name, create contained link
if isNotNull(lparName) and isNotNull(memberOsh) and string.upper(lparName) == string.upper(member[1]):
containedLinkOsh = modeling.createLinkOSH(str_containment, lparOsh, memberOsh)
_vector.add(containedLinkOsh)
else:
logger.debug('Not creating any XCF Groups since no sysplex discovered')
return _vector
# Process LPAR Network Resources
def processNetworkResources(ls, lparOsh, ipOshDict, lparName, sysplexOsh, knownPortsConfigFile, Framework):
_vector = ObjectStateHolderVector()
#===========================================================================
# Run commands and create OSHs
# XCF (Groups, Members), TCPIP NETSTAT (CONN, HOME, ROUTE)
#===========================================================================
# XCF Groups and Members Commands ------------------------------------------
    xcfGroupsDict = {} # {groupName: [[memberName, memberSystem, jobId, status], ...]}
xcfGroups = ev10_getXcfGroupOutput(ls)
for group in xcfGroups:
if isNotNull(group[0]):
ev11_getXcfMemberOutput(ls, group[0], xcfGroupsDict)
_vector.addAll(osh_createXcfOsh(lparOsh, xcfGroupsDict, sysplexOsh, lparName))
# TCPIP Stacks Command -----------------------------------------------------
createTcpUdp = Framework.getParameter('discover_TCP_UDP')
if isNotNull(createTcpUdp) and string.lower(createTcpUdp) == 'true':
createTcpUdp = 1
else:
createTcpUdp = 0
tcpStacksList = ev4_getTcpStackNameOutput(ls)
connsList = []
routeList = []
linkDevLinkDict = {}
ipMacDict = {}
homeLists = []
for tcpStack in tcpStacksList:
linkDevLinkDict = appendToDictionary(linkDevLinkDict, ev14_getTcpDevLinkOutput(ls, tcpStack)) # for TCP devices and interfaces (links)
ipMacDict = appendToDictionary(ipMacDict, ev15_getArpCacheOutput(ls, tcpStack)) # for TCP interfaces (links)
homeLists = appendToList(homeLists, ev5_getHomelistOutput(ls, tcpStack)) # for IP addresses and links
if createTcpUdp:
connsList = appendToList(connsList, ev12_getTcpConnOutput(ls, tcpStack)) # for TCP connections
routeList = appendToList(routeList, ev13_getTcpRouteOutput(ls, tcpStack)) # for TCP connections
_vector.addAll(osh_createDeviceAndLinkOsh(lparOsh, ipOshDict, lparName, linkDevLinkDict, ipMacDict, homeLists))
if createTcpUdp:
_vector.addAll(osh_createTcpConnectionsOsh(lparOsh, ipOshDict, connsList, knownPortsConfigFile, homeLists))
return _vector
####################################
## Create Jobs Objects ##
####################################
def createJobsOSH(joblist,lparOSH):
myVec = ObjectStateHolderVector()
for job in joblist:
jobOSH = ObjectStateHolder('mainframe_job')
jobOSH.setAttribute('name', job[0])
jobOSH.setAttribute('step_name',job[1])
jobOSH.setAttribute('proc_step',job[2])
jobOSH.setAttribute('job_id',job[3])
jobOSH.setAttribute('process_user',job[4])
jobOSH.setIntegerAttribute('current_storage', int(job[8]))
jobOSH.setAttribute('program_name',job[9])
jobid = job[3]
if re.findall("STC.*", jobid):
jobOSH.setAttribute('type', 'Started Task')
elif re.findall("JOB.*", jobid): #CP15
jobOSH.setAttribute('type', 'Job') #CP15
else:
continue #CP15
jobOSH.setContainer(lparOSH)
myVec.add(jobOSH)
return myVec
####################################
## Create DASD Volume object ##
####################################
def createDASDVolOSH(vollist,lparOSH):
dasdOSH = ObjectStateHolder('dasd3390')
dasdOSH.setAttribute('name', vollist[0])
dasdOSH.setIntegerAttribute('num_tracks', int(vollist[1]))
dasdOSH.setIntegerAttribute('tracks_per_cyl', int(vollist[2]))
dasdOSH.setIntegerAttribute('volume_free_extents', int(vollist[3]))
dasdOSH.setIntegerAttribute('volume_free_tracks', int(vollist[4]))
dasdOSH.setIntegerAttribute('largest_extent', int(vollist[5]))
dasdOSH.setIntegerAttribute('percent_used', int(vollist[6]))
dasdOSH.setContainer(lparOSH)
return dasdOSH
####################################
## Create DASD Storage Group ##
####################################
def createDASDSG(grouplist,lparOSH):
dasdOSH = ObjectStateHolder('volumegroup')
dasdOSH.setAttribute('name', grouplist[0])
dasdOSH.setContainer(lparOSH)
return dasdOSH
#############################################################
##     Get the Individual DASD Volumes and the Groups      ##
#############################################################
def getvolumes(ls,lparOSH):
vector = ObjectStateHolderVector()
vollinelist = []
volDICT = {}
#
    # First get the individual DASD volumes for the LPAR
#
output = ls.evSysInfoCmd(_CMD_I_DASD,'01')
if output.isSuccess() and len(output.cmdResponseList) > 0:
lines = output.cmdResponseList
for line in lines:
vollinelist = line.split('|')
volDICT[vollinelist[0]] = vollinelist
vector.add(createDASDVolOSH(vollinelist,lparOSH))
return vector, volDICT
#############################################################
## Get the Storage Volumes in the Storage Groups ##
#############################################################
def getStorageVolumes(ls, lparOSH, vgOSH, sgname, volDICT):
vector = ObjectStateHolderVector()
volumelist = []
#
# First get the volumes for the storage group
#
output = ls.evSysInfoCmd(sgname,'12','evsgv')
if output.isSuccess() and len(output.cmdResponseList) > 0:
lines = output.cmdResponseList
for line in lines:
volumelist = line.split()
if (volumelist[0] in volDICT.keys()):
volOSH = createDASDVolOSH(volDICT[volumelist[0]],lparOSH)
vector.add(modeling.createLinkOSH('containment', vgOSH , volOSH))
return vector
#############################################################
## Get the Storage Groups ##
#############################################################
def getStorageGroups(ls, lparOSH):
vector = ObjectStateHolderVector()
grouplist = []
(volvector, volDICT) = getvolumes(ls,lparOSH)
vector.addAll(volvector)
#
# Get the Storage Groups
#
output = ls.evSysInfoCmd('','12','evsgl')
if output.isSuccess() and len(output.cmdResponseList) > 0:
lines = output.cmdResponseList
for line in lines:
grouplist = line.split()
#Skip the VIO group as it is not a real group
if grouplist[0] == 'VIO':
continue
#Verify we have a valid group, must be at least 10 entries to be valid
if len(grouplist) >= 10:
vgOSH = createDASDSG(grouplist,lparOSH)
vector.add (vgOSH)
vector.addAll(getStorageVolumes(ls, lparOSH, vgOSH, grouplist[0], volDICT))
return vector
#############################################################
# Discover the Dasd Storage connected to the Mainframe
#############################################################
def processDasd(ls,lparOSH,Framework):
_vector = ObjectStateHolderVector()
discoverDasd = Framework.getParameter('discover_DASD')
if isNotNull(discoverDasd) and string.lower(discoverDasd) == 'true':
discoverDasd = 1
else:
discoverDasd = 0
if discoverDasd:
_vector = getStorageGroups(ls, lparOSH)
return _vector
#############################################################
##    Get each Address Space (Jobs and Started Tasks)      ##
#############################################################
def getjobs(ls,jobregex,lparOSH):
vector = ObjectStateHolderVector()
joblist = []
joblinelist = []
if jobregex == None:
jobregex = '*'
#
# First get the jobs and started tasks
#
output = ls.evSysInfoCmd(jobregex,'40')
if output.isSuccess() and len(output.cmdResponseList) > 0:
lines = output.cmdResponseList
for line in lines:
joblinelist = line.split('|')
joblist.append(joblinelist)
vector.addAll(createJobsOSH(joblist,lparOSH))
else:
        logger.reportWarning('Jobs were not found on the target system. Please verify the regex parameter and rerun discovery.')
return vector
#############################################################
## Process the Host Resources ##
#############################################################
def processHostResources(ls, lparOsh, Framework):
_vector = ObjectStateHolderVector()
#===========================================================================
# Run commands and create OSHs
# SYMLIST, CPULIST, HOMELIST, SSILIST, MAJNODES, PAGELIST, LISTPROD
#===========================================================================
# Symbols ------------------------------------------------------------------
symbolsMap = ev2_getSymlistOutput(ls) # {symbolName:symbolValue}
# Create Sysplex OSH -------------------------------------------------------
(sysplexTopology, sysplexOsh) = osh_createSysplexOsh(lparOsh, symbolsMap)
_vector.addAll(sysplexTopology)
# CPU List Command ---------------------------------------------------------
(cpuLists, cpcSi, cpcId, cpcName, lpId, lpName) = ev3_getCpulistOutput(ls)
''' Create Mainframe CPC OSH '''
_vector.addAll(osh_createMainframeCpcOsh(lparOsh, cpcSi, cpcId, cpcName, cpuLists))
''' Create CPU OSH '''
createCpu = Framework.getParameter('discover_CPUs')
if isNotNull(createCpu) and string.lower(createCpu) == 'true':
_vector.addAll(osh_createCpuOsh(lparOsh, cpuLists))
''' TCPIP Stacks Command '''
tcpStacksList = ev4_getTcpStackNameOutput(ls)
# For every TCP stack run the TCPIP NETSTAT HOME ---------------------------
homeLists = []
for tcpStack in tcpStacksList:
homeLists = homeLists + ev5_getHomelistOutput(ls, tcpStack) # [ADDRESS, LINK, FLG]
# Create IP OSH ------------------------------------------------------------
(ipOshv, ipOshDict) = osh_createIpOsh(lparOsh, homeLists)
_vector.addAll(ipOshv)
createSubsystem = Framework.getParameter('discover_Subsystems')
if isNotNull(createSubsystem) and string.lower(createSubsystem) == 'true':
''' SSI Command '''
ssiList = ev6_getSsilistOutput(ls) # {Subsystem Name:[Dynamic, Status, Commands]}
''' Create Subsystem OSH '''
_vector.addAll(osh_createSubsystemsOsh(lparOsh, ssiList))
createNodes = Framework.getParameter('discover_MajorNodes')
if isNotNull(createNodes) and string.lower(createNodes) == 'true':
''' Major Nodes Command '''
majorNodesLists = ev7_getMajorNodesOutput(ls) # [Name, Type, Status]
''' Create Mainframe Major Nodes OSH '''
_vector.addAll(osh_createMajorNodesOsh(lparOsh, majorNodesLists))
createPageDatasets = Framework.getParameter('discover_PageDatasets')
if isNotNull(createPageDatasets) and string.lower(createPageDatasets) == 'true':
''' Page Lists Command '''
pageLists = ev8_getPagelistOutput(ls) # [Type, Used, Status, Device, DSN_Name]
''' Create Mainframe Page Dataset OSH '''
_vector.addAll(osh_createPageOsh(lparOsh, pageLists))
createSoftware = Framework.getParameter('discover_Software')
if isNotNull(createSoftware) and string.lower(createSoftware) == 'true':
''' Prod Lists Command '''
prodLists = ev9_getListProdOutput(ls) # [ID, name, feature, version, owner, state]
''' Create Mainframe Software OSH '''
_vector.addAll(osh_createSoftwareOsh(lparOsh, prodLists))
createJobs = Framework.getParameter('discover_Jobs')
if isNotNull(createJobs) and string.lower(createJobs) == 'true':
jobregex = Framework.getParameter('job_Regex')
if isNotNull(jobregex):
_vector.addAll(getjobs(ls,jobregex,lparOsh))
else:
            logger.reportWarning('Regex parameter invalid. Please verify the regex parameter and rerun discovery.')
return _vector, ipOshDict, sysplexOsh
#######
# MAIN
#######
def DiscoveryMain(Framework):
OSHVResult = ObjectStateHolderVector()
knownPortsConfigFile = Framework.getConfigFile(CollectorsParameters.KEY_COLLECTORS_SERVERDATA_PORTNUMBERTOPORTNAME)
# create LPAR node
lparName = Framework.getDestinationAttribute(PARAM_LPAR_NAME)
hostId = Framework.getDestinationAttribute(PARAM_HOST_ID)
lparOsh = None
if eview_lib.isNotNull(hostId):
lparOsh = modeling.createOshByCmdbIdString('host_node', hostId)
ls = eview_lib.EvShell(Framework)
(hostResourcesOshv, ipOshDict, sysplexOsh) = processHostResources(ls, lparOsh, Framework)
OSHVResult.addAll(hostResourcesOshv)
(networkResourcesOshv) = processNetworkResources(ls, lparOsh, ipOshDict, lparName, sysplexOsh, knownPortsConfigFile, Framework)
OSHVResult.addAll(networkResourcesOshv)
OSHVResult.addAll(processDasd(ls,lparOsh,Framework))
ls.closeClient()
return OSHVResult | [
"[email protected]"
] | |
90a26d93ea05d64db95e9ed53c7fe2fcd4b30d8a | 56591823019e0ac1d857f97a1b8c85e9d85a8385 | /Scopuli/Interfaces/WEB/Jinja/Filters.py | d2702eaf522afa636d5c239edcaee4604161951d | [
"Apache-2.0"
] | permissive | MaxOnNet/scopuli-core-web | 3c19e312ec5688034295ac86a7a56fe2b2cf7915 | 66a2c31b36d7fc05be36ba5d5b141644459b4aba | refs/heads/master | 2020-03-23T19:49:56.383093 | 2018-08-30T13:44:31 | 2018-08-30T13:44:31 | 142,004,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright [2018] Tatarnikov Viktor [[email protected]]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" """
import phonenumbers
def _decode_text(value):
"""
Decode a text-like value for display.
Unicode values are returned unchanged. Byte strings will be decoded
with a text-safe replacement for unrecognized characters.
"""
if isinstance(value, bytes):
return value.decode('ascii', 'replace')
else:
return value
def filter_markdown(value):
from flask import Markup
from markdown import markdown
return Markup(markdown(value))
def filter_printable(value):
try:
return _decode_text(repr(value))
except Exception as e:
return '<repr(%s) raised %s: %s>' % (
object.__repr__(value), type(e).__name__, e)
def filter_shuffle(seq):
import random
try:
result = list(seq)
random.shuffle(result)
return result
    except Exception:
return seq
def filter_phonenumber(value, country='RU', format=phonenumbers.PhoneNumberFormat.INTERNATIONAL):
try:
parsed = phonenumbers.parse(value, country)
return phonenumbers.format_number(parsed, format)
except phonenumbers.NumberParseException as e:
return value
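# Wiring sketch (assumed usage; this module only defines the filters): they
# can be registered on a Jinja2 environment, or on a Flask app's
# ``app.jinja_env.filters`` mapping, for example:
#
#     from jinja2 import Environment
#     env = Environment()
#     env.filters['markdown'] = filter_markdown
#     env.filters['shuffle'] = filter_shuffle
#     env.filters['phonenumber'] = filter_phonenumber
#     env.filters['money'] = filter_money   # defined just below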
def filter_money(value):
return "{money:0,.2f} р.".format(money=value) | [
"[email protected]"
] | |
8903d69cc21adc5ab2090880649a80026486b7cd | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba0977.pngMap.py | 1cd8f233a130576370bea74bec2c2353600f8e4f | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba0977.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111100001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000011110100000000000010111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000011000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110111000000000000000000000000000000000000000011111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000011011111111011111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000011111111101111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000011111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000001111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000011111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000000111111111111111111',
'11111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000001111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110111111110000000000000000000000000000000111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110001111100000000000000000000000001000000011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000001011111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100010000000000000000000000000000000000000000000111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000100011111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000000010011111111111111111111111111',
'11111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111111111111111111010000000000000000000000000000000000000000000000000000000000111111111111111111111111',
'11111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000111111111111111111111111',
'11111111111111111111111111111111111111111100000000000000000000000000000000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111',
'11111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000001000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000001000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000110000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000110000000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111100010001011001111011111101101111111100000000000000000000000000000000000000001111111111111111',
'11111111111111111111111111111111111100000000111011110011111100001111111110000000000000000000000000000000010001000111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000101111111111',
'11111111111111111111111111111111111111111111111111111111111111111111001111000000000000000000000000000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111000000000100000000000000000000000000000000000000000000000111111111',
'11111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000001111111111',
'11111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000011111111',
'11111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000011111111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000001111111111111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000111111111111',
'11111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000000000000010111111111111',
'11111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000010010100111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000000000101001111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000101111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111110001000000000000000000000000000000000111111111111111111111111',
]
| [
"[email protected]"
] | |
ee1291fd0e95c7b23cc8c9d9423999e621f6112c | e1eaed6dde62fc54eb317d28dbd18e0740e3e8f3 | /official/vision/beta/evaluation/segmentation_metrics.py | ae1131dd227009686ac52ccbdfb66c8051ba2da9 | [
"Apache-2.0"
] | permissive | nlpming/models | cf5008d2e66d2b66b6d61423e214f2f9f9fbe472 | 3cbf0748529d787dd09fa3ed031e557f0ddfa268 | refs/heads/master | 2021-12-03T03:29:16.042489 | 2021-11-23T14:09:10 | 2021-11-23T14:09:10 | 206,007,973 | 0 | 0 | Apache-2.0 | 2019-09-03T06:47:46 | 2019-09-03T06:47:46 | null | UTF-8 | Python | false | false | 9,914 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for segmentation."""
import tensorflow as tf
from official.vision.beta.evaluation import iou
class MeanIoU(tf.keras.metrics.MeanIoU):
"""Mean IoU metric for semantic segmentation.
This class utilizes tf.keras.metrics.MeanIoU to perform batched mean iou when
both input images and groundtruth masks are resized to the same size
(rescale_predictions=False). It also computes mean iou on groundtruth original
sizes, in which case, each prediction is rescaled back to the original image
size.
"""
def __init__(
self, num_classes, rescale_predictions=False, name=None, dtype=None):
"""Constructs Segmentation evaluator class.
Args:
num_classes: `int`, number of classes.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale
predictions.
      name: `str`, name of the metric instance.
dtype: data type of the metric result.
"""
self._rescale_predictions = rescale_predictions
super().__init__(num_classes=num_classes, name=name, dtype=dtype)
def update_state(self, y_true, y_pred):
"""Updates metric state.
Args:
      y_true: `dict`, dictionary with the following keys and values.
- masks: [batch, width, height, 1], groundtruth masks.
- valid_masks: [batch, width, height, 1], valid elements in the mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
      y_pred: Tensor [batch, width_p, height_p, num_classes], predicted masks.
"""
predictions = y_pred
masks = y_true['masks']
valid_masks = y_true['valid_masks']
images_info = y_true['image_info']
if isinstance(predictions, tuple) or isinstance(predictions, list):
predictions = tf.concat(predictions, axis=0)
masks = tf.concat(masks, axis=0)
valid_masks = tf.concat(valid_masks, axis=0)
images_info = tf.concat(images_info, axis=0)
    # Ignored mask elements are set to zero for the argmax op.
masks = tf.where(valid_masks, masks, tf.zeros_like(masks))
if self._rescale_predictions:
# This part can only run on cpu/gpu due to dynamic image resizing.
for i in range(tf.shape(predictions)[0]):
mask = masks[i]
valid_mask = valid_masks[i]
predicted_mask = predictions[i]
image_info = images_info[i]
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
predicted_mask = tf.image.resize(
predicted_mask,
rescale_size,
method=tf.image.ResizeMethod.BILINEAR)
predicted_mask = tf.image.crop_to_bounding_box(predicted_mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.crop_to_bounding_box(mask, 0, 0, image_shape[0],
image_shape[1])
valid_mask = tf.image.crop_to_bounding_box(valid_mask, 0, 0,
image_shape[0],
image_shape[1])
predicted_mask = tf.argmax(predicted_mask, axis=2)
flatten_predictions = tf.reshape(predicted_mask, shape=[1, -1])
flatten_masks = tf.reshape(mask, shape=[1, -1])
flatten_valid_masks = tf.reshape(valid_mask, shape=[1, -1])
super(MeanIoU, self).update_state(
flatten_masks, flatten_predictions,
tf.cast(flatten_valid_masks, tf.float32))
else:
predictions = tf.image.resize(
predictions,
tf.shape(masks)[1:3],
method=tf.image.ResizeMethod.BILINEAR)
predictions = tf.argmax(predictions, axis=3)
flatten_predictions = tf.reshape(predictions, shape=[-1])
flatten_masks = tf.reshape(masks, shape=[-1])
flatten_valid_masks = tf.reshape(valid_masks, shape=[-1])
super().update_state(flatten_masks, flatten_predictions,
tf.cast(flatten_valid_masks, tf.float32))
class PerClassIoU(iou.PerClassIoU):
"""Per Class IoU metric for semantic segmentation.
This class utilizes iou.PerClassIoU to perform batched per class
iou when both input images and groundtruth masks are resized to the same size
(rescale_predictions=False). It also computes per class iou on groundtruth
original sizes, in which case, each prediction is rescaled back to the
original image size.
"""
def __init__(
self, num_classes, rescale_predictions=False, name=None, dtype=None):
"""Constructs Segmentation evaluator class.
Args:
num_classes: `int`, number of classes.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale
predictions.
name: `str`, name of the metric instance..
dtype: data type of the metric result.
"""
self._rescale_predictions = rescale_predictions
super().__init__(num_classes=num_classes, name=name, dtype=dtype)
def update_state(self, y_true, y_pred):
"""Updates metric state.
Args:
      y_true: `dict`, dictionary with the following keys and values.
- masks: [batch, width, height, 1], groundtruth masks.
- valid_masks: [batch, width, height, 1], valid elements in the mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
      y_pred: Tensor [batch, width_p, height_p, num_classes], predicted masks.
"""
predictions = y_pred
masks = y_true['masks']
valid_masks = y_true['valid_masks']
images_info = y_true['image_info']
if isinstance(predictions, tuple) or isinstance(predictions, list):
predictions = tf.concat(predictions, axis=0)
masks = tf.concat(masks, axis=0)
valid_masks = tf.concat(valid_masks, axis=0)
images_info = tf.concat(images_info, axis=0)
    # Ignored mask elements are set to zero for the argmax op.
masks = tf.where(valid_masks, masks, tf.zeros_like(masks))
if self._rescale_predictions:
# This part can only run on cpu/gpu due to dynamic image resizing.
for i in range(tf.shape(predictions)[0]):
mask = masks[i]
valid_mask = valid_masks[i]
predicted_mask = predictions[i]
image_info = images_info[i]
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
predicted_mask = tf.image.resize(
predicted_mask,
rescale_size,
method=tf.image.ResizeMethod.BILINEAR)
predicted_mask = tf.image.crop_to_bounding_box(predicted_mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.crop_to_bounding_box(mask, 0, 0, image_shape[0],
image_shape[1])
valid_mask = tf.image.crop_to_bounding_box(valid_mask, 0, 0,
image_shape[0],
image_shape[1])
predicted_mask = tf.argmax(predicted_mask, axis=2)
flatten_predictions = tf.reshape(predicted_mask, shape=[1, -1])
flatten_masks = tf.reshape(mask, shape=[1, -1])
flatten_valid_masks = tf.reshape(valid_mask, shape=[1, -1])
super().update_state(flatten_masks, flatten_predictions,
tf.cast(flatten_valid_masks, tf.float32))
else:
predictions = tf.image.resize(
predictions,
tf.shape(masks)[1:3],
method=tf.image.ResizeMethod.BILINEAR)
predictions = tf.argmax(predictions, axis=3)
flatten_predictions = tf.reshape(predictions, shape=[-1])
flatten_masks = tf.reshape(masks, shape=[-1])
flatten_valid_masks = tf.reshape(valid_masks, shape=[-1])
super().update_state(flatten_masks, flatten_predictions,
tf.cast(flatten_valid_masks, tf.float32))
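if __name__ == '__main__':
  # Illustrative smoke test, not part of the original module: exercises
  # MeanIoU with rescale_predictions=False on random tensors whose shapes
  # follow the update_state docstrings above. All values are made up.
  batch, height, width, num_classes = 2, 8, 8, 3
  y_pred = tf.random.uniform([batch, height, width, num_classes])
  y_true = {
      'masks': tf.random.uniform(
          [batch, height, width, 1], maxval=num_classes, dtype=tf.int32),
      'valid_masks': tf.ones([batch, height, width, 1], dtype=tf.bool),
      'image_info': tf.tile(
          tf.constant([[[height, width], [height, width],
                        [1.0, 1.0], [0.0, 0.0]]], dtype=tf.float32),
          [batch, 1, 1]),
  }
  metric = MeanIoU(num_classes=num_classes, rescale_predictions=False)
  metric.update_state(y_true, y_pred)
  print('mean IoU:', metric.result().numpy())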
| [
"[email protected]"
] | |
a3d679949562466f4ce55d64546316cf11b470e1 | 1b5404b8099de74d4e39e0a41b1d04c61defa8d4 | /Лабиринт/dump/labyrinth_find_solution.py | 6284287ae0344286006f098090bcd1a1b2c5c773 | [] | no_license | ipeterov/random-stuff | 5d07bdcfdcb145d06ed89095f2ad34b70ff0f0bd | dbb38d42331f636919fd149b23783e02ee2c9afb | refs/heads/master | 2023-05-14T00:41:51.122251 | 2023-05-04T12:10:26 | 2023-05-04T12:10:26 | 206,028,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | import pickle
import sys
def refactored_labyrinth(labyrinth):
    # wall flags per cell: 0 - up, 1 - right, 2 - down, 3 - left
    walls = []
    for y in range(len(labyrinth)):
        walls.append([])
        for x in range(len(labyrinth[0])):
            walls[y].append([0, 0, 0, 0])
    for y in range(len(labyrinth)):
        for x in range(len(labyrinth[0])):
            # the border checks come first so index -1 never wraps around
            if y == 0 or labyrinth[y-1][x]['d'] == 1:
                walls[y][x][0] = 1
            if labyrinth[y][x]['r'] == 1 or x == len(labyrinth[0]) - 1:
                walls[y][x][1] = 1
            if labyrinth[y][x]['d'] == 1 or y == len(labyrinth) - 1:
                walls[y][x][2] = 1
            if x == 0 or labyrinth[y][x-1]['r'] == 1:
                walls[y][x][3] = 1
    return walls
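# Worked example (tiny hand-made maze; the input format mirrors what this
# script expects from the pickled labyrinth: each cell is a dict with
# 'd' (down) and 'r' (right) wall flags):
def _demo_refactor():
    # one row, two cells, with an open passage between them; the border
    # logic closes every outer edge
    tiny = [[{'d': 1, 'r': 0}, {'d': 1, 'r': 1}]]
    return refactored_labyrinth(tiny)  # -> [[[1, 0, 1, 1], [1, 1, 1, 0]]]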
def find_path(labyrinth, start_coords = [0,0]):
    def move(current_coords, forbidden_move):
        if gpath:
            # the goal was already reached on another branch; just unwind
            return
        if current_coords == goal_coords:
            for element in path:
                gpath.append(element)
            return
        path.append(current_coords)
        print(current_coords)
        y = current_coords[0]
        x = current_coords[1]
        # seal every entrance into this cell so no branch can revisit it;
        # explicit bounds checks replace the old bare try/excepts, whose
        # negative indices silently wrapped to the far side of the maze
        if y + 1 < len(labyrinth):
            labyrinth[y + 1][x][0] = 1
        if x - 1 >= 0:
            labyrinth[y][x - 1][1] = 1
        if y - 1 >= 0:
            labyrinth[y - 1][x][2] = 1
        if x + 1 < len(labyrinth[0]):
            labyrinth[y][x + 1][3] = 1
        dead_end = False
        while not dead_end:
            moved = False
            for i in range(4):
                if labyrinth[y][x][i] != 1 and i != forbidden_move:
                    if i == 0:
                        move([y - 1, x], 2)
                    elif i == 1:
                        move([y, x + 1], 3)
                    elif i == 2:
                        move([y + 1, x], 0)
                    elif i == 3:
                        move([y, x - 1], 1)
                    moved = True
                    break
            if gpath:
                break
            if not moved:
                dead_end = True
        path.pop()
#~ print(labyrinth)
labyrinth = refactored_labyrinth(labyrinth)
#~ print(labyrinth)
goal_coords = [99, 99]
gpath = []
path = []
    # the depth-first walk recurses once per cell on the current path, so
    # raise the recursion ceiling for large mazes (e.g. 100x100)
    sys.setrecursionlimit(max(10000, len(labyrinth) * len(labyrinth[0]) + 100))
    move(start_coords, -1)
    if len(gpath) == 0:
        print('no path to the goal was found')
return None
gpath.append(goal_coords)
return gpath
name = 'labyrinth_backtrack'
with open(name, 'rb') as f:
    labyrinth = pickle.load(f)
path = find_path(labyrinth)
with open('labyrinth_solution', 'wb') as f:
    pickle.dump(path, f)
| [
"[email protected]"
] | |
73db1141a062dab86543dba3571ab8faea784fdc | 4a5562bf8a7967c9c5d76265d89c366165bff9f8 | /template_python/setup.py | a4bf90beaf8b2625aaf3603393c5c536b60eec9a | [] | no_license | lokendert/me132_students | 640f935bd6e5c5d65329a161731afad4068a72e0 | 8e1075c4b61bef5c8f4d322cb168e2f942e1fad6 | refs/heads/master | 2020-03-31T03:40:46.680398 | 2011-02-04T20:13:29 | 2011-02-04T20:13:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | from setuptools import setup, find_packages
setup(
name='me132_template',
author="The ME132 TAs",
author_email="[email protected]",
url='www.its.caltech.edu/~me132',
description="A minimal Player client",
version="0.1",
package_dir={'':'src'},
packages=find_packages(),
entry_points={
'console_scripts': [
# List executables in the format '<name> = <module>:<function>'
'my_python_client = me132_template.basic_client:main'
]
}
)
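# Installation note (illustrative): ``pip install -e .`` from the project
# root exposes a ``my_python_client`` executable on the PATH, wired to the
# ``main()`` function in ``src/me132_template/basic_client.py``.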
| [
"[email protected]"
] | |
bf3f86be25ab7a8ffe01b3fea5ec5bc1ae6b5c2b | a4a63eedacd544872fbfa33fc58d7cf1558829b7 | /backend/event/api/v1/urls.py | 046246934dbd8d54f00c2d7d0a6bb4781e87498b | [] | no_license | crowdbotics-apps/revil-18107 | 3d9bd52855e33debaa60f4f5c801629fb1aa60da | 2671f3410b43cd8ed2ccc51780a80366fb594684 | refs/heads/master | 2022-10-17T09:34:39.097853 | 2020-06-15T00:05:02 | 2020-06-15T00:05:02 | 272,301,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
VendorViewSet,
LocationViewSet,
FavoritesViewSet,
VendorDetailViewSet,
CategoryViewSet,
FaqViewSet,
PresenterViewSet,
ScheduleViewSet,
MyScheduleViewSet,
SponsorViewSet,
)
router = DefaultRouter()
router.register("faq", FaqViewSet)
router.register("vendordetail", VendorDetailViewSet)
router.register("location", LocationViewSet)
router.register("presenter", PresenterViewSet)
router.register("myschedule", MyScheduleViewSet)
router.register("schedule", ScheduleViewSet)
router.register("category", CategoryViewSet)
router.register("favorites", FavoritesViewSet)
router.register("vendor", VendorViewSet)
router.register("sponsor", SponsorViewSet)
urlpatterns = [
path("", include(router.urls)),
]
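# DefaultRouter expands each registration above into the standard DRF
# list and detail routes, for example (illustrative):
#   GET/POST                /faq/
#   GET/PUT/PATCH/DELETE    /faq/{pk}/
# and likewise for vendor, schedule, sponsor, and the rest.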
| [
"[email protected]"
] | |
5896418942efd005a46d1c7d2e74df68364411c9 | 9ede3bec6dc9cd58f91ba3ee2b3f4b7eb3b6c889 | /lintreview/repo.py | aa745a9b199595da98ab54ef33439fa29c5edb40 | [
"MIT"
] | permissive | LyleH/lint_review_1 | d0816e68ee74c507357471b1183348b2c8d59af2 | a36945446745a9e8d8c1f1955e084add6563647b | refs/heads/master | 2021-01-19T11:43:42.780988 | 2016-09-22T05:28:23 | 2016-09-22T05:28:23 | 68,887,536 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py | import lintreview.github as github
import lintreview.git as git
import logging
log = logging.getLogger(__name__)
class GithubRepository(object):
"""Abstracting wrapper for the
various interactions we have with github.
This will make swapping in other hosting systems
a tiny bit easier in the future.
"""
def __init__(self, config, user, repo_name):
self.config = config
self.user = user
self.repo_name = repo_name
def repository(self):
"""Get the underlying repository model
"""
self.repo = github.get_repository(
self.config,
self.user,
self.repo_name)
return self.repo
def pull_request(self, number):
"""Get a pull request by number.
"""
pull = self.repository().pull_request(number)
return GithubPullRequest(pull)
def ensure_label(self, label):
"""Create label if it doesn't exist yet
"""
repo = self.repository()
if not repo.label(label):
repo.create_label(
name=label,
color="bfe5bf", # a nice light green
)
def create_status(self, sha, state, description):
"""Create a commit status
"""
context = self.config.get('APP_NAME', 'lintreview')
repo = self.repository()
repo.create_status(
sha,
state,
None,
description,
context)
class GithubPullRequest(object):
"""Abstract the underlying github models.
This makes other code simpler, and enables
the ability to add other hosting services later.
"""
def __init__(self, pull_request):
self.pull = pull_request
@property
def number(self):
return self.pull.number
@property
def is_private(self):
data = self.pull.as_dict()
return data['head']['repo']['private']
@property
def head(self):
data = self.pull.as_dict()
return data['head']['sha']
@property
def clone_url(self):
data = self.pull.as_dict()
return data['head']['repo']['clone_url']
@property
def target_branch(self):
data = self.pull.as_dict()
return data['base']['ref']
def commits(self):
return self.pull.commits()
def review_comments(self):
return self.pull.review_comments()
def files(self):
return list(self.pull.files())
def remove_label(self, label_name):
issue = self.pull.issue()
labels = issue.labels()
if not any(label_name == label.name for label in labels):
return
log.debug("Removing issue label '%s'", label_name)
issue.remove_label(label_name)
def add_label(self, label_name):
issue = self.pull.issue()
issue.add_labels(label_name)
def create_comment(self, body):
self.pull.create_comment(body)
def create_review_comment(self, body, commit_id, path, position):
self.pull.create_review_comment(body, commit_id, path, position)
| [
"[email protected]"
] | |
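As a rough usage sketch of the two wrappers above: a GithubRepository is built from a dict-like config (the exact credential keys lintreview.github.get_repository needs are not visible in this file), and pull_request() hands back the GithubPullRequest wrapper. The repo name, pull number, and config contents below are placeholders, and every call needs a reachable GitHub API to succeed.

# Usage sketch only: 'some-user'/'some-repo' and the config contents are
# placeholders; this is not a runnable-offline example.
from lintreview.repo import GithubRepository

config = {'APP_NAME': 'lintreview'}  # plus GitHub credentials in practice
repo = GithubRepository(config, 'some-user', 'some-repo')

pull = repo.pull_request(42)           # returns a GithubPullRequest wrapper
print(pull.head, pull.target_branch)   # head sha and base branch name
repo.create_status(pull.head, 'pending', 'lint review started')
pull.add_label('lint-review')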
2ad8fb86568b9db89c98af5b07780a905127da55 | 0675dad295526480242c9da48310a1c958423e72 | /dmrg_j2j2/build_lattice.py | 0d410148abf8f05a76145d0c57c8cbc956ac5397 | [] | no_license | GiggleLiu/numeric_master | 627e054ab7404b1bbf8b8eec65f05346b35640a3 | 47b9eaf1eeaceacf5ff43f2226620e5c37064095 | refs/heads/master | 2021-08-24T11:31:37.107583 | 2017-11-21T06:26:38 | 2017-11-21T06:26:38 | 111,409,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | #!/usr/bin/env python
test_str = '''
<LATTICES>
<GRAPH name = "heisenberg" dimension="1" vertices="6" edges="5">
<VERTEX id="1" type="0"><COORDINATE>0</COORDINATE></VERTEX>
<VERTEX id="2" type="1"><COORDINATE>2</COORDINATE></VERTEX>
<VERTEX id="3" type="1"><COORDINATE>3</COORDINATE></VERTEX>
<VERTEX id="4" type="1"><COORDINATE>4</COORDINATE></VERTEX>
<VERTEX id="5" type="1"><COORDINATE>5</COORDINATE></VERTEX>
<VERTEX id="6" type="0"><COORDINATE>6</COORDINATE></VERTEX>
<EDGE source="1" target="2" id="1" type="0" vector="1"/>
<EDGE source="2" target="3" id="2" type="0" vector="1"/>
<EDGE source="3" target="4" id="3" type="0" vector="1"/>
<EDGE source="4" target="5" id="4" type="0" vector="1"/>
<EDGE source="5" target="6" id="5" type="0" vector="1"/>
</GRAPH>
</LATTICES>
'''
import lxml.etree as ET
def build_j1j2(size, filename):
lattice = ET.Element('LATTICES')
graph = ET.SubElement(lattice, 'GRAPH', attrib={'name':'J1J2',
        'dimension':'1', 'vertices':'%d'%size, 'edges':'%d'%(2*size)})  # the loop below emits size J1 + size J2 edges on a periodic chain
for i in range(size):
vi = ET.SubElement(graph, 'VERTEX', attrib={'id':'%d'%(i+1),
'type':'0'})
co = ET.SubElement(vi, 'COORDINATE')
co.text = '%d'%i
for i in range(1,size+1):
        ET.SubElement(graph, 'EDGE', attrib={'source':'%d'%(i),'target':'%d'%((i)%size+1),
            'id':'%d'%i, 'type':'0', 'vector':'1'})
        # J2 (next-nearest-neighbour) edges take ids size+1..2*size so every edge id stays unique
        ET.SubElement(graph, 'EDGE', attrib={'source':'%d'%(i),'target':'%d'%((i+1)%size+1),
            'id':'%d'%(size+i), 'type':'1', 'vector':'1'})
    # lxml's ET.tostring returns bytes, so write in binary mode (correct on both Python 2 and 3)
    with open(filename, 'wb') as f:
        f.write(ET.tostring(lattice, pretty_print=True))
if __name__ == '__main__':
import sys
nsite = int(sys.argv[1])
build_j1j2(nsite, 'lattices/j1j2_%d.xml'%nsite)
| [
"[email protected]"
] | |
0a594efa5004b79150ace703b60d768090d1e120 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/odps/tunnel/checksum.py | 8e8fc3c8d359101a5792eac47318794d0db3a82b | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 1,918 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from ..crc import Crc32c, Crc32
from .. import utils
class Checksum(object):
TRUE = bytearray([1])
FALSE = bytearray([0])
def __init__(self, method='crc32c'):
self.crc = Crc32c() if method.lower() == 'crc32c' else Crc32()
def _mode(self):
# use for UT to check if use c extension
try:
from ..src.crc32c_c import Crc32c
return 'c' if isinstance(self.crc, Crc32c) else 'py'
except ImportError:
return 'py'
def update_bool(self, val):
assert isinstance(val, bool)
val = self.TRUE if val else self.FALSE
self._update(val)
def update_int(self, val):
val = struct.pack('<i', val)
self._update(val)
def update_long(self, val):
val = struct.pack('<q', val)
self._update(val)
def update_float(self, val):
val = struct.pack('<d', val)
self._update(val)
def _update(self, b):
# update crc without type checking
self.crc.update(bytearray(b))
def update(self, b):
b = utils.to_binary(b)
self._update(b)
def getvalue(self):
return self.crc.getvalue()
def reset(self):
return self.crc.reset()
| [
"[email protected]"
] | |
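A short usage sketch of the Checksum class above: values are fed in one at a time in their little-endian wire encoding, and getvalue()/reset() frame one checksum per record. Nothing here goes beyond the methods shown in the file, though it assumes the odps package imports resolve.

# Usage sketch: feed values in wire order, read the running CRC, then reset
# for the next record.
from odps.tunnel.checksum import Checksum

ck = Checksum()          # crc32c by default; pass 'crc32' for the other mode
ck.update_bool(True)
ck.update_int(7)
ck.update(b'hello')
digest = ck.getvalue()   # integer CRC over everything fed so far
ck.reset()               # start a fresh checksum frame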
faf3c8dfa5ff66ccb5061a5361f46ea8660794fb | 6eb56f2e3f14f2373be07fe95b1c6fedf1e2d49f | /alien_invasion/settings.py | 88e78bcdae2ac7d01be37c4e59510f618f2782a2 | [
"Apache-2.0"
] | permissive | chaofan-zheng/python_leanring_code | fe22b0370cadebf7456477269aff4a35cef0eb41 | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | refs/heads/main | 2023-02-28T07:56:46.457552 | 2021-02-10T15:08:33 | 2021-02-10T15:08:33 | 323,584,115 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | """Stores all the settings for the Thunder Fighter (alien invasion) game."""
class Settings():
def __init__(self):
self.screen_width = 1200
self.screen_height = 800
self.bg_color = (0, 0, 0)
self.speed_factor = 1.5
self.bullet_speed_factor = 5
self.bullet_width = 5
self.bullet_height = 15
self.bullet_color = 255, 255, 102
self.bullets_allowed = 5
self.alien_speed_factor = 1
self.fleet_drop_speed = 10
        # fleet_direction = 1 means the fleet moves right; -1 means it moves left
        self.fleet_direction = 1
| [
"[email protected]"
] |
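A minimal sketch of how this Settings class is usually consumed when bootstrapping the game window; the pygame setup below is assumed context (module layout guessed), not part of this record.

# Assumed usage context for the Settings class above.
import pygame
from settings import Settings


def run_game():
    pygame.init()
    ai_settings = Settings()
    screen = pygame.display.set_mode(
        (ai_settings.screen_width, ai_settings.screen_height))
    screen.fill(ai_settings.bg_color)
    pygame.display.flip()
    # a real game would continue into an event/update loop here


run_game()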