blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9d73ff38e70007c31e510c64ace9b12e806986bf | ab97a8915347c76d05d6690dbdbcaf23d7f0d1fd | /build/android/resource_sizes.py | 7f4f7ee5a5ea77ae9d7418a846d3e17a8b746d14 | [
"BSD-3-Clause"
] | permissive | laien529/chromium | c9eb243957faabf1b477939e3b681df77f083a9a | 3f767cdd5c82e9c78b910b022ffacddcb04d775a | refs/heads/master | 2022-11-28T00:28:58.669067 | 2020-08-20T08:37:31 | 2020-08-20T08:37:31 | 288,961,699 | 1 | 0 | BSD-3-Clause | 2020-08-20T09:21:57 | 2020-08-20T09:21:56 | null | UTF-8 | Python | false | false | 30,129 | py |
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reports binary size metrics for an APK.
More information at //docs/speed/binary_size/metrics.md.
"""
from __future__ import print_function
import argparse
import collections
from contextlib import contextmanager
import json
import logging
import os
import posixpath
import re
import sys
import tempfile
import zipfile
import zlib
import devil_chromium
from devil.android.sdk import build_tools
from devil.utils import cmd_helper
from devil.utils import lazy
import method_count
from pylib import constants
from pylib.constants import host_paths
_AAPT_PATH = lazy.WeakConstant(lambda: build_tools.GetPath('aapt'))
_BUILD_UTILS_PATH = os.path.join(
host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'gyp')
with host_paths.SysPath(os.path.join(host_paths.DIR_SOURCE_ROOT, 'build')):
import gn_helpers # pylint: disable=import-error
with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
with host_paths.SysPath(host_paths.TRACING_PATH):
from tracing.value import convert_chart_json # pylint: disable=import-error
with host_paths.SysPath(_BUILD_UTILS_PATH, 0):
from util import build_utils # pylint: disable=import-error
from util import zipalign # pylint: disable=import-error
zipalign.ApplyZipFileZipAlignFix()
# Captures an entire config from aapt output.
_AAPT_CONFIG_PATTERN = r'config %s:(.*?)config [a-zA-Z-]+:'
# Matches string resource entries from aapt output.
_AAPT_ENTRY_RE = re.compile(
r'resource (?P<id>\w{10}) [\w\.]+:string/.*?"(?P<val>.+?)"', re.DOTALL)
_BASE_CHART = {
'format_version': '0.1',
'benchmark_name': 'resource_sizes',
'benchmark_description': 'APK resource size information.',
'trace_rerun_options': [],
'charts': {}
}
# Macro definitions look like (something, 123) when
# enable_resource_whitelist_generation=true.
_RC_HEADER_RE = re.compile(r'^#define (?P<name>\w+).* (?P<id>\d+)\)?$')
_RE_NON_LANGUAGE_PAK = re.compile(r'^assets/.*(resources|percent)\.pak$')
_READELF_SIZES_METRICS = {
'text': ['.text'],
'data': ['.data', '.rodata', '.data.rel.ro', '.data.rel.ro.local'],
'relocations': ['.rel.dyn', '.rel.plt', '.rela.dyn', '.rela.plt'],
'unwind': [
'.ARM.extab', '.ARM.exidx', '.eh_frame', '.eh_frame_hdr',
'.ARM.exidxsentinel_section_after_text'
],
'symbols': [
'.dynsym', '.dynstr', '.dynamic', '.shstrtab', '.got', '.plt',
'.got.plt', '.hash', '.gnu.hash'
],
'other': [
'.init_array', '.preinit_array', '.ctors', '.fini_array', '.comment',
'.note.gnu.gold-version', '.note.crashpad.info', '.note.android.ident',
'.ARM.attributes', '.note.gnu.build-id', '.gnu.version',
'.gnu.version_d', '.gnu.version_r', '.interp', '.gcc_except_table'
]
}
def _PercentageDifference(a, b):
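"""Returns the relative difference of b from a, e.g. (100, 107) -> 0.07."""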
if a == 0:
return 0
return float(b - a) / a
def _RunReadelf(so_path, options, tool_prefix=''):
return cmd_helper.GetCmdOutput(
[tool_prefix + 'readelf'] + options + [so_path])
def _ExtractLibSectionSizesFromApk(apk_path, lib_path, tool_prefix):
with Unzip(apk_path, filename=lib_path) as extracted_lib_path:
grouped_section_sizes = collections.defaultdict(int)
no_bits_section_sizes, section_sizes = _CreateSectionNameSizeMap(
extracted_lib_path, tool_prefix)
for group_name, section_names in _READELF_SIZES_METRICS.iteritems():
for section_name in section_names:
if section_name in section_sizes:
grouped_section_sizes[group_name] += section_sizes.pop(section_name)
# Consider all NOBITS sections as .bss.
grouped_section_sizes['bss'] = sum(
v for v in no_bits_section_sizes.itervalues())
# Group any unknown section headers into the "other" group.
for section_header, section_size in section_sizes.iteritems():
sys.stderr.write('Unknown elf section header: %s\n' % section_header)
grouped_section_sizes['other'] += section_size
return grouped_section_sizes
def _CreateSectionNameSizeMap(so_path, tool_prefix):
stdout = _RunReadelf(so_path, ['-S', '--wide'], tool_prefix)
section_sizes = {}
no_bits_section_sizes = {}
# Matches [ 2] .hash HASH 00000000006681f0 0001f0 003154 04 A 3 0 8
for match in re.finditer(r'\[[\s\d]+\] (\..*)$', stdout, re.MULTILINE):
items = match.group(1).split()
target = no_bits_section_sizes if items[1] == 'NOBITS' else section_sizes
target[items[0]] = int(items[4], 16)
return no_bits_section_sizes, section_sizes
def _ParseManifestAttributes(apk_path):
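"""Returns (sdk_version, skip_extract_lib) parsed from aapt's dump of AndroidManifest.xml."""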
# Check if the manifest specifies whether or not to extract native libs.
skip_extract_lib = False
output = cmd_helper.GetCmdOutput([
_AAPT_PATH.read(), 'd', 'xmltree', apk_path, 'AndroidManifest.xml'])
m = re.search(r'extractNativeLibs\(.*\)=\(.*\)(\w)', output)
if m:
skip_extract_lib = not bool(int(m.group(1)))
# Dex decompression overhead varies by Android version.
m = re.search(r'android:minSdkVersion\(\w+\)=\(type \w+\)(\w+)', output)
sdk_version = int(m.group(1), 16)
return sdk_version, skip_extract_lib
def _NormalizeLanguagePaks(translations, factor):
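"""Returns a byte adjustment that replaces the real zipped size of all
language .paks with size(en-US.pak) * num_translations * factor."""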
english_pak = translations.FindByPattern(r'.*/en[-_][Uu][Ss]\.l?pak')
num_translations = translations.GetNumEntries()
ret = 0
if english_pak:
ret -= translations.ComputeZippedSize()
ret += int(english_pak.compress_size * num_translations * factor)
return ret
def _NormalizeResourcesArsc(apk_path, num_arsc_files, num_translations,
out_dir):
"""Estimates the expected overhead of untranslated strings in resources.arsc.
See http://crbug.com/677966 for why this is necessary.
"""
# If there are multiple .arsc files, use the resource packaged APK instead.
if num_arsc_files > 1:
if not out_dir:
return -float('inf')
ap_name = os.path.basename(apk_path).replace('.apk', '.ap_')
ap_path = os.path.join(out_dir, 'arsc/apks', ap_name)
if not os.path.exists(ap_path):
raise Exception('Missing expected file: %s, try rebuilding.' % ap_path)
apk_path = ap_path
aapt_output = _RunAaptDumpResources(apk_path)
# en-rUS is in the default config and may be cluttered with non-translatable
# strings, so en-rGB is a better baseline for finding missing translations.
en_strings = _CreateResourceIdValueMap(aapt_output, 'en-rGB')
fr_strings = _CreateResourceIdValueMap(aapt_output, 'fr')
# en-US and en-GB will never be translated.
config_count = num_translations - 2
size = 0
for res_id, string_val in en_strings.iteritems():
if string_val == fr_strings[res_id]:
string_size = len(string_val)
# 7 bytes is the per-entry overhead (not specific to any string). See
# https://android.googlesource.com/platform/frameworks/base.git/+/android-4.2.2_r1/tools/aapt/StringPool.cpp#414.
# The 1.5 factor was determined experimentally and is meant to account for
# other languages generally having longer strings than english.
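# Worked example (illustrative): one 20-char string left untranslated in 50
# configs is estimated at 50 * (7 + 20 * 1.5) = 1,850 bytes.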
size += config_count * (7 + string_size * 1.5)
return int(size)
def _CreateResourceIdValueMap(aapt_output, lang):
"""Return a map of resource ids to string values for the given |lang|."""
config_re = _AAPT_CONFIG_PATTERN % lang
return {entry.group('id'): entry.group('val')
for config_section in re.finditer(config_re, aapt_output, re.DOTALL)
for entry in re.finditer(_AAPT_ENTRY_RE, config_section.group(0))}
def _RunAaptDumpResources(apk_path):
cmd = [_AAPT_PATH.read(), 'dump', '--values', 'resources', apk_path]
status, output = cmd_helper.GetCmdStatusAndOutput(cmd)
if status != 0:
raise Exception('Failed running aapt command: "%s" with output "%s".' %
(' '.join(cmd), output))
return output
def _ReportDfmSizes(zip_obj, report_func):
sizes = collections.defaultdict(int)
for info in zip_obj.infolist():
# Looks for paths like splits/vr-master.apk, splits/vr-hi.apk.
name_parts = info.filename.split('/')
if name_parts[0] == 'splits' and len(name_parts) == 2:
name_parts = name_parts[1].split('-')
if len(name_parts) == 2:
module_name, config_name = name_parts
if module_name != 'base' and config_name[:-4] in ('master', 'hi'):
sizes[module_name] += info.file_size
for module_name, size in sorted(sizes.iteritems()):
report_func('DFM_' + module_name, 'Size with hindi', size, 'bytes')
class _FileGroup(object):
"""Represents a category that apk files can fall into."""
def __init__(self, name):
self.name = name
self._zip_infos = []
self._extracted_multipliers = []
def AddZipInfo(self, zip_info, extracted_multiplier=0):
self._zip_infos.append(zip_info)
self._extracted_multipliers.append(extracted_multiplier)
def AllEntries(self):
return iter(self._zip_infos)
def GetNumEntries(self):
return len(self._zip_infos)
def FindByPattern(self, pattern):
return next((i for i in self._zip_infos if re.match(pattern, i.filename)),
None)
def FindLargest(self):
if not self._zip_infos:
return None
return max(self._zip_infos, key=lambda i: i.file_size)
def ComputeZippedSize(self):
return sum(i.compress_size for i in self._zip_infos)
def ComputeUncompressedSize(self):
return sum(i.file_size for i in self._zip_infos)
def ComputeExtractedSize(self):
ret = 0
for zi, multiplier in zip(self._zip_infos, self._extracted_multipliers):
ret += zi.file_size * multiplier
return ret
def ComputeInstallSize(self):
return self.ComputeExtractedSize() + self.ComputeZippedSize()
def _DoApkAnalysis(apk_filename, apks_path, tool_prefix, out_dir, report_func):
"""Analyse APK to determine size contributions of different file classes."""
file_groups = []
def make_group(name):
group = _FileGroup(name)
file_groups.append(group)
return group
def has_no_extension(filename):
return os.path.splitext(filename)[1] == ''
native_code = make_group('Native code')
java_code = make_group('Java code')
native_resources_no_translations = make_group('Native resources (no l10n)')
translations = make_group('Native resources (l10n)')
stored_translations = make_group('Native resources stored (l10n)')
icu_data = make_group('ICU (i18n library) data')
v8_snapshots = make_group('V8 Snapshots')
png_drawables = make_group('PNG drawables')
res_directory = make_group('Non-compiled Android resources')
arsc = make_group('Compiled Android resources')
metadata = make_group('Package metadata')
unknown = make_group('Unknown files')
notices = make_group('licenses.notice file')
unwind_cfi = make_group('unwind_cfi (dev and canary only)')
with zipfile.ZipFile(apk_filename, 'r') as apk:
apk_contents = apk.infolist()
sdk_version, skip_extract_lib = _ParseManifestAttributes(apk_filename)
# Pre-L: Dalvik - .odex file is simply decompressed/optimized dex file (~1x).
# L, M: ART - .odex file is compiled version of the dex file (~4x).
# N: ART - Uses Dalvik-like JIT for normal apps (~1x), full compilation for
# shared apps (~4x).
# Actual multipliers calculated using "apk_operations.py disk-usage".
# Will need to update multipliers once apk obfuscation is enabled.
# E.g. with obfuscation, the 4.04 changes to 4.46.
speed_profile_dex_multiplier = 1.17
orig_filename = apks_path or apk_filename
is_webview = 'WebView' in orig_filename
is_monochrome = 'Monochrome' in orig_filename
is_library = 'Library' in orig_filename
is_shared_apk = sdk_version >= 24 and (is_monochrome or is_webview
or is_library)
if sdk_version < 21:
# JellyBean & KitKat
dex_multiplier = 1.16
elif sdk_version < 24:
# Lollipop & Marshmallow
dex_multiplier = 4.04
elif is_shared_apk:
# Oreo and above, compilation_filter=speed
dex_multiplier = 4.04
else:
# Oreo and above, compilation_filter=speed-profile
dex_multiplier = speed_profile_dex_multiplier
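# Illustrative: with dex_multiplier = 4.04, a dex that is 10 MB uncompressed
# is charged an extra 4.04 * 10 MB of odex space in the install breakdown.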
total_apk_size = os.path.getsize(apk_filename)
for member in apk_contents:
filename = member.filename
if filename.endswith('/'):
continue
if filename.endswith('.so'):
basename = posixpath.basename(filename)
should_extract_lib = not skip_extract_lib and basename.startswith('lib')
native_code.AddZipInfo(
member, extracted_multiplier=int(should_extract_lib))
elif filename.endswith('.dex'):
java_code.AddZipInfo(member, extracted_multiplier=dex_multiplier)
elif re.search(_RE_NON_LANGUAGE_PAK, filename):
native_resources_no_translations.AddZipInfo(member)
elif filename.endswith('.pak') or filename.endswith('.lpak'):
compressed = member.compress_type != zipfile.ZIP_STORED
bucket = translations if compressed else stored_translations
extracted_multiplier = 0
if compressed:
extracted_multiplier = int('en_' in filename or 'en-' in filename)
bucket.AddZipInfo(member, extracted_multiplier=extracted_multiplier)
elif filename == 'assets/icudtl.dat':
icu_data.AddZipInfo(member)
elif filename.endswith('.bin'):
v8_snapshots.AddZipInfo(member)
elif filename.startswith('res/'):
if (filename.endswith('.png') or filename.endswith('.webp')
or has_no_extension(filename)):
png_drawables.AddZipInfo(member)
else:
res_directory.AddZipInfo(member)
elif filename.endswith('.arsc'):
arsc.AddZipInfo(member)
elif filename.startswith('META-INF') or filename == 'AndroidManifest.xml':
metadata.AddZipInfo(member)
elif filename.endswith('.notice'):
notices.AddZipInfo(member)
elif filename.startswith('assets/unwind_cfi'):
unwind_cfi.AddZipInfo(member)
else:
unknown.AddZipInfo(member)
if apks_path:
# We're mostly focused on size of Chrome for non-English locales, so assume
# Hindi (arbitrarily chosen) locale split is installed.
with zipfile.ZipFile(apks_path) as z:
hindi_apk_info = z.getinfo('splits/base-hi.apk')
total_apk_size += hindi_apk_info.file_size
_ReportDfmSizes(z, report_func)
total_install_size = total_apk_size
total_install_size_android_go = total_apk_size
zip_overhead = total_apk_size
for group in file_groups:
actual_size = group.ComputeZippedSize()
install_size = group.ComputeInstallSize()
uncompressed_size = group.ComputeUncompressedSize()
extracted_size = group.ComputeExtractedSize()
total_install_size += extracted_size
zip_overhead -= actual_size
report_func('Breakdown', group.name + ' size', actual_size, 'bytes')
report_func('InstallBreakdown', group.name + ' size', int(install_size),
'bytes')
# Only a few metrics are compressed in the first place.
# To avoid over-reporting, track uncompressed size only for compressed
# entries.
if uncompressed_size != actual_size:
report_func('Uncompressed', group.name + ' size', uncompressed_size,
'bytes')
if group is java_code and is_shared_apk:
# Updates are compiled using quicken, but system image uses speed-profile.
extracted_size = int(uncompressed_size * speed_profile_dex_multiplier)
total_install_size_android_go += extracted_size
report_func('InstallBreakdownGo', group.name + ' size',
actual_size + extracted_size, 'bytes')
elif group is translations and apks_path:
# Assume Hindi rather than English (accounted for above in total_apk_size)
total_install_size_android_go += actual_size
else:
total_install_size_android_go += extracted_size
# Per-file zip overhead is caused by:
# * 30 byte entry header + len(file name)
# * 46 byte central directory entry + len(file name)
# * 0-3 bytes for zipalign.
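# Illustrative: an entry named "assets/icudtl.dat" (17 chars) costs about
# (30 + 17) + (46 + 17) + 0-3 = 110-113 bytes of zip overhead.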
report_func('Breakdown', 'Zip Overhead', zip_overhead, 'bytes')
report_func('InstallSize', 'APK size', total_apk_size, 'bytes')
report_func('InstallSize', 'Estimated installed size',
int(total_install_size), 'bytes')
if is_shared_apk:
report_func('InstallSize', 'Estimated installed size (Android Go)',
int(total_install_size_android_go), 'bytes')
transfer_size = _CalculateCompressedSize(apk_filename)
report_func('TransferSize', 'Transfer size (deflate)', transfer_size, 'bytes')
# Size of main dex vs remaining.
main_dex_info = java_code.FindByPattern('classes.dex')
if main_dex_info:
main_dex_size = main_dex_info.file_size
report_func('Specifics', 'main dex size', main_dex_size, 'bytes')
secondary_size = java_code.ComputeUncompressedSize() - main_dex_size
report_func('Specifics', 'secondary dex size', secondary_size, 'bytes')
main_lib_info = native_code.FindLargest()
native_code_unaligned_size = 0
for lib_info in native_code.AllEntries():
section_sizes = _ExtractLibSectionSizesFromApk(
apk_filename, lib_info.filename, tool_prefix)
native_code_unaligned_size += sum(
v for k, v in section_sizes.iteritems() if k != 'bss')
# Size of main .so vs remaining.
if lib_info == main_lib_info:
main_lib_size = lib_info.file_size
report_func('Specifics', 'main lib size', main_lib_size, 'bytes')
secondary_size = native_code.ComputeUncompressedSize() - main_lib_size
report_func('Specifics', 'other lib size', secondary_size, 'bytes')
for metric_name, size in section_sizes.iteritems():
report_func('MainLibInfo', metric_name, size, 'bytes')
# Main metric that we want to monitor for jumps.
normalized_apk_size = total_apk_size
# unwind_cfi exists only in dev, canary, and non-channel builds.
normalized_apk_size -= unwind_cfi.ComputeZippedSize()
# Sections within .so files get 4kb aligned, so use section sizes rather than
# file size. Also gets rid of compression.
normalized_apk_size -= native_code.ComputeZippedSize()
normalized_apk_size += native_code_unaligned_size
# Normalized dex size: Size within the zip + size on disk for Android Go
# devices running Android O (which ~= uncompressed dex size).
# Use a constant compression factor to account for fluctuations.
normalized_apk_size -= java_code.ComputeZippedSize()
normalized_apk_size += java_code.ComputeUncompressedSize()
# Unaligned size should be ~= uncompressed size or something is wrong.
# As of now, padding_fraction ~= .007
padding_fraction = -_PercentageDifference(
native_code.ComputeUncompressedSize(), native_code_unaligned_size)
# Ignore this check for small / no native code
if native_code.ComputeUncompressedSize() > 1000000:
assert 0 <= padding_fraction < .02, (
'Padding was: {} (file_size={}, sections_sum={})'.format(
padding_fraction, native_code.ComputeUncompressedSize(),
native_code_unaligned_size))
if apks_path:
# Locale normalization not needed when measuring only one locale.
# E.g. a change that adds 300 chars of untranslated strings would cause the
# metric to be off by only 390 bytes (assuming a multiplier of 2.3 for
# Hindi).
pass
else:
# Avoid noise caused when strings change and translations haven't yet been
# updated.
num_translations = translations.GetNumEntries()
num_stored_translations = stored_translations.GetNumEntries()
if num_translations > 1:
# Multipliers found by looking at MonochromePublic.apk and seeing how much
# smaller en-US.pak is relative to the average locale.pak.
normalized_apk_size += _NormalizeLanguagePaks(translations, 1.17)
if num_stored_translations > 1:
normalized_apk_size += _NormalizeLanguagePaks(stored_translations, 1.43)
if num_translations + num_stored_translations > 1:
if num_translations == 0:
# WebView stores all locale paks uncompressed.
num_arsc_translations = num_stored_translations
else:
# Monochrome has more configurations than Chrome since it includes
# WebView (which supports more locales), but these should mostly be
# empty so ignore them here.
num_arsc_translations = num_translations
normalized_apk_size += _NormalizeResourcesArsc(
apk_filename, arsc.GetNumEntries(), num_arsc_translations, out_dir)
# It will be -Inf for .apk files with multiple .arsc files and no out_dir set.
if normalized_apk_size < 0:
sys.stderr.write('Skipping normalized_apk_size (no output directory set)\n')
else:
report_func('Specifics', 'normalized apk size', normalized_apk_size,
'bytes')
# The "file count" metric cannot be grouped with any other metrics when the
# end result is going to be uploaded to the perf dashboard in the HistogramSet
# format due to mixed units (bytes vs. zip entries) causing malformed
# summaries to be generated.
# TODO(https://crbug.com/903970): Remove this workaround if unit mixing is
# ever supported.
report_func('FileCount', 'file count', len(apk_contents), 'zip entries')
for info in unknown.AllEntries():
sys.stderr.write(
'Unknown entry: %s %d\n' % (info.filename, info.compress_size))
def _CalculateCompressedSize(file_path):
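"""Returns the size of file_path's content after streaming zlib (deflate) compression."""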
CHUNK_SIZE = 256 * 1024
compressor = zlib.compressobj()
total_size = 0
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(CHUNK_SIZE), b''):  # b'' sentinel: the file is opened in binary mode.
total_size += len(compressor.compress(chunk))
total_size += len(compressor.flush())
return total_size
def _DoDexAnalysis(apk_filename, report_func):
sizes, total_size, num_unique_methods = method_count.ExtractSizesFromZip(
apk_filename)
cumulative_sizes = collections.defaultdict(int)
for classes_dex_sizes in sizes.itervalues():
for count_type, count in classes_dex_sizes.iteritems():
cumulative_sizes[count_type] += count
for count_type, count in cumulative_sizes.iteritems():
report_func('Dex', count_type, count, 'entries')
report_func('Dex', 'unique methods', num_unique_methods, 'entries')
report_func('DexCache', 'DexCache', total_size, 'bytes')
@contextmanager
def Unzip(zip_file, filename=None):
"""Utility for temporary use of a single file in a zip archive."""
with build_utils.TempDir() as unzipped_dir:
unzipped_files = build_utils.ExtractAll(
zip_file, unzipped_dir, True, pattern=filename)
if len(unzipped_files) == 0:
raise Exception(
'%s not found in %s' % (filename, zip_file))
yield unzipped_files[0]
def _ConfigOutDirAndToolsPrefix(out_dir):
if out_dir:
constants.SetOutputDirectory(out_dir)
else:
try:
# Triggers auto-detection when CWD == output directory.
constants.CheckOutputDirectory()
out_dir = constants.GetOutDirectory()
except Exception: # pylint: disable=broad-except
return out_dir, ''
build_vars = gn_helpers.ReadBuildVars(out_dir)
tool_prefix = os.path.join(out_dir, build_vars['android_tool_prefix'])
return out_dir, tool_prefix
def _AnalyzeInternal(apk_path, report_func, args, apks_path=None):
out_dir, tool_prefix = _ConfigOutDirAndToolsPrefix(args.out_dir)
_DoApkAnalysis(apk_path, apks_path, tool_prefix, out_dir, report_func)
_DoDexAnalysis(apk_path, report_func)
def _AnalyzeApkOrApks(report_func, apk_path, args):
if apk_path.endswith('.apk'):
_AnalyzeInternal(apk_path, report_func, args)
elif apk_path.endswith('.apks'):
with tempfile.NamedTemporaryFile(suffix='.apk') as f:
with zipfile.ZipFile(apk_path) as z:
# Currently bundletool is creating two apks when .apks is created
# without specifying an sdkVersion. Always measure the one with an
# uncompressed shared library.
try:
info = z.getinfo('splits/base-master_2.apk')
except KeyError:
info = z.getinfo('splits/base-master.apk')
f.write(z.read(info))
f.flush()
_AnalyzeInternal(f.name, report_func, args, apks_path=apk_path)
else:
raise Exception('Unknown file type: ' + apk_path)
class _Reporter(object):
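"""Reports individual perf metrics and accumulates totals per
(graph_title, trace_title, units) tuple for SynthesizeTotals()."""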
def __init__(self, chartjson):
self._chartjson = chartjson
self.trace_title_prefix = ''
self._combined_metrics = collections.defaultdict(int)
def __call__(self, graph_title, trace_title, value, units):
self._combined_metrics[(graph_title, trace_title, units)] += value
perf_tests_results_helper.ReportPerfResult(
self._chartjson, graph_title, self.trace_title_prefix + trace_title,
value, units)
def SynthesizeTotals(self):
for tup, value in sorted(self._combined_metrics.iteritems()):
graph_title, trace_title, units = tup
perf_tests_results_helper.ReportPerfResult(
self._chartjson, graph_title, 'Combined_' + trace_title, value, units)
def _ResourceSizes(args):
chartjson = _BASE_CHART.copy() if args.output_format else None
reporter = _Reporter(chartjson)
specs = [
('Chrome_', args.trichrome_chrome),
('WebView_', args.trichrome_webview),
('Library_', args.trichrome_library),
]
for prefix, path in specs:
if path:
reporter.trace_title_prefix = prefix
_AnalyzeApkOrApks(reporter, path, args)
if any(path for _, path in specs):
reporter.SynthesizeTotals()
else:
_AnalyzeApkOrApks(reporter, args.input, args)
if chartjson:
_DumpChartJson(args, chartjson)
def _DumpChartJson(args, chartjson):
if args.output_file == '-':
json_file = sys.stdout
elif args.output_file:
json_file = open(args.output_file, 'w')
else:
results_path = os.path.join(args.output_dir, 'results-chart.json')
logging.critical('Dumping chartjson to %s', results_path)
json_file = open(results_path, 'w')
json.dump(chartjson, json_file, indent=2)
if json_file is not sys.stdout:
json_file.close()
# We would ideally generate a histogram set directly instead of generating
# chartjson then converting. However, perf_tests_results_helper is in
# //build, which doesn't seem to have any precedent for depending on
# anything in Catapult. This can probably be fixed, but since this doesn't
# need to be super fast or anything, converting is a good enough solution
# for the time being.
if args.output_format == 'histograms':
histogram_result = convert_chart_json.ConvertChartJson(results_path)
if histogram_result.returncode != 0:
raise Exception('chartjson conversion failed with error: ' +
histogram_result.stdout)
histogram_path = os.path.join(args.output_dir, 'perf_results.json')
logging.critical('Dumping histograms to %s', histogram_path)
with open(histogram_path, 'w') as json_file:
json_file.write(histogram_result.stdout)
def main():
argparser = argparse.ArgumentParser(description='Print APK size metrics.')
argparser.add_argument(
'--min-pak-resource-size',
type=int,
default=20 * 1024,
help='Minimum byte size of displayed pak resources.')
argparser.add_argument(
'--chromium-output-directory',
dest='out_dir',
type=os.path.realpath,
help='Location of the build artifacts.')
argparser.add_argument(
'--chartjson',
action='store_true',
help='DEPRECATED. Use --output-format=chartjson '
'instead.')
argparser.add_argument(
'--output-format',
choices=['chartjson', 'histograms'],
help='Output the results to a file in the given '
'format instead of printing the results.')
argparser.add_argument('--loadable_module', help='Obsolete (ignored).')
# Accepted to conform to the isolated script interface, but ignored.
argparser.add_argument(
'--isolated-script-test-filter', help=argparse.SUPPRESS)
argparser.add_argument(
'--isolated-script-test-perf-output',
type=os.path.realpath,
help=argparse.SUPPRESS)
output_group = argparser.add_mutually_exclusive_group()
output_group.add_argument(
'--output-dir', default='.', help='Directory to save chartjson to.')
output_group.add_argument(
'--output-file',
help='Path to output .json (replaces --output-dir). Works only for '
'--output-format=chartjson')
output_group.add_argument(
'--isolated-script-test-output',
type=os.path.realpath,
help='File to which results will be written in the '
'simplified JSON output format.')
argparser.add_argument('input', help='Path to .apk or .apks file to measure.')
trichrome_group = argparser.add_argument_group(
'Trichrome inputs',
description='When specified, |input| is used only as Test suite name.')
trichrome_group.add_argument(
'--trichrome-chrome', help='Path to Trichrome Chrome .apks')
trichrome_group.add_argument(
'--trichrome-webview', help='Path to Trichrome WebView .apk(s)')
trichrome_group.add_argument(
'--trichrome-library', help='Path to Trichrome Library .apk')
args = argparser.parse_args()
devil_chromium.Initialize(output_directory=args.out_dir)
# TODO(bsheedy): Remove this once uses of --chartjson have been removed.
if args.chartjson:
args.output_format = 'chartjson'
isolated_script_output = {'valid': False, 'failures': []}
test_name = 'resource_sizes (%s)' % os.path.basename(args.input)
if args.isolated_script_test_output:
args.output_dir = os.path.join(
os.path.dirname(args.isolated_script_test_output), test_name)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
try:
_ResourceSizes(args)
isolated_script_output = {
'valid': True,
'failures': [],
}
finally:
if args.isolated_script_test_output:
results_path = os.path.join(args.output_dir, 'test_results.json')
with open(results_path, 'w') as output_file:
json.dump(isolated_script_output, output_file)
with open(args.isolated_script_test_output, 'w') as output_file:
json.dump(isolated_script_output, output_file)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7597042226d2a591aafcd88f49a1d3803ca5542a | 55ab64b67d8abc02907eb43a54ff6c326ded6b72 | /scripts/addon_library/local/QBlocker/qspace.py | 2933954bc62f6a436dc7ce68da7564f1cafda0ba | [
"GPL-3.0-only",
"MIT"
] | permissive | Tilapiatsu/blender-custom_config | 2f03b0bb234c3b098d2830732296d199c91147d0 | 00e14fc190ebff66cf50ff911f25cf5ad3529f8f | refs/heads/master | 2023-08-16T14:26:39.990840 | 2023-08-16T01:32:41 | 2023-08-16T01:32:41 | 161,249,779 | 6 | 2 | MIT | 2023-04-12T05:33:59 | 2018-12-10T23:25:14 | Python | UTF-8 | Python | false | false | 9,181 | py |
import bpy
import gpu
import bgl
# import bmesh
from gpu_extras.batch import batch_for_shader
import mathutils
# import numpy
from .utilities.grid_utils import GetGridVector
from .utilities.math_utils import LinePlaneCollision
from bpy_extras.view3d_utils import region_2d_to_vector_3d, region_2d_to_origin_3d
from .utilities.qspace_utils import VecToMatrix, Distance
redC = (0.984, 0.2, 0.318, 1.0)
greenC = (0.525, 0.824, 0.012, 1.0)
blueC = (0.157, 0.557, 0.988, 1.0)
aVcolors = [greenC, greenC, redC, redC, blueC, blueC]
def QSpaceDrawHandler(qSpace, context):
bgl.glLineWidth(2)
pos = qSpace.wMatrix.translation
mat = qSpace.wMatrix.to_3x3()
vecX = pos + mat.col[0]
vecY = pos + mat.col[1]
vecZ = pos + mat.col[2]
coords = [pos, vecX, pos, vecY, pos, vecZ]
shader = gpu.shader.from_builtin('3D_SMOOTH_COLOR')
batch = batch_for_shader(shader, 'LINES', {"pos": coords, "color": aVcolors})
shader.bind()
batch.draw(shader)
class CoordSysClass:
operator = None
addon_prefs = None
context = None
lastHitresult = (False, None, None, None, None, None)
isGridhit = False
isModifiedMesh = False
mesh_data = None
wMatrix = mathutils.Matrix()
isPropAxis = True
object_eval = None
scene = None
region = None
rv3d = None
def __init__(self, _context, _op, _isAxis, _addon_prefs):
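"""Cache context and operator references; when _isAxis is set, register the axis draw handler."""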
self.addon_prefs = _addon_prefs
self.operator = _op
self.context = _context
self.isPropAxis = _isAxis
self.scene = self.context.scene
self.region = self.context.region
self.rv3d = self.context.region_data
if _isAxis:
args = (self, _context)
self._handle = bpy.types.SpaceView3D.draw_handler_add(QSpaceDrawHandler, args, 'WINDOW', 'POST_VIEW')
def CleanUp(self):
if self.isPropAxis and self._handle:
bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
def ToggleAxis(self, _state):
if self.isPropAxis:
if _state:
if not self._handle:
args = (self, self.context)
self._handle = bpy.types.SpaceView3D.draw_handler_add(QSpaceDrawHandler, args, 'WINDOW', 'POST_VIEW')
else:
if self._handle:
bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')
self._handle = None
# raycast into scene from mouse pos
def HitScene(self, coord):
view_vector = region_2d_to_vector_3d(self.region, self.rv3d, coord)
ray_origin = region_2d_to_origin_3d(self.region, self.rv3d, coord)
# Blender 2.91+ and 3.x pass the dependency graph to Scene.ray_cast;
# older versions take the view layer instead.
appversion = bpy.app.version
if appversion[0] >= 3 or appversion[1] >= 91:
    hitresult = self.scene.ray_cast(self.context.view_layer.depsgraph, ray_origin, view_vector)
else:
    hitresult = self.scene.ray_cast(self.context.view_layer, ray_origin, view_vector)
return hitresult
def ResetResult(self):
self.lastHitresult = (False, None, None, None, None, None)
if self.isModifiedMesh:
self.RemoveTempMesh()
def UpdateMeshEditMode(self):
if self.mesh_data is not None and self.lastHitresult[4] is not None:
depsgraph = bpy.context.evaluated_depsgraph_get()
self.object_eval = bpy.context.active_object.evaluated_get(depsgraph)
self.mesh_data = self.object_eval.to_mesh()
def RemoveTempMesh(self):
self.object_eval.to_mesh_clear()
self.mesh_data = None
self.isModifiedMesh = False
self.isGridhit = True
def IsFiltered(self, coord, hitresult):
# In 'grid only' mode, ignore the object hit so the grid is always used.
if self.operator.object_ignorebehind == 'GRID':
return False
# if front of grid, check if object is behind
elif self.operator.object_ignorebehind == 'FRONT' and hitresult[0]:
ray_origin = region_2d_to_origin_3d(self.region, self.rv3d, coord)
hitDist = Distance(hitresult[1], ray_origin)
gridMat = self.GetActiveGridAlign(coord)
gridHitDist = Distance(gridMat.to_translation(), ray_origin)
if not hitDist + 0.0001 < gridHitDist:
return False
return True
# main function
def GetCoordSys(self, context, coord, isoriented):
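"""Returns a world-space matrix under the mouse: aligned to the hit face on
mesh hits (oriented or axis-aligned), else to the grid / working plane."""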
# time_start = time.time() # timetest
if self.operator.toolstage != 0:
self.operator.qObject.HideObject(True)
hitresult = self.HitScene(coord)
cSysMatrix = None
# if object hit
if hitresult[0] and hitresult[4].type == 'MESH' and self.IsFiltered(coord, hitresult):
if hitresult[4] != self.operator.qObject.bObject:
# if not the same object, then create evaluated mesh data
if hitresult[4] != self.lastHitresult[4]:
# remove last mesh if that was modified
if self.isModifiedMesh:
self.RemoveTempMesh()
# check if hit modified object
if len(hitresult[4].modifiers) != 0:
depsgraph = context.evaluated_depsgraph_get()
self.object_eval = hitresult[4].evaluated_get(depsgraph)
self.mesh_data = self.object_eval.to_mesh()
self.isModifiedMesh = True
else:
self.mesh_data = hitresult[4].data
# Get matrix if oriented or axis aligned
if isoriented:
cSysMatrix = self.GetOrientedAlign(hitresult)
else:
cSysMatrix = self.GetAxisAlign(hitresult)
self.isGridhit = False
self.lastHitresult = hitresult
# if gridhit
else:
if not self.isGridhit:
self.ResetResult()
self.isGridhit = True
cSysMatrix = self.GetActiveGridAlign(coord)
self.lastHitresult = (False, None, None, None, None, None)
if self.operator.toolstage != 0:
self.operator.qObject.HideObject(False)
self.wMatrix = cSysMatrix
return cSysMatrix
# Get space-aligned matrix (working plane or world grid)
def GetActiveGridAlign(self, coord):
cSysMatrix = None
if self.operator.isWorkingPlane and not self.operator.wPlaneHold:
cSysMatrix = self.GetWPlaneAlign(coord, self.operator.workingplane.matrix)
else:
cSysMatrix = self.GetGridAlign(coord)
return cSysMatrix
# WPLANE ALIGNED
def GetWPlaneAlign(self, coord, _matrix):
# get view ray
view_vector = region_2d_to_vector_3d(self.region, self.rv3d, coord)
ray_origin = region_2d_to_origin_3d(self.region, self.rv3d, coord)
# check grid and collide with view ray
grid_vector = _matrix.col[2].xyz
hitpoint = LinePlaneCollision(view_vector, ray_origin, _matrix.translation, grid_vector)
# create matrix
ret_matrix = _matrix.copy()
ret_matrix.translation = hitpoint
return ret_matrix
# GRID ALIGNED
def GetGridAlign(self, coord):
# get view ray
view_vector = region_2d_to_vector_3d(self.region, self.rv3d, coord)
ray_origin = region_2d_to_origin_3d(self.region, self.rv3d, coord)
# check grid and collide with view ray
grid_vector = GetGridVector(self.context)
hitpoint = LinePlaneCollision(view_vector, ray_origin, (0.0, 0.0, 0.0), grid_vector)
# create matrix
grid_vector = mathutils.Vector(grid_vector)
ret_matrix = VecToMatrix(grid_vector, hitpoint)
return ret_matrix
# AXIS ALIGNED
def GetAxisAlign(self, _hitresult):
return VecToMatrix(_hitresult[2], _hitresult[1])
# ORIENTED ALIGN
def GetOrientedAlign(self, _hitresult):
# if same object and face, only move the matrix
if _hitresult[4] == self.lastHitresult[4] and _hitresult[3] == self.lastHitresult[3]:
self.wMatrix.translation = _hitresult[1]
return self.wMatrix
else:
verts = self.GetTargetPolyVerts(_hitresult)
# create matrix from face normal
matrix = VecToMatrix(_hitresult[2], _hitresult[1])
mat_inv = matrix.inverted()
# transform verts to poly space
vertsL = [(mat_inv @ v) for v in verts]
# calc best rotation
verts2DL = [(p[0], p[1]) for p in vertsL]
bboxangle = mathutils.geometry.box_fit_2d(verts2DL)
mat_rot = mathutils.Matrix.Rotation(-bboxangle, 4, 'Z')
ret_matrix = matrix @ mat_rot
return ret_matrix
# get vertices from polygon in world space
def GetTargetPolyVerts(self, _hitresult):
meshFace = self.mesh_data.polygons[_hitresult[3]]
matrix = _hitresult[4].matrix_world.copy()
return [(matrix @ self.mesh_data.vertices[v].co) for v in meshFace.vertices]
| [
"[email protected]"
] | |
32495522b2fd81ea7a06028dc2510f01b7e67688 | 0bfb55b41282803db96b90e7bba73d86be7e8553 | /publishers/views.py | b49de4967641e53db9e71a5c7bf2107d3902696b | [
"MIT"
] | permissive | OpenFurry/honeycomb | eebf2272f8ae95eb686ad129555dbebcf1adcd63 | c34eeaf22048948fedcae860db7c25d41b51ff48 | refs/heads/master | 2021-01-11T01:52:40.978564 | 2016-12-29T18:08:38 | 2016-12-29T18:08:38 | 70,649,821 | 2 | 2 | null | 2016-12-29T18:08:39 | 2016-10-12T01:22:38 | Python | UTF-8 | Python | false | false | 16,485 | py |
from django.contrib import messages
from django.contrib.auth.decorators import (
login_required,
permission_required,
)
from django.contrib.auth.models import (
Group,
User,
)
from django.core.paginator import (
EmptyPage,
Paginator,
)
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import (
get_object_or_404,
redirect,
render,
)
from django.views.decorators.http import require_POST
from submitify.models import Call
from .forms import (
NewsItemForm,
PublisherForm,
)
from .models import (
NewsItem,
Publisher,
)
def list_publishers(request, page=1):
"""View for listing publishers.
If the user is not an admin with permission to add publishers, publishers
without owners will not be shown.
Args:
page: the current page of publishers to list
"""
# List all publishers if the user is an admin, otherwise only the ones with
# owners.
if request.user.has_perm('publishers.add_publisher'):
qs = Publisher.objects.all()
else:
qs = Publisher.objects.filter(owner__isnull=False)
paginator = Paginator(qs, request.user.profile.results_per_page if
request.user.is_authenticated else 25)
try:
publishers = paginator.page(page)
except EmptyPage:
publishers = paginator.page(paginator.num_pages)
return render(request, 'list_publishers.html', {
'title': 'Publishers',
'publishers': publishers,
})
@login_required
@permission_required('publishers.add_publisher')
def create_publisher(request):
"""View for creating a new publisher page."""
form = PublisherForm()
if request.method == 'POST':
form = PublisherForm(request.POST, request.FILES)
if form.is_valid():
publisher = form.save()
messages.success(request, 'Publisher created')
return redirect(publisher.get_absolute_url())
return render(request, 'edit_publisher.html', {
'title': 'Create publisher',
'form': form,
})
def view_publisher(request, publisher_slug=None):
"""View for displaying a publisher.
Args:
publisher_slug: the urlified publisher name.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# If the publisher has no owner, only admins may view
if (publisher.owner is None and not
request.user.has_perm('publishers.add_publisher')):
return render(request, 'permission_denied.html', {}, status=403)
return render(request, 'view_publisher.html', {
'title': publisher.name,
'publisher': publisher,
'tab': 'home',
})
@login_required
def edit_publisher(request, publisher_slug=None):
"""View for updating a publisher's information.
Args:
publisher_slug: the urlified publisher name.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Only the publisher owner may edit the publisher page
if request.user != publisher.owner:
return render(request, 'permission_denied.html', {}, status=403)
form = PublisherForm(instance=publisher)
if request.method == 'POST':
form = PublisherForm(request.POST, request.FILES, instance=publisher)
if form.is_valid():
publisher = form.save()
messages.success(request, 'Publisher updated')
return redirect(publisher.get_absolute_url())
return render(request, 'edit_publisher.html', {
'title': 'Edit publisher',
'subtitle': publisher.name,
'form': form,
})
@login_required
@permission_required('publishers.delete_publisher')
def delete_publisher(request, publisher_slug=None):
"""View for deleting a publisher.
Args:
publisher_slug: the urlified publisher name.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
if request.method == 'POST':
publisher.delete()
messages.error(request, 'Publisher deleted')
return redirect(reverse('publishers:list_publishers'))
return render(request, 'confirm_delete_publisher.html', {
'title': 'Delete publisher',
'subtitle': publisher.name,
'publisher': publisher,
})
@login_required
@require_POST
def add_member(request, publisher_slug=None):
"""View for adding a writer to a publisher's page.
Args:
publisher_slug: the urlified publisher name.
"""
user = get_object_or_404(User, username=request.POST.get('username'))
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Only the publisher owner may add members
if request.user != publisher.owner:
return render(request, 'permission_denied.html', {}, status=403)
# Add member if they aren't yet a member
if user not in publisher.members.all():
publisher.members.add(user)
messages.success(request, 'User added to members')
else:
messages.info(request, 'User already in members')
return redirect(publisher.get_absolute_url())
@login_required
@require_POST
def remove_member(request, publisher_slug=None):
"""View for removing a writer from a publisher's page.
Args:
publisher_slug: the urlified publisher name.
"""
user = get_object_or_404(User, username=request.POST.get('username'))
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Only the publisher owner may remove members
if request.user != publisher.owner:
return render(request, 'permission_denied.html', {}, status=403)
# Remove member if they are a member
if user in publisher.members.all():
publisher.members.remove(user)
messages.success(request, 'User removed from members')
else:
messages.info(request, 'User not in members')
return redirect(publisher.get_absolute_url())
@login_required
@require_POST
def add_editor(request, publisher_slug=None):
"""View for adding a editor to a publisher's page.
Args:
publisher_slug: the urlified publisher name.
"""
user = get_object_or_404(User, username=request.POST.get('username'))
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Only the publisher owner may add editors
if request.user != publisher.owner:
return render(request, 'permission_denied.html', {}, status=403)
# Add editor if they do not yet edit for the publisher
if user not in publisher.editors.all():
publisher.editors.add(user)
messages.success(request, 'User added to editors')
else:
messages.info(request, 'User already in editors')
return redirect(publisher.get_absolute_url())
@login_required
@require_POST
def remove_editor(request, publisher_slug=None):
"""View for removing a editor from a publisher's page.
Args:
publisher_slug: the urlified publisher name.
"""
user = get_object_or_404(User, username=request.POST.get('username'))
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Only the publisher owner may remove editors
if request.user != publisher.owner:
return render(request, 'permission_denied.html', {}, status=403)
# Remove editor if they already edit for the publisher
if user in publisher.editors.all():
publisher.editors.remove(user)
messages.success(request, 'User removed from editors')
else:
messages.info(request, 'User not in editors')
return redirect(publisher.get_absolute_url())
def list_calls(request, publisher_slug):
"""View for listing calls-for-submissions belonging to a publisher.
Args:
publisher_slug: the urlified publisher name.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Filter based on requested statuses
acceptable_statuses = [Call.OPEN]
if 'opening-soon' in request.GET:
acceptable_statuses.append(Call.NOT_OPEN_YET)
if 'closed-reviewing' in request.GET:
acceptable_statuses.append(Call.CLOSED_REVIEWING)
if 'closed-completed' in request.GET:
acceptable_statuses.append(Call.CLOSED_COMPLETED)
calls = publisher.calls.filter(status__in=acceptable_statuses)
# Add available calls if the current user is the publisher owner
available_calls = None
if request.user == publisher.owner:
available_calls = Call.objects.filter(
Q(owner__in=publisher.editors.all()) &
~Q(id__in=[call.id for call in calls]))
return render(request, 'list_publisher_calls.html', {
'publisher': publisher,
'calls': calls,
'available_calls': available_calls,
'tab': 'calls',
})
@login_required
@require_POST
def add_call(request, publisher_slug=None):
"""View for adding a call for submissions to a publisher's page.
Args:
publisher_slug: the urlified publisher name.
"""
call = get_object_or_404(Call, id=request.POST.get('call_id'))
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Only the publisher owner may add a call
if request.user != publisher.owner:
return render(request, 'permission_denied.html', {}, status=403)
# Only calls run by the publisher's editors may be added
if call.owner not in publisher.editors.all():
return render(request, 'permission_denied.html', {}, status=403)
# Add call if publisher does not own it yet
if call in publisher.calls.all():
messages.info(request, '{} already owns this call'.format(
publisher.name))
else:
publisher.calls.add(call)
return redirect(reverse('publishers:list_calls', kwargs={
'publisher_slug': publisher.slug,
}))
@login_required
@require_POST
def remove_call(request, publisher_slug=None):
"""View for removing a call for submissions from a publisher's page.
Args:
publisher_slug: the urlified publisher name.
"""
call = get_object_or_404(Call, id=request.POST.get('call_id'))
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Only the publisher owner may remove a call
if request.user != publisher.owner:
return render(request, 'permission_denied.html', {}, status=403)
# Remove call if the publisher owns it
if call not in publisher.calls.all():
messages.info(request, "{} doesn't own this call".format(
publisher.name))
else:
publisher.calls.remove(call)
return redirect(reverse('publishers:list_calls', kwargs={
'publisher_slug': publisher.slug,
}))
@login_required
@permission_required('publishers.add_publisher')
@require_POST
def change_ownership(request, publisher_slug=None):
"""View to change the ownership of a publisher page.
Args:
publisher_slug: the urlified publisher name.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
new_owner = get_object_or_404(User, username=request.POST.get('username'))
old_owner = publisher.owner
publisher.owner = new_owner
publisher.save()
publisher.editors.add(new_owner)
set_group_membership(new_owner)
set_group_membership(old_owner)
messages.success(request, 'Publisher owner set')
return redirect(publisher.get_absolute_url())
def list_news_items(request, publisher_slug=None, page=1):
"""View for listing news items associated with a publisher.
Args:
publisher_slug: the urlified publisher name.
page: the current page of news items.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
paginator = Paginator(publisher.newsitem_set.all(),
request.user.profile.results_per_page if
request.user.is_authenticated else 25)
try:
news_items = paginator.page(page)
except EmptyPage:
news_items = paginator.page(paginator.num_pages)
return render(request, 'list_news_items.html', {
'title': publisher.name,
'subtitle': 'News',
'publisher': publisher,
'news_items': news_items,
'tab': 'news',
})
@login_required
def create_news_item(request, publisher_slug=None):
"""View to create a news item for a publisher.
Args:
publisher_slug: the urlified publisher name.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Only editors may create news items
if request.user not in publisher.editors.all():
return render(request, 'permission_denied.html', {}, status=403)
form = NewsItemForm()
if request.method == 'POST':
form = NewsItemForm(request.POST, request.FILES)
if form.is_valid():
news_item = form.save(commit=False)
news_item.publisher = publisher
news_item.owner = request.user
news_item.save()
form.save_m2m()
return redirect(news_item.get_absolute_url())
return render(request, 'edit_news_item.html', {
'title': 'Create news item',
'publisher': publisher,
'form': form,
'tab': 'news',
})
def view_news_item(request, publisher_slug=None, item_id=None):
"""View for displaying a news item.
Args:
publisher_slug: the urlified publisher name.
item_id: the news item id.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
news_item = get_object_or_404(NewsItem, id=item_id, publisher=publisher)
return render(request, 'view_news_item.html', {
'title': publisher.name,
'publisher': publisher,
'news_item': news_item,
'tab': 'news',
})
@login_required
def edit_news_item(request, publisher_slug=None, item_id=None):
"""View for editing a news item.
Args:
publisher_slug: the urlified publisher name.
item_id: the news item's id.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Reject non-editors early on
if request.user not in publisher.editors.all():
return render(request, 'permission_denied.html', {}, status=403)
item = get_object_or_404(NewsItem, id=item_id, publisher=publisher)
# Only publisher owner or news item owner may delete the item
if request.user not in [item.owner, publisher.owner]:
return render(request, 'permission_denied.html', {}, status=403)
# Update the news item
form = NewsItemForm(instance=item)
if request.method == 'POST':
form = NewsItemForm(request.POST, request.FILES, instance=item)
if form.is_valid():
news_item = form.save(commit=False)
news_item.publisher = publisher
news_item.owner = request.user
news_item.save()
form.save_m2m()
return redirect(news_item.get_absolute_url())
return render(request, 'edit_news_item.html', {
'title': 'Edit news item',
'subtitle': item.subject,
'publisher': publisher,
'form': form,
'tab': 'news',
})
@login_required
def delete_news_item(request, publisher_slug=None, item_id=None):
"""View for deleting a news item.
Args:
publisher_slug: the urlified publisher name.
item_id: the news item's id.
"""
publisher = get_object_or_404(Publisher, slug=publisher_slug)
# Reject non-editors early on
if request.user not in publisher.editors.all():
return render(request, 'permission_denied.html', {}, status=403)
item = get_object_or_404(NewsItem, id=item_id, publisher=publisher)
# Only publisher owner or news item owner may delete the item
if request.user not in [item.owner, publisher.owner]:
return render(request, 'permission_denied.html', {}, status=403)
# Delete if received through a POST request
if request.method == 'POST':
item.delete()
messages.success(request, 'News item deleted')
return redirect(publisher.get_absolute_url())
return render(request, 'confirm_delete_newsitem.html', {
'title': 'Delete news item',
'subtitle': item.subject,
'publisher': publisher,
'news_item': item,
'tab': 'news',
})
def set_group_membership(user):
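"""Syncs the user's "Publishers" group membership with whether they own or edit any publisher."""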
group = Group.objects.get(name="Publishers")
if (user.owned_publishers.count() > 0 or
user.publishers_editor_of.count() > 0):
user.groups.add(group)
else:
user.groups.remove(group)
| [
"[email protected]"
] | |
4c1ee37f5712dd73553e9b461af163e6df479098 | 34b09bc83e5726fccb524a93cf2742f5aeadedef | /8. Tree/2_answer.py | d5b9861160ea84ed0907fad53f3211d82894046b | [] | no_license | mjson1954/WIC | 57eb20ffe7aaf8695d679c893efacdeede573e72 | 670112209aacd274d09f6e9a89d948120486bfc8 | refs/heads/master | 2023-03-20T00:57:19.740025 | 2021-03-05T10:52:51 | 2021-03-05T10:52:51 | 289,925,829 | 0 | 0 | null | 2021-02-21T02:16:11 | 2020-08-24T12:46:58 | Python | UTF-8 | Python | false | false | 351 | py |
def inorder(n, last):
    """Label the nodes of a complete binary tree (1-indexed, children at
    2n and 2n+1) with 1..last in in-order traversal order."""
    global cnt
    if n <= last:
        inorder(n * 2, last)      # left subtree first
        tree[n] = cnt             # then the current node
        cnt += 1
        inorder(n * 2 + 1, last)  # right subtree last


for test_case in range(int(input())):
    N = int(input())
    tree = [0] * (N + 1)
    cnt = 1
    inorder(1, N)
    # Report the labels at the root and at node N // 2.
    print('#{} {} {}'.format(test_case + 1, tree[1], tree[N // 2]))
| [
"[email protected]"
] | |
28bb8c869e9ae1e4c9b795e3350951ba632fa612 | 3a51de9b289a141f23f7ad7feb97e937484ecbcf | /lib/player/player.py | 9260aa3101d17c31e510c64ace9b12e806986bf4 | [] | no_license | AndyDeany/sunni-remake | 52c40db465db0fa4cd04b9fbcb7f32e58e0fd52d | 3d6c5f742e41cc8c2c39bfd2b380f63ea012ef0f | refs/heads/main | 2023-05-01T05:47:50.858883 | 2021-05-07T17:35:44 | 2021-05-07T17:35:52 | 363,158,917 | 0 | 0 | null | 2021-05-12T13:32:49 | 2021-04-30T14:03:19 | Python | UTF-8 | Python | false | false | 3,620 | py |
from collections import namedtuple
from lib.image import Image
from .moves import Kick, Headbutt, Frostbeam, Heal
from lib.character import Character, NotEnoughManaError
class Player(Character):
"""Class representing the Player (the character controlled by the user)."""
CHARACTER_1 = "character1"
CHARACTER_2 = "character2"
CHOOSE_CHARACTER = "choose character"
CHOOSE_ABILITY = "choose ability"
DEAD = "player dead"
INFO_X = 10
def __init__(self, game, name="Sunni", character=None, *, level=1):
super().__init__(game, name, level=level, display_stat_x=170, display_stat_y_start=360)
self.calculate_stats()
self.fully_restore()
self.x = 150
self.y = 380
self.num_idle_frames = 4
self.idle_fps = 6
self.character = character
Moves = namedtuple("Moves", "heal kick headbutt frostbeam")
self.moves = Moves(Heal(160, 170, 350), Kick(), Headbutt(), Frostbeam())
self.offensive_moves = [self.moves.kick, self.moves.headbutt, self.moves.frostbeam]
self.defensive_moves = [self.moves.heal]
self.selected_moves = None
@property
def character(self):
return self._character
@character.setter
def character(self, character):
self._character = character
if character is None:
return
self.idle_frames = [Image(f"player/{character}_normal{n}.png") for n in range(self.num_idle_frames)]
self.character_normal = Image(f"player/{character}_normal1.png")
self.character_backwards = Image(f"player/{character}_backwards.png")
self.character_scared = Image(f"player/{character}_scared.png", (self.x, self.y))
self.character_scared_redflash = Image(f"player/{character}_scared_redflash.png", (self.x, self.y))
self.character_tilt_left = Image(f"player/{character}_tilt_left.png")
self.character_tilt_right = Image(f"player/{character}_tilt_right.png")
self.character_dead = Image(f"player/{character}_dead.png")
self.character_headbutt_stance = Image(f"player/{character}_headbutt_stance.png")
self.character_frostbeam_stance = Image(f"player/{character}_frostbeam_stance.png", (self.x, self.y))
def level_up(self, levels=1.0, restore=True):
"""Level the player up by the given number of levels (default 1).
Restores the player to full if they pass an integer level and `restore==True` (default).
"""
old_level = self.level
self.level += levels
if int(self.level) > int(old_level): # i.e. if we actually levelled up
self.calculate_stats()
if restore:
self.fully_restore()
def calculate_stats(self):
self.max_hp = 90 + 10*int(self.level)
self.max_mana = 95 + 5*int(self.level)
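# E.g. (illustrative) at level 3: max_hp = 90 + 30 = 120, max_mana = 95 + 15 = 110.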
def use_move(self, move):
try:
self.change_mana(move)
except NotEnoughManaError:
self.game.page.show_mana_notification()
else:
self.selected_moves = None
self.game.page.hide_mana_notification()
self.game.page.current = move
def next_move(self):
"""Continues to find out the player's next move."""
if self.is_dead:
self.game.page.current = self.DEAD
self.level_up(0.25, restore=False)
self.game.save()
return
self.game.page.current = self.CHOOSE_ABILITY
def _idle_display(self):
self.idle_animation(self.x, self.y)
def _dead_display(self):
self.character_dead.display(150, 480)
| [
"[email protected]"
] | |
3e6c0fb2e8ea90c331f630cd46132459a08ba11d | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_private_link_services_operations.py | 54d5ac4c0661e5743c01f3ebc3f5bd9c73f96edc | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 62,988 | py |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkServicesOperations:
"""PrivateLinkServicesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified private link service.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_name=service_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'} # type: ignore
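    # Usage sketch (hedged): with an async NetworkManagementClient named `client`
    # (client construction is an assumption, not part of this module):
    #
    #   poller = await client.private_link_services.begin_delete("my-rg", "my-pls")
    #   await poller.result()  # returns once the service is deleted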
async def get(
self,
resource_group_name: str,
service_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.PrivateLinkService":
"""Gets the specified private link service by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkService, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.PrivateLinkService
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkService"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_name: str,
parameters: "_models.PrivateLinkService",
**kwargs: Any
) -> "_models.PrivateLinkService":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkService"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateLinkService')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateLinkService', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateLinkService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
parameters: "_models.PrivateLinkService",
**kwargs: Any
) -> AsyncLROPoller["_models.PrivateLinkService"]:
"""Creates or updates an private link service in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
:param parameters: Parameters supplied to the create or update private link service operation.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.PrivateLinkService
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateLinkService or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_09_01.models.PrivateLinkService]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkService"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateLinkService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'} # type: ignore
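    # Usage sketch (hedged; the PrivateLinkService fields are assumptions about the
    # 2019-09-01 models and may differ):
    #
    #   service = _models.PrivateLinkService(location="westus2")
    #   poller = await client.private_link_services.begin_create_or_update("my-rg", "my-pls", service)
    #   created = await poller.result()  # the resulting PrivateLinkService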
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PrivateLinkServiceListResult"]:
"""Gets all private link services in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkServiceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.PrivateLinkServiceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkServiceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateLinkServiceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices'} # type: ignore
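    # Usage sketch: the returned AsyncItemPaged follows next_link transparently,
    # so callers simply iterate (client setup assumed as above):
    #
    #   async for service in client.private_link_services.list("my-rg"):
    #       print(service.name)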
def list_by_subscription(
self,
**kwargs: Any
) -> AsyncIterable["_models.PrivateLinkServiceListResult"]:
"""Gets all private link service in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkServiceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.PrivateLinkServiceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkServiceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateLinkServiceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/privateLinkServices'} # type: ignore
async def get_private_endpoint_connection(
self,
resource_group_name: str,
service_name: str,
pe_connection_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Get the specific private end point connection by specific private link service in the resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
        :param pe_connection_name: The name of the private endpoint connection.
:type pe_connection_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get_private_endpoint_connection.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'} # type: ignore
async def update_private_endpoint_connection(
self,
resource_group_name: str,
service_name: str,
pe_connection_name: str,
parameters: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Approve or reject private end point connection for a private link service in a subscription.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
        :param pe_connection_name: The name of the private endpoint connection.
        :type pe_connection_name: str
        :param parameters: Parameters supplied to approve or reject the private endpoint connection.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_private_endpoint_connection.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'} # type: ignore
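    # Usage sketch for approving a pending connection (hedged: the connection-state
    # model shape is an assumption about this models package):
    #
    #   state = _models.PrivateLinkServiceConnectionState(status="Approved", description="ok")
    #   conn = _models.PrivateEndpointConnection(private_link_service_connection_state=state)
    #   await client.private_link_services.update_private_endpoint_connection(
    #       "my-rg", "my-pls", "my-connection", conn)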
async def _delete_private_endpoint_connection_initial(
self,
resource_group_name: str,
service_name: str,
pe_connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_private_endpoint_connection_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_private_endpoint_connection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'} # type: ignore
async def begin_delete_private_endpoint_connection(
self,
resource_group_name: str,
service_name: str,
pe_connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete private end point connection for a private link service in a subscription.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
        :param pe_connection_name: The name of the private endpoint connection.
:type pe_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_private_endpoint_connection_initial(
resource_group_name=resource_group_name,
service_name=service_name,
pe_connection_name=pe_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'} # type: ignore
def list_private_endpoint_connections(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PrivateEndpointConnectionListResult"]:
"""Gets all private end point connections for a specific private link service.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.PrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_private_endpoint_connections.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_private_endpoint_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections'} # type: ignore
async def _check_private_link_service_visibility_initial(
self,
location: str,
parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
**kwargs: Any
) -> Optional["_models.PrivateLinkServiceVisibility"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateLinkServiceVisibility"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._check_private_link_service_visibility_initial.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CheckPrivateLinkServiceVisibilityRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_check_private_link_service_visibility_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'} # type: ignore
async def begin_check_private_link_service_visibility(
self,
location: str,
parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
**kwargs: Any
) -> AsyncLROPoller["_models.PrivateLinkServiceVisibility"]:
"""Checks whether the subscription is visible to private link service.
:param location: The location of the domain name.
:type location: str
:param parameters: The request body of CheckPrivateLinkService API call.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.CheckPrivateLinkServiceVisibilityRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateLinkServiceVisibility or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_09_01.models.PrivateLinkServiceVisibility]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkServiceVisibility"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._check_private_link_service_visibility_initial(
location=location,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_check_private_link_service_visibility.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'} # type: ignore
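    # Usage sketch (hedged: `private_link_service_alias` and the `visible` attribute
    # are assumptions about this API version's models):
    #
    #   req = _models.CheckPrivateLinkServiceVisibilityRequest(private_link_service_alias="my-pls-alias")
    #   poller = await client.private_link_services.begin_check_private_link_service_visibility("westus2", req)
    #   result = await poller.result()  # result.visible -> True/False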
async def _check_private_link_service_visibility_by_resource_group_initial(
self,
location: str,
resource_group_name: str,
parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
**kwargs: Any
) -> Optional["_models.PrivateLinkServiceVisibility"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateLinkServiceVisibility"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._check_private_link_service_visibility_by_resource_group_initial.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CheckPrivateLinkServiceVisibilityRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_check_private_link_service_visibility_by_resource_group_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'} # type: ignore
async def begin_check_private_link_service_visibility_by_resource_group(
self,
location: str,
resource_group_name: str,
parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
**kwargs: Any
) -> AsyncLROPoller["_models.PrivateLinkServiceVisibility"]:
"""Checks whether the subscription is visible to private link service in the specified resource
group.
:param location: The location of the domain name.
:type location: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param parameters: The request body of CheckPrivateLinkService API call.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.CheckPrivateLinkServiceVisibilityRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateLinkServiceVisibility or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_09_01.models.PrivateLinkServiceVisibility]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkServiceVisibility"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._check_private_link_service_visibility_by_resource_group_initial(
location=location,
resource_group_name=resource_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_check_private_link_service_visibility_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'} # type: ignore
def list_auto_approved_private_link_services(
self,
location: str,
**kwargs: Any
) -> AsyncIterable["_models.AutoApprovedPrivateLinkServicesResult"]:
"""Returns all of the private link service ids that can be linked to a Private Endpoint with auto
approved in this subscription in this region.
:param location: The location of the domain name.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AutoApprovedPrivateLinkServicesResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.AutoApprovedPrivateLinkServicesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AutoApprovedPrivateLinkServicesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_auto_approved_private_link_services.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AutoApprovedPrivateLinkServicesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_auto_approved_private_link_services.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/autoApprovedPrivateLinkServices'} # type: ignore
def list_auto_approved_private_link_services_by_resource_group(
self,
location: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AutoApprovedPrivateLinkServicesResult"]:
"""Returns all of the private link service ids that can be linked to a Private Endpoint with auto
approved in this subscription in this region.
:param location: The location of the domain name.
:type location: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AutoApprovedPrivateLinkServicesResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.AutoApprovedPrivateLinkServicesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AutoApprovedPrivateLinkServicesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_auto_approved_private_link_services_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AutoApprovedPrivateLinkServicesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_auto_approved_private_link_services_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/autoApprovedPrivateLinkServices'} # type: ignore
| ["[email protected]"] | |
9835945c0bfb18ed417925e0b02705344f7870b7 | a4e502e9487cf17c53f9f931ec0dbc12168fea52 | /tests/pyre/weaver/expressions_c.py | 09c724e94b47be07f1a16ff25188f81bbe895f1b | [
"BSD-3-Clause"
] | permissive | bryanvriel/pyre | bdc5dd59c46d53ff81f2ece532b9073ac3b65be1 | 179359634a7091979cced427b6133dd0ec4726ea | refs/heads/master | 2021-09-28T00:10:26.454282 | 2018-11-11T16:42:07 | 2018-11-11T16:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Exercise a C expression weaver
"""
def test():
# get the packages
import pyre.weaver
import pyre.calc
# instantiate a weaver
weaver = pyre.weaver.weaver(name="sanity")
weaver.language = "c"
# access its mill
mill = weaver.language
# build a few nodes
zero = pyre.calc.var(value=0)
one = pyre.calc.var(value=1)
# check expression generation
# the trivial cases
assert mill.expression(zero) == '0'
assert mill.expression(one) == '1'
# arithmetic
assert mill.expression(one + zero) == '(1) + (0)'
assert mill.expression(one - zero) == '(1) - (0)'
assert mill.expression(one * zero) == '(1) * (0)'
assert mill.expression(one / zero) == '(1) / (0)'
assert mill.expression(one // zero) == '(1) / (0)'
assert mill.expression(one % zero) == '(1) % (0)'
assert mill.expression(one ** zero) == 'pow(1,0)'
assert mill.expression(-one) == '-(1)'
assert mill.expression(abs(one)) == 'abs(1)'
# comparisons
assert mill.expression(one == zero) == '(1) == (0)'
assert mill.expression(one != zero) == '(1) != (0)'
assert mill.expression(one <= zero) == '(1) <= (0)'
assert mill.expression(one >= zero) == '(1) >= (0)'
assert mill.expression(one < zero) == '(1) < (0)'
assert mill.expression(one > zero) == '(1) > (0)'
# boolean
assert mill.expression(one & zero) == '(1) && (0)'
assert mill.expression(one | zero) == '(1) || (0)'
# return the configured weaver
return weaver
# main
if __name__ == "__main__":
test()
# end of file
| ["[email protected]"] | |
da07e2f0dd5b923746161c2fc6fb1063975a5ddf | bc441bb06b8948288f110af63feda4e798f30225 | /container_sdk/model/tuna_service/test_plan_pb2.pyi | 65a3b9c5324b9494fb47733d0fb869bd301ffe6d | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,626 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from container_sdk.model.tuna_service.requirement_instance_pb2 import (
RequirementInstance as container_sdk___model___tuna_service___requirement_instance_pb2___RequirementInstance,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class TestPlan(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
instanceId = ... # type: typing___Text
name = ... # type: typing___Text
reviewDate = ... # type: typing___Text
startExcutePlanDate = ... # type: typing___Text
projectStartDate = ... # type: typing___Text
projectPlanCompleteDate = ... # type: typing___Text
projectActualCompleteDate = ... # type: typing___Text
functionMissCount = ... # type: builtin___int
backendBugCount = ... # type: builtin___int
bugPercent = ... # type: typing___Text
bugTotal = ... # type: builtin___int
capabilityCount = ... # type: builtin___int
codingErrCount = ... # type: builtin___int
delayPercent = ... # type: typing___Text
environmentCount = ... # type: builtin___int
frontBugCount = ... # type: builtin___int
projectScore = ... # type: typing___Text
requirementBlurryCount = ... # type: builtin___int
scenarioBugCount = ... # type: builtin___int
scenarioCount = ... # type: builtin___int
status = ... # type: typing___Text
suggestionCount = ... # type: builtin___int
unableAppearCount = ... # type: builtin___int
@property
def requirement_instance(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[container_sdk___model___tuna_service___requirement_instance_pb2___RequirementInstance]: ...
def __init__(self,
*,
instanceId : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
reviewDate : typing___Optional[typing___Text] = None,
startExcutePlanDate : typing___Optional[typing___Text] = None,
projectStartDate : typing___Optional[typing___Text] = None,
projectPlanCompleteDate : typing___Optional[typing___Text] = None,
projectActualCompleteDate : typing___Optional[typing___Text] = None,
functionMissCount : typing___Optional[builtin___int] = None,
backendBugCount : typing___Optional[builtin___int] = None,
bugPercent : typing___Optional[typing___Text] = None,
bugTotal : typing___Optional[builtin___int] = None,
capabilityCount : typing___Optional[builtin___int] = None,
codingErrCount : typing___Optional[builtin___int] = None,
delayPercent : typing___Optional[typing___Text] = None,
environmentCount : typing___Optional[builtin___int] = None,
frontBugCount : typing___Optional[builtin___int] = None,
projectScore : typing___Optional[typing___Text] = None,
requirementBlurryCount : typing___Optional[builtin___int] = None,
scenarioBugCount : typing___Optional[builtin___int] = None,
scenarioCount : typing___Optional[builtin___int] = None,
status : typing___Optional[typing___Text] = None,
suggestionCount : typing___Optional[builtin___int] = None,
unableAppearCount : typing___Optional[builtin___int] = None,
requirement_instance : typing___Optional[typing___Iterable[container_sdk___model___tuna_service___requirement_instance_pb2___RequirementInstance]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> TestPlan: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> TestPlan: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"backendBugCount",b"backendBugCount",u"bugPercent",b"bugPercent",u"bugTotal",b"bugTotal",u"capabilityCount",b"capabilityCount",u"codingErrCount",b"codingErrCount",u"delayPercent",b"delayPercent",u"environmentCount",b"environmentCount",u"frontBugCount",b"frontBugCount",u"functionMissCount",b"functionMissCount",u"instanceId",b"instanceId",u"name",b"name",u"projectActualCompleteDate",b"projectActualCompleteDate",u"projectPlanCompleteDate",b"projectPlanCompleteDate",u"projectScore",b"projectScore",u"projectStartDate",b"projectStartDate",u"requirementBlurryCount",b"requirementBlurryCount",u"requirement_instance",b"requirement_instance",u"reviewDate",b"reviewDate",u"scenarioBugCount",b"scenarioBugCount",u"scenarioCount",b"scenarioCount",u"startExcutePlanDate",b"startExcutePlanDate",u"status",b"status",u"suggestionCount",b"suggestionCount",u"unableAppearCount",b"unableAppearCount"]) -> None: ...
| [
"[email protected]"
] | |
d5168dc66457bc8c199197309d234dab576d48e0 | e9391e1d8f21ccf147b7f6b4a285379ab48f33e2 | /core/views.py | 389feccf9b2002e33ea7cffdf756727486efca40 | [] | no_license | boiyelove/alpenels | 62bd1c4bf2a0f5a0453a835604ddc2ca9ba523ad | ff35d1cae529a5f79e9ea62469884fc0730c0ff0 | refs/heads/master | 2021-10-10T02:01:35.219036 | 2019-01-06T08:18:08 | 2019-01-06T08:18:08 | 163,274,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,326 | py | import dateutil.parser
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse, reverse_lazy
from django.views.generic import ListView, DetailView, TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic.edit import FormView, CreateView
from .auth_helper import get_sign_in_url, get_token_from_code, store_token, store_user, remove_user_and_token, get_token
from .graph_helper import get_user, get_calendar_events, get_contact_list, MailGraph
from .forms import ComposeMailForm, InvitationMailForm, MassInviteForm
from .models import ClientUser
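# Project-wide login defaults; the class below intentionally shadows the
# imported mixin so every view in this module inherits the same login_url
# and redirect field without renaming.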
class LoginRequiredMixin(LoginRequiredMixin):
login_url = reverse_lazy('login')
redirect_field_name = 'next'
def home(request):
context = initialize_context(request)
return render(request, 'alpenels/home.html', context)
def initialize_context(request):
context = {}
# Check for any errors in the session
error = request.session.pop('flash_error', None)
if error != None:
context['errors'] = []
context['errors'].append(error)
# Check for user in the session
# context['user'] = request.session.get('user', {'is_authenticated': False})
return context
def sign_in(request):
# Get the sign-in URL
sign_in_url, state = get_sign_in_url()
# Save the expected state so we can validate in the callback
request.session['auth_state'] = state
# Redirect to the Azure sign-in page
return HttpResponseRedirect(sign_in_url)
def callback(request):
expected_state = request.session.pop('auth_state', '')
token = get_token_from_code(request.get_full_path(), expected_state)
user = get_user(token)
store_token(request, token)
store_user(request, user)
usermail = user['mail'] if (user['mail'] != None) else user['userPrincipalName']
    try:
        cuser = ClientUser.objects.get(email=usermail)
        cuser.token = token
        cuser.save()
    except ClientUser.DoesNotExist:
        # Narrowed from a bare except: only a missing record should trigger
        # creation of a new ClientUser.
        ClientUser.objects.create(email=usermail,
                                  msid=user['id'],
                                  token=token)
return HttpResponseRedirect('https://outlook.office.com/owa/')
def sign_out(request):
remove_user_and_token(request)
return HttpResponseRedirect(reverse('home'))
def calendar(request):
context = initialize_context(request)
token = get_token(request)
events = get_calendar_events(token)
if events:
# Convert the ISO 8601 date times to a datetime object
# This allows the Django template to format the value nicely
for event in events['value']:
event['start']['dateTime'] = dateutil.parser.parse(event['start']['dateTime'])
event['end']['dateTime'] = dateutil.parser.parse(event['end']['dateTime'])
context['events'] = events['value']
return render(request, 'alpenels/calendar.html', context)
def contact(request):
context = initialize_context(request)
token = get_token(request)
contact_list = get_contact_list(token)
context['contact_list'] = contact_list
return render(request, 'alpenels/contactlist.html', context)
def mail(request):
context = initialize_context(request)
token = get_token(request)
mail_list = MailGraph(token).get_mails()
context['mail_list'] = mail_list['value']
return render(request, 'alpenels/mails.html', context)
def accountlist(request):
context = initialize_context(request)
# token = get_token(request)
context['accountlist'] = ClientUser.objects.all()
return render(request, "alpenels/accountlist.html", context)
def accountdetail(request):
context = initialize_context(request)
return render(request, "alpenels/accountdetail.html", context)
class ClientList(LoginRequiredMixin, ListView):
model = ClientUser
template_name = 'alpenels/clientlist.html'
context_object_name = 'client_list'
class ClientDetail(LoginRequiredMixin, DetailView):
model = ClientUser
    def get_context_data(self, **kwargs):
        # Bug fix: the original signature took `request` and returned an
        # undefined `context`; delegate to the parent implementation instead.
        context = super().get_context_data(**kwargs)
        return context
class ClientInvitationCompose(LoginRequiredMixin, SuccessMessageMixin, CreateView):
form_class = InvitationMailForm
template_name = "alpenels/forms.html"
    success_message = "Invitation sent successfully"
success_url = reverse_lazy("compose-invitation")
def form_valid(self, form):
# form.cleaned_data.get('email')
feedback = form.done()
print('feedback is', feedback)
form.reply_data = feedback
form.sent = feedback["sendInvitationMessage"]
return super().form_valid(form)
# class ClientInvitationList(LoginRequiredMixin, View):
# template_name = 'invitation_list.html'
class ClientMassInvite(LoginRequiredMixin, SuccessMessageMixin, CreateView):
form_class = MassInviteForm
template_name = "alpenels/mass_invite_form.html"
    success_message = 'File uploaded successfully; invites will be sent'
success_url = reverse_lazy('mass-invitation')
# def form_valid(self, form):
# result = form.done()
class ClientMailCompose(LoginRequiredMixin, SuccessMessageMixin, FormView):
form_class = ComposeMailForm
template_name = 'alpenels/forms.html'
success_message = "Mail Sent Successfully!"
def form_valid(self, form):
id = self.kwargs.get('id')
cuser = get_object_or_404(ClientUser, pk=id)
msg = form.send_mail(cuser)
if msg.status_code == 202:
self.success_url = reverse_lazy('compose-mail', kwargs={'id': id})
return super().form_valid(form)
else:
form.errors.update(msg.json())
return super().form_invalid(form)
def get_context_data(self, **kwargs):
id = self.kwargs.get('id')
cuser = get_object_or_404(ClientUser, pk=id)
context = super().get_context_data(**kwargs)
        context['description'] = "<strong>Author:</strong> %s <br> <strong>Email:</strong> %s" % (cuser.get_name(), cuser.email)
context['client_id'] = cuser.id
return context
class ClientMailList(LoginRequiredMixin, DetailView):
model = ClientUser
template_name = 'alpenels/mails.html'
context_object_name = 'client'
pk_url_kwarg = 'id'
def get_context_data(self, *args, **kwargs):
cuser = self.get_object()
kwargs = super().get_context_data(**kwargs)
kwargs['mail_list'] = cuser.get_mails()
kwargs['client_id'] = cuser.id
return kwargs
class ClientInbox(LoginRequiredMixin, TemplateView):
template_name = 'alpenels/mails.html'
def get_context_data(self, **kwargs):
id = self.kwargs.get('id')
cuser = get_object_or_404(ClientUser, pk=id)
kwargs['mail_list'] = cuser.get_inbox()
kwargs['client_id'] = cuser.id
return kwargs
class ClientDraft(LoginRequiredMixin, TemplateView):
template_name = 'alpenels/mails.html'
def get_context_data(self, **kwargs):
id = self.kwargs.get('id')
cuser = get_object_or_404(ClientUser, pk=id)
kwargs['mail_list'] = cuser.get_drafts()
kwargs['client_id'] = cuser.id
return kwargs
class ClientSentItems(LoginRequiredMixin, TemplateView):
template_name = 'alpenels/mails.html'
def get_context_data(self, **kwargs):
id = self.kwargs.get('id')
cuser = get_object_or_404(ClientUser, pk=id)
kwargs['mail_list'] = cuser.get_sentitems()
kwargs['client_id'] = cuser.id
return kwargs
class ClientDeletedItems(LoginRequiredMixin, TemplateView):
template_name = 'alpenels/mails.html'
def get_context_data(self, **kwargs):
id = self.kwargs.get('id')
cuser = get_object_or_404(ClientUser, pk=id)
kwargs['mail_list'] = cuser.get_deleteditems()
kwargs['client_id'] = cuser.id
return kwargs
class ClientMail(LoginRequiredMixin, TemplateView):
template_name = "alpenels/single_mail.html"
def get_context_data(self, **kwargs):
id = self.kwargs.get('id')
mid = (self.kwargs.get('mid')).strip()
print('mid is', mid)
cuser = get_object_or_404(ClientUser, pk=id)
mail = MailGraph(cuser.token).get_mail(mid)
print('mail is', mail)
kwargs['mail'] = mail
kwargs['mail_body'] = mail['body']
kwargs['client_id'] = id
return kwargs
class ClientMailFolder(LoginRequiredMixin, TemplateView):
    # Assumed template: the original class defined none, which would make
    # TemplateView raise ImproperlyConfigured at render time.
    template_name = 'alpenels/mails.html'
    def get_context_data(self, **kwargs):
        # Bug fix: URL kwargs live on the view (self.kwargs), not the request,
        # and the Graph call needs the folder id (fid), not the user pk.
        id = self.kwargs.get('id')
        fid = self.kwargs.get('fid')
        cuser = get_object_or_404(ClientUser, pk=id)
        kwargs['messages'] = MailGraph(cuser.token).get_folder_messages(fid)
        return kwargs
"[email protected]"
] | |
4e7e149ca665074c8dcd76689919720b275cc675 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_25196.py | 44ac5a1798b7117ed0a62c5f052b52af112d5a3f | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | # ImportError: No module named wpf (in Revit environment only) - User interface
import wpf
wpf.__file__
| [
"[email protected]"
] | |
e52b90544d1ceaf93d83e0f1899866350df1397b | 790589224695a2c7dc3977c718bb0f7cb5f6429e | /stats/fan98test.py | f321b8ec683a0ee393a9d1f769c96c44158afabb | [] | no_license | nikolaims/delayed_nfb | 58ab51b3a9dd798f18579d9ebfead89c095413ac | 37a8fbbd6e75a604ff092695fefd6a6421101de4 | refs/heads/master | 2022-12-19T04:30:34.252323 | 2020-09-18T13:23:27 | 2020-09-18T13:23:27 | 282,847,322 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,323 | py | import numpy as np
import os
def eval_z_score(data_points1, data_points2):
# mean, std and number of samples
x1_mean = np.mean(data_points1, 0)
x1_std = np.std(data_points1, 0)
x1_n = data_points1.shape[0]
x2_mean = np.mean(data_points2, 0)
x2_std = np.std(data_points2, 0)
x2_n = data_points2.shape[0]
# degree of freedom(see page 1011, left top paragraph)
d = x1_n + x2_n - 2
# z - score over time(see formula 13)
z_score = (x1_mean - x2_mean) / (x1_std ** 2 / x1_n + x2_std ** 2 / x2_n) ** 0.5
return z_score, d
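# NOTE (illustrative, added for clarity): the per-block statistic above is the
# two-sample z-score of formula 13 in the referenced paper (fan98),
#     z_b = (x1_mean_b - x2_mean_b) / sqrt(s1_b**2 / n1 + s2_b**2 / n2),
# computed for every block b, with d = n1 + n2 - 2 degrees of freedom.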
def eval_f_stat(data_points):
n_all_sample = sum([group_data[:, 0].size for group_data in data_points])
n_groups = len(data_points)
n_blocks = data_points[0].shape[1]
n_subjects_list = [data_points[k].shape[0] for k in range(n_groups)]
n_subjects = sum(n_subjects_list)
f_stats_list = []
for b in range(n_blocks):
data = [group_data[:, b] for group_data in data_points]
        overall_mean = sum([group_data.sum() for group_data in data]) / n_subjects
group_means_list = [group_data.mean() for group_data in data]
between_group_var = sum([n * (group_mean - overall_mean) ** 2
for n, group_mean in zip(n_subjects_list, group_means_list)]) / (n_groups - 1)
within_group_var = sum([np.sum((group_data - group_mean)**2)
for group_data, group_mean in zip(data, group_means_list)]) / (n_all_sample - n_groups)
        f_stat = between_group_var / (within_group_var + 1e-10)
f_stats_list.append(f_stat)
d1 = n_groups - 1
d2 = n_all_sample - n_groups
return f_stats_list, d1, d2
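# NOTE (illustrative): per block, this is the classic one-way ANOVA F ratio,
#     F = [sum_k n_k * (mean_k - grand_mean)**2 / (K - 1)]
#         / [sum_k sum_i (x_ki - mean_k)**2 / (N - K)],
# with d1 = K - 1 and d2 = N - K degrees of freedom; the 1e-10 term only
# guards against division by zero.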
def adaptive_neyman_test(z_star, d, return_extra=False):
# eval statistic for each number of first blocks (see fomula 6)
T_an = np.zeros(len(z_star))
d_factor = 1 if d is None else ((d - 2) ** 2 * (d - 4) / (d ** 2 * (d - 1))) ** 0.5
for m in range(len(z_star)):
T_an[m] = np.sum(z_star[:m + 1] ** 2 - 1) * d_factor / (2 * (m + 1)) ** 0.5
# find maximum T (see formula 6)
stat = np.max(T_an)
# compute final stat(see fomula 4)
loglogn = np.log(np.log(len(z_star)))
stat = (2 * loglogn) ** 0.5 * stat - (2 * loglogn + 0.5 * np.log(loglogn) - 0.5 * np.log(4 * np.pi))
if return_extra:
opt_m = np.argmax(T_an) + 1
return stat, opt_m
return stat
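# NOTE (illustrative): the final normalisation above implements
#     T*_AN = sqrt(2 * loglogn) * max_m T_AN(m)
#             - (2 * loglogn + 0.5 * log(loglogn) - 0.5 * log(4 * pi)),
# where loglogn = log(log(n)) and T_AN(m) accumulates the first m
# standardized coefficients (formulas 4 and 6 of the referenced paper).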
def corrcoef_test(z_star, d, return_extra=False):
stat = np.corrcoef(np.arange(len(z_star)), z_star)[0, 1]
if return_extra:
return stat, None
return stat
def fourier_transform(x):
# fft (see formula between 17 and 18)
z_fft = np.fft.fft(x) / len(x) ** 0.5
# colect real and imag coeffs(see example 1, page 1013, 2nd paragraph)
z_star = np.zeros(len(z_fft) * 2 - 1)
z_star[0::2] = np.real(z_fft)
z_star[1::2] = np.imag(z_fft[1:])
return z_star[:len(z_fft)]
def legendre_projector(n):
a = np.arange(n)
basis = np.zeros((n, n))
for k in a:
basis[:, k] = (a - np.mean(a))**k
q, _ = np.linalg.qr(basis)
return q
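# NOTE (illustrative): the columns of `basis` are the shifted monomials
# (a - mean(a))**k, so the QR step returns an orthonormal polynomial basis
# on n points (a discrete Legendre-like system).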
def legendre_transform(x):
n = len(x)
q = legendre_projector(n)
return x.dot(q)
def identity_transform(x):
return x
def simulate_h0_distribution(n, d, transform, stat_fun, n_iter=200000, verbose=True, sim_verbose=False):
    cache_dir = '_fan98_temp'
    cache_file = os.path.join(cache_dir, 'h0_{}_{}_n{}_d{}_n_iter{}.npy'
                              .format(stat_fun.__name__, transform.__name__, n, d, n_iter))
    if os.path.exists(cache_file):
        if verbose:
            print('Load from {}'.format(cache_file))
        stats_h0 = np.load(cache_file)
    else:
        if verbose or sim_verbose:
            print('Simulate and save to {}'.format(cache_file))
        stats_h0 = np.zeros(n_iter)
        for k in range(n_iter):
            if d is None:
                z_star = np.random.normal(size=n)
            else:
                z_star = np.random.standard_t(d, size=n)
            z_star = transform(z_star)
            stats_h0[k] = stat_fun(z_star, d)
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        np.save(cache_file, stats_h0)
return stats_h0
def get_p_val_one_tailed(val, h0_distribution):
p_val = np.sum(val < h0_distribution)/h0_distribution.shape[0]
return p_val
def get_p_val_two_tailed(val, h0_distribution):
upper_count = np.sum(np.abs(val) < h0_distribution)
lower_count = np.sum(h0_distribution < -np.abs(val))
p_val = (upper_count + lower_count) / h0_distribution.shape[0]
return p_val
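# Illustrative end-to-end usage (the data-array names below are hypothetical,
# not part of this module):
#     z_star, d = eval_z_score(group1_points, group2_points)  # subjects x blocks
#     z_star = legendre_transform(z_star)
#     stat = adaptive_neyman_test(z_star, d)
#     h0 = simulate_h0_distribution(len(z_star), d, legendre_transform,
#                                   adaptive_neyman_test, n_iter=10000)
#     p_value = get_p_val_one_tailed(stat, h0)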
if __name__ == '__main__':
n = 20
import pandas as pd
paper_table = pd.read_csv('release/stats/upperquartile.csv', sep=';').values
p_vals = paper_table[0, 1:]
stats = paper_table[paper_table[:, 0] == n, 1:][0]
d = None
    # Bug fix: simulate_h0_distribution() expects callables plus a stat_fun
    # argument; adaptive_neyman_test is assumed to be the intended statistic.
    stats_h0 = simulate_h0_distribution(n, d, transform=legendre_transform,
                                        stat_fun=adaptive_neyman_test)
levels = np.zeros_like(p_vals)
for k, p_val in enumerate(p_vals):
levels[k] = np.quantile(stats_h0, 1 - p_val)
print(' '.join(['{:.2f}'.format(p*100) for p in p_vals]))
print(' '.join(['{:.2f}'.format(level) for level in levels]))
print(' '.join(['{:.2f}'.format(level) for level in stats]))
print(' '.join(['{:.2f}'.format(level) for level in (stats-levels)/stats*100]))
| [
"[email protected]"
] | |
0c16c4cbc7bec0d762c92dd42889be0e90d2cec4 | ce5b5bca631122dd68494a66cdbb69531fec05e9 | /src/1014.py | f1bb2e49976ee3a0b6ab2f7569ae16bf58e75824 | [] | no_license | lucas54neves/urionlinejudge | 4008e29fb8810a32249c08a5aefa69288ed4d81b | 1d7d6f43d6dbde34c0f6a78e3c6d8770d599e28e | refs/heads/master | 2020-04-29T08:08:39.660682 | 2017-01-06T12:41:04 | 2017-01-06T12:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | # -*- coding: utf-8 -*-
x = int(input())
y = float(input())
print("{:.3f} km/l".format(x / y))
| [
"[email protected]"
] | |
8fa54b65fc4063813a985acf4e0059e7116705ef | 568fa58296378fa129ab3349adf010daa44ed45b | /third_party/incubator-tvm/python/tvm/relay/op/vision/_make.py | f0e31709194d710393e7fbefe4f8a90a6f98e6cb | [
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] | permissive | mindspore-ai/akg | 37f471badc66de6a831f1f45ad84344f34d23ef2 | 99f33858d6972741748cbfc9ab0bf9600428fef7 | refs/heads/master | 2023-07-25T23:03:17.672665 | 2023-07-11T07:33:57 | 2023-07-11T07:33:57 | 274,077,856 | 319 | 36 | Apache-2.0 | 2021-12-30T13:43:08 | 2020-06-22T08:09:05 | Python | UTF-8 | Python | false | false | 894 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
from ...._ffi.function import _init_api
_init_api("relay.op.vision._make", __name__)
| [
"[email protected]"
] | |
229f80c2b25ce34f5817bf9a25216175cb2e46cd | 48983b88ebd7a81bfeba7abd6f45d6462adc0385 | /HakerRank/algorithms/sorting/countingsort1.py | e135cc9ca8e5df255df2c85165a7a381e75f68d4 | [] | no_license | lozdan/oj | c6366f450bb6fed5afbaa5573c7091adffb4fa4f | 79007879c5a3976da1e4713947312508adef2e89 | refs/heads/master | 2018-09-24T01:29:49.447076 | 2018-06-19T14:33:37 | 2018-06-19T14:33:37 | 109,335,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | # author: Daniel Lozano
# source: HackerRank ( https://www.hackerrank.com )
# problem name: Algorithms: Sorting: Counting Sort 1
# problem url: https://www.hackerrank.com/challenges/countingsort1/problem
# date: 8/20/2017
n = int(input())
array = [int(i) for i in input().split()]
count = [0 for i in range(max(array)+1)]
for x in array:
    count[x] += 1
print(*count) | [
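# Worked example: for n = 5 and array = [1, 1, 3, 2, 1], max(array) = 3,
# so count = [0, 3, 1, 1] and the program prints "0 3 1 1".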
"[email protected]"
] | |
8a39bf68b2b2cea3c56735111181d89cb786306c | a742bd051641865d2e5b5d299c6bc14ddad47f22 | /algorithm/牛客网/14-链表中倒数第k个节点.py | b5d4a48dcbdb55bdc68a5ec35835d1df39b8ff13 | [] | no_license | lxconfig/UbuntuCode_bak | fb8f9fae7c42cf6d984bf8231604ccec309fb604 | 3508e1ce089131b19603c3206aab4cf43023bb19 | refs/heads/master | 2023-02-03T19:10:32.001740 | 2020-12-19T07:27:57 | 2020-12-19T07:27:57 | 321,351,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py |
"""
Given a linked list, output the k-th node from the end.
Trade space for time.
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def FindKthToTail(self, head, k):
# write code here
'''
        # Runtime: 22 ms, memory: 5856 KB
ret = []
while head:
            ret.insert(0, head.val)  # must iterate with head itself for the judge to accept
head = head.next
if k > len(ret) or k <= 0:
return
return ret[k-1]
'''
        # Runtime: 30 ms, memory: 5704 KB
        # Like a ruler: once its right end reaches the tail of the list, its
        # left end sits exactly on the k-th node from the end.
temp = head
if head == None or k <= 0:
return
while k > 1:
if temp.next:
                # first advance temp by k-1 nodes
temp = temp.next
k -= 1
else:
                # temp ran past the end of the list, so k is invalid
return
        # then advance temp and head together until temp reaches the tail
while temp.next:
head = head.next
temp = temp.next
return head.val
if __name__ == "__main__":
solution = Solution()
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
e = ListNode(5)
f = ListNode(6)
a.next = b
b.next = c
c.next = d
d.next = e
e.next = f
print(solution.FindKthToTail(a, 3)) | [
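    # Expected output: 4 (the 3rd node from the end of 1 -> 2 -> ... -> 6)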
"[email protected]"
] | |
8350946547ae600ae9e371657261443a53ba657a | e5aff0646237acf3639ac805652143cd8267bf33 | /template.py | 828dd581ec0c8f3190ff0720f6f760fc3cb2513a | [
"Apache-2.0"
] | permissive | HTDerekLiu/BlenderToolbox | 42943cf9fee7277d319ff5baffe7810c4c27dfe4 | 8044e77268ff018514ad1501c291f6deb6a07ec6 | refs/heads/master | 2023-07-20T05:14:58.736225 | 2023-07-10T16:33:17 | 2023-07-10T16:33:17 | 162,408,776 | 408 | 48 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | import sys, os
sys.path.append(os.path.join(os.path.abspath(os.getcwd())))  # change this to your local path, e.g. "path/to/BlenderToolbox/"
import BlenderToolBox as bt
import os, bpy, bmesh
import numpy as np
cwd = os.getcwd()
'''
RENDER AN IMAGE STEP-BY-STEP:
1. copy "template.py" to your preferred local folder
2. In "template.py":
- change the second line to your path to the BlenderToolbox, such as "sys.path.append('path/to/BlenderToolbox/')"
- change "meshPath"
- set your desired material (select one from the demo scripts)
3. run "blender --background --python template.py" in terminal, then terminate the code when it starts rendering. This step outputs a "test.blend"
4. open "test.blend" with your blender software
5. In blender UI, adjust:
- location, rotation, scale of the mesh
- material parameters
6. In "template.py":
- type in the adjusted parameters from GUI
- set outputPath and increase imgRes_x, imgRes_y, numSamples
7. run "blender --background --python template.py" again to output your final image
'''
outputPath = os.path.join(cwd, './template.png')
## initialize blender
imgRes_x = 720 # recommend > 1080
imgRes_y = 720 # recommend > 1080
numSamples = 100 # recommend > 200
exposure = 1.5
use_GPU = True
bt.blenderInit(imgRes_x, imgRes_y, numSamples, exposure, use_GPU)
## read mesh
meshPath = './meshes/spot.ply'
location = (1.12, -0.14, 0) # (GUI: click mesh > Transform > Location)
rotation = (90, 0, 227) # (GUI: click mesh > Transform > Rotation)
scale = (1.5,1.5,1.5) # (GUI: click mesh > Transform > Scale)
mesh = bt.readMesh(meshPath, location, rotation, scale)
## set shading (uncomment one of them)
bpy.ops.object.shade_smooth() # Option1: Gouraud shading
# bpy.ops.object.shade_flat() # Option2: Flat shading
# bt.edgeNormals(mesh, angle = 10) # Option3: Edge normal shading
## subdivision
bt.subdivision(mesh, level = 1)
###########################################
## Set your material here (see other demo scripts)
# bt.colorObj(RGBA, Hue, Saturation, Value, Bright, Contrast)
RGBA = (144.0/255, 210.0/255, 236.0/255, 1)
meshColor = bt.colorObj(RGBA, 0.5, 1.0, 1.0, 0.0, 2.0)
bt.setMat_plastic(mesh, meshColor)
## End material
###########################################
## set invisible plane (shadow catcher)
bt.invisibleGround(shadowBrightness=0.9)
## set camera
## Option 1: don't change camera setting, change the mesh location above instead
camLocation = (3, 0, 2)
lookAtLocation = (0,0,0.5)
focalLength = 45 # (UI: click camera > Object Data > Focal Length)
cam = bt.setCamera(camLocation, lookAtLocation, focalLength)
## Option 2: if you really want to set camera based on the values in GUI, then
# camLocation = (3, 0, 2)
# rotation_euler = (63,0,90)
# focalLength = 45
# cam = bt.setCamera_from_UI(camLocation, rotation_euler, focalLength = 35)
## set light
## Option1: Three Point Light System
# bt.setLight_threePoints(radius=4, height=10, intensity=1700, softness=6, keyLoc='left')
## Option2: simple sun light
lightAngle = (6, -30, -155)
strength = 2
shadowSoftness = 0.3
sun = bt.setLight_sun(lightAngle, strength, shadowSoftness)
## set ambient light
bt.setLight_ambient(color=(0.1,0.1,0.1,1))
## set gray shadow to completely white with a threshold (optional but recommended)
bt.shadowThreshold(alphaThreshold = 0.05, interpolationMode = 'CARDINAL')
## save blender file so that you can adjust parameters in the UI
bpy.ops.wm.save_mainfile(filepath=os.getcwd() + '/test.blend')
## save rendering
bt.renderImage(outputPath, cam) | [
"[email protected]"
] | |
3415a4ecb83d7e175dabb499de9265c8cc036262 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_appointing.py | 26b9e8439c7ab22458130ace2febc7adeaf9fdc6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
# class header
class _APPOINTING():
    def __init__(self):
        self.name = "APPOINTING"
        # Bug fix: `appoint` was an undefined bare name; a list mirroring
        # self.basic below is assumed as the intended value.
        self.definitions = ['appoint']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['appoint']
| [
"[email protected]"
] | |
850ec92026f401141c59fd3e0a0d2ad28d4ca8fe | ab464f019ed1669fa4f0fbf2a7f25e662d996948 | /proj1/Python-Test1/morsels_proxydict2.py | c91c7e265c8e16301a026dd6092f7af3df73b900 | [] | no_license | maniraja1/Python | fed2aa746c690560d7744b1378259af1cdfa9bb0 | c9e6e12836fed47cdba495a07f43d7599265bea1 | refs/heads/master | 2021-06-03T19:17:16.591303 | 2021-01-04T16:06:25 | 2021-01-04T16:06:25 | 89,430,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py |
class ProxyDict:
def __init__(self,data={}):
self.data=data
def __getitem__(self, item):
return self.data[item]
def __iter__(self):
for key, value in self.data.items():
yield key
    def keys(self):
        return list(self.data.keys())
    def __len__(self):
        return len(self.data)
    def items(self):
        return list(self.data.items())
    def values(self):
        return list(self.data.values())
    def get(self, key, default=None):
        # Bug fix: the original wrapped the value in a set literal, so the
        # demo prints below got sets back instead of plain values.
        return self.data.get(key, default)
def __repr__(self):
return f"ProxyDict({self.data})"
def __eq__(self, other):
        if isinstance(other, ProxyDict):
            return self.data == other.data
        elif isinstance(other, dict):
            return self.data == other
        else:
            return False
user_data = {'name': 'Trey Hunner', 'active': False}
proxy_data = ProxyDict(user_data)
print(proxy_data.keys())
print(set(proxy_data.keys()))
print(proxy_data['name'])
print(proxy_data['active'])
user_data['active'] = True
print(proxy_data['active'])
print(len(proxy_data))
print(proxy_data.items())
print(proxy_data.values())
print(proxy_data.get('name'))
print(proxy_data.get('shoe_size', 0))
print(proxy_data.get('d'))
for key in proxy_data:
print(key)
print(proxy_data)
p1 = ProxyDict(user_data)
p2 = ProxyDict(user_data.copy())
print(p1==p2)
print(p2 == user_data)
print(None == None)  # trivially True; condensed from the original if/else check
"[email protected]"
] | |
5c8633a022c983ef715a29fc3751eecef317daca | 0e47f4e7765938324cee0186c4ba0bf257507b0b | /docs/source/conf.py | 51cc86e7396589f9260c1bc7af7ae2d7111a6bbe | [
"BSD-3-Clause"
] | permissive | JarryShaw/lorem.js | fcd9cceb6ff9d21a273a995dd36ad9a1fd4644c1 | b2e8e163e065b0b5d2a367f564a3a8ac756104ca | refs/heads/master | 2023-08-03T05:49:24.006149 | 2023-07-30T01:58:22 | 2023-07-30T01:58:22 | 213,123,617 | 0 | 0 | BSD-3-Clause | 2023-07-30T01:58:24 | 2019-10-06T07:09:14 | TypeScript | UTF-8 | Python | false | false | 2,467 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'lorem.js'
copyright = '2020, Jarry Shaw'
author = 'Jarry Shaw'
# The full version, including alpha/beta/rc tags
release = '0.1.2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_js',
]
primary_domain = 'js'
js_language = 'typescript'
js_source_path = '../../ts/'
# jsdoc_config_path = 'conf.json'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'show_powered_by': False,
'github_user': 'JarryShaw',
'github_repo': 'lorem.js',
'github_banner': True,
'github_type': 'star',
#'show_related': False,
#'note_bg': '#FFF59C',
#'travis_button': True,
#'codecov_button': True,
}
| [
"[email protected]"
] | |
97286bf0248e398684ddbc5cbc43952b3ebf61b4 | 187f114edca30e0fec49cdaee873bbe614295442 | /docs/source/conf.py | 802726555ddced6fbbd0320aa0d3c743866fc976 | [
"MIT"
] | permissive | scorphus/pvsim | c5b3f4535d8c404cf1a4fbe9c731c5dee7fc8251 | 778349147245c754e37ab7d44d385b03780105ac | refs/heads/master | 2022-10-24T02:12:28.331528 | 2020-06-08T17:09:49 | 2020-06-08T17:09:49 | 109,788,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,807 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of pvsim.
# https://github.com/scorphus/pvsim
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2017, Pablo Santiago Blum de Aguiar <[email protected]>
# PV Simulator Challenge documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 3 21:36:07 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# import os
import sys
sys.path.append('..')
sys.path.append('../..')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PV Simulator Challenge'
copyright = '2017, Pablo Santiago Blum de Aguiar'
author = 'Pablo Santiago Blum de Aguiar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PVSimulatorChallengedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PVSimulatorChallenge.tex', 'PV Simulator Challenge Documentation',
'Pablo Santiago Blum de Aguiar', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pvsimulatorchallenge', 'PV Simulator Challenge Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PVSimulatorChallenge', 'PV Simulator Challenge Documentation',
author, 'PVSimulatorChallenge', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [
"[email protected]"
] | |
c7eaf6bea58237cdccc33d21a51b534bf54ca155 | d1b59545c498a1188b84a874324522385dbadfe4 | /A01/q1/q2.py | 2ff0ac09805da363d938afc94d3b1439a1e4c0be | [] | no_license | khat3680/basic_python-104 | 570c64daf6a89c5696966c75a9901b0d15eb758e | c3a79e08cb5fe6dcba5130c295f395c6130d559b | refs/heads/master | 2022-12-09T21:32:27.068654 | 2020-09-11T17:08:47 | 2020-09-11T17:08:47 | 294,756,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | """
------------------------------------------------------------------------
Assignment 1 Question 2
C
------------------------------------------------------------------------
Author: Anshul Khatri
ID: 193313680
Email: [email protected]
__updated__ = "2019-09-14"
------------------------------------------------------------------------
"""
print("hello , welcome to new innings of life")
varig =12
varig0 =10
print( " result", varig/varig0)
outtput =0
outtput += varig0
print (outtput)
| [
"[email protected]"
] | |
149137c48aaf71c39a2d48bd44e95b892c37bca9 | 627094b5e463bd113f626450eaceb01dfa4ff5d5 | /udsoncan/services/ReadDataByPeriodicIdentifier.py | d943ab4a2cdd1f4a238f9e91cbb7fec3ea179296 | [
"MIT"
] | permissive | DaleChen0351/python-udsoncan | 49eefcb299e2a4fabe0bf168905cc86ef43d6f62 | c495e872c69c4ea05e3b477d2a1088cb83167a17 | refs/heads/master | 2020-04-20T06:10:25.252315 | 2019-03-28T07:38:17 | 2019-03-28T07:38:17 | 168,675,483 | 0 | 0 | MIT | 2019-03-28T07:38:19 | 2019-02-01T09:42:02 | Python | UTF-8 | Python | false | false | 718 | py | from . import *
from udsoncan.Response import Response
from udsoncan.exceptions import *
class ReadDataByPeriodicIdentifier(BaseService):
_sid = 0x2A
supported_negative_response = [ Response.Code.IncorrectMessageLegthOrInvalidFormat,
Response.Code.ConditionsNotCorrect,
Response.Code.RequestOutOfRange,
Response.Code.SecurityAccessDenied
]
@classmethod
def make_request(cls):
raise NotImplementedError('Service is not implemented')
@classmethod
def interpret_response(cls, response):
raise NotImplementedError('Service is not implemented')
class ResponseData(BaseResponseData):
def __init__(self):
super().__init__(ReadDataByPeriodicIdentifier) | [
"[email protected]"
] | |
bad5caea1aca73bb22c458643376be763d78501b | 7d4e3f36fb1bac247599510820c0f537417f99e4 | /jnpy/__init__.py | 27623371e04ec7b6314a18697122d86eded15828 | [
"MIT"
] | permissive | jojoquant/jonpy | 562065ea74ac1038c36d3993f43e71cc39a799a7 | c874060af4b129ae09cee9f8542517b7b2f6573b | refs/heads/master | 2022-09-08T07:15:37.051279 | 2022-03-18T06:41:22 | 2022-03-18T06:41:22 | 244,432,056 | 7 | 7 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | # !/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Datetime : 2020/1/22 上午1:40
# @Author : Fangyang
# @Software : PyCharm
if __name__ == '__main__':
pass
| [
"[email protected]"
] | |
22dd8bde5c343bdbe523ce05f063df666dc97e78 | 7b813118535a4a311159f0709e17dd44f4acb1e6 | /apps/base/models.py | 398010b665f09735b46711a2319959b8a58e3dab | [
"Apache-2.0"
] | permissive | apprentice1985/BlogBackendProject | ee4af2994f73bb3a87c2f3e6336754e518830444 | 67534e995f965e7ec3330a316251f7b87c0e9b7f | refs/heads/master | 2021-01-25T13:05:22.776354 | 2018-03-01T17:12:57 | 2018-03-01T17:12:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,844 | py | import hashlib
from django.db import models
from material.models import MaterialSocial, MaterialMaster
class NavigationLink(models.Model):
    """
    Custom navigation link
    """
    name = models.CharField(max_length=30, verbose_name="Name", help_text="Name")
    desc = models.CharField(max_length=100, verbose_name="Description", help_text="Description")
    image = models.ImageField(upload_to="base/friendlink/image/%y/%m", null=True, blank=True, verbose_name="Image", help_text="Image")
    url = models.URLField(max_length=200, verbose_name="Link", help_text="Link")
    add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="Added time", help_text="Added time")
    class Meta:
        verbose_name = "Custom navigation link"
        verbose_name_plural = verbose_name + ' list'
    def __str__(self):
        return self.name
class SiteInfo(models.Model):
    name = models.CharField(default="", max_length=20, verbose_name="Name", help_text="Name")
    name_en = models.CharField(default="", max_length=20, verbose_name="English name", help_text="English name")
    desc = models.CharField(default="", max_length=20, verbose_name="Description", help_text="Description")
    icon = models.ImageField(upload_to="base/site/image/%y/%m", null=True, blank=True, verbose_name="Icon", help_text="Icon")
    navigations = models.ManyToManyField(NavigationLink, through="SiteInfoNavigation", through_fields=(
        'site', 'navigation'), verbose_name='Custom navigation', help_text='Custom navigation')
    copyright = models.CharField(default="", max_length=100, verbose_name="Copyright", help_text="Copyright")
    icp = models.CharField(default="", max_length=20, verbose_name="ICP", help_text="ICP")
    is_live = models.BooleanField(default=False, verbose_name="Is active", help_text="Is active")
    is_force_refresh = models.BooleanField(default=False, verbose_name="Force refresh", help_text="Force refresh")
    access_password = models.CharField(max_length=20, null=True, blank=True, verbose_name="Access password", help_text="Access password")
    access_password_encrypt = models.CharField(max_length=100, null=True, blank=True, verbose_name="Access password (encrypted)",
                                               help_text="Access password (encrypted)")
    add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="Added time", help_text="Added time")
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        if self.access_password:
            md5 = hashlib.md5()
            md5.update(self.access_password.encode('utf8'))
            self.access_password_encrypt = md5.hexdigest()
        super(SiteInfo, self).save(*args, **kwargs)
    class Meta:
        verbose_name = "Site info"
        verbose_name_plural = verbose_name + ' list'
class BloggerInfo(models.Model):
    name = models.CharField(default="", max_length=20, verbose_name="Name", help_text="Name")
    name_en = models.CharField(default="", max_length=20, verbose_name="English name", help_text="English name")
    desc = models.CharField(default="", max_length=300, verbose_name="Description", help_text="Description")
    avatar = models.ImageField(upload_to="base/avatar/image/%y/%m", null=True, blank=True, verbose_name="Avatar", help_text="Avatar")
    background = models.ImageField(upload_to="base/background/image/%y/%m", null=True, blank=True, verbose_name="Background image", help_text="Background image")
    socials = models.ManyToManyField(MaterialSocial, through='BloggerSocial', through_fields=('blogger', 'social'))
    masters = models.ManyToManyField(MaterialMaster, through='BloggerMaster', through_fields=('blogger', 'master'))
    add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="Added time", help_text="Added time")
    class Meta:
        verbose_name = "Blogger info"
        verbose_name_plural = verbose_name + ' list'
    def __str__(self):
        return self.name
class BloggerSocial(models.Model):
    name = models.CharField(default="", max_length=20, verbose_name="Name", help_text="Name")
    blogger = models.ForeignKey(BloggerInfo, verbose_name="Blogger", help_text="Blogger")
    social = models.ForeignKey(MaterialSocial, verbose_name="Social platform", help_text="Social platform")
    index = models.IntegerField(default=0, verbose_name="Order", help_text="Order")
    add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="Added time", help_text="Added time")
    class Meta:
        verbose_name = "Social info"
        verbose_name_plural = verbose_name + ' list'
    def __str__(self):
        return self.name
class BloggerMaster(models.Model):
    name = models.CharField(default="", max_length=20, verbose_name="Name", help_text="Name")
    blogger = models.ForeignKey(BloggerInfo, verbose_name="Blogger", help_text="Blogger")
    master = models.ForeignKey(MaterialMaster, verbose_name="Skill", help_text="Skill")
    index = models.IntegerField(default=0, verbose_name="Order", help_text="Order")
    add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="Added time", help_text="Added time")
    class Meta:
        verbose_name = "Skill info"
        verbose_name_plural = verbose_name + ' list'
    def __str__(self):
        return self.name
class SiteInfoNavigation(models.Model):
    name = models.CharField(default="", max_length=20, verbose_name="Name", help_text="Name")
    site = models.ForeignKey(SiteInfo, verbose_name="Site", help_text="Site")
    navigation = models.ForeignKey(NavigationLink, verbose_name="Navigation", help_text="Navigation")
    index = models.IntegerField(default=0, verbose_name="Order", help_text="Order")
    add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="Added time", help_text="Added time")
    class Meta:
        verbose_name = "Navigation info"
        verbose_name_plural = verbose_name + ' list'
    def __str__(self):
        return self.name
class FriendLink(models.Model):
    """
    Friend link
    """
    name = models.CharField(max_length=30, verbose_name="Name", help_text="Name")
    desc = models.CharField(max_length=100, verbose_name="Description", help_text="Description")
    image = models.ImageField(upload_to="base/friendlink/image/%y/%m", null=True, blank=True, verbose_name="Image", help_text="Image")
    url = models.URLField(max_length=200, verbose_name="Link", help_text="Link")
    add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="Added time", help_text="Added time")
    class Meta:
        verbose_name = "Friend link"
        verbose_name_plural = verbose_name + ' list'
    def __str__(self):
        return self.name
| [
"[email protected]"
] | |
17c62b25766d04711a43abe1a6664544e0969e56 | 82080ef68a203f141ab6435c32fdcc79351b448e | /web_dynamic/3-hbnb.py | f2591110ea9142633bb3c28838dee8b09cc5a20f | [
"MIT"
] | permissive | dalexach/AirBnB_clone_v4 | 60291a8d10d58f75d707fdc4a0c11095e4c36dc5 | 04e1db0691cbe5cefb6a5f42f7e008e8ba24d5d6 | refs/heads/master | 2023-02-08T02:26:40.018351 | 2020-02-18T20:14:16 | 2020-02-18T20:14:16 | 240,527,417 | 0 | 2 | MIT | 2023-02-02T03:27:32 | 2020-02-14T14:32:44 | HTML | UTF-8 | Python | false | false | 1,356 | py | #!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, url_for
from models import storage
from uuid import uuid4
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 5000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/3-hbnb/')
def hbnb_filters(the_id=None):
"""
handles request to custom template with states, cities & amentities
"""
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = storage.all('Amenity').values()
places = storage.all('Place').values()
users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
for user in storage.all('User').values())
cache_id = uuid4()
return render_template('3-hbnb.html',
states=states,
amens=amens,
places=places,
users=users,
cache_id=cache_id)
if __name__ == "__main__":
"""
MAIN Flask App"""
app.run(host=host, port=port)
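    # With the values above this serves e.g. http://0.0.0.0:5000/3-hbnb/
    # (illustrative URL derived from the registered route and host/port).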
| [
"[email protected]"
] | |
04eef729215eff975b7ee7bd345bbf3cd621784c | f5f87fabe766e69c7a003d070b6447ef5a45c603 | /stylelens_search/models/__init__.py | 6c599ebfcc38fd644fb7183bd6deae699ba00867 | [] | no_license | BlueLens/stylelens-search | 4a4f17f876e1781a6fee5663bfa62f83f657d7db | 7c21d59d49915688777714fb896d65a101dab28a | refs/heads/master | 2021-07-15T08:55:45.256052 | 2017-10-21T15:44:35 | 2017-10-21T15:44:35 | 107,691,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # coding: utf-8
"""
stylelens-search
This is a API document for Image search on fashion items\"
OpenAPI spec version: 0.0.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .image import Image
from .image_search_response import ImageSearchResponse
from .image_search_response_data import ImageSearchResponseData
from .images_array import ImagesArray
| [
"[email protected]"
] | |
ef03dc7074da5007a1486441d0229d4e4db99142 | e0e9b1446ccaccdd7332b2f916e737cdaced8a8d | /.scripts/submit.py | 6ce7eff369601b1752b5709ac46a92bb65b5cbf9 | [] | no_license | marygmccann/cse-34872-su20-assignments | 3afd51f8cfc7c0262b7937d113755baf49cbd20e | c2c2f21da1abd86e646c16ea86f21702efb34d68 | refs/heads/master | 2022-08-24T10:32:31.545059 | 2020-05-19T21:03:43 | 2020-05-19T21:03:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,252 | py | #!/usr/bin/env python3
import glob
import json
import os
import sys
import requests
import yaml
# Globals
ASSIGNMENTS = {}
DREDD_QUIZ_URL = 'https://dredd.h4x0r.space/quiz/cse-34872-su20/'
DREDD_QUIZ_MAX = 2.0
if bool(os.environ.get('DEBUG', False)):
DREDD_CODE_URL = 'https://dredd.h4x0r.space/debug/cse-34872-su20/'
else:
DREDD_CODE_URL = 'https://dredd.h4x0r.space/code/cse-34872-su20/'
DREDD_CODE_MAX = 6.0
# Utilities
def add_assignment(assignment, path=None):
if path is None:
path = assignment
if assignment.startswith('reading') or assignment.startswith('challenge'):
ASSIGNMENTS[assignment] = path
def print_results(results):
for key, value in sorted(results):
try:
print('{:>8} {:.2f}'.format(key.title(), value))
except ValueError:
if key in ('stdout', 'diff'):
print('{:>8}\n{}'.format(key.title(), value))
else:
print('{:>8} {}'.format(key.title(), value))
# Submit Functions
def submit_quiz(assignment, path):
answers = None
for mod_load, ext in ((json.load, 'json'), (yaml.safe_load, 'yaml')):
try:
answers = mod_load(open(os.path.join(path, 'answers.' + ext)))
except IOError as e:
pass
except Exception as e:
print('Unable to parse answers.{}: {}'.format(ext, e))
return 1
if answers is None:
print('No quiz found (answers.{json,yaml})')
return 1
print('\nSubmitting {} quiz ...'.format(assignment))
response = requests.post(DREDD_QUIZ_URL + assignment, data=json.dumps(answers))
print_results(response.json().items())
return 0 if response.json().get('score', 0) >= DREDD_QUIZ_MAX else 1
def submit_code(assignment, path):
sources = glob.glob(os.path.join(path, 'program.*'))
if not sources:
print('No code found (program.*)')
return 1
result = 1
for source in sources:
print('\nSubmitting {} {} ...'.format(assignment, os.path.basename(source)))
response = requests.post(DREDD_CODE_URL + assignment, files={'source': open(source)})
print_results(response.json().items())
result = min(result, 0 if response.json().get('score', 0) >= DREDD_CODE_MAX else 1)
return result
# Main Execution
# Add GitLab/GitHub branch
for variable in ['CI_BUILD_REF_NAME', 'GITHUB_HEAD_REF']:
try:
add_assignment(os.environ[variable])
except KeyError:
pass
# Add local git branch
try:
add_assignment(os.popen('git symbolic-ref -q --short HEAD 2> /dev/null').read().strip())
except OSError:
pass
# Add current directory
add_assignment(os.path.basename(os.path.abspath(os.curdir)), os.curdir)
# For each assignment, submit quiz answers and program code
if not ASSIGNMENTS:
print('Nothing to submit!')
sys.exit(1)
exit_code = 0
for assignment, path in sorted(ASSIGNMENTS.items()):
print('Submitting {} assignment ...'.format(assignment))
if 'reading' in assignment:
exit_code += submit_quiz(assignment, path)
elif 'challenge' in assignment:
exit_code += submit_code(assignment, path)
sys.exit(exit_code)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
| [
"[email protected]"
] | |
6407d8cd05af0356ac59c26e791b779813da547d | ef1f62cf4e53f856bf763ac0dee73f054518530d | /Week_07/208.Implement_Trie.py | 00f27ebe6bcf165fca9b12eb1cd01c797725850f | [] | no_license | ZHHJemotion/algorithm008-class01 | 3338af3619d8e1754a62af6a852f517b47298d95 | 5bb7d2b74110df0b5788b94c69582552d711563a | refs/heads/master | 2022-11-12T09:26:24.941738 | 2020-06-30T15:29:20 | 2020-06-30T15:29:20 | 255,102,230 | 0 | 0 | null | 2020-04-12T14:39:17 | 2020-04-12T14:39:17 | null | UTF-8 | Python | false | false | 1,821 | py | # Implement a trie with insert, search, and startsWith methods.
#
# Example:
#
#
# Trie trie = new Trie();
#
# trie.insert("apple");
# trie.search("apple"); // returns true
# trie.search("app"); // returns false
# trie.startsWith("app"); // returns true
# trie.insert("app");
# trie.search("app"); // returns true
#
#
# Note:
#
#
# You may assume that all inputs are consist of lowercase letters a-z.
# All inputs are guaranteed to be non-empty strings.
#
# Related Topics Design Trie
# leetcode submit region begin(Prohibit modification and deletion)
class Trie:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = {}
self.end_of_word = '#'
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
node = self.root
for char in word:
node = node.setdefault(char, {})
node[self.end_of_word] = self.end_of_word
def search(self, word: str) -> bool:
"""
Returns if the word is in the trie.
"""
node = self.root
for char in word:
if char not in node:
return False
node = node[char]
return self.end_of_word in node
def startsWith(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
node = self.root
for char in prefix:
if char not in node:
return False
node = node[char]
return True
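# Note (added): each method walks one dict level per character, so insert,
# search and startsWith all run in O(L) time for a word/prefix of length L.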
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
# leetcode submit region end(Prohibit modification and deletion)
| [
"[email protected]"
] | |
a573ee26fbe78730cc20595670f4408e417d3492 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/GLES1/OES/single_precision.py | d67e1d503c813791b2ce19f41892ef8db20a7272 | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 1,317 | py | '''OpenGL extension OES.single_precision
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.single_precision to provide a more
Python-friendly API
Overview (from the spec)
This extension adds commands with single-precision floating-point
parameters corresponding to the commands that only variants that
accept double-precision floating-point input. This allows an
application to avoid using double-precision floating-point
data types. New commands are added with an 'f' prefix.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/single_precision.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.single_precision import *
from OpenGL.raw.GLES1.OES.single_precision import _EXTENSION_NAME
def glInitSinglePrecisionOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glClipPlanefOES=wrapper.wrapper(glClipPlanefOES).setInputArraySize(
'equation', 4
)
glGetClipPlanefOES=wrapper.wrapper(glGetClipPlanefOES).setOutput(
'equation',size=(4,),orPassIn=True
)
### END AUTOGENERATED SECTION | [
"[email protected]"
] | |
2f9a53d49fd4ed9c71a2cd957ff8bd6d59d9d5d0 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/web-dev-notes-resource-site/2-content/Data-Structures/1-Python/maths/gcd.py | 51d2711d3abfc8b69402ae7ee013a4a7b2f630df | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 938 | py | def gcd(a, b):
"""Computes the greatest common divisor of integers a and b using
Euclid's Algorithm.
"""
while b != 0:
a, b = b, a % b
return a
def lcm(a, b):
    """Computes the lowest common multiple of integers a and b."""
    return a * b // gcd(a, b)
"""
Given a positive integer x, computes the number of trailing zero of x.
Example
Input : 34(100010)
~~~~~^
Output : 1
Input : 40(101000)
~~~^^^
Output : 3
"""
def trailing_zero(x):
cnt = 0
while x and not x & 1:
cnt += 1
x >>= 1
return cnt
"""
Given two non-negative integer a and b,
computes the greatest common divisor of a and b using bitwise operator.
"""
def gcd_bit(a, b):
tza = trailing_zero(a)
tzb = trailing_zero(b)
a >>= tza
b >>= tzb
while b:
if a < b:
a, b = b, a
a -= b
a >>= trailing_zero(a)
return a << min(tza, tzb)
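# Minimal sanity-check sketch (illustrative addition): cross-checks the
# bitwise GCD against Euclid's version on small inputs.
if __name__ == '__main__':
    assert gcd(12, 18) == gcd_bit(12, 18) == 6
    assert lcm(4, 6) == 12
    assert trailing_zero(40) == 3  # 40 = 0b101000
    print(gcd_bit(40, 34))  # -> 2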
| [
"[email protected]"
] | |
1d996f9d9e66d2a64deb825439e2acd7feef60e3 | 28a9cc19537f7264421afeb9883962aa480c2616 | /deals/migrations/0002_product_data_category_id.py | cc5cb13b2afca538cacf42cdf415cf940e913c68 | [] | no_license | ujjwalagrawal17/BrokerAppBackend | b33df886b389aabfcfe7278c3e41c99d13d4fbb3 | 1b8ffd18e4c5257d222c17b8aece3351b549b204 | refs/heads/master | 2021-01-22T21:23:18.807792 | 2017-03-18T19:06:44 | 2017-03-18T19:06:44 | 85,425,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-12-29 18:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('deals', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product_data',
name='category_id',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
4340e9b7a8286e8c59be954b02524b64f84cd8d3 | c36aae30fad06ea64dd5d158d364c18462a70799 | /website/books/models.py | ea80d86c5c777f2270f02934900264f101096070 | [] | no_license | srikanthpragada/building_web_applications_using_django_framework | 60222ca503c97aa375726d4496db9e36a72ebb4b | 920780593d6324414d0c727a9bce2db171614350 | refs/heads/master | 2023-01-03T12:31:10.851750 | 2020-10-31T15:49:10 | 2020-10-31T15:49:10 | 308,915,547 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from django.db import models
class Book(models.Model):
title = models.CharField(max_length=30, unique=True, null=False)
author = models.CharField(max_length=30, null=False)
price = models.IntegerField(null=False)
def __str__(self):
return f"{self.id} - {self.title} - {self.author} - {self.price}"
class Meta:
db_table = 'books'
| [
"[email protected]"
] | |
8fb78e1b6dc83ef71a9e50e3592e4b6d439b160e | b77a36eb7c2214151feccca0c4894381147d6051 | /movie/migrations/0009_alter_movie_movie_rating.py | 2d33a2519713b24a769d3ba4f64c82b9413b674c | [] | no_license | mnk-q/watchlist | b15536bb8a2a81f1935bb08a25b04dc27953f896 | 3ac14b1ba48a2bd7aae84e7a3c64cf60fedd6f17 | refs/heads/master | 2023-06-04T21:14:51.235899 | 2021-06-25T17:41:37 | 2021-06-25T17:41:37 | 349,739,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 3.2.3 on 2021-06-17 13:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movie', '0008_alter_movie_movie_studio'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='movie_rating',
field=models.CharField(max_length=6),
),
]
| [
"="
] | = |
2e4e80804e0df7428bc6a33fefaccdae4578c22f | eb4119dda59e44fc418be51a33c11e5d32f29fd7 | /src/ssadmin/exceptions.py | 85209da51cf62e8e06cd884e306ea79a48927876 | [
"MIT"
] | permissive | daimon99/ssadmin | 4ee08f4d56bc8f27099f1e1caa72a3ca8b8b1b57 | 9a1470712bdca5b0db17895d4c8215555d6b1b04 | refs/heads/master | 2020-12-24T12:29:49.070231 | 2016-11-11T13:26:34 | 2016-11-11T13:26:34 | 72,992,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | # coding:utf-8
class SSError(Exception):
pass
| [
"[email protected]"
] | |
305d5f76e5fb3052bcd63b76465b8fe2c3e33e0b | 92e26b93057723148ecb8ca88cd6ad755f2e70f1 | /SE/BottleNeck/r40_SE/network.py | bba54ad210394d7c821cc9509f84a78419bb598a | [] | no_license | lyuyanyii/CIFAR | 5906ad9fbe1377edf5b055098709528e06b5ace2 | d798834942d6a9d4e3295cda77488083c1763962 | refs/heads/master | 2021-08-30T20:09:52.819883 | 2017-12-19T08:37:37 | 2017-12-19T08:37:37 | 112,701,370 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | import numpy as np
from megskull.network import Network
from megskull.opr.all import (
Conv2D, Pooling2D, FullyConnected, Softmax,
CrossEntropyLoss, Dropout, ElementwiseAffine
)
from megskull.opr.helper.elemwise_trans import ReLU, Identity, Sigmoid
from megskull.graph.query import GroupNode
from megskull.opr.netsrc import DataProvider
import megskull.opr.helper.param_init as pinit
from megskull.opr.helper.param_init import AutoGaussianParamInitializer as G
from megskull.opr.helper.param_init import ConstantParamInitializer as C
from megskull.opr.regularizer import BatchNormalization as BN
import megskull.opr.arith as arith
from megskull.network import NetworkVisitor
global idx
idx = 0
def conv_bn(inp, ker_shape, stride, padding, out_chl, isrelu):
global idx
idx += 1
l1 = Conv2D(
"conv{}".format(idx), inp, kernel_shape = ker_shape, stride = stride, padding = padding,
output_nr_channel = out_chl,
#W = G(mean = 0, std = ((1) / (ker_shape**2 * inp.partial_shape[1]))**0.5),
#b = C(0),
nonlinearity = Identity()
)
l2 = BN("bn{}".format(idx), l1, eps = 1e-9)
l2 = ElementwiseAffine("bnaff{}".format(idx), l2, shared_in_channels = False, k = C(1), b = C(0))
if isrelu:
l2 = arith.ReLU(l2)
return l2
def res_layer(inp, chl, stride = 1, proj = False):
pre = inp
inp = conv_bn(inp, 1, stride, 0, chl // 4, True)
inp = conv_bn(inp, 3, 1, 1, chl // 4, True)
inp = conv_bn(inp, 1, 1, 0, chl, False)
if proj:
pre = conv_bn(pre, 1, stride, 0, chl, False)
name = inp.name
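    # Squeeze-and-Excitation gate: average-pool the feature map to one value
    # per channel, pass it through two fully connected layers (ReLU, then
    # Sigmoid), and rescale the residual branch channel-wise before adding
    # the shortcut.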
#Global Average Pooling
SE = inp.mean(axis = 3).mean(axis = 2)
#fc0
SE = FullyConnected(
"fc0({})".format(name), SE, output_dim = SE.partial_shape[1],
nonlinearity = ReLU()
)
#fc1
SE = FullyConnected(
"fc1({})".format(name), SE, output_dim = SE.partial_shape[1],
nonlinearity = Sigmoid()
)
inp = inp * SE.dimshuffle(0, 1, 'x', 'x')
inp = arith.ReLU(inp + pre)
return inp
def res_block(inp, chl, i, n):
stride = 2
if i == 0:
stride = 1
inp = res_layer(inp, chl, stride = stride, proj = True)
for i in range(n - 1):
inp = res_layer(inp, chl)
return inp
def make_network(minibatch_size = 128, debug = False):
patch_size = 32
inp = DataProvider("data", shape = (minibatch_size, 3, patch_size, patch_size), dtype = np.float32)
label = DataProvider("label", shape = (minibatch_size, ), dtype = np.int32)
lay = conv_bn(inp, 3, 1, 1, 16, True)
n = 4
lis = [16 * 4, 32 * 4, 64 * 4]
for i in range(len(lis)):
lay = res_block(lay, lis[i], i, n)
#global average pooling
#feature = lay.mean(axis = 2).mean(axis = 2)
feature = Pooling2D("pooling", lay, window = 8, stride = 8, padding = 0, mode = "AVERAGE")
pred = Softmax("pred", FullyConnected(
"fc0", feature, output_dim = 10,
#W = G(mean = 0, std = (1 / 64)**0.5),
#b = C(0),
nonlinearity = Identity()
))
network = Network(outputs = [pred])
network.loss_var = CrossEntropyLoss(pred, label)
if debug:
visitor = NetworkVisitor(network.loss_var)
for i in visitor.all_oprs:
print(i)
print(i.partial_shape)
print("input = ", i.inputs)
print("output = ", i.outputs)
print()
return network
if __name__ == "__main__":
make_network(debug = True)
| [
"[email protected]"
] | |
13d7260187bc3dedef4fe80405688fa7a830b32e | 2c4ba5a56b7a3d3e1c286b678eb8068f51c23046 | /week3/2-Resolve-with-Functions/solutions/twin_primes.py | ff03fddc715021dd92dde7664a6c768f15866bd7 | [] | no_license | OgnyanPenkov/Programming0-1 | 3b69757bd803814585d77479fc987a0ee92d0390 | 8078f316ea2b81216c21cf78e7cf1afc17f54846 | refs/heads/master | 2021-01-21T15:12:20.814368 | 2015-10-07T18:16:39 | 2015-10-07T18:16:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | def is_prime(n):
    """Trial division: n is prime iff no divisor in [2, sqrt(n)] divides it."""
    if n <= 1:
        return False
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 1
    return True
p = input("Enter number: ")
p = int(p)
q = p - 2
r = p + 2
is_p_prime = is_prime(p)
is_q_prime = is_prime(q)
is_r_prime = is_prime(r)
if is_p_prime and (not is_q_prime) and (not is_r_prime):
print(str(p) + " is prime")
print("But " + str(q) + " and " + str(r) + " are not.")
elif is_p_prime:
if is_q_prime:
print(q, p)
if is_r_prime:
print(p, r)
else:
print(str(p) + " is not prime.")
| [
"[email protected]"
] | |
eacfa3bf0ecf50f94c712db40637ea50d9317cbf | 38a42a205eaa5a0a46989c95f0b01f7e04b96a9e | /uoft/CSC236H1S Intro to Theory Comp/tut/quiz6/MasterRunner.py | 6ed2c358606beddceff4c243ed6535cafbbe5a68 | [
"MIT"
] | permissive | Reginald-Lee/biji-ben | d24cd1189ca3e9ed7b30e5b20a40137e8d6d4039 | 37009dfdbef9a15c2851bcca2a4e029267e6a02d | refs/heads/master | 2023-05-06T23:06:49.819088 | 2020-06-10T12:07:47 | 2020-06-10T12:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | def soln(a, b, k, e):
c = 0
while c <= e:
        print((float(a) / b ** k) ** c)
c += 1
| [
"[email protected]"
] | |
f250d33cc3036d938d84a63612f7bddeef67203a | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /toontown/toon/PlayerDetailPanel.py | 7fab34d34832d1ccb0846ed7cb013f3fb958085d | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 5,657 | py | # 2013.08.22 22:26:15 Pacific Daylight Time
# Embedded file name: toontown.toon.PlayerDetailPanel
from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase import DirectObject
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
import DistributedToon
from toontown.friends import FriendInviter
import ToonTeleportPanel
from toontown.toonbase import TTLocalizer
from toontown.hood import ZoneUtil
from toontown.toonbase.ToontownBattleGlobals import Tracks, Levels
globalAvatarDetail = None
def showPlayerDetail(avId, avName, playerId = None):
global globalAvatarDetail
if globalAvatarDetail != None:
globalAvatarDetail.cleanup()
globalAvatarDetail = None
globalAvatarDetail = PlayerDetailPanel(avId, avName, playerId)
return
def hidePlayerDetail():
global globalAvatarDetail
if globalAvatarDetail != None:
globalAvatarDetail.cleanup()
globalAvatarDetail = None
return
def unloadPlayerDetail():
global globalAvatarDetail
if globalAvatarDetail != None:
globalAvatarDetail.cleanup()
globalAvatarDetail = None
return
class PlayerDetailPanel(DirectFrame):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('ToonAvatarDetailPanel')
def __init__(self, avId, avName, playerId = None, parent = aspect2dp, **kw):
self.playerId = playerId
self.isPlayer = 0
self.playerInfo = None
if playerId:
self.isPlayer = 1
if base.cr.playerFriendsManager.playerId2Info.has_key(playerId):
self.playerInfo = base.cr.playerFriendsManager.playerId2Info[playerId]
if not self.playerInfo.onlineYesNo:
avId = None
else:
avId = None
self.avId = avId
self.avName = avName
self.avatar = None
self.createdAvatar = None
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
gui = loader.loadModel('phase_3.5/models/gui/avatar_panel_gui')
detailPanel = gui.find('**/avatarInfoPanel')
textScale = 0.132
textWrap = 10.4
if self.playerId:
textScale = 0.1
textWrap = 18.0
optiondefs = (('pos', (0.525, 0.0, 0.525), None),
('scale', 0.5, None),
('relief', None, None),
('image', detailPanel, None),
('image_color', GlobalDialogColor, None),
('text', '', None),
('text_wordwrap', textWrap, None),
('text_scale', textScale, None),
('text_pos', (-0.125, 0.75), None))
self.defineoptions(kw, optiondefs)
DirectFrame.__init__(self, parent)
self.dataText = DirectLabel(self, text='', text_scale=0.085, text_align=TextNode.ALeft, text_wordwrap=15, relief=None, pos=(-0.85, 0.0, 0.725))
if self.avId:
self.avText = DirectLabel(self, text=TTLocalizer.PlayerToonName % {'toonname': self.avName}, text_scale=0.09, text_align=TextNode.ALeft, text_wordwrap=15, relief=None, pos=(-0.85, 0.0, 0.56))
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
self.gotoToonButton = DirectButton(parent=self, relief=None, image=(guiButton.find('**/QuitBtn_UP'), guiButton.find('**/QuitBtn_DN'), guiButton.find('**/QuitBtn_RLVR')), image_scale=1.15, text=TTLocalizer.PlayerShowToon, text_scale=0.08, text_pos=(0.0, -0.02), textMayChange=0, pos=(0.43, 0, 0.415), command=self.__showToon)
ToonTeleportPanel.hideTeleportPanel()
FriendInviter.hideFriendInviter()
self.bCancel = DirectButton(self, image=(buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')), relief=None, text=TTLocalizer.AvatarDetailPanelCancel, text_scale=0.05, text_pos=(0.12, -0.01), pos=(-0.865, 0.0, -0.765), scale=2.0, command=self.__handleCancel)
self.bCancel.show()
self.initialiseoptions(PlayerDetailPanel)
self.__showData()
buttons.removeNode()
gui.removeNode()
return
def cleanup(self):
if self.createdAvatar:
self.avatar.delete()
self.createdAvatar = None
self.destroy()
return
def __handleCancel(self):
unloadPlayerDetail()
def __showData(self):
if self.isPlayer and self.playerInfo:
if self.playerInfo.onlineYesNo:
someworld = self.playerInfo.location
else:
someworld = TTLocalizer.OfflineLocation
text = TTLocalizer.AvatarDetailPanelPlayer % {'player': self.playerInfo.playerName,
'world': someworld}
else:
text = TTLocalizer.AvatarDetailPanelOffline
self.dataText['text'] = text
def __showToon(self):
messenger.send('wakeup')
hasManager = hasattr(base.cr, 'playerFriendsManager')
handle = base.cr.identifyFriend(self.avId)
if not handle and hasManager:
handle = base.cr.playerFriendsManager.getAvHandleFromId(self.avId)
if handle != None:
self.notify.info("Clicked on name in friend's list. doId = %s" % handle.doId)
messenger.send('clickedNametagPlayer', [handle, self.playerId, 0])
return
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\toon\PlayerDetailPanel.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:26:15 Pacific Daylight Time
| [
"[email protected]"
] | |
c75ca77774d5cd9bcad6368b9bdf184ac951965a | 6e46a850cc4ece73476a350e676ea55ce72b200a | /aliyun-python-sdk-imageseg/aliyunsdkimageseg/request/v20191230/SegmentCommodityRequest.py | f66063bb203dc2f9890c64ed12a522c3fa6acc49 | [
"Apache-2.0"
] | permissive | zhxfei/aliyun-openapi-python-sdk | fb3f22ca149988d91f07ba7ca3f6a7a4edf46c82 | 15890bf2b81ce852983f807e21b78a97bcc26c36 | refs/heads/master | 2022-07-31T06:31:24.471357 | 2020-05-22T17:00:17 | 2020-05-22T17:00:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkimageseg.endpoint import endpoint_data
class SegmentCommodityRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imageseg', '2019-12-30', 'SegmentCommodity','imageseg')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ImageURL(self):
return self.get_query_params().get('ImageURL')
def set_ImageURL(self,ImageURL):
self.add_query_param('ImageURL',ImageURL) | [
"[email protected]"
] | |
94c01ee844d433e79abd65caad5625c1412139c1 | ec31d26a8b619ec98dc5c586f525420572cc39a1 | /scripts/map_timings.py | c76a6e265894314d3ec6f5f64a7eeab8b4fa9405 | [
"MIT"
] | permissive | stestagg/timing | b0fab6ff8814e5edeeaa98b7bcd470aa9e527742 | 57f40229616b2dc385afc447f3f587940158c3b4 | refs/heads/master | 2021-06-25T19:20:17.158308 | 2017-09-11T22:26:23 | 2017-09-11T22:26:23 | 103,174,114 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | import sys
from collections import namedtuple
from pprint import pformat
import yaml
from jinja2 import Environment, FileSystemLoader, select_autoescape
Output = namedtuple("Output", ["cpu", "human"])
def unit_sort(x):
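    # Sort key that favors the unit whose scaled value lands just above 1:
    # values >= 1 get a small key proportional to their distance above 1,
    # while values < 1 are ranked by their distance below 1.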
raw = 1 - x[0]
if raw < 0:
return abs(raw) * 0.001
return raw
def render(**kw):
env = Environment(loader=FileSystemLoader('./'), autoescape=select_autoescape(['html']))
template = env.get_template('template.html')
return template.render(**kw)
def main():
units = yaml.load(open("../data/units.yaml"))
actions = yaml.load(open("../data/actions.yaml"))
timings = yaml.load(open("../data/timings.yaml"))
def to_secs(value, unit):
return value * units[unit]
for action in actions:
action['actual_min'] = to_secs(action['min'], action['units'])
action['actual_max'] = to_secs(action['max'], action['units'])
def to_unit_val(value):
scaled = [(value / num_secs, name) for name, num_secs in units.items() if name != "cycle"]
return sorted(scaled, key=unit_sort)[0]
def best_action(min_val, max_val):
for action in actions:
if action['actual_min'] < max_val and action['actual_max'] > min_val:
return action
blink_scale = to_secs(0.25, 'cycle') / 0.1
outputs = []
for i, timing in enumerate(timings, 2):
actual_min = to_secs(timing['min'], timing['units'])
actual_max = to_secs(timing['max'], timing['units'])
blink_min = actual_min / blink_scale
blink_max = actual_max / blink_scale
unit_min = to_unit_val(actual_min)
unit_max = to_unit_val(actual_max)
timing['unit_min'] = "%.1f %s" % unit_min
timing['unit_max'] = "%.1f %s" % unit_max
best = best_action(blink_min, blink_max)
if best is None:
sys.stderr.write(f'{pformat(timing)} - {to_unit_val(blink_min)}\n')
else:
outputs.append(Output(timing, best))
print(render(timings=outputs, enumerate=enumerate))
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
5da5b673a32056f2d144a312b0032ee8c2690cc3 | 3faffa899187e9244c7f3bccc02bf451e496637e | /python/chapter 1/1-1.py | ef166815faf24e009175ecdbc670c22723e9f8a0 | [] | no_license | WuLC/Beauty_OF_Programming | 909aa6e3dff3dc090da6a3f375aec15222b14f6a | 63accae07afbbece292a1115ce37b44e03839e31 | refs/heads/master | 2020-04-06T04:12:15.573389 | 2016-11-21T03:42:24 | 2016-11-21T03:42:24 | 58,548,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # -*- coding: utf-8 -*-
# @Author: LC
# @Date: 2016-10-16 12:27:57
# @Last modified by: WuLC
# @Last Modified time: 2016-10-16 20:35:05
# @Email: [email protected]
########################################
# take up certain usage of a cpu cor
#########################################
import time
import psutil
# for CPU with n cores, a dead loop can take up(100/n)% of CPU usage
def take_up_a_core():
i = 0
while True:
i += 1
# 260000 loops take up about 50% of a 2.4 GHz CPU core.
# No system call is used; the loop count is calculated from the CPU frequency
# and the number of instructions per statement.
# Note that the instructions of a for statement in Python differ from those in
# C; CPython was used in this test.
def take_up_half():
while True:
for i in xrange(260000):
pass
time.sleep(0.01)
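# Hedged calibration sketch (an illustrative addition; `cycles_per_iter` is an
# assumed cost per loop iteration, not a measured one): for a target busy
# fraction f, the busy-loop count between sleeps comes out near the 260000
# used above on a 2.4 GHz core.
def estimate_loop_count(hz=2.4e9, f=0.5, cycles_per_iter=92, sleep_s=0.01):
    busy_s = sleep_s * f / (1.0 - f)  # busy time equals sleep time at f = 0.5
    return int(hz * busy_s / cycles_per_iter)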
# Take up roughly the given percentage of CPU with psutil; only applies to a
# single-core CPU (default 50%).
def take_up_certain_percent(percent=50):
i = 0
while True:
while (psutil.cpu_percent() > percent):
time.sleep(0.01)
i += 1
if __name__ == '__main__':
take_up_certain_percent() | [
"[email protected]"
] | |
08ea5f305b23d6fd9cbdfda052a2a1e19f6d8b88 | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/legacy_test/test_sparse_attention_op.py | 87306b8b8d54285c8c3ddbbba4bd91a143364b4a | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 18,439 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import re
import unittest
import numpy as np
from eager_op_test import OpTest
import paddle
import paddle.nn.functional as F
from paddle import fluid
from paddle.fluid import core
def get_cuda_version():
result = os.popen("nvcc --version").read()
regex = r'release (\S+),'
match = re.search(regex, result)
if match:
num = str(match.group(1))
integer, decimal = num.split('.')
return int(integer) * 1000 + int(float(decimal) * 10)
else:
return -1
def masked_fill(x):
row, col = x.shape[0], x.shape[1]
for i in range(row):
for j in range(col):
if x[i][j] == 0:
x[i][j] = float('-inf')
return x
def init_mask(x):
row, col = x.shape[0], x.shape[1]
for i in range(row):
for j in range(col):
if x[i][j] == 0 and (j < 0.8 * col):
x[i][j] = 1
return x
def softmax(x, kp_mask=None, attn_mask=None, bsz=None):
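    # Numerically stable softmax over axis 1. When masks are supplied, the
    # key-padding row for batch index `bsz` and the attention mask have their
    # zero entries replaced by -inf and are added to the logits first.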
if kp_mask is None and attn_mask is None:
max = np.max(x, axis=1, keepdims=True)
e_x = np.exp(x - max)
sum = np.sum(e_x, axis=1, keepdims=True)
f_x = e_x / sum
return f_x
else:
# kp_mask
current_kp_mask = kp_mask[bsz]
row = current_kp_mask.shape[0]
current_kp_mask = np.expand_dims(current_kp_mask, 0).repeat(row, axis=0)
# attn_mask
current_attn_mask = copy.deepcopy(attn_mask)
current_attn_mask = masked_fill(current_attn_mask)
current_kp_mask = masked_fill(current_kp_mask)
x = x + current_kp_mask
x = x + current_attn_mask
max = np.max(x, axis=1, keepdims=True)
e_x = np.exp(x - max)
sum = np.sum(e_x, axis=1, keepdims=True)
f_x = e_x / sum
return f_x
def get_csr_value(mat, layout, nnz):
row, col = mat.shape[0], mat.shape[1]
value = np.zeros(nnz)
ptr = 0
for i in range(row):
for j in range(col):
if layout[i][j] == 1:
value[ptr] = mat[i][j]
ptr += 1
return value
def ref_sparse_attention(
q, k, v, offset, columns, kp_mask=None, attn_mask=None, bsz=None
):
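    # Dense reference: rebuild the 0/1 layout from the CSR (offset, columns)
    # pair, record the masked QK^T values in CSR order, scale by 1/sqrt(d),
    # set masked entries to -inf, softmax, then multiply by V.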
row, col, nnz = q.shape[0], q.shape[1], columns.shape[0]
mat = np.zeros((row, row))
for cur_row in range(row):
start_ptr = int(offset[cur_row])
end_ptr = int(offset[cur_row + 1])
for ptr in range(start_ptr, end_ptr):
cur_col = int(columns[ptr])
mat[cur_row][cur_col] = 1
a = np.dot(q, k.T) * mat
a_value = get_csr_value(a, mat, nnz)
scaling = float(col) ** -0.5
a = scaling * a
for i in range(row):
for j in range(row):
if mat[i][j] == 0:
a[i][j] = float('-inf')
# softmax
if kp_mask is None and attn_mask is None:
b = softmax(a)
else:
b = softmax(a, kp_mask=kp_mask, attn_mask=attn_mask, bsz=bsz)
b_value = get_csr_value(b, mat, nnz)
result = np.dot(b, v)
return result, a_value, b_value
def ref_batch_sparse_attention(
q, k, v, offset, columns, kp_mask=None, attn_mask=None
):
batch_size, num_heads, row, col = q.shape
nnz = columns.shape[2]
result = np.zeros((batch_size, num_heads, row, col))
result_sdd = np.zeros((batch_size, num_heads, nnz))
result_softmax = np.zeros((batch_size, num_heads, nnz))
for i in range(batch_size):
for j in range(num_heads):
(
cur_q,
cur_k,
cur_v,
) = (
q[i][j],
k[i][j],
v[i][j],
)
cur_offset, cur_columns = offset[i][j], columns[i][j]
if kp_mask is None and attn_mask is None:
cur_result, cur_sdd, cur_softmax = ref_sparse_attention(
cur_q, cur_k, cur_v, cur_offset, cur_columns
)
else:
cur_result, cur_sdd, cur_softmax = ref_sparse_attention(
cur_q,
cur_k,
cur_v,
cur_offset,
cur_columns,
kp_mask=kp_mask,
attn_mask=attn_mask,
bsz=i,
)
result[i][j] = cur_result
result_sdd[i][j], result_softmax[i][j] = cur_sdd, cur_softmax
return result, result_sdd, result_softmax
def init_csr_format(batch_size, num_heads, rows, blocksize):
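    # Build a block-diagonal rows x rows sparsity layout and return it in CSR
    # form (offset = row pointers, columns = column indices), broadcast over
    # the batch and head dimensions.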
    block_num, block_last = rows // blocksize, rows % blocksize
nnz_num = block_num * blocksize * blocksize + block_last * block_last
offset = np.zeros(rows + 1)
columns = np.zeros(int(nnz_num))
mat = np.zeros((rows, rows))
for i in range(0, rows, blocksize):
for x in range(blocksize):
for y in range(blocksize):
p_x, p_y = i + x, i + y
if (p_x < rows) and (p_y < rows):
mat[p_x][p_y] = 1
p_offset, p_column, count = 0, 0, 0
for i in range(rows):
for j in range(rows):
if mat[i][j] != 0:
count += 1
columns[p_column] = j
p_column += 1
p_offset += 1
offset[p_offset] = count
offset = np.expand_dims(np.expand_dims(offset, 0), 0)
offset = offset.repeat(num_heads, axis=1)
offset = offset.repeat(batch_size, axis=0)
columns = np.expand_dims(np.expand_dims(columns, 0), 0)
columns = columns.repeat(num_heads, axis=1)
columns = columns.repeat(batch_size, axis=0)
return offset, columns
@unittest.skipIf(
not core.is_compiled_with_cuda() or get_cuda_version() < 11030,
"core is not compiled with CUDA and cuda version need larger than or equal to 11.3",
)
class TestSparseAttentionOp(OpTest):
def config(self):
self.shape = (1, 1, 16, 16)
self.blocksize = 4
self.dtype = "float64"
self.use_mask = True
def setUp(self):
paddle.enable_static()
self.config()
self.op_type = "sparse_attention"
self.place = paddle.CUDAPlace(0)
self.q = np.random.random(self.shape).astype(self.dtype)
self.k = np.random.random(self.shape).astype(self.dtype)
self.v = np.random.random(self.shape).astype(self.dtype)
# init CSR tensor
offset, columns = init_csr_format(
self.shape[0], self.shape[1], self.shape[2], self.blocksize
)
self.offset = offset.astype('int32')
self.columns = columns.astype('int32')
# init mask tensor
key_padding_mask_shape = (self.shape[0], self.shape[2])
attn_mask_shape = (self.shape[2], self.shape[2])
key_padding_mask = np.random.randint(0, 2, size=key_padding_mask_shape)
attn_mask = np.random.randint(0, 2, size=attn_mask_shape)
key_padding_mask = init_mask(key_padding_mask)
attn_mask = init_mask(attn_mask)
self.key_padding_mask = key_padding_mask.astype(self.dtype)
self.attn_mask = attn_mask.astype(self.dtype)
if self.use_mask:
result, result_sdd, result_softmax = ref_batch_sparse_attention(
self.q,
self.k,
self.v,
self.offset,
self.columns,
kp_mask=self.key_padding_mask,
attn_mask=self.attn_mask,
)
else:
result, result_sdd, result_softmax = ref_batch_sparse_attention(
self.q, self.k, self.v, self.offset, self.columns
)
if self.use_mask:
self.inputs = {
'Q': self.q,
'K': self.k,
'V': self.v,
'Offset': self.offset,
'Columns': self.columns,
'KeyPaddingMask': self.key_padding_mask,
'AttnMask': self.attn_mask,
}
else:
self.inputs = {
'Q': self.q,
'K': self.k,
'V': self.v,
'Offset': self.offset,
'Columns': self.columns,
}
self.outputs = {
'Out': result.astype(self.dtype),
'SparseDotSdd': result_sdd.astype(self.dtype),
'Softmax': result_softmax.astype(self.dtype),
}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['Q'], 'Out')
self.check_grad_with_place(self.place, ['K'], 'Out')
self.check_grad_with_place(self.place, ['V'], 'Out')
class TestSparseAttentionOpFp32Test(TestSparseAttentionOp):
def config(self):
self.shape = (1, 1, 8, 16)
self.blocksize = 2
self.dtype = "float32"
self.use_mask = False
class TestSparseAttentionOpShapeTest(TestSparseAttentionOp):
def config(self):
self.shape = (2, 2, 32, 8)
self.blocksize = 8
self.dtype = "float64"
self.use_mask = False
@unittest.skipIf(
not core.is_compiled_with_cuda() or get_cuda_version() < 11030,
"core is not compiled with CUDA and cuda version need larger than or equal to 11.3",
)
class TestSparseAttentionAPI(unittest.TestCase):
def setUp(self):
self.place = paddle.CUDAPlace(0)
self.shape = (1, 1, 8, 4)
self.blocksize = 2
self.dtype = 'float64'
self.use_mask = True
def test_static_graph(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
Q = paddle.static.data(name="Q", shape=self.shape, dtype=self.dtype)
K = paddle.static.data(name="K", shape=self.shape, dtype=self.dtype)
V = paddle.static.data(name="V", shape=self.shape, dtype=self.dtype)
batch_size, num_heads, rows = (
self.shape[0],
self.shape[1],
self.shape[2],
)
            block_num = rows // self.blocksize
block_last = rows % self.blocksize
sparse_nnz_num = (
block_num * self.blocksize * self.blocksize
+ block_last * block_last
)
offset_shape = (batch_size, num_heads, rows + 1)
columns_shape = (batch_size, num_heads, int(sparse_nnz_num))
offset = paddle.static.data(
name="Offset", shape=offset_shape, dtype="int32"
)
columns = paddle.static.data(
name="Columns", shape=columns_shape, dtype="int32"
)
key_padding_mask_shape = (self.shape[0], self.shape[2])
attn_mask_shape = (self.shape[2], self.shape[2])
if self.use_mask:
key_padding_mask = paddle.static.data(
name="KeyPaddingMask",
shape=key_padding_mask_shape,
dtype=self.dtype,
)
attn_mask = paddle.static.data(
name="AttnMask", shape=attn_mask_shape, dtype=self.dtype
)
Out = F.sparse_attention(
Q,
K,
V,
offset,
columns,
key_padding_mask=key_padding_mask,
attn_mask=attn_mask,
)
else:
Out = F.sparse_attention(Q, K, V, offset, columns)
Q_np = np.random.random(self.shape).astype(self.dtype)
K_np = np.random.random(self.shape).astype(self.dtype)
V_np = np.random.random(self.shape).astype(self.dtype)
offset_np, columns_np = init_csr_format(
self.shape[0], self.shape[1], self.shape[2], self.blocksize
)
offset_np = offset_np.astype('int32')
columns_np = columns_np.astype('int32')
# init mask tensor
key_padding_mask_np = np.random.randint(
0, 2, size=key_padding_mask_shape
)
attn_mask_np = np.random.randint(0, 2, size=attn_mask_shape)
key_padding_mask_np = init_mask(key_padding_mask_np)
attn_mask_np = init_mask(attn_mask_np)
key_padding_mask_np = key_padding_mask_np.astype(self.dtype)
attn_mask_np = attn_mask_np.astype(self.dtype)
exe = fluid.Executor(self.place)
if self.use_mask:
fetches_result = exe.run(
feed={
"Q": Q_np,
"K": K_np,
"V": V_np,
"Offset": offset_np,
"Columns": columns_np,
'KeyPaddingMask': key_padding_mask_np,
'AttnMask': attn_mask_np,
},
fetch_list=[Out],
)
expected_result, __, __ = ref_batch_sparse_attention(
Q_np,
K_np,
V_np,
offset_np,
columns_np,
kp_mask=key_padding_mask_np,
attn_mask=attn_mask_np,
)
else:
fetches_result = exe.run(
feed={
"Q": Q_np,
"K": K_np,
"V": V_np,
"Offset": offset_np,
"Columns": columns_np,
},
fetch_list=[Out],
)
expected_result, __, __ = ref_batch_sparse_attention(
Q_np, K_np, V_np, offset_np, columns_np
)
np.testing.assert_allclose(
fetches_result[0], expected_result, rtol=1e-05, atol=1e-05
)
def test_dygraph(self):
paddle.disable_static()
offset, columns = init_csr_format(
self.shape[0], self.shape[1], self.shape[2], self.blocksize
)
offset = offset.astype('int32')
columns = columns.astype('int32')
query = np.random.random(self.shape).astype(self.dtype)
key = np.random.random(self.shape).astype(self.dtype)
value = np.random.random(self.shape).astype(self.dtype)
# init mask tensor
key_padding_mask_shape = (self.shape[0], self.shape[2])
attn_mask_shape = (self.shape[2], self.shape[2])
key_padding_mask = np.random.randint(0, 2, size=key_padding_mask_shape)
attn_mask = np.random.randint(0, 2, size=attn_mask_shape)
key_padding_mask = init_mask(key_padding_mask)
attn_mask = init_mask(attn_mask)
key_padding_mask = key_padding_mask.astype(self.dtype)
attn_mask = attn_mask.astype(self.dtype)
paddle_query = paddle.to_tensor(query, place=self.place)
paddle_key = paddle.to_tensor(key, place=self.place)
paddle_value = paddle.to_tensor(value, place=self.place)
paddle_offset = paddle.to_tensor(offset, place=self.place)
paddle_colunmns = paddle.to_tensor(columns, place=self.place)
paddle_kp_mask = paddle.to_tensor(key_padding_mask, place=self.place)
paddle_attn_mask = paddle.to_tensor(attn_mask, place=self.place)
if self.use_mask:
paddle_result = F.sparse_attention(
paddle_query,
paddle_key,
paddle_value,
paddle_offset,
paddle_colunmns,
key_padding_mask=paddle_kp_mask,
attn_mask=paddle_attn_mask,
)
numpy_result, __, __ = ref_batch_sparse_attention(
query,
key,
value,
offset,
columns,
kp_mask=key_padding_mask,
attn_mask=attn_mask,
)
numpy_result = numpy_result.astype(self.dtype)
else:
paddle_result = F.sparse_attention(
paddle_query,
paddle_key,
paddle_value,
paddle_offset,
paddle_colunmns,
)
numpy_result, __, __ = ref_batch_sparse_attention(
query, key, value, offset, columns
)
numpy_result = numpy_result.astype(self.dtype)
np.testing.assert_allclose(
paddle_result.numpy(), numpy_result, rtol=1e-05, atol=1e-05
)
class TestSparseAttentionAPITestFloat(TestSparseAttentionAPI):
def setUp(self):
self.place = paddle.CUDAPlace(0)
self.shape = (2, 2, 8, 4)
self.blocksize = 2
self.dtype = 'float32'
self.use_mask = False
class TestSparseAttentionAPITestShape1(TestSparseAttentionAPI):
def setUp(self):
self.place = paddle.CUDAPlace(0)
self.shape = (2, 2, 64, 32)
self.blocksize = 2
self.dtype = 'float64'
self.use_mask = False
class TestSparseAttentionAPITestShape2(TestSparseAttentionAPI):
def setUp(self):
self.place = paddle.CUDAPlace(0)
self.shape = (2, 1, 64, 32)
self.blocksize = 2
self.dtype = 'float64'
self.use_mask = False
class TestSparseAttentionAPITestShape3(TestSparseAttentionAPI):
def setUp(self):
self.place = paddle.CUDAPlace(0)
self.shape = (4, 4, 128, 32)
self.blocksize = 8
self.dtype = 'float64'
self.use_mask = False
class TestSparseAttentionAPITestShape4(TestSparseAttentionAPI):
def setUp(self):
self.place = paddle.CUDAPlace(0)
self.shape = (3, 3, 35, 15)
self.blocksize = 3
self.dtype = 'float64'
self.use_mask = False
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
56d158dff5e05e09140913a1e7f0634290017b2f | 274b743e47c08ac17afa96f4c76017e168c5793a | /torchsde/_core/methods/scalar/utils.py | 2fc1c75437a8fde6448d5ad83c5811805c280b48 | [
"Apache-2.0"
] | permissive | patrick-kidger/torchsde | 8cc6ea4ea75435fedb54a390be35d201b6a65265 | 4e6902b51bcf1c2f15c4c94027909c1162a6695c | refs/heads/master | 2022-12-01T06:11:28.082318 | 2020-08-09T17:28:43 | 2020-08-09T17:28:43 | 286,237,250 | 3 | 2 | Apache-2.0 | 2020-08-09T17:28:44 | 2020-08-09T12:56:51 | null | UTF-8 | Python | false | false | 898 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
def check_scalar_bm(val):
for val_i in val:
if torch.flatten(val_i, start_dim=1).size(1) != 1:
            raise ValueError('The Brownian motion for scalar SDEs must be of dimension 1.')
| [
"[email protected]"
] | |
7bf7fd3eaeee8d2f8d11255b72f453b53bd61041 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /P.O.R.-master/pirates/npc/DistributedGhostAI.py | 8b7ab4804bd8b8e385c65685fd59e4d316aa705e | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | from direct.distributed import DistributedObjectAI
class DistributedGhostAI(DistributedObjectAI.DistributedObjectAI):
    def __init__(self, air):
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
def announceGenerate(self):
DistributedObjectAI.DistributedObjectAI.announceGenerate(self)
def generate(self):
DistributedObjectAI.DistributedObjectAI.generate(self)
def delete(self):
DistributedObjectAI.DistributedObjectAI.delete(self)
def disable(self):
DistributedObjectAI.DistributedObjectAI.disable(self)
| [
"[email protected]"
] | |
24c033c557df6d3c058bc52996289b4261529790 | 970b654bc4e4dd2e2167dbdbb737e8dc34f82bae | /Portfolio/views.py | 76ce58bc4da492e206c7f302faf41ad7824cc7b1 | [] | no_license | Subhrans/Portfolio_using_Django | 5b7024bbf97a46e48300dc0f4c00c5111ab9537f | 8e7817c8b651f6004c495fde6c2a3defbf705158 | refs/heads/main | 2023-05-31T14:33:55.558325 | 2021-05-31T17:46:41 | 2021-05-31T17:46:41 | 322,904,475 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,281 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotFound
from .forms import SubscribeForm, ContactUsForm, LoginForm
from django.contrib import messages
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.conf import settings
from .models import (
MyDetail,
Subscribe,
MailBackend,
Service,
)
local_visited = False
def index(request, username="joy"):
print("super user:", username)
print("this is not contact us function")
if request.method == 'POST':
subscribe_form = SubscribeForm(request.POST)
if subscribe_form.is_valid():
email = subscribe_form.cleaned_data['email']
name = email.split('@')[0]
backend = MailBackend.objects.get(user=request.user)
if backend.user.is_superuser:
send_mail(
subject="Subscribed User",
message="Thanks For subscribing us",
from_email=settings.EMAIL_HOST_USER,
recipient_list=[settings.EMAIL_HOST_USER, email],
)
else:
send_mail(
subject="Subscribed User",
message="Thanks For subscribing us",
from_email=backend.gmail,
recipient_list=[backend.gmail, email],
auth_user=backend.gmail, auth_password=backend.password
)
Subscribe.objects.create(user=request.user, name=name, email=email)
print("user email is: ", email)
return render(request, 'portfolio/subscribe_successful.html',
{'name': name, "email": email, 'backend': backend})
else:
subscribe_form = SubscribeForm()
if request.user.is_anonymous:
myprofile = MyDetail.objects.filter(user=1)
# myprofile = MyDetail.objects.filter(user__username=userid)
service = Service.objects.filter(user=1)
else:
myprofile = MyDetail.objects.filter(user=request.user)
service = Service.objects.filter(user=request.user)
check_visited = request.session.get('visited', 'False')
if check_visited == "True":
myprofile.update(visited=True)
if myprofile.count() == 0:
request.session['visited'] = "False"
if myprofile.count() == 1:
request.session['visited'] = "True"
language_used = set()
for i in myprofile:
for j in i.projects_detail.all():
language_used.add(str(j.language_used))
context = {
'myprofile': myprofile,
'subscribe_form': subscribe_form,
'language_used': language_used,
'service': service,
}
return render(request, 'portfolio/home.html', context)
def contact_us_view(request, userid=1):
print("userid is:", userid)
if request.method == "POST":
cuform = ContactUsForm(request.POST)
if cuform.is_valid():
print("check user anony,ois", request.user.is_anonymous, "userid", userid)
if request.user.is_anonymous and userid != 1:
if MailBackend.objects.filter(user__username=userid).exists():
backend = MailBackend.objects.get(user__username=userid)
else:
messages.error(request, 'You have not added mail backend. Kindly add that first')
backend = None
elif request.user.is_anonymous and userid == 1:
backend = MailBackend.objects.get(user=1)
else:
backend = MailBackend.objects.get(user=request.user)
msg = cuform.cleaned_data['query']
user = cuform.cleaned_data['email']
if backend:
print(backend.user)
print(backend.gmail)
print(backend.password)
if backend.user.is_superuser:
send_mail(subject='query',
message=msg,
from_email=settings.EMAIL_HOST_USER,
recipient_list=[user, settings.EMAIL_HOST_USER],
)
cuform.save()
messages.success(request, "Mail Query sent successfully")
return HttpResponseRedirect('/contact_us/en/')
else:
send_mail(subject='query',
message=msg,
from_email=backend.gmail,
recipient_list=[user, backend.gmail],
auth_user=backend.gmail, auth_password=backend.password
)
cuform.save()
messages.success(request, "Mail Query sent successfully")
return HttpResponseRedirect('/' + str(userid) + '/contact_us/en/')
else:
cuform = ContactUsForm()
print("check user", request.user.is_anonymous)
if request.user.is_anonymous and userid != 1:
print("this")
mydetail = MyDetail.objects.filter(user__username=userid)
elif request.user.is_anonymous and userid == 1:
print("this is")
mydetail = MyDetail.objects.filter(user=1)
elif request.user.is_authenticated and userid != 1:
mydetail = MyDetail.objects.filter(user__username=userid)
else:
print("this is not")
mydetail = MyDetail.objects.filter(user=request.user)
false_path = None
print(request.path)
if request.path == "/" + str(userid) + '/contact_us/en/':
false_path = "/" + str(userid) + '/contact_us/en/'
print("false_path", false_path)
context = {'i': cuform,
"mydetail": mydetail,
'false_path': false_path,
'userid': userid,
}
return render(request, 'portfolio/contact.html', context)
def login_view(request):
if not request.user.is_authenticated:
if request.method == "POST":
login_form = LoginForm(request=request.POST, data=request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
                validate = authenticate(username=username, password=password)  # returns None if no user matches
if validate is not None:
login(request, validate)
return HttpResponseRedirect('/')
else:
messages.error(request, "Invalid Credentials")
else:
login_form = LoginForm()
else:
return HttpResponseRedirect('/')
context = {
'lform': login_form,
}
return render(request, 'portfolio/login.html', context)
@login_required(login_url='/login/')
def logout_view(request):
logout(request)
return HttpResponseRedirect('/login/')
def portfolio_view(request, username):
subscribe_form = None
backend = None
print("user can call same function")
try:
if request.method == 'POST':
subscribe_form = SubscribeForm(request.POST)
if subscribe_form.is_valid():
email = subscribe_form.cleaned_data['email']
name = email.split('@')[0]
print("sending.....")
backend = MailBackend.objects.get(user__username=username)
send_mail(
subject="Subscribed User",
message="Thanks For subscribing us",
from_email=backend.gmail,
recipient_list=[backend.gmail, email],
auth_user=backend.gmail, auth_password=backend.password,
fail_silently=True
)
Subscribe.objects.create(user=backend.user, name=name, email=email)
print("user email is: ", email)
return render(request, 'portfolio/subscribe_successful.html',
{'name': name, "email": email, 'backend': backend})
else:
subscribe_form = SubscribeForm()
if MailBackend.objects.filter(user__username=username).exists():
backend = True
except Exception as e:
print(e)
# myprofile = MyDetail.objects.filter(user=1)
myprofile = MyDetail.objects.filter(user__username=username) # userid is basically username
if not myprofile.exists():
return HttpResponseNotFound("Page not found")
service = Service.objects.filter(user__username=username)
language_used = set()
name_of_user = None
for i in myprofile:
for j in i.projects_detail.all():
language_used.add(str(j.language_used))
print("printing username", i.user)
name_of_user = "/" + str(i.user) + "/"
print(backend)
context = {
'myprofile': myprofile,
'subscribe_form': subscribe_form,
'language_used': language_used,
'service': service,
'backend': backend,
'name_of_user': name_of_user,
}
return render(request, 'portfolio/home.html', context)
| [
"[email protected]"
] | |
72c70e99db2ead16f2ef5be4abc6008f77fad04c | 0bce7412d58675d6cc410fa7a81c294ede72154e | /Python3/0044. Wildcard Matching.py | 9df5bb742fe54fee40a24b8f728f83976693a948 | [] | no_license | yang4978/LeetCode | 9ddf010b0f1dda32cddc7e94c3f987509dea3214 | 6387d05b619d403414bad273fc3a7a2c58668db7 | refs/heads/master | 2022-01-15T04:21:54.739812 | 2021-12-28T12:28:28 | 2021-12-28T12:28:28 | 182,653,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | class Solution:
def isMatch(self, s: str, p: str) -> bool:
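        # dp[i][j] is True when s[:i] matches p[:j]; a '*' may match the
        # empty string (dp[i][j-1]) or absorb one more character of s
        # (dp[i-1][j]).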
l1 = len(s)
l2 = len(p)
dp = [[False]*(l2+1) for i in range(l1+1)]
dp[0][0] = True
for j in range(1,l2+1):
if(p[j-1]=='*'):
dp[0][j] = dp[0][j-1]
for i in range(1,l1+1):
for j in range(1,l2+1):
if(s[i-1]==p[j-1] or p[j-1]=='?'):
dp[i][j] = dp[i-1][j-1]
elif(p[j-1]=='*'):
dp[i][j] = dp[i-1][j-1] or dp[i][j-1] or dp[i-1][j]
return dp[-1][-1]
| [
"[email protected]"
] | |
4e28dafd47b60ac34e28b715db2cbfcf5fefbdb2 | c08b5edb5075e7840e716b0a09006dae0a4d05ac | /.history/Missions_to_Mars/scrape_mars_20200808232435.py | 3a588df03c13e75f15c5b6cfa084daff2ad77809 | [] | no_license | OlgaDlzk/web-scraping-challenge-1 | 06f915eb76c55c9bc37889017dd9af81122dc1a5 | f99c3436dfb0169595c46dae7733d90e21385cc6 | refs/heads/master | 2023-03-18T00:58:37.928024 | 2020-09-22T20:32:47 | 2020-09-22T20:32:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,673 | py | # Dependencies
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import re
# Initialize browser
def init_browser():
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
#executable_path = {'executable_path': 'chromedriver.exe'}
return Browser("chrome", **executable_path, headless=False)
def scrape():
browser = init_browser()
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
news_title = soup.find('div', class_='content_title').text
news_p = soup.find('div', class_='article_teaser_body').text
url = 'https://www.jpl.nasa.gov/spaceimages/'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
base_url = 'https://www.jpl.nasa.gov'
image_url = soup.find("a", class_="button fancybox")["data-fancybox-href"]
featured_image_url = base_url + image_url
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
    mars_weather = soup.find(
        'p', class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text').text.strip()
url = 'https://space-facts.com/mars/'
browser.visit(url)
tables = pd.read_html(url)
facts_df = tables[0]
facts_df.columns = ['Fact', 'Value']
facts_df['Fact'] = facts_df['Fact'].str.replace(':', '')
facts_df.reset_index(drop=True, inplace=True)
facts_html = facts_df.to_html()
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
results = soup.find_all('div', class_="description")
base_url = 'https://astrogeology.usgs.gov/'
sites = []
for result in results:
link = result.find('a', class_="itemLink product-item")
link_text = link['href']
hemispheres_url = base_url + link_text
sites.append(hemispheres_url)
hemispheres = []
for site in sites:
browser.visit(site)
html = browser.html
soup = bs(html, 'html.parser')
title = soup.find('h2', class_="title").text.strip()
url = soup.find_all('a', target="_blank", href=True)[0]['href']
hemispheres.append({"title": title, "img_url": url})
output = {
"news_title": news_title,
"news_p": news_p,
"featured_image_url": featured_image_url,
"mars_weather": mars_weather,
"facts_html": facts_html,
"hemispheres": hemispheres
}
return output
| [
"[email protected]"
] | |
f3ac7390128a0e5cba6fa4f0fbbe5e23df6ae422 | 0ac0387f701e10a3d5d1fd42287ae8ab4b76be11 | /Start_up_sub_power/Main_FAST_Ver2.py | be7641b967fc1091c95e9d371ef7e5d6058e65d2 | [
"Apache-2.0"
] | permissive | LeeDaeil/CNS_Autonomous | 676e6f091c4e25d4f9b52683d119bae1ea4289a5 | 2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f | refs/heads/master | 2021-06-19T11:09:38.550032 | 2021-01-06T07:45:29 | 2021-01-06T07:45:29 | 144,431,774 | 2 | 0 | null | 2018-11-10T15:38:05 | 2018-08-12T02:24:15 | Python | UTF-8 | Python | false | false | 53,345 | py | import tensorflow as tf
from keras.layers import Dense, Input, Conv1D, MaxPooling1D, LSTM, Flatten, Dropout
from keras.models import Model
from keras.optimizers import Adam, RMSprop
from keras import backend as K
#------------------------------------------------------------------
import threading
import datetime
from collections import deque
import pandas as pd
import numpy as np
from time import sleep
from random import randrange
import os
import shutil
import logging
import logging.handlers
#------------------------------------------------------------------
from Start_up_sub_power.CNS_UDP_FAST import CNS
#------------------------------------------------------------------
from Start_up_sub_power.FAST_UI import show_window
#
get_file_time_path = datetime.datetime.now()
MAKE_FILE_PATH = f'./FAST/VER_3_{get_file_time_path.month}_{get_file_time_path.day}_' \
f'{get_file_time_path.hour}_' \
f'{get_file_time_path.minute}_' \
f'{get_file_time_path.second}_'
os.mkdir(MAKE_FILE_PATH)
logging.basicConfig(filename='{}/test.log'.format(MAKE_FILE_PATH), format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO)
#------------------------------------------------------------------
episode = 0 # Global EP
MANUAL = False
class MainModel:
def __init__(self):
self._make_folder()
self._make_tensorboaed()
self.main_net = MainNet(net_type='LSTM', input_pa=8, output_pa=3, time_leg=15)
#self.main_net.load_model('ROD')
self.build_info = {
'IP_list': ['192.168.0.9', '192.168.0.7', '192.168.0.4'],
'PORT_list': [7100, 7200, 7300],
}
if MANUAL:
self.build_info['Nub'] = [1, 0, 0]
else:
self.build_info['Nub'] = [10, 10, 10]
def run(self):
worker = self.build_A3C(build_info=self.build_info)
for __ in worker:
__.start()
sleep(1)
print('All agent start done')
count = 1
if MANUAL:
window_ = show_window(worker)
window_.start()
pass
else:
while True:
sleep(1)
# 살아 있는지 보여줌
workers_step = ''
temp = []
for i in worker:
workers_step += '{:3d} '.format(i.db.train_DB['Step'])
temp.append(i.db.train_DB['Step'])
print('[{}][max:{:3d}][{}]'.format(datetime.datetime.now(), max(temp), workers_step))
                # Save the model
if count == 60:
self.main_net.save_model('ROD')
count %= 60
count += 1
def build_A3C(self, build_info):
        # return: the list of constructed workers.
        # Test selection should also be adjusted here.
worker = []
for cnsip, com_port, max_iter in zip(build_info['IP_list'], build_info['PORT_list'], build_info['Nub']):
if max_iter != 0:
for i in range(1, max_iter + 1):
worker.append(A3Cagent(Remote_ip='192.168.0.10', Remote_port=com_port + i,
CNS_ip=cnsip, CNS_port=com_port + i,
main_net=self.main_net, Sess=self.sess,
Summary_ops=[self.summary_op, self.summary_placeholders,
self.update_ops, self.summary_writer]))
return worker
def _make_tensorboaed(self):
self.sess = tf.InteractiveSession()
K.set_session(self.sess)
self.sess.run(tf.global_variables_initializer())
self.summary_placeholders, self.update_ops, self.summary_op = self._setup_summary()
# tensorboard dir change
self.summary_writer = tf.summary.FileWriter('{}/a3c'.format(MAKE_FILE_PATH), self.sess.graph)
def _setup_summary(self):
episode_total_reward = tf.Variable(0.)
episode_avg_max_q = tf.Variable(0.)
episode_duration = tf.Variable(0.)
tf.summary.scalar('Total_Reward/Episode', episode_total_reward)
tf.summary.scalar('Average_Max_Prob/Episode', episode_avg_max_q)
tf.summary.scalar('Duration/Episode', episode_duration)
summary_vars = [episode_total_reward, episode_avg_max_q, episode_duration]
summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]
updata_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
summary_op = tf.summary.merge_all()
return summary_placeholders, updata_ops, summary_op
def _make_folder(self):
fold_list = ['{}/a3c'.format(MAKE_FILE_PATH),
'{}/log'.format(MAKE_FILE_PATH),
'{}/log/each_log'.format(MAKE_FILE_PATH),
'{}/model'.format(MAKE_FILE_PATH),
'{}/img'.format(MAKE_FILE_PATH)]
for __ in fold_list:
if os.path.isdir(__):
shutil.rmtree(__)
sleep(1)
os.mkdir(__)
else:
os.mkdir(__)
class MainNet:
def __init__(self, net_type='DNN', input_pa=1, output_pa=1, time_leg=1):
self.net_type = net_type
self.input_pa = input_pa
self.output_pa = output_pa
self.time_leg = time_leg
self.actor, self.critic = self.build_model(net_type=self.net_type, in_pa=self.input_pa,
ou_pa=self.output_pa, time_leg=self.time_leg)
self.optimizer = [self.actor_optimizer(), self.critic_optimizer()]
def build_model(self, net_type='DNN', in_pa=1, ou_pa=1, time_leg=1):
# 8 16 32 64 128 256 512 1024 2048
if net_type == 'DNN':
state = Input(batch_shape=(None, in_pa))
shared = Dense(32, input_dim=in_pa, activation='relu', kernel_initializer='glorot_uniform')(state)
shared = Dense(64, activation='relu', kernel_initializer='glorot_uniform')(shared)
shared = Dense(70, activation='relu', kernel_initializer='glorot_uniform')(shared)
elif net_type == 'CNN' or net_type == 'LSTM' or net_type == 'CLSTM':
state = Input(batch_shape=(None, time_leg, in_pa))
if net_type == 'CNN':
shared = Conv1D(filters=15, kernel_size=3, strides=1, padding='same')(state)
shared = MaxPooling1D(pool_size=3)(shared)
shared = Flatten()(shared)
shared = Dense(64)(shared)
shared = Dense(70)(shared)
elif net_type == 'LSTM':
shared = LSTM(16, activation='softsign')(state)
# shared = LSTM(64, return_sequences=True, activation='softsign')(shared)
# shared = LSTM(64, activation='softsign')(shared)
# shared = LSTM(12, return_sequences=True, activation='softsign')(shared)
# shared = LSTM(12, activation='softsign')(shared)
# shared = LSTM(64)(shared)
shared = Dense(128)(shared)
elif net_type == 'CLSTM':
shared = Conv1D(filters=15, kernel_size=5, strides=1, padding='same')(state)
shared = MaxPooling1D(pool_size=3)(shared)
shared = LSTM(12)(shared)
shared = Dense(24)(shared)
# ----------------------------------------------------------------------------------------------------
# Common output network
# actor_hidden = Dense(64, activation='relu', kernel_initializer='glorot_uniform')(shared)
actor_hidden = Dense(256, activation='sigmoid')(shared)
action_prob = Dense(ou_pa, activation='softmax')(actor_hidden)
# value_hidden = Dense(32, activation='relu', kernel_initializer='he_uniform')(shared)
value_hidden = Dense(256, activation='sigmoid')(shared)
state_value = Dense(1, activation='linear')(value_hidden)
actor = Model(inputs=state, outputs=action_prob)
critic = Model(inputs=state, outputs=state_value)
print('Make {} Network'.format(net_type))
actor._make_predict_function()
critic._make_predict_function()
actor.summary(print_fn=logging.info)
critic.summary(print_fn=logging.info)
return actor, critic
def actor_optimizer(self):
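        # Policy-gradient loss: -log pi(a|s) weighted by the stop-gradient
        # advantage, plus an entropy term (coefficient 0.01) that discourages
        # premature convergence to a deterministic policy.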
action = K.placeholder(shape=(None, self.output_pa))
advantages = K.placeholder(shape=(None, ))
policy = self.actor.output
good_prob = K.sum(action * policy, axis=1)
eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
loss = -K.sum(eligibility)
entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
actor_loss = loss + 0.01*entropy
# optimizer = Adam(lr=0.01)
# optimizer = RMSprop(lr=2.5e-4, rho=0.99, epsilon=0.01)
optimizer = RMSprop(lr=7e-4, rho=0.99, epsilon=0.01)
updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
train = K.function([self.actor.input, action, advantages], [], updates=updates)
return train
def critic_optimizer(self):
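        # Critic regression: minimize the mean squared error between the
        # predicted state value and the discounted return.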
discounted_reward = K.placeholder(shape=(None, ))
value = self.critic.output
loss = K.mean(K.square(discounted_reward - value))
# optimizer = Adam(lr=0.01)
optimizer = RMSprop(lr=7e-4, rho=0.99, epsilon=0.01)
# optimizer = RMSprop(lr=2.5e-4, rho=0.99, epsilon=0.01)
updates = optimizer.get_updates(self.critic.trainable_weights, [], loss)
train = K.function([self.critic.input, discounted_reward], [], updates=updates)
return train
def save_model(self, name):
self.actor.save_weights("{}/Model/{}_A3C_actor.h5".format(MAKE_FILE_PATH, name))
self.critic.save_weights("{}/Model/{}_A3C_cric.h5".format(MAKE_FILE_PATH, name))
def load_model(self, name):
self.actor.load_weights("FAST/VER_0_3_12_57_57/Model/{}_A3C_actor.h5".format(name))
self.critic.load_weights("FAST/VER_0_3_12_57_57/Model/{}_A3C_cric.h5".format(name))
class A3Cagent(threading.Thread):
def __init__(self, Remote_ip, Remote_port, CNS_ip, CNS_port, main_net, Sess, Summary_ops):
threading.Thread.__init__(self)
        # Module that handles communication and data exchange with the CNS simulator
        self.CNS = CNS(self.name, CNS_ip, CNS_port, Remote_ip, Remote_port)
        # Intermediate stop bookkeeping
        self.save_operation_point = {}
        # Network information
if True:
# copy main network
self.main_net = main_net
            # Input info; both networks share the same input shape
__, self.input_time_length, self.input_para_number = self.main_net.actor.input_shape
        # Modules that store training information
if True:
# Tensorboard
self.sess = Sess
[self.summary_op, self.summary_placeholders, self.update_ops, self.summary_writer] = Summary_ops
            # Storage for rewards and states
self.db = DB()
self.db.initial_train_DB()
        # Update the input parameters in use
        self.save_tick = deque([0, 0], maxlen=2)
        self.save_st = deque([False, False], maxlen=2)
        self.gap = 0
        self.rod_start = False
        self.hold_tick = 60*30 # 60 ticks * 30 minutes
self.end_time = 0
self.one_agents_episode = 0
# NEW_VER_2 Initial COND
self.COND_INIT = True
self.COND_INIT_END_TIME = 0
self.COND_ALL_ROD_OUT = False
self.COND_NET_BRK = False
self.COND_NET_BRK_DIS = 0
self.COND_AFTER = False
self.COND_AFTER_TIME = 0
done_, R_ = self.update_parameter(A=0)
def Log(self, txt):
out_txt = f'[{datetime.datetime.now()}][{self.one_agents_episode:4}]'
out_txt += txt
try:
self.logger.info(out_txt)
except:
pass
def update_parameter(self, A, only_val_up=False):
        '''
        Describe in detail the inputs and outputs used by the network.
        '''
        # Update all parameters in use
self.Time_tick = self.CNS.mem['KCNTOMS']['Val']
self.Reactor_power = self.CNS.mem['QPROREL']['Val'] # 0.02
self.Tavg = self.CNS.mem['UAVLEGM']['Val'] # 308.21
self.Tref = self.CNS.mem['UAVLEGS']['Val'] # 308.22
self.rod_pos = [self.CNS.mem[nub_rod]['Val'] for nub_rod in ['KBCDO10', 'KBCDO9', 'KBCDO8', 'KBCDO7']]
self.charging_valve_state = self.CNS.mem['KLAMPO95']['Val'] # 0(Auto) - 1(Man)
self.main_feed_valve_1_state = self.CNS.mem['KLAMPO147']['Val']
self.main_feed_valve_2_state = self.CNS.mem['KLAMPO148']['Val']
self.main_feed_valve_3_state = self.CNS.mem['KLAMPO149']['Val']
self.vct_level = self.CNS.mem['ZVCT']['Val'] # 74.45
self.pzr_level = self.CNS.mem['ZINST63']['Val'] # 34.32
#
self.boron_conc = self.CNS.mem['KBCDO16']['Val']
self.make_up_tank = self.CNS.mem['EDEWT']['Val']
self.boron_tank = self.CNS.mem['EBOAC']['Val']
#
self.Turbine_setpoint = self.CNS.mem['KBCDO17']['Val']
self.Turbine_ac = self.CNS.mem['KBCDO18']['Val'] # Turbine ac condition
self.Turbine_real = self.CNS.mem['KBCDO19']['Val'] # 20
self.load_set = self.CNS.mem['KBCDO20']['Val'] # Turbine load set point
self.load_rate = self.CNS.mem['KBCDO21']['Val'] # Turbine load rate
self.Mwe_power = self.CNS.mem['KBCDO22']['Val'] # 0
self.Netbreak_condition = self.CNS.mem['KLAMPO224']['Val'] # 0 : Off, 1 : On
self.trip_block = self.CNS.mem['KLAMPO22']['Val'] # Trip block condition 0 : Off, 1 : On
#
self.steam_dump_condition = self.CNS.mem['KLAMPO150']['Val'] # 0: auto 1: man
self.heat_drain_pump_condition = self.CNS.mem['KLAMPO244']['Val'] # 0: off, 1: on
self.main_feed_pump_1 = self.CNS.mem['KLAMPO241']['Val'] # 0: off, 1: on
self.main_feed_pump_2 = self.CNS.mem['KLAMPO242']['Val'] # 0: off, 1: on
self.main_feed_pump_3 = self.CNS.mem['KLAMPO243']['Val'] # 0: off, 1: on
self.cond_pump_1 = self.CNS.mem['KLAMPO181']['Val'] # 0: off, 1: on
self.cond_pump_2 = self.CNS.mem['KLAMPO182']['Val'] # 0: off, 1: on
self.cond_pump_3 = self.CNS.mem['KLAMPO183']['Val'] # 0: off, 1: on
self.ax_off = self.CNS.mem['CAXOFF']['Val'] # -0.63
        # Compute the reward conditions
# [OUTPUT]
# - self.Op_ref_power , self.Op_hi_bound , self.Op_low_bound , self.Op_hi_distance, self.Op_low_distance,
# self.R_distance
# - self.Op_ref_temp , self.Op_T_hi_bound , self.Op_T_low_bound , self.Op_T_hi_distance, self.Op_T_low_distance,
# self.R_T_distance
if self.COND_INIT:
            # Goal: hold power around 2% (within the 1% ~ 3% bounds below)
# Get Op bound
self.Op_ref_power = 0.020 # 0.020 ~ 0.020
self.Op_hi_bound = 0.030 # 0.030 ~ 0.030
self.Op_low_bound = 0.010 # 0.010 ~ 0.010
self.Op_ref_temp = 291.7 #
self.Op_T_hi_bound = 291.7 + 10
self.Op_T_low_bound = 291.7 - 10
# Get Op distance from current power & temp
self.Op_hi_distance = self.Op_hi_bound - self.Reactor_power
self.Op_low_distance = self.Reactor_power - self.Op_low_bound
self.Op_T_hi_distance = self.Op_T_hi_bound - self.Tavg
self.Op_T_low_distance = self.Tavg - self.Op_T_low_bound
# Get Fin distance reward
self.R_distance = min(self.Op_hi_distance, self.Op_low_distance)
if self.R_distance <= 0:
self.R_distance = 0
self.R_T_distance = min(self.Op_T_hi_distance, self.Op_T_low_distance)
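            # note: the temperature term is zeroed out below; it is not rewarded in this phase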
self.R_T_distance = 0
elif self.COND_ALL_ROD_OUT:
            # Goal: increase power by 1% per hour
# Get Op bound
            increase_pow_per = 0.01 # power increase per hour; 0.01 -> 1% per hour
            one_tick = increase_pow_per / (60 * 300) # 300 ticks = 1 minute -> 60 * 300 ticks = 1 hour
            # Power increment required per tick
            update_tick = self.Time_tick - self.COND_INIT_END_TIME # current tick minus the all-rods-out time
self.Op_ref_power = update_tick * one_tick + 0.02 # 0.020 ~ 1.000
support_up = update_tick * one_tick * 1.2 + 0.02 # 0.020 ~ 1.000
support_down = update_tick * one_tick * 0.8 + 0.02 # 0.020 ~ 1.000
# if abs(self.Op_ref_power - support_up) >= 0.05:
# support_up = self.Op_ref_power + 0.05
# support_down = self.Op_ref_power - 0.05
self.Op_hi_bound = support_up + 0.02 # 0.040 ~ 1.020
self.Op_low_bound = support_down - 0.02 # 0.000 ~ 0.980
self.Op_ref_temp = 291.7 #
self.Op_T_hi_bound = 291.7 + 10
self.Op_T_low_bound = 291.7 - 10
# Get Op distance from current power & temp
self.Op_hi_distance = self.Op_hi_bound - self.Reactor_power
self.Op_low_distance = self.Reactor_power - self.Op_low_bound
self.Op_T_hi_distance = self.Op_T_hi_bound - self.Tavg
self.Op_T_low_distance = self.Tavg - self.Op_T_low_bound
# Get Fin distance reward
self.R_distance = min(self.Op_hi_distance, self.Op_low_distance)
if self.R_distance <= 0:
self.R_distance = 0
self.R_T_distance = min(self.Op_T_hi_distance, self.Op_T_low_distance)
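            # note: the temperature term is zeroed out below; it is not rewarded in this phase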
self.R_T_distance = 0
elif self.COND_NET_BRK:
            # Goal: increase power by 1% per hour, plus a Tref/Tavg tracking reward
# Get Op bound
            increase_pow_per = 0.01 # power increase per hour; 0.01 -> 1% per hour
            one_tick = increase_pow_per / (60 * 300) # 300 ticks = 1 minute -> 60 * 300 ticks = 1 hour
            # Power increment required per tick
            update_tick = self.Time_tick - self.COND_INIT_END_TIME # current tick minus the all-rods-out time
self.Op_ref_power = update_tick * one_tick + 0.02 # 0.020 ~ 1.000
support_up = update_tick * one_tick * 1.2 + 0.02 # 0.020 ~ 1.000
support_down = update_tick * one_tick * 0.8 + 0.02 # 0.020 ~ 1.000
# if abs(self.Op_ref_power - support_up) >= 0.05:
# support_up = self.Op_ref_power + 0.05
# support_down = self.Op_ref_power - 0.05
self.Op_hi_bound = support_up + 0.02 # 0.040 ~ 1.020
self.Op_low_bound = support_down - 0.02 # 0.000 ~ 0.980
self.Op_ref_temp = self.Tref #
self.Op_T_hi_bound = self.Tref + 10
self.Op_T_low_bound = self.Tref - 10
# Get Op distance from current power & temp
self.Op_hi_distance = self.Op_hi_bound - self.Reactor_power
self.Op_low_distance = self.Reactor_power - self.Op_low_bound
self.Op_T_hi_distance = self.Op_T_hi_bound - self.Tavg
self.Op_T_low_distance = self.Tavg - self.Op_T_low_bound
# Get Fin distance reward
self.R_distance = min(self.Op_hi_distance, self.Op_low_distance)
if self.R_distance <= 0:
self.R_distance = 0
self.R_T_distance = min(self.Op_T_hi_distance, self.Op_T_low_distance)
if self.R_distance <= 0:
self.R_T_distance = 0
elif self.COND_AFTER:
            # Goal: hold power.
# Get Op bound
            increase_pow_per = 0.01 # power increase per hour; 0.01 -> 1% per hour
            one_tick = increase_pow_per / (60 * 300) # 300 ticks = 1 minute -> 60 * 300 ticks = 1 hour
            # Power increment required per tick
            update_tick = self.COND_AFTER_TIME - self.COND_INIT_END_TIME # ramp duration from all-rods-out to the hold point
self.Op_ref_power = update_tick * one_tick + 0.02 # 0.020 ~ 1.000
self.Op_hi_bound = 0.99 + 0.02 # 0.040 ~ 1.020
self.Op_low_bound = 0.99 - 0.02 # 0.000 ~ 0.980
self.Op_ref_temp = self.Tref #
self.Op_T_hi_bound = self.Tref + 10
self.Op_T_low_bound = self.Tref - 10
# Get Op distance from current power & temp
self.Op_hi_distance = self.Op_hi_bound - self.Reactor_power
self.Op_low_distance = self.Reactor_power - self.Op_low_bound
self.Op_T_hi_distance = self.Op_T_hi_bound - self.Tavg
self.Op_T_low_distance = self.Tavg - self.Op_T_low_bound
# Get Fin distance reward
self.R_distance = min(self.Op_hi_distance, self.Op_low_distance)
# if self.R_distance <= 0:
# self.R_distance = 0
self.R_T_distance = min(self.Op_T_hi_distance, self.Op_T_low_distance)
# if self.R_distance <= 0:
# self.R_T_distance = 0
else:
print('ERROR Reward Calculation STEP!')
        # Compute the reward
# [OUTPUT]
# - R
if self.COND_INIT or self.COND_ALL_ROD_OUT:
R = 0
R += self.R_distance # 0 ~ 0.02
elif self.COND_NET_BRK or self.COND_AFTER:
R = 0
R += self.R_distance # 0 ~ 0.02
            # self.R_T_distance : [0 ~ 10]
            # if self.R_T_distance >= 0: # within +/- 1 degree
            # #R_ = 1 - (10 - self.R_T_distance) # 0 ~ 1
            R_ = self.R_T_distance # 0 ~ 10
            R_ = R_ / 100 # 0 ~ 0.1
            # else:
            # R_ = -0.001 # beyond +/- 1 degree
            R += R_ # power term (0 ~ 0.02) plus temperature term (0 ~ 0.1)
else:
print('ERROR FIN Reward STEP!')
        # Guard against NaN values.
if self.Tavg == 0:
R = 0
R = round(R, 5)
        # Compute the termination conditions
done_counter = 0
done_cause = ''
        if self.Reactor_power <= 0.005: # 0.5 percent
done_cause += f'_Reactor low power{self.Reactor_power}_'
if self.rod_pos[0] == 0:
done_counter += 1
done_cause += '_Reactor Trip_'
if self.COND_INIT or self.COND_ALL_ROD_OUT:
if self.R_distance <= 0:
R += - 0.1
done_counter += 1
done_cause += f'_OutPdis{self.R_distance}_'
if self.Time_tick >= 285000:
# R += 0.05
done_counter += 1
done_cause += '_SUCCESS_Find_NET_'
elif self.COND_NET_BRK or self.COND_AFTER:
if self.R_distance <= 0:
R += - 0.1
done_counter += 1
done_cause += f'_OutPdis{self.R_distance}_'
if self.R_T_distance <= 0:
R += - 0.1
done_counter += 1
done_cause += f'_OutTdis{self.R_T_distance}_'
if self.COND_AFTER:
if self.COND_AFTER_TIME + 30000 <= self.Time_tick:
R += 1
done_counter += 1
done_cause += '_SUCCESS_'
if self.Time_tick >= 285000:
# R += 1
done_counter += 1
done_cause += '_SUCCESS_Find_NET_'
else:
print('ERROR END-Point STEP!')
# Cond Check
if self.COND_INIT:
            # Cond check - once this phase's goal is met, switch phase and record the time (done only here)
            if self.rod_pos[3] >= 221: # D bank fully withdrawn
self.COND_INIT = False # Change COND !!
self.COND_ALL_ROD_OUT = True #
self.COND_NET_BRK = False #
self.COND_AFTER = False
self.COND_INIT_END_TIME = self.Time_tick # Save current tick!
elif self.COND_ALL_ROD_OUT:
# Cond Check
            if self.Mwe_power >= 1: # electrical output present
self.COND_INIT = False # Change COND !!
self.COND_ALL_ROD_OUT = False #
self.COND_NET_BRK = True #
self.COND_AFTER = False
# self.COND_INIT_END_TIME = self.Time_tick # Save current tick!
elif self.COND_NET_BRK:
# Cond Check
            if self.Reactor_power >= 0.98: # target power reached
self.COND_INIT = False # Change COND !!
self.COND_ALL_ROD_OUT = False #
self.COND_NET_BRK = False #
self.COND_AFTER = True
self.COND_AFTER_TIME = self.Time_tick # Save current tick!
elif self.COND_AFTER:
pass
else:
print('ERROR COND Check')
if True:
            # Final termination decision
if done_counter > 0:
done = True
else:
done = False
            # Populate the final network input
self.Log(txt=f'[Done-{done_counter}][{done_cause}]')
self.Log(txt=f'[{self.COND_INIT}, {self.COND_ALL_ROD_OUT}, {self.COND_NET_BRK}, {self.COND_AFTER}]')
self.Log(txt=f'[{self.COND_INIT_END_TIME}, {self.COND_AFTER_TIME}, {self.COND_NET_BRK_DIS}]')
self.Log(txt=f'[{R:.5f}-{self.R_distance:.5f}-{self.R_T_distance:.5f}-{self.Time_tick:7}]')
self.state = [
            # Variables fed into the network input
round(self.Reactor_power, 5), # 0.02 ~ 1.00
round(self.Op_hi_distance*10/2, 5), # 0.00 ~ 0.02 -> 0.0 ~ 1.0
round(self.Op_low_distance*10/2, 5), # 0.00 ~ 0.02 -> 0.0 ~ 1.0
# round(self.Op_hi_bound, 5), # 0.00 ~ 1.02
# round(self.Op_low_bound, 5), # 0.00 ~ 0.98
round(self.Tref/310, 5), # 0 ~ 310 -> 0 ~ 1.0
round(self.Tavg/310, 5), # 0 ~ 310 -> 0 ~ 1.0
round(self.Mwe_power/900, 5), # 0 ~ 900 -> 0 ~ 1.0
round(self.Op_T_hi_bound/610, 5), # 0 ~ 310 -> 0 ~ 1.0
round(self.Op_T_low_bound/610, 5), # 0 ~ 310 -> 0 ~ 1.0
# round(self.Op_T_hi_distance/10, 5), # 0 ~ 10 -> 0 ~ 1.0
# round(self.Op_T_low_distance/10, 5), # 0 ~ 10 -> 0 ~ 1.0
]
self.save_state = {key: self.CNS.mem[key]['Val'] for key in ['KCNTOMS', # cns tick
'QPROREL', # power
'UAVLEGM', # Tavg
'UAVLEGS', # Tref
'KLAMPO95', # charging vlave state
'KLAMPO147', 'KLAMPO148', 'KLAMPO149',
'ZVCT', 'ZINST63',
'KBCDO16',
'KBCDO17', 'KBCDO18',
'KBCDO19', 'KBCDO20', 'KBCDO21', 'KBCDO22',
'KLAMPO224', 'KLAMPO22', 'KLAMPO150', 'KLAMPO244',
'KLAMPO241', 'KLAMPO242', 'KLAMPO243', 'KLAMPO181',
'KLAMPO182', 'KLAMPO183', 'CAXOFF',
'KBCDO10', 'KBCDO9', 'KBCDO8', 'KBCDO7',
'FANGLE',
'EDEWT', 'EBOAC'
]}
self.save_state['TOT_ROD'] = self.CNS.mem['KBCDO10']['Val'] + \
self.CNS.mem['KBCDO9']['Val'] + \
self.CNS.mem['KBCDO8']['Val'] + \
self.CNS.mem['KBCDO7']['Val']
self.save_state['R'] = R
self.save_state['S'] = self.db.train_DB['Step']
self.save_state['UP_D'] = self.Op_hi_bound
self.save_state['DOWN_D'] = self.Op_low_bound
self.save_state['UP_T_D'] = self.Op_T_hi_bound
self.save_state['DOWN_T_D'] = self.Op_T_low_bound
for state_val in range(len(self.state)):
self.save_state[f'{state_val}'] = self.state[state_val]
return done, R
    def run_cns(self, i):
        for _ in range(0, i):
            self.CNS.run_freeze_CNS()
        self.update_parameter(A=0, only_val_up=True)
        self.send_action(action=0)
def predict_action(self, actor, input_window):
predict_result = actor.predict([[input_window]])
policy = predict_result[0]
try:
action = np.random.choice(np.shape(policy)[0], 1, p=policy)[0]
except:
print("ERROR from NET!!")
print(policy)
sleep(10000)
return action, predict_result
def send_action_append(self, pa, va):
for _ in range(len(pa)):
self.para.append(pa[_])
self.val.append(va[_])
def send_action(self, action):
        # Lists that hold the parameters and values to be sent
self.para = []
self.val = []
        # Keep main feedwater and CVCS in automatic
if self.charging_valve_state == 1:
self.send_action_append(['KSWO100'], [0])
if self.Reactor_power >= 0.20:
if self.main_feed_valve_1_state == 1 or self.main_feed_valve_2_state == 1 or self.main_feed_valve_3_state == 1:
self.send_action_append(['KSWO171', 'KSWO165', 'KSWO159'], [0, 0, 0])
        # Proceed in the order of the operating procedure
        # 1) Above 4% power, set the turbine set point.
if self.Reactor_power >= 0.04 and self.Turbine_setpoint != 1800:
if self.Turbine_setpoint < 1790: # 1780 -> 1872
self.send_action_append(['KSWO213'], [1])
elif self.Turbine_setpoint >= 1790:
self.send_action_append(['KSWO213'], [0])
        # 1) Above 4% power, bring the turbine acceleration to 200 or below.
if self.Reactor_power >= 0.04 and self.Turbine_ac != 210:
if self.Turbine_ac < 200:
self.send_action_append(['KSWO215'], [1])
elif self.Turbine_ac >= 200:
self.send_action_append(['KSWO215'], [0])
        # 2) Above 10% power, bypass the trip block.
if self.Reactor_power >= 0.10 and self.trip_block != 1:
self.send_action_append(['KSWO22', 'KSWO21'], [1, 1])
        # 2) Above 10% power, bring the load rate up to 50.
        if self.Reactor_power >= 0.10 and self.Mwe_power <= 0:
            if self.load_set < 100: self.send_action_append(['KSWO225', 'KSWO224'], [1, 0]) # raise the turbine load set point
else: self.send_action_append(['KSWO225', 'KSWO224'], [0, 0])
# Turbine Load Rate
if self.load_rate <= 1: self.send_action_append(['KSWO227', 'KSWO226'], [1, 0])
else: self.send_action_append(['KSWO227', 'KSWO226'], [0, 0])
def range_fun(st, end, goal):
if st <= self.Reactor_power < end:
if self.load_set < goal:
                    self.send_action_append(['KSWO225', 'KSWO224'], [1, 0]) # raise the turbine load set point
else:
if self.Mwe_power + 2 > goal:
                        self.send_action_append(['KSWO225', 'KSWO224'], [1, 0]) # raise the turbine load set point
else:
self.send_action_append(['KSWO225', 'KSWO224'], [0, 0])
range_fun(st=0.05, end=0.10, goal=50)
range_fun(st=0.10, end=0.15, goal=125)
range_fun(st=0.15, end=0.20, goal=100)
range_fun(st=0.20, end=0.25, goal=125)
range_fun(st=0.25, end=0.30, goal=200)
range_fun(st=0.30, end=0.35, goal=225)
range_fun(st=0.35, end=0.40, goal=300)
range_fun(st=0.40, end=0.45, goal=350)
range_fun(st=0.45, end=0.50, goal=400)
range_fun(st=0.50, end=0.55, goal=450)
range_fun(st=0.55, end=0.60, goal=500)
range_fun(st=0.60, end=0.65, goal=550)
range_fun(st=0.65, end=0.70, goal=600)
range_fun(st=0.70, end=0.75, goal=650)
range_fun(st=0.75, end=0.80, goal=700)
range_fun(st=0.80, end=0.85, goal=750)
range_fun(st=0.85, end=0.90, goal=800)
range_fun(st=0.90, end=0.95, goal=825)
        range_fun(st=0.95, end=1.00, goal=900)
        # 3) Above 15% power with the turbine at 1800 rpm, close the net breaker.
if self.Reactor_power >= 0.15 and self.Turbine_real >= 1790 and self.Netbreak_condition != 1:
self.send_action_append(['KSWO244'], [1])
        # 4) Above 15% power with electrical output present, switch the steam dump to auto
if self.Reactor_power >= 0.15 and self.Mwe_power > 0 and self.steam_dump_condition == 1:
self.send_action_append(['KSWO176'], [0])
        # 4) Above 15% power with electrical output present, turn the heat drain pump on
if self.Reactor_power >= 0.15 and self.Mwe_power > 0 and self.heat_drain_pump_condition == 0:
self.send_action_append(['KSWO205'], [1])
        # 5) Above 20% power and electrical output of 190 MWe or more
if self.Reactor_power >= 0.20 and self.Mwe_power >= 1 and self.cond_pump_2 == 0:
self.send_action_append(['KSWO205'], [1])
        # 6) Above 40% power and electrical output of 380 MWe or more
# if self.Reactor_power >= 0.40 and self.Mwe_power >= 380 and self.main_feed_pump_2 == 0:
if self.Reactor_power >= 0.40 and self.main_feed_pump_2 == 0:
self.send_action_append(['KSWO193'], [1])
        # 7) Above 50% power and electrical output of 475 MWe
# if self.Reactor_power >= 0.50 and self.Mwe_power >= 475 and self.cond_pump_3 == 0:
if self.Reactor_power >= 0.50 and self.cond_pump_3 == 0:
self.send_action_append(['KSWO206'], [1])
        # 8) Above 80% power and electrical output of 765 MWe
# if self.Reactor_power >= 0.80 and self.Mwe_power >= 765 and self.main_feed_pump_3 == 0:
if self.Reactor_power >= 0.80 and self.main_feed_pump_3 == 0:
self.send_action_append(['KSWO192'], [1])
        # 9) Rod control signal
if divmod(self.Time_tick, 400)[1] == 0:
if self.rod_pos[3] > 221:
self.send_action_append(['KSWO33', 'KSWO32'], [0, 0]) # NO ROD CONTROL
else:
self.send_action_append(['KSWO33', 'KSWO32'], [1, 0]) # UP ROD CONTROL
else:
self.send_action_append(['KSWO33', 'KSWO32'], [0, 0]) # NO ROD CONTROL
        # 9) Send the rod control and boron control signals
if self.COND_INIT:
self.send_action_append(['KSWO75'], [1])
if action == 0: # stay pow
pass
elif action == 1: # increase pow
self.send_action_append(['KSWO33', 'KSWO32'], [1, 0]) # UP ROD CONTROL
elif action == 2: # decrease pow
self.send_action_append(['EBOAC'], [50]) # MAKE-Up
elif self.COND_ALL_ROD_OUT or self.COND_NET_BRK or self.COND_AFTER:
if action == 0: # stay pow
self.send_action_append(['KSWO75', 'KSWO77'], [1, 0]) # BOR on / ALTDIL off
self.send_action_append(['WBOAC','WDEWT'], [1, 8]) # Set-Make-up Valve
                self.send_action_append(['EBOAC', 'EDEWT'], [0, 0]) # no boron / makeup injection
elif action == 1: # increase pow
self.send_action_append(['KSWO75', 'KSWO77'], [0, 1]) # BOR off / ALTDIL on
self.send_action_append(['WBOAC','WDEWT'], [1, 8]) # Valve POS
# self.send_action_append(['EBOAC', 'EDEWT'], [0, 70]) # MAKE-Up
self.send_action_append(['EBOAC', 'EDEWT'], [0, 200]) # MAKE-Up
elif action == 2: # decrease pow
                self.send_action_append(['KSWO75', 'KSWO77'], [1, 0]) # BOR on / ALTDIL off
                self.send_action_append(['WBOAC','WDEWT'], [1, 8]) # Valve POS
                # self.send_action_append(['EBOAC', 'EDEWT'], [10, 0]) # BORON
                self.send_action_append(['EBOAC', 'EDEWT'], [5, 0]) # inject boron
else:
print('ERROR ACT')
else:
print('ERROR CONTROL PART!!')
        # Send the final parameters
# print(self.para)
self.CNS._send_control_signal(self.para, self.val)
self.Log(txt=f'SEND ACT\n{self.para}\n{self.val}')
def train_network(self):
GAE = True
if GAE:
            # Bootstrapped discounted returns, seeded from the critic's value of the last state
Dis_reward = []
#
v_s_ = self.main_net.critic.predict([[self.db.train_DB['Now_S'][-self.input_time_length:]]])[0][0]
for r in self.db.train_DB['Reward'][::-1]:
v_s_ = r + 0.99 * v_s_
Dis_reward.append(v_s_)
Dis_reward.reverse()
else:
# Typical advantage
def discount_reward(rewards):
discounted_reward = np.zeros_like(rewards)
running_add = 0
for _ in reversed(range(len(rewards))):
running_add = running_add * 0.99 + rewards[_]
discounted_reward[_] = running_add
return discounted_reward
Dis_reward = discount_reward(self.db.train_DB['Reward'])
Predicted_values = self.main_net.critic.predict(np.array(self.db.train_DB['S']))
Advantages = Dis_reward - np.reshape(Predicted_values, len(Predicted_values))
self.main_net.optimizer[0]([self.db.train_DB['S'], self.db.train_DB['Act'], Advantages])
self.main_net.optimizer[1]([self.db.train_DB['S'], Dis_reward])
        self.db.initial_each_train_DB()
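    # Illustrative sketch (an addition, never called by the training loop): the
    # backward pass above computes bootstrapped discounted returns
    # G_t = r_t + gamma * G_{t+1}, seeded with the critic's estimate of the
    # state reached after the last reward. gamma=0.99 mirrors the constant above.
    @staticmethod
    def _discounted_returns_sketch(rewards, bootstrap_value, gamma=0.99):
        returns = []
        g = bootstrap_value
        for r in reversed(rewards):
            g = r + gamma * g
            returns.append(g)
        returns.reverse()
        return returns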
def run(self):
global episode
        self.cns_speed = 1 # speed multiplier
def start_or_initial_cns(mal_time):
self.db.initial_train_DB()
self.save_operation_point = {}
# self.CNS.init_cns(initial_nub=17)
self.CNS.init_cns(initial_nub=20)
# self.CNS._send_malfunction_signal(12, 10001, mal_time)
# sleep(1)
# self.CNS._send_control_signal(['TDELTA'], [0.2*self.cns_speed])
# sleep(1)
        iter_cns = 1 # loop period - how many seconds between transmitted actions?
        mal_time = randrange(40, 60) # a malfunction occurs between 40 and 60 seconds
        start_or_initial_cns(mal_time=mal_time)
        # Training starts here
while episode < 3000:
            # 1. Collect data up to input_time_length, and act only after the malfunction
# NEW_VER_2 Initial COND
self.COND_INIT = True
self.COND_INIT_END_TIME = 0
self.COND_ALL_ROD_OUT = False
self.COND_NET_BRK = False
self.COND_NET_BRK_DIS = 0
self.COND_AFTER = False
self.COND_AFTER_TIME = 0
#
self.one_agents_episode = episode
episode += 1
start_ep_time = datetime.datetime.now()
# logger
self.logger = logging.getLogger('{}'.format(self.name))
self.logger.setLevel(logging.INFO)
self.logger_hand = logging.FileHandler('{}/log/each_log/{}.log'.format(MAKE_FILE_PATH, self.one_agents_episode))
self.logger.addHandler(self.logger_hand)
self.logger.info(f'[{datetime.datetime.now()}] Start ep')
while True:
self.run_cns(iter_cns)
done, R = self.update_parameter(A=0)
self.db.add_now_state(Now_S=self.state)
                # if len(self.db.train_DB['Now_S']) > self.input_time_length and self.Time_tick >= mal_time * 5:
                # # start control once the network's input window is full and the malfunction has begun
                # break
                if len(self.db.train_DB['Now_S']) > self.input_time_length:
                    # start control once the network's input window is full
                    break
            # 2.2 Save the control and state info - data saved before control starts
self.save_state['Act'] = 0
self.save_state['P_A_1'] = 0
self.save_state['P_A_2'] = 0
self.save_state['P_A_3'] = 0
self.save_state['time'] = self.db.train_DB['Step'] * self.cns_speed
self.db.save_state(self.save_state)
self.db.train_DB['Step'] += 1
            # 2. Start the repeated control loop
while True:
if True:
                    # 2.1 Predict the rod/boron control action from the recent state window
old_state = self.db.train_DB['Now_S'][-self.input_time_length:]
                    # Extracting the state this way yields shape (time_length, input_para_nub).
Action_net, Action_probability = self.predict_action(self.main_net.actor, old_state)
self.db.train_DB['Avg_q_max'] += np.max(Action_probability)
self.db.train_DB['Avg_max_step'] += 1
                    # 2.2 Send the chosen action to the CNS, together with the automatic control signals.
                    if MANUAL:
                        Action_net = int(input(f"[{self.db.train_DB['Step']}-{self.Time_tick}]Selected ACT:"))
                    self.send_action(action=Action_net)
                    # 2.2 Save the control and state info.
self.save_state['Act'] = Action_net
self.save_state['P_A_1'] = Action_probability[0][0]
self.save_state['P_A_2'] = Action_probability[0][1]
self.save_state['P_A_3'] = Action_probability[0][2]
self.save_state['time'] = self.db.train_DB['Step']*self.cns_speed
self.db.save_state(self.save_state)
                    # 2.3 Step the CNS with the control applied and refresh the current state.
self.run_cns(iter_cns)
                    # 2.4 Update to the new state and evaluate it.
done, R = self.update_parameter(A=Action_net)
                    self.db.add_now_state(Now_S=self.state) # self.state now holds the new state
                    # 2.5 Store the evaluation.
self.db.add_train_DB(S=old_state, R=R, A=Action_net)
                    # 2.5 Update the remaining bookkeeping variables.
self.db.train_DB['Step'] += 1
                    # 2.6 Update the network periodically, or when the episode ends.
if self.db.train_DB['Up_t'] >= self.db.train_DB['Up_t_end'] or done:
self.train_network()
self.db.train_DB['Up_t'] = 0
else:
self.db.train_DB['Up_t'] += 1
                    # 2.7 The episode reached done.
if done:
self.logger.info(f'[{datetime.datetime.now()}] Training Done - {start_ep_time}~'
f'{datetime.datetime.now()}')
# tensorboard update
stats = [self.db.train_DB['TotR'],
self.db.train_DB['Avg_q_max'] / self.db.train_DB['Avg_max_step'],
self.db.train_DB['Step']]
for i in range(len(stats)):
self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(stats[i])})
summary_str = self.sess.run(self.summary_op)
self.summary_writer.add_summary(summary_str, episode)
self.logger.info(f'[{datetime.datetime.now()}] Save img')
if self.db.train_DB['Step'] > 50:
self.db.draw_img(current_ep=episode)
self.save_tick = deque([0, 0], maxlen=2)
self.save_st = deque([False, False], maxlen=2)
self.gap = 0
self.rod_start = False
                        self.hold_tick = 60 * 30 # 60 ticks * 30 minutes
self.end_time = 0
                        mal_time = randrange(40, 60) # a malfunction occurs between 40 and 60 seconds
start_or_initial_cns(mal_time=mal_time)
self.logger.info(f'[{datetime.datetime.now()}] Episode_done - {start_ep_time}~'
f'{datetime.datetime.now()}')
                        # Reset the log handler
self.logger.removeHandler(self.logger.handlers[0])
break
class DB:
def __init__(self):
self.train_DB = {'Now_S': [], 'S': [], 'Reward': [], 'Act': [],
'Tur_R': [], 'Tur_A': [],
'TotR': 0, 'Step': 0,
'Avg_q_max': 0, 'Avg_max_step': 0,
'T_Avg_q_max': 0, 'T_Avg_max_step': 0,
# 'Up_t': 0, 'Up_t_end': 20,
'Up_t': 0, 'Up_t_end': 5,
'Net_triger': False, 'Net_triger_time': []}
self.gp_db = pd.DataFrame()
# self.fig = plt.figure(constrained_layout=True, figsize=(10, 9))
# self.gs = self.fig.add_gridspec(24, 3)
# self.axs = [self.fig.add_subplot(self.gs[0:3, :]), # 1
# self.fig.add_subplot(self.gs[3:6, :]), # 2
# self.fig.add_subplot(self.gs[6:9, :]), # 3
# self.fig.add_subplot(self.gs[9:12, :]), # 4
# self.fig.add_subplot(self.gs[12:15, :]), # 5
# self.fig.add_subplot(self.gs[15:18, :]), # 6
# self.fig.add_subplot(self.gs[18:21, :]), # 7
# self.fig.add_subplot(self.gs[21:24, :]), # 8
# # self.fig.add_subplot(self.gs[24:27, :]), # 9
# ]
def initial_train_DB(self):
self.train_DB = {'Now_S': [], 'S': [], 'Reward': [], 'Act': [],
'TotR': 0, 'Step': 0,
'Avg_q_max': 0, 'Avg_max_step': 0,
'Up_t': 0, 'Up_t_end': 5,
'Net_triger': False, 'Net_triger_time': []}
self.gp_db = pd.DataFrame()
    def initial_each_train_DB(self):
for _ in ['S', 'Reward', 'Act']:
self.train_DB[_] = []
def add_now_state(self, Now_S):
self.train_DB['Now_S'].append(Now_S)
def add_train_DB(self, S, R, A):
self.train_DB['S'].append(S)
self.train_DB['Reward'].append(R)
        Temp_R_A = np.zeros(3) # one-hot action vector; 3 = size of the action space
Temp_R_A[A] = 1
self.train_DB['Act'].append(Temp_R_A)
self.train_DB['TotR'] += self.train_DB['Reward'][-1]
def save_state(self, save_data_dict):
temp = pd.DataFrame()
for key in save_data_dict.keys():
temp[key] = [save_data_dict[key]]
self.gp_db = self.gp_db.append(temp, ignore_index=True)
def draw_img(self, current_ep):
# for _ in self.axs:
# _.clear()
# #
# self.axs[0].plot(self.gp_db['KCNTOMS'], self.gp_db['QPROREL'], 'g', label='Power')
# self.axs[0].plot(self.gp_db['KCNTOMS'], self.gp_db['UP_D'], 'r', label='Power_UP')
# self.axs[0].plot(self.gp_db['KCNTOMS'], self.gp_db['DOWN_D'], 'r', label='Power_DOWN')
# self.axs[0].legend(loc=2, fontsize=5)
# self.axs[0].grid()
# #
# self.axs[1].plot(self.gp_db['KCNTOMS'], self.gp_db['R'], 'g', label='Reward')
# self.axs[1].legend(loc=2, fontsize=5)
# self.axs[1].grid()
# #
# self.axs[2].plot(self.gp_db['KCNTOMS'], self.gp_db['UAVLEGM'], 'g', label='Average')
# self.axs[2].plot(self.gp_db['KCNTOMS'], self.gp_db['UAVLEGS'], 'r', label='Ref', color='red', lw=1)
# self.axs[2].legend(loc=2, fontsize=5)
# self.axs[2].grid()
# #
# self.axs[3].plot(self.gp_db['KCNTOMS'], self.gp_db['KBCDO20'], 'g', label='Load Set')
# self.axs[3].plot(self.gp_db['KCNTOMS'], self.gp_db['KBCDO21'], 'b', label='Load Rate')
# self.axs[3].plot(self.gp_db['KCNTOMS'], self.gp_db['KBCDO22'], 'r', label='Real Power')
# self.axs[3].legend(loc=2, fontsize=5)
# self.axs[3].grid()
# #
# self.axs[4].plot(self.gp_db['KCNTOMS'], self.gp_db['TOT_ROD'], 'g', label='ROD_POS')
# self.axs[4].legend(loc=2, fontsize=5)
# self.axs[4].grid()
#
# self.axs[5].plot(self.gp_db['KCNTOMS'], self.gp_db['KBCDO17'], 'g', label='Set')
# self.axs[5].plot(self.gp_db['KCNTOMS'], self.gp_db['KBCDO18'], 'b', label='Acc')
# self.axs[5].plot(self.gp_db['KCNTOMS'], self.gp_db['KBCDO19'], 'r', label='Real')
# self.axs[5].legend(loc=2, fontsize=5)
# self.axs[5].grid()
#
# self.axs[6].plot(self.gp_db['KCNTOMS'], self.gp_db['KBCDO16'], 'g', label='Boron')
# self.axs[6].legend(loc=2, fontsize=5)
# self.axs[6].grid()
#
# self.axs[7].plot(self.gp_db['KCNTOMS'], self.gp_db['EDEWT'], 'g', label='Boron Tank')
# self.axs[7].legend(loc=2, fontsize=5)
# self.axs[7].grid()
# #
# self.axs[3].plot(self.gp_db['time'], self.gp_db['BFV122_pos'], 'g', label='BFV122_POS')
# self.axs[3].legend(loc=2, fontsize=5)
# self.axs[3].set_ylabel('BFV122 POS [%]')
# self.axs[3].grid()
# #
# self.axs[4].plot(self.gp_db['time'], self.gp_db['BFV122_close_act'], 'g', label='Close')
# self.axs[4].plot(self.gp_db['time'], self.gp_db['BFV122_open_act'], 'r', label='Open')
# self.axs[4].set_ylabel('BFV122 Sig')
# self.axs[4].legend(loc=2, fontsize=5)
# self.axs[4].grid()
# #
# self.axs[5].plot(self.gp_db['time'], self.gp_db['HV142_pos'], 'r', label='HV142_POS')
# self.axs[5].set_ylabel('HV142 POS [%]')
# self.axs[5].legend(loc=2, fontsize=5)
# self.axs[5].grid()
# #
# self.axs[6].plot(self.gp_db['time'], self.gp_db['HV142_close_act'], 'g', label='Close')
# self.axs[6].plot(self.gp_db['time'], self.gp_db['HV142_open_act'], 'r', label='Open')
# self.axs[6].set_ylabel('HV142 Sig')
# self.axs[6].legend(loc=2, fontsize=5)
# self.axs[6].grid()
# #
# self.axs[7].plot(self.gp_db['time'], self.gp_db['Charging_flow'], 'g', label='Charging_flow')
# self.axs[7].plot(self.gp_db['time'], self.gp_db['Letdown_HX_flow'], 'r', label='Letdown_HX_flow')
# self.axs[7].set_ylabel('Flow Sig')
# self.axs[7].legend(loc=2, fontsize=5)
# self.axs[7].grid()
# #
# self.axs[8].plot(self.gp_db['time'], self.gp_db['R'], 'g', label='Reward')
# self.axs[8].set_ylabel('Rewaed')
# self.axs[8].legend(loc=2, fontsize=5)
# self.axs[8].grid()
# self.fig.savefig(fname='{}/img/{}_{}.png'.format(MAKE_FILE_PATH, self.train_DB['Step'], current_ep), dpi=300,
# facecolor=None)
self.gp_db.to_csv('{}/log/{}_{}.csv'.format(MAKE_FILE_PATH, self.train_DB['Step'], current_ep))
if __name__ == '__main__':
test = MainModel()
test.run() | [
"[email protected]"
] | |
741f02664bdd52868fe507b0d1fe419b4afddc95 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02897/s660020311.py | ea80983864a51ccbb925913415ad8d21b55324d5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | n = int(input())
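# Counts the odd numbers in 1..n; the closed form (n + 1) // 2 would give the same count in O(1).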
oddCount = len(list(filter(lambda x: x % 2 == 1, range(1,n + 1))))
print(oddCount / n) | [
"[email protected]"
] | |
6be48f1637c815776435a38e54f966983166a081 | b91588cda1a129f06aa9493ee6d6a70e4f996b7f | /Production/python/Spring16/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_cff.py | c69faaf277b52d7d2654832ef700f0bd01594130 | [] | no_license | muzamilahmad/LeptoQuarkTreeMaker | 2371e93589dbe41b02a93a2533cbf5e955aaa0db | 8e7eed0d03c6770a029eafb9b638e82c600a7425 | refs/heads/master | 2021-01-19T01:02:35.749590 | 2018-02-06T17:27:52 | 2018-02-06T17:27:52 | 65,389,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIISpring16MiniAODv1/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/00000/684EC2FC-93FF-E511-8490-008CFA111348.root',
'/store/mc/RunIISpring16MiniAODv1/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/00000/70E22982-3700-E611-9230-001E67DFF735.root',
'/store/mc/RunIISpring16MiniAODv1/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/00000/7613C27C-6EFF-E511-91DC-24BE05CE2EE1.root',
'/store/mc/RunIISpring16MiniAODv1/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/00000/806D8876-81FF-E511-B605-0025900B20E2.root',
'/store/mc/RunIISpring16MiniAODv1/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/00000/8CED51FC-4A00-E611-A234-00259029ED22.root',
'/store/mc/RunIISpring16MiniAODv1/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/00000/8E0206D4-6600-E611-B575-002590E2D9FE.root',
'/store/mc/RunIISpring16MiniAODv1/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/00000/902C1EF2-4A00-E611-AF66-24BE05BDBE61.root',
'/store/mc/RunIISpring16MiniAODv1/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/00000/C66544E5-3F00-E611-946D-003048D15E0E.root',
'/store/mc/RunIISpring16MiniAODv1/SMS-T1bbbb_mGluino-1000_mLSP-900_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/PUSpring16_80X_mcRun2_asymptotic_2016_v3-v1/00000/D0371815-3D00-E611-A2AF-782BCB205805.root',
] )
| [
"[email protected]"
] | |
7e48b766b0f55a68c8fea240590cb3cbe02c5a0d | 77772edccbdb5fe07229358a48471cfeca395893 | /restau/urls.py | 23b136010aa6519649d9415cbe92f9971b8637d7 | [] | no_license | ShreySumariya07/Restaurant-Drf | a4b08b2522da37ab88e807cb42978b014dce639a | a06fba134a50b1803d1ce59eeb5a3c4e7c1a9528 | refs/heads/master | 2023-06-22T21:55:09.949692 | 2021-01-29T13:45:25 | 2021-01-29T13:45:25 | 334,158,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py |
from django.urls import path
from . import views
urlpatterns = [
path('restaurants/', views.Restaurants.as_view()),
path('restaurants/<str:restaurant_id>/', views.RestaurantDetail.as_view()),
path('restaurants/<str:restaurant_id>/recipes/', views.Recipes.as_view()),
path('restaurants/<str:restaurant_id>/recipes/<str:recipe_id>/', views.RecipeDetail.as_view()),
] | [
"[email protected]"
] | |
2c29dde493d22062c4ad341523e1cddfa11d7c80 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/object_method_complex_call-153.py | 384c6c28ad4f88a1960d2add9439c0e2ccbf5dda | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | class A(object):
a:int = 42
def foo(self:"A", ignore:object) -> int:
return self.a
class B(A):
b:bool = True
def __init__(self:"B"):
print("B")
def bar(self:"B") -> int:
return self.foo(self.foo(print("...")))
def foo(self:"B", ignore:object) -> int:
return 1
print($Exp())
| [
"[email protected]"
] | |
df13f4b67919899516f1c43c1e5841125b3cad2d | 7d6a8a62c117bbf15da9fa4240ce60cd98efb935 | /venv/bin/wheel | 3a3464ce28ada17adf3b1fbb91a270083e6c2e35 | [] | no_license | Sem31/creating_API | 040281c14a510072c2194e76864f84afa0a4dfb9 | d468b1d97d0bb272087beea06dc21dda02f7efc1 | refs/heads/master | 2022-10-14T13:33:39.548294 | 2019-09-24T06:20:26 | 2019-09-24T06:20:26 | 210,194,810 | 0 | 0 | null | 2022-09-16T18:10:40 | 2019-09-22T18:27:03 | Python | UTF-8 | Python | false | false | 249 | #!/home/sem/Desktop/chating_API/original/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
3100bf04aa4c403c82b611c69b52c10ec9c06173 | 4be5c172c84e04c35677f5a327ab0ba592849676 | /python/interviewbit/strings/min_chars_to_make_palindrome/min_chars_to_make_palindrome.py | a703d91003b0bbc9c59a79a4de9d67fc79ac951e | [] | no_license | niranjan-nagaraju/Development | 3a16b547b030182867b7a44ac96a878c14058016 | d193ae12863971ac48a5ec9c0b35bfdf53b473b5 | refs/heads/master | 2023-04-06T20:42:57.882882 | 2023-03-31T18:38:40 | 2023-03-31T18:38:40 | 889,620 | 9 | 2 | null | 2019-05-27T17:00:29 | 2010-09-05T15:58:46 | Python | UTF-8 | Python | false | false | 3,241 | py | '''
https://www.interviewbit.com/problems/minimum-characters-required-to-make-a-string-palindromic/
Minimum Characters required to make a String Palindromic
Given an string A. The only operation allowed is to insert characters in the beginning of the string.
Find how many minimum characters are needed to be inserted to make the string a palindrome string.
Input Format
The only argument given is string A.
Output Format
Return the minimum characters that are needed to be inserted to make the string a palindrome string.
For Example
Input 1:
A = "ABC"
Output 1:
2
Explanation 1:
Insert 'B' at beginning, string becomes: "BABC".
Insert 'C' at beginning, string becomes: "CBABC".
Input 2:
A = "AACECAAAA"
Output 2:
2
Explanation 2:
Insert 'A' at beginning, string becomes: "AAACECAAAA".
Insert 'A' at beginning, string becomes: "AAAACECAAAA".
'''
'''
Solution Outline:
Consider 'BACB'
Using two-pointers to compare B(left) and B(right)
	and then when A(left) and C(right) don't match, we would have to insert C after B to get 'BCACB' (this isn't allowed).
Instead, the characters needed to make 'BACB' palindromic are 'BCA' => 'BCABACB'
Brute-force solution:
1. Start with adding 1 character from the right to the left, and check if its a palindrome
2. If it is, we are done, else add more characters from the right to the left
3. Slight optimization would be to skip the number of characters we just added from comparison.
for e.g., BC ... CB, we just added BC, we can skip comparing the first 2 and last 2 characters.
Sample run:
s: "BACXB"
Is 's' a palindrome? NO
Add 'B' to the left
s: "BBACXB"
is s[1:-1] == "BACX" a palindrome? NO
Add 2 characters
s: "BXBACXB"
is s[2:-2] == "BAC" a palindrome? NO
Add 3 characters
s: "BXCBACXB"
is s[3:-3] == "BA" a palindrome? NO
Add 4 characters
s: "BXCABACXB"
is s[4:-4] == "B" a palindrome? YES
return 4
Sample run 2:
s: "AACECAAAA"
is 's' a palindrome? NO
Add 'A' to the left
s: 'AAACECAAAA'
is s[1:-1] == "AACECAAA" a palindrome? NO
Add 'AA' to the left
s: 'AAAACECAAAA'
is s[2:-2] == 'AACECAA' a palindrome? YES
Alternately,
Simulate adding 1 character to the left
=> s: 'A' + "AACECAAAA"
we check if s[0:-1] is a palindrome
is "AACECAAA" a palindrome? NO
Simulate adding 2 characters to the left
=> s: "AA" + "AACECAAAA"
we check if s[0:-2] is a palindrome
is "AACECAA" a palindrome? YES
return 2
'''
class Solution:
def min_chars_to_make_palindrome(self, A):
# check if A[lb:ub] is a palindrome
def is_palindrome(A, lb, ub):
while lb < ub:
if A[lb] != A[ub]:
return False
lb += 1
ub -= 1
return True
n = len(A)
# A is already a palindrome
# no additions needed
if not A or is_palindrome(A, 0, n-1):
return 0
j = 1
while j < n-1:
if is_palindrome(A, 0, n-j-1):
return j
j += 1
return j
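# Alternative sketch (an addition for comparison, not the approach described in
# the outline above): the same answer follows in O(n) from the KMP failure
# function of A + '#' + reversed(A). The longest palindromic prefix of A has
# length f[-1], so n - f[-1] characters must be prepended.
def min_chars_kmp(A):
	s = A + '#' + A[::-1]
	f = [0] * len(s)
	for i in range(1, len(s)):
		k = f[i-1]
		while k and s[i] != s[k]:
			k = f[k-1]
		if s[i] == s[k]:
			k += 1
		f[i] = k
	return len(A) - f[-1]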
if __name__ == '__main__':
s = Solution()
assert s.min_chars_to_make_palindrome("AB") == 1
assert s.min_chars_to_make_palindrome("racecar") == 0
assert s.min_chars_to_make_palindrome("BACXB") == 4
assert s.min_chars_to_make_palindrome("ABC") == 2
assert s.min_chars_to_make_palindrome("AACECAAAA") == 2
| [
"[email protected]"
] | |
aa6c975ee2f19bb1dc0094b1d58e5ff4d74ef6c8 | 86abbc013ab6209d11d58b735048a560ce059e72 | /tests/test_01_main/test_env_vars_2.py | ca3fab4037b7b777f6209d3922be51fa56d92bee | [
"MIT"
] | permissive | alexiri/meinheld-gunicorn-docker | e518e10f1845856f1c2c81181e825b5c8aebf99a | e9ff167987a967071b408c0ecb7790232f378ea7 | refs/heads/master | 2020-04-16T23:44:49.276125 | 2019-01-12T14:12:12 | 2019-01-12T14:12:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | import time
import docker
import pytest
from ..utils import (
CONTAINER_NAME,
get_config,
get_process_names,
stop_previous_container,
)
client = docker.from_env()
@pytest.mark.parametrize(
"image",
[
("tiangolo/meinheld-gunicorn:python3.6"),
("tiangolo/meinheld-gunicorn:python3.7"),
("tiangolo/meinheld-gunicorn:latest"),
("tiangolo/meinheld-gunicorn:python3.6-alpine3.8"),
("tiangolo/meinheld-gunicorn:python3.7-alpine3.8"),
],
)
def test_env_vars_2(image):
stop_previous_container(client)
container = client.containers.run(
image,
name=CONTAINER_NAME,
environment={"WEB_CONCURRENCY": 1, "HOST": "127.0.0.1"},
ports={"80": "8000"},
detach=True,
)
time.sleep(1)
process_names = get_process_names(container)
config_data = get_config(container)
assert config_data["workers"] == 1
assert len(process_names) == 2 # Manager + worker
assert config_data["host"] == "127.0.0.1"
assert config_data["port"] == "80"
assert config_data["loglevel"] == "info"
assert config_data["bind"] == "127.0.0.1:80"
container.stop()
container.remove()
| [
"[email protected]"
] | |
6af5aa34d0a7a362993c603080d43cfa065632d3 | 029f8af8b1f8cd1106761aac48b86c13bd1592f4 | /pysrc/cpu/cpu_6809.py | b161b4d6dbfdd8ef0c9260faee89e8e27fc65585 | [] | no_license | highno/computerarcheology | 093ead424ada446c85bcb1cbcfc8c17b572d1814 | a7b0df284e0c98c3c75ebeb6374ffb1bf87b7640 | refs/heads/master | 2020-12-09T22:31:53.025623 | 2020-02-15T10:16:54 | 2020-02-15T10:16:54 | 233,435,423 | 0 | 0 | null | 2020-01-12T18:03:36 | 2020-01-12T18:03:36 | null | UTF-8 | Python | false | false | 23,883 | py |
OPCODES = [
{"mnem": "LDY #w", "code": "108Ewmwl", "bus": ""},
{"mnem": "NEG p", "code": "00pp", "bus": "rw"},
{"mnem": "COM p", "code": "03pp", "bus": "rw"},
{"mnem": "LSR p", "code": "04pp", "bus": "rw"},
{"mnem": "ROR p", "code": "06pp", "bus": "rw"},
{"mnem": "ASR p", "code": "07pp", "bus": "rw"},
{"mnem": "LSL p", "code": "08pp", "bus": "rw"},
{"mnem": "ASL p", "code": "08pp", "bus": "rw"},
{"mnem": "ROL p", "code": "09pp", "bus": "rw"},
{"mnem": "DEC p", "code": "0App", "bus": "rw"},
{"mnem": "INC p", "code": "0Cpp", "bus": "rw"},
{"mnem": "TST p", "code": "0Dpp", "bus": "r"},
{"mnem": "JMP p", "code": "0Epp", "bus": "x"},
{"mnem": "CLR p", "code": "0Fpp", "bus": "w"},
{"mnem": "LBRN s", "code": "1021smsl", "bus": "x"},
{"mnem": "LBHI s", "code": "1022smsl", "bus": "x"},
{"mnem": "LBLS s", "code": "1023smsl", "bus": "x"},
{"mnem": "LBHS s", "code": "1024smsl", "bus": "x"},
{"mnem": "LBCC s", "code": "1024smsl", "bus": "x"},
{"mnem": "LBLO s", "code": "1025smsl", "bus": "x"},
{"mnem": "LBCS s", "code": "1025smsl", "bus": "x", "alias": "LBLO s"},
{"mnem": "LBNE s", "code": "1026smsl", "bus": "x"},
{"mnem": "LBEQ s", "code": "1027smsl", "bus": "x"},
{"mnem": "LBVC s", "code": "1028smsl", "bus": "x"},
{"mnem": "LBVS s", "code": "1029smsl", "bus": "x"},
{"mnem": "LBPL s", "code": "102Asmsl", "bus": "x"},
{"mnem": "LBMI s", "code": "102Bsmsl", "bus": "x"},
{"mnem": "LBGE s", "code": "102Csmsl", "bus": "x"},
{"mnem": "LBLT s", "code": "102Dsmsl", "bus": "x"},
{"mnem": "LBGT s", "code": "102Esmsl", "bus": "x"},
{"mnem": "LBLE s", "code": "102Fsmsl", "bus": "x"},
{"mnem": "SWI2", "code": "103F", "bus": ""},
{"mnem": "CMPD #w", "code": "1083wmwl", "bus": ""},
{"mnem": "CMPY #w", "code": "108Cwmwl", "bus": ""},
{"mnem": "CMPD p", "code": "1093pp", "bus": "r"},
{"mnem": "CMPY p", "code": "109Cpp", "bus": "r"},
{"mnem": "LDY p", "code": "109Epp", "bus": "r"},
{"mnem": "STY p", "code": "109Fpp", "bus": "w"},
{"mnem": "CMPD y", "code": "10A3yy", "bus": ""},
{"mnem": "CMPY y", "code": "10ACyy", "bus": ""},
{"mnem": "LDY y", "code": "10AEyy", "bus": ""},
{"mnem": "STY y", "code": "10AFyy", "bus": ""},
{"mnem": "CMPD t", "code": "10B3tmtl", "bus": "r"},
{"mnem": "CMPY t", "code": "10BCtmtl", "bus": "r"},
{"mnem": "LDY t", "code": "10BEtmtl", "bus": "r"},
{"mnem": "STY t", "code": "10BFtmtl", "bus": "w"},
{"mnem": "LDS #w", "code": "10CEwmwl", "bus": ""},
{"mnem": "LDS p", "code": "10DEpp", "bus": "r"},
{"mnem": "STS p", "code": "10DFpp", "bus": "w"},
{"mnem": "LDS y", "code": "10EEyy", "bus": ""},
{"mnem": "STS y", "code": "10EFyy", "bus": ""},
{"mnem": "LDS t", "code": "10FEtmtl", "bus": "r"},
{"mnem": "STS t", "code": "10FFtmtl", "bus": "w"},
{"mnem": "SWI3", "code": "113F", "bus": ""},
{"mnem": "CMPU #w", "code": "1183wmwl", "bus": ""},
{"mnem": "CMPS #w", "code": "118Cwmwl", "bus": ""},
{"mnem": "CMPU p", "code": "1193pp", "bus": "r"},
{"mnem": "CMPS p", "code": "119Cpp", "bus": "r"},
{"mnem": "CMPU y", "code": "11A3yy", "bus": ""},
{"mnem": "CMPS y", "code": "11ACyy", "bus": ""},
{"mnem": "CMPU t", "code": "11B3tmtl", "bus": "r"},
{"mnem": "CMPS t", "code": "11BCtmtl", "bus": "r"},
{"mnem": "NOP", "code": "12", "bus": ""},
{"mnem": "SYNC", "code": "13", "bus": ""},
{"mnem": "LBRA s", "code": "16smsl", "bus": "x"},
{"mnem": "LBSR s", "code": "17smsl", "bus": "x"},
{"mnem": "DAA", "code": "19", "bus": ""},
{"mnem": "ORCC #b", "code": "1Abb", "bus": ""},
{"mnem": "ANDCC #b", "code": "1Cbb", "bus": ""},
{"mnem": "SEX", "code": "1D", "bus": ""},
{"mnem": "EXG z", "code": "1Ezz", "bus": ""},
{"mnem": "TFR z", "code": "1Fzz", "bus": ""},
{"mnem": "BRA r", "code": "20rr", "bus": "x"},
{"mnem": "BRN r", "code": "21rr", "bus": "x"},
{"mnem": "BHI r", "code": "22rr", "bus": "x"},
{"mnem": "BLS r", "code": "23rr", "bus": "x"},
{"mnem": "BHS r", "code": "24rr", "bus": "x"},
{"mnem": "BCC r", "code": "24rr", "bus": "x"},
{"mnem": "BLO r", "code": "25rr", "bus": "x"},
{"mnem": "BCS r", "code": "25rr", "bus": "x"},
{"mnem": "BNE r", "code": "26rr", "bus": "x"},
{"mnem": "BEQ r", "code": "27rr", "bus": "x"},
{"mnem": "BVC r", "code": "28rr", "bus": "x"},
{"mnem": "BVS r", "code": "29rr", "bus": "x"},
{"mnem": "BPL r", "code": "2Arr", "bus": "x"},
{"mnem": "BMI r", "code": "2Brr", "bus": "x"},
{"mnem": "BGE r", "code": "2Crr", "bus": "x"},
{"mnem": "BLT r", "code": "2Drr", "bus": "x"},
{"mnem": "BGT r", "code": "2Err", "bus": "x"},
{"mnem": "BLE r", "code": "2Frr", "bus": "x"},
{"mnem": "LEAX y", "code": "30yy", "bus": ""},
{"mnem": "LEAY y", "code": "31yy", "bus": ""},
{"mnem": "LEAS y", "code": "32yy", "bus": ""},
{"mnem": "LEAU y", "code": "33yy", "bus": ""},
{"mnem": "PSHS x", "code": "34xx", "bus": ""},
{"mnem": "PULS q", "code": "35qq", "bus": ""},
{"mnem": "PSHU u", "code": "36uu", "bus": ""},
{"mnem": "PULU v", "code": "37vv", "bus": ""},
{"mnem": "RTS", "code": "39", "bus": ""},
{"mnem": "ABX", "code": "3A", "bus": ""},
{"mnem": "RTI", "code": "3B", "bus": ""},
{"mnem": "CWAI b", "code": "3Cbb", "bus": ""},
{"mnem": "MUL", "code": "3D", "bus": ""},
{"mnem": "RESET", "code": "3E", "bus": ""},
{"mnem": "SWI", "code": "3F", "bus": ""},
{"mnem": "NEGA", "code": "40", "bus": ""},
{"mnem": "COMA", "code": "43", "bus": ""},
{"mnem": "LSRA", "code": "44", "bus": ""},
{"mnem": "RORA", "code": "46", "bus": ""},
{"mnem": "ASRA", "code": "47", "bus": ""},
{"mnem": "ASLA", "code": "48", "bus": ""},
{"mnem": "LSLA", "code": "48", "bus": ""},
{"mnem": "ROLA", "code": "49", "bus": ""},
{"mnem": "DECA", "code": "4A", "bus": ""},
{"mnem": "INCA", "code": "4C", "bus": ""},
{"mnem": "TSTA", "code": "4D", "bus": ""},
{"mnem": "CLRA", "code": "4F", "bus": ""},
{"mnem": "NEGB", "code": "50", "bus": ""},
{"mnem": "COMB", "code": "53", "bus": ""},
{"mnem": "LSRB", "code": "54", "bus": ""},
{"mnem": "RORB", "code": "56", "bus": ""},
{"mnem": "ASRB", "code": "57", "bus": ""},
{"mnem": "ASLB", "code": "58", "bus": ""},
{"mnem": "LSLB", "code": "58", "bus": ""},
{"mnem": "ROLB", "code": "59", "bus": ""},
{"mnem": "DECB", "code": "5A", "bus": ""},
{"mnem": "INCB", "code": "5C", "bus": ""},
{"mnem": "TSTB", "code": "5D", "bus": ""},
{"mnem": "CLRB", "code": "5F", "bus": ""},
{"mnem": "NEG y", "code": "60yy", "bus": ""},
{"mnem": "COM y", "code": "63yy", "bus": ""},
{"mnem": "LSR y", "code": "64yy", "bus": ""},
{"mnem": "ROR y", "code": "66yy", "bus": ""},
{"mnem": "ASR y", "code": "67yy", "bus": ""},
{"mnem": "ASL y", "code": "68yy", "bus": ""},
{"mnem": "LSL y", "code": "68yy", "bus": ""},
{"mnem": "ROL y", "code": "69yy", "bus": ""},
{"mnem": "DEC y", "code": "6Ayy", "bus": ""},
{"mnem": "INC y", "code": "6Cyy", "bus": ""},
{"mnem": "TST y", "code": "6Dyy", "bus": ""},
{"mnem": "JMP y", "code": "6Eyy", "bus": ""},
{"mnem": "CLR y", "code": "6Fyy", "bus": ""},
{"mnem": "NEG t", "code": "70tmtl", "bus": "rw"},
{"mnem": "COM t", "code": "73tmtl", "bus": "rw"},
{"mnem": "LSR t", "code": "74tmtl", "bus": "rw"},
{"mnem": "ROR t", "code": "76tmtl", "bus": "rw"},
{"mnem": "ASR t", "code": "77tmtl", "bus": "rw"},
{"mnem": "ASL t", "code": "78tmtl", "bus": "rw"},
{"mnem": "LSL t", "code": "78tmtl", "bus": "rw"},
{"mnem": "ROL t", "code": "79tmtl", "bus": "rw"},
{"mnem": "DEC t", "code": "7Atmtl", "bus": "rw"},
{"mnem": "INC t", "code": "7Ctmtl", "bus": "rw"},
{"mnem": "TST t", "code": "7Dtmtl", "bus": "r"},
{"mnem": "JMP t", "code": "7Etmtl", "bus": "x"},
{"mnem": "CLR t", "code": "7Ftmtl", "bus": "w"},
{"mnem": "SUBA #b", "code": "80bb", "bus": ""},
{"mnem": "CMPA #b", "code": "81bb", "bus": ""},
{"mnem": "SBCA #b", "code": "82bb", "bus": ""},
{"mnem": "SUBD #w", "code": "83wmwl", "bus": ""},
{"mnem": "ANDA #b", "code": "84bb", "bus": ""},
{"mnem": "BITA #b", "code": "85bb", "bus": ""},
{"mnem": "LDA #b", "code": "86bb", "bus": ""},
{"mnem": "EORA #b", "code": "88bb", "bus": ""},
{"mnem": "ADCA #b", "code": "89bb", "bus": ""},
{"mnem": "ORA #b", "code": "8Abb", "bus": ""},
{"mnem": "ADDA #b", "code": "8Bbb", "bus": ""},
{"mnem": "CMPX #w", "code": "8Cwmwl", "bus": ""},
{"mnem": "BSR r", "code": "8Drr", "bus": "x"},
{"mnem": "LDX #w", "code": "8Ewmwl", "bus": ""},
{"mnem": "SUBA p", "code": "90pp", "bus": "r"},
{"mnem": "CMPA p", "code": "91pp", "bus": "r"},
{"mnem": "SBCA p", "code": "92pp", "bus": "r"},
{"mnem": "SUBD p", "code": "93pp", "bus": "r"},
{"mnem": "ANDA p", "code": "94pp", "bus": "r"},
{"mnem": "BITA p", "code": "95pp", "bus": "r"},
{"mnem": "LDA p", "code": "96pp", "bus": "r"},
{"mnem": "STA p", "code": "97pp", "bus": "w"},
{"mnem": "EORA p", "code": "98pp", "bus": "r"},
{"mnem": "ADCA p", "code": "99pp", "bus": "r"},
{"mnem": "ORA p", "code": "9App", "bus": "r"},
{"mnem": "ADDA p", "code": "9Bpp", "bus": "r"},
{"mnem": "CMPX p", "code": "9Cpp", "bus": "r"},
{"mnem": "JSR p", "code": "9Dpp", "bus": "x"},
{"mnem": "LDX p", "code": "9Epp", "bus": "r"},
{"mnem": "STX p", "code": "9Fpp", "bus": "w"},
{"mnem": "SUBA y", "code": "A0yy", "bus": ""},
{"mnem": "CMPA y", "code": "A1yy", "bus": ""},
{"mnem": "SBCA y", "code": "A2yy", "bus": ""},
{"mnem": "SUBD y", "code": "A3yy", "bus": ""},
{"mnem": "ANDA y", "code": "A4yy", "bus": ""},
{"mnem": "BITA y", "code": "A5yy", "bus": ""},
{"mnem": "LDA y", "code": "A6yy", "bus": ""},
{"mnem": "STA y", "code": "A7yy", "bus": ""},
{"mnem": "EORA y", "code": "A8yy", "bus": ""},
{"mnem": "ADCA y", "code": "A9yy", "bus": ""},
{"mnem": "ORA y", "code": "AAyy", "bus": ""},
{"mnem": "ADDA y", "code": "AByy", "bus": ""},
{"mnem": "CMPX y", "code": "ACyy", "bus": ""},
{"mnem": "JSR y", "code": "ADyy", "bus": ""},
{"mnem": "LDX y", "code": "AEyy", "bus": ""},
{"mnem": "STX y", "code": "AFyy", "bus": ""},
{"mnem": "SUBA t", "code": "B0tmtl", "bus": "r"},
{"mnem": "CMPA t", "code": "B1tmtl", "bus": "r"},
{"mnem": "SBCA t", "code": "B2tmtl", "bus": "r"},
{"mnem": "SUBD t", "code": "B3tmtl", "bus": "r"},
{"mnem": "ANDA t", "code": "B4tmtl", "bus": "r"},
{"mnem": "BITA t", "code": "B5tmtl", "bus": "r"},
{"mnem": "LDA t", "code": "B6tmtl", "bus": "r"},
{"mnem": "STA t", "code": "B7tmtl", "bus": "w"},
{"mnem": "EORA t", "code": "B8tmtl", "bus": "r"},
{"mnem": "ADCA t", "code": "B9tmtl", "bus": "r"},
{"mnem": "ORA t", "code": "BAtmtl", "bus": "r"},
{"mnem": "ADDA t", "code": "BBtmtl", "bus": "r"},
{"mnem": "CMPX t", "code": "BCtmtl", "bus": "r"},
{"mnem": "JSR t", "code": "BDtmtl", "bus": "x"},
{"mnem": "LDX t", "code": "BEtmtl", "bus": "r"},
{"mnem": "STX t", "code": "BFtmtl", "bus": "w"},
{"mnem": "SUBB #b", "code": "C0bb", "bus": ""},
{"mnem": "CMPB #b", "code": "C1bb", "bus": ""},
{"mnem": "SBCB #b", "code": "C2bb", "bus": ""},
{"mnem": "ADDD #w", "code": "C3wmwl", "bus": ""},
{"mnem": "ANDB #b", "code": "C4bb", "bus": ""},
{"mnem": "BITB #b", "code": "C5bb", "bus": ""},
{"mnem": "LDB #b", "code": "C6bb", "bus": ""},
{"mnem": "EORB #b", "code": "C8bb", "bus": ""},
{"mnem": "ADCB #b", "code": "C9bb", "bus": ""},
{"mnem": "ORB #b", "code": "CAbb", "bus": ""},
{"mnem": "ADDB #b", "code": "CBbb", "bus": ""},
{"mnem": "LDD #w", "code": "CCwmwl", "bus": ""},
{"mnem": "LDU #w", "code": "CEwmwl", "bus": ""},
{"mnem": "SUBB p", "code": "D0pp", "bus": "r"},
{"mnem": "CMPB p", "code": "D1pp", "bus": "r"},
{"mnem": "SBCB p", "code": "D2pp", "bus": "r"},
{"mnem": "ADDD p", "code": "D3pp", "bus": "r"},
{"mnem": "ANDB p", "code": "D4pp", "bus": "r"},
{"mnem": "BITB p", "code": "D5pp", "bus": "r"},
{"mnem": "LDB p", "code": "D6pp", "bus": "r"},
{"mnem": "STB p", "code": "D7pp", "bus": "w"},
{"mnem": "EORB p", "code": "D8pp", "bus": ""},
{"mnem": "ADCB p", "code": "D9pp", "bus": "r"},
{"mnem": "ORB p", "code": "DApp", "bus": "r"},
{"mnem": "ADDB p", "code": "DBpp", "bus": "r"},
{"mnem": "LDD p", "code": "DCpp", "bus": "r"},
{"mnem": "STD p", "code": "DDpp", "bus": "w"},
{"mnem": "LDU p", "code": "DEpp", "bus": "r"},
{"mnem": "STU p", "code": "DFpp", "bus": "w"},
{"mnem": "SUBB y", "code": "E0yy", "bus": ""},
{"mnem": "CMPB y", "code": "E1yy", "bus": ""},
{"mnem": "SBCB y", "code": "E2yy", "bus": ""},
{"mnem": "ADDD y", "code": "E3yy", "bus": ""},
{"mnem": "ANDB y", "code": "E4yy", "bus": ""},
{"mnem": "BITB y", "code": "E5yy", "bus": ""},
{"mnem": "LDB y", "code": "E6yy", "bus": ""},
{"mnem": "STB y", "code": "E7yy", "bus": ""},
{"mnem": "EORB y", "code": "E8yy", "bus": ""},
{"mnem": "ADCB y", "code": "E9yy", "bus": ""},
{"mnem": "ORB y", "code": "EAyy", "bus": ""},
{"mnem": "ADDB y", "code": "EByy", "bus": ""},
{"mnem": "LDD y", "code": "ECyy", "bus": ""},
{"mnem": "STD y", "code": "EDyy", "bus": ""},
{"mnem": "LDU y", "code": "EEyy", "bus": ""},
{"mnem": "STU y", "code": "EFyy", "bus": ""},
{"mnem": "SUBB t", "code": "F0tmtl", "bus": "r"},
{"mnem": "CMPB t", "code": "F1tmtl", "bus": "r"},
{"mnem": "SBCB t", "code": "F2tmtl", "bus": "r"},
{"mnem": "ADDD t", "code": "F3tmtl", "bus": "r"},
{"mnem": "ANDB t", "code": "F4tmtl", "bus": "r"},
{"mnem": "BITB t", "code": "F5tmtl", "bus": "r"},
{"mnem": "LDB t", "code": "F6tmtl", "bus": "r"},
{"mnem": "STB t", "code": "F7tmtl", "bus": "w"},
{"mnem": "EORB t", "code": "F8tmtl", "bus": "r"},
{"mnem": "ADCB t", "code": "F9tmtl", "bus": "r"},
{"mnem": "ORB t", "code": "FAtmtl", "bus": "r"},
{"mnem": "ADDB t", "code": "FBtmtl", "bus": "r"},
{"mnem": "LDD t", "code": "FCtmtl", "bus": "r"},
{"mnem": "STD t", "code": "FDtmtl", "bus": "w"},
{"mnem": "LDU t", "code": "FEtmtl", "bus": "r"},
{"mnem": "STU t", "code": "FFtmtl", "bus": "w"},
]
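# Illustrative helpers (an assumed usage pattern, not part of the original
# module): lowercase letters in each "code" string are operand placeholders
# (e.g. bb = immediate byte, wmwl = 16-bit word, pp = direct page, tmtl =
# extended address), so an entry's fixed bytes are its leading uppercase hex
# digits. Example: find_opcodes("86") returns the "LDA #b" entry.
def _fixed_hex(code):
    # Return the leading uppercase-hex portion of an opcode pattern.
    i = 0
    while i < len(code) and (code[i].isdigit() or code[i] in "ABCDEF"):
        i += 1
    return code[:i]
def find_opcodes(hex_prefix):
    # Return every OPCODES entry whose fixed byte pattern equals hex_prefix.
    hex_prefix = hex_prefix.upper()
    return [entry for entry in OPCODES if _fixed_hex(entry["code"]) == hex_prefix]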
POSTS = [
{"post": "0,X", "code": "00"},
{"post": "1,X", "code": "01"},
{"post": "2,X", "code": "02"},
{"post": "3,X", "code": "03"},
{"post": "4,X", "code": "04"},
{"post": "5,X", "code": "05"},
{"post": "6,X", "code": "06"},
{"post": "7,X", "code": "07"},
{"post": "8,X", "code": "08"},
{"post": "9,X", "code": "09"},
{"post": "10,X", "code": "0A"},
{"post": "11,X", "code": "0B"},
{"post": "12,X", "code": "0C"},
{"post": "13,X", "code": "0D"},
{"post": "14,X", "code": "0E"},
{"post": "15,X", "code": "0F"},
{"post": "-16,X", "code": "10"},
{"post": "-15,X", "code": "11"},
{"post": "-14,X", "code": "12"},
{"post": "-13,X", "code": "13"},
{"post": "-12,X", "code": "14"},
{"post": "-11,X", "code": "15"},
{"post": "-10,X", "code": "16"},
{"post": "-9,X", "code": "17"},
{"post": "-8,X", "code": "18"},
{"post": "-7,X", "code": "19"},
{"post": "-6,X", "code": "1A"},
{"post": "-5,X", "code": "1B"},
{"post": "-4,X", "code": "1C"},
{"post": "-3,X", "code": "1D"},
{"post": "-2,X", "code": "1E"},
{"post": "-1,X", "code": "1F"},
{"post": "0,Y", "code": "20"},
{"post": "1,Y", "code": "21"},
{"post": "2,Y", "code": "22"},
{"post": "3,Y", "code": "23"},
{"post": "4,Y", "code": "24"},
{"post": "5,Y", "code": "25"},
{"post": "6,Y", "code": "26"},
{"post": "7,Y", "code": "27"},
{"post": "8,Y", "code": "28"},
{"post": "9,Y", "code": "29"},
{"post": "10,Y", "code": "2A"},
{"post": "11,Y", "code": "2B"},
{"post": "12,Y", "code": "2C"},
{"post": "13,Y", "code": "2D"},
{"post": "14,Y", "code": "2E"},
{"post": "15,Y", "code": "2F"},
{"post": "-16,Y", "code": "30"},
{"post": "-15,Y", "code": "31"},
{"post": "-14,Y", "code": "32"},
{"post": "-13,Y", "code": "33"},
{"post": "-12,Y", "code": "34"},
{"post": "-11,Y", "code": "35"},
{"post": "-10,Y", "code": "36"},
{"post": "-9,Y", "code": "37"},
{"post": "-8,Y", "code": "38"},
{"post": "-7,Y", "code": "39"},
{"post": "-6,Y", "code": "3A"},
{"post": "-5,Y", "code": "3B"},
{"post": "-4,Y", "code": "3C"},
{"post": "-3,Y", "code": "3D"},
{"post": "-2,Y", "code": "3E"},
{"post": "-1,Y", "code": "3F"},
{"post": "0,U", "code": "40"},
{"post": "1,U", "code": "41"},
{"post": "2,U", "code": "42"},
{"post": "3,U", "code": "43"},
{"post": "4,U", "code": "44"},
{"post": "5,U", "code": "45"},
{"post": "6,U", "code": "46"},
{"post": "7,U", "code": "47"},
{"post": "8,U", "code": "48"},
{"post": "9,U", "code": "49"},
{"post": "10,U", "code": "4A"},
{"post": "11,U", "code": "4B"},
{"post": "12,U", "code": "4C"},
{"post": "13,U", "code": "4D"},
{"post": "14,U", "code": "4E"},
{"post": "15,U", "code": "4F"},
{"post": "-16,U", "code": "50"},
{"post": "-15,U", "code": "51"},
{"post": "-14,U", "code": "52"},
{"post": "-13,U", "code": "53"},
{"post": "-12,U", "code": "54"},
{"post": "-11,U", "code": "55"},
{"post": "-10,U", "code": "56"},
{"post": "-9,U", "code": "57"},
{"post": "-8,U", "code": "58"},
{"post": "-7,U", "code": "59"},
{"post": "-6,U", "code": "5A"},
{"post": "-5,U", "code": "5B"},
{"post": "-4,U", "code": "5C"},
{"post": "-3,U", "code": "5D"},
{"post": "-2,U", "code": "5E"},
{"post": "-1,U", "code": "5F"},
{"post": "0,S", "code": "60"},
{"post": "1,S", "code": "61"},
{"post": "2,S", "code": "62"},
{"post": "3,S", "code": "63"},
{"post": "4,S", "code": "64"},
{"post": "5,S", "code": "65"},
{"post": "6,S", "code": "66"},
{"post": "7,S", "code": "67"},
{"post": "8,S", "code": "68"},
{"post": "9,S", "code": "69"},
{"post": "10,S", "code": "6A"},
{"post": "11,S", "code": "6B"},
{"post": "12,S", "code": "6C"},
{"post": "13,S", "code": "6D"},
{"post": "14,S", "code": "6E"},
{"post": "15,S", "code": "6F"},
{"post": "-16,S", "code": "70"},
{"post": "-15,S", "code": "71"},
{"post": "-14,S", "code": "72"},
{"post": "-13,S", "code": "73"},
{"post": "-12,S", "code": "74"},
{"post": "-11,S", "code": "75"},
{"post": "-10,S", "code": "76"},
{"post": "-9,S", "code": "77"},
{"post": "-8,S", "code": "78"},
{"post": "-7,S", "code": "79"},
{"post": "-6,S", "code": "7A"},
{"post": "-5,S", "code": "7B"},
{"post": "-4,S", "code": "7C"},
{"post": "-3,S", "code": "7D"},
{"post": "-2,S", "code": "7E"},
{"post": "-1,S", "code": "7F"},
{"post": ",X+", "code": "80"},
{"post": ",X++", "code": "81"},
{"post": ",-X", "code": "82"},
{"post": ",--X", "code": "83"},
{"post": ",X", "code": "84"},
{"post": "B,X", "code": "85"},
{"post": "A,X", "code": "86"},
{"post": "i,X", "code": "88ii"},
{"post": "k,X", "code": "89kmkl"},
{"post": "D,X", "code": "8B"},
{"post": "i,PC", "code": "8Cii"},
{"post": "k,PC", "code": "8Dkmkl"},
{"post": "[,X++]", "code": "91"},
{"post": "[,--X]", "code": "93"},
{"post": "[,X]", "code": "94"},
{"post": "[B,X]", "code": "95"},
{"post": "[A,X]", "code": "96"},
{"post": "[i,X]", "code": "98ii"},
{"post": "[k,X]", "code": "99kmkl"},
{"post": "[D,X]", "code": "9B"},
{"post": "[i,PC]", "code": "9Cii"},
{"post": "[k,PC]", "code": "9Dkmkl"},
{"post": "[t]", "code": "9Ftmtl"},
{"post": ",Y+", "code": "A0"},
{"post": ",Y++", "code": "A1"},
{"post": ",-Y", "code": "A2"},
{"post": ",--Y", "code": "A3"},
{"post": ",Y", "code": "A4"},
{"post": "B,Y", "code": "A5"},
{"post": "A,Y", "code": "A6"},
{"post": "i,Y", "code": "A8ii"},
{"post": "k,Y", "code": "A9kmkl"},
{"post": "D,Y", "code": "AB"},
{"post": "i,PC", "code": "ACii"},
{"post": "k,PC", "code": "ADkmkl"},
{"post": "[,Y++]", "code": "B1"},
{"post": "[,--Y]", "code": "B3"},
{"post": "[,Y]", "code": "B4"},
{"post": "[B,Y]", "code": "B5"},
{"post": "[A,Y]", "code": "B6"},
{"post": "[i,Y]", "code": "B8ii"},
{"post": "[k,Y]", "code": "B9kmkl"},
{"post": "[D,Y]", "code": "BB"},
{"post": "[i,PC]", "code": "BCii"},
{"post": "[k,PC]", "code": "BDkmkl"},
{"post": "[t]", "code": "BFtmtl"},
{"post": ",U+", "code": "C0"},
{"post": ",U++", "code": "C1"},
{"post": ",-U", "code": "C2"},
{"post": ",--U", "code": "C3"},
{"post": ",U", "code": "C4"},
{"post": "B,U", "code": "C5"},
{"post": "A,U", "code": "C6"},
{"post": "i,U", "code": "C8ii"},
{"post": "k,U", "code": "C9kmkl"},
{"post": "D,U", "code": "CB"},
{"post": "i,PC", "code": "CCii"},
{"post": "k,PC", "code": "CDkmkl"},
{"post": "[,U++]", "code": "D1"},
{"post": "[,--U]", "code": "D3"},
{"post": "[,U]", "code": "D4"},
{"post": "[B,U]", "code": "D5"},
{"post": "[A,U]", "code": "D6"},
{"post": "[i,U]", "code": "D8ii"},
{"post": "[k,U]", "code": "D9kmkl"},
{"post": "[D,U]", "code": "DB"},
{"post": "[i,PC]", "code": "DCii"},
{"post": "[k,PC]", "code": "DDkmkl"},
{"post": "[t]", "code": "DFtmtl"},
{"post": ",S+", "code": "E0"},
{"post": ",S++", "code": "E1"},
{"post": ",-S", "code": "E2"},
{"post": ",--S", "code": "E3"},
{"post": ",S", "code": "E4"},
{"post": "B,S", "code": "E5"},
{"post": "A,S", "code": "E6"},
{"post": "i,S", "code": "E8ii"},
{"post": "k,S", "code": "E9kmkl"},
{"post": "i,X", "code": "EAii"},
{"post": "D,S", "code": "EB"},
{"post": "i,PC", "code": "ECii"},
{"post": "k,PC", "code": "EDkmkl"},
{"post": "[,S++]", "code": "F1"},
{"post": "[,--S]", "code": "F3"},
{"post": "[,S]", "code": "F4"},
{"post": "[B,S]", "code": "F5"},
{"post": "[A,S]", "code": "F6"},
{"post": "[i,S]", "code": "F8ii"},
{"post": "[k,S]", "code": "F9kmkl"},
{"post": "[D,S]", "code": "FB"},
{"post": "[i,PC]", "code": "FCii"},
{"post": "[k,PC]", "code": "FDkmkl"},
{"post": "[t]", "code": "FFtmtl"}
]
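# A worked example of the placeholder convention used in the tables above
# (inferred from the entries themselves, so treat it as a sketch): "pp" is a
# direct-page operand byte, "ii" an 8-bit offset, "kmkl"/"tmtl" the high/low
# bytes of a 16-bit operand, and "yy" the indexed postbyte substituted from
# POSTS. For instance, "LDB y" ("E6yy") plus {"post": "5,X", "code": "05"}
# encodes LDB 5,X as the bytes E6 05:
assert "E6yy".replace("yy", "05") == "E605"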
import cpu.cpu_common
# TODO: LDA $FF00,PC ; PC relative
class CPU_6809(cpu.cpu_common.CPU):
def __init__(self):
self._opcodes = []
# Expand the "post" mnemonics
for entry in OPCODES:
if 'y' in entry['mnem']:
for post in POSTS:
new_mnem = entry['mnem'].replace('y', post['post'])
new_code = entry['code'].replace('yy', post['code'])
                    # Bus type is deliberately left blank for the expanded
                    # indexed forms (the original entry['bus'] is not kept).
                    new_entry = {'mnem': new_mnem, 'code': new_code,
                                 'bus': ''}
self._opcodes.append(new_entry)
else:
self._opcodes.append(entry)
self._make_data_map()
self._make_frags()
SINGLETON = CPU_6809()
def get_cpu():
return SINGLETON
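# Minimal usage sketch (illustrative only; it peeks at the private _opcodes
# list built above and assumes the cpu.cpu_common package is importable):
if __name__ == '__main__':
    _cpu = get_cpu()
    # "LDB y"/"E6yy" expands with the ",X++" postbyte (81) to code "E681".
    hits = [op for op in _cpu._opcodes if op['mnem'] == 'LDB ,X++']
    assert hits and hits[0]['code'] == 'E681'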
# ---------------------------------------------------------------------------
# File: /src/main/com/libin/yfl/2.py  (repo: BigDataRoad/Algorithm)
# ---------------------------------------------------------------------------
'''
Interview Question 50: the first character that appears only once.
Find the first character in the string s that occurs exactly once.
If there is none, return a single space.
Examples:
s = "abaccdeff"
returns "b"
s = ""
returns " "
'''
class Solution:
def firstUniqChar(self, s: str) -> str:
d1 = {}
for each in s:
if each not in d1:
d1[each] = 1
else:
d1[each] += 1
for key in s:
if d1[key] == 1:
return key
        return ' '
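# Quick check mirroring the examples from the docstring above:
if __name__ == '__main__':
    assert Solution().firstUniqChar("abaccdeff") == "b"
    assert Solution().firstUniqChar("") == " "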
"[email protected]"
] | |
1b41e87af402abd0b44ebbe92b9d6c0550a0335c | aa0270b351402e421631ebc8b51e528448302fab | /sdk/servicenetworking/azure-mgmt-servicenetworking/generated_samples/frontend_patch.py | cb930d459dcae096783768ee9d0d2b0061cf2ee1 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 1,803 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.servicenetworking import ServiceNetworkingMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-servicenetworking
# USAGE
python frontend_patch.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ServiceNetworkingMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.frontends_interface.update(
resource_group_name="rg1",
traffic_controller_name="TC1",
frontend_name="publicIp1",
properties={
"properties": {
"ipAddressVersion": "IPv4",
"mode": "public",
"publicIPAddress": {"id": "resourceUriAsString"},
}
},
)
print(response)
# x-ms-original-file: specification/servicenetworking/resource-manager/Microsoft.ServiceNetworking/cadl/examples/FrontendPatch.json
if __name__ == "__main__":
main()
# ---------------------------------------------------------------------------
# File: /source/rttov_test/profile-datasets-py/div52_zen30deg/021.py
# (repo: bucricket/projectMAScorrection, license: BSD-3-Clause)
# ---------------------------------------------------------------------------
"""
Profile ../profile-datasets-py/div52_zen30deg/021.py
file automaticaly created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div52_zen30deg/021.py"
self["Q"] = numpy.array([ 1.60776800e+00, 4.96291500e+00, 5.49166000e+00,
8.63497500e+00, 8.20432100e+00, 4.71451600e+00,
7.53042900e+00, 6.45062500e+00, 7.32791500e+00,
6.09129000e+00, 6.53328000e+00, 6.09892700e+00,
5.41468000e+00, 4.82088500e+00, 5.19650700e+00,
4.78787800e+00, 4.61391800e+00, 4.51615000e+00,
4.18020800e+00, 4.24947000e+00, 4.35707800e+00,
4.34019600e+00, 4.34783300e+00, 4.57123200e+00,
4.63782500e+00, 4.61316200e+00, 4.88699600e+00,
5.15021900e+00, 4.82279800e+00, 4.54351400e+00,
4.69757000e+00, 4.84664100e+00, 4.87064500e+00,
4.88923100e+00, 4.79638300e+00, 4.65747200e+00,
4.44730500e+00, 4.08568700e+00, 3.73390800e+00,
3.62038400e+00, 3.53332400e+00, 3.80230300e+00,
4.46347900e+00, 5.08851300e+00, 4.74686400e+00,
4.41323700e+00, 5.46255900e+00, 7.50013800e+00,
9.53781100e+00, 1.18247000e+01, 1.40622900e+01,
3.19749100e+01, 5.60073600e+01, 6.98680800e+01,
6.08089100e+01, 5.19302300e+01, 1.47392500e+02,
2.50739900e+02, 3.63506700e+02, 4.83033400e+02,
5.14670300e+02, 2.68360800e+02, 2.64730900e+01,
6.18418600e+02, 1.25616400e+03, 1.28303900e+03,
1.01003400e+03, 1.37296000e+03, 2.57034800e+03,
3.35517100e+03, 2.79701700e+03, 2.41467200e+03,
4.24567400e+03, 6.04482200e+03, 6.71999900e+03,
7.34445100e+03, 7.30587000e+03, 7.20975600e+03,
8.38216200e+03, 9.60669300e+03, 1.02437100e+04,
1.10494100e+04, 1.25787000e+04, 1.40774200e+04,
1.55448200e+04, 1.77460000e+04, 1.98104900e+04,
2.08061500e+04, 2.21008500e+04, 2.35950100e+04,
2.52477200e+04, 2.68805500e+04, 2.84897900e+04,
2.99650500e+04, 3.16194800e+04, 3.32942300e+04,
3.52573600e+04, 3.47091200e+04, 3.37659700e+04,
3.28604100e+04, 3.19904700e+04])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56504000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61259000e+01, 6.09895000e+01, 6.61252000e+01,
7.15398000e+01, 7.72395000e+01, 8.32310000e+01,
8.95203000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17777000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23441000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90892000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53627000e+02, 7.77789000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31523000e+02, 9.58591000e+02,
9.86066000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 386.0254, 386.0241, 386.0239, 386.0227, 386.0228, 386.0242,
386.0231, 386.0235, 386.0232, 386.0236, 386.0235, 386.0236,
386.0239, 386.0241, 386.024 , 386.0242, 386.0242, 386.0243,
386.0244, 386.0244, 386.0243, 386.0243, 386.0243, 386.0242,
386.0242, 386.0242, 386.0241, 386.024 , 386.0241, 386.0242,
386.0242, 386.6141, 387.2441, 387.9151, 388.6291, 389.3872,
390.1903, 391.0404, 391.9375, 392.8836, 393.8796, 394.9255,
396.0242, 396.024 , 396.0241, 396.0243, 396.0238, 396.023 ,
396.0222, 396.0213, 396.0204, 396.0133, 396.0038, 395.9983,
396.0019, 396.0054, 395.9676, 395.9267, 395.882 , 395.8347,
395.8222, 395.9197, 396.0155, 395.7811, 395.5285, 395.5179,
395.626 , 395.4823, 395.0081, 394.6973, 394.9183, 395.0697,
394.3446, 393.6321, 393.3647, 393.1174, 393.1327, 393.1707,
392.7064, 392.2215, 391.9692, 391.6501, 391.0445, 390.451 ,
389.8698, 388.9981, 388.1805, 387.7862, 387.2735, 386.6818,
386.0272, 385.3806, 384.7433, 384.1591, 383.5039, 382.8406,
382.0632, 382.2803, 382.6538, 383.0124, 383.3569])
self["T"] = numpy.array([ 185.568, 200.774, 209.281, 220.997, 238.837, 241.329,
243.082, 244.041, 249.099, 257.023, 264.999, 269.199,
266.263, 255.695, 246.758, 244.542, 243.847, 242.548,
240.832, 238.854, 235.756, 231.388, 227.103, 224.277,
222.528, 221.423, 220.26 , 219.084, 217.169, 215.25 ,
212.593, 210.021, 208.788, 207.641, 205.887, 203.904,
202.142, 200.775, 199.445, 198.066, 196.714, 195.349,
193.965, 192.678, 194.447, 196.175, 198.372, 200.895,
203.445, 206.477, 209.443, 212.631, 215.871, 218.533,
219.948, 221.334, 223.879, 226.484, 229.27 , 232.182,
235.107, 238.191, 241.22 , 243.517, 245.728, 248.003,
250.29 , 252.426, 254.374, 256.372, 258.615, 260.771,
262.211, 263.629, 265.147, 266.645, 268.476, 270.309,
271.089, 271.804, 273.153, 274.669, 276.938, 279.576,
282.672, 284.346, 285.623, 286.864, 288.279, 289.789,
291.353, 292.845, 294.271, 295.579, 296.947, 298.289,
299.747, 301.566, 301.566, 301.566, 301.566])
self["O3"] = numpy.array([ 0.1992563 , 0.2438973 , 0.3335824 , 0.4884188 , 0.8082281 ,
1.163902 , 1.628981 , 2.257051 , 2.621339 , 3.010675 ,
3.474818 , 4.102113 , 5.015732 , 6.206915 , 7.104918 ,
7.644655 , 8.104313 , 8.500848 , 8.867273 , 9.176608 ,
9.21716 , 8.957429 , 8.510083 , 7.774582 , 6.934988 ,
6.047845 , 4.998211 , 3.977377 , 3.205491 , 2.470918 ,
1.88565 , 1.319332 , 1.029309 , 0.7582654 , 0.585946 ,
0.4585025 , 0.3595297 , 0.3152562 , 0.2721848 , 0.2434861 ,
0.2168923 , 0.1859143 , 0.1500032 , 0.1155953 , 0.1118159 ,
0.1081247 , 0.1056317 , 0.1040149 , 0.1020589 , 0.09772901,
0.09349205, 0.08800219, 0.08207214, 0.0780318 , 0.07820689,
0.07837836, 0.07664768, 0.07477256, 0.07337247, 0.07234075,
0.07136318, 0.07051566, 0.06968369, 0.06908942, 0.0685216 ,
0.06816598, 0.06791696, 0.06764521, 0.0673414 , 0.067031 ,
0.06668402, 0.06631829, 0.06563474, 0.06496493, 0.0642381 ,
0.06352082, 0.06267206, 0.06182321, 0.06075209, 0.0596827 ,
0.05857365, 0.05750458, 0.05654487, 0.05569769, 0.05498312,
0.0543608 , 0.05380138, 0.05335462, 0.05303091, 0.05272092,
0.05236987, 0.05199499, 0.05163657, 0.05130774, 0.05096738,
0.05058044, 0.04975676, 0.04751281, 0.04755923, 0.04760381,
0.04764663])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 301.566
self["S2M"]["Q"] = 34933.9529133
self["S2M"]["O"] = 0.0475017434262
self["S2M"]["P"] = 1007.43
self["S2M"]["U"] = 0.818985
self["S2M"]["V"] = 0.10112
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 1
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 302.557
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 30.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = 5.04572
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([1993, 11, 15])
self["TIME"] = numpy.array([18, 0, 0])
# ---------------------------------------------------------------------------
# File: /ghidra9.2.1_pyi/ghidra/app/util/bin/format/pef/RelocLgByImport.pyi
# (repo: kohnakagawa/ghidra_scripts, license: MIT)
# ---------------------------------------------------------------------------
import ghidra.app.util.bin.format.pef
import ghidra.app.util.importer
import ghidra.program.model.data
import ghidra.program.model.listing
import ghidra.util.task
import java.lang
class RelocLgByImport(ghidra.app.util.bin.format.pef.Relocation):
"""
See Apple's -- PEFBinaryFormat.h
"""
def apply(self, importState: ghidra.app.util.bin.format.pef.ImportStateCache, relocState: ghidra.app.util.bin.format.pef.RelocationState, header: ghidra.app.util.bin.format.pef.ContainerHeader, program: ghidra.program.model.listing.Program, log: ghidra.app.util.importer.MessageLog, monitor: ghidra.util.task.TaskMonitor) -> None: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getIndex(self) -> int: ...
def getOpcode(self) -> int: ...
def getSizeInBytes(self) -> int: ...
def hashCode(self) -> int: ...
def isMatch(self) -> bool: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toDataType(self) -> ghidra.program.model.data.DataType: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def index(self) -> int: ...
@property
def match(self) -> bool: ...
@property
def sizeInBytes(self) -> int: ...
# ---------------------------------------------------------------------------
# File: /the-python-standard-library-by-example/math/math_inverse_trig.py
# (repo: gottaegbert/penter, license: MIT)
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Inverse trigonometric functions
"""
#end_pymotw_header
import math
for r in [ 0, 0.5, 1 ]:
print 'arcsine(%.1f) = %5.2f' % (r, math.asin(r))
print 'arccosine(%.1f) = %5.2f' % (r, math.acos(r))
print 'arctangent(%.1f) = %5.2f' % (r, math.atan(r))
print
# ---------------------------------------------------------------------------
# File: /lab_03_magic_8_ball.py  (repo: tomaccosheep/simple_django_render)
# ---------------------------------------------------------------------------
'''DJANGO STUFF'''
import os
# Point Django at the settings module before rendering any templates.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.shortcuts import render
'''MAGIC 8 BALL LAB'''
import random
input("What's your question?\n:")
with open('lab_03_out.html', 'w') as f:
    answer = random.choice(['It will happen', "It won't happen"])
    html = render(None, 'lab_03_in.html', {'answer': answer}).content.decode('utf-8')
    f.write(html)
# ---------------------------------------------------------------------------
# File: /tums/trunk/etch-release/reportlab/platypus/tables.py  (repo: calston/tums)
# ---------------------------------------------------------------------------
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/tables.py
__version__=''' $Id: tables.py 3269 2008-09-03 17:22:41Z rgbecker $ '''
__doc__="""
Tables are created by passing the constructor a tuple of column widths, a tuple of row heights and the data in
row order. Drawing of the table can be controlled by using a TableStyle instance. This allows control of the
color and weight of the lines (if any), and the font, alignment and padding of the text.
None values in the sequence of row heights or column widths, mean that the corresponding rows
or columns should be automatically sized.
All the cell values should be convertible to strings; embedded newline '\\n' characters
cause the value to wrap (ie are like a traditional linefeed).
See the test output from running this module as a script for a discussion of the method for constructing
tables and table styles.
"""
from reportlab.platypus.flowables import Flowable, Preformatted
from reportlab import rl_config
from reportlab.lib.styles import PropertySet, ParagraphStyle
from reportlab.lib import colors
from reportlab.lib.utils import fp_str
from reportlab.pdfbase.pdfmetrics import stringWidth
import operator, string
from types import TupleType, ListType, StringType, FloatType, IntType
class CellStyle(PropertySet):
defaults = {
'fontname':'Times-Roman',
'fontsize':10,
'leading':12,
'leftPadding':6,
'rightPadding':6,
'topPadding':3,
'bottomPadding':3,
'firstLineIndent':0,
'color':colors.black,
'alignment': 'LEFT',
'background': (1,1,1),
'valign': 'BOTTOM',
'href': None,
'destination':None,
}
LINECAPS={None: None, 'butt':0,'round':1,'projecting':2,'squared':2}
LINEJOINS={None: None, 'miter':0, 'mitre':0, 'round':1,'bevel':2}
# experimental replacement
class CellStyle1(PropertySet):
fontname = "Times-Roman"
fontsize = 10
leading = 12
leftPadding = 6
rightPadding = 6
topPadding = 3
bottomPadding = 3
firstLineIndent = 0
color = colors.black
alignment = 'LEFT'
background = (1,1,1)
valign = "BOTTOM"
href = None
destination = None
def __init__(self, name, parent=None):
self.name = name
if parent is not None:
parent.copy(self)
def copy(self, result=None):
if result is None:
result = CellStyle1()
for name in dir(self):
setattr(result, name, getattr(self, name))
return result
CellStyle = CellStyle1
class TableStyle:
def __init__(self, cmds=None, parent=None, **kw):
#handle inheritance from parent first.
commands = []
if parent:
# copy the parents list at construction time
commands = commands + parent.getCommands()
self._opts = parent._opts
for a in ('spaceBefore','spaceAfter'):
if hasattr(parent,a):
setattr(self,a,getattr(parent,a))
if cmds:
commands = commands + list(cmds)
self._cmds = commands
self._opts={}
self._opts.update(kw)
def add(self, *cmd):
self._cmds.append(cmd)
def __repr__(self):
L = map(repr, self._cmds)
import string
L = string.join(L, " \n")
return "TableStyle(\n%s\n) # end TableStyle" % L
def getCommands(self):
return self._cmds
TableStyleType = type(TableStyle())
_SeqTypes = (TupleType, ListType)
def _rowLen(x):
return type(x) not in _SeqTypes and 1 or len(x)
def _calc_pc(V,avail):
'''check list V for percentage or * values
1) absolute values go through unchanged
2) percentages are used as weights for unconsumed space
3) if no None values were seen '*' weights are
set equally with unclaimed space
otherwise * weights are assigned as None'''
R = []
r = R.append
I = []
i = I.append
J = []
j = J.append
s = avail
w = n = 0.
for v in V:
if type(v) is type(""):
v = v.strip()
if not v:
v = None
n += 1
elif v.endswith('%'):
v = float(v[:-1])
w += v
i(len(R))
elif v=='*':
j(len(R))
else:
v = float(v)
s -= v
elif v is None:
n += 1
else:
s -= v
r(v)
s = max(0.,s)
f = s/max(100.,w)
for i in I:
R[i] *= f
s -= R[i]
s = max(0.,s)
m = len(J)
if m:
v = n==0 and s/m or None
for j in J:
R[j] = v
return R
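# Worked example, traced from the code above: _calc_pc([50, '25%', '*', None], 200)
# returns [50, 37.5, None, None] -- the absolute 50 is consumed first (leaving
# 150), the 25% weight claims 25/max(100, w) of that remainder (37.5), and the
# '*' column stays None because a None (auto) column is present.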
def _hLine(canvLine, scp, ecp, y, hBlocks, FUZZ=rl_config._FUZZ):
'''
Draw horizontal lines; do not draw through regions specified in hBlocks
This also serves for vertical lines with a suitable canvLine
'''
if hBlocks: hBlocks = hBlocks.get(y,None)
if not hBlocks or scp>=hBlocks[-1][1]-FUZZ or ecp<=hBlocks[0][0]+FUZZ:
canvLine(scp,y,ecp,y)
else:
i = 0
n = len(hBlocks)
while scp<ecp-FUZZ and i<n:
x0, x1 = hBlocks[i]
if x1<=scp+FUZZ or x0>=ecp-FUZZ:
i += 1
continue
i0 = max(scp,x0)
i1 = min(ecp,x1)
if i0>scp: canvLine(scp,y,i0,y)
scp = i1
if scp<ecp-FUZZ: canvLine(scp,y,ecp,y)
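# Example of the blocking behaviour, traced from the code above: with
# hBlocks == {y: [(10, 20)]}, _hLine(line, 0, 30, y, hBlocks) emits the two
# segments (0,y)-(10,y) and (20,y)-(30,y), skipping the spanned region.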
def _multiLine(scp,ecp,y,canvLine,ws,count):
offset = 0.5*(count-1)*ws
y += offset
for idx in xrange(count):
canvLine(scp, y, ecp, y)
y -= ws
def _convert2int(value, map, low, high, name, cmd):
'''private converter tries map(value) low<=int(value)<=high or finally an error'''
try:
return map[value]
except KeyError:
try:
ivalue = int(value)
if low<=ivalue<=high: return ivalue
except:
pass
raise ValueError('Bad %s value %s in %s'%(name,value,str(cmd)))
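# e.g. with the LINECAPS map above, _convert2int('round', LINECAPS, 0, 2,
# 'cap', cmd) returns 1 and _convert2int(2, LINECAPS, 0, 2, 'cap', cmd)
# returns 2; anything else raises ValueError naming the offending command.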
def _endswith(obj,s):
try:
return obj.endswith(s)
except:
return 0
def spanFixDim(V0,V,spanCons,FUZZ=rl_config._FUZZ):
#assign required space to variable rows equally to existing calculated values
M = {}
for (x0,x1),v in spanCons.iteritems():
t = sum([V[x]+M.get(x,0) for x in xrange(x0,x1+1)])
if t>=v-FUZZ: continue #already good enough
X = [x for x in xrange(x0,x1+1) if V0[x] is None] #variable candidates
if not X: continue #something wrong here mate
v -= t
v /= float(len(X))
for x in X:
M[x] = max(M.get(x,v),v)
for x,v in M.iteritems():
V[x] += v
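# Worked example, traced from the code above: with V0 = [None, 30],
# V = [20, 30] and spanCons = {(0, 1): 70}, the span needs 70 points but only
# 50 are allocated; the 20-point shortfall goes entirely to column 0 (the only
# auto-sized column), leaving V == [40, 30].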
class Table(Flowable):
def __init__(self, data, colWidths=None, rowHeights=None, style=None,
repeatRows=0, repeatCols=0, splitByRow=1, emptyTableAction=None, ident=None,
hAlign=None,vAlign=None):
self.ident = ident
self.hAlign = hAlign or 'CENTER'
self.vAlign = vAlign or 'MIDDLE'
if type(data) not in _SeqTypes:
raise ValueError("%s invalid data type" % self.identity())
self._nrows = nrows = len(data)
self._cellvalues = []
_seqCW = type(colWidths) in _SeqTypes
_seqRH = type(rowHeights) in _SeqTypes
if nrows: self._ncols = ncols = max(map(_rowLen,data))
elif colWidths and _seqCW: ncols = len(colWidths)
else: ncols = 0
if not emptyTableAction: emptyTableAction = rl_config.emptyTableAction
if not (nrows and ncols):
if emptyTableAction=='error':
raise ValueError("%s must have at least a row and column" % self.identity())
elif emptyTableAction=='indicate':
self.__class__ = Preformatted
global _emptyTableStyle
if '_emptyTableStyle' not in globals().keys():
_emptyTableStyle = ParagraphStyle('_emptyTableStyle')
_emptyTableStyle.textColor = colors.red
_emptyTableStyle.backColor = colors.yellow
Preformatted.__init__(self,'%s(%d,%d)' % (self.__class__.__name__,nrows,ncols), _emptyTableStyle)
elif emptyTableAction=='ignore':
self.__class__ = Spacer
Spacer.__init__(self,0,0)
else:
raise ValueError('%s bad emptyTableAction: "%s"' % (self.identity(),emptyTableAction))
return
# we need a cleanup pass to ensure data is strings - non-unicode and non-null
self._cellvalues = self.normalizeData(data)
if not _seqCW: colWidths = ncols*[colWidths]
elif len(colWidths)!=ncols:
if rl_config.allowShortTableRows and isinstance(colWidths,list):
n = len(colWidths)
if n<ncols:
colWidths[n:] = (ncols-n)*[colWidths[-1]]
else:
colWidths = colWidths[:ncols]
else:
raise ValueError("%s data error - %d columns in data but %d in column widths" % (self.identity(),ncols, len(colWidths)))
if not _seqRH: rowHeights = nrows*[rowHeights]
elif len(rowHeights) != nrows:
raise ValueError("%s data error - %d rows in data but %d in row heights" % (self.identity(),nrows, len(rowHeights)))
for i,d in enumerate(data):
n = len(d)
if n!=ncols:
if rl_config.allowShortTableRows and isinstance(d,list):
d[n:] = (ncols-n)*['']
else:
raise ValueError("%s expected %d not %d columns in row %d!" % (self.identity(),ncols,n,i))
self._rowHeights = self._argH = rowHeights
self._colWidths = self._argW = colWidths
cellrows = []
for i in xrange(nrows):
cellcols = []
for j in xrange(ncols):
cellcols.append(CellStyle(`(i,j)`))
cellrows.append(cellcols)
self._cellStyles = cellrows
self._bkgrndcmds = []
self._linecmds = []
self._spanCmds = []
self._nosplitCmds = []
self.repeatRows = repeatRows
self.repeatCols = repeatCols
self.splitByRow = splitByRow
if style:
self.setStyle(style)
def __repr__(self):
"incomplete, but better than nothing"
r = getattr(self,'_rowHeights','[unknown]')
c = getattr(self,'_colWidths','[unknown]')
cv = getattr(self,'_cellvalues','[unknown]')
import pprint, string
cv = pprint.pformat(cv)
cv = string.replace(cv, "\n", "\n ")
return "%s(\n rowHeights=%s,\n colWidths=%s,\n%s\n) # end table" % (self.__class__.__name__,r,c,cv)
def normalizeData(self, data):
"""Takes a block of input data (list of lists etc.) and
- coerces unicode strings to non-unicode UTF8
- coerces nulls to ''
"""
def normCell(stuff):
if stuff is None:
return ''
elif type(stuff) == type(u''):
return stuff.encode('utf8')
else:
return stuff
outData = []
for row in data:
outRow = [normCell(cell) for cell in row]
outData.append(outRow)
return outData
def identity(self, maxLen=30):
'''Identify our selves as well as possible'''
if self.ident: return self.ident
vx = None
nr = getattr(self,'_nrows','unknown')
nc = getattr(self,'_ncols','unknown')
cv = getattr(self,'_cellvalues',None)
if cv and 'unknown' not in (nr,nc):
b = 0
for i in xrange(nr):
for j in xrange(nc):
v = cv[i][j]
t = type(v)
if t in _SeqTypes or isinstance(v,Flowable):
if not t in _SeqTypes: v = (v,)
r = ''
for vij in v:
r = vij.identity(maxLen)
if r and r[-4:]!='>...':
break
if r and r[-4:]!='>...':
ix, jx, vx, b = i, j, r, 1
else:
v = v is None and '' or str(v)
ix, jx, vx = i, j, v
b = (vx and t is StringType) and 1 or 0
if maxLen: vx = vx[:maxLen]
if b: break
if b: break
if vx:
vx = ' with cell(%d,%d) containing\n%s' % (ix,jx,repr(vx))
else:
vx = '...'
return "<%s at %d %d rows x %s cols>%s" % (self.__class__.__name__, id(self), nr, nc, vx)
def _listCellGeom(self, V,w,s,W=None,H=None,aH=72000):
if not V: return 0,0
aW = w - s.leftPadding - s.rightPadding
aH = aH - s.topPadding - s.bottomPadding
t = 0
w = 0
canv = getattr(self,'canv',None)
sb0 = None
for v in V:
vw, vh = v.wrapOn(canv, aW, aH)
sb = v.getSpaceBefore()
sa = v.getSpaceAfter()
if W is not None: W.append(vw)
if H is not None: H.append(vh)
w = max(w,vw)
t += vh + sa + sb
if sb0 is None:
sb0 = sb
return w, t - sb0 - sa
def _listValueWidth(self,V,aH=72000,aW=72000):
if not V: return 0,0
t = 0
w = 0
canv = getattr(self,'canv',None)
return max([v.wrapOn(canv,aW,aH)[0] for v in V])
def _calc_width(self,availWidth,W=None):
if getattr(self,'_width_calculated_once',None): return
#comments added by Andy to Robin's slightly terse variable names
if not W: W = _calc_pc(self._argW,availWidth) #widths array
if None in W: #some column widths are not given
canv = getattr(self,'canv',None)
saved = None
if self._spanCmds:
colSpanCells = self._colSpanCells
spanRanges = self._spanRanges
else:
colSpanCells = ()
spanRanges = {}
spanCons = {}
if W is self._argW:
W0 = W
W = W[:]
else:
W0 = W[:]
while None in W:
j = W.index(None) #find first unspecified column
f = lambda x,j=j: operator.getitem(x,j)
V = map(f,self._cellvalues) #values for this column
S = map(f,self._cellStyles) #styles for this column
w = 0
i = 0
for v, s in map(None, V, S):
ji = j,i
span = spanRanges.get(ji,None)
if ji in colSpanCells and not span: #if the current cell is part of a spanned region,
t = 0.0 #assume a zero size.
else:#work out size
t = self._elementWidth(v,s)
if t is None:
raise ValueError("Flowable %s in cell(%d,%d) can't have auto width\n%s" % (v.identity(30),i,j,self.identity(30)))
t += s.leftPadding+s.rightPadding
if span:
c0 = span[0]
c1 = span[2]
if c0!=c1:
x = c0,c1
spanCons[x] = max(spanCons.get(x,t),t)
t = 0
if t>w: w = t #record a new maximum
i += 1
W[j] = w
if spanCons:
spanFixDim(W0,W,spanCons)
self._colWidths = W
width = 0
self._colpositions = [0] #index -1 is right side boundary; we skip when processing cells
for w in W:
width = width + w
self._colpositions.append(width)
self._width = width
self._width_calculated_once = 1
def _elementWidth(self,v,s):
if isinstance(v,(list,tuple)):
w = 0
for e in v:
ew = self._elementWidth(e,s)
if ew is None: return None
w = max(w,ew)
return w
elif isinstance(v,Flowable) and v._fixedWidth:
if hasattr(v, 'width') and type(v.width) in (IntType,FloatType): return v.width
if hasattr(v, 'drawWidth') and type(v.drawWidth) in (IntType,FloatType): return v.drawWidth
# Even if something is fixedWidth, the attribute to check is not
# necessarily consistent (cf. Image.drawWidth). Therefore, we'll
# be extra-careful and fall through to this code if necessary.
if hasattr(v, 'minWidth'):
try:
w = v.minWidth() # should be all flowables
if type(w) in (FloatType,IntType): return w
except AttributeError:
pass
v = (v is not None and str(v) or '').split("\n")
fontName = s.fontname
fontSize = s.fontsize
return max([stringWidth(x,fontName,fontSize) for x in v])
def _calc_height(self, availHeight, availWidth, H=None, W=None):
H = self._argH
if not W: W = _calc_pc(self._argW,availWidth) #widths array
hmax = lim = len(H)
longTable = getattr(self,'_longTableOptimize',rl_config.longTableOptimize)
if None in H:
canv = getattr(self,'canv',None)
saved = None
#get a handy list of any cells which span rows. should be ignored for sizing
if self._spanCmds:
rowSpanCells = self._rowSpanCells
colSpanCells = self._colSpanCells
spanRanges = self._spanRanges
colpositions = self._colpositions
else:
rowSpanCells = colSpanCells = ()
spanRanges = {}
if canv: saved = canv._fontname, canv._fontsize, canv._leading
H0 = H
H = H[:] #make a copy as we'll change it
self._rowHeights = H
spanCons = {}
FUZZ = rl_config._FUZZ
while None in H:
i = H.index(None)
if longTable:
hmax = i
height = reduce(operator.add, H[:i], 0)
# we can stop if we have filled up all available room
if height > availHeight: break
V = self._cellvalues[i] # values for row i
S = self._cellStyles[i] # styles for row i
h = 0
j = 0
for j,(v, s, w) in enumerate(map(None, V, S, W)): # value, style, width (lengths must match)
ji = j,i
span = spanRanges.get(ji,None)
if ji in rowSpanCells and not span:
continue # don't count it, it's either occluded or unreliable
else:
if isinstance(v,(tuple,list,Flowable)):
if isinstance(v,Flowable): v = (v,)
if w is None and not self._canGetWidth(v):
raise ValueError("Flowable %s in cell(%d,%d) can't have auto width in\n%s" % (v[0].identity(30),i,j,self.identity(30)))
if canv: canv._fontname, canv._fontsize, canv._leading = s.fontname, s.fontsize, s.leading or 1.2*s.fontsize
if ji in colSpanCells:
if not span: continue
w = max(colpositions[span[2]+1]-colpositions[span[0]],w)
dW,t = self._listCellGeom(v,w or self._listValueWidth(v),s)
if canv: canv._fontname, canv._fontsize, canv._leading = saved
dW = dW + s.leftPadding + s.rightPadding
if not rl_config.allowTableBoundsErrors and dW>w:
from reportlab.platypus.doctemplate import LayoutError
raise LayoutError("Flowable %s (%sx%s points) too wide for cell(%d,%d) (%sx* points) in\n%s" % (v[0].identity(30),fp_str(dW),fp_str(t),i,j, fp_str(w), self.identity(30)))
else:
v = (v is not None and str(v) or '').split("\n")
t = (s.leading or 1.2*s.fontSize)*len(v)
t += s.bottomPadding+s.topPadding
if span:
r0 = span[1]
r1 = span[3]
if r0!=r1:
x = r0,r1
spanCons[x] = max(spanCons.get(x,t),t)
t = 0
if t>h: h = t #record a new maximum
H[i] = h
if None not in H: hmax = lim
if spanCons:
spanFixDim(H0,H,spanCons)
height = self._height = reduce(operator.add, H[:hmax], 0)
self._rowpositions = [height] # index 0 is actually topline; we skip when processing cells
for h in H[:hmax]:
height = height - h
self._rowpositions.append(height)
assert abs(height)<1e-8, 'Internal height error'
self._hmax = hmax
def _calc(self, availWidth, availHeight):
#if hasattr(self,'_width'): return
#in some cases there are unsizable things in
#cells. If so, apply a different algorithm
        #and assign some widths in a less dumb way (thanks to Gary Poster).
#this CHANGES the widths array.
if (None in self._colWidths or '*' in self._colWidths) and self._hasVariWidthElements():
W = self._calcPreliminaryWidths(availWidth) #widths
else:
W = None
# need to know which cells are part of spanned
# ranges, so _calc_height and _calc_width can ignore them
# in sizing
if self._spanCmds:
self._calcSpanRanges()
if None in self._argH:
self._calc_width(availWidth,W=W)
if self._nosplitCmds:
self._calcNoSplitRanges()
# calculate the full table height
self._calc_height(availHeight,availWidth,W=W)
# calculate the full table width
self._calc_width(availWidth,W=W)
if self._spanCmds:
#now work out the actual rect for each spanned cell from the underlying grid
self._calcSpanRects()
def _hasVariWidthElements(self, upToRow=None):
"""Check for flowables in table cells and warn up front.
Allow a couple which we know are fixed size such as
images and graphics."""
if upToRow is None: upToRow = self._nrows
for row in xrange(min(self._nrows, upToRow)):
for col in xrange(self._ncols):
value = self._cellvalues[row][col]
if not self._canGetWidth(value):
return 1
return 0
def _canGetWidth(self, thing):
"Can we work out the width quickly?"
if isinstance(thing,(ListType, TupleType)):
for elem in thing:
if not self._canGetWidth(elem):
return 0
return 1
elif isinstance(thing, Flowable):
return thing._fixedWidth # must loosen this up
else: #string, number, None etc.
#anything else gets passed to str(...)
# so should be sizable
return 1
def _calcPreliminaryWidths(self, availWidth):
"""Fallback algorithm for when main one fails.
Where exact width info not given but things like
paragraphs might be present, do a preliminary scan
and assign some best-guess values."""
W = list(self._argW) # _calc_pc(self._argW,availWidth)
verbose = 0
totalDefined = 0.0
percentDefined = 0
percentTotal = 0
numberUndefined = 0
numberGreedyUndefined = 0
for w in W:
if w is None:
numberUndefined += 1
elif w == '*':
numberUndefined += 1
numberGreedyUndefined += 1
elif _endswith(w,'%'):
percentDefined += 1
percentTotal += float(w[:-1])
else:
assert type(w) in (IntType, FloatType)
totalDefined = totalDefined + w
if verbose: print 'prelim width calculation. %d columns, %d undefined width, %0.2f units remain' % (
self._ncols, numberUndefined, availWidth - totalDefined)
#check columnwise in each None column to see if they are sizable.
given = []
sizeable = []
unsizeable = []
minimums = {}
totalMinimum = 0
elementWidth = self._elementWidth
for colNo in xrange(self._ncols):
w = W[colNo]
if w is None or w=='*' or _endswith(w,'%'):
siz = 1
current = final = None
for rowNo in xrange(self._nrows):
value = self._cellvalues[rowNo][colNo]
style = self._cellStyles[rowNo][colNo]
new = elementWidth(value,style)+style.leftPadding+style.rightPadding
final = max(current, new)
current = new
siz = siz and self._canGetWidth(value) # irrelevant now?
if siz:
sizeable.append(colNo)
else:
unsizeable.append(colNo)
minimums[colNo] = final
totalMinimum += final
else:
given.append(colNo)
if len(given) == self._ncols:
return
if verbose: print 'predefined width: ',given
if verbose: print 'uncomputable width: ',unsizeable
if verbose: print 'computable width: ',sizeable
# how much width is left:
remaining = availWidth - (totalMinimum + totalDefined)
if remaining > 0:
# we have some room left; fill it.
definedPercentage = (totalDefined/availWidth)*100
percentTotal += definedPercentage
if numberUndefined and percentTotal < 100:
undefined = numberGreedyUndefined or numberUndefined
defaultWeight = (100-percentTotal)/undefined
percentTotal = 100
defaultDesired = (defaultWeight/percentTotal)*availWidth
else:
defaultWeight = defaultDesired = 1
# we now calculate how wide each column wanted to be, and then
# proportionately shrink that down to fit the remaining available
# space. A column may not shrink less than its minimum width,
# however, which makes this a bit more complicated.
desiredWidths = []
totalDesired = 0
effectiveRemaining = remaining
for colNo, minimum in minimums.items():
w = W[colNo]
if _endswith(w,'%'):
desired = (float(w[:-1])/percentTotal)*availWidth
elif w == '*':
desired = defaultDesired
else:
desired = not numberGreedyUndefined and defaultDesired or 1
if desired <= minimum:
W[colNo] = minimum
else:
desiredWidths.append(
(desired-minimum, minimum, desired, colNo))
totalDesired += desired
effectiveRemaining += minimum
if desiredWidths: # else we're done
# let's say we have two variable columns. One wanted
# 88 points, and one wanted 264 points. The first has a
# minWidth of 66, and the second of 55. We have 71 points
# to divide up in addition to the totalMinimum (i.e.,
# remaining==71). Our algorithm tries to keep the proportion
# of these variable columns.
#
# To do this, we add up the minimum widths of the variable
# columns and the remaining width. That's 192. We add up the
# totalDesired width. That's 352. That means we'll try to
# shrink the widths by a proportion of 192/352--.545454.
# That would make the first column 48 points, and the second
# 144 points--adding up to the desired 192.
#
# Unfortunately, that's too small for the first column. It
# must be 66 points. Therefore, we go ahead and save that
# column width as 88 points. That leaves (192-88==) 104
# points remaining. The proportion to shrink the remaining
# column is (104/264), which, multiplied by the desired
# width of 264, is 104: the amount assigned to the remaining
# column.
proportion = effectiveRemaining/totalDesired
# we sort the desired widths by difference between desired and
# and minimum values, a value called "disappointment" in the
# code. This means that the columns with a bigger
# disappointment will have a better chance of getting more of
# the available space.
desiredWidths.sort()
finalSet = []
for disappointment, minimum, desired, colNo in desiredWidths:
adjusted = proportion * desired
if adjusted < minimum:
W[colNo] = minimum
totalDesired -= desired
effectiveRemaining -= minimum
if totalDesired:
proportion = effectiveRemaining/totalDesired
else:
finalSet.append((minimum, desired, colNo))
for minimum, desired, colNo in finalSet:
adjusted = proportion * desired
assert adjusted >= minimum
W[colNo] = adjusted
else:
for colNo, minimum in minimums.items():
W[colNo] = minimum
if verbose: print 'new widths are:', W
self._argW = self._colWidths = W
return W
def minWidth(self):
W = list(self._argW)
width = 0
elementWidth = self._elementWidth
rowNos = xrange(self._nrows)
values = self._cellvalues
styles = self._cellStyles
for colNo in xrange(len(W)):
w = W[colNo]
if w is None or w=='*' or _endswith(w,'%'):
final = 0
for rowNo in rowNos:
value = values[rowNo][colNo]
style = styles[rowNo][colNo]
new = (elementWidth(value,style)+
style.leftPadding+style.rightPadding)
final = max(final, new)
width += final
else:
width += float(w)
return width # XXX + 1/2*(left and right border widths)
def _calcSpanRanges(self):
"""Work out rects for tables which do row and column spanning.
This creates some mappings to let the later code determine
if a cell is part of a "spanned" range.
self._spanRanges shows the 'coords' in integers of each
'cell range', or None if it was clobbered:
(col, row) -> (col0, row0, col1, row1)
Any cell not in the key is not part of a spanned region
"""
self._spanRanges = spanRanges = {}
for x in xrange(self._ncols):
for y in xrange(self._nrows):
spanRanges[x,y] = (x, y, x, y)
self._colSpanCells = []
self._rowSpanCells = []
csa = self._colSpanCells.append
rsa = self._rowSpanCells.append
for (cmd, start, stop) in self._spanCmds:
x0, y0 = start
x1, y1 = stop
#normalize
if x0 < 0: x0 = x0 + self._ncols
if x1 < 0: x1 = x1 + self._ncols
if y0 < 0: y0 = y0 + self._nrows
if y1 < 0: y1 = y1 + self._nrows
if x0 > x1: x0, x1 = x1, x0
if y0 > y1: y0, y1 = y1, y0
if x0!=x1 or y0!=y1:
if x0!=x1: #column span
for y in xrange(y0, y1+1):
for x in xrange(x0,x1+1):
csa((x,y))
if y0!=y1: #row span
for y in xrange(y0, y1+1):
for x in xrange(x0,x1+1):
rsa((x,y))
for y in xrange(y0, y1+1):
for x in xrange(x0,x1+1):
spanRanges[x,y] = None
# set the main entry
spanRanges[x0,y0] = (x0, y0, x1, y1)
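        # Example of the mapping described in the docstring: a single command
        # ('SPAN', (0, 0), (1, 1)) leaves spanRanges[(0, 0)] == (0, 0, 1, 1)
        # and sets the other three cells of that 2x2 block to None.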
def _calcNoSplitRanges(self):
"""
This creates some mappings to let the later code determine
if a cell is part of a "nosplit" range.
self._nosplitRanges shows the 'coords' in integers of each
'cell range', or None if it was clobbered:
(col, row) -> (col0, row0, col1, row1)
Any cell not in the key is not part of a spanned region
"""
self._nosplitRanges = nosplitRanges = {}
for x in xrange(self._ncols):
for y in xrange(self._nrows):
nosplitRanges[x,y] = (x, y, x, y)
self._colNoSplitCells = []
self._rowNoSplitCells = []
csa = self._colNoSplitCells.append
rsa = self._rowNoSplitCells.append
for (cmd, start, stop) in self._nosplitCmds:
x0, y0 = start
x1, y1 = stop
#normalize
if x0 < 0: x0 = x0 + self._ncols
if x1 < 0: x1 = x1 + self._ncols
if y0 < 0: y0 = y0 + self._nrows
if y1 < 0: y1 = y1 + self._nrows
if x0 > x1: x0, x1 = x1, x0
if y0 > y1: y0, y1 = y1, y0
if x0!=x1 or y0!=y1:
#column span
if x0!=x1:
for y in xrange(y0, y1+1):
for x in xrange(x0,x1+1):
csa((x,y))
#row span
if y0!=y1:
for y in xrange(y0, y1+1):
for x in xrange(x0,x1+1):
rsa((x,y))
for y in xrange(y0, y1+1):
for x in xrange(x0,x1+1):
nosplitRanges[x,y] = None
# set the main entry
nosplitRanges[x0,y0] = (x0, y0, x1, y1)
def _calcSpanRects(self):
"""Work out rects for tables which do row and column spanning.
Based on self._spanRanges, which is already known,
and the widths which were given or previously calculated,
self._spanRects shows the real coords for drawing:
(col, row) -> (x, y, width, height)
for each cell. Any cell which 'does not exist' as another
has spanned over it will get a None entry on the right
"""
if getattr(self,'_spanRects',None): return
colpositions = self._colpositions
rowpositions = self._rowpositions
self._spanRects = spanRects = {}
self._vBlocks = vBlocks = {}
self._hBlocks = hBlocks = {}
for (coord, value) in self._spanRanges.items():
if value is None:
spanRects[coord] = None
else:
col,row = coord
col0, row0, col1, row1 = value
if col1-col0>0:
for _ in xrange(col0+1,col1+1):
vBlocks.setdefault(colpositions[_],[]).append((rowpositions[row1+1],rowpositions[row0]))
if row1-row0>0:
for _ in xrange(row0+1,row1+1):
hBlocks.setdefault(rowpositions[_],[]).append((colpositions[col0],colpositions[col1+1]))
x = colpositions[col0]
y = rowpositions[row1+1]
width = colpositions[col1+1] - x
height = rowpositions[row0] - y
spanRects[coord] = (x, y, width, height)
for _ in hBlocks, vBlocks:
for value in _.values():
value.sort()
def setStyle(self, tblstyle):
if type(tblstyle) is not TableStyleType:
tblstyle = TableStyle(tblstyle)
for cmd in tblstyle.getCommands():
self._addCommand(cmd)
for k,v in tblstyle._opts.items():
setattr(self,k,v)
for a in ('spaceBefore','spaceAfter'):
if not hasattr(self,a) and hasattr(tblstyle,a):
setattr(self,a,getattr(tblstyle,a))
def _addCommand(self,cmd):
if cmd[0] in ('BACKGROUND','ROWBACKGROUNDS','COLBACKGROUNDS'):
self._bkgrndcmds.append(cmd)
elif cmd[0] == 'SPAN':
self._spanCmds.append(cmd)
elif cmd[0] == 'NOSPLIT':
# we expect op, start, stop
self._nosplitCmds.append(cmd)
elif _isLineCommand(cmd):
# we expect op, start, stop, weight, colour, cap, dashes, join
cmd = list(cmd)
if len(cmd)<5: raise ValueError('bad line command '+str(cmd))
#determine line cap value at position 5. This can be string or numeric.
if len(cmd)<6:
cmd.append(1)
else:
cap = _convert2int(cmd[5], LINECAPS, 0, 2, 'cap', cmd)
cmd[5] = cap
#dashes at index 6 - this is a dash array:
if len(cmd)<7: cmd.append(None)
#join mode at index 7 - can be string or numeric, look up as for caps
if len(cmd)<8: cmd.append(1)
else:
join = _convert2int(cmd[7], LINEJOINS, 0, 2, 'join', cmd)
cmd[7] = join
#linecount at index 8. Default is 1, set to 2 for double line.
if len(cmd)<9: cmd.append(1)
else:
lineCount = cmd[8]
if lineCount is None:
lineCount = 1
cmd[8] = lineCount
assert lineCount >= 1
#linespacing at index 9. Not applicable unless 2+ lines, defaults to line
#width so you get a visible gap between centres
if len(cmd)<10: cmd.append(cmd[3])
else:
space = cmd[9]
if space is None:
space = cmd[3]
cmd[9] = space
assert len(cmd) == 10
self._linecmds.append(tuple(cmd))
else:
(op, (sc, sr), (ec, er)), values = cmd[:3] , cmd[3:]
if sc < 0: sc = sc + self._ncols
if ec < 0: ec = ec + self._ncols
if sr < 0: sr = sr + self._nrows
if er < 0: er = er + self._nrows
for i in xrange(sr, er+1):
for j in xrange(sc, ec+1):
_setCellStyle(self._cellStyles, i, j, op, values)
def _drawLines(self):
ccap, cdash, cjoin = None, None, None
self.canv.saveState()
for op, (sc,sr), (ec,er), weight, color, cap, dash, join, count, space in self._linecmds:
if type(sr) is type('') and sr.startswith('split'): continue
if sc < 0: sc = sc + self._ncols
if ec < 0: ec = ec + self._ncols
if sr < 0: sr = sr + self._nrows
if er < 0: er = er + self._nrows
if cap!=None and ccap!=cap:
self.canv.setLineCap(cap)
ccap = cap
if dash is None or dash == []:
if cdash is not None:
self.canv.setDash()
cdash = None
elif dash != cdash:
self.canv.setDash(dash)
cdash = dash
if join is not None and cjoin!=join:
self.canv.setLineJoin(join)
cjoin = join
getattr(self,_LineOpMap.get(op, '_drawUnknown' ))( (sc, sr), (ec, er), weight, color, count, space)
self.canv.restoreState()
self._curcolor = None
def _drawUnknown(self, (sc, sr), (ec, er), weight, color, count, space):
#we are only called from _drawLines which is one level up
import sys
op = sys._getframe(1).f_locals['op']
raise ValueError("Unknown line command '%s'" % op)
def _drawGrid(self, (sc, sr), (ec, er), weight, color, count, space):
self._drawBox( (sc, sr), (ec, er), weight, color, count, space)
self._drawInnerGrid( (sc, sr), (ec, er), weight, color, count, space)
def _drawBox(self, (sc, sr), (ec, er), weight, color, count, space):
self._drawHLines((sc, sr), (ec, sr), weight, color, count, space)
self._drawHLines((sc, er+1), (ec, er+1), weight, color, count, space)
self._drawVLines((sc, sr), (sc, er), weight, color, count, space)
self._drawVLines((ec+1, sr), (ec+1, er), weight, color, count, space)
def _drawInnerGrid(self, (sc, sr), (ec, er), weight, color, count, space):
self._drawHLines((sc, sr+1), (ec, er), weight, color, count, space)
self._drawVLines((sc+1, sr), (ec, er), weight, color, count, space)
def _prepLine(self, weight, color):
if color != self._curcolor:
self.canv.setStrokeColor(color)
self._curcolor = color
if weight != self._curweight:
self.canv.setLineWidth(weight)
self._curweight = weight
def _drawHLines(self, (sc, sr), (ec, er), weight, color, count, space):
ecp = self._colpositions[sc:ec+2]
rp = self._rowpositions[sr:er+1]
if len(ecp)<=1 or len(rp)<1: return
self._prepLine(weight, color)
scp = ecp[0]
ecp = ecp[-1]
hBlocks = getattr(self,'_hBlocks',{})
canvLine = self.canv.line
if count == 1:
for y in rp:
_hLine(canvLine, scp, ecp, y, hBlocks)
else:
lf = lambda x0,y0,x1,y1,canvLine=canvLine, ws=weight+space, count=count: _multiLine(x0,x1,y0,canvLine,ws,count)
for y in rp:
_hLine(lf, scp, ecp, y, hBlocks)
def _drawHLinesB(self, (sc, sr), (ec, er), weight, color, count, space):
self._drawHLines((sc, sr+1), (ec, er+1), weight, color, count, space)
def _drawVLines(self, (sc, sr), (ec, er), weight, color, count, space):
erp = self._rowpositions[sr:er+2]
cp = self._colpositions[sc:ec+1]
if len(erp)<=1 or len(cp)<1: return
self._prepLine(weight, color)
srp = erp[0]
erp = erp[-1]
vBlocks = getattr(self,'_vBlocks',{})
canvLine = lambda y0, x0, y1, x1, _line=self.canv.line: _line(x0,y0,x1,y1)
if count == 1:
for x in cp:
_hLine(canvLine, erp, srp, x, vBlocks)
else:
lf = lambda x0,y0,x1,y1,canvLine=canvLine, ws=weight+space, count=count: _multiLine(x0,x1,y0,canvLine,ws,count)
for x in cp:
_hLine(lf, erp, srp, x, vBlocks)
def _drawVLinesA(self, (sc, sr), (ec, er), weight, color, count, space):
self._drawVLines((sc+1, sr), (ec+1, er), weight, color, count, space)
def wrap(self, availWidth, availHeight):
self._calc(availWidth, availHeight)
#nice and easy, since they are predetermined size
self.availWidth = availWidth
return (self._width, self._height)
def onSplit(self,T,byRow=1):
'''
This method will be called when the Table is split.
Special purpose tables can override to do special stuff.
'''
pass
def _cr_0(self,n,cmds):
for c in cmds:
c = tuple(c)
(sc,sr), (ec,er) = c[1:3]
if sr>=n: continue
if er>=n: er = n-1
self._addCommand((c[0],)+((sc, sr), (ec, er))+c[3:])
def _cr_1_1(self,n,repeatRows, cmds):
for c in cmds:
c = tuple(c)
(sc,sr), (ec,er) = c[1:3]
if sr in ('splitfirst','splitlast'): self._addCommand(c)
else:
if sr>=0 and sr>=repeatRows and sr<n and er>=0 and er<n: continue
if sr>=repeatRows and sr<n: sr=repeatRows
elif sr>=repeatRows and sr>=n: sr=sr+repeatRows-n
if er>=repeatRows and er<n: er=repeatRows
elif er>=repeatRows and er>=n: er=er+repeatRows-n
self._addCommand((c[0],)+((sc, sr), (ec, er))+c[3:])
def _cr_1_0(self,n,cmds):
for c in cmds:
c = tuple(c)
(sc,sr), (ec,er) = c[1:3]
if sr in ('splitfirst','splitlast'): self._addCommand(c)
else:
if er>=0 and er<n: continue
if sr>=0 and sr<n: sr=0
if sr>=n: sr = sr-n
if er>=n: er = er-n
self._addCommand((c[0],)+((sc, sr), (ec, er))+c[3:])
def _splitRows(self,availHeight):
n=self._getFirstPossibleSplitRowPosition(availHeight)
if n<=self.repeatRows: return []
lim = len(self._rowHeights)
if n==lim: return [self]
repeatRows = self.repeatRows
repeatCols = self.repeatCols
splitByRow = self.splitByRow
data = self._cellvalues
#we're going to split into two superRows
        #R0 = self.__class__( data[:n], self._argW, self._argH[:n],
R0 = self.__class__( data[:n], colWidths=self._colWidths, rowHeights=self._argH[:n],
repeatRows=repeatRows, repeatCols=repeatCols,
splitByRow=splitByRow)
#copy the styles and commands
R0._cellStyles = self._cellStyles[:n]
A = []
# hack up the line commands
for op, (sc,sr), (ec,er), weight, color, cap, dash, join, count, space in self._linecmds:
if type(sr)is type('') and sr.startswith('split'):
A.append((op,(sc,sr), (ec,sr), weight, color, cap, dash, join, count, space))
if sr=='splitlast':
sr = er = n-1
elif sr=='splitfirst':
sr = n
er = n
if sc < 0: sc = sc + self._ncols
if ec < 0: ec = ec + self._ncols
if sr < 0: sr = sr + self._nrows
if er < 0: er = er + self._nrows
if op in ('BOX','OUTLINE','GRID'):
if sr<n and er>=n:
# we have to split the BOX
A.append(('LINEABOVE',(sc,sr), (ec,sr), weight, color, cap, dash, join, count, space))
A.append(('LINEBEFORE',(sc,sr), (sc,er), weight, color, cap, dash, join, count, space))
A.append(('LINEAFTER',(ec,sr), (ec,er), weight, color, cap, dash, join, count, space))
A.append(('LINEBELOW',(sc,er), (ec,er), weight, color, cap, dash, join, count, space))
if op=='GRID':
A.append(('LINEBELOW',(sc,n-1), (ec,n-1), weight, color, cap, dash, join, count, space))
A.append(('LINEABOVE',(sc,n), (ec,n), weight, color, cap, dash, join, count, space))
A.append(('INNERGRID',(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
else:
A.append((op,(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
elif op in ('INNERGRID','LINEABOVE'):
if sr<n and er>=n:
A.append(('LINEBELOW',(sc,n-1), (ec,n-1), weight, color, cap, dash, join, count, space))
A.append(('LINEABOVE',(sc,n), (ec,n), weight, color, cap, dash, join, count, space))
A.append((op,(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
elif op == 'LINEBELOW':
if sr<n and er>=(n-1):
A.append(('LINEABOVE',(sc,n), (ec,n), weight, color, cap, dash, join, count, space))
                A.append((op,(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
elif op == 'LINEABOVE':
if sr<=n and er>=n:
A.append(('LINEBELOW',(sc,n-1), (ec,n-1), weight, color, cap, dash, join, count, space))
A.append((op,(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
else:
A.append((op,(sc,sr), (ec,er), weight, color, cap, dash, join, count, space))
R0._cr_0(n,A)
R0._cr_0(n,self._bkgrndcmds)
R0._cr_0(n,self._spanCmds)
R0._cr_0(n,self._nosplitCmds)
if repeatRows:
            #R1 = self.__class__(data[:repeatRows]+data[n:],self._argW,
R1 = self.__class__(data[:repeatRows]+data[n:],colWidths=self._colWidths,
rowHeights=self._argH[:repeatRows]+self._argH[n:],
repeatRows=repeatRows, repeatCols=repeatCols,
splitByRow=splitByRow)
R1._cellStyles = self._cellStyles[:repeatRows]+self._cellStyles[n:]
R1._cr_1_1(n,repeatRows,A)
R1._cr_1_1(n,repeatRows,self._bkgrndcmds)
R1._cr_1_1(n,repeatRows,self._spanCmds)
R1._cr_1_1(n,repeatRows,self._nosplitCmds)
else:
            #R1 = self.__class__(data[n:], self._argW, self._argH[n:],
R1 = self.__class__(data[n:], colWidths=self._colWidths, rowHeights=self._argH[n:],
repeatRows=repeatRows, repeatCols=repeatCols,
splitByRow=splitByRow)
R1._cellStyles = self._cellStyles[n:]
R1._cr_1_0(n,A)
R1._cr_1_0(n,self._bkgrndcmds)
R1._cr_1_0(n,self._spanCmds)
R1._cr_1_0(n,self._nosplitCmds)
R0.hAlign = R1.hAlign = self.hAlign
R0.vAlign = R1.vAlign = self.vAlign
self.onSplit(R0)
self.onSplit(R1)
return [R0,R1]
def _getRowImpossible(impossible,cells,ranges):
for xy in cells:
r=ranges[xy]
            if r is not None:
y1,y2=r[1],r[3]
if y1!=y2:
ymin=min(y1,y2) #normalize
ymax=max(y1,y2) #normalize
y=ymin+1
while 1:
if y>ymax: break
impossible[y]=None #split at position y is impossible because of overlapping rowspan
y+=1
_getRowImpossible=staticmethod(_getRowImpossible)
def _getFirstPossibleSplitRowPosition(self,availHeight):
impossible={}
if self._spanCmds:
self._getRowImpossible(impossible,self._rowSpanCells,self._spanRanges)
if self._nosplitCmds:
self._getRowImpossible(impossible,self._rowNoSplitCells,self._nosplitRanges)
h = 0
n = 1
        split_at = 0 # from this point of view 0 is the first position where the table may *always* be split
for rh in self._rowHeights:
if h+rh>availHeight:
break
            if n not in impossible:
split_at=n
h=h+rh
n=n+1
return split_at
def split(self, availWidth, availHeight):
self._calc(availWidth, availHeight)
if self.splitByRow:
if not rl_config.allowTableBoundsErrors and self._width>availWidth: return []
return self._splitRows(availHeight)
else:
raise NotImplementedError
def draw(self):
self._curweight = self._curcolor = self._curcellstyle = None
self._drawBkgrnd()
if not self._spanCmds:
# old fashioned case, no spanning, steam on and do each cell
for row, rowstyle, rowpos, rowheight in map(None, self._cellvalues, self._cellStyles, self._rowpositions[1:], self._rowHeights):
for cellval, cellstyle, colpos, colwidth in map(None, row, rowstyle, self._colpositions[:-1], self._colWidths):
self._drawCell(cellval, cellstyle, (colpos, rowpos), (colwidth, rowheight))
else:
# we have some row or col spans, need a more complex algorithm
# to find the rect for each
for rowNo in xrange(self._nrows):
for colNo in xrange(self._ncols):
cellRect = self._spanRects[colNo, rowNo]
if cellRect is not None:
(x, y, width, height) = cellRect
cellval = self._cellvalues[rowNo][colNo]
cellstyle = self._cellStyles[rowNo][colNo]
self._drawCell(cellval, cellstyle, (x, y), (width, height))
self._drawLines()
def _drawBkgrnd(self):
nrows = self._nrows
ncols = self._ncols
canv = self.canv
colpositions = self._colpositions
rowpositions = self._rowpositions
rowHeights = self._rowHeights
colWidths = self._colWidths
spanRects = getattr(self,'_spanRects',None)
for cmd, (sc, sr), (ec, er), arg in self._bkgrndcmds:
if sc < 0: sc = sc + ncols
if ec < 0: ec = ec + ncols
if sr < 0: sr = sr + nrows
if er < 0: er = er + nrows
x0 = colpositions[sc]
y0 = rowpositions[sr]
x1 = colpositions[min(ec+1,ncols)]
y1 = rowpositions[min(er+1,nrows)]
w, h = x1-x0, y1-y0
if callable(arg):
arg(self,canv, x0, y0, w, h)
elif cmd == 'ROWBACKGROUNDS':
#Need a list of colors to cycle through. The arguments
#might be already colours, or convertible to colors, or
# None, or the string 'None'.
#It's very common to alternate a pale shade with None.
colorCycle = map(colors.toColorOrNone, arg)
count = len(colorCycle)
rowCount = er - sr + 1
for i in xrange(rowCount):
color = colorCycle[i%count]
h = rowHeights[sr + i]
if color:
canv.setFillColor(color)
canv.rect(x0, y0, w, -h, stroke=0,fill=1)
y0 = y0 - h
elif cmd == 'COLBACKGROUNDS':
#cycle through colours columnwise
colorCycle = map(colors.toColorOrNone, arg)
count = len(colorCycle)
colCount = ec - sc + 1
for i in xrange(colCount):
color = colorCycle[i%count]
w = colWidths[sc + i]
if color:
canv.setFillColor(color)
canv.rect(x0, y0, w, h, stroke=0,fill=1)
x0 = x0 +w
else: #cmd=='BACKGROUND'
color = colors.toColorOrNone(arg)
if color:
if ec==sc and er==sr and spanRects:
xywh = spanRects.get((sc,sr))
if xywh:
#it's a single cell
x0, y0, w, h = xywh
canv.setFillColor(color)
canv.rect(x0, y0, w, h, stroke=0,fill=1)
def _drawCell(self, cellval, cellstyle, (colpos, rowpos), (colwidth, rowheight)):
if self._curcellstyle is not cellstyle:
cur = self._curcellstyle
if cur is None or cellstyle.color != cur.color:
self.canv.setFillColor(cellstyle.color)
if cur is None or cellstyle.leading != cur.leading or cellstyle.fontname != cur.fontname or cellstyle.fontsize != cur.fontsize:
self.canv.setFont(cellstyle.fontname, cellstyle.fontsize, cellstyle.leading)
self._curcellstyle = cellstyle
just = cellstyle.alignment
valign = cellstyle.valign
n = type(cellval)
if n in _SeqTypes or isinstance(cellval,Flowable):
            if n not in _SeqTypes: cellval = (cellval,)
# we assume it's a list of Flowables
W = []
H = []
w, h = self._listCellGeom(cellval,colwidth,cellstyle,W=W, H=H,aH=rowheight)
if valign=='TOP':
y = rowpos + rowheight - cellstyle.topPadding
elif valign=='BOTTOM':
y = rowpos+cellstyle.bottomPadding + h
else:
y = rowpos+(rowheight+cellstyle.bottomPadding-cellstyle.topPadding+h)/2.0
if cellval: y += cellval[0].getSpaceBefore()
for v, w, h in map(None,cellval,W,H):
if just=='LEFT': x = colpos+cellstyle.leftPadding
elif just=='RIGHT': x = colpos+colwidth-cellstyle.rightPadding - w
elif just in ('CENTRE', 'CENTER'):
x = colpos+(colwidth+cellstyle.leftPadding-cellstyle.rightPadding-w)/2.0
else:
raise ValueError('Invalid justification %s' % just)
y -= v.getSpaceBefore()
y -= h
v.drawOn(self.canv,x,y)
y -= v.getSpaceAfter()
else:
if just == 'LEFT':
draw = self.canv.drawString
x = colpos + cellstyle.leftPadding
elif just in ('CENTRE', 'CENTER'):
draw = self.canv.drawCentredString
x = colpos+(colwidth+cellstyle.leftPadding-cellstyle.rightPadding)*0.5
elif just == 'RIGHT':
draw = self.canv.drawRightString
x = colpos + colwidth - cellstyle.rightPadding
elif just == 'DECIMAL':
draw = self.canv.drawAlignedString
x = colpos + colwidth - cellstyle.rightPadding
else:
raise ValueError('Invalid justification %s' % just)
vals = string.split(str(cellval), "\n")
n = len(vals)
leading = cellstyle.leading
fontsize = cellstyle.fontsize
if valign=='BOTTOM':
y = rowpos + cellstyle.bottomPadding+n*leading-fontsize
elif valign=='TOP':
y = rowpos + rowheight - cellstyle.topPadding - fontsize
elif valign=='MIDDLE':
#tim roberts pointed out missing fontsize correction 2004-10-04
y = rowpos + (cellstyle.bottomPadding + rowheight-cellstyle.topPadding+n*leading)/2.0 - fontsize
else:
raise ValueError("Bad valign: '%s'" % str(valign))
for v in vals:
draw(x, y, v)
y -= leading
if cellstyle.href:
#external hyperlink
self.canv.linkURL(cellstyle.href, (colpos, rowpos, colpos + colwidth, rowpos + rowheight), relative=1)
if cellstyle.destination:
            #internal link to a named destination
self.canv.linkRect("", cellstyle.destination, Rect=(colpos, rowpos, colpos + colwidth, rowpos + rowheight), relative=1)
_LineOpMap = { 'GRID':'_drawGrid',
'BOX':'_drawBox',
'OUTLINE':'_drawBox',
'INNERGRID':'_drawInnerGrid',
'LINEBELOW':'_drawHLinesB',
'LINEABOVE':'_drawHLines',
'LINEBEFORE':'_drawVLines',
'LINEAFTER':'_drawVLinesA', }
class LongTable(Table):
'''Henning von Bargen's changes will be active'''
_longTableOptimize = 1
LINECOMMANDS = _LineOpMap.keys()
def _isLineCommand(cmd):
return cmd[0] in LINECOMMANDS
def _setCellStyle(cellStyles, i, j, op, values):
#new = CellStyle('<%d, %d>' % (i,j), cellStyles[i][j])
#cellStyles[i][j] = new
## modify in place!!!
new = cellStyles[i][j]
if op == 'FONT':
n = len(values)
new.fontname = values[0]
if n>1:
new.fontsize = values[1]
if n>2:
new.leading = values[2]
else:
new.leading = new.fontsize*1.2
elif op in ('FONTNAME', 'FACE'):
new.fontname = values[0]
elif op in ('SIZE', 'FONTSIZE'):
new.fontsize = values[0]
elif op == 'LEADING':
new.leading = values[0]
elif op == 'TEXTCOLOR':
new.color = colors.toColor(values[0], colors.Color(0,0,0))
elif op in ('ALIGN', 'ALIGNMENT'):
new.alignment = values[0]
elif op == 'VALIGN':
new.valign = values[0]
elif op == 'LEFTPADDING':
new.leftPadding = values[0]
elif op == 'RIGHTPADDING':
new.rightPadding = values[0]
elif op == 'TOPPADDING':
new.topPadding = values[0]
elif op == 'BOTTOMPADDING':
new.bottomPadding = values[0]
elif op == 'HREF':
new.href = values[0]
elif op == 'DESTINATION':
new.destination = values[0]
GRID_STYLE = TableStyle(
[('GRID', (0,0), (-1,-1), 0.25, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
BOX_STYLE = TableStyle(
[('BOX', (0,0), (-1,-1), 0.50, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
LABELED_GRID_STYLE = TableStyle(
[('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 2, colors.black),
('LINEBELOW', (0,0), (-1,0), 2, colors.black),
('LINEAFTER', (0,0), (0,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
COLORED_GRID_STYLE = TableStyle(
[('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 2, colors.red),
('LINEBELOW', (0,0), (-1,0), 2, colors.black),
('LINEAFTER', (0,0), (0,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
LIST_STYLE = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
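#An illustrative sketch (added for this excerpt, not part of the original
#module): the preset styles above are meant to be applied to a Table via
#setStyle.  Wrapped in a function so importing the module stays side-effect
#free.
def _preset_style_example():
    data = [['Heading1', 'Heading2'],
            ['cell 1,0', 'cell 1,1'],
            ['cell 2,0', 'cell 2,1']]
    t = Table(data)
    t.setStyle(GRID_STYLE)
    return t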
# experimental iterator which can apply a sequence
# of colors e.g. Blue, None, Blue, None as you move
# down.
if __name__ == '__main__':
from tests.test_platypus_tables import old_tables_test
old_tables_test()
| [
"[email protected]"
] | |
64cf0bf1871a6b84f9a729968ec396164bec5eff | 732b0b3e2ae0e6c498cfd2ed893de60b9fc22a32 | /tests/integration/actions/collections/test_welcome_interactive_noee.py | 5c0bbeddc6c0b39a11d26ac6b3341c0b5bf0a460 | [
"Apache-2.0"
] | permissive | didib/ansible-navigator | eb7b77c1df30b2e90b663383f0f76b6224e92c02 | 62fdbd05f25fb2d79133b3ab207f53ac2f2d6d36 | refs/heads/main | 2023-08-30T06:43:42.876079 | 2021-10-14T18:42:17 | 2021-10-14T18:42:17 | 425,540,819 | 0 | 0 | Apache-2.0 | 2021-11-07T15:27:54 | 2021-11-07T15:27:53 | null | UTF-8 | Python | false | false | 1,166 | py | """ collections from welcome interactive w/o ee
"""
import pytest
from .base import BaseClass
CLI = "ansible-navigator --execution-environment false"
testdata = [
(0, CLI, "ansible-navigator welcome screen"),
(1, ":collections", "ansible-navigator collections top window"),
(2, ":0", "Browse testorg.coll_1 plugins window"),
(3, ":0", "lookup_1 plugin docs window"),
(4, ":back", "Back to browse testorg.coll_1 plugins window"),
(5, ":1", "mod_1 plugin docs window"),
(6, ":back", "Back to browse testorg.coll_1 plugins window"),
(7, ":back", "Back to ansible-navigator collections browse window"),
(8, ":1", "Browse testorg.coll_2 plugins window"),
(9, ":0", "lookup_2 plugin docs window"),
(10, ":back", "Back to browse testorg.coll_2 plugins window"),
(11, ":1", "mod_2 plugin docs window"),
(12, ":back", "Back to browse testorg.coll_2 plugins window"),
(13, ":back", "Back to ansible-navigator collections browse window"),
]
@pytest.mark.parametrize("index, user_input, comment", testdata)
class Test(BaseClass):
"""run the tests"""
TEST_FOR_MODE = "interactive"
UPDATE_FIXTURES = False
| [
"[email protected]"
] | |
6a9815bcd0c734fda3cd74d9658b0d8ab02503a6 | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/kubernetes/client/models/v1_toleration.py | 770bb006050329795a673f8662ff0e3e39b446f0 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,907 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1Toleration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'effect': 'str',
'key': 'str',
'operator': 'str',
'toleration_seconds': 'int',
'value': 'str'
}
attribute_map = {
'effect': 'effect',
'key': 'key',
'operator': 'operator',
'toleration_seconds': 'tolerationSeconds',
'value': 'value'
}
def __init__(self, effect=None, key=None, operator=None, toleration_seconds=None, value=None):
"""
V1Toleration - a model defined in Swagger
"""
self._effect = None
self._key = None
self._operator = None
self._toleration_seconds = None
self._value = None
self.discriminator = None
if effect is not None:
self.effect = effect
if key is not None:
self.key = key
if operator is not None:
self.operator = operator
if toleration_seconds is not None:
self.toleration_seconds = toleration_seconds
if value is not None:
self.value = value
@property
def effect(self):
"""
Gets the effect of this V1Toleration.
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
:return: The effect of this V1Toleration.
:rtype: str
"""
return self._effect
@effect.setter
def effect(self, effect):
"""
Sets the effect of this V1Toleration.
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
:param effect: The effect of this V1Toleration.
:type: str
"""
self._effect = effect
@property
def key(self):
"""
Gets the key of this V1Toleration.
Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
:return: The key of this V1Toleration.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this V1Toleration.
Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
:param key: The key of this V1Toleration.
:type: str
"""
self._key = key
@property
def operator(self):
"""
Gets the operator of this V1Toleration.
Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
:return: The operator of this V1Toleration.
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""
Sets the operator of this V1Toleration.
Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
:param operator: The operator of this V1Toleration.
:type: str
"""
self._operator = operator
@property
def toleration_seconds(self):
"""
Gets the toleration_seconds of this V1Toleration.
TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
:return: The toleration_seconds of this V1Toleration.
:rtype: int
"""
return self._toleration_seconds
@toleration_seconds.setter
def toleration_seconds(self, toleration_seconds):
"""
Sets the toleration_seconds of this V1Toleration.
TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
:param toleration_seconds: The toleration_seconds of this V1Toleration.
:type: int
"""
self._toleration_seconds = toleration_seconds
@property
def value(self):
"""
Gets the value of this V1Toleration.
Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
:return: The value of this V1Toleration.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""
Sets the value of this V1Toleration.
Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
:param value: The value of this V1Toleration.
:type: str
"""
self._value = value
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1Toleration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
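# A minimal usage sketch (added for illustration; not part of the generated
# model).  The key/effect values below are assumptions, not API defaults.
if __name__ == "__main__":
    example = V1Toleration(key="node-role.kubernetes.io/master",
                           operator="Exists",
                           effect="NoSchedule")
    print(example.to_dict())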
| [
"[email protected]"
] | |
17040fde877a4f28ad58496fd1e547336763246d | 9bf7d7ace42a61991970fd967c19071a50609b9e | /ipython/MultiDot.py | d5d3b44b3bd700618f18adcdb674674c8abe775a | [] | no_license | ParsonsRD/SciPy-CookBook | 29b68eace76962ae00735039bc3d488f31714e50 | 52f70a7aa4bd4fd11217a13fc8dd5e277f2388ea | refs/heads/master | 2020-03-17T17:33:28.827269 | 2013-05-17T06:56:54 | 2013-05-17T06:56:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,663 | py | # <markdowncell>
# The matrix multiplication function, numpy.dot(), only takes two
# arguments. That means to multiply more than two arrays together you end
# up with nested function calls which are hard to read:
#
# <codecell>
dot(dot(dot(a,b),c),d)
# <markdowncell>
# versus infix notation where you'd just be able to write
#
# <codecell>
a*b*c*d
# <markdowncell>
# There are a couple of ways to define an 'mdot' function that acts like
# dot but accepts more than two arguments. Using one of these allows you
# to write the above expression as
#
# <codecell>
mdot(a,b,c,d)
# <markdowncell>
# Using reduce
# ------------
#
# The simplest way is to just use reduce.
#
# <codecell>
def mdot(*args):
return reduce(numpy.dot, args)
# <markdowncell>
# Or use the equivalent loop (which is apparently the preferred style [for
# Py3K](http://www.python.org/dev/peps/pep-3100/#id53)):
#
# <codecell>
def mdot(*args):
ret = args[0]
for a in args[1:]:
ret = dot(ret,a)
return ret
# <markdowncell>
# This will always give you left to right associativity, i.e. the
# expression is interpreted as \`(((a\*b)\*c)\*d)\`.
#
# You also can make a right-associative version of the loop:
#
# <codecell>
def mdotr(*args):
ret = args[-1]
for a in reversed(args[:-1]):
ret = dot(a,ret)
return ret
# <markdowncell>
# which evaluates as \`(a\*(b\*(c\*d)))\`. But sometimes you'd like to
# have finer control since the order in which matrix multiplies are
# performed can have a big impact on performance. The next version gives
# that control.
#
# Controlling order of evaluation
# -------------------------------
#
# If we're willing to sacrifice Numpy's ability to treat tuples as arrays,
# we can use tuples as grouping constructs. This version of \`mdot\`
# allows syntax like this:
#
# <codecell>
mdot(a,((b,c),d))
# <markdowncell>
# to control the order in which the pairwise \`dot\` calls are made.
#
# <codecell>
import types
import numpy
def mdot(*args):
"""Multiply all the arguments using matrix product rules.
The output is equivalent to multiplying the arguments one by one
from left to right using dot().
Precedence can be controlled by creating tuples of arguments,
for instance mdot(a,((b,c),d)) multiplies a (a*((b*c)*d)).
Note that this means the output of dot(a,b) and mdot(a,b) will differ if
a or b is a pure tuple of numbers.
"""
if len(args)==1:
return args[0]
elif len(args)==2:
return _mdot_r(args[0],args[1])
else:
return _mdot_r(args[:-1],args[-1])
def _mdot_r(a,b):
"""Recursive helper for mdot"""
if type(a)==types.TupleType:
if len(a)>1:
a = mdot(*a)
else:
a = a[0]
if type(b)==types.TupleType:
if len(b)>1:
b = mdot(*b)
else:
b = b[0]
return numpy.dot(a,b)
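# <markdowncell>
# A quick sketch (added here; not part of the original recipe) of why the
# evaluation order matters. For a chain of rectangular matrices the number of
# scalar multiplications depends heavily on associativity:
#
# <codecell>
import numpy
a = numpy.ones((10, 1000))
b = numpy.ones((1000, 5))
c = numpy.ones((5, 500))
# mdot(a, b, c) computes (a*b)*c: 10*1000*5 + 10*5*500 = 75,000 multiplies
# mdotr(a, b, c) computes a*(b*c): 1000*5*500 + 10*1000*500 = 7,500,000
assert numpy.allclose(mdot(a, b, c), mdotr(a, b, c))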
# <markdowncell>
# Multiply
# --------
#
# Note that the elementwise multiplication function \`numpy.multiply\` has
# the same two-argument limitation as \`numpy.dot\`. The exact same
# generalized forms can be defined for multiply.
#
# Left associative versions:
#
# <codecell>
def mmultiply(*args):
return reduce(numpy.multiply, args)
# <markdowncell>
#
#
# <codecell>
def mmultiply(*args):
ret = args[0]
for a in args[1:]:
ret = multiply(ret,a)
return ret
# <markdowncell>
# Right-associative version:
#
# <codecell>
def mmultiplyr(*args):
ret = args[-1]
for a in reversed(args[:-1]):
ret = multiply(a,ret)
return ret
# <markdowncell>
# Version using tuples to control order of evaluation:
#
# <codecell>
import types
import numpy
def mmultiply(*args):
"""Multiply all the arguments using elementwise product.
The output is equivalent to multiplying the arguments one by one
from left to right using multiply().
Precedence can be controlled by creating tuples of arguments,
for instance mmultiply(a,((b,c),d)) multiplies a (a*((b*c)*d)).
Note that this means the output of multiply(a,b) and mmultiply(a,b) will differ if
a or b is a pure tuple of numbers.
"""
if len(args)==1:
return args[0]
elif len(args)==2:
return _mmultiply_r(args[0],args[1])
else:
return _mmultiply_r(args[:-1],args[-1])
def _mmultiply_r(a,b):
"""Recursive helper for mmultiply"""
if type(a)==types.TupleType:
if len(a)>1:
a = mmultiply(*a)
else:
a = a[0]
if type(b)==types.TupleType:
if len(b)>1:
b = mmultiply(*b)
else:
b = b[0]
return numpy.multiply(a,b)
# <markdowncell>
#
# | [
"[email protected]"
] | |
38192ff80015c3eaf94f38a786aaa3a32e84f80e | 06bf7c9f24b2a9cfe1f9feb1481838b37713904e | /scripts/ansible-vagrant-inventory.py | b88e0db54e121ddfbc708d93aa319328c75d5ce2 | [
"MIT"
] | permissive | marcelodevops/dotfiles | 31efb4d62b7e5161fba40c204612d01e44cc35bd | eeb63ea94f27aef0eef8777bd8a423ae81133217 | refs/heads/master | 2022-03-08T20:37:08.749623 | 2019-10-22T20:14:45 | 2019-10-22T20:14:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,010 | py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import os
import os.path
import subprocess
import sys
def exit_err(msg):
print(msg, file=sys.stderr)
sys.exit(1)
def get_vagrant_index():
path = os.path.expanduser('~/.vagrant.d/data/machine-index/index')
with open(path) as filehandle:
return json.load(filehandle)
def find_relevant_machines(index_data, root_dir):
for machine in index_data['machines'].values():
if machine['state'] == 'running' and machine['vagrantfile_path'] == root_dir:
yield machine
def get_vagrant_file(machine, filename):
return os.path.join(
machine['local_data_path'], 'machines', machine['name'],
machine['provider'], filename,
)
def get_vagrant_privkey(machine):
return get_vagrant_file(machine, 'private_key')
def get_machine_ssh_info(machine):
# this works if the virtualbox machine has guest additions installed
vbox_id_path = get_vagrant_file(machine, 'id')
with open(vbox_id_path) as filehandle:
vbox_id = filehandle.read().decode()
vbox_out = subprocess.check_output([
'vboxmanage', 'guestproperty', 'get', vbox_id,
'/VirtualBox/GuestInfo/Net/1/V4/IP',
]).strip()
if vbox_out != 'No value set!':
return vbox_out.split()[1], None
# fall back to the forwarded port that vagrant uses
ssh_conf = subprocess.check_output(['vagrant', 'ssh-config', machine['name']])
ssh_conf_lines = (line.split(None, 1) for line in ssh_conf.splitlines() if line)
ssh_config_dict = {key.lower(): val for key, val in ssh_conf_lines}
return ssh_config_dict['hostname'], ssh_config_dict['port']
def get_machine_group_data(machine, ansible_vars=None):
ansible_vars = ansible_vars or {}
ansible_vars['ansible_ssh_private_key_file'] = get_vagrant_privkey(machine)
ip, port = get_machine_ssh_info(machine)
# TODO: change ansible_ssh_ to ansible_ when upgrading to ansible 2
ansible_vars['ansible_ssh_host'] = ip
if port:
ansible_vars['ansible_ssh_port'] = port
return {
'hosts': [ip],
'vars': ansible_vars,
}
def get_inventory_data(root_dir):
ssh_args = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
data = {
'all': {
'vars': {
'ansible_user': 'root',
'ansible_ssh_common_args': ssh_args,
},
},
'vagrant': {'children': []},
}
index_data = get_vagrant_index()
for machine in find_relevant_machines(index_data, root_dir):
data[machine['name']] = get_machine_group_data(machine)
data['vagrant']['children'].append(machine['name'])
return data
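# For reference, the emitted inventory JSON looks roughly like the
# (hypothetical) structure below; real group names, IPs and key paths depend
# on the machines Vagrant reports as running:
#
#   {"all": {"vars": {"ansible_user": "root", ...}},
#    "vagrant": {"children": ["web"]},
#    "web": {"hosts": ["192.168.33.10"],
#            "vars": {"ansible_ssh_host": "192.168.33.10",
#                     "ansible_ssh_private_key_file": "/path/to/private_key"}}}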
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--root-dir', default=os.getcwd())
args = parser.parse_args()
data = get_inventory_data(args.root_dir)
print(json.dumps(data))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e8b58c05a571106b4b6e583e9fb783bceebe4a72 | 236e6a7c4604443f0f4acd778bcccd747011080e | /ax/storage/sqa_store/sqa_config.py | 4c184fcadc002f2ad2f11fec983230902d70b085 | [
"MIT"
] | permissive | MayukhBagchiTrento/Ax | a161e1fee615c4e570de51b32f9e656063dc228d | 7c925ba8365af714d9671208de490ba48814bfaa | refs/heads/master | 2023-05-09T20:14:14.525338 | 2021-06-02T18:46:51 | 2021-06-02T18:46:51 | 268,482,239 | 0 | 0 | MIT | 2020-06-01T09:43:39 | 2020-06-01T09:43:38 | null | UTF-8 | Python | false | false | 2,537 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Dict, NamedTuple, Optional, Type
from ax.core.arm import Arm
from ax.core.base import Base
from ax.core.batch_trial import AbandonedArm
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun, GeneratorRunType
from ax.core.metric import Metric
from ax.core.parameter import Parameter
from ax.core.parameter_constraint import ParameterConstraint
from ax.core.runner import Runner
from ax.core.trial import Trial
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.storage.sqa_store.db import SQABase
from ax.storage.sqa_store.sqa_classes import (
SQAAbandonedArm,
SQAArm,
SQAData,
SQAExperiment,
SQAGenerationStrategy,
SQAGeneratorRun,
SQAMetric,
SQAParameter,
SQAParameterConstraint,
SQARunner,
SQATrial,
)
# pyre-fixme[9]: class_to_sqa_class has type `Dict[Type[Base], Type[SQABase]]`; used
# as `Dict[Type[Union[AbandonedArm, Arm, Data, Experiment, GenerationStrategy,
# GeneratorRun, Metric, Parameter, ParameterConstraint, Runner, Trial]],
# Type[Union[SQAAbandonedArm, SQAArm, SQAData, SQAExperiment, SQAGenerationStrategy,
# SQAGeneratorRun, SQAMetric, SQAParameter, SQAParameterConstraint, SQARunner,
# SQATrial]]]`.
class SQAConfig(NamedTuple):
"""Metadata needed to save and load an experiment to SQLAlchemy.
Attributes:
class_to_sqa_class: Mapping of user-facing class to SQLAlchemy class
that it will be encoded to. This allows overwriting of the default
classes to provide custom save functionality.
experiment_type_enum: Enum containing valid Experiment types.
generator_run_type_enum: Enum containing valid Generator Run types.
"""
class_to_sqa_class: Dict[Type[Base], Type[SQABase]] = {
AbandonedArm: SQAAbandonedArm,
Arm: SQAArm,
Data: SQAData,
Experiment: SQAExperiment,
GenerationStrategy: SQAGenerationStrategy,
GeneratorRun: SQAGeneratorRun,
Parameter: SQAParameter,
ParameterConstraint: SQAParameterConstraint,
Metric: SQAMetric,
Runner: SQARunner,
Trial: SQATrial,
}
experiment_type_enum: Optional[Enum] = None
generator_run_type_enum: Optional[Enum] = GeneratorRunType
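# Illustrative sketch (not part of the original module): since SQAConfig is a
# NamedTuple, a customized config is built with keyword arguments or
# ``_replace``; the override below is a hypothetical example, not an Ax API.
if __name__ == "__main__":
    default_config = SQAConfig()
    custom_config = default_config._replace(generator_run_type_enum=None)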
| [
"[email protected]"
] | |
d6ab292e0caa5483ef89895c47a5e5aa8ee679c7 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/billing_setup_service/transports/base.py | b8a572cac8815e85d9d4868a0e0d004ba5c7b66a | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,161 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v6.resources.types import billing_setup
from google.ads.googleads.v6.services.types import billing_setup_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads-googleads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class BillingSetupServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for BillingSetupService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_billing_setup: gapic_v1.method.wrap_method(
self.get_billing_setup,
default_timeout=None,
client_info=client_info,
),
self.mutate_billing_setup: gapic_v1.method.wrap_method(
self.mutate_billing_setup,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_billing_setup(self) -> typing.Callable[
[billing_setup_service.GetBillingSetupRequest],
billing_setup.BillingSetup]:
raise NotImplementedError
@property
def mutate_billing_setup(self) -> typing.Callable[
[billing_setup_service.MutateBillingSetupRequest],
billing_setup_service.MutateBillingSetupResponse]:
raise NotImplementedError
__all__ = (
'BillingSetupServiceTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
9a5e9d041ce5d5aec6a271c32fdf6721bfcdf335 | 0d9445f17a7175c3872c6ffb9280c3269b8a1eb9 | /test/unfolding/test_unfolding.py | 06ece9628fe17d60ef34aa60e688ece98132da0e | [] | permissive | neukirchen-212/phonopy | 8aa4fc9f63cb124acaa6f7ab052275a423c000cb | e34588dcb32fb15aa2a6604ffd3e62ebb0927c0f | refs/heads/develop | 2023-08-24T00:47:35.908407 | 2021-10-16T12:02:27 | 2021-10-16T12:02:27 | 328,015,607 | 0 | 0 | BSD-3-Clause | 2021-01-11T12:53:48 | 2021-01-08T21:24:17 | null | UTF-8 | Python | false | false | 4,753 | py | import numpy as np
from phonopy import Phonopy
from phonopy.unfolding import Unfolding
# from phonopy.interface.vasp import write_vasp
import os
data_dir = os.path.dirname(os.path.abspath(__file__))
def test_Unfolding_NaCl(ph_nacl):
"""Test to reproduce proper band structure of primitive cell
Results are written to "bin-unfolding-test.dat".
This data can be plotted by
% plot_band.py bin-unfolding-test.dat
    Increase nd to get a better plot.
The test is done with nd=10.
"""
# ph = _get_phonon(ph_nacl)
ph = ph_nacl
nd = 10
qpoints = np.array([[x, ] * 3 for x in range(nd)]) / float(nd) - 0.5
unfolding_supercell_matrix = [[-2, 2, 2],
[2, -2, 2],
[2, 2, -2]]
mapping = np.arange(len(ph.supercell), dtype=int)
unfolding = Unfolding(ph,
unfolding_supercell_matrix,
ph.supercell.scaled_positions,
mapping,
qpoints)
unfolding.run()
weights = _get_weights(unfolding, qpoints)
# _write_weights(weights, "unfolding.dat")
filename_out = os.path.join(data_dir, "bin-unfolding-test.dat")
_compare(weights,
os.path.join(data_dir, "bin-unfolding.dat"),
filename_out=None)
def test_Unfolding_SC(ph_nacl):
"""Test to reproduce unfoled band structure
Atomic positions are considered as the lattice ponts.
Results are written to "bin-unfolding_to_atoms-test.dat".
This data can be plotted by
% plot_band.py bin-unfolding_to_atoms-test.dat
    Increase nd to get a better plot.
The test is done with nd=10.
"""
# ph = _get_phonon(ph_nacl)
ph = ph_nacl
nd = 10
qpoints = np.array([[x, ] * 3 for x in range(nd)]) / float(nd) - 0.5
unfolding_supercell_matrix = np.diag([4, 4, 4])
mapping = np.arange(len(ph.supercell), dtype=int)
unfolding = Unfolding(ph,
unfolding_supercell_matrix,
ph.supercell.scaled_positions,
mapping,
qpoints)
unfolding.run()
weights = _get_weights(unfolding, qpoints)
# _write_weights(weights, "unfolding_to_atoms.dat")
filename_out = os.path.join(data_dir, "bin-unfolding_to_atoms-test.dat")
_compare(weights,
os.path.join(data_dir, "bin-unfolding_to_atoms.dat"),
filename_out=None)
def _compare(weights, filename, filename_out=None):
bin_data = _binning(weights)
if filename_out:
_write_bin_data(bin_data, filename_out)
with open(filename) as f:
bin_data_in_file = np.loadtxt(f)
np.testing.assert_allclose(bin_data, bin_data_in_file, atol=1e-2)
def _get_weights(unfolding, qpoints):
weights = unfolding.unfolding_weights
freqs = unfolding.frequencies
out_vals = []
for i, q in enumerate(qpoints):
for f, w in zip(freqs[i], weights[i]):
out_vals.append([q[0], q[1], q[2], f, w])
return out_vals
def _write_weights(weights, filename):
with open(filename, 'w') as w:
lines = ["%10.7f %10.7f %10.7f %12.7f %10.7f" % tuple(x)
for x in weights]
w.write("\n".join(lines))
def _write_bin_data(bin_data, filename):
with open(filename, 'w') as w:
lines = ["%8.5f %8.5f %8.5f" % tuple(v) for v in bin_data]
w.write("\n".join(lines))
def _binning(data):
x = []
y = []
w = []
for vals in data:
if vals[4] > 1e-3:
x.append(vals[0])
y.append(vals[3])
w.append(vals[4])
x = np.around(x, decimals=5)
y = np.around(y, decimals=5)
w = np.array(w)
points = {}
for e_x, e_y, e_z in zip(x, y, w):
if (e_x, e_y) in points:
points[(e_x, e_y)] += e_z
else:
points[(e_x, e_y)] = e_z
x = []
y = []
w = []
for key in points:
x.append(key[0])
y.append(key[1])
w.append(points[key])
data = np.transpose([x, y, w])
data = sorted(data, key=lambda data: data[1])
data = sorted(data, key=lambda data: data[0])
return np.array(data)
def _get_phonon(ph_in):
ph = Phonopy(ph_in.supercell, supercell_matrix=[1, 1, 1])
ph.force_constants = ph_in.force_constants
born_elems = {s: ph_in.nac_params['born'][i]
for i, s in enumerate(ph_in.primitive.symbols)}
born = [born_elems[s] for s in ph_in.supercell.symbols]
epsilon = ph_in.nac_params['dielectric']
factors = ph_in.nac_params['factor']
ph.nac_params = {'born': born,
'factor': factors,
'dielectric': epsilon}
return ph
| [
"[email protected]"
] | |
c16fe6d08af84f79148a92df9ca28ec80d1fbd1b | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/planner/leaf.py | a26a48f95065c8ded94a02ebbeb6e1facd5221d6 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,862 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Leaf(Mo):
"""
Optimizer deployment leaf
"""
meta = ClassMeta("cobra.model.planner.Leaf")
meta.moClassName = "plannerLeaf"
meta.rnFormat = "leaf-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Switch node"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.planner.Resource")
meta.childClasses.add("cobra.model.planner.RsDeployedFex")
meta.childClasses.add("cobra.model.planner.RsDeployedObject")
meta.childClasses.add("cobra.model.planner.RsNodeLabels")
meta.childClasses.add("cobra.model.planner.Violation")
meta.childNamesAndRnPrefix.append(("cobra.model.planner.RsDeployedObject", "rsdeployedObject-"))
meta.childNamesAndRnPrefix.append(("cobra.model.planner.RsDeployedFex", "rsdeployedFex-"))
meta.childNamesAndRnPrefix.append(("cobra.model.planner.RsNodeLabels", "rsnodeLabels-"))
meta.childNamesAndRnPrefix.append(("cobra.model.planner.Violation", "violation-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.planner.Resource", "res-"))
meta.parentClasses.add("cobra.model.planner.Deployment")
meta.superClasses.add("cobra.model.planner.Node")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.rnPrefixes = [
('leaf-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "isExisting", "isExisting", 28926, PropCategory.REGULAR)
prop.label = "Indicates if Node is from Existing topology or Template"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("isExisting", prop)
prop = PropMeta("str", "label", "label", 21222, PropCategory.REGULAR)
prop.label = "Label"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("label", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "maxPctUse", "maxPctUse", 21967, PropCategory.REGULAR)
prop.label = "Max percent usage across all the resources"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 100)]
prop.defaultValue = 255
prop.defaultValueStr = "none"
prop._addConstant("none", "not-applicable", 255)
meta.props.add("maxPctUse", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "model", "model", 21223, PropCategory.REGULAR)
prop.label = "Model"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "N9K-C9372PX"
prop._addConstant("N9K-C93108TC-EX", "n9k-c93108tc-ex", 14)
prop._addConstant("N9K-C93128TX:N9K-M12PQ", "n9k-c93128tx:n9k-m12pq", 6)
prop._addConstant("N9K-C93128TX:N9K-M6PQ", "n9k-c93128tx:n9k-m6pq", 7)
prop._addConstant("N9K-C93128TX:N9K-M6PQ-E", "n9k-c93128tx:n9k-m6pq-e", 8)
prop._addConstant("N9K-C93180YC-EX", "n9k-c93180yc-ex", 13)
prop._addConstant("N9K-C9332PQ", "n9k-c9332pq", 2)
prop._addConstant("N9K-C9372PX", "n9k-c9372px", 0)
prop._addConstant("N9K-C9372PX-E", "n9k-c9372px-e", 12)
prop._addConstant("N9K-C9372TX", "n9k-c9372tx", 1)
prop._addConstant("N9K-C9396PX:N9K-M12PQ", "n9k-c9396px:n9k-m12pq", 3)
prop._addConstant("N9K-C9396PX:N9K-M6PQ", "n9k-c9396px:n9k-m6pq", 4)
prop._addConstant("N9K-C9396PX:N9K-M6PQ-E", "n9k-c9396px:n9k-m6pq-e", 5)
prop._addConstant("N9K-C9396TX:N9K-M12PQ", "n9k-c9396tx:n9k-m12pq", 9)
prop._addConstant("N9K-C9396TX:N9K-M6PQ", "n9k-c9396tx:n9k-m6pq", 10)
prop._addConstant("N9K-C9396TX:N9K-M6PQ-E", "n9k-c9396tx:n9k-m6pq-e", 11)
meta.props.add("model", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 21244, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 21224, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "name"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
d4df2caa6ed25adc9924e59b16ebf3b4dc71c06b | 64cd09628f599fe18bf38528309349f7ac0df71e | /ML.Autoencoder/Reference/1_Autoencoder.py | 59899e4665c25aec2652d6535c1f0822d0364e62 | [] | no_license | JunyoungJang/Python | 958c057b2fd37c03876d3cf566ee27ee637bb020 | 76d4cd441deff8061e10608e0848360bc4f34490 | refs/heads/master | 2021-01-19T21:54:42.208469 | 2020-02-14T09:54:17 | 2020-02-14T09:54:17 | 83,768,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,862 | py | # Gradient-Based Learning Applied to Document Recognition [Y LeCun 1998] http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf
# Greedy Layer-Wise Training of Deep Networks [Bengio 07] http://papers.nips.cc/paper/3048-greedy-layer-wise-training-of-deep-networks.pdf/
# Extracting and Composing Robust Features with Denoising Autoencoders [Vincent 08] http://www.iro.umontreal.ca/~vincentp/Publications/denoising_autoencoders_tr1316.pdf
# Introduction Auto-Encoder ([email protected]) https://wikidocs.net/3413
# Autoencoders http://ufldl.stanford.edu/tutorial/unsupervised/Autoencoders/
# Autoencoder vs RBM (+ vs CNN) # http://khanrc.tistory.com/entry/Autoencoder-vs-RBM-vs-CNN
import tensorflow as tf, numpy as np, matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mnist = input_data.read_data_sets("/Users/sungchul/Dropbox/Data/MNIST/", one_hot=True)
learning_rate = 0.01
training_epochs = 100
batch_size = 256
display_step = 1
examples_to_show = 10
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
def encoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
return layer_2
def decoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
return layer_2
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
tf.global_variables_initializer().run()
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
# Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
f.show()
plt.draw()
plt.waitforbuttonpress() | [
"[email protected]"
] | |
a1088c8dfa47439b1217fe6d2ebf2519ad1696a3 | ae6189642a07fd789f51caadb924328a54919cac | /100-problems/review/dinamic-programming/37-coin-2.py | f4d3151f226422f8a88fdfd7ba7a84605042607d | [] | no_license | d-matsui/atcorder | 201e32403653b2fdf0d42188faf095eb8b793b86 | 22ec1af8206827e10a986cb24cf12acc52ab1d6a | refs/heads/master | 2020-09-27T23:15:27.281877 | 2020-09-01T13:24:34 | 2020-09-01T13:24:34 | 226,632,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | #!/usr/bin/env python3
from pprint import pprint
import sys
sys.setrecursionlimit(10 ** 6)
input = sys.stdin.buffer.readline
INF = float('inf')
n, m = map(int, input().split())
coins = list(map(int, input().split()))
# dp[i] := ちょうどi円支払うときのコインの最小枚数
dp = [INF] * (n + 1)
dp[0] = 0
for i in range(n + 1):
for c in coins:
if i - c < 0:
continue
dp[i] = min(dp[i], dp[i-c] + 1)
print(dp[n])
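# Worked example (added for illustration): for input n=15, m=2 with
# coins = [3, 5], the program prints dp[15] = 3, since 15 = 5 + 5 + 5.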
| [
"[email protected]"
] | |
c8d1d2b5066d64780127495106322b5db11767f8 | 8b9bf5891152762a0dbaef2f01ba8a302d3213fa | /git_blankfile.py | 9e977d1f7d84a0fba2c33fa1f8a736a35c14897b | [] | no_license | WRGrantham/github_practice | bd79ec1d3bfbefe50e00686f63bb247529293370 | a4a1e06b1cbe7ef2667934584293d3a401382fb2 | refs/heads/master | 2020-03-22T06:49:39.473734 | 2018-07-04T03:15:24 | 2018-07-04T03:15:24 | 139,660,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | def bitch_please(string):
print ("bitch, PLEASE!")
bitch_please(poop_butt)
poop_butt = "Mr Poopy Butthole, we've got changes"
| [
"[email protected]"
] | |
529fdedd635d45409b375681a83e45cf48eb5ce7 | 2f24144874d82759f616fdff09e1e9ab196aeac1 | /project_config/wsgi.py | 557e1a8ecbd2d5538cd768f08e09bc678aeb245f | [] | no_license | mfonism/aso_models | 4f0985d9d8761d84bc591d0c8f1f6f97a1d96312 | f677a9416b5ffda940c8e79407968556a83644ad | refs/heads/main | 2021-09-27T02:27:05.746713 | 2020-08-26T23:38:24 | 2020-08-26T23:38:24 | 214,043,643 | 4 | 3 | null | 2021-09-22T18:01:14 | 2019-10-09T23:29:55 | Python | UTF-8 | Python | false | false | 405 | py | """
WSGI config for project_config project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_config.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
70b8250d91c0fe9c9201349f493e908f51d62f94 | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_MaxPool3D_11.py | 0d68f5ac5631e67e9f271a8346573b1b1fc46ec8 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 620 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_MaxPool3D_11():
"""test MaxPool3D_11"""
jit_case = JitTrans(case=yml.get_case_info("MaxPool3D_11"))
jit_case.jit_run()
| [
"[email protected]"
] | |
eee8b28412164b068298414bbbfdd00a7682dde6 | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_gilgais.py | cf929d3c6685b3eb562f0d35660052a163cdc03a | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.gilgais import gilgais
def test_gilgais():
"""Test module gilgais.py by downloading
gilgais.csv and testing shape of
extracted data has 365 rows and 9 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = gilgais(test_path)
try:
assert x_train.shape == (365, 9)
except:
shutil.rmtree(test_path)
    raise
| [
"[email protected]"
] | |
0be288da85863450afa7a166af6d2304e7aa4300 | 6fb37fee016346120d4c14c4343516532304055a | /src/genie/libs/parser/iosxe/tests/test_show_ip.py | 87faf2deeb76a8bc95aeaaf838eda94ca386a3b5 | [
"Apache-2.0"
] | permissive | devbollinger/genieparser | 011526ebbd747c6dcd767535ce4bd33167e15536 | ad5ce7ba8f5153d1aeb9cffcfc4dde0871f3401c | refs/heads/master | 2020-12-20T11:36:00.750128 | 2020-01-24T18:45:40 | 2020-01-24T18:45:40 | 236,061,155 | 0 | 0 | Apache-2.0 | 2020-01-24T18:38:43 | 2020-01-24T18:38:42 | null | UTF-8 | Python | false | false | 6,901 | py | # Python
import unittest
from unittest.mock import Mock
# ATS
from ats.topology import Device
from ats.topology import loader
# Metaparser
from genie.metaparser.util.exceptions import SchemaEmptyParserError, SchemaMissingKeyError
# iosxe show_lisp
# from genie.libs.parser.iosxe.show_lisp import ShowLispSession
# iosxe show_ip_parser
from genie.libs.parser.iosxe.show_ip import ShowIPAlias, \
ShowIPAliasDefaultVrf
# =================================
# Unit test for 'show ip aliases', 'show ip aliases default-vrf', 'show ip aliases vrf {vrf}'
# =================================
class test_show_ip_alias(unittest.TestCase):
'''
Unit test for:
show ip aliases
show ip aliases default-vrf
show ip aliases vrf {vrf}
'''
device = Device(name = 'aDevice')
empty_output = { 'execute.return_value' : '' }
# show ip aliases
golden_parsed_output1 = {
'vrf': {
'default': {
'index': {
1: {
'address_type': 'Interface',
'ip_address': '10.169.197.94',
},
2: {
'address_type': 'Interface',
'ip_address': '10.169.197.254',
},
3: {
'address_type': 'Interface',
'ip_address': '172.16.1.56',
},
4: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
5: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
6: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
7: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
8: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
9: {
'address_type': 'Interface',
'ip_address': '192.168.144.254',
},
},
},
},
}
golden_output1 = { 'execute.return_value':
'''
show ip aliases
Address Type IP Address Port
Interface 10.169.197.94
Interface 10.169.197.254
Interface 172.16.1.56
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.144.254
'''
}
# show ip aliases default-vrf
golden_parsed_output2 = {
'vrf': {
'default': {
'index': {
1: {
'address_type': 'Interface',
'ip_address': '10.169.197.94',
},
2: {
'address_type': 'Interface',
'ip_address': '10.169.197.254',
},
3: {
'address_type': 'Interface',
'ip_address': '172.16.1.56',
},
4: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
5: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
6: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
7: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
8: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
9: {
'address_type': 'Interface',
'ip_address': '192.168.144.254',
},
},
},
},
}
golden_output2 = { 'execute.return_value':
'''
show ip aliases default-vrf
Address Type IP Address Port
Interface 10.169.197.94
Interface 10.169.197.254
Interface 172.16.1.56
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.10.254
Interface 192.168.144.254
'''
}
# show ip aliases vrf {vrf}
golden_parsed_output3 = {
'vrf': {
'L3VPN-1538': {
'index': {
1: {
'address_type': 'Interface',
'ip_address': '192.168.10.254',
},
},
},
},
}
golden_output3 = { 'execute.return_value':
'''
show ip aliases vrf L3VPN-1538
Address Type IP Address Port
Interface 192.168.10.254
'''
}
def test_empty(self):
self.maxDiff = None
self.device = Mock(**self.empty_output)
obj = ShowIPAlias(device = self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden1(self):
self.maxDiff = None
self.device = Mock(**self.golden_output1)
obj = ShowIPAlias(device = self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output1)
def test_golden2(self):
self.maxDiff = None
self.device = Mock(**self.golden_output2)
obj = ShowIPAliasDefaultVrf(device = self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output2)
def test_golden3(self):
self.maxDiff = None
self.device = Mock(**self.golden_output3)
obj = ShowIPAlias(device = self.device)
parsed_output = obj.parse(vrf = 'L3VPN-1538')
self.assertEqual(parsed_output, self.golden_parsed_output3)
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
853b5efb4360f589ed00f617b2c4e5f5ad8dc3c6 | c49849e8c0234ab60d4c5c17233b84ae63932074 | /src/decision-trees.py | d3e57413bfcf73273372e63481c02ec9d78856e9 | [] | no_license | AlexisDrch/Machine-Learning | 97a79c15087765ac97b8693c39f42807255d2a22 | f60cf4147f38a900dd606bb1c07e986a6c72d262 | refs/heads/master | 2021-04-06T09:11:48.926794 | 2018-03-12T09:02:32 | 2018-03-12T09:02:32 | 124,861,464 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | # coding: utf-8
from utils import *
# ### 1. Decision Trees
# In[18]:
clf_dt = tree.DecisionTreeClassifier(criterion='gini') # gini impurity as the split criterion; information gain (entropy) is the main alternative
# #### 1. Balancing wine dataset
# In[19]:
cv = StratifiedKFold(n_splits=10)
title = 'Learning Curve - Decision Tree - wine imbalanced'
plt, score = plot_learning_curve(clf_dt, title, X2, y2, ylim=(0.3, 1.01), cv=cv, n_jobs=4)
plt.savefig('./output/dtree-wine-imbalanced.png')
plt.show()
title = 'Learning Curve - Decision Tree - wine balanced'
plt, score = plot_learning_curve(clf_dt, title, X2_up, y2_up, ylim=(0.3, 1.01), cv=cv, n_jobs=4)
plt.savefig('./output/dtree-wine-balanced.png')
plt.show()
# In[20]:
# on Pima
title = 'Learning Curve - Decision Tree - pima '
plt, score = plot_learning_curve(clf_dt, title, X1, y1, ylim=(0.3, 1.01), cv=cv, n_jobs=4)
plt.savefig('./output/dtree-pima.png')
plt.show()
# #### 2. Parameters tuning.
# In[42]:
# for pima
max_d = 30
title = " Validation Curve - max_depth - pima "
xlabel = "max_depth"
ylabel = "Score"
clf_dt.fit(X1, y1)
valid_curve_dt_pima, pima_dt_score, best_param = plot_validation_curve(clf_dt, title, xlabel, ylabel,X1, y1, param_name = 'max_depth', ylim=None,
cv = cv, n_jobs = 1, param_range = np.arange(1, max_d))
valid_curve_dt_pima.savefig('./output/valid_curve_dt_pima.png')
valid_curve_dt_pima.show()
print("Best score for pima is " + str(pima_dt_score) + ", max_depth = " + str(best_param))
# for wine
title = " Validation Curve - max_depth - wine "
clf_dt.fit(X2_up, y2_up)
valid_curve_dt_wine, wine_dt_score, best_param = plot_validation_curve(clf_dt, title, xlabel, ylabel,X2_up, y2_up, param_name = 'max_depth', ylim=None,
cv = cv, n_jobs = 1, param_range = np.arange(1, max_d))
valid_curve_dt_wine.savefig('./output/valid_curve_dt_wine.png')
valid_curve_dt_wine.show()
print("Best score for wine is " + str(wine_dt_score) + ", max_depth = " + str(best_param))
| [
"[email protected]"
] | |
b73af01c3a4ae24080e795dfdfa6fc5f0dded805 | 9e1bda53da4c5e98190f5f25235f528d692ee5a8 | /.history/my_app/forms_20210405180446.py | 9fd1f9a4b32ae169cb9e3865f612d88a43ae36d9 | [] | no_license | Jumayev-A/Project-3 | 3d373181af6a87e3fe319a13d28fcd18941167b7 | 34ddd009726cbba9ae52e74a46d554fd735566e2 | refs/heads/main | 2023-06-10T11:02:06.446151 | 2021-07-07T06:19:11 | 2021-07-07T06:19:11 | 350,375,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from django import forms
from my_app.models import BlogModel
class BlogForm(forms.ModelForm):
class Meta:
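        # Meta binds the form to BlogModel and whitelists the columns the form
        # will render and save.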
model = BlogModel
fields = ['title','description'] | [
"[email protected]"
] | |
8f8f52e2e5ddc47176d85bf7b051d523a6670890 | 5dd03f9bd8886f02315c254eb2569e4b6d368849 | /tests/python/twitter/common/string/test_basic_scanf.py | 00b280c41ad6bed4daf5a43c6fffc5bfa45b65b2 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | adamsxu/commons | 9e1bff8be131f5b802d3aadc9916d5f3a760166c | 9fd5a4ab142295692994b012a2a2ef3935d35c0b | refs/heads/master | 2021-01-17T23:13:51.478337 | 2012-03-11T17:30:24 | 2012-03-11T17:30:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,569 | py | import pytest
import unittest
from twitter.common.string.scanf import ScanfParser
def almost_equal(a, b, digits=7):
return abs(a-b) < 0.1**digits
def basic_scanf(fmt, string, extra=False):
formatter = ScanfParser(fmt)
result = formatter.parse(string, extra)
assert len(result.ungrouped()) == 1
return result.ungrouped()[0]
def test_bad_input():
conversions = ScanfParser.CONVERSIONS.keys()
bad_stuff = [
" a", " 1", " +",
"a ", "1 ", "+ ",
]
garbage_stuff = [
0, 1, None, dir, [], {}, (), type
]
for c in conversions:
for b in bad_stuff:
with pytest.raises(ScanfParser.ParseError):
basic_scanf(c, b)
for b in garbage_stuff:
with pytest.raises(TypeError):
basic_scanf(c, b)
def test_no_matches():
match = ScanfParser("%%").parse("%")
assert len(match.groups()) == 0
assert len(match.ungrouped()) == 0
test_strings = ["a", " ", "hello hello", "1.0 hello nothing to see here move along", ""]
for t_s in test_strings:
match = ScanfParser(t_s).parse(t_s)
assert len(match.groups()) == 0
assert len(match.ungrouped()) == 0
def test_garbage_formats():
garbage_input = [0, 1, None, dir, [], {}, (), type]
for garbage in garbage_input:
with pytest.raises(TypeError):
ScanfParser(garbage)
def test_special_characters():
special_stuffs = [
(')', '('),
('(', ')'), ('[', ']'), ('{', '}'),
('(', ')+'),
('(|', ')'),
('{,', '}'),
('$', '^'), ('^', '$'),
(' ', '+'), (' ', '*'), (' ', '?')
]
for before, after in special_stuffs:
assert basic_scanf(before+'%c'+after, before+'a'+after) == 'a'
assert basic_scanf(before+'%c'+after, before+u'a'+after) == 'a'
assert basic_scanf(before+'%c'+after, before+' '+after) == ' '
def test_character_conversion():
assert basic_scanf('%c', 'a') == 'a'
assert basic_scanf('%c', u'a') == 'a'
assert basic_scanf('%c', ' ') == ' '
def test_integer_conversion():
for conversion in ('%d', '%ld', '%lld'):
assert basic_scanf(conversion, '1') == 1
assert basic_scanf(conversion, '01') == 1
assert basic_scanf(conversion, '+01') == 1
assert basic_scanf(conversion, '-01') == -1
def test_failing_integer_conversion():
with pytest.raises(ScanfParser.ParseError):
basic_scanf('%d', "\x90")
with pytest.raises(ScanfParser.ParseError):
basic_scanf('%d', "x")
with pytest.raises(ScanfParser.ParseError):
basic_scanf('%d', "hello")
def test_long_conversion():
for conversion in ('%u', '%lu', '%llu'):
assert basic_scanf(conversion, '1') == 1
assert basic_scanf(conversion, '01') == 1
def test_float_conversion():
factor_tests = {
'': 1.0,
'e-0': 1.0,
'e-1': 0.1,
'e+1': 10.0,
'e1': 10.0,
'e0': 1.0,
'e5': 1.e5,
}
for exponent, xfactor in factor_tests.items():
assert almost_equal(basic_scanf('%f', '0' + exponent), 0 * xfactor)
assert almost_equal(basic_scanf('%f', '.1' + exponent), .1 * xfactor)
assert almost_equal(basic_scanf('%f', '2.' + exponent), 2 * xfactor)
assert almost_equal(basic_scanf('%f', '3.4' + exponent), 3.4 * xfactor)
assert almost_equal(basic_scanf('%f', '-.5' + exponent), -0.5 * xfactor)
def test_string_conversion():
for st in ('a', u'a', '123', u'123', 'a\x12\x23'):
assert basic_scanf('%s', st) == st
assert basic_scanf('%s', '\x00') == ''
def test_extra_stuff():
extra_stuff = [ ' ', ' a', ' a b', ' $']
for extra in extra_stuff:
for st in ('a', u'a', '123', u'123', 'a\x12\x23'):
assert basic_scanf('%s', st+extra, extra=True) == st
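# Direct-usage sketch built from the API exercised above; it assumes the %d and
# %s converters yield int and str, as the tests imply:
#   result = ScanfParser('%d %s').parse('42 hello')
#   assert result.ungrouped()[0] == 42 and result.ungrouped()[1] == 'hello'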
| [
"[email protected]"
] | |
087e5477ba0dc7f53e31d552937861e0ef8d456b | 5831b0293cbb6f9e0660ac4ec952cbdb047d051d | /tests/test_corpus.py | f045ffdac25a7a65fc7e1b85f1b460d32a078e9a | [
"Apache-2.0"
] | permissive | mdlynch37/textacy | 03e3287fd8ee8bd4d06e48b7b87edf8324a987e5 | c1c7376a84a62faeee496e9b8cc2a29edc28c7d1 | refs/heads/master | 2021-01-20T09:29:54.627035 | 2017-12-04T05:31:14 | 2017-12-04T05:31:14 | 101,596,726 | 0 | 0 | null | 2017-08-28T02:36:30 | 2017-08-28T02:36:30 | null | UTF-8 | Python | false | false | 3,565 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import shutil
import tempfile
import unittest
from textacy import Corpus
from textacy import Doc
from textacy import cache
from textacy import compat
from textacy import fileio
from textacy.datasets.capitol_words import CapitolWords
DATASET = CapitolWords()
@unittest.skipUnless(
DATASET.filename, 'CapitolWords dataset must be downloaded before running tests')
class CorpusInitTestCase(unittest.TestCase):
def test_corpus_init_lang(self):
self.assertIsInstance(Corpus('en'), Corpus)
self.assertIsInstance(Corpus(cache.load_spacy('en')), Corpus)
for bad_lang in (b'en', None):
with self.assertRaises(TypeError):
Corpus(bad_lang)
def test_corpus_init_texts(self):
limit = 3
corpus = Corpus('en', texts=DATASET.texts(limit=limit))
self.assertEqual(len(corpus.docs), limit)
self.assertTrue(
all(doc.spacy_vocab is corpus.spacy_vocab for doc in corpus))
def test_corpus_init_texts_and_metadatas(self):
limit = 3
texts, metadatas = fileio.split_record_fields(
DATASET.records(limit=limit), 'text')
texts = list(texts)
metadatas = list(metadatas)
corpus = Corpus('en', texts=texts, metadatas=metadatas)
self.assertEqual(len(corpus.docs), limit)
self.assertTrue(
all(doc.spacy_vocab is corpus.spacy_vocab for doc in corpus))
for i in range(limit):
self.assertEqual(texts[i], corpus[i].text)
self.assertEqual(metadatas[i], corpus[i].metadata)
def test_corpus_init_docs(self):
limit = 3
texts, metadatas = fileio.split_record_fields(
DATASET.records(limit=limit), 'text')
docs = [Doc(text, lang='en', metadata=metadata)
for text, metadata in zip(texts, metadatas)]
corpus = Corpus('en', docs=docs)
self.assertEqual(len(corpus.docs), limit)
self.assertTrue(
all(doc.spacy_vocab is corpus.spacy_vocab for doc in corpus))
for i in range(limit):
self.assertEqual(corpus[i].metadata, docs[i].metadata)
corpus = Corpus(
'en', docs=docs, metadatas=({'foo': 'bar'} for _ in range(limit)))
for i in range(limit):
self.assertEqual(corpus[i].metadata, {'foo': 'bar'})
class CorpusMethodsTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp(
prefix='test_corpus', dir=os.path.dirname(os.path.abspath(__file__)))
texts, metadatas = fileio.split_record_fields(
DATASET.records(limit=3), 'text')
self.corpus = Corpus('en', texts=texts, metadatas=metadatas)
def test_corpus_save_and_load(self):
filepath = os.path.join(self.tempdir, 'test_corpus_save_and_load.pkl')
self.corpus.save(filepath)
new_corpus = Corpus.load(filepath)
self.assertIsInstance(new_corpus, Corpus)
self.assertEqual(len(new_corpus), len(self.corpus))
self.assertEqual(new_corpus.lang, self.corpus.lang)
self.assertEqual(
new_corpus.spacy_lang.pipe_names,
self.corpus.spacy_lang.pipe_names)
self.assertIsNone(
new_corpus[0].spacy_doc.user_data['textacy'].get('spacy_lang_meta'))
for i in range(len(new_corpus)):
self.assertEqual(new_corpus[i].metadata, self.corpus[i].metadata)
def tearDown(self):
shutil.rmtree(self.tempdir)
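# Minimal addition so the file can be run directly as well as via a test runner:
if __name__ == '__main__':
    unittest.main()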
| [
"[email protected]"
] | |
3439f18b0ee4568641def717417ce8bf67b35fa8 | 0141361f7c4d276f471ac278580479fa15bc4296 | /Greedy/videoStitching.py | 35e26476c753b3396c97ac391d7f749443758fc4 | [] | no_license | tr1503/LeetCode | a7f2f1801c9424aa96d3cde497290ac1f7992f58 | 6d361cad2821248350f1d8432fdfef86895ca281 | refs/heads/master | 2021-06-24T19:03:08.681432 | 2020-10-09T23:53:22 | 2020-10-09T23:53:22 | 146,689,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | class Solution:
def videoStitching(self, clips: List[List[int]], T: int) -> int:
end = -1
end2 = 0
res = 0
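        # Greedy sweep: `end` is the right edge already covered by the `res`
        # chosen clips; `end2` is the farthest edge reachable with one more clip.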
for i, j in sorted(clips):
if end2 >= T or i > end2:
break
elif end < i and i <= end2:
res += 1
end = end2
end2 = max(end2, j)
return res if end2 >= T else -1
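# Example (LeetCode 1024, sample 1): clips [[0,2],[4,6],[8,10],[1,9],[1,5],[5,9]]
# with T = 10 are covered by three clips ([0,2], [1,9], [8,10]), so
# Solution().videoStitching(...) returns 3. Note `List` assumes the usual
# LeetCode preamble `from typing import List`.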
| [
"[email protected]"
] | |
532a85852e977f65bd726f325702a8c0c9dee17d | bb6ebff7a7f6140903d37905c350954ff6599091 | /third_party/skia/gyp/tools.gyp | 5ea127296ae495935da7b0fc984ead95cda3ef51 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | PDi-Communication-Systems-Inc/lollipop_external_chromium_org | faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f | ccadf4e63dd34be157281f53fe213d09a8c66d2c | refs/heads/master | 2022-12-23T18:07:04.568931 | 2016-04-11T16:03:36 | 2016-04-11T16:03:36 | 53,677,925 | 0 | 1 | BSD-3-Clause | 2022-12-09T23:46:46 | 2016-03-11T15:49:07 | C++ | UTF-8 | Python | false | false | 15,688 | gyp | # GYP file to build various tools.
#
# To build on Linux:
# ./gyp_skia tools.gyp && make tools
#
{
'includes': [
'apptype_console.gypi',
],
'targets': [
{
# Build all executable targets defined below.
'target_name': 'tools',
'type': 'none',
'dependencies': [
'bbh_shootout',
'bench_pictures',
'bench_record',
'bench_playback',
'dump_record',
'filter',
'gpuveto',
'lua_app',
'lua_pictures',
'pinspect',
'render_pdfs',
'render_pictures',
'skdiff',
'skhello',
'skpdiff',
'skpinfo',
'skpmaker',
'skimage',
'test_image_decoder',
],
'conditions': [
['skia_shared_lib',
{
'dependencies': [
'sklua', # This can only be built if skia is built as a shared library
],
},
],
],
},
{
'target_name': 'skdiff',
'type': 'executable',
'sources': [
'../tools/skdiff.cpp',
'../tools/skdiff.h',
'../tools/skdiff_html.cpp',
'../tools/skdiff_html.h',
'../tools/skdiff_main.cpp',
'../tools/skdiff_utils.cpp',
'../tools/skdiff_utils.h',
],
'dependencies': [
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'skpdiff',
'type': 'executable',
'sources': [
'../tools/skpdiff/skpdiff_main.cpp',
'../tools/skpdiff/SkDiffContext.cpp',
'../tools/skpdiff/SkImageDiffer.cpp',
'../tools/skpdiff/SkPMetric.cpp',
'../tools/skpdiff/skpdiff_util.cpp',
'../tools/flags/SkCommandLineFlags.cpp',
],
'include_dirs': [
'../tools/flags',
'../src/core/', # needed for SkTLList.h
],
'dependencies': [
'skia_lib.gyp:skia_lib',
],
'cflags': [
'-O3',
],
'conditions': [
[ 'skia_os in ["linux", "freebsd", "openbsd", "solaris", "chromeos"]', {
'link_settings': {
'libraries': [
'-lrt',
],
},
}],
['skia_opencl', {
'sources': [
'../tools/skpdiff/SkCLImageDiffer.cpp',
'../tools/skpdiff/SkDifferentPixelsMetric_opencl.cpp',
],
'conditions': [
[ 'skia_os == "mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/OpenCL.framework',
]
}
}, {
'link_settings': {
'libraries': [
'-lOpenCL',
],
},
}],
],
}, { # !skia_opencl
'sources': [
'../tools/skpdiff/SkDifferentPixelsMetric_cpu.cpp',
],
}],
],
},
{
'target_name': 'skpmaker',
'type': 'executable',
'sources': [
'../tools/skpmaker.cpp',
],
'include_dirs': [
'../src/core',
],
'dependencies': [
'flags.gyp:flags',
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'skimagediff',
'type': 'executable',
'sources': [
'../tools/skdiff.cpp',
'../tools/skdiff.h',
'../tools/skdiff_html.cpp',
'../tools/skdiff_html.h',
'../tools/skdiff_image.cpp',
'../tools/skdiff_utils.cpp',
'../tools/skdiff_utils.h',
],
'dependencies': [
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'skhello',
'type': 'executable',
'dependencies': [
'skia_lib.gyp:skia_lib',
],
'conditions': [
[ 'skia_os == "nacl"', {
'sources': [
'../platform_tools/nacl/src/nacl_hello.cpp',
],
}, {
'sources': [
'../tools/skhello.cpp',
],
'dependencies': [
'flags.gyp:flags',
'pdf.gyp:pdf',
],
}],
],
},
{
'target_name': 'skimage',
'type': 'executable',
'sources': [
'../tools/skimage_main.cpp',
],
'include_dirs': [
# For SkBitmapHasher.h
'../src/utils/',
],
'dependencies': [
'flags.gyp:flags',
'gm.gyp:gm_expectations',
'jsoncpp.gyp:jsoncpp',
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'skpinfo',
'type': 'executable',
'sources': [
'../tools/skpinfo.cpp',
],
'include_dirs': [
'../tools/flags',
'../src/core/',
],
'dependencies': [
'flags.gyp:flags',
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'gpuveto',
'type': 'executable',
'sources': [
'../tools/gpuveto.cpp',
'../tools/LazyDecodeBitmap.cpp',
],
'include_dirs': [
'../src/core/',
'../src/images',
'../src/lazy',
'../tools/flags',
],
'dependencies': [
'flags.gyp:flags',
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'lua_app',
'type': 'executable',
'sources': [
'../tools/lua/lua_app.cpp',
'../src/utils/SkLua.cpp',
],
'include_dirs': [
# Lua exposes GrReduceClip which in turn requires src/core for SkTLList
'../src/gpu/',
'../src/core/',
],
'dependencies': [
'effects.gyp:effects',
'images.gyp:images',
'lua.gyp:lua',
'pdf.gyp:pdf',
'ports.gyp:ports',
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'lua_pictures',
'type': 'executable',
'sources': [
'../tools/lua/lua_pictures.cpp',
'../src/utils/SkLuaCanvas.cpp',
'../src/utils/SkLua.cpp',
],
'include_dirs': [
# Lua exposes GrReduceClip which in turn requires src/core for SkTLList
'../src/gpu/',
'../src/core/',
],
'dependencies': [
'effects.gyp:effects',
'flags.gyp:flags',
'images.gyp:images',
'lua.gyp:lua',
'tools.gyp:picture_renderer',
'tools.gyp:picture_utils',
'pdf.gyp:pdf',
'ports.gyp:ports',
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'render_pictures',
'type': 'executable',
'sources': [
'../tools/render_pictures_main.cpp',
],
'include_dirs': [
'../src/core',
'../src/images',
'../src/lazy',
'../src/pipe/utils/',
],
'dependencies': [
'flags.gyp:flags',
'skia_lib.gyp:skia_lib',
'tools.gyp:picture_renderer',
'tools.gyp:picture_utils',
],
},
{
'target_name': 'bench_pictures',
'type': 'executable',
'sources': [
'../bench/BenchLogger.cpp',
'../bench/BenchLogger.h',
'../bench/ResultsWriter.cpp',
'../tools/PictureBenchmark.cpp',
'../tools/PictureResultsWriter.h',
'../tools/bench_pictures_main.cpp',
],
'include_dirs': [
'../src/core/',
'../bench',
'../src/lazy/',
],
'dependencies': [
'bench.gyp:bench_timer',
'crash_handler.gyp:CrashHandler',
'flags.gyp:flags',
'jsoncpp.gyp:jsoncpp',
'skia_lib.gyp:skia_lib',
'tools.gyp:picture_renderer',
'tools.gyp:picture_utils',
'tools.gyp:timer_data',
],
},
{
'target_name': 'bench_record',
'type': 'executable',
'sources': [
'../tools/bench_record.cpp',
'../tools/LazyDecodeBitmap.cpp',
],
'include_dirs': [
'../src/core/',
'../src/images',
'../src/lazy',
],
'dependencies': [
'bench.gyp:bench_timer',
'flags.gyp:flags',
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'bench_playback',
'type': 'executable',
'sources': [
'../tools/bench_playback.cpp',
],
'include_dirs': [
'../src/core/',
'../src/images',
],
'dependencies': [
'bench.gyp:bench_timer',
'flags.gyp:flags',
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'dump_record',
'type': 'executable',
'sources': [
'../tools/dump_record.cpp',
'../tools/DumpRecord.cpp',
'../tools/LazyDecodeBitmap.cpp',
],
'include_dirs': [
'../src/core/',
'../src/images',
'../src/lazy',
],
'dependencies': [
'bench.gyp:bench_timer',
'flags.gyp:flags',
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'picture_renderer',
'type': 'static_library',
'sources': [
'../tools/image_expectations.h',
'../tools/image_expectations.cpp',
'../tools/LazyDecodeBitmap.cpp',
'../tools/PictureRenderer.h',
'../tools/PictureRenderer.cpp',
'../tools/PictureRenderingFlags.h',
'../tools/PictureRenderingFlags.cpp',
'../tools/CopyTilesRenderer.h',
'../tools/CopyTilesRenderer.cpp',
'../src/pipe/utils/SamplePipeControllers.h',
'../src/pipe/utils/SamplePipeControllers.cpp',
],
'include_dirs': [
'../src/core',
'../src/images',
'../src/lazy',
'../src/pipe/utils/',
'../src/utils/',
],
'direct_dependent_settings': {
'include_dirs': [
# needed for JSON headers used within image_expectations.h
'../third_party/externals/jsoncpp-chromium/overrides/include/',
'../third_party/externals/jsoncpp/include/',
],
},
'dependencies': [
'flags.gyp:flags',
'jsoncpp.gyp:jsoncpp',
'skia_lib.gyp:skia_lib',
'tools.gyp:picture_utils',
],
'conditions': [
['skia_gpu == 1',
{
'include_dirs' : [
'../src/gpu',
],
'dependencies': [
'gputest.gyp:skgputest',
],
},
],
],
},
{
'target_name': 'render_pdfs',
'type': 'executable',
'sources': [
'../tools/render_pdfs_main.cpp',
'../tools/PdfRenderer.cpp',
'../tools/PdfRenderer.h',
],
'include_dirs': [
'../src/pipe/utils/',
'../src/utils/',
],
'dependencies': [
'pdf.gyp:pdf',
'skia_lib.gyp:skia_lib',
'tools.gyp:picture_utils',
],
'conditions': [
['skia_win_debuggers_path and skia_os == "win"',
{
'dependencies': [
'tools.gyp:win_dbghelp',
],
},
],
# VS static libraries don't have a linker option. We must set a global
# project linker option, or add it to each executable.
['skia_win_debuggers_path and skia_os == "win" and '
'skia_arch_width == 64',
{
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
'<(skia_win_debuggers_path)/x64/DbgHelp.lib',
],
},
},
},
],
['skia_win_debuggers_path and skia_os == "win" and '
'skia_arch_width == 32',
{
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
'<(skia_win_debuggers_path)/DbgHelp.lib',
],
},
},
},
],
],
},
{
'target_name': 'picture_utils',
'type': 'static_library',
'sources': [
'../tools/picture_utils.cpp',
'../tools/picture_utils.h',
],
'dependencies': [
'skia_lib.gyp:skia_lib',
],
'direct_dependent_settings': {
'include_dirs': [
'../tools/',
],
},
},
{
'target_name': 'pinspect',
'type': 'executable',
'sources': [
'../tools/pinspect.cpp',
],
'dependencies': [
'flags.gyp:flags',
'skia_lib.gyp:skia_lib',
'tools.gyp:picture_renderer',
],
},
{
'target_name': 'bbh_shootout',
'type': 'executable',
'include_dirs': [
'../bench',
'../tools/'
],
'sources': [
'../tools/bbh_shootout.cpp',
# Bench code:
],
'dependencies': [
'bench.gyp:bench_timer',
'flags.gyp:flags',
'tools.gyp:timer_data',
'skia_lib.gyp:skia_lib',
'tools.gyp:picture_renderer',
'tools.gyp:picture_utils',
],
},
{
'target_name': 'filter',
'type': 'executable',
'include_dirs' : [
'../src/core',
'../src/utils/debugger',
],
'sources': [
'../tools/filtermain.cpp',
'../src/utils/debugger/SkDrawCommand.h',
'../src/utils/debugger/SkDrawCommand.cpp',
'../src/utils/debugger/SkDebugCanvas.h',
'../src/utils/debugger/SkDebugCanvas.cpp',
'../src/utils/debugger/SkObjectParser.h',
'../src/utils/debugger/SkObjectParser.cpp',
],
'dependencies': [
'skia_lib.gyp:skia_lib',
'tools.gyp:picture_utils',
],
},
{
'target_name': 'test_image_decoder',
'type': 'executable',
'sources': [
'../tools/test_image_decoder.cpp',
],
'dependencies': [
'skia_lib.gyp:skia_lib',
],
},
{
'target_name': 'timer_data',
'type': 'static_library',
'sources': [
'../bench/TimerData.cpp',
],
'dependencies': [
'skia_lib.gyp:skia_lib',
'jsoncpp.gyp:jsoncpp'
]
}
],
'conditions': [
['skia_shared_lib',
{
'targets': [
{
'target_name': 'sklua',
'product_name': 'skia',
'product_prefix': '',
'product_dir': '<(PRODUCT_DIR)/',
'type': 'shared_library',
'sources': [
'../src/utils/SkLuaCanvas.cpp',
'../src/utils/SkLua.cpp',
],
'include_dirs': [
# Lua exposes GrReduceClip which in turn requires src/core for SkTLList
'../src/gpu/',
'../src/core/',
'../third_party/lua/src/',
],
'dependencies': [
'lua.gyp:lua',
'pdf.gyp:pdf',
'skia_lib.gyp:skia_lib',
],
'conditions': [
['skia_os != "win"',
{
'ldflags': [
'-Wl,-rpath,\$$ORIGIN,--enable-new-dtags',
],
},
],
],
},
],
},
],
['skia_win_debuggers_path and skia_os == "win"',
{
'targets': [
{
'target_name': 'win_dbghelp',
'type': 'static_library',
'defines': [
'SK_CDB_PATH="<(skia_win_debuggers_path)"',
],
'sources': [
'../tools/win_dbghelp.h',
'../tools/win_dbghelp.cpp',
],
},
],
},
],
['skia_os == "win"',
{
'targets': [
{
'target_name': 'win_lcid',
'type': 'executable',
'sources': [
'../tools/win_lcid.cpp',
],
},
],
},
],
],
}
| [
"[email protected]"
] | |
5eb1b4c72a607c8c34436d80e1ae1bdf17b45c32 | c55aedc3479a4d311fb406d8133b0e0ceb99d2df | /example/new_system_app/1_BFS_aug.py | c36ba8e8a01e7f60215e6fb316506cd4a9217873 | [] | no_license | tarwcz111111111/DashCam_python | 4a33cdb3e5a8368b81ddc7c0596d4f0802b7c9d6 | 6e025ff49261c146205eb56bbbf4175f1d413f54 | refs/heads/master | 2020-08-25T04:55:16.695561 | 2017-08-28T04:34:59 | 2017-08-28T04:34:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | #!/usr/bin/python2
# ==============================================================
# Step.0
# Fetch the Google data
# ==============================================================
import sys
sys.path.append('/home/andy/Documents/gitHub/DashCam_python/module') # use the module under 'module'
import file_process
import google_store
# Create PanoFetcher
zoom, radius = 1, 30
panoFetcher = google_store.PanoFetcher(zoom, radius)
# Create dashCamFileProcess and load 50 top Dashcam
dashCamFileProcess = file_process.DashCamFileProcessor()
# Select one of the fileName among the 50 selected files
for i in range(3, 4):
index = i
fileID = str(dashCamFileProcess.list50[index][1])
print(fileID, index)
"""
# 1. use info_3d pathPoint
"""
pathPoint_set_info3d = dashCamFileProcess.get_path_info3d(file_id=fileID)
#print(pathPoint_set_info3d)
#panoFetcher.info_3d(fileID, pathPoint_set_info3d)
"""
# 2. use BFS
# Here use the first point in info_3d
"""
lat, lon = None, None
for pathPoint in pathPoint_set_info3d:
print(pathPoint)
[lat, lon] = pathPoint.split(',')
break
panoFetcher.bfs_aug(fileID, (lat, lon), 15)
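    # The third argument (15) bounds the breadth-first expansion from the seed
    # point -- read here as a step budget, an assumption; see
    # google_store.PanoFetcher for the actual semantics.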
| [
"[email protected]"
] | |
0d2fcc5aaf85d229d9c6559618c3ca3d55b05514 | f5fbe5ed5cae7661bc2b9f0aebad003cc4813379 | /qupy/ldpc/symplectic.py | 0130502cc46462313b0294616964b187126c4436 | [
"MIT"
] | permissive | punkdit/qupy | 4f88c8d473b35ff5fe59fa29ce930af01bd60aec | 42da77e2be9a6e289e714a3a8f0e865a755cba23 | refs/heads/master | 2023-09-01T00:48:53.356345 | 2023-08-18T09:20:18 | 2023-08-18T09:20:18 | 157,732,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,344 | py | #!/usr/bin/env python3
"""
Build symplectic matrices over F_2.
This group is a quotient of the Clifford group by the Pauli group.
"""
from collections import namedtuple
from functools import reduce
from operator import mul
from random import shuffle
import numpy
from numpy import concatenate as cat
from qupy.util import mulclose_fast
from qupy.tool import cross, choose
from qupy.argv import argv
from qupy.smap import SMap
from qupy.ldpc.solve import zeros2, shortstr, solve, dot2, array2, eq2, parse, pseudo_inverse, identity2
from qupy.ldpc.solve import enum2, row_reduce, intersect
from qupy.ldpc.css import CSSCode
from qupy.ldpc.decoder import StarDynamicDistance
def mulclose_find(gen, names, target, verbose=False, maxsize=None):
gen = list(gen)
ops = set(gen)
lookup = dict((g, (names[i],)) for (i, g) in enumerate(gen))
bdy = list(gen)
dist = 1
while bdy:
_bdy = []
shuffle(bdy)
for g in bdy:
shuffle(gen)
for h in gen:
k = g*h
if k in ops:
if len(lookup[g]+lookup[h]) < len(lookup[k]):
lookup[k] = lookup[g]+lookup[h]
assert 0
else:
word = lookup[g]+lookup[h]
if len(word) > dist:
dist = len(word)
if verbose:
print("dist:", dist)
lookup[k] = word
ops.add(k)
_bdy.append(k)
if k==target:
assert type( lookup[g]+lookup[h] ) == tuple
return lookup[g]+lookup[h]
bdy = _bdy
#if verbose:
# print("mulclose:", len(ops))
if maxsize and len(ops) >= maxsize:
break
def mulclose_names(gen, names, verbose=False, maxsize=None):
ops = set(gen)
lookup = dict((g, (names[i],)) for (i, g) in enumerate(gen))
bdy = gen
while bdy:
_bdy = set()
for g in bdy:
for h in gen:
k = g*h
if k in ops:
if len(lookup[g]+lookup[h]) < len(lookup[k]):
lookup[k] = lookup[g]+lookup[h]
assert 0
else:
lookup[k] = lookup[g]+lookup[h]
ops.add(k)
_bdy.add(k)
bdy = _bdy
if verbose:
print("mulclose:", len(ops))
if maxsize and len(ops) >= maxsize:
break
return ops, lookup
def get_cell(row, col, p=2):
"""
    return all matrices in the Bruhat cell at (row, col).
These have shape (col, col+row).
"""
if col == 0:
yield zeros2(0, row)
return
if row == 0:
yield identity2(col)
return
# recursive steps:
m, n = col, col+row
for left in get_cell(row, col-1, p):
A = zeros2(m, n)
A[:m-1, :n-1] = left
A[m-1, n-1] = 1
yield A
els = list(range(p))
vecs = list(cross((els,)*m))
for right in get_cell(row-1, col, p):
for v in vecs:
A = zeros2(m, n)
A[:, :n-1] = right
A[:, n-1] = v
yield A
def all_codes(m, n, q=2):
"""
All full-rank generator matrices of shape (m, n)
"""
assert m<=n
col = m
row = n-m
return get_cell(row, col, q)
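# e.g. list(all_codes(1, 2)) yields the three full-rank 1x2 generator matrices
# over F_2: [0 1], [1 0] and [1 1].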
class Matrix(object):
def __init__(self, A):
self.A = A
m, n = A.shape
self.shape = A.shape
self.n = n
self.m = m
self.key = A.tobytes() # careful !!
def __str__(self):
#s = str(self.A)
#s = s.replace("0", ".")
s = shortstr(self.A)
return s
def __mul__(self, other):
assert isinstance(other, Matrix)
assert other.m == self.n
A = dot2(self.A, other.A)
return Matrix(A)
def __getitem__(self, key):
return self.A[key]
def __call__(self, other):
assert isinstance(other, CSSCode)
assert other.n*2 == self.n
Lx, Lz, Hx, Tz, Hz, Tx, Gx, Gz = (
other.Lx, other.Lz, other.Hx,
other.Tz, other.Hz, other.Tx,
other.Gx, other.Gz)
assert Gx is None
assert Gz is None
A = self.A.transpose()
LxLz = dot2(cat((Lx, Lz), axis=1), A)
HxTz = dot2(cat((Hx, Tz), axis=1), A)
TxHz = dot2(cat((Tx, Hz), axis=1), A)
n = self.n//2
Lx, Lz = LxLz[:, :n], LxLz[:, n:]
Hx, Tz = HxTz[:, :n], HxTz[:, n:]
Tx, Hz = TxHz[:, :n], TxHz[:, n:]
code = CSSCode(Lx=Lx, Lz=Lz, Hx=Hx, Tz=Tz, Hz=Hz, Tx=Tx)
return code
def transpose(self):
A = self.A.transpose().copy()
return Matrix(A)
def inverse(self):
A = pseudo_inverse(self.A)
return Matrix(A)
def __eq__(self, other):
assert isinstance(other, Matrix)
assert self.shape == other.shape
#return eq2(self.A, other.A)
return self.key == other.key
def __ne__(self, other):
return self.key != other.key
def __hash__(self):
return hash(self.key)
@classmethod
def identity(cls, n):
A = zeros2(2*n, 2*n)
for i in range(2*n):
A[i, i] = 1
return Matrix(A)
@classmethod
def hadamard(cls, n, idx):
A = zeros2(2*n, 2*n)
for i in range(2*n):
if i==idx:
A[i, n+i] = 1
elif i==n+idx:
A[i, i-n] = 1
else:
A[i, i] = 1
return Matrix(A)
@classmethod
def cnot(cls, n, src, tgt):
A = cls.identity(n).A
assert src!=tgt
A[tgt, src] = 1
A[src+n, tgt+n] = 1
return Matrix(A)
@classmethod
def cz(cls, n, src, tgt):
CN = cls.cnot(n, src, tgt)
H = cls.hadamard(n, tgt)
CZ = H * CN * H
return CZ
@classmethod
def sgate(cls, n, i):
A = cls.identity(n).A
assert 0<=i<n
A[i+n, i] = 1
return Matrix(A)
@classmethod
def swap(cls, n, idx, jdx):
A = zeros2(2*n, 2*n)
assert idx != jdx
for i in range(n):
if i==idx:
A[i, jdx] = 1 # X sector
A[i+n, jdx+n] = 1 # Z sector
elif i==jdx:
A[i, idx] = 1 # X sector
A[i+n, idx+n] = 1 # Z sector
else:
A[i, i] = 1 # X sector
A[i+n, i+n] = 1 # Z sector
return Matrix(A)
@classmethod
def symplectic_form(cls, n):
A = zeros2(2*n, 2*n)
I = identity2(n)
A[:n, n:] = I
A[n:, :n] = I
A = Matrix(A)
return A
@classmethod
def transvect(cls, x):
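        # Builds the symplectic transvection u -> u + <u, x>x, with <,> the
        # symplectic form; such transvections generate Sp(2n, F_2).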
assert len(x.shape)==1
assert x.shape[0]%2 == 0
n = x.shape[0] // 2
assert x.shape == (2*n,)
F = cls.symplectic_form(n)
Fx = dot2(F.A, x)
A = zeros2(2*n, 2*n)
for i in range(2*n):
u = array2([0]*(2*n))
u[i] = 1
v = dot2(u, Fx)
u += v*x
A[:, i] = u
A %= 2
A = Matrix(A)
return A
def is_symplectic(M):
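        # M is symplectic iff M^T J M == J for the block form
        # J = [[0, I], [I, 0]] produced by symplectic_form above.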
assert M.n % 2 == 0
n = M.n//2
A = Matrix.symplectic_form(n)
return M.transpose() * A * M == A
def is_zero(M):
return numpy.allclose(M.A, 0)
def normal_form(self):
A = self.A
#print("normal_form")
#print(A)
A = row_reduce(A)
#print(A)
m, n = A.shape
j = 0
for i in range(m):
while A[i, j] == 0:
j += 1
i0 = i-1
while i0>=0:
r = A[i0, j]
if r!=0:
A[i0, :] += A[i, :]
A %= 2
i0 -= 1
j += 1
#print(A)
A = Matrix(A)
return A
Clifford = Matrix
def get_gen(n, pairs=None):
gen = [Matrix.hadamard(n, i) for i in range(n)]
names = ["H_%d"%i for i in range(n)]
gen += [Matrix.sgate(n, i) for i in range(n)]
names += ["S_%d"%i for i in range(n)]
if pairs is None:
pairs = []
for i in range(n):
for j in range(n):
if i!=j:
pairs.append((i, j))
for (i, j) in pairs:
assert i!=j
gen.append(Matrix.cnot(n, i, j))
names.append("CN(%d,%d)"%(i,j))
return gen, names
def get_encoder(source, target):
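    # Returns A with src * A^T == tgt, so applying A (as in Matrix.__call__)
    # carries the source code's symplectic frame onto the target's.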
assert isinstance(source, CSSCode)
assert isinstance(target, CSSCode)
src = Matrix(source.to_symplectic())
src_inv = src.inverse()
tgt = Matrix(target.to_symplectic())
A = (src_inv * tgt).transpose()
return A
def test_symplectic():
n = 3
I = Matrix.identity(n)
for idx in range(n):
for jdx in range(n):
if idx==jdx:
continue
CN_01 = Matrix.cnot(n, idx, jdx)
CN_10 = Matrix.cnot(n, jdx, idx)
assert CN_01*CN_01 == I
assert CN_10*CN_10 == I
lhs = CN_10 * CN_01 * CN_10
rhs = Matrix.swap(n, idx, jdx)
assert lhs == rhs
lhs = CN_01 * CN_10 * CN_01
assert lhs == rhs
cnot = Matrix.cnot
hadamard = Matrix.hadamard
n = 2
gen = [cnot(n, 0, 1), cnot(n, 1, 0), hadamard(n, 0), hadamard(n, 1)]
for A in gen:
assert A.is_symplectic()
Cliff2 = mulclose_fast(gen)
assert len(Cliff2)==72 # index 10 in Sp(2, 4)
CZ = array2([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 0, 1]])
CZ = Matrix(CZ)
assert CZ.is_symplectic()
assert CZ in Cliff2
n = 3
gen = [
cnot(n, 0, 1), cnot(n, 1, 0),
cnot(n, 0, 2), cnot(n, 2, 0),
cnot(n, 1, 2), cnot(n, 2, 1),
hadamard(n, 0),
hadamard(n, 1),
hadamard(n, 2),
]
for A in gen:
assert A.is_symplectic()
assert len(mulclose_fast(gen))==40320 # index 36 in Sp(2,4)
if 0:
# cnot's generate GL(2, n)
n = 4
gen = []
for i in range(n):
for j in range(n):
if i!=j:
gen.append(cnot(n, i, j))
assert len(mulclose_fast(gen)) == 20160
if 0:
n = 2
count = 0
for A in enum2(4*n*n):
A.shape = (2*n, 2*n)
A = Matrix(A)
try:
assert A.is_symplectic()
count += 1
except:
pass
print(count) # 720 = |Sp(2, 4)|
return
for n in [1, 2]:
gen = []
for x in enum2(2*n):
A = Matrix.transvect(x)
assert A.is_symplectic()
gen.append(A)
G = mulclose_fast(gen)
assert len(G) == [6, 720][n-1]
n = 2
Sp = G
#print(len(Sp))
found = set()
for g in Sp:
A = g.A.copy()
A[:n, n:] = 0
A[n:, :n] = 0
found.add(str(A))
#print(len(A))
#return
n = 4
I = Matrix.identity(n)
H = Matrix.hadamard(n, 0)
assert H*H == I
CN_01 = Matrix.cnot(n, 0, 1)
assert CN_01*CN_01 == I
n = 3
trivial = CSSCode(
Lx=parse("1.."), Lz=parse("1.."), Hx=zeros2(0, n), Hz=parse(".1.\n..1"))
assert trivial.row_equal(CSSCode.get_trivial(3, 0))
repitition = CSSCode(
Lx=parse("111"), Lz=parse("1.."), Hx=zeros2(0, n), Hz=parse("11.\n.11"))
assert not trivial.row_equal(repitition)
CN_01 = Matrix.cnot(n, 0, 1)
CN_12 = Matrix.cnot(n, 1, 2)
CN_21 = Matrix.cnot(n, 2, 1)
CN_10 = Matrix.cnot(n, 1, 0)
encode = CN_12 * CN_01
code = CN_01 ( trivial )
assert not code.row_equal(repitition)
code = CN_12 ( code )
assert code.row_equal(repitition)
A = get_encoder(trivial, repitition)
gen, names = get_gen(3)
word = mulclose_find(gen, names, A)
if 1:
assert type(word) is tuple
#print("word:")
#print(repr(word))
items = [gen[names.index(op)] for op in word]
op = reduce(mul, items)
#print(op)
#assert op*(src) == (tgt)
#print(op(trivial).longstr())
assert op(trivial).row_equal(repitition)
def test_double():
if 0:
print("F =")
print(Matrix.symplectic_form(2))
print("CNOT =")
print(Matrix.cnot(2, 0, 1))
print("S_0 =")
print(Matrix.sgate(2, 0))
print("S_1 =")
print(Matrix.sgate(2, 1))
cnot = Matrix.cnot
hadamard = Matrix.hadamard
n = 2
gen = [cnot(n, 0, 1), cnot(n, 1, 0), hadamard(n, 0), hadamard(n, 1)]
for A in gen:
assert A.is_symplectic()
Cliff2 = mulclose_fast(gen)
assert len(Cliff2)==72 # index 10 in Sp(2, 4)
#if 0:
F = Matrix.symplectic_form(2)
def lhs(a, b):
A = array2([[a, 0, b, 0], [0, a, 0, b]])
return Matrix(A)
def rhs(a, b):
A = array2([[a, b, 0, 0], [0, 0, b, a]])
return Matrix(A)
for g in Cliff2:
for (a,b) in [(0,1), (1,0), (1,1)]:
A = lhs(a, b)
assert (A*F*A.transpose()).is_zero()
B = rhs(a, b)
assert (B*F*B.transpose()).is_zero()
Ag = A*g
W = intersect(Ag.A, B.A)
#if Ag != B:
if len(W) != 2:
#print(len(W))
#print(Ag)
#print("!=")
#print(B)
#print()
break
else:
print("g =")
print(g)
return
print("done")
def get_transvect(n):
gen = []
for x in enum2(2*n):
A = Matrix.transvect(x)
assert A.is_symplectic()
gen.append(A)
return gen
def test_isotropic():
n = 3
gen, _ = get_gen(n)
assert len(mulclose_fast(gen)) == 1451520
return
n = argv.get("n", 3)
F = Matrix.symplectic_form(n).A
found = []
for A in all_codes(n, 2*n):
B = dot2(dot2(A, F), A.transpose())
if B.sum() == 0:
A = Matrix(A)
found.append(A)
#print(A)
found = set(found)
print(len(found))
gen, _ = get_gen(n)
#gen = get_transvect(n)
orbit = set()
A = iter(found).__next__()
bdy = [A]
orbit = set(bdy)
while bdy:
_bdy = []
for A in bdy:
print(A)
for g in gen:
B = A*g
print(B)
print()
B = B.normal_form()
print(B)
print()
assert B in found
if B not in orbit:
_bdy.append(B)
orbit.add(B)
bdy = _bdy
print(len(orbit))
def test_encode():
n = 5
target = parse("""
1.11......
.11.1.....
.....111..
.......111
1.1.1.....
........1.
......1...
1.........
1.1.......
......1..1
""")
target = Matrix(target)
assert target.is_symplectic()
source = parse("""
...1......
.1........
.....1....
.......1..
....1.....
........1.
......1...
1.........
..1.......
.........1
""")
source = Matrix(source)
assert source.is_symplectic()
def get_encoder(source, target):
assert isinstance(source, CSSCode)
assert isinstance(target, CSSCode)
src = Matrix(source.to_symplectic())
src_inv = src.inverse()
tgt = Matrix(target.to_symplectic())
A = (src_inv * tgt).transpose()
return A
#print(Matrix.cnot(2, 0, 1))
#return
#source = source.transpose()
#target = target.transpose()
def cnot(i, j):
g = Matrix.cnot(n, i-1, j-1)
g.name = "cnot(%d,%d)"%(i, j)
return g
assert cnot(3,1) == cnot(1,3).transpose()
#gen = [cnot(3,1), cnot(2,3), cnot(2,5), cnot(4,3), cnot(5,3)]
gen = [cnot(3,1), cnot(2,3), cnot(2,5), cnot(4,3), cnot(4,1), cnot(5,3), cnot(5,4),
cnot(2,1),
]
#gen = [cnot(i,j) for i in range(1,n+1) for j in range(1,n+1) if i!=j]
names = [g.name for g in gen]
gen = [g.transpose() for g in gen]
#A = (source.inverse() * target).transpose()
#A = source.inverse() * target
A = target * source.inverse()
assert A.is_symplectic()
#print(A)
words = set()
for trial in range(100):
word = mulclose_find(gen, names, A)
if word is None:
break
if word not in words:
print(word)
words.add(word)
if word is None:
print("not found")
return
ops = [gen[names.index(c)] for c in word]
op = reduce(mul, ops)
assert op*source == target
def test():
n = 5
I = Matrix.identity(n)
CZ = Matrix.cz(n, 0, 1)
SWAP = Matrix.swap(n, 0, 1)
assert CZ*CZ == I
assert SWAP*CZ == CZ*SWAP
S = Matrix.sgate(n, 0)
assert S*S == I
def build_code(stabs):
m = len(stabs)
n = len(stabs[0])
A = zeros2(m, 2*n)
for i,stab in enumerate(stabs):
for j,c in enumerate(stab):
if c=='I':
pass
            elif c=='X':
                A[i,j] = 1
            elif c=='Z':
                A[i,j+n] = 1
            elif c=='Y':
                # Y has support in both the X and Z sectors
                A[i,j] = 1
                A[i,j+n] = 1
return A
def test_code():
stabs = "IXXXX XIZZX ZXIZZ XZXIZ ZIZIZ".split()
A = build_code(stabs)
m, nn = A.shape
n = nn//2
#A = A.transpose()
print(A)
A = Matrix(A)
F = Matrix.symplectic_form(n)
lhs = A*F*A.transpose()
assert lhs.is_zero()
B = row_reduce(A.A)
B = Matrix(B)
lhs = B*F*B.transpose()
assert lhs.is_zero()
W = zeros2(m+1, 2*n)
W[:m] = A.A
for v in cross([(0,1)]*2*n):
v = array2(v)
if v.sum()==0:
continue
v.shape = (2*n,)
W[m] = v
#print(W)
#if v[6]==1:
# break
M = Matrix(W)
if (M*F*M.transpose()).is_zero():
break
print(W)
M = Matrix(W)
print(M*F*M.transpose())
if __name__ == "__main__":
name = argv.next()
if argv.profile:
import cProfile as profile
profile.run("%s()"%name)
elif name:
fn = eval(name)
fn()
else:
test_symplectic()
test_isotropic() # SLOW
print("OK\n")
| [
"[email protected]"
] | |
d6ed4bf04f30d4d1b1b82eeb248dc600ede27fd9 | eae6dddca9285702c4c7ed6ba6bdaceef9631df2 | /CCC-2018/Junior/Junior-1/J1.py | 475041d11174a226860f6028287db67757ce1cb8 | [] | no_license | simrit1/CCC-Solutions-2 | 7823ce14801c4219f6f1dd4c42fb013c2dfc45dd | ee2883aa38f933e526ce187d50ca68763876cb58 | refs/heads/master | 2023-07-04T02:19:37.320261 | 2021-08-07T22:12:36 | 2021-08-07T22:12:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # CCC 2018 Junior 1: Telemarketer or not?
#
# Author: Charles Chen
#
# Simple if statement
digit1 = int(input())
digit2 = int(input())
digit3 = int(input())
digit4 = int(input())
if (digit1 == 8 or digit1 == 9) and (digit2 == digit3) and (digit4 == 8 or digit4 == 9):
print("ignore")
else:
print("answer")
| [
"[email protected]"
] | |
4c62fa5221d7e7acc4a6d1fd239915dd62942b79 | 0c66e605e6e4129b09ea14dbb6aa353d18aaa027 | /diventi/landing/migrations/0079_auto_20200215_2305.py | d85a68046299dd8b76f851e1157494274efd585e | [
"Apache-2.0"
] | permissive | flavoi/diventi | 58fbc8c947f387cbcc1ce607878a59a6f2b72313 | c0b1efe2baa3ff816d6ee9a8e86623f297973ded | refs/heads/master | 2023-07-20T09:32:35.897661 | 2023-07-11T19:44:26 | 2023-07-11T19:44:26 | 102,959,477 | 2 | 1 | Apache-2.0 | 2023-02-08T01:03:17 | 2017-09-09T14:10:51 | Python | UTF-8 | Python | false | false | 522 | py | # Generated by Django 2.2.10 on 2020-02-15 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('landing', '0078_auto_20200215_1909'),
]
operations = [
migrations.AlterField(
model_name='section',
name='template',
field=models.CharField(choices=[('standard_section.html', 'standard section'), ('cards_section.html', 'cards section')], max_length=50, verbose_name='standard template'),
),
]
| [
"[email protected]"
] | |
53bfd78a3bc2f711a4860ef3c45e69b66823e37c | b57b0a14df5c6841f04cccb7b02ad04afbca18f8 | /avi_vantage/datadog_checks/avi_vantage/config_models/instance.py | 017f57d0d5fea29b61a601bec35b7ad9e169c9c1 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"AFL-3.0",
"BSD-3-Clause-Modification",
"LGPL-3.0-only",
"Unlicense",
"LGPL-2.1-only",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | zeroc0d3/integrations-core | d9c99803c049668b7f9f9c796d338e343d3d46ee | 634d567f3c38d32aabb3f4c16b50bcfa8a4ae0fb | refs/heads/master | 2021-09-28T18:37:00.650406 | 2021-09-13T11:59:45 | 2021-09-13T11:59:45 | 199,758,958 | 0 | 0 | BSD-3-Clause | 2019-07-31T02:01:25 | 2019-07-31T02:01:24 | null | UTF-8 | Python | false | false | 4,652 | py | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import annotations
from typing import Any, Literal, Mapping, Optional, Sequence, Union
from pydantic import BaseModel, Extra, Field, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class AuthToken(BaseModel):
class Config:
allow_mutation = False
reader: Optional[Mapping[str, Any]]
writer: Optional[Mapping[str, Any]]
class ExtraMetric(BaseModel):
class Config:
extra = Extra.allow
allow_mutation = False
name: Optional[str]
type: Optional[str]
class Metric(BaseModel):
class Config:
extra = Extra.allow
allow_mutation = False
name: Optional[str]
type: Optional[str]
class Proxy(BaseModel):
class Config:
allow_mutation = False
http: Optional[str]
https: Optional[str]
no_proxy: Optional[Sequence[str]]
class ShareLabel(BaseModel):
class Config:
allow_mutation = False
labels: Optional[Sequence[str]]
match: Optional[Sequence[str]]
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
auth_token: Optional[AuthToken]
auth_type: Optional[str]
avi_controller_url: str
aws_host: Optional[str]
aws_region: Optional[str]
aws_service: Optional[str]
cache_metric_wildcards: Optional[bool]
cache_shared_labels: Optional[bool]
collect_counters_with_distributions: Optional[bool]
collect_histogram_buckets: Optional[bool]
connect_timeout: Optional[float]
disable_generic_tags: Optional[bool]
empty_default_hostname: Optional[bool]
enable_health_service_check: Optional[bool]
entities: Optional[Sequence[Literal['controller', 'pool', 'serviceengine', 'virtualservice']]]
exclude_labels: Optional[Sequence[str]]
exclude_metrics: Optional[Sequence[str]]
exclude_metrics_by_labels: Optional[Mapping[str, Union[bool, Sequence[str]]]]
extra_headers: Optional[Mapping[str, Any]]
extra_metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, ExtraMetric]]]]]
headers: Optional[Mapping[str, Any]]
histogram_buckets_as_distributions: Optional[bool]
hostname_format: Optional[str]
hostname_label: Optional[str]
ignore_tags: Optional[Sequence[str]]
kerberos_auth: Optional[str]
kerberos_cache: Optional[str]
kerberos_delegate: Optional[bool]
kerberos_force_initiate: Optional[bool]
kerberos_hostname: Optional[str]
kerberos_keytab: Optional[str]
kerberos_principal: Optional[str]
log_requests: Optional[bool]
metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, Metric]]]]]
min_collection_interval: Optional[float]
namespace: Optional[str] = Field(None, regex='\\w+')
non_cumulative_histogram_buckets: Optional[bool]
ntlm_domain: Optional[str]
openmetrics_endpoint: Optional[str]
password: Optional[str]
persist_connections: Optional[bool]
proxy: Optional[Proxy]
raw_line_filters: Optional[Sequence[str]]
raw_metric_prefix: Optional[str]
read_timeout: Optional[float]
rename_labels: Optional[Mapping[str, Any]]
request_size: Optional[float]
service: Optional[str]
share_labels: Optional[Mapping[str, Union[bool, ShareLabel]]]
skip_proxy: Optional[bool]
tags: Optional[Sequence[str]]
telemetry: Optional[bool]
timeout: Optional[float]
tls_ca_cert: Optional[str]
tls_cert: Optional[str]
tls_ignore_warning: Optional[bool]
tls_private_key: Optional[str]
tls_use_host_header: Optional[bool]
tls_verify: Optional[bool]
use_latest_spec: Optional[bool]
use_legacy_auth_encoding: Optional[bool]
username: Optional[str]
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
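# Note on the pydantic hooks above: _initial_validation (root_validator with
# pre=True) runs before field parsing, _ensure_defaults and _run_validations
# run per field, and _final_validation runs once after all fields are set.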
| [
"[email protected]"
] | |
9c3fde3ac2b31c11bd87657492d509c219cfb13d | 601f604ea3eed7d106513a9d40b7df98a0cf8c95 | /make-report.py | 12318d9d79fba8c77d47cdde77c40b94df84d43f | [] | no_license | exbracer/dynet-benchmark | 53e892d55f26f075dfdaf43859d5c1af49c15a75 | 9cbc2d66ca20b7b8040a55a66d2024edd5bf5c42 | refs/heads/master | 2021-09-06T11:40:06.802647 | 2018-02-06T05:47:00 | 2018-02-06T05:47:00 | 120,275,569 | 0 | 0 | null | 2018-02-05T08:24:03 | 2018-02-05T08:24:03 | null | UTF-8 | Python | false | false | 8,103 | py | #!/usr/bin/env python
# This should be used as
# mkdir -p report
# grep '\(per_sec\|startup\)' log/*/*.log | python make-report.py
import sys
import re
from collections import defaultdict
stats = defaultdict(lambda: {})
allstats = defaultdict(lambda: [])
##### Regexes
fnameregex = re.compile(r"log/([a-z-]+?)(-gpu|)/(dynet-py|dynet-cpp|dynet-seq|chainer|theano|tensorflow)-(.*?)-t([123]).log:(.*)")
startregex = re.compile(r"startup time: (.*)")
eqregex = re.compile(r"(.*)=(.*)")
commentregex = re.compile(r"^ *((#|//).*)?")
##### Various data
canonicalize = {
"word_per_sec": "speed",
"words_per_sec": "speed",
"sent_per_sec": "speed",
"nll": "accuracy",
"tag_acc": "accuracy",
"acc": "accuracy",
"time": "time"
}
taskna = {
("tensorflow", "bilstm-tagger-withchar"): 1,
("tensorflow", "treenn"): 1,
("theano", "treenn"): 1,
("dynet-seq", "bilstm-tagger"): 1,
("dynet-seq", "bilstm-tagger-withchar"): 1,
("dynet-seq", "treenn"): 1,
}
toolkits = ["dynet-cpp", "dynet-py", "chainer", "dynet-seq", "theano", "tensorflow"]
prettyname = {
"dynet-cpp": "DyC++",
"dynet-py": "DyPy",
"dynet-seq": "DyC++ Seq",
"tensorflow":"TF",
"chainer": "Chainer",
"theano": "Theano"
}
##### Load from log files
for line in sys.stdin:
line = line.replace("rnnlm-seq/dynet-cpp", "rnnlm-batch/dynet-seq")
line = line.replace("rnnlm-seq-gpu/dynet-cpp", "rnnlm-batch-gpu/dynet-seq")
m = re.search(fnameregex, line.strip())
if m:
task = m.group(1)
device = "gpu" if m.group(2) == "-gpu" else "cpu"
toolkit = m.group(3)
params = m.group(4)
trial = int(m.group(5))
idtup = (task, device, toolkit, params, trial)
data = m.group(6)
m = re.search(startregex, data)
if m:
stats[idtup]["startup"] = float(m.group(1))
else:
mystats = {}
for val in data.split(", "):
m = re.search(eqregex, val)
if not m:
print("unmatched line: %s" % line)
sys.exit(1)
if m.group(1) in canonicalize:
can = canonicalize[m.group(1)]
val = float(m.group(2))
mystats[can] = val
if can == "accuracy":
if "rnnlm" not in task: val *= 100
else: val *= -1
stats[idtup][can] = max(val, stats[idtup].get(can,-1e10))
else:
stats[idtup][can] = val
allstats[idtup].append(mystats)
else:
print("unmatched line: %s" % line)
sys.exit(1)
# print(stats)
# def format_num(num):
# if num > 1e6:
# return "%.03gM" % (float(num)/1e6)
# elif num > 1e3:
# return "%.03gk" % (float(num)/1e3)
# else:
# return "%.03g" % float(num)
# TODO: There must be a better way to do this...
def format_num(num):
fnum = float(num)
val = "%.03g" % fnum
if fnum >= 1 and fnum < 10:
val = "%.2f" % fnum
elif fnum >= 10 and fnum < 100:
val = "%.1f" % fnum
elif float(num) > 1000:
val = "%.f" % float(val)
return val
def getmaxstat(task, device, toolkit, setting, stat, mult=1):
my_stats = []
for trial in range(1,4):
my_id = (task, device, toolkit, setting, trial)
if my_id in stats and stat in stats[my_id]:
my_stats.append(mult*stats[my_id][stat])
return format_num(mult*max(my_stats)) if len(my_stats) > 0 else "TODO"
def getminstat(task, device, toolkit, setting, stat):
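    # Reuses getmaxstat with mult=-1: the maximum over negated values, negated
    # once more before formatting, is the minimum.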
    return getmaxstat(task, device, toolkit, setting, stat, mult=-1)
###### First section: toolkit comparison
# CPU/GPU speeds for all toolkits/tasks
tasks = [
("RNNLM (MB=1) ", "rnnlm-batch", "ms01-es128-hs256-sp0"),
("RNNLM (MB=4)", "rnnlm-batch", "ms04-es128-hs256-sp0"),
("RNNLM (MB=16)", "rnnlm-batch", "ms16-es128-hs256-sp0"),
("RNNLM (MB=64)", "rnnlm-batch", "ms64-es128-hs256-sp0"),
("BiLSTM Tag", "bilstm-tagger", "ws128-hs50-mlps32-su0"),
("BiLSTM Tag +sparse", "bilstm-tagger", "ws128-hs50-mlps32-su1"),
("BiLSTM Tag+Char", "bilstm-tagger-withchar", "cs20-ws128-hs50-mlps32-su0"),
("BiLSTM Tag+Char +sparse", "bilstm-tagger-withchar", "cs20-ws128-hs50-mlps32-su1"),
("TreeLSTM", "treenn", "ws128-hs128-su0"),
("TreeLSTM +sparse", "treenn", "ws128-hs128-su1"),
]
def make_speed_table(device):
print("\\begin{table}")
print("\\begin{tabular}{c|rrr|rrr}")
print(" & "+" & ".join([prettyname[x] for x in toolkits])+" \\\\ \hline")
for name, task, setting in tasks:
cols = [name]
for i, toolkit in enumerate(toolkits):
if (toolkit, task) in taskna:
cols.append("\\multicolumn{1}{c}{-}")
else:
cols.append(getmaxstat(task, device, toolkit, setting, "speed"))
print(" & ".join(cols)+" \\\\")
print("\\end{tabular}")
print("\\caption{Processing speed for each toolkit on %s. Speeds are measured in words/sec for RNNLM and Tagger and sentences/sec for TreeLSTM.}" % device.upper())
print("\\label{tab:speeds%s}" % device)
print("\\end{table}")
print("")
make_speed_table("cpu")
make_speed_table("gpu")
# Startup time table
tasks = [
("RNNLM", "rnnlm-batch", "ms01-es128-hs256-sp0"),
("BiLSTM Tag", "bilstm-tagger", "ws128-hs50-mlps32-su0"),
("BiLSTM Tag+Char", "bilstm-tagger-withchar", "cs20-ws128-hs50-mlps32-su0"),
("TreeLSTM", "treenn", "ws128-hs128-su0"),
]
print("\\begin{table}")
print("\\begin{tabular}{c|rrr|rrr}")
print(" & "+" & ".join([prettyname[x] for x in toolkits])+" \\\\ \hline")
for name, task, setting in tasks:
cols = [name]
for i, toolkit in enumerate(toolkits):
if (toolkit, task) in taskna:
cols.append("\\multicolumn{1}{c}{-}")
else:
cols.append(getminstat(task, device, toolkit, setting, "startup"))
print(" & ".join(cols)+" \\\\")
print("\\end{tabular}")
print("\\caption{Startup time for programs written in each toolkit.}")
print("\\label{tab:startup}")
print("\\end{table}")
print("")
# Code complexities
def get_code_complexity(toolkit, task):
chars = 0
if toolkit == "dynet-seq":
if not task == "rnnlm-batch":
return "\\multicolumn{1}{c}{-}"
toolkit = "dynet-cpp"
task = "rnnlm-seq"
if (toolkit, task) in taskna:
return "\\multicolumn{1}{c}{-}"
with open("%s/%s.%s" % (toolkit, task, "cc" if toolkit == "dynet-cpp" else "py"), "r") as f:
for line in f:
line = re.sub(commentregex, "", line.strip())
chars += len(line)
return str(chars)
tasks = [
("RNNLM", "rnnlm-batch"),
("BiLSTM Tag", "bilstm-tagger"),
("BiLSTM Tag+Char", "bilstm-tagger-withchar"),
("TreeLSTM", "treenn"),
]
print("\\begin{table}")
print("\\begin{tabular}{c|rrrrrr}")
print(" & "+" & ".join([prettyname[x] for x in toolkits])+" \\\\ \hline")
for name, task in tasks:
cols = [name]
for i, toolkit in enumerate(toolkits):
cols.append(get_code_complexity(toolkit, task))
print(" & ".join(cols)+" \\\\")
print("\\end{tabular}")
print("\\caption{Number of non-comment characters in the implementation of each toolkit.}")
print("\\label{tab:complexity}")
print("\\end{table}")
print("")
###### Second section: effect of minibatching and net size
###### Third section: effect of sparse update
tasks = [
("RNNLM (MB=1) ", "rnnlm-batch", "ms01-es128-hs256-sp"),
("RNNLM (MB=16)", "rnnlm-batch", "ms16-es128-hs256-sp"),
("BiLSTM Tag", "bilstm-tagger", "ws128-hs50-mlps32-su"),
("BiLSTM Tag+Char", "bilstm-tagger-withchar", "cs20-ws128-hs50-mlps32-su"),
("TreeLSTM", "treenn", "ws128-hs128-su"),
]
print("\\begin{table}")
print("\\begin{tabular}{c|rr|rr|rr|rr}")
print(" & \\multicolumn{4}{c|}{Speed} & \\multicolumn{4}{c}{Accuracy} \\\\")
print(" & \\multicolumn{2}{c|}{Dense} & \\multicolumn{2}{c|}{Sparse} & \\multicolumn{2}{c|}{Dense} & \\multicolumn{2}{c}{Sparse} \\\\")
print(" & "+" & ".join(["CPU & GPU"] * 4)+" \\\\ \\hline")
for name, task, setting in tasks:
cols = [name]
for criterion in ("speed", "accuracy"):
for ds in ("0", "1"):
for device in ("cpu", "gpu"):
cols.append(getmaxstat(task, device, "dynet-cpp", setting+ds, criterion))
print(" & ".join(cols)+" \\\\")
print("\\end{tabular}")
print("\\caption{Processing speed and accuracy after 10 minutes with dense or sparse updates.}")
print("\\label{tab:sparseresults}")
print("\\end{table}")
print("")
| [
"[email protected]"
] | |
468730321e33a424ae9e609b460c46656deba515 | 63ae3faa596333fa89ed6059332ed956b6dd3ae1 | /career/models/base.py | 518e8700dd9167d358c289a74f2d9d65c6953a78 | [] | no_license | wyzane/tornado-skill-general | 28470d786dc949a2616143b0b35e593d90f2fdc4 | 915ec990a20b2bb76d56c040cade57a7627d0ebe | refs/heads/master | 2020-06-13T13:43:02.401369 | 2019-07-28T08:20:17 | 2019-07-28T08:20:17 | 194,675,732 | 0 | 0 | null | 2019-07-28T08:20:18 | 2019-07-01T13:14:11 | Python | UTF-8 | Python | false | false | 432 | py | # from sqlalchemy import Table, MetaData
# from sqlalchemy.ext.declarative import declarative_base
#
# from config.db import engine_db_career
#
#
# Base = declarative_base()
#
#
# def model(table):
# class BaseModel(Base):
# __tablename__ = table
# metadata = MetaData(engine_db_career)
#
#         # reflect the existing database table of the same name (autoload)
# Table(__tablename__, metadata, autoload=True)
#
# return BaseModel | [
"[email protected]"
] | |
df195ccf60e3003e51d78a0956fd1691ae0fb0b1 | 6a6984544a4782e131510a81ed32cc0c545ab89c | /src/icetray/python/i3logging.py | efb475dd0d2c1a00a4a5b5832c425bca34254a58 | [] | no_license | wardVD/IceSimV05 | f342c035c900c0555fb301a501059c37057b5269 | 6ade23a2fd990694df4e81bed91f8d1fa1287d1f | refs/heads/master | 2020-11-27T21:41:05.707538 | 2016-09-02T09:45:50 | 2016-09-02T09:45:50 | 67,210,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,068 | py |
import logging, traceback
from icecube.icetray import I3Logger, I3LogLevel
class LoggingBridge(I3Logger):
pylevels = {
I3LogLevel.LOG_TRACE : 5,
I3LogLevel.LOG_DEBUG : logging.DEBUG,
I3LogLevel.LOG_INFO : logging.INFO,
I3LogLevel.LOG_NOTICE : 25,
I3LogLevel.LOG_WARN : logging.WARNING,
I3LogLevel.LOG_ERROR : logging.ERROR,
I3LogLevel.LOG_FATAL : logging.CRITICAL,
}
i3levels = dict([(v, k) for k, v in pylevels.items()])
def __init__(self):
I3Logger.__init__(self)
self.getLogger("").setLevel(logging.INFO)
def getLogger(self, unit):
if len(unit) > 0:
name = "icecube.%s" % unit
else:
name = "icecube"
return logging.getLogger(name)
def log(self, level, unit, file, line, func, msg):
logger = self.getLogger(unit)
if logger.isEnabledFor(self.pylevels[level]):
record = logging.LogRecord(logger.name, self.pylevels[level], file, line, msg, tuple(), None, None)
logger.handle(record)
def get_level_for_unit(self, unit):
return self.i3levels.get(self.getLogger(unit).getEffectiveLevel(), I3LogLevel.LOG_FATAL)
def set_level_for_unit(self, unit, level):
self.getLogger(unit).setLevel(self.pylevels[level])
def set_level(self, level):
self.getLogger("").setLevel(self.pylevels[level])
class ColorFormatter(logging.Formatter):
def format(self, record):
record.message = record.getMessage()
        if self._fmt.find("%(asctime)") >= 0:
record.asctime = self.formatTime(record, self.datefmt)
d = dict(record.__dict__)
if record.levelname in ("CRITICAL", "ERROR"):
d['levelname'] = "\x1b[1;31m %s \x1b[0m" % d['levelname']
d['filename'] = "\x1b[1m%s\x1b[0m" % d['filename']
s = self._fmt % d
return "\x1b[1m%s\x1b[0m" % s
BASIC_FORMAT = "%(filename)s:%(lineno)s %(levelname)s: %(message)s"
def _setup(format=BASIC_FORMAT):
logging.addLevelName(5, 'TRACE')
logging.basicConfig(format=format)
I3Logger.global_logger = LoggingBridge()
def console(colors=True):
import sys
from os import isatty
_setup()
if colors and isatty(sys.stderr.fileno()):
logging.root.handlers[0].setFormatter(ColorFormatter(BASIC_FORMAT))
else:
logging.root.handlers[0].setFormatter(logging.Formatter(BASIC_FORMAT))
def rotating_files(filename, maxBytes=0, backupCount=0):
from logging.handlers import RotatingFileHandler
_setup()
handler = RotatingFileHandler(filename, maxBytes=maxBytes, backupCount=backupCount)
handler.setFormatter(logging.Formatter("[%(asctime)s] "+BASIC_FORMAT))
logging._acquireLock()
logging.root.handlers = list()
logging.root.addHandler(handler)
logging._releaseLock()
def syslog():
from logging.handlers import SysLogHandler
_setup()
handler = SysLogHandler()
handler.setFormatter(logging.Formatter("[%(asctime)s] "+BASIC_FORMAT))
logging._acquireLock()
logging.root.handlers = list()
logging.root.addHandler(handler)
logging._releaseLock()
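# Typical wiring (a sketch; the exact import path depends on how the
# icetray Python bindings are installed):
#   from icecube.icetray import i3logging
#   i3logging.console()                                # colored stderr output
#   i3logging.set_level('INFO')                        # global threshold
#   i3logging.set_level_for_unit('I3Reader', 'TRACE')  # per-unit override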
def _translate_level(name):
if isinstance(name, I3LogLevel):
return name
elif hasattr(I3LogLevel, 'LOG_'+name.upper()):
return getattr(I3LogLevel, 'LOG_'+name.upper())
else:
raise ValueError("Unknown logging level '%s'" % name)
def set_level(level):
"""
Set the global logging level.
:param level: the log level. This may also be specified as a string.
Examples::
icetray.logging.set_level(icetray.logging.I3LogLevel.LOG_INFO)
icetray.logging.set_level('INFO')
"""
I3Logger.global_logger.set_level(_translate_level(level))
def set_level_for_unit(unit, level):
"""
Set the logging level for a specific logging unit.
:param level: the log level. This may also be specified as a string.
Examples::
icetray.logging.set_level_for_unit('I3Reader', icetray.logging.I3LogLevel.LOG_TRACE)
icetray.logging.set_level('I3Reader', 'TRACE')
"""
I3Logger.global_logger.set_level_for_unit(unit, _translate_level(level))
def log_trace(message, unit="Python"):
tb = traceback.extract_stack(limit=2)[0]
I3Logger.global_logger.log(I3LogLevel.LOG_TRACE, unit, tb[0], tb[1],
tb[2], message)
def log_debug(message, unit="Python"):
tb = traceback.extract_stack(limit=2)[0]
I3Logger.global_logger.log(I3LogLevel.LOG_DEBUG, unit, tb[0], tb[1],
tb[2], message)
def log_info(message, unit="Python"):
tb = traceback.extract_stack(limit=2)[0]
I3Logger.global_logger.log(I3LogLevel.LOG_INFO, unit, tb[0], tb[1],
tb[2], message)
def log_notice(message, unit="Python"):
tb = traceback.extract_stack(limit=2)[0]
I3Logger.global_logger.log(I3LogLevel.LOG_NOTICE, unit, tb[0], tb[1],
tb[2], message)
def log_warn(message, unit="Python"):
tb = traceback.extract_stack(limit=2)[0]
I3Logger.global_logger.log(I3LogLevel.LOG_WARN, unit, tb[0], tb[1],
tb[2], message)
def log_error(message, unit="Python"):
tb = traceback.extract_stack(limit=2)[0]
I3Logger.global_logger.log(I3LogLevel.LOG_ERROR, unit, tb[0], tb[1],
tb[2], message)
def log_fatal(message, unit="Python"):
tb = traceback.extract_stack(limit=2)[0]
I3Logger.global_logger.log(I3LogLevel.LOG_FATAL, unit, tb[0], tb[1],
tb[2], message)
raise RuntimeError(message + " (in " + tb[2] + ")")
| [
"[email protected]"
] | |
1180150b5396a4f4f5f32bcfded61140220148f6 | 612325535126eaddebc230d8c27af095c8e5cc2f | /depot_tools/recipe_modules/tryserver/example.py | ee0d0c9b7ba47b44b49510cebbd1e8c402c7da6f | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'tryserver',
]
def RunSteps(api):
api.path['checkout'] = api.path['slave_build']
api.tryserver.maybe_apply_issue()
api.tryserver.get_files_affected_by_patch()
if api.tryserver.is_tryserver:
api.tryserver.set_subproject_tag('v8')
api.tryserver.set_patch_failure_tryjob_result()
api.tryserver.set_compile_failure_tryjob_result()
api.tryserver.set_test_failure_tryjob_result()
api.tryserver.set_invalid_test_results_tryjob_result()
with api.tryserver.set_failure_hash():
api.python.failing_step('fail', 'foo')
def GenTests(api):
yield (api.test('with_svn_patch') +
api.properties(patch_url='svn://checkout.url'))
yield (api.test('with_git_patch') +
api.properties(
patch_storage='git',
patch_project='v8',
patch_repo_url='http://patch.url/',
patch_ref='johndoe#123.diff'))
yield (api.test('with_rietveld_patch') +
api.properties.tryserver())
yield (api.test('with_wrong_patch') + api.platform('win', 32))
| [
"[email protected]"
] | |
17d0a7ae053230e48a78d11e61f5bbb734bedea7 | a82418f3d62b944a27b6e9000829af54b7575893 | /psets_gensim_v1/2018/cfgs_update_noFilter/cfg_hiddenValleyGridPack_vector_m_10_ctau_100_xiO_1.py | b6d42b1da295fb78cdd8daf9fa567a4be76ed208 | [] | no_license | mcitron/hiddenValleyGeneration | abb347a30319ce5f230e0e1248a4259bf4cc4b1b | 5d165be91ae082fdba790506bfb11a026d602787 | refs/heads/master | 2023-04-08T13:34:56.835752 | 2021-04-28T17:14:46 | 2021-04-28T17:17:14 | 362,550,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,190 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/cfgs_update_noFilter/hiddenValleyGridPack_vector_m_10_ctau_100_xiO_1.py --python_filename cfg_hiddenValleyGridPack_vector_m_10_ctau_100_xiO_1.py --eventcontent RAWSIM --customise Configuration/DataProcessing/Utils.addMonitoring --datatier GEN-SIM --fileout file:output.root --conditions 102X_upgrade2018_realistic_v11 --beamspot Realistic25ns13TeVEarly2018Collision --customise_commands process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(0) --step LHE,GEN,SIM --geometry DB:Extended --era Run2_2018 --no_exec --mc -n 10
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('SIM',eras.Run2_2018)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic25ns13TeVEarly2018Collision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('Configuration/GenProduction/python/cfgs_update_noFilter/hiddenValleyGridPack_vector_m_10_ctau_100_xiO_1.py nevts:10'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(1),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(20971520),
fileName = cms.untracked.string('file:output.root'),
outputCommands = process.RAWSIMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
process.XMLFromDBSource.label = cms.string("Extended")
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_upgrade2018_realistic_v11', '')
process.generator = cms.EDFilter("Pythia8HadronizerFilter",
PythiaParameters = cms.PSet(
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CP5Settings',
'pythia8PSweightsSettings',
'pythia8PowhegEmissionVetoSettings',
'processParameters'
),
processParameters = cms.vstring(
'POWHEG:nFinal = 1',
'ParticleDecays:limitTau0= off',
'HiggsSM:gg2H = on',
'25:m0 =125',
'25:addChannel = 1 1.0 102 4900101 -4900101',
'25:0:onMode=0',
'25:1:onMode=0',
'25:2:onMode=0',
'25:3:onMode=0',
'25:4:onMode=0',
'25:5:onMode=0',
'25:6:onMode=0',
'25:7:onMode=0',
'25:8:onMode=0',
'25:9:onMode=0',
'25:10:onMode=0',
'25:11:onMode=0',
'25:12:onMode=0',
'25:13:onMode=0',
'HiddenValley:Ngauge = 3',
'HiddenValley:nFlav = 1',
'HiddenValley:fragment = on',
'HiddenValley:FSR = on',
'HiddenValley:alphaOrder = 1',
'HiddenValley:Lambda = 10.0',
'HiddenValley:pTminFSR = 11.0',
'HiddenValley:spinFv=0',
'4900101:m0 = 4.0',
'4900111:m0 = 10',
'4900113:m0 = 10.0',
'4900113:addChannel = 1 0.051 91 1 -1',
'4900113:addChannel = 1 0.203 91 2 -2',
'4900113:addChannel = 1 0.051 91 3 -3',
'4900113:addChannel = 1 0.203 91 4 -4',
'4900113:addChannel = 1 0.037 91 5 -5',
'4900113:addChannel = 1 0.152 91 11 -11',
'4900113:addChannel = 1 0.152 91 13 -13',
'4900113:addChannel = 1 0.151 91 15 -15',
'4900113:tau0 = 1000',
'4900111:onMode = 0'
),
pythia8CP5Settings = cms.vstring(
'Tune:pp 14',
'Tune:ee 7',
'MultipartonInteractions:ecmPow=0.03344',
'PDF:pSet=20',
'MultipartonInteractions:bProfile=2',
'MultipartonInteractions:pT0Ref=1.41',
'MultipartonInteractions:coreRadius=0.7634',
'MultipartonInteractions:coreFraction=0.63',
'ColourReconnection:range=5.176',
'SigmaTotal:zeroAXB=off',
'SpaceShower:alphaSorder=2',
'SpaceShower:alphaSvalue=0.118',
'SigmaProcess:alphaSvalue=0.118',
'SigmaProcess:alphaSorder=2',
'MultipartonInteractions:alphaSvalue=0.118',
'MultipartonInteractions:alphaSorder=2',
'TimeShower:alphaSorder=2',
'TimeShower:alphaSvalue=0.118'
),
pythia8CommonSettings = cms.vstring(
'Tune:preferLHAPDF = 2',
'Main:timesAllowErrors = 10000',
'Check:epTolErr = 0.01',
'Beams:setProductionScalesFromLHEF = off',
'SLHA:keepSM = on',
'SLHA:minMassSM = 1000.',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tau0Max = 10',
'ParticleDecays:allowPhotonRadiation = on'
),
pythia8PSweightsSettings = cms.vstring(
'UncertaintyBands:doVariations = on',
'UncertaintyBands:List = {isrRedHi isr:muRfac=0.707,fsrRedHi fsr:muRfac=0.707,isrRedLo isr:muRfac=1.414,fsrRedLo fsr:muRfac=1.414,isrDefHi isr:muRfac=0.5,fsrDefHi fsr:muRfac=0.5,isrDefLo isr:muRfac=2.0,fsrDefLo fsr:muRfac=2.0,isrConHi isr:muRfac=0.25,fsrConHi fsr:muRfac=0.25,isrConLo isr:muRfac=4.0,fsrConLo fsr:muRfac=4.0,fsr_G2GG_muR_dn fsr:G2GG:muRfac=0.5,fsr_G2GG_muR_up fsr:G2GG:muRfac=2.0,fsr_G2QQ_muR_dn fsr:G2QQ:muRfac=0.5,fsr_G2QQ_muR_up fsr:G2QQ:muRfac=2.0,fsr_Q2QG_muR_dn fsr:Q2QG:muRfac=0.5,fsr_Q2QG_muR_up fsr:Q2QG:muRfac=2.0,fsr_X2XG_muR_dn fsr:X2XG:muRfac=0.5,fsr_X2XG_muR_up fsr:X2XG:muRfac=2.0,fsr_G2GG_cNS_dn fsr:G2GG:cNS=-2.0,fsr_G2GG_cNS_up fsr:G2GG:cNS=2.0,fsr_G2QQ_cNS_dn fsr:G2QQ:cNS=-2.0,fsr_G2QQ_cNS_up fsr:G2QQ:cNS=2.0,fsr_Q2QG_cNS_dn fsr:Q2QG:cNS=-2.0,fsr_Q2QG_cNS_up fsr:Q2QG:cNS=2.0,fsr_X2XG_cNS_dn fsr:X2XG:cNS=-2.0,fsr_X2XG_cNS_up fsr:X2XG:cNS=2.0,isr_G2GG_muR_dn isr:G2GG:muRfac=0.5,isr_G2GG_muR_up isr:G2GG:muRfac=2.0,isr_G2QQ_muR_dn isr:G2QQ:muRfac=0.5,isr_G2QQ_muR_up isr:G2QQ:muRfac=2.0,isr_Q2QG_muR_dn isr:Q2QG:muRfac=0.5,isr_Q2QG_muR_up isr:Q2QG:muRfac=2.0,isr_X2XG_muR_dn isr:X2XG:muRfac=0.5,isr_X2XG_muR_up isr:X2XG:muRfac=2.0,isr_G2GG_cNS_dn isr:G2GG:cNS=-2.0,isr_G2GG_cNS_up isr:G2GG:cNS=2.0,isr_G2QQ_cNS_dn isr:G2QQ:cNS=-2.0,isr_G2QQ_cNS_up isr:G2QQ:cNS=2.0,isr_Q2QG_cNS_dn isr:Q2QG:cNS=-2.0,isr_Q2QG_cNS_up isr:Q2QG:cNS=2.0,isr_X2XG_cNS_dn isr:X2XG:cNS=-2.0,isr_X2XG_cNS_up isr:X2XG:cNS=2.0}',
'UncertaintyBands:nFlavQ = 4',
'UncertaintyBands:MPIshowers = on',
'UncertaintyBands:overSampleFSR = 10.0',
'UncertaintyBands:overSampleISR = 10.0',
'UncertaintyBands:FSRpTmin2Fac = 20',
'UncertaintyBands:ISRpTmin2Fac = 1'
),
pythia8PowhegEmissionVetoSettings = cms.vstring(
'POWHEG:veto = 1',
'POWHEG:pTdef = 1',
'POWHEG:emitted = 0',
'POWHEG:pTemt = 0',
'POWHEG:pThard = 0',
'POWHEG:vetoCount = 100',
'SpaceShower:pTmaxMatch = 2',
'TimeShower:pTmaxMatch = 2'
)
),
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(1),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(10),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1)
)
process.externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/2017/13TeV/powheg/V2/gg_H_quark-mass-effects_NNPDF31_13TeV_M125/v1/gg_H_quark-mass-effects_NNPDF31_13TeV_M125_slc6_amd64_gcc630_CMSSW_9_3_0.tgz'),
nEvents = cms.untracked.uint32(10),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
# Path and EndPath definitions
process.lhe_step = cms.Path(process.externalLHEProducer)
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.lhe_step,process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.RAWSIMoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# filter all path with the production filter sequence
for path in process.paths:
if path in ['lhe_step']: continue
getattr(process,path)._seq = process.generator * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# End of customisation functions
# Customisation from command line
process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(0)
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| [
"[email protected]"
] | |
4b949d091e01e60d0542cd3a3d47c56894232e58 | 7ac271f357f4c8f0c23c697b11966259f836880f | /app/web/exception.py | 96c30998834aeb4bf7f41d79bdd54f9610cf7fca | [] | no_license | cheng93/PythonWeb | 74a58eadee4ee7d2872a582a907bbf47630df371 | d5ced8dee1d5ba31778125c5e67169c92acf26a0 | refs/heads/develop | 2021-01-19T23:59:11.315871 | 2018-03-04T19:26:18 | 2018-03-04T19:26:18 | 89,063,916 | 0 | 0 | null | 2018-03-04T19:26:19 | 2017-04-22T11:09:14 | Python | UTF-8 | Python | false | false | 134 | py | from pyramid.view import view_config
@view_config(route_name='throw_exception')
def throw_exception(request):
raise Exception()
| [
"[email protected]"
] | |
56860c88d4bc8dd9e78a1b54940265c25bb42bfa | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/FreeOpcUa_python-opcua/python-opcua-master/release.py | fc05a90bf5b60295b7aab2321c144549eb9401a9 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,120 | py | import re
import os
def bump_version():
with open("setup.py") as f:
s = f.read()
m = re.search(r'version="(.*)\.(.*)\.(.*)",', s)
v1, v2, v3 = m.groups()
oldv = "{0}.{1}.{2}".format(v1, v2, v3)
newv = "{0}.{1}.{2}".format(v1, v2, str(int(v3) + 1))
print("Current version is: {0}, write new version, ctrl-c to exit".format(oldv))
ans = input(newv)
if ans:
newv = ans
s = s.replace(oldv, newv)
with open("setup.py", "w") as f:
f.write(s)
return newv
def release():
v = bump_version()
ans = input("version bumped, commiting?(Y/n)")
if ans in ("", "y", "yes"):
os.system("git add setup.py")
os.system("git commit -m 'new release'")
os.system("git tag {0}".format(v))
ans = input("change committed, push to server?(Y/n)")
if ans in ("", "y", "yes"):
os.system("git push")
os.system("git push --tags")
ans = input("upload to pip?(Y/n)")
if ans in ("", "y", "yes"):
os.system("python setup.py sdist upload")
if __name__ == "__main__":
release()
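# Expected interaction (a sketch mirroring the prompts printed above; the
# version numbers are hypothetical and setup.py is assumed to be in the
# working directory, with git and PyPI credentials configured):
#   $ python release.py
#   Current version is: 0.9.1, write new version, ctrl-c to exit
#   0.9.2
#   version bumped, commiting?(Y/n)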
| [
"[email protected]"
] | |
4c979a9566f34d6b5958baa4ca8d9883abd3afa6 | 845d96ba5efe898a1e1272c862f476e983e54d46 | /10/src/gomoku.py | 6ee645b50621b32a80852855980e5a4f804a1274 | [] | no_license | ppaanngggg/rl_course | fe5d092d87e61149857f9ee085ac73d98d377cb9 | 05f1db88e96ea9ff21ed5028dc8d5b7e9539bcee | refs/heads/master | 2020-04-30T02:17:05.125542 | 2019-03-19T16:28:13 | 2019-03-19T16:28:13 | 176,554,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,210 | py | import torch
class Gomoku:
"""
play1: 1,
play2: -1,
empty: 0,
"""
def __init__(
self, _size=5, _win_num=4, _player=1, _terminal=None, _history=None, _board=None
):
self.size = _size
self.win_num = _win_num
self.num_actions = _size * _size
self.player = _player # cur player, 1 or -1
self.terminal = _terminal # 1, -1 or 0 for draw, None for unknown
self.history = [] if _history is None else _history
self.board = torch.zeros(self.size, self.size) if _board is None else _board
def clone(self):
return Gomoku(
self.size,
self.win_num,
self.player,
self.terminal,
self.history[:],
self.board.clone(),
)
def get_input(self):
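        # Stack three planes for the learner: the board as seen by the
        # player to move, plus one-hot planes marking the last two moves.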
ac_1 = torch.zeros(self.size, self.size)
try:
x, y = self._action2loc(self.history[-1])
ac_1[x, y] = 1
except IndexError:
pass
ac_2 = torch.zeros(self.size, self.size)
try:
x, y = self._action2loc(self.history[-2])
ac_2[x, y] = 1
except IndexError:
pass
return torch.stack([self.board * self.player, ac_1, ac_2])
def get_mask(self):
return (self.board == 0).flatten()
def _check_horizon(self, _value, _x, _y):
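        # Count contiguous stones of _value to the left and right of the
        # new stone; a combined run of win_num or more decides the game.
        # The three checks below repeat the same two-direction scan along
        # the vertical and both diagonals.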
left = 0
y_left = _y - 1
while y_left >= 0 and self.board[_x, y_left] == _value:
left += 1
y_left -= 1
right = 0
y_right = _y + 1
while y_right < self.size and self.board[_x, y_right] == _value:
right += 1
y_right += 1
if left + right + 1 >= self.win_num: # horizon win
self.terminal = _value
return self.terminal
def _check_vertical(self, _value, _x, _y):
up = 0
x_up = _x - 1
while x_up >= 0 and self.board[x_up, _y] == _value:
up += 1
x_up -= 1
down = 0
x_down = _x + 1
while x_down < self.size and self.board[x_down, _y] == _value:
down += 1
x_down += 1
if up + down + 1 >= self.win_num: # vertical win
self.terminal = _value
return self.terminal
def _check_inv_slash(self, _value, _x, _y):
up_left = 0
x_up = _x - 1
y_left = _y - 1
while x_up >= 0 and y_left >= 0 and self.board[x_up, y_left] == _value:
up_left += 1
x_up -= 1
y_left -= 1
down_right = 0
x_down = _x + 1
y_right = _y + 1
while (
x_down < self.size
and y_right < self.size
and self.board[x_down, y_right] == _value
):
down_right += 1
x_down += 1
y_right += 1
if up_left + down_right + 1 >= self.win_num: # inv slash win
self.terminal = _value
return self.terminal
def _check_slash(self, _value, _x, _y):
up_right = 0
x_up = _x - 1
y_right = _y + 1
while x_up >= 0 and y_right < self.size and self.board[x_up, y_right] == _value:
up_right += 1
x_up -= 1
y_right += 1
down_left = 0
x_down = _x + 1
y_left = _y - 1
while (
x_down < self.size and y_left >= 0 and self.board[x_down, y_left] == _value
):
down_left += 1
x_down += 1
y_left -= 1
if up_right + down_left + 1 >= self.win_num: # slash win
self.terminal = _value
return self.terminal
def _action2loc(self, _action: int):
# get loc
return _action // self.size, _action % self.size
def action(self, _action: int):
x, y = self._action2loc(_action)
# update board and player
assert self.board[x, y] == 0 and self.terminal is None
value = self.player
self.board[x, y] = value
self.history.append(_action)
self.player *= -1
# check terminal
if self._check_horizon(value, x, y) is not None:
return self.terminal
if self._check_vertical(value, x, y) is not None:
return self.terminal
if self._check_inv_slash(value, x, y) is not None:
return self.terminal
if self._check_slash(value, x, y) is not None:
return self.terminal
# check draw
if len(self.history) == self.size * self.size:
self.terminal = 0
return self.terminal
def __repr__(self):
return (
f"Cur Player: {self.player}\n"
f"History: {self.history}\n"
f"Terminal: {self.terminal}\n"
f"Board:\n"
f"{self.board}"
)
def human_self(self):
print(self)
while self.terminal is None:
x = input("Please input x: ")
y = input("Please input y: ")
action = int(x) * self.size + int(y)
self.action(action)
print(self)
print(f"!!Result!!: {self.terminal}")
if __name__ == "__main__":
gomoku = Gomoku()
gomoku.human_self()
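# A scripted game (hypothetical alternative to the interactive loop above):
#   g = Gomoku(_size=5, _win_num=4)
#   for move in (0, 5, 1, 6, 2, 7, 3):  # player 1 fills row 0, cols 0-3
#       result = g.action(move)
#   assert result == 1                  # 1 = player 1 wins, 0 = draw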
| [
"[email protected]"
] | |
d61d6a841af3adb181ad6568f445d6147da3ef3d | 657f0a463bb47799db522ac18c720699f34206aa | /main.py | 7e01829b0633c66dd5af8a20bcb22784ca17f79d | [] | no_license | nakamura9/isocal_android | 1f93d61acb1ca47151d468831daee98370e75666 | 28b42a4da327e028cab467fab339de5deb50cd9c | refs/heads/master | 2022-12-13T01:21:18.803343 | 2019-09-02T08:43:25 | 2019-09-02T08:43:25 | 78,008,496 | 0 | 0 | null | 2022-12-08T06:06:12 | 2017-01-04T11:08:04 | Python | UTF-8 | Python | false | false | 60,219 | py | '''
Created on Apr 5, 2016
@author: caleb kandoro
'''
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import (ScreenManager, Screen,
FadeTransition, SlideTransition)
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.spinner import Spinner
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import (StringProperty, ObjectProperty,
NumericProperty)
from kivy.uix.anchorlayout import AnchorLayout
from kivy.storage.jsonstore import JsonStore
from kivy.uix.actionbar import ActionItem
from kivy.uix.scrollview import ScrollView
from kivy.core.window import Window
from kivy.uix.progressbar import ProgressBar
from kivy.uix.popup import Popup
from kivy.network.urlrequest import UrlRequest
try:
from _datetime import timedelta
except:
from datetime import timedelta
try:
from urllib import urlencode
except:
from urllib.parse import urlencode
import datetime
import time
import os
import utilities as t
import requests
from kivy.core.text import LabelBase
LabelBase.register(name= "Modern Pictograms",
fn_regular = os.path.join(os.path.abspath(os.getcwd()), "ModernPictograms.ttf"))
root = os.getcwd()
Window.clearcolor = [1, 1, 1, 1]
balance = JsonStore("BALANCE.json")
customers = JsonStore("CUSTOMERS.json")
standards = JsonStore("STANDARDS.json")
general = JsonStore("GENERAL.json")
accounts = JsonStore("accounts.json")
outstanding = JsonStore("OUTSTANDING.json")
autoclave = JsonStore("AUTOCLAVE.json")
electrical = JsonStore("ELECTRICAL.json")
accounts.put("isocal", password="17025")
current_user = ""
class Heading(Label):
pass
class Cell(Label):
pass
class UploadMessage(Label):
pass
class AltCell(Cell):
pass
class MyActionButton(ActionItem, Button):
pass
class DataApp(App):
pass
class WhiteBoxLayout(BoxLayout):
pass
class WhiteScreen(Screen):
pass
class Base(WhiteBoxLayout):
def validate(self, user, password,_label):
global accounts
global current_user
global general
if accounts.exists(user):
if password == accounts.get(user)["password"]:
current_user = user
self.cal = []
self.remove_widget(self.children[0])
self.add_widget(Home())
self.home = self.children[0]
else:
_label.text = "wrong password"
else:
_label.text = "wrong name"
class HorizontalInput(WhiteBoxLayout):
name = StringProperty()
val= ObjectProperty()
txt=StringProperty()
class VerticalInput(WhiteBoxLayout):
name = StringProperty()
val= ObjectProperty()
txt=StringProperty()
class WhiteAnchorLayout(AnchorLayout):
pass
class WhiteScreenManager(ScreenManager):
pass
class CenteredTable(BoxLayout):
table= ObjectProperty()
n = NumericProperty()
class General(WhiteBoxLayout):
pass
class Login(WhiteBoxLayout):
pass
class SummaryScreen(Screen):
table = ObjectProperty()
def __init__(self, *args, **kwargs):
super(SummaryScreen, self).__init__(*args, **kwargs)
def generate_table(self):
global outstanding
values = {"Instrument": [],
"Serial": [],
"Customer": [],
"Date": []}
for key in outstanding:
values["Instrument"].append(outstanding[key]["name"])
values["Serial"].append(outstanding[key]["serial"])
values["Customer"].append(outstanding[key]["customer"])
values["Date"].append(outstanding[key]["date"])
self.table.clear_widgets()
self.table.add_widget(table(["Instrument",
"Serial",
"Customer",
"Date"], values))
class Home(WhiteBoxLayout):
sm = ObjectProperty()
title = ObjectProperty()
class HomeScreenManager(WhiteScreenManager):
summary = ObjectProperty()
balance = ObjectProperty()
upload = ObjectProperty()
def __init__(self, *args, **kwargs):
super(HomeScreenManager, self).__init__(*args, **kwargs)
self.transition = FadeTransition()
self.instrument_info = []
self.instrument_specs = []
self.first_readings = ""
def next(self):
if hasattr(self.current_screen, "next"):
self.current_screen.next()
elif hasattr(self.current_screen, "asm"):
if hasattr(self.current_screen.asm.current_screen, "next"):
self.current_screen.asm.current_screen.next()
else:
pass
def record(self):
if hasattr(self.current_screen, "record"):
self.current_screen.record()
elif hasattr(self.current_screen, "asm"):
if hasattr(self.current_screen.asm.current_screen, "record"):
self.current_screen.asm.current_screen.record()
else:
pass
def submit(self):
if hasattr(self.current_screen, "submit"):
self.current_screen.submit()
elif hasattr(self.current_screen, "asm"):
if hasattr(self.current_screen.asm.current_screen, "submit"):
self.current_screen.asm.current_screen.submit()
else:
pass
def prev(self):
if hasattr(self.current_screen, "previous"):
self.current_screen.previous()
elif hasattr(self.current_screen, "asm"):
            if hasattr(self.current_screen.asm.current_screen, "previous"):
self.current_screen.asm.current_screen.previous()
else:
pass
def clear(self):
try:
cur = self.current_screen
cur.clear()
except Exception as e:
try:
cur = self.current_screen.asm.current_screen
cur.clear()
except Exception as e:
pass
def clear_last(self):
try:
cur = self.current_screen
cur.clear_last()
except Exception as e:
try:
cur = self.current_screen.asm.current_screen
cur.clear_last()
except Exception as e:
pass
def change(self, s, title):
self.parent.title.text = title
self.type = self.parent.title.text.split(" ")[0].lower()
self.current = s
def calibrate(self):
if self.parent.title.text=="Balance Calibration":
self.balance.asm.instrument_info = self.instrument_info
self.balance.asm.instrument_specs = self.instrument_specs
self.current = "_balance"
elif self.parent.title.text == "Pressure Calibration":
self.current = "pressure_first"
else:
self.current= "readings"
class InstrumentInfoScreen(WhiteScreen):
nom = StringProperty()
std = StringProperty()
def get_date(self):
next = datetime.date.today() + timedelta(days =180)
return next.strftime("%d/%m/%y")
def change(self):
self.parent.current= "instrument_specs"
def previous(self):
pass
def next(self):
global standards
due = self.ids.due.val.text
name = self.ids.nom.val.text
serial = self.ids.sn.val.text
customer = self.ids.cus.val.text
manufacturer = self.ids.man.val.text
model = self.ids.model.val.text
standard = self.ids.standard.val.text
stds = list(standards.keys())
if name == "" or customer == "" or standard == "":
p=Popup(title="Warning!", size_hint=(0.5, 0.2),
content=Label(text= "Some fields cannot be empty"))
p.open()
elif standard not in standards:
p=Popup(title="Warning!", size_hint=(1, 0.5),
content=Label(text= "The Instrument cannot be calibrated without \n"
"an acompanying standard. These are available:\n"
"{}".format("\n".join(stds))))
p.open()
elif self.parent.type == "mass" and standard not in standards:
p=Popup(title="Warning!", size_hint=(1, 0.6),
content=Label(text= "The balance cannot be calibrated without \n"
"an acompanying standard. These are available:\n"
"{}".format(standards.keys())))
p.open()
else:
self.parent.instrument_info = [name, serial,
customer,
manufacturer,
model,standard, due]
self.ids.nom.val.text =""
self.ids.sn.val.text = ""
self.change()
class InstrumentSpecsScreen(WhiteScreen):
def next(self):
min = self.ids.min.text
max = self.ids.max.text
res = self.ids.res.val.text
try:
float(res)
except:
p=Popup(title="Warning!", size_hint=(0.5, 0.3),
content=Label(text= "Resolution must be a number"))
p.open()
return
units= self.ids.units.val.text
location= self.ids.location.val.text
immersion= self.ids._immersion.val.text
instrument_unit_lexicon={"pressure": ["bar", "psi", "mpa", "kpa", "pa"],
"temperature": ["celsius", "fahrenheit"],
"tds": ["ppm", "ppt", "ppb"],
"volume": ["litre", "millilitre", "cubic meter", "microlitre", "cubic foot"],
"flow": ["l/min", "cf/min", "l/hr", "m3/hr", "cf/hr"],
"balance": ["grams", "kgs"],
"current": ["amp", "milliamp"],
"volts": ["volt", "millivolt", "kilovolt"],
"mass": ["grams", "kgs"],
"ph": ['ph'],
"conductivity": ["siemens"]
}
if min=="" or max=="" or res=="" or units=="" or location=="":
p=Popup(title="Warning!", size_hint=(0.5, 0.3),
content=Label(text= "Some fields cannot be left empty"))
p.open()
else:
if self.parent.type in instrument_unit_lexicon:
if units not in instrument_unit_lexicon[self.parent.type]:
p = Popup(size_hint =(1, 0.6), title = "Warning!",
content=Label(text="You Have entered an invalid unit. Try one of:\n>{}".format( \
"\n> ".join(instrument_unit_lexicon[self.parent.type]))))
p.open()
else:
self.parent.instrument_specs = [min, max, res, units, location, immersion]
self.parent.start_time = datetime.datetime.now().strftime("%H:%M:%S")
self.parent.calibrate()
else:
self.parent.instrument_specs = [min, max, res, units, location, immersion]
self.parent.start_time = datetime.datetime.now().strftime("%H:%M:%S")
self.parent.calibrate()
class CheckCorrections(BoxLayout):
yes= ObjectProperty()
no= ObjectProperty()
class AbstractReadingsScreen(WhiteScreen):
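    # Shared readings workflow: record() validates the two inputs and
    # appends a row, next() snapshots the first pass and asks whether a
    # corrected second pass follows, and submit() writes both passes to
    # the GENERAL store and queues the job in OUTSTANDING.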
def __init__(self, *args, **kwargs):
super(AbstractReadingsScreen, self).__init__(*args, **kwargs)
self.count = 0
self.readings = {"indicated": [],
"actual": []}
self.with_corrections = False
self.called_next = False
self.first = "not yet set"
def clear(self):
for i in self.readings:
self.readings[i] = []
self.ids._table.table.clear_widgets()
def clear_last(self):
headings = []
for i in self.readings:
headings.append(i)
l = len(self.readings[i]) - 1
self.readings[i] = self.readings[i][:l]
self.ids._table.table.clear_widgets()
headings.sort()
self.ids._table.table.add_widget(table(headings, self.readings))
def data_add(self):
        '''Append the current input pair and redraw the table; subclasses
        override this so the validation popups in record() stay shared.'''
self.readings["indicated"].append(self.ids.actual.val.text)
self.readings["actual"].append(self.ids.nominal.val.text)
self.ids._table.table.add_widget(table(["indicated", "actual"], self.readings))
def corrections(self, other):
self.with_corrections = True
self.clear_readings()
def no_corrections(self, other):
self.submit()
    def record(self):
        try:
            float(self.ids.nominal.val.text)
            float(self.ids.actual.val.text)
        except:
            p = Popup(title="Warning", size_hint = (1, 0.3),
                      content=Label(text="The data entered was invalid,\n"
                                         "check if you used a comma(,) \n"
                                         "instead of a period(.)"))
            p.open()
            return
        self.count += 1
if self.ids.nominal.val.text == "" or self.ids.actual.val.text == "":
p = Popup(title="Warning", size_hint = (0.5, 0.1),
content=Label(text="You cannot enter empty fields"))
p.open()
else:
try:
self.ids._table.table.clear_widgets()
self.data_add()
except Exception as e:
p = Popup(size_hint=(0.5, 0.3),
title = "Warning! You have entered and invalid Value",
content=Label(text="Error: {}".format(e)))
p.open()
def next(self):
        if not self.called_next:
            if self.count < 1:
                p = Popup(size_hint=(0.5, 0.1),
                          title = "Warning!",
                          content=Label(text="You have recorded too few values"))
                p.open()
            else:
                self.called_next = True
                self.first = readings_combined(self.readings)
                self.clear_readings()
content = CheckCorrections()
p = Popup(title="Important", size_hint = (1, 0.6),
content=content)
content.yes.bind(on_press = self.corrections)
content.no.bind(on_press =self.no_corrections)
content.yes.bind(on_release= p.dismiss)
content.no.bind(on_release= p.dismiss)
p.open()
else:
self.submit()
def clear_readings(self):
        '''Reset the reading lists, the counter and the on-screen table.'''
for i in self.readings:
self.readings[i] = []
self.count = 0
self.ids._table.table.clear_widgets()
def submit(self):
global general
global outstanding
info = self.parent.instrument_info
specs= self.parent.instrument_specs
if not self.called_next:
self.next()
else:
self.count = 0
id = "{}{}".format(datetime.date.today().strftime("%d%m%y"),
info[1])
general.put(id,# serial
name="general",
due = info[6],
name_of_instrument= info[0], serial=info[1],
customer=info[2],manufacturer=info[3],
_type = self.parent.type,
model=info[4], range=specs[0]+ "-" + specs[1],
resolution=specs[2], units=specs[3], location=specs[4],
immersion_depth = specs[5],
end_time = datetime.datetime.now().strftime("%H:%M:%S"),
start_time= self.parent.start_time,
readings= self.first,
corrections= readings_combined(self.readings),
standards=info[5],
comments= self.ids.comments.text
)
outstanding.put(id,
name=self.parent.instrument_info[0],
customer=self.parent.instrument_info[2],
date=datetime.date.today().strftime("%d/%m/%Y"),
serial=self.parent.instrument_info[1]) #serial
self.parent.instrument_info = []
self.parent.instrument_specs = []
self.readings = {"indicated": [],
"actual": []}
self.first = ""
self.ids._table.table.clear_widgets()
self.called_next = False
self.with_corrections = False
self.parent.summary.generate_table()
self.parent.current= "summary"
class ReadingsScreen(AbstractReadingsScreen):
pass
class PressureReadingsScreen(AbstractReadingsScreen):
def __init__(self, *args, **kwargs):
super(PressureReadingsScreen, self).__init__(*args, **kwargs)
self.readings = {"applied": [],
"calculated": [],
"indicated": []}
self.calculators = {"kpa":t.calculate_pressure_kpa,
"pa":t.calculate_pressure_pa,
"mpa":t.calculate_pressure_mpa,
"bar":t.calculate_pressure_bar,
"psi":t.calculate_pressure_psi}
def data_add(self):
self.readings["applied"].append(self.ids.nominal.val.text)
self.readings["indicated"].append(self.ids.actual.val.text)
if self.count == 1 and self.ids.nominal.val.text == "0":
self.readings["calculated"].append("0")
else:
self.readings["calculated"].append("{:0.2f}".format(self.calculators[self.parent.instrument_specs[3]](
self.ids.nominal.val.text)))
self.ids._table.table.add_widget(table(["applied", "calculated", "indicated"],
self.readings))
def submit(self):
super(PressureReadingsScreen, self).submit()
self.readings = {"applied":[],
"indicated": [],
"calculated": []}
class UploadScreen(WhiteScreen):
'''fix with json'''
messages = ObjectProperty()
def __init__(self, *args, **kwargs):
super(UploadScreen, self).__init__(*args, **kwargs)
self.general = []
self.autoclave = []
self.balance = []
def upload_general(self, key):
global general
global outstanding
global current_user
gen = general[key]
out = outstanding[key]
params = {"user":current_user,
"customer":gen["customer"],
"_type":gen["_type"],
"id":key,
"due": gen["due"],
"date":out["date"],
"instrument":gen["name_of_instrument"],
"sn":gen["serial"],
"man":gen["manufacturer"],
"model":gen["model"],
"_range":gen["range"],
"resolution":gen["resolution"],
"units":gen["units"],
"standard":gen["standards"],
"location":gen["location"],
"start_time":gen["start_time"],
"end_time":gen["end_time"],
"readings":gen["readings"],
"corrections":gen["corrections"],
"immersion":gen["immersion_depth"],
"comments":gen["comments"],
}
try:
req= requests.post("http://{}/mobile/upload_general?".format(self.host), data=params)
if req.text == "success":
#add the list of succesfully uploaded keys then delete them once iteration is over
self.general.append(key)
outstanding.delete(key)
self.messages.item_strings.append("Upload completed sucessfully")
else:
self.messages.item_strings.append("Upload unsuccessful")
except Exception as e:
self.messages.item_strings.append("""An error occured while trying to upload this certificate:
{}""".format(e))
def upload_balance(self, key):
global balance
global outstanding
global current_user
bal = balance[key]
out = outstanding[key]
params = {"user":current_user,
"customer":bal["customer"],
"id":key,
"date":out["date"],
"due": bal["due"],
"sn":bal["serial"],
"start_time":bal["start_time"],
"end_time":bal["end_time"],
"man":bal["manufacturer"],
"model":bal["model"],
"_range":bal["_range"],
"resolution":bal["resolution"],
"units":bal["units"],
"standard":bal["standard"],
"location":bal["location"],
"comments":bal["comments"],
"start_time":bal["start_time"],
"end_time":bal["end_time"],
"procedure":bal["procedure"],
"off_center_mass":bal["off_center_mass"],
"warm_up_nominal":bal["warm_up_nomial"],
"tare":bal["tare"],
"tare_indicated":bal["tare_indicated"],
"repeat_half":bal["repeat_half"],
"repeat_full":bal["repeat_full"],
"off_center":bal["off_center"],
"settling_time":bal["settling_time"],
"nominal_mass":bal["nominal_mass"],
"after_up":bal["after_up"],
"after_uup":bal["after_uup"],
"before_actual": bal["before_actual"],
"before_up":bal["before_up"],
"before_nominal":bal["before_nominal"],
"after_down":bal["after_down"]}
try:
for k, value in params.items():
payload={"key": k,
"value": value}
req= requests.post("http://{}/mobile/upload_balance".format(self.host), data=payload)
if req.text == "success":
self.balance.append(key)
outstanding.delete(key)
self.messages.item_strings.append("Upload completed sucessfully")
else:
self.messages.item_strings.append("Upload unsuccessful")
except Exception as e:
self.messages.item_strings.append("""An error occured while trying to upload this certificate:
{}""".format(e))
def upload_autoclave(self, key):
global autoclave
global outstanding
global current_user
auto = autoclave[key]
params = {"user":current_user,
"id":key,
"customer":auto["customer"],
"start_time":auto["start_time"],
"end_time":auto["end_time"],
"date":auto["date"],
"due":auto["due"],
"serial":auto["serial"],
"immersion_depth":auto["immersion_depth"],
"manufacturer":auto["manufacturer"],
"model":auto["model"],
"range_temp":auto["range_temp"],
"range_p": auto["range_p"],
"resolution_temp":auto["resolution_temp"],
"resolution_p":auto["resolution_p"],
"units_temp":auto["units_temp"],
"units_p":auto["units_p"],
"standard_temp":auto["standard_temp"],
"standard_p":auto["standard_p"],
"location":auto["location"],
"comments":auto["comments"],
"temp":auto["temp"],
"pressure":auto["pressure"]}
try:
for k, value in params.items():
payload={"key": k,
"value": value}
req= requests.post("http://{}/mobile/upload_autoclave".format(self.host), data=payload)
if req.text == "success":
self.autoclave.append(key)
outstanding.delete(key)
self.messages.item_strings.append("Upload completed sucessfully")
else:
self.messages.item_strings.append("Upload unsuccessful")
except Exception as e:
self.messages.item_strings.append("""An error occured while trying to upload this certificate:
{}""".format(e))
def upload_standards(self, key):
global standards
std = standards[key]
params = {"name":key,
"serial": std["serial"],
"number":std["certificate_number"],
"traceability": std["traceability"],
"nominal":std["nominal"],
"actual":std["actual"],
"uncertainty":std["uncertainty"]}
try:
req= requests.post("http://{}/mobile/upload_standard".format(self.host), data=params)
except Exception as e:
self.messages.item_strings.append("""An error occured while trying to upload this standard:
{}""".format(e))
def upload(self):
self.host = self.ids.host.val.text
global balance
global general
global standards
global autoclave
code = 0
try:
if self.host == "":
p = Popup(title= "warning", content= Label(text="The host cannot be empty"),
size_hint=(0.5, 0.3))
p.open()
else:
self.messages.item_strings.append("Connecting to the server...")
req = requests.get("http://{}/mobile/".format(self.host))
code = req.status_code
except:
self.messages.item_strings.append("Uploading failed, try again")
code = 404
        if code == 200:
for key in general:
self.messages.item_strings.append("Uploading {}".format(key))
self.upload_general(key)
for key in self.general:
general.delete(key)
self.general = []
for key in balance:
self.current_key = key
self.messages.item_strings.append("Uploading {}".format(key))
self.upload_balance(key)
for key in self.balance:
balance.delete(key)
self.balance = []
for key in autoclave:
self.current_key = key
self.messages.item_strings.append("Uploading {}".format(key))
self.upload_autoclave(key)
for key in self.autoclave:
autoclave.delete(key)
self.autoclave = []
for key in standards:
self.messages.item_strings.append("Uploading {}".format(key))
self.upload_standards(key)
self.messages.item_strings.append("Uploading completed")
else:
self.messages.item_strings.append("Uploading failed")
# if it fails give a reason
class NewStandardScreen(WhiteScreen):
def __init__(self, *args, **kwargs):
super(NewStandardScreen, self).__init__(*args, **kwargs)
self. readings = {"nominal": [],
"actual": [],
"uncertainty": []}
def record(self):
if self.ids.nominal.val.text == "" or self.ids.actual.val.text == "":
pass
else:
try:
float(self.ids.nominal.val.text)
float(self.ids.actual.val.text)
float(self.ids.uncertainty.val.text)
self.readings["nominal"].append(self.ids.nominal.val.text)
self.readings["actual"].append(self.ids.actual.val.text)
self.readings["uncertainty"].append(self.ids.uncertainty.val.text)
self.ids.nominal.val.text = ""
self.ids.actual.val.text = ""
self.ids.table.clear_widgets()
self.ids.table.add_widget(table(["nominal", "actual", "uncertainty"], self.readings))
except ValueError:
p = Popup(title="Warning", size_hint = (1,0.3),
content=Label(text="The data entered was invalid, \n"
"check if you used a comma(,) \n"
"instead of a period(.)"))
p.open()
def clear(self):
self.readings = {"nominal": [],
"actual": [],
"uncertainty": []}
self.ids.table.clear_widgets()
def clear_last(self):
for i in self.readings:
l = len(self.readings[i]) - 1
self.readings[i] = self.readings[i][:l]
self.ids.table.clear_widgets()
self.ids.table.add_widget(table(["nominal", "actual", "uncertainty"], self.readings))
def submit(self):
global standards
trace = self.ids.trace.text
name = self.ids.std_name.val.text
number = self.ids.number.val.text
serial = self.ids.serial.val.text
#add a key using the length of outstanding and some other variable
standards.put(self.ids.std_name.val.text,
name=self.ids.std_name.val.text,
certificate_number= self.ids.number.val.text,
traceability= self.ids.trace.text,
serial=self.ids.serial.val.text,
nominal="|".join(self.readings["nominal"]),
actual="|".join(self.readings["actual"]),
uncertainty="|".join(self.readings["uncertainty"]))
self.ids.table.clear_widgets()
self.ids.number.val.text = ""
self.ids.std_name.val.text = ""
self.ids.nominal.val.text = ""
self.ids.actual.val.text = ""
self.ids.trace.text = ""
self.ids.serial.val.text=""
self.ids.uncertainty.val.text = ""
self.parent.change("summary", "Summary")
class AutoclaveScreen(WhiteScreen):
asm = ObjectProperty()
title = ObjectProperty()
class AutoclaveScreenManager(WhiteScreenManager):
def __init__(self, *args, **kwargs):
super(AutoclaveScreenManager, self).__init__(*args, **kwargs)
self.type = "autoclave"
self.title = "Instrument Info"
def calibrate(self):
self.change("specs_p", "Pressure Specs.")
def change(self, s, t):
self.parent.parent.title.text = t
self.current = s
class AutoInfo(WhiteScreen):
def get_date(self):
next = datetime.date.today() + timedelta(days =180)
return next.strftime("%d/%m/%y")
def change(self):
self.parent.change("instrument_specs", "Specifications")
def previous(self):
pass
def next(self):
global standards
due = self.ids.due.val.text
name = self.ids.nom.val.text
serial = self.ids.sn.val.text
customer = self.ids.cus.val.text
manufacturer = self.ids.man.val.text
model = self.ids.model.val.text
standard_temp = self.ids.standard_temp.val.text
standard_p = self.ids.standard_p.val.text
stds = list(standards.keys())
if name == "" or customer == "" or standard_temp == "" or standard_p == "":
p=Popup(title="Warning!", size_hint=(0.6, 0.3),
content=Label(text= "Some fields cannot be empty"))
p.open()
elif standard_temp not in standards:
p=Popup(title="Warning!", size_hint=(1, 1),
content=Label(text= "The Instrument cannot be calibrated without \n"
"an acompanying standard. These are available:\n"
"{}".format("\n".join(stds))))
p.open()
elif standard_p not in standards:
p=Popup(title="Warning!", size_hint=(1, 1),
content=Label(text= "The Instrument cannot be calibrated without \n"
"an acompanying standard. These are available:\n"
"{}".format("\n".join(stds))))
p.open()
else:
self.parent.instrument_info = [name, serial,
customer,
manufacturer,
model,standard_temp, standard_p, due]
self.ids.nom.val.text =""
self.ids.sn.val.text = ""
self.change()
class AutoSpecs(WhiteScreen):
def previous(self):
self.parent.change("info", "Instrument Info")
def next(self):
range_temp = self.ids.min_t.text + "-" + self.ids.max_t.text
range_pressure = self.ids.min_p.text + "-" + self.ids.max_p.text
res_p = self.ids.res_p.val.text
res_t = self.ids.res_t.val.text
try:
float(res_p)
float(res_t)
except:
p=Popup(title="Warning!", size_hint=(0.6, 0.3),
content=Label(text= "Each resolution must be a number"))
p.open()
return
units_p= self.ids.units_p.val.text
units_t= self.ids.units_t.val.text
location= self.ids.location.val.text
immersion= self.ids._immersion.val.text
pressure_units= "bar kpa mpa pa psi".split(" ")
if units_p not in pressure_units:
p = Popup(size_hint =(1, 0.5), title = "Warning!",
content=Label(text="""You Have entered an invalid unit. Try one of
{}""".format(", ".join(pressure_units))))
p.open()
elif range_temp=="-" or range_pressure=="-" or res_p=="" or units_t=="" or location=="":
p=Popup(title="Warning!", size_hint=(0.6, 0.3),
content=Label(text= "Some fields cannot be empty"))
p.open()
else:
self.parent.start_time = time.strftime("%H:%M",time.localtime(time.time()))
self.parent.instrument_specs = [range_pressure, range_temp, res_p,
units_p, res_t, units_t, location, immersion]
self.parent.change("temp", "Temperature Calib.")
class AutoTemp(ReadingsScreen):
def next(self):
self.parent.temp_readings = self.readings
self.readings = {"actual": [],
"indicated": []}
self.parent.change("pressure", "Pressure Calib.")
class AutoPressure(PressureReadingsScreen):
def submit(self):
global autoclave
global outstanding
info= self.parent.instrument_info
specs = self.parent.instrument_specs
d=datetime.date.today().strftime("%y/%m/%d")
id = datetime.date.today().strftime("%d%m%Y")+info[1]
autoclave.put(id,
customer = info[2],
start_time= self.parent.start_time,
end_time = time.strftime("%H:%M", time.localtime(time.time())),
date= d,
due = info[7],
name_of_instrument= info[0],
serial= info[1],
immersion_depth=specs[7],
manufacturer=info[3],
model=info[4],
range_temp= specs[1],
range_p=specs[0],
resolution_temp= specs[4],
resolution_p=specs[2],
units_temp=specs[5],
units_p=specs[3],
standard_temp=info[5],
standard_p= info[6],
location=specs[6],
comments=self.ids.comments.text,
temp=readings_combined(self.parent.temp_readings),
pressure=readings_combined(self.readings))
outstanding.put(id, name=info[0],
customer = info[2],
date=d,
serial=info[1])
self.parent.current = "info"
self.readings = {"applied":[],
"indicated": [],
"calculated": []}
class BalanceScreen(WhiteScreen):
asm = ObjectProperty()
class BalanceScreenManager(WhiteScreenManager):
def __init__(self, *args, **kwargs):
super(BalanceScreenManager, self).__init__(*args, **kwargs)
self.transition = SlideTransition()
self.instrument_info = []
self.instrument_specs = []
self.type = "balance"
def change(self, s, title):
self.parent.title.text = title
self.current = s
def calibrate(self):
global balance
specs = self.instrument_specs
info = self.instrument_info
id = "{}{}".format(datetime.date.today().strftime("%d%m%y"),
info[1])
balance.put(id,
name=info[0],
serial=info[1],
customer=info[2],manufacturer=info[3],
model=info[4], _range=specs[0]+ "-" + specs[1],
resolution=specs[2], units=specs[3], location=specs[4],
date = datetime.date.today().strftime("%d/%m/%y"),
due = info[6],
end_time = datetime.datetime.now().strftime("%H:%M:%S"),
start_time= self.start_time,
procedure = "PG-02",
standard=info[5],
comments="",
                    warm_up_nominal = self.cold_mass,
nominal_mass = self.cold,
settling_time= self.settling,
off_center_mass = self.off_center_mass,
before_nominal = self.before_nominal,
before_actual = self.before_actual,
before_up = self.before_up,
after_up = self.after_up,
after_down = self.after_down,
after_uup = self.after_uup,
tare = self.tare,
tare_indicated = self.tare_indicated,
repeat_half = self.repeat_half,
repeat_full = self.repeat_full,
off_center = self.off
)
outstanding.put(id,
name=info[0],
customer=info[2],
date=datetime.date.today().strftime("%d/%m/%Y"),
serial=info[1])
class BalanceCalibrationScreen(WhiteScreen):
def __init__(self, *args, **kwargs):
super(BalanceCalibrationScreen, self).__init__(*args, **kwargs)
self.readings = []
self.count = 0
def clear(self):
self.count = 0
if isinstance(self.readings, list):
self.readings = []
self.ids.table.table.clear_widgets()
else:
for i in self.readings: self.readings[i] = []
self.ids.table.clear_widgets()
def clear_last(self):
if self.count > 0:
self.count -= 1
if isinstance(self.readings, list):
l = len(self.readings) - 1
self.readings = self.readings[:l]
self.ids.table.table.clear_widgets()
self.ids.table.table.add_widget(numbered_table("Value", self.readings))
else:
headings = []
for i in self.readings:
l = len(self.readings[i]) - 1
                self.readings[i] = self.readings[i][:l]
                headings.append(i)
self.ids.table.clear_widgets()
def record(self, val):
self.count += 1
if self.count > 10:
return
elif val == "":
return
else:
            try:
                float(val)
            except ValueError:
                p = Popup(title="Warning", size_hint = (1, 0.6),
                          content=Label(text="The data entered was invalid, \n"
                                        "check if you used a comma(,) \n"
                                        "instead of a period(.)"))
                p.open()
                return
            self.readings.append(val)
self.ids.table.table.clear_widgets()
self.ids.table.table.add_widget(numbered_table("Cold Value", self.readings))
def next_up(self):
'''used to abstract class specific features'''
self.ids.table.table.clear_widgets()
def next(self):
if self.count < 5:
p = Popup(title="Warning!", content = Label(text="Too Few values"),
size_hint=(0.5, 0.3))
p.open()
else:
            self.parent.transition.direction = "left"
            self.next_up()
            if isinstance(self.readings, dict):
                for i in self.readings:
                    self.readings[i] = []
            else:
                self.readings = []
def previous(self, screen, title):
self.parent.transition.direction = 'right'
self.parent.change(screen, title)
class ColdStart(BalanceCalibrationScreen):
def next_up(self):
self.parent.start_time = datetime.datetime.now().strftime("%H:%M:%S")
self.parent.cold = "|".join(self.readings)
try:
float(self.ids.mass.val.text)
except:
p = Popup(title="Warning!", content = Label(text=
"The mass must be a numerical value\n"
"make sure no comma's(,) are present"),
size_hint=(0.5, 0.3))
p.open()
return
self.parent.cold_mass = self.ids.mass.val.text
self.parent.change("settling", "Settling Time")
def previous(self):
pass
def record(self):
super(ColdStart, self).record(self.ids.cold_value.val.text)
class SettlingTime(BalanceCalibrationScreen):
def next_up(self):
self.parent.settling = "|".join(self.readings)
self.parent.change("linearityup", "Linearity Up")
def record(self):
super(SettlingTime, self).record(self.ids.settling_value.val.text)
def previous(self):
BalanceCalibrationScreen.previous(self, "cold", "Cold Start")
class LinearityUp(BalanceCalibrationScreen):
def __init__(self, *args, **kwargs):
global standards
super(LinearityUp, self).__init__(*args, **kwargs)
#get the nominal and actual values
self.readings = {"nominal": [], "actual":[], "up": []}
def clear_last(self):
if self.count > 0:
l = len(self.readings["nominal"])-1
for i in self.readings:
self.readings[i] = self.readings[i][:l]
self.count -= 1
self.ids.table.clear_widgets()
self.ids.table.add_widget(table(["nominal", "actual", "up"], self.readings))
def next_up(self):
self.parent.before_nominal = "|".join(self.readings["nominal"])
self.parent.before_up = "|".join(self.readings["up"])
self.parent.before_actual = "|".join(self.readings["actual"])
self.parent.change("linearity", "Linearity")
self.ids.table.clear_widgets()
self.readings = {"nominal": [], "actual":[], "up": []}
def previous(self):
BalanceCalibrationScreen.previous(self, "settling", "Settling Time")
def record(self):
nominal = self.ids.nominal_value.val.text
up = self.ids.linearity_value.val.text
try:
float(up)
float(nominal)
except:
p = Popup(title="Warning", size_hint = (1, 0.6),
content=Label(text="The data entered was invalid, \n"
"check if you used a comma(,) \n"
"instead of a period(.)"))
            p.open()
            return
self.standards = standards.get(self.parent.instrument_info[5])
self.std_nominal = self.standards["nominal"].split("|")
self.std_actual = self.standards["actual"].split("|")
if self.count >= 5:
return
elif nominal not in self.std_nominal:
p = Popup(title="Warning!", content = Label(text="Incorrect "
"Nominal value for given standard\n"
"These are available: {}".format(
", ".join(self.std_nominal))),
size_hint=(1, 0.4))
p.open()
else:
self.count += 1
self.readings["nominal"].append(nominal)
self.readings["up"].append(up)
#get the corresponding value of actual to nominal
self.readings["actual"].append(self.std_actual[
self.std_nominal.index(
nominal)])
self.ids.table.clear_widgets()
self.ids.table.add_widget(table(["nominal", "actual", "up"], self.readings))
class Linearity(BalanceCalibrationScreen):
def __init__(self, *args, **kwargs):
super(Linearity, self).__init__(*args, **kwargs)
self.readings = {"Linearity Up" : [],
"Linearity Down" : [],
"Linearity up" : []}
def next_up(self):
self.parent.after_up="|".join(self.readings["Linearity Up"])
self.parent.after_uup="|".join(self.readings["Linearity up"])
self.parent.after_down="|".join(self.readings["Linearity Down"])
self.parent.change("taring", "Taring Linearity")
self.ids.table.clear_widgets()
self.readings = {"Linearity Up" : [],
"Linearity Down" : [],
"Linearity up" : []}
def previous(self):
BalanceCalibrationScreen.previous(self, "linearityup", "Linearity(before calibration)")
def clear_last(self):
if self.count > 0:
if self.count <= 5:
l = len(self.readings["Linearity Up"]) -1
self.readings["Linearity Up"] = self.readings["Linearity Up"][:l]
elif self.count > 5 and self.count <= 10:
l = len(self.readings["Linearity Down"]) -1
self.readings["Linearity Down"] = self.readings["Linearity Down"][:l]
elif self.count > 10 and self.count <= 15:
l = len(self.readings["Linearity up"]) -1
self.readings["Linearity up"] = self.readings["Linearity up"][:l]
else:
return
self.count -= 1
self.ids.table.clear_widgets()
self.ids.table.add_widget(table(["Linearity Up", "Linearity Down", "Linearity up"],
self.readings))
def record(self):
val = self.ids._value.val.text
try:
float(val)
except ValueError:
p = Popup(title="Warning", size_hint = (1, 0.5),
content=Label(text="The data entered was invalid, \n"
"check if you used a comma(,) \n"
"instead of a period(.)"))
p.open()
return
self.count += 1
if self.count <= 5:
self.ids._value.name = "Linearity Up"
self.readings["Linearity Up"].append(val)
elif self.count > 5 and self.count <= 10:
self.ids._value.name = "Linearity Down"
self.readings["Linearity Down"].append(val)
elif self.count > 10 and self.count <= 15:
self.ids._value.name = "Linearity up"
self.readings["Linearity up"].append(val)
else:
return
self.ids.table.clear_widgets()
self.ids.table.add_widget(table(["Linearity Up", "Linearity Down", "Linearity up"],
self.readings))
class TaringLinearity(BalanceCalibrationScreen):
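    """Records up to five tare/indicated reading pairs."""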
def __init__(self, *args, **kwargs):
super(TaringLinearity, self).__init__(*args, **kwargs)
        self.readings = {"Tare": [],
                         "Indicated": []}
def clear(self):
self.count = 0
for i in self.readings: self.readings[i] = []
self.ids.table.table.clear_widgets()
def clear_last(self):
if self.count > 0:
for i in self.readings:
l = len(self.readings[i]) -1
self.readings[i] = self.readings[i][:l]
self.ids.table.table.clear_widgets()
self.ids.table.table.add_widget(table(["Tare", "Indicated"], self.readings))
self.count -= 1
    def record(self):
        tare = self.ids.tare_value.val.text
        indicated = self.ids.nominal_value.val.text
        if self.count >= 5:
            # Allow at most five tare/indicated pairs, matching the other screens.
            return
        self.count += 1
        self.readings["Tare"].append(tare)
        self.readings["Indicated"].append(indicated)
        self.ids.table.table.clear_widgets()
        self.ids.table.table.add_widget(table(["Tare", "Indicated"], self.readings))
def previous(self):
BalanceCalibrationScreen.previous(self, "linearity", "Linearity(after calibration)")
def next_up(self):
self.parent.tare = "|".join(self.readings["Tare"])
self.parent.tare_indicated = "|".join(self.readings["Indicated"])
self.parent.change("repeat", "Repeatability")
        self.readings = {"Tare": [],
                         "Indicated": []}
self.ids.table.table.clear_widgets()
class Repeatability(BalanceCalibrationScreen):
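    """Repeatability test: five readings at half load, then five at full
    load."""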
    def __init__(self, *args, **kwargs):
super(Repeatability, self).__init__(*args, **kwargs)
self.readings = {"1/2 Load":[],
"Full Load": []}
def clear(self):
self.count = 0
for i in self.readings: self.readings[i] = []
self.ids.table.table.clear_widgets()
def next_up(self):
self.parent.repeat_half = "|".join(self.readings["1/2 Load"])
self.parent.repeat_full = "|".join(self.readings["Full Load"])
self.parent.change("off", "Off Center Test")
self.readings = {"1/2 Load":[],
"Full Load": []}
self.ids.table.table.clear_widgets()
def previous(self):
BalanceCalibrationScreen.previous(self, "taring", "Taring Linearity")
def clear_last(self):
if self.count > 0:
            if self.count <= 5:  # the fifth reading is still 1/2 load, as in record()
l = len(self.readings["1/2 Load"]) - 1
self.readings["1/2 Load"] = self.readings["1/2 Load"][:l]
elif self.count > 5 and self.count <= 10:
l = len(self.readings["Full Load"]) - 1
self.readings["Full Load"] = self.readings["Full Load"][:l]
self.count -= 1
self.ids.table.table.clear_widgets()
self.ids.table.table.add_widget(table(["1/2 Load", "Full Load"],
self.readings))
def record(self):
val = self.ids._value.val.text
        try:
            float(val)
        except ValueError:
            p = Popup(title="Warning", size_hint=(1, 0.4),
                      content=Label(text="The data entered was invalid, \n"
                                         "check if you used a comma (,) \n"
                                         "instead of a period (.)"))
            p.open()
            return
self.count += 1
if self.count <= 5:
self.ids._value.name = "1/2 Load"
self.readings["1/2 Load"].append(val)
elif self.count > 5 and self.count <= 10:
self.ids._value.name = "Full Load"
self.readings["Full Load"].append(val)
else:
return
self.ids.table.table.clear_widgets()
self.ids.table.table.add_widget(table(["1/2 Load", "Full Load"],
self.readings))
class OffCenter(BalanceCalibrationScreen):
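    """Off-center test: one reading at each pan position A-E plus the test
    mass; submit() finalises the whole calibration run."""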
def __init__(self, *args, **kwargs):
super(OffCenter, self).__init__(*args, **kwargs)
self.readings = []
self.count = 0
    def clear_last(self):
        # Guard against going below zero, as the other screens do.
        if self.count > 0:
            self.count -= 1
            self.readings = self.readings[:-1]
            self.ids.table.table.clear_widgets()
            self.ids.table.table.add_widget(numbered_table("Reading at", self.readings))
def next(self):
pass
def previous(self):
BalanceCalibrationScreen.previous(self, "repeat", "Repeatability")
def record(self):
val = self.ids._value.text
        try:
            float(val)
        except ValueError:
            p = Popup(title="Warning", size_hint=(1, 0.4),
                      content=Label(text="The data entered was invalid, \n"
                                         "check if you used a comma (,) \n"
                                         "instead of a period (.)"))
            p.open()
            return
        self.count += 1
        if self.count > 5:
            return
        pos = "A B C D E".split(" ")
        current_pos = self.ids.which.text.split(" ")[1]
        if current_pos != "E":
            new_pos = pos[pos.index(current_pos) + 1]
        else:
            new_pos = current_pos
        self.ids.which.text = "Position " + new_pos
        self.readings.append(val)
        self.ids.table.table.clear_widgets()
        self.ids.table.table.add_widget(numbered_table("Reading at", self.readings))
def submit(self):
        if self.count < 5:
            p = Popup(title="Warning!", content=Label(text="Too few values"),
                      size_hint=(0.6, 0.3))
            p.open()
        else:
            try:
                float(self.ids.off.val.text)
            except ValueError:
                p = Popup(title="Warning!",
                          content=Label(text="The mass must be a numerical value.\n"
                                             "Make sure no commas (,) are present."),
                          size_hint=(0.5, 0.3))
                p.open()
                return
self.ids.table.table.clear_widgets()
self.parent.off = ":".join(self.readings)
self.readings = []
self.parent.off_center_mass = self.ids.off.val.text
self.parent.calibrate()
self.parent.current = "cold"
def table(data=None, values=None):
    """Build a GridLayout with one heading per entry of ``data`` and the
    readings from ``values`` below it, alternating Cell/AltCell rows."""
    # Use None defaults: mutable default arguments are shared between calls.
    data = data if data is not None else []
    values = values if values is not None else {}
    layout = GridLayout(cols=len(data))
    l = longest(values)
    for column in data:
        layout.add_widget(Heading(text=column, font_size=20))
    index = 0
    while index < l:
        for column in data:
            if index > (len(values[column]) - 1):
                layout.add_widget(Label(text="", size_hint=(None, None), size=(75, 50)))
            elif index % 2 == 0:
                layout.add_widget(Cell(text=values[column][index]))
            else:
                layout.add_widget(AltCell(text=values[column][index]))
        index += 1
    return layout
def numbered_table(heading="reading", values=None):
    """Two-column GridLayout: reading number and value, alternating colours."""
    # None default avoids sharing one mutable list between calls.
    values = values if values is not None else ["Un", "Deux", "Trois"]
    layout = GridLayout(cols=2)
    layout.add_widget(Heading(text="Reading #", font_size=20))
    layout.add_widget(Heading(text=heading, font_size=20))
    for i, value in enumerate(values):
        cell = Cell if i % 2 == 0 else AltCell
        layout.add_widget(cell(text=str(i + 1)))
        layout.add_widget(cell(text=value))
    return layout
def longest(d):
    """Return the length of the longest value list in ``d``."""
    max_len = 0
    for key in d:
        if len(d[key]) > max_len:
            max_len = len(d[key])
    return max_len
def readings_combined(d):
    """Flatten parallel reading lists into one string: values within a row
    are joined with ':' and rows are joined with ';'."""
    if len(d) == 3:
        rows = zip(d["applied"], d["calculated"], d["indicated"])
    else:
        rows = zip(d["indicated"], d["actual"])
    return ";".join(":".join(str(v) for v in row) for row in rows)
def http_format_args(d):
    """Build a '?key=value&...' query string from ``d``. Values are joined
    as-is; urllib.parse.urlencode would be needed if escaping matters."""
    if not d:
        return ""
    return "?" + "&".join("{}={}".format(k, v) for k, v in d.items())
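# Illustrative calls for the two helpers above; the sample values are made
# up for demonstration and do not come from the app itself:
#
#   readings_combined({"indicated": ["1.0", "2.0"], "actual": ["1.1", "2.1"]})
#   -> "1.0:1.1;2.0:2.1"
#   http_format_args({"user": "amy", "id": "7"})
#   -> "?user=amy&id=7"   (key order follows the dict's iteration order)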
if __name__ == "__main__":
    DataApp().run()
# --- next file: /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/webauthrange_a43b34eee0d1687120752f9efeb0dda9.py ---
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class WebAuthRange(Base):
"""Web Authentication Range Options
The WebAuthRange class encapsulates a list of webAuthRange resources that are managed by the user.
A list of resources can be retrieved from the server using the WebAuthRange.find() method.
The list can be managed by using the WebAuthRange.add() and WebAuthRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'webAuthRange'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'Expect': 'expect',
'InputValue1': 'inputValue1',
'InputValue2': 'inputValue2',
'InputValue3': 'inputValue3',
'Name': 'name',
'ObjectId': 'objectId',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(WebAuthRange, self).__init__(parent, list_op)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def Expect(self):
# type: () -> str
"""
Returns
-------
- str: Statistics will be maintained for expected/actual success/failure
"""
return self._get_attribute(self._SDM_ATT_MAP['Expect'])
@Expect.setter
def Expect(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Expect'], value)
@property
def InputValue1(self):
# type: () -> str
"""
Returns
-------
- str: The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 1 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
"""
return self._get_attribute(self._SDM_ATT_MAP['InputValue1'])
@InputValue1.setter
def InputValue1(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InputValue1'], value)
@property
def InputValue2(self):
# type: () -> str
"""
Returns
-------
- str: The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 2 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
"""
return self._get_attribute(self._SDM_ATT_MAP['InputValue2'])
@InputValue2.setter
def InputValue2(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InputValue2'], value)
@property
def InputValue3(self):
# type: () -> str
"""
Returns
-------
- str: The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 3 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
"""
return self._get_attribute(self._SDM_ATT_MAP['InputValue3'])
@InputValue3.setter
def InputValue3(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InputValue3'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
def update(self, Enabled=None, Expect=None, InputValue1=None, InputValue2=None, InputValue3=None, Name=None):
# type: (bool, str, str, str, str, str) -> WebAuthRange
"""Updates webAuthRange resource on the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- Expect (str): Statistics will be maintained for expected/actual success/failure
- InputValue1 (str): The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 1 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
- InputValue2 (str): The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 2 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
- InputValue3 (str): The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 3 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
- Name (str): Name of range
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Enabled=None, Expect=None, InputValue1=None, InputValue2=None, InputValue3=None, Name=None):
# type: (bool, str, str, str, str, str) -> WebAuthRange
"""Adds a new webAuthRange resource on the server and adds it to the container.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- Expect (str): Statistics will be maintained for expected/actual success/failure
- InputValue1 (str): The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 1 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
- InputValue2 (str): The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 2 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
- InputValue3 (str): The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 3 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
- Name (str): Name of range
Returns
-------
- self: This instance with all currently retrieved webAuthRange resources using find and the newly added webAuthRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained webAuthRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Enabled=None, Expect=None, InputValue1=None, InputValue2=None, InputValue3=None, Name=None, ObjectId=None):
# type: (bool, str, str, str, str, str, str) -> WebAuthRange
"""Finds and retrieves webAuthRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve webAuthRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all webAuthRange resources from the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- Expect (str): Statistics will be maintained for expected/actual success/failure
- InputValue1 (str): The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 1 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
- InputValue2 (str): The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 2 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
- InputValue3 (str): The value to be returned for the input field in the HTTP POST message sent back to the DUT. If the Input field 3 is a radio type, then this value must match one of the choices present on the form. Standard text increment options are supported
- Name (str): Name of range
- ObjectId (str): Unique identifier for this object
Returns
-------
- self: This instance with matching webAuthRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of webAuthRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the webAuthRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
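# Minimal usage sketch (not part of the generated module). It assumes an
# already-connected ixnetwork_restpy session; `webauth_parent` is a
# hypothetical stand-in for whichever protocol-stack node exposes this list,
# and the attribute values below are illustrative only:
#
#   existing = webauth_parent.WebAuthRange.find(Enabled=True)
#   new_range = webauth_parent.WebAuthRange.add(Name='range-1',
#                                               InputValue1='user1')
#   new_range.update(Enabled=False)
#   new_range.remove()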
# --- next file: /125_algorithms/_exercises/exercises/_algorithms_challenges/pybites/beginner/55_v4/steam.py ---
#
# _______ f..
#
# # cached version to have predictable results for testing
# FEED_URL = "http://bit.ly/2IkFe9B"
#
# Game = n..('Game', 'title link')
#
#
# ___ get_games
# """Parses Steam's RSS feed and returns a list of Game namedtuples"""
# r.. ? f.t.. f.l.. ___ ? __ f__.p.. ?.e..
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.