Dataset schema (column, dtype, observed range or distinct values):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | 0 to 112 items |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | 1 item |
| author_id | string | length 1 to 132 |
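Each record below pairs these metadata columns with the full file `content`. A minimal loading sketch (the dataset id `user/python-files` is a placeholder, not the real hub identifier):

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub;
# "user/python-files" is a placeholder id, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("user/python-files", split="train", streaming=True)
for row in ds.take(2):
    # 'content' holds the raw source text, 'path' its repo-relative location
    print(row["repo_name"], row["path"], row["length_bytes"])
```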
77fc26163f61919df4efdfcbb251803d49603c9d | ec8fef96af2a6b6610d298637f05bcdfe67cba2b | /experiments/cremi/utils/align_test_samples_part_3.py | f1a04694e55134c663b6542a64a388ae5f5fa0df | [] | no_license | abailoni/longRangeAgglo | 8b98aca75b17d177cb5e408460f95ff20f411aeb | 260b452e106125722ae3824755584ce7bfd5b81c | refs/heads/master | 2021-06-25T14:14:57.150233 | 2020-11-06T11:14:52 | 2020-11-06T11:14:52 | 150,707,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,272 | py | import long_range_compare # Add missing package-paths
from long_range_compare.data_paths import get_trendytukan_drive_path, get_hci_home_path
"""
This is a modified version of part 2 to downscale the whole aligned data without cropping it at all
"""
downscale = True
include_affs = False
from scipy.ndimage import zoom
import vigra
import numpy as np
import os
import h5py
import sys
sys.path += [
os.path.join(get_hci_home_path(), "python_libraries/cremi_tools"),]
def get_gt_bounding_box(gt):
    # the mask is binary here: foreground voxels are labeled 1, the rest 0
fg_indices = np.where(gt == 1)
return tuple(
slice(np.min(fg_indices[d]),np.max(fg_indices[d])+1)
for d in range(3)
)
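# Example: for a mask whose foreground (gt == 1) spans z in [2, 5],
# y in [10, 20] and x in [7, 9], get_gt_bounding_box returns
# (slice(2, 6), slice(10, 21), slice(7, 10)), i.e. half-open slices that
# still include the last foreground index along each axis.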
POSTFIX = "_no_crop"
# Weird defects to be blacked out in the uncropped version:
blacked_out = {"A+": [11, 25, 37, 70],
"B+": [18],
"C+": [123]}
# original_pad = ((37, 38), (911, 911), (911, 911))
# FOUND CROP SLICES:
# A+ (slice(36, 163, None), slice(1154, 2753, None), slice(934, 2335, None))
# B+ (slice(36, 163, None), slice(1061, 2802, None), slice(1254, 4009, None))
# C+ (slice(36, 163, None), slice(980, 2443, None), slice(1138, 2569, None))
for sample in ["B+", "C+"]:
# Load GT mask:
print("Loading")
mask_inner_path = "volumes/labels/mask"
source_path_big_pad = os.path.join(get_trendytukan_drive_path(),
"datasets/CREMI/official_test_samples/full_aligned_samples/sample_{}_aligned_plus_big_pad.hdf".format(sample))
source_path = os.path.join(get_trendytukan_drive_path(),
"datasets/CREMI/official_test_samples/full_aligned_samples/sample_{}_aligned.hdf".format(sample))
from segmfriends.utils.various import readHDF5, writeHDF5
print("Reading...")
mask_big_pad = readHDF5(source_path_big_pad, mask_inner_path)
print("Max big pad: ", mask_big_pad.max())
mask_border = mask_big_pad > 10
mask_big_pad = np.logical_not(mask_border).astype('uint16')
# print(mask_GT.shape)
print("Find crop")
# crop_slice = get_gt_bounding_box(mask_big_pad)
# Write crop_slice to file:
import csv
csv_file_path = os.path.join(get_hci_home_path(),
"datasets/CREMI/official_test_samples/cropped_aligned_samples/sample{}_cropped{}.csv".format(sample, POSTFIX))
with open(csv_file_path, mode='w') as f:
employee_writer = csv.writer(f, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(3):
employee_writer.writerow([0, str(mask_big_pad.shape[i])])
# print(crop_slice)
# Write affs and mask in target file:
print("Saving...")
target_path_old = os.path.join(get_hci_home_path(),
"datasets/CREMI/official_test_samples/cropped_aligned_samples/sample{}_cropped{}.h5".format(
sample, POSTFIX))
target_path = os.path.join(get_hci_home_path(),
"datasets/CREMI/official_test_samples/cropped_aligned_samples/sample{}_cropped{}.h5".format(sample, POSTFIX))
# if include_affs:
# affs_path = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/constantin_affs/test_samples/sample{}.h5".format(sample))
# affs_inner_path = "affinities"
# affs = readHDF5(affs_path, affs_inner_path, crop_slice=(slice(None), ) + crop_slice)
# writeHDF5(affs, target_path, "volumes/affinities")
raw = readHDF5(source_path, "volumes/raw")
# raw = readHDF5(target_path_old, "volumes/raw_2x")
if sample in blacked_out:
for blk in blacked_out[sample]:
print("blacking out ", blk)
raw[blk] = 0
# mask_gt = readHDF5(source_path, mask_inner_path, dtype="uint16", crop_slice=crop_slice)
# writeHDF5(raw, target_path, "volumes/raw")
# writeHDF5(mask_big_pad.astype('uint16'), target_path, "volumes/labels/mask_gt")
# writeHDF5(mask_gt, target_path, "volumes/labels/mask_gt")
if downscale:
writeHDF5(zoom(mask_big_pad, (1, 0.5, 0.5), order=0), target_path, "volumes/labels/mask_raw_2x")
writeHDF5(zoom(raw, (1, 0.5, 0.5), order=3), target_path, "volumes/raw_2x")
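        # order=0 (nearest neighbour) preserves the discrete mask values,
        # while order=3 (cubic) smooths the raw intensities; the factors
        # (1, 0.5, 0.5) halve the y/x axes and leave z untouched.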
# writeHDF5(raw, target_path, "volumes/raw_2x")
| [
"[email protected]"
] | |
73b494f266e34eb2deabf1b569f57c9d6a30555d | 0db6e82011087bc31b2edfd3ac2d5757c08116b8 | /my_library/models/res_config_settings.py | 71cc7770897e3ef56c78265398b82ee46e3e046c | [] | no_license | NumanIbnMazid/odoo-my-library | 60617551f3e968a4cf42670785347284901aa4b0 | f000fea813a5f246e58617e09a5420739569169c | refs/heads/master | 2023-07-29T05:33:41.697740 | 2021-09-13T11:27:58 | 2021-09-13T11:27:58 | 405,944,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # -*- coding: utf-8 -*-
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
group_self_borrow = fields.Boolean(
string="Self borrow", implied_group='my_library.group_self_borrow')
| [
"[email protected]"
] | |
8e644cd95efa509e80748bd93f7e036283a75068 | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /third_party/blink/tools/blinkpy/common/system/filesystem.py | c21aaead2375f65d616dec302def76250afcf109 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 16,246 | py | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrapper object for functions that access the filesystem.
A FileSystem object can be used to represent dependency on the
filesystem, and can be replaced with a MockFileSystem in tests.
"""
import codecs
import errno
import exceptions
import glob
import hashlib
import logging
import os
import shutil
import stat
import sys
import tempfile
import time
_log = logging.getLogger(__name__)
class FileSystem(object):
"""FileSystem interface for blinkpy.
Unless otherwise noted, all paths are allowed to be either absolute
or relative.
"""
sep = os.sep
pardir = os.pardir
WINDOWS_MAX_PATH = 260
def _path_for_access(self, path):
"""Ensures a path can be used to access the file.
Pass a path through this method when and only when the path is about to
be accessed via a syscall (e.g. open()); DO NOT use this method if the
path is to be manipulated by (most of) the functions in os.path, etc.
This method currently only works around one issue: the maximum path
length on Windows. If the current platform is Windows and the given path
is longer than MAX_PATH (260), the path will be converted to a UNC path
by first making the path absolute and then prepending the UNC magic
prefix '\\?\'. Otherwise, the method is a no-op.
(https://msdn.microsoft.com/en-us/library/aa365247.aspx#maxpath)
"""
if sys.platform == 'win32' and len(path) >= self.WINDOWS_MAX_PATH:
return ur'\\?\%s' % (self.abspath(path),)
return path
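    # _path_for_access example (the path is hypothetical): on Windows a
    # 300-character path such as r'C:\very\deep\...\file.txt' is returned
    # as r'\\?\C:\very\deep\...\file.txt'; on other platforms, or for
    # short paths, it comes back unchanged.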
def abspath(self, path):
return os.path.abspath(path)
def realpath(self, path):
return os.path.realpath(path)
def path_to_module(self, module_name):
"""Returns the absolute path of a module."""
# FIXME: This is the only use of sys in this file. It's possible that
# this function should move elsewhere.
# __file__ is not always an absolute path in Python <3.4
# (https://bugs.python.org/issue18416).
return self.abspath(sys.modules[module_name].__file__)
def expanduser(self, path):
return os.path.expanduser(path)
def basename(self, path):
return os.path.basename(path)
def chdir(self, path):
return os.chdir(path)
def copyfile(self, source, destination):
# shutil.copyfile() uses open() underneath, which supports UNC paths.
shutil.copyfile(self._path_for_access(source), self._path_for_access(destination))
def dirname(self, path):
return os.path.dirname(path)
def exists(self, path):
return os.path.exists(path)
def files_under(self, path, dirs_to_skip=None, file_filter=None):
"""Walks the filesystem tree under the given path in top-down order.
Args:
dirs_to_skip: A list of directories to skip over during the
traversal (e.g., .svn, resources, etc.).
file_filter: If not None, the filter will be invoked with the
filesystem object and the dirname and basename of each file
found. The file is included in the result if the callback
returns True.
Returns:
A list of all files under the given path in top-down order.
"""
dirs_to_skip = dirs_to_skip or []
def filter_all(fs, dirpath, basename):
return True
file_filter = file_filter or filter_all
files = []
if self.isfile(path):
if file_filter(self, self.dirname(path), self.basename(path)):
files.append(path)
return files
if self.basename(path) in dirs_to_skip:
return []
for (dirpath, dirnames, filenames) in os.walk(path):
for d in dirs_to_skip:
if d in dirnames:
dirnames.remove(d)
for filename in filenames:
if file_filter(self, dirpath, filename):
files.append(self.join(dirpath, filename))
return files
def getcwd(self):
return os.getcwd()
def glob(self, path):
return glob.glob(path)
def isabs(self, path):
return os.path.isabs(path)
def isfile(self, path):
return os.path.isfile(path)
def isdir(self, path):
return os.path.isdir(path)
def join(self, *comps):
# TODO(robertma): UNC paths are not currently supported, but can be done
# with os.path.splitunc().
return os.path.join(*comps)
def listdir(self, path):
return os.listdir(path)
def walk(self, top, topdown=True, onerror=None, followlinks=False):
return os.walk(top, topdown=topdown, onerror=onerror, followlinks=followlinks)
def mkdtemp(self, **kwargs):
"""Creates and returns a uniquely-named directory.
This is like tempfile.mkdtemp, but if used in a with statement
the directory will self-delete at the end of the block (if the
directory is empty; non-empty directories raise errors). The
directory can be safely deleted inside the block as well, if so
desired.
Note that the object returned is not a string and does not support all
of the string methods. If you need a string, coerce the object to a
string and go from there.
"""
class TemporaryDirectory(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
self._directory_path = tempfile.mkdtemp(**self._kwargs)
def __str__(self):
return self._directory_path
def __enter__(self):
return self._directory_path
def __exit__(self, type, value, traceback):
# Only self-delete if necessary.
# FIXME: Should we delete non-empty directories?
if os.path.exists(self._directory_path):
os.rmdir(self._directory_path)
return TemporaryDirectory(**kwargs)
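    # mkdtemp usage sketch:
    #   fs = FileSystem()
    #   with fs.mkdtemp(suffix='-layout-tests') as temp_dir:
    #       ...  # temp_dir behaves as a plain string path inside the block
    #   # on exit the directory is deleted again (it must be empty by then)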
def maybe_make_directory(self, *path):
"""Creates the specified directory if it doesn't already exist."""
try:
# os.makedirs() supports UNC paths:
# https://docs.python.org/2/library/os.html#os.makedirs
os.makedirs(self._path_for_access(self.join(*path)))
except OSError as error:
if error.errno != errno.EEXIST:
raise
def move(self, source, destination):
shutil.move(source, destination)
def mtime(self, path):
return os.stat(path).st_mtime
def normpath(self, path):
return os.path.normpath(path)
def open_binary_tempfile(self, suffix=''):
"""Creates, opens, and returns a binary temp file.
Returns a tuple of the file and the name.
"""
temp_fd, temp_name = tempfile.mkstemp(suffix)
f = os.fdopen(temp_fd, 'wb')
return f, temp_name
def open_binary_file_for_reading(self, path):
return file(self._path_for_access(path), 'rb')
def open_binary_file_for_writing(self, path):
return file(self._path_for_access(path), 'wb')
def read_binary_file(self, path):
"""Returns the contents of the file as a byte string."""
with self.open_binary_file_for_reading(path) as f:
return f.read()
def write_binary_file(self, path, contents):
with self.open_binary_file_for_writing(path) as f:
f.write(contents)
def open_text_tempfile(self, suffix=''):
"""Creates, opens, and returns a text temp file.
Returns a tuple of the file and the name.
"""
_, temp_name = tempfile.mkstemp(suffix)
f = codecs.open(temp_name, 'w', 'utf8')
return f, temp_name
def open_text_file_for_reading(self, path):
# Note: There appears to be an issue with the returned file objects not
# being seekable. See:
# http://stackoverflow.com/questions/1510188/can-seek-and-tell-work-with-utf-8-encoded-documents-in-python
return codecs.open(self._path_for_access(path), 'r', 'utf8')
def open_text_file_for_writing(self, path):
return codecs.open(self._path_for_access(path), 'w', 'utf8')
def read_text_file(self, path):
"""Returns the contents of the file as a Unicode string.
The file is read assuming it is a UTF-8 encoded file with no BOM.
"""
with self.open_text_file_for_reading(path) as f:
return f.read()
def write_text_file(self, path, contents):
"""Writes the contents to the file at the given location.
The file is written encoded as UTF-8 with no BOM.
"""
with self.open_text_file_for_writing(path) as f:
f.write(contents)
def sha1(self, path):
contents = self.read_binary_file(path)
return hashlib.sha1(contents).hexdigest()
def relpath(self, path, start='.'):
return os.path.relpath(path, start)
class _WindowsError(exceptions.OSError):
"""Fake exception for Linux and Mac."""
def remove(self, path, osremove=os.remove, retry=True):
"""Removes a file.
On Windows, if a process was recently killed and it held on to a file,
the OS will hold on to the file for a short while. This makes attempts
to delete the file fail. To work around that, this method will retry
for a few seconds until Windows is done with the file.
"""
try:
exceptions.WindowsError
except AttributeError:
exceptions.WindowsError = FileSystem._WindowsError
retry_timeout_sec = 3.0
sleep_interval = 0.1
while True:
try:
# The default os.remove() supports UNC paths on Windows.
osremove(self._path_for_access(path))
return True
except exceptions.WindowsError:
time.sleep(sleep_interval)
retry_timeout_sec -= sleep_interval
if retry_timeout_sec < 0 and not retry:
raise
def rmtree(self, path, ignore_errors=True, onerror=None):
"""Deletes the directory rooted at path, whether empty or not."""
# shutil.rmtree() uses os.path.join() which doesn't support UNC paths.
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
def remove_contents(self, dirname):
"""Attempts to remove the contents of a directory tree.
Args:
dirname (string): Directory to remove the contents of.
Returns:
bool: True if the directory is now empty.
"""
return _remove_contents(self, dirname)
def copytree(self, source, destination):
# shutil.copytree() uses os.path.join() which doesn't support UNC paths.
shutil.copytree(source, destination)
def split(self, path):
"""Return (dirname, basename + '.' + ext)"""
return os.path.split(path)
def splitext(self, path):
"""Return (dirname + os.sep + basename, '.' + ext)"""
return os.path.splitext(path)
def make_executable(self, file_path):
os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP)
def symlink(self, source, link_name):
"""Create a symbolic link. Unix only."""
os.symlink(source, link_name)
def sanitize_filename(self, filename, replacement='_'):
"""Replaces all illegal characters in a filename with a given character.
The list of illegal characters covers all restrictions on Linux, Mac and
Windows.
Args:
filename: A basename (or a part of the basename) of a file to
sanitize. It cannot be a path because slashes will be replaced.
replacement: A character to replace all illegal characters with.
Returns:
The sanitized filename.
"""
return _sanitize_filename(filename, replacement)
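    # Example: sanitize_filename('results: 50%?.txt') returns
    # 'results_ 50__.txt', since ':', '%' and '?' are all in the illegal set.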
# _remove_contents is implemented in terms of other FileSystem functions. To
# allow it to be reused on the MockFileSystem object, we define it here and
# then call it in both FileSystem and MockFileSystem classes.
def _remove_contents(fs, dirname, sleep=time.sleep):
# We try multiple times, because on Windows a process which is
# currently closing could still have a file open in the directory.
_log.info('Removing contents of %s', dirname)
errors = []
def onerror(func, path, exc_info):
errors.append(path)
_log.exception('Failed at %s %s: %r', func, path, exc_info)
attempts = 0
while attempts < 5:
del errors[:]
for name in fs.listdir(dirname):
fullname = fs.join(dirname, name)
isdir = True
try:
isdir = fs.isdir(fullname)
except os.error:
onerror(fs.isdir, fullname, sys.exc_info())
continue
if isdir:
try:
_log.debug('Removing directory %s', fullname)
fs.rmtree(fullname, ignore_errors=False, onerror=onerror)
except os.error:
onerror(fs.rmtree, fullname, sys.exc_info())
continue
else:
try:
_log.debug('Removing file %s', fullname)
fs.remove(fullname, retry=False)
except os.error:
onerror(fs.remove, fullname, sys.exc_info())
continue
if not errors:
break
_log.warning('Contents removal failed, retrying in 1 second.')
attempts += 1
sleep(1)
# Check the path is gone.
if not fs.listdir(dirname):
return True
_log.warning('Unable to remove %s', dirname)
for dirpath, dirnames, filenames in fs.walk(dirname, onerror=onerror, topdown=False):
for fname in filenames:
_log.warning('File %s still in output dir.', fs.join(dirpath, fname))
for dname in dirnames:
_log.warning('Dir %s still in output dir.', fs.join(dirpath, dname))
return False
# Like _remove_contents above, defined at module level so that
# MockFileSystem can reuse it.
def _sanitize_filename(filename, replacement):
# The list comes from restrictions on Windows:
# https://support.microsoft.com/lo-la/help/905231/information-about-the-characters-that-you-cannot-use-in-site-names--fo
# It also includes all illegal characters on Mac and Linux.
illegal_filename_chars = r'~#%&*{}\:<>?/|"'
for char in illegal_filename_chars:
filename = filename.replace(char, replacement)
return filename
| [
"[email protected]"
] | |
182b8c9037b1084f8352e0f3eb7ec4b0f6ba7d29 | 0032c98333ffc0efdb920ecca31ab224378880e5 | /lib/Gear.py | 79b191aa67c1062ebd954f6df99bbe7433fe9d01 | [] | no_license | raspibrick/install | bd1c6f9a8cb524f2ab5a2c17ad8c5463b768dffa | 96288d6ca21abd8fb993cc376e37c16473b54dd5 | refs/heads/master | 2021-01-10T05:00:39.159879 | 2019-07-25T09:46:04 | 2019-07-25T09:46:04 | 40,703,681 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,940 | py | # Gear.java
'''
Class that represents the combination of two motors on an axis
to perform a car-like movement.
This software is part of the raspibrick module.
It is Open Source Free Software, so you may
- run the code for any purpose
- study how the code works and adapt it to your needs
- integrate all or parts of the code in your own programs
- redistribute copies of the code
- improve the code and release your improvements to the public
However the use of the code is entirely your responsibility.
'''
from Tools import Tools
import SharedConstants
from RobotInstance import RobotInstance
class GearState():
FORWARD = 0
BACKWARD = 1
STOPPED = 2
LEFT = 3
RIGHT = 4
LEFTARC = 5
RIGHTARC = 6
UNDEFINED = 7
# ------------------------ Class Gear --------------------------------------------------
class Gear(object):
'''
Class that represents the combination of two motors on an axis
to perform a car-like movement.
'''
def __init__(self):
'''
Creates a gear instance.
'''
self.speed = SharedConstants.GEAR_DEFAULT_SPEED
self.state = GearState.UNDEFINED
self.arcRadius = 0
Tools.debug("Gear instance created")
def forward(self, duration = 0):
'''
Starts the forward rotation with preset speed.
If duration = 0, the method returns immediately, while the rotation continues.
Otherwise the method blocks until the duration is expired. Then the gear stops.
@param duration: if greater than 0, the method blocks for the given duration (in ms)
@type duration: int
'''
Tools.debug("Calling Gear.forward() with speed " + str(self.speed))
self._checkRobot()
if self.state != GearState.FORWARD:
leftDuty = self.speedToDutyCycle(self.speed + SharedConstants.GEAR_FORWARD_SPEED_DIFF)
rightDuty = self.speedToDutyCycle(self.speed)
SharedConstants.LEFT_MOTOR_PWM[0].ChangeDutyCycle(leftDuty)
SharedConstants.LEFT_MOTOR_PWM[1].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[0].ChangeDutyCycle(rightDuty)
SharedConstants.RIGHT_MOTOR_PWM[1].ChangeDutyCycle(0)
self.state = GearState.FORWARD
if duration > 0:
Tools.delay(duration)
self.stop()
def backward(self, duration = 0):
'''
Starts the backward rotation with preset speed.
If duration = 0, the method returns immediately, while the rotation continues.
Otherwise the method blocks until the duration is expired. Then the gear stops.
@param duration if greater than 0, the method blocks for the given duration (in ms)
'''
Tools.debug("Calling Gear.backward() with speed " + str(self.speed))
self._checkRobot()
if self.state != GearState.BACKWARD:
leftDuty = self.speedToDutyCycle(self.speed + SharedConstants.GEAR_BACKWARD_SPEED_DIFF)
rightDuty = self.speedToDutyCycle(self.speed)
SharedConstants.LEFT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.LEFT_MOTOR_PWM[1].ChangeDutyCycle(leftDuty)
SharedConstants.RIGHT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[1].ChangeDutyCycle(rightDuty)
self.state = GearState.BACKWARD
if duration > 0:
Tools.delay(duration)
self.stop()
def left(self, duration = 0):
'''
Starts turning left with right motor rotating forward and
left motor rotating backward at preset speed.
If duration = 0, the method returns immediately, while the rotation continues.
Otherwise the method blocks until the duration is expired. Then the gear stops.
@param duration if greater than 0, the method blocks for the given duration (in ms)
'''
Tools.debug("Calling Gear.left()")
self._checkRobot()
if self.state != GearState.LEFT:
            duty = self.speedToDutyCycle(self.speed)
SharedConstants.LEFT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.LEFT_MOTOR_PWM[1].ChangeDutyCycle(duty)
SharedConstants.RIGHT_MOTOR_PWM[0].ChangeDutyCycle(duty)
SharedConstants.RIGHT_MOTOR_PWM[1].ChangeDutyCycle(0)
self.state = GearState.LEFT
if duration > 0:
Tools.delay(duration)
self.stop()
def right(self, duration = 0):
'''
Starts turning right with left motor rotating forward and
right motor rotating backward at preset speed.
If duration = 0, the method returns immediately, while the rotation continues.
Otherwise the method blocks until the duration is expired. Then the gear stops.
@param duration if greater than 0, the method blocks for the given duration (in ms)
'''
Tools.debug("Calling Gear.right()")
self._checkRobot()
if self.state != GearState.RIGHT:
duty = self.speedToDutyCycle(self.speed)
SharedConstants.LEFT_MOTOR_PWM[0].ChangeDutyCycle(duty)
SharedConstants.LEFT_MOTOR_PWM[1].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[1].ChangeDutyCycle(duty)
self.state = GearState.RIGHT
if duration > 0:
Tools.delay(duration)
self.stop()
def leftArc(self, radius, duration = 0):
'''
Starts turning to the left on an arc with given radius (in m) with preset speed.
If duration = 0, the method returns immediately, while the rotation continues.
Otherwise the method blocks until the duration is expired. Then the gear stops.
If the radius is negative, turns left backwards.
        @param radius: the radius of the arc (in m); negative values turn backwards
        @param duration: if greater than 0, the method blocks for the given duration (in ms)
'''
Tools.debug("Calling Gear.leftArc() with radius: " + str(radius))
self._checkRobot()
speed1 = \
self.speed * (abs(radius) - SharedConstants.GEAR_AXE_LENGTH) / (abs(radius) + SharedConstants.GEAR_AXE_LENGTH)
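        # Differential-drive sketch: the inner wheel is slowed relative to
        # the outer one. Assuming GEAR_AXE_LENGTH = 0.05 (the real value is
        # configuration-dependent), speed 80 and radius 0.2 m give an inner
        # speed of 80 * (0.2 - 0.05) / (0.2 + 0.05) = 48.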
Tools.debug("Calling leftArc(). Left speed: " + str(speed1) + ". Right speed: " + str(self.speed))
if self.state != GearState.LEFTARC or radius != self.arcRadius:
self.arcRadius = radius
leftDuty = self.speedToDutyCycle(speed1)
rightDuty = self.speedToDutyCycle(self.speed)
if radius >= 0:
SharedConstants.LEFT_MOTOR_PWM[0].ChangeDutyCycle(leftDuty)
SharedConstants.LEFT_MOTOR_PWM[1].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[0].ChangeDutyCycle(rightDuty)
SharedConstants.RIGHT_MOTOR_PWM[1].ChangeDutyCycle(0)
else:
SharedConstants.LEFT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.LEFT_MOTOR_PWM[1].ChangeDutyCycle(rightDuty)
SharedConstants.RIGHT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[1].ChangeDutyCycle(leftDuty)
self.state = GearState.LEFTARC
if duration > 0:
Tools.delay(duration)
self.stop()
def leftArcMilli(self, radius, duration = 0):
'''
Same as leftArc(radius, duration), but radius in mm
@param radius in mm
'''
self.leftArc(radius / 1000.0, duration)
def rightArc(self, radius, duration = 0):
'''
Starts turning to the right on an arc with given radius (in m) with preset speed.
If duration = 0, the method returns immediately, while the rotation continues.
Otherwise the method blocks until the duration is expired. Then the gear stops.
If the radius is negative, turns right backwards.
        @param radius: the radius of the arc (in m); negative values turn backwards
        @param duration: if greater than 0, the method blocks for the given duration (in ms)
'''
Tools.debug("Calling Gear.rigthArc() with radius: " + str(radius))
self._checkRobot()
speed1 = \
self.speed * (abs(radius) - SharedConstants.GEAR_AXE_LENGTH) / (abs(radius) + SharedConstants.GEAR_AXE_LENGTH)
Tools.debug("Calling rightArc(). Left speed: " + str(self.speed) + ". Right speed: " + str(speed1))
if self.state != GearState.RIGHTARC or self.arcRadius != radius:
self.arcRadius = radius
leftDuty = self.speedToDutyCycle(self.speed)
rightDuty = self.speedToDutyCycle(speed1)
if radius >= 0:
SharedConstants.LEFT_MOTOR_PWM[0].ChangeDutyCycle(leftDuty)
SharedConstants.LEFT_MOTOR_PWM[1].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[0].ChangeDutyCycle(rightDuty)
SharedConstants.RIGHT_MOTOR_PWM[1].ChangeDutyCycle(0)
else:
SharedConstants.LEFT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.LEFT_MOTOR_PWM[1].ChangeDutyCycle(rightDuty)
SharedConstants.RIGHT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[1].ChangeDutyCycle(leftDuty)
self.state = GearState.RIGHTARC
if duration > 0:
Tools.delay(duration)
self.stop()
def rightArcMilli(self, radius, duration = 0):
'''
        Same as rightArc(radius, duration), but radius in mm
@param radius in mm
'''
self.rightArc(radius / 1000.0, duration)
def stop(self):
'''
Stops the gear.
(If gear is already stopped, returns immediately.)
'''
Tools.debug("Calling Gear.stop()")
self._checkRobot()
if self.state != GearState.STOPPED:
SharedConstants.LEFT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.LEFT_MOTOR_PWM[1].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[0].ChangeDutyCycle(0)
SharedConstants.RIGHT_MOTOR_PWM[1].ChangeDutyCycle(0)
self.state = GearState.STOPPED
def setSpeed(self, speed):
'''
Sets the speed to the given value (arbitrary units).
The speed will be changed to the new value at the next movement call only.
The speed is limited to 0..100.
@param speed: the new speed 0..100
'''
Tools.debug("Calling Gear.setSpeed with speed: " + str(speed))
if self.speed == speed:
return
if speed > 100:
speed = 100
if speed < 0:
speed = 0
self.speed = speed
self.state = GearState.UNDEFINED
def speedToDutyCycle(self, speed):
'''
Linear relationship for mapping speed 0..100 to duty cycle
'''
if speed < 0:
return 0
elif speed > 100:
return 100
return speed
def _checkRobot(self):
if RobotInstance.getRobot() == None:
raise Exception("Create Robot instance first")
| [
"[email protected]"
] | |
2eeb9ae2d804446a3ffa2341c35a49419b89a47d | 0ad8fc76aebe7ce22abe771fbeadf227e5b471cb | /app/productdb/tests/test_celery_task_creation.py | 86ea5651b0e4aaaf77df4f63d6dd2a1ae313ba00 | [
"MIT"
] | permissive | ppavlu/product-database | 354c6a1a3e9ebfdc931f2aacf8751ed0f149401c | 09610c09600c63eb91106c0b5a2fa995b134dbf4 | refs/heads/master | 2021-01-17T22:51:43.247027 | 2015-10-11T11:37:12 | 2015-10-11T11:37:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | from django.core.urlresolvers import reverse
from django.test import TestCase
from app.productdb.models import Settings
class TestCeleryTaskCreation(TestCase):
"""
    This test verifies that a Celery task is created when certain URLs are called with a specific parameter.
"""
fixtures = ['default_vendors.yaml', 'default_users.yaml']
def test_trigger_manual_cisco_eox_synchronization(self):
"""
Test if the manual Cisco EoX synchronization can be scheduled manually
:return:
"""
print("--> remember to start a redis server when executing this test")
s, created = Settings.objects.get_or_create(id=0)
s.cisco_api_enabled = True
s.cisco_eox_api_auto_sync_enabled = True
s.save()
# schedule Cisco EoX API update
url = reverse('productdb:schedule_cisco_eox_api_sync_now')
self.client.login(username="admin", password="admin")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
# verify that task ID is saved in settings (set by the schedule call)
s = Settings.objects.get(id=0)
self.assertNotEqual(s.eox_api_sync_task_id, "")
| [
"[email protected]"
] | |
364125728030ee90a6148fc48a1cf9ab5ed80027 | 03898aa9b248360c16164adb50a40da418cdcb45 | /src/settings/common.py | 545ccdd9db091ede491583504513fa6161c3b57b | [] | no_license | asamolion/jobi | d0f9184de3db6fdee22934270e36c3c469f75ccb | d0ad0165c9d55b430d545d7c68d10cd7757e3766 | refs/heads/master | 2021-05-15T05:47:23.194551 | 2017-06-13T20:15:10 | 2017-06-13T20:15:10 | 115,773,416 | 0 | 0 | null | 2017-12-30T04:25:17 | 2017-12-30T04:25:17 | null | UTF-8 | Python | false | false | 4,115 | py | """
Django settings for src project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'rest_framework',
'tinymce',
'django_extensions',
'sorl.thumbnail',
'newsletter',
'django_celery_beat',
'django_celery_results',
'user_custom',
'admin_custom',
'data',
'essentials',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'src.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_dev", "static_root")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_dev", "media_root")
SITE_ID = 1
TINYMCE_JS_URL = os.path.join(STATIC_URL, "/tiny_mce/tiny_mce.js")
TINYMCE_JS_ROOT = os.path.join(STATIC_URL, 'tiny_mce')
TINYMCE_DEFAULT_CONFIG = {
'plugins': "table,spellchecker,paste,searchreplace",
'theme': "advanced",
'cleanup_on_startup': True,
'custom_undo_redo_levels': 10,
}
TINYMCE_SPELLCHECKER = True
TINYMCE_COMPRESSOR = True
# FIXME : Complete installation of django newsletter
# django-newsletter
# https://django-newsletter.readthedocs.io/en/latest/index.html
# Using django-tinymce as editor
NEWSLETTER_RICHTEXT_WIDGET = "tinymce.widgets.TinyMCE"
NEWSLETTER_CONFIRM_EMAIL = True
# Used by Celery and RabbitMq
# Set interval for the Master to check for scrapper status [seconds]
RMQ_REFRESH_RATE = 3600.00
| [
"[email protected]"
] | |
f34a1994b54077e70d2ad74f2ccf5a3171b699e2 | e35f87bf8c905bceddb40fe3ee6792dcabf62009 | /lte/gateway/python/magma/pipelined/tests/test_redirect.py | 7bab13c657d8bd71537b664de104018c7870e74d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | phirmware/magma | 2364544792b8c0f76e6a86fcbf1273bdb0bf6a49 | 1b3e4533235293f754d7375eb421c968cc0b1856 | refs/heads/master | 2020-09-20T21:30:36.523248 | 2019-11-28T17:42:01 | 2019-11-28T17:42:01 | 224,595,084 | 1 | 0 | NOASSERTION | 2019-11-28T07:31:58 | 2019-11-28T07:31:57 | null | UTF-8 | Python | false | false | 10,649 | py | """
Copyright (c) 2018-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import unittest
from concurrent.futures import Future
from unittest.mock import MagicMock
import warnings
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from lte.protos.policydb_pb2 import FlowDescription, FlowMatch, PolicyRule, \
RedirectInformation
from magma.pipelined.app.enforcement import EnforcementController
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.policy_converters import flow_match_to_magma_match
from magma.pipelined.tests.app.flow_query import RyuDirectFlowQuery \
as FlowQuery
from magma.pipelined.tests.app.packet_builder import TCPPacketBuilder
from magma.pipelined.tests.app.packet_injector import ScapyPacketInjector
from magma.pipelined.tests.app.start_pipelined import PipelinedController, \
TestSetup
from magma.pipelined.tests.app.subscriber import RyuDirectSubscriberContext
from magma.pipelined.tests.app.table_isolation import RyuDirectTableIsolator, \
RyuForwardFlowArgsBuilder
from magma.pipelined.tests.pipelined_test_util import FlowTest, FlowVerifier, \
create_service_manager, start_ryu_app_thread, stop_ryu_app_thread, \
wait_after_send, assert_bridge_snapshot_match
class RedirectTest(unittest.TestCase):
BRIDGE = 'testing_br'
IFACE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP_ADDRESS = '192.168.128.1'
# TODO test for multiple incoming requests (why we match on tcp ports)
@classmethod
def setUpClass(cls):
"""
Starts the thread which launches ryu apps
Create a testing bridge, add a port, setup the port interfaces. Then
launch the ryu apps for testing pipelined. Gets the references
to apps launched by using futures, mocks the redis policy_dictionary
of enforcement_controller
"""
super(RedirectTest, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([PipelineD.ENFORCEMENT])
cls._tbl_num = cls.service_manager.get_table_num(
EnforcementController.APP_NAME)
enforcement_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.Enforcement,
PipelinedController.Testing],
references={
PipelinedController.Enforcement:
enforcement_controller_reference,
PipelinedController.Testing:
testing_controller_reference
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP_ADDRESS,
'nat_iface': 'eth2',
'enodeb_iface': 'eth1',
'enable_queue_pgm': False,
},
mconfig=None,
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
cls.thread = start_ryu_app_thread(test_setup)
cls.enforcement_controller = enforcement_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
cls.enforcement_controller._redirect_manager._save_redirect_entry =\
MagicMock()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
def test_url_redirect(self):
"""
Partial redirection test, checks if flows were added properly for url
based redirection.
Assert:
1 Packet is matched
Packet bypass flows are added
Flow learn action is triggered - another flow is added to the table
"""
redirect_ips = ["185.128.101.5", "185.128.121.4"]
self.enforcement_controller._redirect_manager._dns_cache.get(
"about.sha.ddih.org", lambda: redirect_ips, max_age=42
)
imsi = 'IMSI010000000088888'
sub_ip = '192.168.128.74'
flow_list = [FlowDescription(match=FlowMatch())]
policy = PolicyRule(
id='redir_test', priority=3, flow_list=flow_list,
redirect=RedirectInformation(
support=1,
address_type=2,
server_address="http://about.sha.ddih.org/"
)
)
# ============================ Subscriber ============================
sub_context = RyuDirectSubscriberContext(
imsi, sub_ip, self.enforcement_controller, self._tbl_num
).add_dynamic_rule(policy)
isolator = RyuDirectTableIsolator(
RyuForwardFlowArgsBuilder.from_subscriber(sub_context.cfg)
.build_requests(),
self.testing_controller
)
pkt_sender = ScapyPacketInjector(self.IFACE)
packet = TCPPacketBuilder()\
.set_tcp_layer(42132, 80, 321)\
.set_tcp_flags("S")\
.set_ip_layer('151.42.41.122', sub_ip)\
.set_ether_layer(self.MAC_DEST, "00:00:00:00:00:00")\
.build()
# Check if these flows were added (queries should return flows)
permit_outbound, permit_inbound = [], []
for ip in redirect_ips:
permit_outbound.append(FlowQuery(
self._tbl_num, self.testing_controller,
match=flow_match_to_magma_match(
FlowMatch(ipv4_dst=ip, direction=FlowMatch.UPLINK))
))
permit_inbound.append(FlowQuery(
self._tbl_num, self.testing_controller,
match=flow_match_to_magma_match(
FlowMatch(ipv4_src=ip, direction=FlowMatch.DOWNLINK))
))
learn_action_flow = flow_match_to_magma_match(
FlowMatch(ip_proto=6, direction=FlowMatch.DOWNLINK,
ipv4_src=self.BRIDGE_IP_ADDRESS, ipv4_dst=sub_ip)
)
learn_action_query = FlowQuery(self._tbl_num, self.testing_controller,
learn_action_flow)
# =========================== Verification ===========================
# 1 packet sent, permit rules installed, learn action installed. Since
# the enforcement table is entered via the DPI table and the scratch
# enforcement table, the number of packets handled by the table is 2.
flow_verifier = FlowVerifier(
[FlowTest(FlowQuery(self._tbl_num, self.testing_controller), 2),
FlowTest(learn_action_query, 0, flow_count=1)] +
[FlowTest(query, 0, flow_count=1) for query in permit_outbound] +
[FlowTest(query, 0, flow_count=1) for query in permit_inbound],
lambda: wait_after_send(self.testing_controller))
with isolator, sub_context, flow_verifier:
pkt_sender.send(packet)
assert_bridge_snapshot_match(self, self.BRIDGE,
self.service_manager)
flow_verifier.verify()
def test_ipv4_redirect(self):
"""
Partial redirection test, checks if flows were added properly for ipv4
based redirection.
Assert:
1 Packet is matched
Packet bypass flows are added
Flow learn action is triggered - another flow is added to the table
"""
redirect_ip = "54.12.31.42"
imsi = 'IMSI012000000088888'
sub_ip = '192.168.128.74'
flow_list = [FlowDescription(match=FlowMatch())]
policy = PolicyRule(
id='redir_ip_test', priority=3, flow_list=flow_list,
redirect=RedirectInformation(
support=1,
address_type=0,
server_address=redirect_ip
)
)
# ============================ Subscriber ============================
sub_context = RyuDirectSubscriberContext(
imsi, sub_ip, self.enforcement_controller, self._tbl_num
).add_dynamic_rule(policy)
isolator = RyuDirectTableIsolator(
RyuForwardFlowArgsBuilder.from_subscriber(sub_context.cfg)
.build_requests(),
self.testing_controller
)
pkt_sender = ScapyPacketInjector(self.IFACE)
packet = TCPPacketBuilder()\
.set_tcp_layer(42132, 80, 321)\
.set_tcp_flags("S")\
.set_ip_layer('151.42.41.122', sub_ip)\
.set_ether_layer(self.MAC_DEST, "00:00:00:00:00:00")\
.build()
# Check if these flows were added (queries should return flows)
permit_outbound = FlowQuery(
self._tbl_num, self.testing_controller,
match=flow_match_to_magma_match(
FlowMatch(ipv4_dst=redirect_ip, direction=FlowMatch.UPLINK))
)
permit_inbound = FlowQuery(
self._tbl_num, self.testing_controller,
match=flow_match_to_magma_match(
FlowMatch(ipv4_src=redirect_ip, direction=FlowMatch.DOWNLINK))
)
learn_action_flow = flow_match_to_magma_match(
FlowMatch(ip_proto=6, direction=FlowMatch.DOWNLINK,
ipv4_src=self.BRIDGE_IP_ADDRESS, ipv4_dst=sub_ip)
)
learn_action_query = FlowQuery(self._tbl_num, self.testing_controller,
learn_action_flow)
# =========================== Verification ===========================
# 1 packet sent, permit rules installed, learn action installed. Since
# the enforcement table is entered via the DPI table and the scratch
# enforcement table, the number of packets handled by the table is 2.
flow_verifier = FlowVerifier([
FlowTest(FlowQuery(self._tbl_num, self.testing_controller), 2),
FlowTest(permit_outbound, 0, flow_count=1),
FlowTest(permit_inbound, 0, flow_count=1),
FlowTest(learn_action_query, 0, flow_count=1)
], lambda: wait_after_send(self.testing_controller))
with isolator, sub_context, flow_verifier:
pkt_sender.send(packet)
assert_bridge_snapshot_match(self, self.BRIDGE,
self.service_manager)
flow_verifier.verify()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
11e02d43b0ac54e7b1904e84863f92e6e5608b69 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02951/s958921676.py | d638adbe5c5e44f9223b05cd2fe5a87eb08d31df | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | a, b, c = map(int, input().split())
ans = c - (a - b)
if ans <= 0:
print(0)
else:
print(ans) | [
"[email protected]"
] | |
431d32716f21e9eef507696baec83c2625141591 | 251d6d11e807fa47fd1bad1f070b727500b17fd5 | /shares/migrations/0009_shareitemdividend_percent.py | 102eb77b42c8eeb128d242542ab0ea2694dc1656 | [] | no_license | khokhlov/dinv | a0964403a930f479fb744d90c4dbad887ba9810c | 7943b533808c913ec3564aa28ada485f857609ee | refs/heads/master | 2020-05-26T13:06:42.975971 | 2017-03-10T12:02:30 | 2017-03-10T12:02:30 | 82,479,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-18 18:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shares', '0008_auto_20170218_2106'),
]
operations = [
migrations.AddField(
model_name='shareitemdividend',
name='percent',
field=models.DecimalField(blank=True, decimal_places=15, help_text='\u0414\u043e\u0445\u043e\u0434\u043d\u043e\u0441\u0442\u044c \u043d\u0430 \u0434\u0430\u0442\u0443 \u0432\u044b\u043f\u043b\u0430\u0442\u044b, %', max_digits=35, null=True, verbose_name='\u0414\u043e\u0445\u043e\u0434\u043d\u043e\u0441\u0442\u044c, %'),
),
]
| [
"[email protected]"
] | |
566f7fe7adac4f3e60a1df3d58f77ea9f53eda7d | f22d31484a12d001826c1775a6f2d245a720fce8 | /Introdução à Programação com Python/Do autor/Códigi fonte e listagem/listagem/capitulo 05/05.10 - Contagem de questões corretas.py | 94220dbdaf11aa24f9b075ebf42e8fc3747daf6f | [] | no_license | eduardoprograma/linguagem_Python | 9eb55f0a5a432a986e047b091eb7ed7152b7da67 | 942aba9146800fc33bbea98778467f837396cb93 | refs/heads/master | 2021-07-07T20:48:37.673101 | 2020-07-31T21:24:17 | 2020-07-31T21:24:17 | 159,852,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2017
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/2012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Primeira reimpressão - Segunda edição - Maio/2015
# Segunda reimpressão - Segunda edição - Janeiro/2016
# Terceira reimpressão - Segunda edição - Junho/2016
# Quarta reimpressão - Segunda edição - Março/2017
#
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem\capitulo 05\05.10 - Contagem de questões corretas.py
##############################################################################
pontos = 0
questão = 1
while questão <= 3:
resposta = input("Resposta da questão %d: " % questão)
if questão == 1 and resposta == "b":
pontos = pontos + 1
if questão == 2 and resposta == "a":
pontos = pontos + 1
if questão == 3 and resposta == "d":
pontos = pontos + 1
questão += 1
print("O aluno fez %d ponto(s)" % pontos)
| [
"[email protected]"
] | |
461eeec321d529c29878dcb64812490da5702fda | 7938413839bb664b97769c2d7a72664b7ab80a64 | /tests/test_forms.py | bef9c1a8488fedd534e226e87159d4a5497f46ce | [
"BSD-3-Clause"
] | permissive | yurkobb/django-contact-form | a9268bdf357f7746ca38f34adf2d9a05deed5c8b | 3a160183458f4a782fc8d23f88807689d1335d10 | refs/heads/master | 2020-09-24T09:33:24.313968 | 2019-10-04T09:24:10 | 2019-10-04T09:24:10 | 225,729,641 | 0 | 0 | BSD-3-Clause | 2019-12-03T22:27:53 | 2019-12-03T22:27:52 | null | UTF-8 | Python | false | false | 6,323 | py | import os
import unittest
from django.conf import settings
from django.core import mail
from django.test import RequestFactory, TestCase
from django.utils.six import text_type
import mock
from contact_form.forms import AkismetContactForm, ContactForm
class ContactFormTests(TestCase):
"""
Tests the base ContactForm.
"""
valid_data = {'name': 'Test',
'email': '[email protected]',
'body': 'Test message'}
def request(self):
return RequestFactory().request()
def test_request_required(self):
"""
Can't instantiate without an HttpRequest.
"""
self.assertRaises(TypeError, ContactForm)
def test_valid_data_required(self):
"""
Can't try to build the message dict unless data is valid.
"""
data = {'name': 'Test',
'body': 'Test message'}
form = ContactForm(request=self.request(), data=data)
self.assertRaises(ValueError, form.get_message_dict)
self.assertRaises(ValueError, form.get_context)
def test_send(self):
"""
Valid form can and does in fact send email.
"""
form = ContactForm(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertTrue(self.valid_data['body'] in message.body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL,
message.from_email)
self.assertEqual(form.recipient_list,
message.recipients())
def test_no_sites(self):
"""
Sites integration works with or without installed
contrib.sites.
"""
with self.modify_settings(
INSTALLED_APPS={
'remove': ['django.contrib.sites'],
}):
form = ContactForm(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
def test_recipient_list(self):
"""
Passing recipient_list when instantiating ContactForm properly
overrides the list of recipients.
"""
recipient_list = ['[email protected]']
form = ContactForm(request=self.request(),
data=self.valid_data,
recipient_list=recipient_list)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertEqual(recipient_list,
message.recipients())
def test_callable_template_name(self):
"""
When a template_name() method is defined, it is used and
preferred over a 'template_name' attribute.
"""
class CallableTemplateName(ContactForm):
def template_name(self):
return 'contact_form/test_callable_template_name.html'
form = CallableTemplateName(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertTrue('Callable template_name used.' in
message.body)
def test_callable_message_parts(self):
"""
Message parts implemented as methods are called and preferred
over attributes.
"""
overridden_data = {
'from_email': '[email protected]',
'message': 'Overridden message.',
'recipient_list': ['[email protected]'],
'subject': 'Overridden subject',
}
class CallableMessageParts(ContactForm):
def from_email(self):
return overridden_data['from_email']
def message(self):
return overridden_data['message']
def recipient_list(self):
return overridden_data['recipient_list']
def subject(self):
return overridden_data['subject']
form = CallableMessageParts(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
self.assertEqual(overridden_data,
form.get_message_dict())
@unittest.skipUnless(
getattr(
settings,
'AKISMET_API_KEY',
os.getenv('PYTHON_AKISMET_API_KEY')
) is not None,
"AkismetContactForm requires Akismet configuration"
)
class AkismetContactFormTests(TestCase):
"""
Tests the Akismet contact form.
"""
def request(self):
return RequestFactory().request()
def test_akismet_form_spam(self):
"""
The Akismet contact form correctly rejects spam.
"""
data = {'name': 'viagra-test-123',
'email': '[email protected]',
'body': 'This is spam.'}
with mock.patch('akismet.Akismet', autospec=True) as akismet_mock:
instance = akismet_mock.return_value
instance.verify_key.return_value = True
instance.comment_check.return_value = True
form = AkismetContactForm(
request=self.request(),
data=data
)
self.assertFalse(form.is_valid())
self.assertTrue(
text_type(form.SPAM_MESSAGE) in
form.errors['body']
)
def test_akismet_form_ham(self):
"""
The Akismet contact form correctly accepts non-spam.
"""
data = {'name': 'Test',
'email': '[email protected]',
'body': 'Test message.'}
with mock.patch('akismet.Akismet', autospec=True) as akismet_mock:
instance = akismet_mock.return_value
instance.verify_key.return_value = True
instance.comment_check.return_value = False
form = AkismetContactForm(
request=self.request(),
data=data
)
self.assertTrue(form.is_valid())
| [
"[email protected]"
] | |
1d39e93c056e4c8546343a22db34e45dab76f66d | 56dff287f055067b3c08dcbbad0c5df09377cab5 | /myshop/urls.py | 75a244fd38b3cf95124769c5247992ace63fdd8e | [] | no_license | antonmazun/kursach_project | 971b37435a5fe231afe24a57a497bba6f417b0d0 | 04ee570b1975791e1a1c22ac76bbdc55f5accbd8 | refs/heads/master | 2021-01-13T14:40:30.541635 | 2016-12-17T07:16:45 | 2016-12-17T07:16:45 | 76,683,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | """myshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from . import settings
urlpatterns = [
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
    url(r'^admin/', admin.site.urls),
url(r'^basicview/', include('blog.urls')),
url(r'^auth/', include('loginsys.urls') ),
url(r'^api/' , include('blog.api_urls')),
#url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^', include('blog.urls')),
# url(r'^ckeditor/', include('ckeditor_uploader.urls')),
# url(r'', include('user.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
| [
"[email protected]"
] | |
ef0abb1939ca922caa44de6b6f2b04e213bf49d4 | 0cf7dd2c3c0b28b52f1273e8fe2ea0a87cacc6af | /futval_graph_2.py | 59a74b1035887410d2c7ae4724dc960ecaacca5f | [] | no_license | EngrDevDom/Everyday-Coding-in-Python | 61b0e4fcbc6c7f399587deab2fa55763c9d519b5 | 93329ad485a25e7c6afa81d7229147044344736c | refs/heads/master | 2023-02-25T05:04:50.051111 | 2021-01-30T02:43:40 | 2021-01-30T02:43:40 | 274,971,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | # File : futval_graph_2.py
# Desc : Future Value Graph Version 2.0
from graphics import *
def main():
# Introduction
print("This program plots the growth of a 10-year investment.")
# Get principal and interest rate
principal = float(input("Enter the initial principal: "))
apr = float(input("Enter the annualized interest rate: "))
# Create a graphics window with labels on left edge
win = GraphWin("Investment Growth Chart", 320, 240)
win.setBackground("white")
win.setCoords(-1.75, -200, 11.5, 10400)
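    # setCoords remaps the 320x240-pixel window to chart coordinates:
    # x runs from -1.75 (margin for the labels) to 11.5 (ten bars plus
    # margin), y from -200 to 10400, so a bar of height 5000 reaches
    # roughly halfway up the window.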
Text(Point(-1, 0), ' 0.0K').draw(win)
Text(Point(-1, 2500), ' 2.5K').draw(win)
Text(Point(-1, 5000), ' 5.0K').draw(win)
Text(Point(-1, 7500), ' 7.5K').draw(win)
Text(Point(-1, 10000), ' 10.0K').draw(win)
# Draw bar for initial principal
bar = Rectangle(Point(0, 0), Point(1, principal))
bar.setFill("green")
bar.setWidth(2)
bar.draw(win)
# Draw a bar for each subsequent year
for year in range(1, 11):
principal = principal * (1 + apr)
bar = Rectangle(Point(year, 0), Point(year+1, principal))
        bar.setFill("green")  # fill to match the initial principal bar
        bar.setWidth(2)
bar.draw(win)
input("Press <Enter> to quit.")
win.close()
main()
| [
"[email protected]"
] | |
ca14417a5fd678ba5eb1b37a8aa1ddfa86d0688c | 82149a84b47fb37238452a658d5f3a8f23342658 | /pushbase/message_box_component.py | 563dab44d1e184f89d5ad92aec6a767a6d607eb8 | [] | no_license | maratbakirov/AbletonLive11_MIDIRemoteScripts | 408c90522d8f54b878e595b0d8af28ad5008a4a1 | 2b25ba9351764c49f7fd1f99875e28e67b002f30 | refs/heads/master | 2023-04-12T04:44:16.715220 | 2021-05-09T11:48:24 | 2021-05-09T11:48:24 | 365,708,395 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,551 | py | #Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/pushbase/message_box_component.py
from __future__ import absolute_import, print_function, unicode_literals
from builtins import map
from builtins import object
import re
from future.moves.itertools import zip_longest
from ableton.v2.base import forward_property, const, nop, listens, listenable_property
from ableton.v2.base.dependency import dependency
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.elements import DisplayDataSource
from ableton.v2.control_surface.components import BackgroundComponent
from .consts import DISPLAY_LENGTH, MessageBoxText
FORMAT_SPECIFIER_WITH_MARKUP_PATTERN = re.compile(u'[%](len=([0-9]+),)?([^%]*?[diouxXeEfFgGcrs])')
def strip_restriction_markup_and_format(text_or_text_spec):
if isinstance(text_or_text_spec, tuple):
format_string = text_or_text_spec[0]
stripped_format_string = re.sub(FORMAT_SPECIFIER_WITH_MARKUP_PATTERN, u'%\\g<3>', format_string)
arguments = text_or_text_spec[1:]
return stripped_format_string % arguments
else:
return text_or_text_spec
class Notification(object):
def __init__(self, parent, *a, **k):
super(Notification, self).__init__(*a, **k)
self.hide = parent.hide_notification
class Messenger(object):
u"""
Externally provided interface for those components that provide
global Push feedback.
"""
expect_dialog = dependency(expect_dialog=const(nop))
show_notification = dependency(show_notification=const(nop))
class MessageBoxComponent(BackgroundComponent):
u"""
Component showing a temporary message in the display
"""
__events__ = (u'cancel',)
num_lines = 4
def __init__(self, *a, **k):
super(MessageBoxComponent, self).__init__(*a, **k)
self._current_text = None
self._can_cancel = False
self.data_sources = list(map(DisplayDataSource, (u'',) * self.num_lines))
self._notification_display = None
def _set_display_line(self, n, display_line):
if display_line:
display_line.set_data_sources((self.data_sources[n],))
def set_display_line1(self, display_line):
self._set_display_line(0, display_line)
def set_display_line2(self, display_line):
self._set_display_line(1, display_line)
def set_display_line3(self, display_line):
self._set_display_line(2, display_line)
def set_display_line4(self, display_line):
self._set_display_line(3, display_line)
def set_cancel_button(self, button):
self._on_cancel_button_value.subject = button
self._update_cancel_button()
def _update_cancel_button(self):
if self.is_enabled():
button = self._on_cancel_button_value.subject
if button is not None:
button.reset()
if self._can_cancel and button:
button.set_light(u'MessageBox.Cancel')
def _update_display(self):
if self._current_text != None:
lines = self._current_text.split(u'\n')
for source_line, line in zip_longest(self.data_sources, lines):
if source_line:
source_line.set_display_string(line or u'')
if self._can_cancel:
self.data_sources[-1].set_display_string(u'[ Ok ]'.rjust(DISPLAY_LENGTH - 1))
@listens(u'value')
def _on_cancel_button_value(self, value):
if self.is_enabled() and self._can_cancel and value:
self.notify_cancel()
@listenable_property
def text(self):
return self._current_text
@text.setter
def text(self, text):
if self._current_text != text:
self._current_text = text
self._update_display()
self.notify_text()
@listenable_property
def can_cancel(self):
return self._can_cancel
@can_cancel.setter
def can_cancel(self, can_cancel):
if self._can_cancel != can_cancel:
self._can_cancel = can_cancel
self._update_cancel_button()
self._update_display()
self.notify_can_cancel()
def update(self):
super(MessageBoxComponent, self).update()
self._update_cancel_button()
self._update_display()
class DialogComponent(Component):
u"""
Handles representing modal dialogs from the application. The
script can also request dialogs.
"""
def __init__(self, *a, **k):
super(DialogComponent, self).__init__(*a, **k)
self._message_box = MessageBoxComponent(parent=self, is_enabled=False)
self._next_message = None
self._on_open_dialog_count.subject = self.application
self._on_message_cancel.subject = self._message_box
message_box_layer = forward_property(u'_message_box')(u'layer')
def expect_dialog(self, message):
u"""
Expects a dialog from Live to appear soon. The dialog will be
shown on the controller with the given message regardless of
        whether a dialog actually appears. This dialog can be
cancelled.
"""
self._next_message = message
self._update_dialog()
@listens(u'open_dialog_count')
def _on_open_dialog_count(self):
self._update_dialog(open_dialog_changed=True)
self._next_message = None
@listens(u'cancel')
def _on_message_cancel(self):
self._next_message = None
try:
self.application.press_current_dialog_button(0)
except RuntimeError:
pass
self._update_dialog()
def _update_dialog(self, open_dialog_changed = False):
message = self._next_message or MessageBoxText.LIVE_DIALOG
can_cancel = self._next_message != None
self._message_box.text = message
self._message_box.can_cancel = can_cancel
self._message_box.set_enabled(self.application.open_dialog_count > 0 or not open_dialog_changed and self._next_message)
class InfoComponent(BackgroundComponent):
u"""
Component that will show an info text and grab all components that should be unusable.
"""
def __init__(self, info_text = u'', *a, **k):
super(InfoComponent, self).__init__(*a, **k)
self._data_source = DisplayDataSource()
self._data_source.set_display_string(info_text)
def set_display(self, display):
if display:
display.set_data_sources([self._data_source])
| [
"[email protected]"
] | |
c8c7fc85d28ffc62c00ad9995fef8973968eb8be | b63142e8540cb30bb0c663332e29a4112721073e | /901_closest_binary_search_tree_value_II.py | 7c734dae6192625113de43b849bed05bdabc682b | [] | no_license | HaydenInEdinburgh/LintCode | 025bb2f0d75686097061de324c0fd292536dbb14 | dbeae2bf631e57667d1415164d452d5ca2df7447 | refs/heads/master | 2023-08-18T19:52:54.561623 | 2021-10-06T21:46:50 | 2021-10-06T21:46:50 | 370,733,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: the given BST
@param target: the given target
@param k: the given k
@return: k values in the BST that are closest to the target
"""
def closestKValues(self, root, target, k):
# write your code here
if not root:
return
lower_stack, upper_stack = [], []
res = []
# upper_stack => upper bound of the target [desc]
# lower_stack => lower bound of the target [asc]
cur = root
while cur:
upper_stack.append(cur)
cur = cur.left
cur = root
while cur:
lower_stack.append(cur)
cur = cur.right
while len(upper_stack) >0 and upper_stack[-1].val < target:
self.move_upper(upper_stack)
while len(lower_stack) >0 and lower_stack[-1].val >= target:
self.move_lower(lower_stack)
for i in range(k):
if not lower_stack:
res.append(upper_stack[-1].val)
self.move_upper(upper_stack)
elif not upper_stack:
res.append(lower_stack[-1].val)
self.move_lower(lower_stack)
else:
upper, lower = upper_stack[-1].val, lower_stack[-1].val
if abs(upper - target) < abs(lower - target):
res.append(upper)
self.move_upper(upper_stack)
else:
res.append(lower)
self.move_lower(lower_stack)
return res
def move_upper(self, stack):
cur = stack.pop()
if cur.right:
cur = cur.right
while cur:
stack.append(cur)
cur = cur.left
def move_lower(self, stack):
cur = stack.pop()
if cur.left:
cur = cur.left
while cur:
stack.append(cur)
                cur = cur.right
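

# A minimal usage sketch (added example; LintCode supplies TreeNode at runtime,
# so a tiny stand-in is defined here to keep this snippet self-contained):
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, val):
            self.val = val
            self.left, self.right = None, None

    root = TreeNode(4)
    root.left, root.right = TreeNode(2), TreeNode(6)
    root.left.left, root.left.right = TreeNode(1), TreeNode(3)
    print(Solution().closestKValues(root, 3.7, 2))  # -> [4, 3]
| [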
"[email protected]"
] | |
662c08229760c0a9763d7d467b8797d9e6268021 | f68ec37ae975d3aaff2ab3d6a0bae11a2cc432fa | /iot_message/tests/test_plain_cryptor.py | 7909cbc06e25c672eddf4706f64f472276ea30a7 | [
"MIT"
] | permissive | bkosciow/python_iot-1 | 3f78a0b2ec18949e579a75132a838a7793d6bbe8 | f3cd2bdbb75cb9afe13fecb603b5b8c026d23500 | refs/heads/master | 2021-04-29T01:00:10.595553 | 2019-10-27T19:09:46 | 2019-10-27T19:09:46 | 77,786,513 | 0 | 0 | MIT | 2019-10-27T19:09:47 | 2017-01-01T16:24:51 | Python | UTF-8 | Python | false | false | 1,394 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#pylint: skip-file
from nose.tools import assert_equal
from iot_message.cryptor.plain import Cryptor
from iot_message.message import Message
__author__ = 'Bartosz Kościów'
import iot_message.factory as factory
class TestCryptorPlain(object):
def setUp(self):
Message.chip_id = 'pc'
Message.node_name = 'Turkusik'
Message.drop_unencrypted = False
Message.encoders = []
Message.decoders = {}
def test_encode_message(self):
Message.add_encoder(Cryptor())
msg = factory.MessageFactory.create()
inp = {"event": "channel.on", "parameters": {"channel": 0}, "response": "", "targets": ["node-north"]}
msg.set(inp)
msg.encrypt()
assert_equal(inp["event"], msg.data["event"])
assert_equal(inp["parameters"], msg.data["parameters"])
assert_equal(inp["targets"], msg.data["targets"])
def test_decrypt_message(self):
Message.add_decoder(Cryptor())
inp = """{"protocol": "iot:1", "node": "Turkusik", "chip_id": "pc", "event": "message.plain", "parameters": ["a"], "response": "", "targets": ["Turkusik"]}"""
msg = factory.MessageFactory.create(inp)
assert_equal(msg.data["event"], "message.plain")
assert_equal(msg.data["parameters"], ["a"])
assert_equal(msg.data["targets"], ['Turkusik'])
| [
"[email protected]"
] | |
d7a51028837d657019f9ce5cb1f457861af7fb4f | e9757274ddb8484e27590ff0cc3f24550776c6cc | /Solved/0090/0090.py | 2e9cf4d9d2394d5db0791134951f59d920d1fab6 | [] | no_license | Jinmin-Goh/LeetCode | 948a9b3e77eb03507aad6f3c78640aa7f00e6ad5 | d6e80b968032b08506c5b185f66d35c6ff1f8bb9 | refs/heads/master | 2020-09-22T10:22:18.443352 | 2020-09-06T06:34:12 | 2020-09-06T06:34:12 | 225,153,497 | 1 | 1 | null | 2020-01-29T15:16:53 | 2019-12-01T11:55:25 | Python | UTF-8 | Python | false | false | 642 | py | # Problem No.: 90
# Solver: Jinmin Goh
# Date: 20200105
# URL: https://leetcode.com/problems/subsets-ii/
from typing import List  # provides the List type used in the hints below
class Solution:
def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
ans = [[]]
nums.sort()
for i in range(len(nums)):
temp = []
temp_ans = []
for j in range(len(ans)):
temp.append(ans[j][:])
            for j in range(len(temp)):
                candidate = temp[j] + [nums[i]]
                # temp mirrors ans here, so a single membership test suffices
                # to skip duplicate subsets produced by repeated values
                if candidate not in ans:
                    temp_ans.append(candidate)
ans += temp_ans[:]
        return ans
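

# A quick check (added example): duplicate values collapse to unique subsets.
if __name__ == '__main__':
    print(Solution().subsetsWithDup([1, 2, 2]))
    # -> [[], [1], [2], [1, 2], [2, 2], [1, 2, 2]]
| [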
"[email protected]"
] | |
1a16421fb2838a45d9e6547212bb8e2eb6d37eeb | 23b686feb2d0ab9082a7ce622fc055946ed99c55 | /.history/atkd/views_20190410194907.py | 7a0a09bfdb29f76eeeb07e0da6fa0ffd18751db4 | [] | no_license | jasvr/atkd | a18b9840bf9948a7560684cd5eb0d5e22f6c52c7 | daf61f7aa11cfc812171298894b1d0019641c4bd | refs/heads/master | 2020-05-07T09:35:56.343837 | 2019-04-12T16:17:09 | 2019-04-12T16:17:09 | 180,383,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | from django.shortcuts import render
from .models import Parent, Student
def parent_list(request):
parents = Parent.objects.all()
return render(request, 'atkd/parent_list.html', {'parents': parents})
def parent_detail(request, pk):
parent = Parent.objects.get(id=pk)
return render(request, 'atkd/parent_detail.html', {'parent': parent})
def student_list(request):
students = Student.objects.all()
return render(request,'atkd/student_list.html',{'students':students})
def student_detail(request, pk):
| [
"[email protected]"
] | |
b08f9607a37ab94a68b28bf4a97f2e2c7d373c85 | 0a0b75df10cb4643a2a9124750239f85bb0abadf | /dataset/arxiv_ordering.py | 8c95621558da411473c4112a7be5e95129c8ec1c | [
"MIT"
] | permissive | baotg080599/passage-ordering | 1d8eaac939c08f21c398425af819f49544af809d | f63b993dfd5b7e6475e7fb8950c23c3f22951979 | refs/heads/main | 2023-09-05T21:04:05.172285 | 2021-11-22T11:18:26 | 2021-11-22T11:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,542 | py | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""arXiv ordering dataset."""
from __future__ import absolute_import, division, print_function
import json
import os
import datasets
import numpy as np
import pathlib
_CITATION = """
@misc{chen2016neural,
title={Neural Sentence Ordering},
author={Xinchi Chen and Xipeng Qiu and Xuanjing Huang},
year={2016},
eprint={1607.06952},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
Dataset for sentence ordering using text from arXiv."""
_PATH = "dataset/arxiv/"
_SENTENCES = "sentences"
_SHUFFLED_SENTENCES = "shuffled_sentences"
_LABEL = "label"
class ArXivOrdering(datasets.GeneratorBasedBuilder):
"""arXiv ordering dataset."""
VERSION = datasets.Version("1.0.0")
def _info(self):
info = datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
_SENTENCES: datasets.Sequence(datasets.Value("string")),
_SHUFFLED_SENTENCES: datasets.Sequence(datasets.Value("string")),
_LABEL: datasets.Sequence(datasets.Value("int8")),
}
),
supervised_keys=None,
homepage="https://github.com/FudanNLP/NeuralSentenceOrdering",
citation=_CITATION,
)
return info
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_path = os.path.join(pathlib.Path().absolute(), _PATH)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"path": os.path.join(data_path, "train.txt")},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"path": os.path.join(data_path, "valid.txt")},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"path": os.path.join(data_path, "test.txt")},
),
]
def _generate_examples(self, path=None):
"""Yields examples."""
with open(path, "r") as f:
data = f.read()
examples = data.split("\n\n")
for i, example in enumerate(examples):
lines = example.split("\n")
sentences = lines[2:]
if sentences == []:
continue
shuffled_sentences, label = self.shuffle_sentences(sentences)
yield i, {
_SENTENCES: sentences,
_SHUFFLED_SENTENCES: shuffled_sentences,
_LABEL: label,
}
def shuffle_sentences(self, sentences):
sentences = np.array(sentences)
permutation = np.random.permutation(len(sentences))
return sentences[permutation].tolist(), np.argsort(permutation).tolist()
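
# Worked example for shuffle_sentences (hypothetical draw): with sentences
# ['a', 'b', 'c'] and permutation [2, 0, 1], shuffled is ['c', 'a', 'b'] and
# label = np.argsort([2, 0, 1]).tolist() = [1, 2, 0], i.e. each original
# sentence's position in the shuffled list -- the ordering target to recover.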
| [
"[email protected]"
] | |
cb33f0219b3459387def27f934269bb9559ff9cf | 3f60b999ea7bda83c9586f75f52463dc20337f24 | /sensitive_user_portrait/influence_application/user_list.py | 123e10e3ce502ea62b35a1d374c6cac5452c08ff | [] | no_license | jianjian0dandan/sensitive_user_portrait | 629e49ce71db92b50634bac9c828811cdb5381e9 | cacc30267ebc0e621b1d48d4f1206277a0f48123 | refs/heads/master | 2021-01-20T23:18:07.138057 | 2016-05-22T12:09:40 | 2016-05-22T12:09:40 | 42,869,287 | 0 | 0 | null | 2015-09-21T13:55:12 | 2015-09-21T13:55:11 | null | UTF-8 | Python | false | false | 1,754 | py | # -*- coding:utf-8 -*-
writer = ['1311967407', '1195347197', '1142648704', '1889213710', '1706987705']
expert = ['1827652007', '1265965213', '1596329427', '1908195982', '2248789552']
grassroot = ['1220291284','1677126410', '1635764393', '2682546440', '3188186580', '1665808371', '1751124681', '2721237781', '2971141411', '3188186580', '2540187460', '3689493785']
religion = ['1218353337', '1761179351', '3482911112', '1220291284', '2504433601']
attorney = ['1840604224', '2752172261', '1707314224', '3268947881', '1935084477', '3568404205', '1510017057', '3306911303', '1006235863', '3194187021', '3575186384', '1762325394', '1628679672', '2338225900']
public_intellectual = ['1197161814','1182391231','1182389073', '1989660417', '1494720324', '1189591617', '1971861621', '3270699555', '1093622153', '2892376557']
non_public_owner = ['1182391231', '1640571365', '1197161814', '1191220232', '1749127163']
independent_media = ['1189729754', '1497882593', '1742566624', '1661598840', '2283406765']
public_media = ['2803301701', '1974576991', '1639498782', '2105426467', '1644648333']
civil_officer = ['1419517335', '1098736570', '1729736051', '2419062270', '1369714780']
star = ['1687429295', '1195300800', '1997641692', '1746274673', '1223178222']
commonweal = ['3299094722', '1342829361', '1946798710', '1894477791', '1958321657']
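# English glosses for the Chinese domain labels used as keys below (kept as-is
# since other modules likely match on these exact strings): 作家写手 = writers,
# 专家学者 = experts/scholars, 草根红人 = grassroots influencers, 宗教人士 = religious
# figures, 维权律师 = rights-defense lawyers, 公知分子 = public intellectuals,
# 非公企业主 = private-sector business owners, 独立媒体人 = independent media,
# 官方媒体 = official media, 公职人员 = civil servants, 文体明星 = culture & sports
# stars, 社会公益 = social welfare/charity.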
domain_dict = {'作家写手': writer, '专家学者': expert, '草根红人': grassroot, '宗教人士': religion, \
'维权律师': attorney, '公知分子': public_intellectual, '非公企业主': non_public_owner, \
'独立媒体人': independent_media, '官方媒体': public_media, '公职人员': civil_officer, \
'文体明星': star, '社会公益': commonweal}
| [
"[email protected]"
] | |
ad5ccba0f6cdf7b7f5ac9268a639d3d4f3c37905 | 22b64b8157f8a1daa55f4508ca0a28e356329d94 | /809.column_density.py | dc6c3b8c71b55fed067bd24764fbd01b93a2ad6d | [] | no_license | GiantMolecularCloud/NGC253-turbulent-gas-structure | 2edc96a8b6e6b407217f8d34459a1d6a3b155b8c | d2c47e62f412bfb3665bcde5754221e40214ff7b | refs/heads/main | 2023-03-02T13:47:53.191569 | 2021-02-05T16:32:15 | 2021-02-05T16:32:15 | 336,327,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,098 | py | ##############
# DENDROGRAM #
##############
###################################################################################################
# load data
###################################################################################################
execfile(join(scriptdir, '800.info.py'))
data = fnunpickle(join(datadir, 'data.pickle'))
dendrograms = load_dendrograms()
###################################################################################################
# get column density
###################################################################################################
# integrated flux
###################################################################################################
for co,CO in lines.items():
for gal,GAL in galaxies.items():
from astropy.table.column import Column
catalog = dendrograms[co][gal]['catalog']
dendrogram = dendrograms[co][gal]['dendrogram']
try:
del catalog['integrated Tb']
except:
pass
chanwidth = CO['resolution']['spectral']
idx_list = [i.idx for i in dendrogram.all_structures]
all_structs_ordered = [x for _,x in sorted(zip(idx_list,list(dendrogram.all_structures)))]
flux = []
for struct in tqdm(all_structs_ordered):
flux.append( struct.values().sum()*chanwidth.value )
# add column to catalog table
catalog.add_column( Column(name='integrated Tb', data=flux, dtype=np.float64, unit='K km/s') )
# structure area
###################################################################################################
for co,CO in lines.items():
for gal,GAL in galaxies.items():
from astropy.table.column import Column
catalog = dendrograms[co][gal]['catalog']
dendrogram = dendrograms[co][gal]['dendrogram']
Apix = ((CO['resolution']['spatial']/5.)**2)
npix = []
idx_list = [i.idx for i in dendrogram.all_structures]
all_structs_ordered = [x for _,x in sorted(zip(idx_list,list(dendrogram.all_structures)))]
for struct in tqdm(all_structs_ordered):
mask = struct.get_mask()
n = np.sum(mask, axis=0)
n[n>0] = 1.
n = np.sum(n)
npix.append(n)
area_projected = npix*Apix
catalog.add_column( Column(name='npix (projected)', data=npix, dtype=np.int64) )
catalog.add_column( Column(name='area (projected)', data=area_projected, dtype=np.float64, unit='pc^2') )
# effective pixel area
###################################################################################################
for co,CO in lines.items():
for gal,GAL in galaxies.items():
from astropy.table.column import Column
catalog = dendrograms[co][gal]['catalog']
dendrogram = dendrograms[co][gal]['dendrogram']
Apix = ((CO['resolution']['spatial']/5.)**2)
npix_exact = catalog['area (exact)']/Apix
npix_ellipse = catalog['area (ellipse)']/Apix
npix_effective = catalog['area (effective)']/Apix
catalog.add_column( Column(name='npix (exact)', data=npix_exact, dtype=np.float64) )
catalog.add_column( Column(name='npix (ellipse)', data=npix_ellipse, dtype=np.float64) )
catalog.add_column( Column(name='npix (effective)', data=npix_effective, dtype=np.float64) )
for i in ['projected','exact','ellipse','effective']:
print(np.percentile(dendrograms['CO(1-0)']['NGC253']['catalog']['npix ('+i+')'], (1,50,99)))
# column density
###################################################################################################
for co,CO in lines.items():
for gal,GAL in galaxies.items():
from astropy.table.column import Column
catalog = dendrograms[co][gal]['catalog']
dendrogram = dendrograms[co][gal]['dendrogram']
Xco = GAL['Xco'] *u.cm**-2 /(u.K*u.km/u.s)
        # factor 1.36 accounts for the helium contribution to the total gas mass
coldens_projected = catalog['integrated Tb']*1.36*Xco/CO['excitation correction'] /catalog['npix (projected)']
coldens_exact = catalog['integrated Tb']*1.36*Xco/CO['excitation correction'] /catalog['npix (exact)']
coldens_ellipse = catalog['integrated Tb']*1.36*Xco/CO['excitation correction'] /catalog['npix (ellipse)']
coldens_effective = catalog['integrated Tb']*1.36*Xco/CO['excitation correction'] /catalog['npix (effective)']
for col in ['projected','exact','ellipse','effective']:
try:
del catalog['column density ('+col+')']
except:
pass
catalog.add_column( Column(name='column density (projected)', data=coldens_projected, dtype=np.float64, unit='cm^-2') )
catalog.add_column( Column(name='column density (exact)', data=coldens_exact, dtype=np.float64, unit='cm^-2') )
catalog.add_column( Column(name='column density (ellipse)', data=coldens_ellipse, dtype=np.float64, unit='cm^-2') )
catalog.add_column( Column(name='column density (effective)', data=coldens_effective, dtype=np.float64, unit='cm^-2') )
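        # Net formula per structure: N(H2, incl. He) [cm^-2] =
        # (integrated Tb [K km/s]) * 1.36 * Xco / (excitation correction) / npix,
        # i.e. the structure-summed intensity converted to a mean per-pixel column.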
for i in ['projected','exact','ellipse','effective']:
print(np.percentile(dendrograms['CO(1-0)']['NGC253']['catalog']['column density ('+i+')'], (1,50,99)))
for i in ['projected','exact','ellipse','effective']:
print(np.percentile(dendrograms['CO(3-2)']['NGC253']['catalog']['column density ('+i+')'], (1,50,99)))
# mass
###################################################################################################
for co,CO in lines.items():
for gal,GAL in galaxies.items():
from astropy.table.column import Column
catalog = dendrograms[co][gal]['catalog']
dendrogram = dendrograms[co][gal]['dendrogram']
Apix = ((CO['resolution']['spatial']/5.)**2).to(u.cm**2)
Xco = GAL['Xco'] *u.cm**-2 /(u.K*u.km/u.s)
# atomic weight of H2: 2; 1.36 to account for helium
mass = ((catalog['integrated Tb']*Xco/CO['excitation correction'] *Apix *1.36*2.0*u.u).to(u.Msun)).value
catalog.add_column( Column(name='mass', data=mass, dtype=np.float64, unit='Msun') )
# log mass
mass[(mass<1e0) & ~np.isfinite(mass)] = np.nan
log_mass = np.log10(mass)
catalog.add_column( Column(name='log mass', data=log_mass, dtype=np.float64, unit='log Msun') )
###################################################################################################
# save catalog
###################################################################################################
for co,CO in lines.items():
for gal,GAL in galaxies.items():
fnpickle(dendrograms[co][gal]['catalog'], join(compdir,gal+'.'+co+'.catalog.pickle'))
###################################################################################################
#
###################################################################################################
| [
"[email protected]"
] | |
b46d4b24129fca46f968e5c52580c6c91b8043f9 | 979ee8dcf0ca0c4c249809bbac86c7a781c2e98d | /tensorflow_datasets/image/mnist.py | 265e602b62665c4d4d327b3bdd0e63be1f6e7108 | [
"Apache-2.0"
] | permissive | MODU-FTNC/tensorflow-datasets | c315602bcb50830e05d6a4c8968d20e7f1d5a3f5 | 1565c40e3d8a9d6ecf186cb53117d4bd998b4437 | refs/heads/master | 2020-04-02T21:51:33.243567 | 2018-10-24T19:04:41 | 2018-10-24T19:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,017 | py | # coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MNIST and Fashion MNIST."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import random
import numpy as np
import six.moves.urllib as urllib
import tensorflow as tf
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import file_format_adapter
from tensorflow_datasets.image import image_utils
# MNIST constants
_MNIST_URL = "http://yann.lecun.com/exdb/mnist/"
_MNIST_TRAIN_DATA_FILENAME = "train-images-idx3-ubyte.gz"
_MNIST_TRAIN_LABELS_FILENAME = "train-labels-idx1-ubyte.gz"
_MNIST_TEST_DATA_FILENAME = "t10k-images-idx3-ubyte.gz"
_MNIST_TEST_LABELS_FILENAME = "t10k-labels-idx1-ubyte.gz"
_MNIST_IMAGE_SIZE = 28
_TRAIN_EXAMPLES = 60000
_TEST_EXAMPLES = 10000
# URL for Fashion MNIST data.
_FASHION_MNIST_URL = ("http://fashion-mnist.s3-website.eu-central-1"
".amazonaws.com/")
class MNIST(dataset_builder.GeneratorBasedDatasetBuilder):
"""MNIST."""
URL = _MNIST_URL
def _dataset_split_generators(self, dl_manager):
# Download the full MNist Database
filenames = {
"train_data": _MNIST_TRAIN_DATA_FILENAME,
"train_labels": _MNIST_TRAIN_LABELS_FILENAME,
"test_data": _MNIST_TEST_DATA_FILENAME,
"test_labels": _MNIST_TEST_LABELS_FILENAME,
}
mnist_files = dl_manager.download_and_extract({
k: urllib.parse.urljoin(self.URL, v) for k, v in filenames.items()
})
# MNIST provides TRAIN and TEST splits, not a VALIDATION split, so we only
# write the TRAIN and TEST splits to disk.
train_gen = functools.partial(
_generate_mnist_examples,
nb_examples=_TRAIN_EXAMPLES,
data_path=mnist_files["train_data"],
label_path=mnist_files["train_labels"],
)
test_gen = functools.partial(
_generate_mnist_examples,
nb_examples=_TEST_EXAMPLES,
data_path=mnist_files["test_data"],
label_path=mnist_files["test_labels"],
)
train_splits = [
self._split_files(split=dataset_builder.Split.TRAIN, num_shards=10)
]
test_splits = [
self._split_files(split=dataset_builder.Split.TEST, num_shards=1)
]
return [
dataset_builder.SplitGenerator(generator_fn=train_gen,
split_files=train_splits),
dataset_builder.SplitGenerator(generator_fn=test_gen,
split_files=test_splits),
]
@property
def _file_format_adapter(self):
example_spec = {
"input/encoded": tf.FixedLenFeature(tuple(), tf.string),
"target": tf.FixedLenFeature(tuple(), tf.int64),
}
return file_format_adapter.TFRecordExampleAdapter(example_spec)
def _preprocess(self, record):
record["input"] = image_utils.decode_png(
record.pop("input/encoded"),
[_MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1])
return record
def _generate_mnist_examples(nb_examples, data_path, label_path):
"""Generate MNIST examples as dicts.
Args:
    nb_examples (int): The number of examples.
data_path (str): Path to the data files
label_path (str): Path to the labels
Returns:
Generator yielding:
Feature dictionaries `dict<str feature_name, feature_value>` containing:
      * `input/encoded`: png-encoded image (the key declared in the adapter's
        example_spec above)
      * `target`: label
"""
images = _extract_mnist_images(data_path, nb_examples)
labels = _extract_mnist_labels(label_path, nb_examples)
# Shuffle the data to make sure classes are well distributed.
data = list(zip(images, labels))
random.shuffle(data)
return image_utils.image_classification_generator(data)
class FashionMNIST(MNIST):
URL = _FASHION_MNIST_URL
def _extract_mnist_images(image_filepath, num_images):
with tf.gfile.Open(image_filepath, "rb") as f:
    f.read(16)  # skip the 16-byte IDX header: magic number, image count, rows, cols
buf = f.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images)
data = np.frombuffer(
buf, dtype=np.uint8).reshape(num_images, _MNIST_IMAGE_SIZE,
_MNIST_IMAGE_SIZE, 1)
return data
def _extract_mnist_labels(labels_filepath, num_labels):
with tf.gfile.Open(labels_filepath, "rb") as f:
    f.read(8)  # skip the 8-byte IDX header: magic number, label count
buf = f.read(num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
| [
"[email protected]"
] | |
ae8418d13bffc926264d2ff5a2082318766d25f5 | 57cf3fca43dd108f0d1626ab411177dc0604385c | /mozbuilddata/exporter.py | 4504541d5dab43bb4675af50992d752b6121348f | [] | no_license | indygreg/mozilla-build-analyzer | d5b8ec2a49a23b244fb214c52a60ce7a8f6bf659 | 108c81610e6b9f5cd8ebcf322d15158cea8a8c70 | refs/heads/master | 2021-01-11T08:31:07.125777 | 2013-08-21T22:07:14 | 2013-08-21T22:07:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,650 | py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import errno
import json
import os
class JSONExporter(object):
def __init__(self, conn, path):
self.c = conn
self.root = path
def export(self):
self._mkdir('.')
for f in ['totals', 'builder_counters']:
func = getattr(self, '_export_%s' % f)
for msg in func():
yield msg
def _mkdir(self, p):
p = os.path.join(self.root, p)
try:
os.makedirs(p)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _active_dates(self):
today = datetime.date.today()
for i in range(30, 0, -1):
yield today - datetime.timedelta(i)
def _write_obj(self, path, obj):
path = os.path.join(self.root, path)
with open(path, 'wb') as fh:
json.dump(obj, fh)
def _export_totals(self):
yield 'Writing totals.json'
o = {}
#o['file_counts'] = len(list(self.c.filenames()))
self._write_obj('totals.json', o)
def _export_builder_counters(self):
yield 'Writing builder counts files.'
self._mkdir('builder/job_counts/by-day')
self._mkdir('builder/job_durations/by-day')
counts = dict(self.c.builder_counts())
self._write_obj('builder/job_counts/all.json', counts)
for date in self._active_dates():
df = date.isoformat()
counts = dict(self.c.builder_counts_in_day(df))
if not counts:
continue
self._write_obj('builder/job_counts/by-day/%s.json' % df, counts)
yield 'Writing builder duration files.'
durations = dict(self.c.builder_durations())
self._write_obj('builder/job_durations/all.json', durations)
for date in self._active_dates():
df = date.isoformat()
durations = dict(self.c.builder_durations_in_day(df))
if not durations:
continue
self._write_obj('builder/job_durations/by-day/%s.json' % df,
durations)
yield 'Writing per-category builder files.'
for cat in sorted(self.c.builder_categories()):
p = 'builder/by-category/%s' % cat
self._mkdir(p)
counts = dict(self.c.builder_counts_in_category(cat))
total = sum(counts.values())
self._write_obj('%s/job-counts.json' % p, counts)
| [
"[email protected]"
] | |
9999cfa6cbfc157f54994c94631a64343430436d | c577f5380b4799b4db54722749cc33f9346eacc1 | /BugSwarm/scikit-learn-scikit-learn-405440735/buggy_files/sklearn/linear_model/tests/test_least_angle.py | 15df84177e66d95ee0d07414c7bbb4ac4a699fc9 | [
"BSD-3-Clause"
] | permissive | tdurieux/BugSwarm-dissection | 55db683fd95f071ff818f9ca5c7e79013744b27b | ee6b57cfef2119523a083e82d902a6024e0d995a | refs/heads/master | 2020-04-30T17:11:52.050337 | 2019-05-09T13:42:03 | 2019-05-09T13:42:03 | 176,972,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,892 | py | import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
import pytest
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
@pytest.mark.filterwarnings('ignore: `rcond` parameter will change')
# numpy deprecation
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
# Avoid FutureWarning about default value change when numpy >= 1.14
rcond = None if LooseVersion(np.__version__) >= '1.14' else -1
coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
@pytest.mark.filterwarnings('ignore:`rcond` parameter will change')
# numpy deprecation
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
rng = np.random.RandomState(0)
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = rng.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
@pytest.mark.parametrize(
'classifier',
[linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC])
def test_lars_precompute(classifier):
# Check for different values of precompute
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
clf = classifier(precompute=G)
output_1 = ignore_warnings(clf.fit)(X, y).coef_
for precompute in [True, False, 'auto', None]:
clf = classifier(precompute=precompute)
output_2 = clf.fit(X, y).coef_
assert_array_almost_equal(output_1, output_2, decimal=8)
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
# same test, with normalization
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
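    # i.e. the Lasso objective: (1 / (2 * n_samples)) * ||y - X w||_2^2 + alpha * ||w||_1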
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
estimators = [
linear_model.LassoLars(),
linear_model.Lars(),
# regression test for gh-1615
linear_model.LassoLars(fit_intercept=False),
linear_model.Lars(fit_intercept=False),
]
for estimator in estimators:
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
assert_false(hasattr(lars_cv, 'n_nonzero_coefs'))
def test_lars_cv_max_iter():
with warnings.catch_warnings(record=True) as w:
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
x = rng.randn(len(y))
X = np.c_[X, x, x] # add correlated features
lars_cv = linear_model.LassoLarsCV(max_iter=5)
lars_cv.fit(X, y)
assert_true(len(w) == 0)
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
# Once deprecation of LAR + positive option is done use these:
# assert_raises(ValueError, linear_model.lars_path, diabetes['data'],
# diabetes['target'], method='lar', positive=True)
with pytest.warns(DeprecationWarning, match="broken"):
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method='lar',
positive=True)
method = 'lasso'
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
# for a range of alphas, we had to make an adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_R_implementation():
# Test that sklearn LassoLars implementation agrees with the LassoLars
# implementation available in R (lars library) under the following
# scenarios:
# 1) fit_intercept=False and normalize=False
# 2) fit_intercept=True and normalize=True
# Let's generate the data used in the bug report 7778
y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822,
-19.42109366])
x = np.array([[0.47299829, 0, 0, 0, 0],
[0.08239882, 0.85784863, 0, 0, 0],
[0.30114139, -0.07501577, 0.80895216, 0, 0],
[-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
[-0.69363927, 0.06754067, 0.18064514, -0.0803561,
0.40427291]])
X = x.T
###########################################################################
# Scenario 1: Let's compare R vs sklearn when fit_intercept=False and
# normalize=False
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
# trace=TRUE, normalize=FALSE)
# r = t(model_lasso_lars$beta)
#
r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,
-83.777653739190711, -83.784156932888934,
-84.033390591756657],
[0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0,
0.025219751009936],
[0, -3.577397088285891, -4.702795355871871,
-7.016748621359461, -7.614898471899412, -0.336938391359179,
0, 0, 0.001213370600853, 0.048162321585148],
[0, 0, 0, 2.231558436628169, 2.723267514525966,
2.811549786389614, 2.813766976061531, 2.817462468949557,
2.817368178703816, 2.816221090636795],
[0, 0, -1.218422599914637, -3.457726183014808,
-4.021304522060710, -45.827461592423745,
-47.776608869312305,
-47.911561610746404, -47.914845922736234,
-48.039562334265717]])
model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False,
normalize=False)
model_lasso_lars.fit(X, y)
skl_betas = model_lasso_lars.coef_path_
assert_array_almost_equal(r, skl_betas, decimal=12)
###########################################################################
###########################################################################
# Scenario 2: Let's compare R vs sklearn when fit_intercept=True and
# normalize=True
#
# Note: When normalize is equal to True, R returns the coefficients in
# their original units, that is, they are rescaled back, whereas sklearn
# does not do that, therefore, we need to do this step before comparing
# their results.
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE,
# trace=TRUE, normalize=TRUE)
# r2 = t(model_lasso_lars2$beta)
r2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 8.371887668009453, 19.463768371044026],
[0, 0, 0, 0, 9.901611055290553],
[0, 7.495923132833733, 9.245133544334507,
17.389369207545062, 26.971656815643499],
[0, 0, -1.569380717440311, -5.924804108067312,
-7.996385265061972]])
model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True,
normalize=True)
model_lasso_lars2.fit(X, y)
skl_betas2 = model_lasso_lars2.coef_path_
# Let's rescale back the coefficients returned by sklearn before comparing
# against the R result (read the note above)
temp = X - np.mean(X, axis=0)
normx = np.sqrt(np.sum(temp ** 2, axis=0))
skl_betas2 /= normx[:, np.newaxis]
assert_array_almost_equal(r2, skl_betas2, decimal=12)
###########################################################################
| [
"[email protected]"
] | |
286ae249433950b017b9abd83b42271e3242490c | 7029073a12cf93e066e2b2e51134160d8c5c2b6d | /tango_with_django_project/rango/migrations/0002_auto_20190606_1734.py | c924bc8c3da1075575b89d9c75c256e34b2979ca | [] | no_license | eflipe/tango-with-django-and-polls | c85898808e2103a8b95aa290caceb5a8ce5b3bf9 | cc91f7cf5f219ec945035e98628363ad4c56b023 | refs/heads/master | 2020-06-04T02:16:34.155465 | 2019-06-26T14:45:20 | 2019-06-26T14:45:20 | 191,830,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # Generated by Django 2.1.5 on 2019-06-06 20:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rango', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories'},
),
migrations.AddField(
model_name='category',
name='likes',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='category',
name='views',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
b6a3316fb8d4683775360c8b0ba25f6e49ae3e68 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03852/s966510800.py | 2b804b35488391e38ae1b2a96d024bb448a9d5da | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | import sys
input = lambda: sys.stdin.readline().rstrip()
def main():
c = input()
    vowels = ['a', 'i', 'u', 'e', 'o']  # renamed to avoid shadowing the built-in str
    if c in vowels:
print('vowel')
else:
print('consonant')
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
9cf28886a88e6788f61893d95e128594c5d472c1 | 37cde98734ebe6cc99a390c8ae2f049346ffbe88 | /sort_for_forecast.py | ecec9b034a219c5940ec96d52cd2d383d1c90ced | [] | no_license | Dimon0014/Zapis_chtenie_03_12 | 4d36efaf2667263e163cec1dd5d64c9c8f3f0422 | d32e9eeb968ff9f6c9339a8e33219f166f757687 | refs/heads/master | 2021-09-09T16:40:39.751513 | 2018-03-18T05:21:33 | 2018-03-18T05:21:33 | 113,576,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | from operator import itemgetter
from collections import OrderedDict
d = {(37,36):[ 1,[1, 2], 33],(37,35):[ 11,[101, 102], 31]}
x = {1: 2, 3: 4, 4: 3, 2: 1, 0: 0}
sorted_x = (sorted(d.items(), key=lambda t: t[1][2]))  # sorts by the last number in each value list
# OrderedDict([(0, 0), (2, 1), (1, 2), (4, 3), (3, 4)])
print(sorted_x)
sorted_x = (sorted(d.items(), reverse=True, key=lambda t: t[1][2]))  # sorts by the last number in each value list, then yields the items from the end
print(sorted_x)
#print(sorted_x[0][0]) # extract the first key
#print(sorted_x[1][0]) # extract the second key | [
"[email protected]"
] | |
20bb6ca5b8cb7ce87db17974d93530e6b68b9bf4 | 7c2dcfefcf9eec3f12095b304541048f4e23cb6f | /butn-eg1.py | 123a91a0706998fec8708fb2180707c671586d07 | [] | no_license | mamaker/eupy | b042e6f43601235afc290c25c62e58f8622643ee | 8700ae7f22cda3be7170cf153205e8b41a943d4b | refs/heads/master | 2020-05-23T11:02:50.835477 | 2019-05-15T01:57:56 | 2019-05-15T01:57:56 | 186,731,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
butn-eg1.py
Created on Tue May 14 14:12:58 2019
@author: madhu
"""
import tkinter as tk
def write_slogan():
print("Tkinter is easy to use!")
root = tk.Tk()
frame = tk.Frame(root)
frame.pack()
button = tk.Button(frame,
text="QUIT",
fg="red",
font="Verdana 26 bold",
command=root.destroy)
button.pack(side=tk.LEFT)
slogan = tk.Button(frame,
text="Hello",
font="Verdana 26 bold",
command=write_slogan)
slogan.pack(side=tk.LEFT)
root.mainloop()
| [
"[email protected]"
] | |
a88577bd3025afd548ec507773aec881739a8541 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02402/s285849568.py | 4fafd999b2f74b08bae833e0e846df311a76b6ef | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | #coding:utf-8
n = input()
a = []
a_input = raw_input().split()
for i in a_input:
a.append(int(i))
print min(a), max(a), sum(a) | [
"[email protected]"
] | |
6a72b8899c4d9357de5c92967a680fdad3f299a6 | 434b6556038ad326ffaa8584a8a91edf8ad5c037 | /GenericTrees-1/NumNodes.py | fdc36e9ff4df334a4381f17d45707aef4923454a | [] | no_license | Pranav016/DS-Algo-in-Python | 60702460ad6639dd3e8a1fdc3caf0821b8e0b4c2 | 5557e371ccdf801d78ba123ca83c0dd47b3bdb3b | refs/heads/master | 2023-01-23T08:29:32.186861 | 2020-11-01T17:14:12 | 2020-11-01T17:14:12 | 284,651,382 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | class TreeNode:
def __init__(self,data):
self.data=data
self.children=list()
def printTree(root):
if root is None:
return
print(root.data)
for child in root.children:
printTree(child)
def printTreeDetailed(root):
if root is None:
return
print(root.data,end=":")
for child in root.children:
print(child.data,end=",")
print()
for child in root.children:
printTreeDetailed(child)
def treeInput():
print("Enter root data: ")
rootData=int(input())
if rootData == -1:
return None
root=TreeNode(rootData)
print("Enter the no of children of ", rootData," :")
numChild=int(input())
for i in range(numChild):
child=treeInput()
root.children.append(child)
return root
# Method 1
def numNodes1(root):
if root is None:
return 0
count=1
for child in root.children:
count=count+numNodes1(child)
return count
# Method 2
count=0
def numNodes2(root):
global count
if root is None:
return 0
count+=1
for child in root.children:
numNodes2(child)
return count
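# Note: numNodes2 accumulates into the module-level `count`, so calling it a
# second time in the same run would return a stale, inflated total;
# numNodes1 is re-entrant.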
# main
root=treeInput()
# printTreeDetailed(root)
print(numNodes2(root)) | [
"[email protected]"
] | |
878eb2b6b84778f9c3205b98ff27172ba0646c63 | f539db814fce098f71192e6b4922be53687d140e | /azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/models/phone_properties.py | 3899065bcfe7573352ca53101bcb53f4b00b8028 | [
"MIT"
] | permissive | marki555/azure-sdk-for-python | 1eb7abe1de5f13db7bd6654f2eb517544e4950dc | adc3a3bf4ddb06ab8207bbabf8910577e96d2512 | refs/heads/master | 2021-08-28T08:32:50.974402 | 2017-12-11T17:36:06 | 2017-12-11T17:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,202 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PhoneProperties(Model):
"""Phone Property details.
:param country_code: CountryCode of the detected Phone number.
:type country_code: str
:param text: Detected Phone number.
:type text: str
:param index: Index(Location) of the Phone number in the input text
content.
:type index: float
"""
_attribute_map = {
'country_code': {'key': 'countryCode', 'type': 'str'},
'text': {'key': 'text', 'type': 'str'},
'index': {'key': 'index', 'type': 'float'},
}
def __init__(self, country_code=None, text=None, index=None):
self.country_code = country_code
self.text = text
self.index = index
| [
"[email protected]"
] | |
409819f0c59ef097758fd6cba4b76ab3e963f738 | 4a240d06679e464f885228b3a08c91644b4af65d | /0118. Pascal's Triangle.py | 21ea6a88fad0487d4d9331b9b87abc494ee31cd7 | [] | no_license | CaizhiXu/LeetCode-Solutions-Python-Weimin | f46a75797c32eb3ff8c884eea2c6e31e18a47c1e | 48d63c98beac4260b2b2b4dff26139d19752d125 | refs/heads/master | 2022-02-24T06:22:36.085101 | 2019-09-18T04:03:19 | 2019-09-18T04:03:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # dynamic programming, space O(n), time O(n^2)
# bottom up; note the returned triangle itself takes O(n^2) space, so the
# "space O(n)" above refers only to the extra per-row working space
# https://leetcode.com/problems/pascals-triangle/
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
if numRows < 1:
return []
elif numRows == 1:
return [[1]]
res = [[1]]
for i in range(1, numRows):
vals = [1]
for j in range(i-1): # not range(i)
vals.append(res[-1][j] + res[-1][j+1])
vals.append(1)
res.append(vals)
return res
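# Example (an illustrative check, not part of the original submission):
#   Solution().generate(4) -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]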
| [
"[email protected]"
] | |
ed322316c06fedeff412830f9458b976b3b6a53f | c828f5c86e8ae7a157bd3f212c4bd754ee04e5e7 | /exercise_coding_test_1.py | e4f3de6db1535257feb85c9bf02006381dbf19d7 | [] | no_license | SeungHune/beakjun | c319e33f10a3dfd3acb090a7872b900ed92c5419 | 5d4610557aa56efc41053954299924ab890812f2 | refs/heads/master | 2020-12-03T04:37:48.054739 | 2020-11-10T10:50:17 | 2020-11-10T10:50:17 | 231,204,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # 해시 - 완주하지 못한 선수
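# Hash - "the runner who did not finish": find the single participant missing
# from the completion list (translation of the Korean note above).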
def solution(participant, completion):
participant.sort()
# print(participant)
completion.sort()
# print(completion)
for i in range(len(completion)):
if participant[i] != completion[i]:
return participant[i]
return participant[-1]
print(solution(["leo", "kiki", "eden"], ["eden", "kiki"]))
print(solution(["marina", "josipa", "nikola", "vinko", "filipa"],
["josipa", "filipa", "marina", "nikola"]))
print(solution(["mislav", "stanko", "mislav", "ana"],["stanko", "ana", "mislav"])) | [
"[email protected]"
] | |
9fe0d8ccd1b7e3c64db5f15e1b6b6322e84cb7b0 | f54d702c1289b2b78f423850d7fedba6c9378126 | /Python/Sets/set-discard-remove-pop.py | c14b54b2998a1f2696af860af61e098f00d6a952 | [
"MIT"
] | permissive | ekant1999/HackerRank | 81e6ac5bec8307bca2bd1debb169f2acdf239b66 | 084d4550b4eaf130837ab26a4efdbcaf8b667cdc | refs/heads/master | 2020-05-02T09:19:10.102144 | 2016-10-27T04:10:28 | 2016-10-27T04:10:28 | 177,868,424 | 0 | 0 | MIT | 2019-03-26T21:04:17 | 2019-03-26T21:04:17 | null | UTF-8 | Python | false | false | 378 | py | n = input()
s = set(map(int, raw_input().split()))
c = int(raw_input())
for i in range(c):
command = raw_input().split()
if command[0] == 'pop':
s.pop()
elif command[0] == 'remove':
s.remove(int(command[1]))
elif command[0] == 'discard':
s.discard(int(command[1]))
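# discard() silently ignores a value that is absent from the set, whereas
# remove() raises KeyError for it and pop() takes an arbitrary element.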
total = 0
for x in s:
total += x
print total | [
"[email protected]"
] | |
275d4cf639767b26b5534e0e960c0fbfebb14e65 | 1e50d8c2217ffd5dca17bf0da539ff98e22be990 | /tests/test_utils.py | a3597ccb51723e9b482e201120174823b64037e1 | [
"CC-BY-4.0",
"MIT"
] | permissive | matslindh/dtedifier | 2c90ae69ed6e20559839d4ac58c8b7a8a099480c | daad614f4d3ee4e0b88e832f5b07128eba2f0602 | refs/heads/master | 2022-09-28T18:15:29.056421 | 2020-05-29T13:40:21 | 2020-05-29T13:40:21 | 267,865,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | from dtedifier.utils import (
degrees_string_to_float,
na_or_int,
empty_or_int,
convert_signed_16bfixed,
)
from pytest import (
approx
)
def test_degrees_string_to_float():
assert degrees_string_to_float('1001010E') == approx(100.16944444)
assert degrees_string_to_float('0505959E') == approx(50.99972222)
assert degrees_string_to_float('1001010W') == approx(-100.16944444)
assert degrees_string_to_float('0505959W') == approx(-50.99972222)
assert degrees_string_to_float('0505959N') == approx(50.99972222)
assert degrees_string_to_float('0505959S') == approx(-50.99972222)
assert degrees_string_to_float('424242.42') == approx(42.71178333)
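    # e.g. '1001010E' reads as DDDMMSS plus hemisphere:
    # 100 + 10/60 + 10/3600 = 100.1694..., with W/S negating the result;
    # '424242.42' is DDMMSS.SS with fractional seconds and no hemisphere.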
def test_na_or_int():
assert na_or_int('NA ') is None
assert na_or_int('341') == 341
assert na_or_int('341 ') == 341
def test_empty_or_int():
assert empty_or_int(' ') is None
assert empty_or_int('1231 ') == 1231
def test_convert_signed_16bfixed():
assert convert_signed_16bfixed(-1) is None
assert convert_signed_16bfixed(-2) == -32766
assert convert_signed_16bfixed(2) == 2
assert convert_signed_16bfixed(32767) == 32767
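    # Hedged note: these expectations match a signed-magnitude reading of
    # DTED elevations, where raw -1 (0xFFFF) marks a void sample (hence None)
    # and other negative raw values decode as -(raw & 0x7FFF).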
| [
"[email protected]"
] | |
32374aebf85565af27a3add23cec07466aaa07aa | e121dcc5d23e225891420e730549b9cc7ebe8e88 | /python/lib/direct/extensions_native/VBase3.py | f54e3e8a89de985505c6438f24eb3c6957a91610 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | PlumpMath/panda3d-3 | 4f4cf7627eddae9b7f30795e0a0657b01fdf670d | 5c0be0e1cd46b422d28d5b81ffb1e8b28c3ac914 | refs/heads/master | 2021-01-25T06:55:36.209044 | 2014-09-29T14:24:53 | 2014-09-29T14:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | """
Methods to extend functionality of the VBase3 class
"""
from panda3d.direct.extensions_native.Helpers import *
Dtool_PreloadDLL("panda")
from panda import *
def pPrintValues(self):
"""
Pretty print
"""
return "% 10.4f, % 10.4f, % 10.4f" % (self[0], self[1], self[2])
Dtool_funcToMethod(pPrintValues, VBase3)
del pPrintValues
def asTuple(self):
"""
Returns the vector as a tuple.
"""
print "Warning: VBase3.asTuple() is no longer needed and deprecated. Use the vector directly instead."
return tuple(self)
Dtool_funcToMethod(asTuple, VBase3)
del asTuple
| [
"[email protected]"
] | |
f0b7b2b37d848ed15bfc888c0eba84a61cbd2f51 | 5e80f0b1af9fbf9dc774dbb68aa603574e4ae0ba | /algorithm-study/leetcode/climbing-stairs.py | ca3dca47f2b76b821331f45202cfeefc9033a5d6 | [] | no_license | namujinju/study-note | 4271b4248b3c4ac1b96ef1da484d86569a030762 | 790b21e5318a326e434dc836f5f678a608037a8c | refs/heads/master | 2023-02-04T13:25:55.418896 | 2020-12-26T10:47:11 | 2020-12-26T10:47:11 | 275,279,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | def climbStairs(n):
if n <= 2:
return n
arr = [1, 2]
for i in range(n-2):
arr.append(arr[-1]+arr[-2])
return arr[n-1]
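# Why this works: the final move onto step n is either a 1-step from n-1 or
# a 2-step from n-2, so ways(n) = ways(n-1) + ways(n-2), the Fibonacci
# recurrence seeded with ways(1) = 1 and ways(2) = 2.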
n = 4
print(climbStairs(n))
| [
"[email protected]"
] | |
2bbf4d931135df1477a8e41fcae63595118ead1e | 8076124f4087781e0513dbe09c0f43dc6a861ab0 | /src/sentry/mediators/external_requests/util.py | f3775d3ac28f7b2ab5638ec645a128b03c78f206 | [
"BSD-2-Clause"
] | permissive | sharmapacific/sentry | 75e3356f87cb5a1e812e0974b081fd47852dfe33 | fceabe7cb84de587fe05b2c36edc013058e7e55a | refs/heads/master | 2020-08-19T00:13:48.748983 | 2019-10-17T17:09:06 | 2019-10-17T17:09:06 | 215,851,537 | 1 | 0 | BSD-3-Clause | 2019-10-17T17:43:49 | 2019-10-17T17:43:49 | null | UTF-8 | Python | false | false | 953 | py | from __future__ import absolute_import
from jsonschema import Draft4Validator
SELECT_OPTIONS_SCHEMA = {
"type": "array",
"definitions": {
"select-option": {
"type": "object",
"properties": {"label": {"type": "string"}, "value": {"type": "string"}},
"required": ["label", "value"],
}
},
"properties": {"type": "array", "items": {"$ref": "#definitions/select-option"}},
}
ISSUE_LINKER_SCHEMA = {
"type": "object",
"properties": {
"webUrl": {"type": "string"},
"identifier": {"type": "string"},
"project": {"type": "string"},
},
"required": ["webUrl", "identifier", "project"],
}
SCHEMA_LIST = {"select": SELECT_OPTIONS_SCHEMA, "issue_link": ISSUE_LINKER_SCHEMA}
def validate(instance, schema_type):
schema = SCHEMA_LIST[schema_type]
v = Draft4Validator(schema)
    return v.is_valid(instance)
| [
"[email protected]"
] | |
f20e2a525e425c4f6bb3fa847e3b214c7e761cdc | cceda0ed268253be60c549ee957804d367bc3ca3 | /from_cpython/Lib/test/test_dictcomps.py | 60b2dc0cb3a8e085690d52cff40c7cb34ac48b7d | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | mcanthony/pyston | 3bc6a45e5c118fb6860427c9b0cc885dec8f5b6e | eed1d41307b578ff8d873b92b8b4db24775d5daf | refs/heads/master | 2020-12-29T00:55:11.099535 | 2015-10-23T22:28:07 | 2015-10-23T22:28:07 | 44,902,270 | 2 | 0 | null | 2015-10-25T08:34:09 | 2015-10-25T08:34:08 | null | UTF-8 | Python | false | false | 3,866 | py | # expected: fail
import unittest
from test import test_support as support
# For scope testing.
g = "Global variable"
class DictComprehensionTest(unittest.TestCase):
def test_basics(self):
expected = {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17,
8: 18, 9: 19}
actual = {k: k + 10 for k in range(10)}
self.assertEqual(actual, expected)
expected = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
actual = {k: v for k in range(10) for v in range(10) if k == v}
self.assertEqual(actual, expected)
def test_scope_isolation(self):
k = "Local Variable"
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {k: None for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(k, "Local Variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {k: v for v in range(10) for k in range(v * 9, v * 10)}
self.assertEqual(k, "Local Variable")
self.assertEqual(actual, expected)
def test_scope_isolation_from_global(self):
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {g: None for g in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(g, "Global variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {g: v for v in range(10) for g in range(v * 9, v * 10)}
self.assertEqual(g, "Global variable")
self.assertEqual(actual, expected)
def test_global_visibility(self):
expected = {0: 'Global variable', 1: 'Global variable',
2: 'Global variable', 3: 'Global variable',
4: 'Global variable', 5: 'Global variable',
6: 'Global variable', 7: 'Global variable',
8: 'Global variable', 9: 'Global variable'}
actual = {k: g for k in range(10)}
self.assertEqual(actual, expected)
def test_local_visibility(self):
v = "Local variable"
expected = {0: 'Local variable', 1: 'Local variable',
2: 'Local variable', 3: 'Local variable',
4: 'Local variable', 5: 'Local variable',
6: 'Local variable', 7: 'Local variable',
8: 'Local variable', 9: 'Local variable'}
actual = {k: v for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(v, "Local variable")
def test_illegal_assignment(self):
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} = 5", "<test>",
"exec")
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} += 5", "<test>",
"exec")
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| [
"[email protected]"
] | |
8c8f9ea9bae907e335aa983e57ae69c1969f4aca | 177d3f04c566e9de05a2fd651a4d91d24dfdb4d2 | /exifcleaner.py | c6bd3078cdd1dbab5c09cd5d4f3ecd6c4b9344c5 | [] | no_license | luisgf/exifcleaner | 5c7ebe941f6930f2173b6eb9f5a9ba73d91cf74f | de1954e7514929acba8cf02096cd06023adf770d | refs/heads/master | 2021-01-10T19:43:31.690766 | 2015-12-28T13:21:12 | 2015-12-28T13:21:12 | 35,278,798 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,245 | py | #!/usr/bin/env python3
"""
EXIF Cleaner. A tool to clean EXIF metadata from files
Luis González Fernández (c) 2015
luisgf at luisgf . es
"""
import os
import sys
from subprocess import check_call, CalledProcessError, DEVNULL
class ExifCleaner():
def __init__(self, folder_list=None, verbose=False):
self.folders = folder_list
self.verbose = verbose
self.errors = []
def clean_exif(self, path):
""" Clean EXIF metadata using exiv2 """
try:
args = ['exiv2', 'rm', path]
check_call(args, shell=False, stdout=DEVNULL, stderr=DEVNULL)
if self.verbose:
print('File %s cleaned' % path)
except FileNotFoundError:
print('exiv2 not found. Please install it!')
sys.exit(-1)
except CalledProcessError as e:
if self.verbose:
print('Error cleaning EXIF in %s' % path)
if path not in self.errors:
self.errors.append(path)
def check_exif_presence(self, path):
""" Check the EXIF metadata presence in a given file """
rc = False
try:
args = ['exiv2', 'pr', path]
check_call(args, shell=False, stdout=DEVNULL, stderr=DEVNULL)
            rc = True  # exiv2 exits with 0 when the file has EXIF data
        except CalledProcessError as e:
            if e.returncode == 253:
                pass  # exiv2 exits with 253 when the file has no EXIF data
            else:
                raise
        return rc
def Start(self):
wiped = 0 # Num of wiped files
for folder in self.folders:
if self.verbose:
print('Cleaning: %s' % folder)
for path in os.listdir(folder):
file_path = os.path.join(folder, path)
if self.check_exif_presence(file_path):
self.clean_exif(file_path)
wiped += 1
print('EXIF data cleaned in %d Files. Errors %d' % (wiped,len(self.errors)))
def has_errors(self):
""" Return True if some file has errors """
        return len(self.errors) > 0
def show_errors(self):
""" Show the errors after execution """
if self.errors:
print('Clean error in:')
for file in self.errors:
print(' %s' % file)
def set_verbose(self, value):
self.verbose = bool(value)
def set_folders(self, folders):
""" Set the folder list to check """
self.folders = folders
if __name__ == '__main__':
params = [param for param in sys.argv]
params.pop(0)
exif = ExifCleaner()
if '-v' in params:
exif.set_verbose(True)
params.pop(params.index('-v'))
    if len(params) == 0:
        print('Please pass a list of folders to check as parameters')
        print('Example: %s /folder1 [/folder2 /folder3 ...]' % sys.argv[0])
sys.exit(-1)
else:
exif.set_folders(params)
exif.Start()
if exif.has_errors():
            exif.show_errors()
| [
"[email protected]"
] | |
1d1b804b1e7e0fe0ee563309b987e0186f53edc9 | d305e9667f18127e4a1d4d65e5370cf60df30102 | /mindspore/ops/_op_impl/tbe/layer_norm.py | 2414b9bcb3d9ac3edd5a9d50cc364f6a9c0e3a43 | [
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | imyzx2017/mindspore_pcl | d8e5bd1f80458538d07ef0a8fc447b552bd87420 | f548c9dae106879d1a83377dd06b10d96427fd2d | refs/heads/master | 2023-01-13T22:28:42.064535 | 2020-11-18T11:15:41 | 2020-11-18T11:15:41 | 313,906,414 | 6 | 1 | Apache-2.0 | 2020-11-18T11:25:08 | 2020-11-18T10:57:26 | null | UTF-8 | Python | false | false | 1,921 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LayerNorm op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
layer_norm_op_info = TBERegOp("LayerNorm") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("layer_norm.so") \
.compute_cost(10) \
.kernel_name("layer_norm") \
.partial_flag(True) \
.attr("begin_norm_axis", "required", "int", "all") \
.attr("begin_params_axis", "required", "int", "all") \
.attr("epsilon", "optional", "float", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "gamma", False, "required", "all") \
.input(2, "beta", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.output(1, "mean", False, "required", "all") \
.output(2, "variance", False, "required", "all") \
.op_pattern("dynamicFormat") \
.dtype_format(DataType.F16_None, DataType.F16_None, DataType.F16_None, DataType.F16_None,
DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None, DataType.F32_None, DataType.F32_None,
DataType.F32_None, DataType.F32_None) \
.get_op_info()
@op_info_register(layer_norm_op_info)
def _layer_norm_tbe():
"""LayerNorm TBE register"""
return
| [
"[email protected]"
] | |
1d3d42f917cfbf6d11fe41892de5d8a866f19c15 | dad45eb1fb0505a7b515c68eda08f004ca0d6c0e | /algorithms/ellipticCurves.py | e654bc34b8fecf2d1f8416ad8ddb5592f79a7c73 | [
"MIT"
] | permissive | jaanos/kirv | 8132785436a834460725d46ee9089c685f1cedda | 8ea0a106a1eee1f22d46c6613f09f533678d2ed1 | refs/heads/master | 2023-01-06T05:26:19.075970 | 2022-12-27T17:50:54 | 2022-12-27T17:50:54 | 112,191,005 | 7 | 3 | MIT | 2020-10-21T17:59:26 | 2017-11-27T12:01:35 | Jupyter Notebook | UTF-8 | Python | false | false | 1,819 | py | from .euclidean import inverse
from .util import xxrange
def points(params):
"""
Find all points on an elliptic curve y^2 = x^3 + ax + b
over a field with p elements with characteristic greater than 3.
"""
a, b, p = params
sqrt = {x: [] for x in xxrange(p)}
sqrt[0].append(0)
for x in xxrange(1, (p+1)//2):
sqrt[x*x % p].append(x)
sqrt[x*x % p].append(p-x)
return [()] + sum([[(x, y) for y in sqrt[(x**3 + a*x + b) % p]]
for x in xxrange(p)], [])
def pointSum(P, Q, params):
"""
Compute the sum of the points P and Q
on an elliptic curve y^2 = x^3 + ax + b
over a field with p elements with characteristic greater than 3.
"""
if P == ():
return Q
elif Q == ():
return P
a, b, p = params
Px, Py = P
Qx, Qy = Q
if Px == Qx:
if Py == Qy:
lm = (3*Px*Px + a) * inverse(2*Py, p) % p
else:
return ()
else:
lm = (Qy-Py) * inverse(Qx-Px, p) % p
x = (lm*lm - Px - Qx) % p
y = (lm*(Px - x) - Py) % p
return (x, y)
def pointMultiply(k, P, params, trace = False):
"""
Compute the multiple of the point P by the scalar k
on an elliptic curve y^2 = x^3 + ax + b
over a field with p elements with characteristic greater than 3.
"""
a, b, p = params
if k == 0:
return ()
elif k < 0:
k = -k
x, y = P
P = (x, p-y)
Q = ()
if trace:
r, s = 0, 1
while k > 0:
if k % 2 == 1:
Q = pointSum(P, Q, (a, b, p))
if trace:
r += s
print("%dP = %s" % (r, Q))
P = pointSum(P, P, (a, b, p))
k //= 2
if trace:
s *= 2
print("%dP = %s" % (s, P))
return Q
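# Minimal usage sketch (illustrative only: the curve y^2 = x^3 + 2x + 2 over
# GF(17) and the point (5, 1) are a textbook example, not taken from this
# repository):
#
#     params = (2, 2, 17)            # a, b, p
#     P = (5, 1)                     # satisfies 1^2 = 5^3 + 2*5 + 2 (mod 17)
#     pointSum(P, (), params)        # -> (5, 1); () is the point at infinity
#     pointMultiply(2, P, params)    # -> (6, 3), equal to pointSum(P, P, params)
#     len(points(params))            # -> 19 points, the identity included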
| [
"[email protected]"
] | |
d0ee3b1ff7c46b38cc69612bc693a3c46b9c79fa | dff65cb5c1c68a452830650e2df0efb9351b715a | /2-MAY-2016_Assignment/python/removeat.py | 3491e9ac84b0ba1c847c06fdfb6194964876b298 | [] | no_license | vishnusak/DojoAssignments | 194f2ff5f200d431110d89a81be1341d2f565055 | fb6b5384016e58490fbe6999a4651743fcde8692 | refs/heads/master | 2020-04-06T09:20:44.291863 | 2016-10-26T13:30:05 | 2016-10-26T13:30:05 | 55,562,579 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | # Remove At
# Given array and an index into array, remove and return the array value at that index. Do this without using built-in array methods except pop() . Think of PopFront(arr) as equivalent to RemoveAt(arr,0) .
# steps:
# 1 - assign value of required index to the return variable
# 2 - move all elements from that index to last 1 spot to the left
# 3 - remove the last element of the array
#     the task allows only pop(); remove(arr[-1]) would delete the first
#     occurrence of that value, which is wrong when duplicates are present
def removeAt(arr, idx):
val = arr[idx]
for i in range(idx, (len(arr) - 1)):
arr[i] = arr[i+1]
    arr.pop()
return val
my_array = [1,2,3,4,5,6,7,8,9]
index = 4
print("\nThe existing array is {}").format(my_array)
print("The length of existing array is {}").format(len(my_array))
print("The index at which value should be removed is {}\n").format(index)
removed_val = removeAt(my_array, index)
print("The removed value is {}").format(removed_val)
print("The array after removing the value is {}").format(my_array)
print("The length of array now is {}").format(len(my_array))
| [
"[email protected]"
] | |
2faaf13fffa6514aabd159c763c83841c52b60c8 | 0e9b2916470bc355f778c9d9c066eeb142a937f8 | /struct2tensor/expression_impl/parquet.py | ff663241d42857c5e0ec3a1b91bacf08942150c4 | [
"Apache-2.0"
] | permissive | google/struct2tensor | 9805515f9b4e9da458dec67f435423e5c26064f7 | 86d8676ac295697853be8a194460e4d71de3990f | refs/heads/master | 2023-09-02T01:53:48.350908 | 2023-08-10T18:46:22 | 2023-08-10T18:46:56 | 196,274,653 | 36 | 41 | Apache-2.0 | 2023-08-10T18:50:01 | 2019-07-10T21:02:42 | Python | UTF-8 | Python | false | false | 19,120 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Apache Parquet Dataset.
Example usage:
```
exp = create_expression_from_parquet_file(filenames)
docid_project_exp = project.project(exp, [path.Path(["DocId"])])
pqds = parquet_dataset.calculate_parquet_values([docid_project_exp], exp,
filenames, batch_size)
for prensors in pqds:
doc_id_prensor = prensors[0]
```
"""
import collections
from typing import Any, Dict, List, Optional, Tuple, Union
import pyarrow as pa
import pyarrow.parquet as pq
from struct2tensor import calculate
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import path
from struct2tensor import prensor
from struct2tensor.expression_impl import map_prensor_to_prensor as mpp
from struct2tensor.expression_impl import placeholder
from struct2tensor.ops import gen_parquet_dataset
import tensorflow as tf
def create_expression_from_parquet_file(
filenames: List[str]) -> placeholder._PlaceholderRootExpression: # pylint: disable=protected-access
"""Creates a placeholder expression from a parquet file.
Args:
filenames: A list of parquet files.
Returns:
A PlaceholderRootExpression that should be used as the root of an expression
graph.
"""
metadata = pq.ParquetFile(filenames[0]).metadata
parquet_schema = metadata.schema
arrow_schema = parquet_schema.to_arrow_schema()
root_schema = mpp.create_schema(
is_repeated=True,
children=_create_children_from_arrow_fields(
[arrow_schema.field_by_name(name) for name in arrow_schema.names]))
# pylint: disable=protected-access
return placeholder._PlaceholderRootExpression(root_schema)
def calculate_parquet_values(
expressions: List[expression.Expression],
root_exp: placeholder._PlaceholderRootExpression, # pylint: disable=protected-access
filenames: List[str],
batch_size: int,
options: Optional[calculate_options.Options] = None):
"""Calculates expressions and returns a parquet dataset.
Args:
expressions: A list of expressions to calculate.
root_exp: The root placeholder expression to use as the feed dict.
filenames: A list of parquet files.
batch_size: The number of messages to batch.
options: calculate options.
Returns:
A parquet dataset.
"""
pqds = _ParquetDatasetWithExpression(expressions, root_exp, filenames,
batch_size, options)
return pqds.map(pqds._calculate_prensor) # pylint: disable=protected-access
class _RawParquetDataset(tf.compat.v1.data.Dataset):
"""A dataset which reads columns from parquet and outputs a vector of tensors.
A ParquetDataset is a Dataset of batches of messages (records).
Every leaf field field of the messages in each batch has its own values tensor
and parent indices tensors (which encodes the structural information).
The user has control over which parent indices of which fields in a path to
read, and is determined by parent_index_paths and path_index.
View //struct2tensor/ops/parquet_dataset_op.cc
for a better understanding of what format the vector of tensors is in.
"""
def __init__(self, filenames: List[str], value_paths: List[str],
value_dtypes: List[tf.DType], parent_index_paths: List[str],
path_index: List[int], batch_size: int):
"""Creates a ParquetDataset.
Args:
filenames: A list containing the name(s) of the file(s) to be read.
value_paths: A list of strings of the dotstring path(s) of each leaf
path(s).
value_dtypes: value_dtypes[i] is the Tensorflow data type value_paths[i]
would be of.
parent_index_paths: A list of strings of the dotstring path(s) of the
path(s) to be read.
path_index: A list containing the index of each field to get the parent
index of. This will have the same length as parent_index_paths.
batch_size: An int that determines how many messages are parsed into one
prensor tree in an iteration. If there are fewer than batch_size
remaining messages, then all remaining messages will be returned.
Raises:
ValueError: if the column does not exist in the parquet schema.
ValueError: if the column dtype does not match the value_dtype passed in.
"""
self._filenames = filenames
self._value_paths = value_paths
self._value_dtypes = tuple(value_dtypes)
self._parent_index_paths = parent_index_paths
self._path_index = path_index
self._batch_size = batch_size
super().__init__()
def _get_column_path_to_index_mapping(self, metadata_file) -> Dict[str, int]:
"""Gets the column index of every column.
Args:
metadata_file: the file to be used as the metadata. If there is no
metadata_file, any file from file_names will suffice.
Returns:
A dictionary mapping path name (str) to column index (int).
"""
metadata = pq.ParquetFile(metadata_file).metadata
path_to_column_index = {
metadata.schema.column(index).path: index
for index in range(metadata.num_columns)
}
return path_to_column_index
def _parquet_to_tf_type(self, parquet_type: str) -> Union[tf.DType, None]:
"""Maps tensorflow datatype to a parquet datatype.
Args:
parquet_type: a string representing the parquet datatype.
Returns:
the tensorflow datatype equivalent of a parquet datatype.
"""
return {
"BOOLEAN": tf.bool,
"INT32": tf.int32,
"INT64": tf.int64,
"FLOAT": tf.float32,
"DOUBLE": tf.double,
"BYTE_ARRAY": tf.string
}.get(parquet_type)
def _as_variant_tensor(self):
return gen_parquet_dataset.parquet_dataset(
self._filenames,
value_paths=self._value_paths,
value_dtypes=self._value_dtypes,
parent_index_paths=self._parent_index_paths,
path_index=self._path_index,
batch_size=self._batch_size)
def _inputs(self):
return []
@property
def output_types(self):
res = []
column_counter = 0
prev = self._parent_index_paths[0]
res.append(tf.int64)
for i in range(1, len(self._parent_index_paths)):
curr = self._parent_index_paths[i]
res.append(tf.int64)
if curr != prev:
res.append(self._value_dtypes[column_counter])
column_counter += 1
prev = curr
res.append(tf.int64)
res.append(self._value_dtypes[column_counter])
self.output_dtypes = tuple(res)
return self.output_dtypes
@property
def output_shapes(self):
return (tf.TensorShape([]),) + tuple(
tf.TensorShape([None]) for i in range(1, len(self.output_dtypes)))
@property
def output_classes(self):
return tuple(tf.Tensor for i in range(len(self.output_dtypes)))
class ParquetDataset(_RawParquetDataset):
"""A dataset which reads columns from a parquet file and returns a prensor.
The prensor will have a PrensorTypeSpec, which is created based on
value_paths.
Note: In tensorflow v1 this dataset will not return a prensor. The output will
be the same format as _RawParquetDataset's output (a vector of tensors).
The following is a workaround in v1:
pq_ds = ParquetDataset(...)
type_spec = pq_ds.element_spec
tensors = pq_ds.make_one_shot_iterator().get_next()
prensor = type_spec.from_components(tensors)
session.run(prensor)
"""
def __init__(self, filenames: List[str], value_paths: List[str],
batch_size: int):
"""Creates a ParquetDataset.
Args:
filenames: A list containing the name(s) of the file(s) to be read.
value_paths: A list of strings of the dotstring path(s) of each leaf
path(s).
batch_size: An int that determines how many messages are parsed into one
prensor tree in an iteration. If there are fewer than batch_size
remaining messages, then all remaining messages will be returned.
Raises:
ValueError: if the column does not exist in the parquet schema.
"""
self._filenames = filenames
self._value_paths = value_paths
self._batch_size = batch_size
for filename in filenames:
self._validate_file(filename, value_paths)
self._value_dtypes = self._get_column_dtypes(filenames[0], value_paths)
self._parent_index_paths = []
self._path_index = []
self.element_structure = self._create_prensor_spec()
self._create_parent_index_paths_and_index_from_type_spec(
self.element_structure, 0, 0)
super(ParquetDataset,
self).__init__(filenames, self._value_paths, self._value_dtypes,
self._parent_index_paths, self._path_index, batch_size)
def _get_column_dtypes(
self, metadata_file: str,
value_paths: List[str]) -> List[Union[tf.DType, None]]:
"""Returns a list of tensorflow datatypes for each column.
Args:
metadata_file: the file to be used as the metadata. If there is no
metadata_file, any file from file_names will suffice.
value_paths: A list of strings of the dotstring path(s).
Returns:
A list of tensorflow datatypes for each column. This list aligns with
value_paths.
"""
path_to_column_index = self._get_column_path_to_index_mapping(metadata_file)
metadata = pq.ParquetFile(metadata_file).metadata
value_dtypes = []
for column in value_paths:
col = metadata.schema.column(path_to_column_index[column])
parquet_type = col.physical_type
value_dtypes.append(self._parquet_to_tf_type(parquet_type))
return value_dtypes
def _validate_file(self, filename: str, value_paths: List[str]):
"""Checks if each requested path exists in the parquet file.
Args:
filename: The parquet filename.
value_paths: A list of strings of the dotstring path(s).
Raises:
ValueError: if a path does not exist in the parquet file's schema.
"""
metadata = pq.ParquetFile(filename).metadata
paths = {}
for i in range(metadata.num_columns):
col = metadata.schema.column(i)
p = (col.path)
paths[p] = col.physical_type
for i, p in enumerate(value_paths):
if p not in paths:
raise ValueError("path " + p + " does not exist in the file.")
def _create_children_spec(
self, field: pa.lib.Field, index_and_paths: List[Tuple[int,
List[path.Step]]]
) -> Tuple[path.Step, prensor._PrensorTypeSpec]:
"""Creates the _PrensorTypeSpec for children and leaves.
Args:
field: a pyarrow field.
index_and_paths: a list of tuple(index, list[step]), where index is the
column index this step belongs to, and list[step] are children steps of
the passed in step arg. The reason index is needed is because we need to
keep track of which column this step belongs to, to populate
parent_index_paths and path_index.
Returns:
a child or leaf _PrensorTypeSpec.
"""
# pylint: disable=protected-access
curr_steps_as_set = collections.OrderedDict()
# Construct the dictionary of paths we need.
if len(index_and_paths) >= 1 and len(index_and_paths[0][1]) >= 1:
for p in index_and_paths:
index = p[0]
p = p[1]
curr_step = p[0]
if p:
if curr_step in curr_steps_as_set:
curr_steps_as_set[curr_step].append((index, p[1:]))
else:
curr_steps_as_set[curr_step] = [(index, p[1:])]
field_type = field.type
if isinstance(field_type, pa.lib.ListType):
field_type = field_type.value_type
is_repeated = True
else:
is_repeated = False
if isinstance(field_type, pa.lib.StructType):
node_type = prensor._PrensorTypeSpec._NodeType.CHILD
dtype = tf.int64
children = [
self._create_children_spec(field_type[step], curr_steps_as_set[step])
for step in curr_steps_as_set
]
else:
node_type = prensor._PrensorTypeSpec._NodeType.LEAF
dtype = tf.dtypes.as_dtype(field_type)
children = []
return (field.name,
prensor._PrensorTypeSpec(is_repeated, node_type, dtype, children))
def _create_prensor_spec(self) -> prensor._PrensorTypeSpec: # pylint: disable=protected-access
"""Creates the prensor type spec based on value_paths.
Returns:
a root _PrensorTypeSpec.
"""
metadata = pq.ParquetFile(self._filenames[0]).metadata
parquet_schema = metadata.schema
arrow_schema = parquet_schema.to_arrow_schema()
# pylint: disable=protected-access
# Sort the paths by number of fields.
paths = [path.create_path(p) for p in self._value_paths]
mapped = zip(paths, self._value_paths, self._value_dtypes)
sorted_mapped = sorted(mapped, key=lambda x: len(x[0].field_list))
paths, self._value_paths, self._value_dtypes = zip(*sorted_mapped)
# Creates an ordered dictionary mapping step to a list of children fields.
# This will allow us to find paths that share a parent.
curr_steps_as_set = collections.OrderedDict()
for (i, p) in enumerate(paths):
step = p.field_list[0]
if step in curr_steps_as_set:
curr_steps_as_set[step].append((i, p.field_list[1:]))
else:
curr_steps_as_set[step] = [(i, p.field_list[1:])]
return prensor._PrensorTypeSpec(
None, prensor._PrensorTypeSpec._NodeType.ROOT, tf.int64, [
self._create_children_spec(
arrow_schema.field(step), curr_steps_as_set[step])
for step in curr_steps_as_set
])
def _create_parent_index_paths_and_index_from_type_spec(
self, type_spec, index, level):
"""Populates self._parent_index_paths and self.path_index from the typespec.
It traverses the prensor type spec to get index and level. It then uses
index to get the correct path from self._value_paths.
This assumes that self._value_paths is sorted alphabetically, and thus the
prensor type spec has the same order of paths as self._value_paths.
Args:
type_spec: A Prensor type spec.
index: The index of self._value_paths. It is incremented each time we
reach a leaf, ie we have a new path.
level: the step number in a path. It is incremented each time we go to a
spec's child. It is then decremented when exiting the child spec.
"""
fields = type_spec._children_specs # pylint: disable=protected-access
for field_tuple in fields:
spec = field_tuple[1]
self._parent_index_paths.append(self._value_paths[index])
self._path_index.append(level)
level += 1
self._create_parent_index_paths_and_index_from_type_spec(
spec, index, level)
level -= 1
index += 1
@property
def element_spec(self):
return self.element_structure
def _create_children_from_arrow_fields(
fields: pa.lib.Field) -> Dict[str, Dict[Any, Any]]:
"""Creates a dictionary of children schema for a pyarrow field.
Args:
fields: A list of pyarrow fields.
Returns:
A dictionary of children. Key is field name. Value is a dictionary
representing a schema.
"""
children = {}
for field in fields:
field_type = field.type
if isinstance(field_type, pa.lib.ListType):
sub_field_type = field_type.value_type
if isinstance(sub_field_type, pa.lib.StructType):
children[field.name] = {
"is_repeated":
True,
"children":
_create_children_from_arrow_fields(
[subfield for subfield in sub_field_type])
}
elif isinstance(sub_field_type, pa.lib.DataType):
children[field.name] = {
"is_repeated": True,
"dtype": tf.dtypes.as_dtype(sub_field_type)
}
else:
print("this should never be printed")
elif isinstance(field_type, pa.lib.StructType):
children[field.name] = {
"is_repeated":
False,
"children":
_create_children_from_arrow_fields(
[subfield for subfield in field_type])
}
else:
children[field.name] = {
"is_repeated": False,
"dtype": tf.dtypes.as_dtype(field_type)
}
return children
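# For illustration (hedged: the field names below are hypothetical, not taken
# from this module): an arrow schema with a repeated int64 leaf `DocId` and an
# optional struct `Links` holding a repeated int64 leaf `Forward` yields:
#
#   {'DocId': {'is_repeated': True, 'dtype': tf.int64},
#    'Links': {'is_repeated': False,
#              'children': {'Forward': {'is_repeated': True,
#                                       'dtype': tf.int64}}}}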
class _ParquetDatasetWithExpression(ParquetDataset):
"""A dataset which reads columns from a parquet file based on the expressions.
The data read from the parquet file will then have the expression queries
applied to it, creating a new prensor.
This dataset should not be created by the user, call
parquet_dataset.calculate_parquet_values() to get this dataset instead.
"""
def __init__(self, exprs: List[expression.Expression],
root_expr: placeholder._PlaceholderRootExpression,
filenames: List[str], batch_size: int,
options: Optional[calculate_options.Options]):
self._exprs = exprs
self._root_expr = root_expr
self._filesnames = filenames
self._batch_size = batch_size
self._options = options
# pylint: disable=protected-access
self._subtrees = [x.get_known_descendants() for x in self._exprs]
self._all_expressions = []
for tree in self._subtrees:
self._all_expressions.extend(tree.values())
expression_graph = calculate.OriginalExpressionGraph(self._all_expressions)
self._canonical_graph = calculate.CanonicalExpressionGraph(expression_graph)
paths = placeholder.get_placeholder_paths_from_graph(self._canonical_graph)
parquet_paths = [".".join(p.field_list) for p in paths]
super(_ParquetDatasetWithExpression,
self).__init__(filenames, parquet_paths, batch_size)
def _calculate_prensor(self, pren) -> List[prensor.Prensor]:
"""Function for applying expression queries to a prensor.
This function should be passed into dataset.map().
Args:
pren: The prensor that will be used to bind to the root expression.
Returns:
A list of modified prensor that have the expression queries applied.
"""
self._canonical_graph.calculate_values(
options=self._options, feed_dict={self._root_expr: pren})
values = [
self._canonical_graph.get_value_or_die(x) for x in self._all_expressions
]
expr_to_value_map = {
id(expr): value for expr, value in zip(self._all_expressions, values)
}
# pylint: disable=protected-access
return [
calculate._get_prensor(subtree, expr_to_value_map)
for subtree in self._subtrees
]
| [
"[email protected]"
] | |
64ac7d923348dc3b2424ff077f7bbb9981d7d925 | 31014bf4464a5fae77ff86241ae15cfdd71ccb9e | /gnomics/objects/anatomical_structure_files/caro.py | 585227e09243676728f164cf37246900f31a4e24 | [
"BSD-3-Clause",
"BSD-2-Clause-Views"
] | permissive | izhangcd/Gnomics | 14de8f90960f88d3eb2f2a49c94fa3a0f8048a2d | bd0fb4e7be009b2afe1c2667f2890c712ae0ad9d | refs/heads/master | 2021-09-09T03:42:40.953105 | 2018-03-13T16:05:17 | 2018-03-13T16:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | #!/usr/bin/env python
#
#
#
#
#
#
# IMPORT SOURCES:
#
#
#
# Common Anatomy Reference Ontology (CARO).
#
# PRE-CODE
import faulthandler
faulthandler.enable()
# IMPORTS
# Imports for recognizing modules.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
# Import modules.
from gnomics.objects.user import User
import gnomics.objects.anatomical_structure
import gnomics.objects.auxiliary_files.identifier
# Other imports.
import json
import requests
import timeit
# MAIN
def main():
caro_unit_tests()
# Return CARO ID.
def get_caro_id(anat, user=None):
caro_array = []
for iden in gnomics.objects.auxiliary_files.identifier.filter_identifiers(anat.identifiers, ["caro", "caro id", "caro identifier"]):
if iden["identifier"] not in caro_array:
caro_array.append(iden["identifier"])
return caro_array
# UNIT TESTS
def caro_unit_tests():
print("NOT FUNCTIONAL.")
# MAIN
if __name__ == "__main__": main() | [
"[email protected]"
] | |
eb703ff151fe9213cc4a4e74f1ae4110553b205c | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /third_party/llvm/expand_cmake_vars.py | 2197698837f1d209a363f796d8db6886d686d70c | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 2,767 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expands CMake variables in a text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
_CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$")
_CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)")
_CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}")
def _parse_args(argv):
"""Parses arguments with the form KEY=VALUE into a dictionary."""
result = {}
for arg in argv:
k, v = arg.split("=")
result[k] = v
return result
def _expand_variables(input_str, cmake_vars):
"""Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'.
Args:
input_str: the string containing ${VARIABLE} expressions to expand.
cmake_vars: a dictionary mapping variable names to their values.
Returns:
The expanded string.
"""
def replace(match):
if match.group(1) in cmake_vars:
return cmake_vars[match.group(1)]
return ""
return _CMAKE_VAR_REGEX.sub(replace, input_str)
def _expand_cmakedefines(line, cmake_vars):
"""Expands #cmakedefine declarations, using a dictionary 'cmake_vars'."""
# Handles #cmakedefine lines
match = _CMAKE_DEFINE_REGEX.match(line)
if match:
name = match.group(1)
suffix = match.group(2) or ""
if name in cmake_vars:
return "#define {}{}\n".format(name,
_expand_variables(suffix, cmake_vars))
else:
return "/* #undef {} */\n".format(name)
# Handles #cmakedefine01 lines
match = _CMAKE_DEFINE01_REGEX.match(line)
if match:
name = match.group(1)
value = cmake_vars.get(name, "0")
return "#define {} {}\n".format(name, value)
# Otherwise return the line unchanged.
return _expand_variables(line, cmake_vars)
def main():
cmake_vars = _parse_args(sys.argv[1:])
for line in sys.stdin:
sys.stdout.write(_expand_cmakedefines(line, cmake_vars))
if __name__ == "__main__":
main()
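# Usage sketch (hedged: FOO is a made-up variable name for illustration):
#
#   $ echo '#cmakedefine FOO ${FOO}' | python expand_cmake_vars.py FOO=1
#   #define FOO 1
#
# Names missing from the KEY=VALUE arguments become "/* #undef NAME */", and
# "#cmakedefine01 NAME" lines expand to "#define NAME 0" or "#define NAME 1".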
| [
"[email protected]"
] | |
d211cd9a32864330bf938f5f881ee0e2753c328a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2737/60593/254924.py | 559e2d68a0fc4a972e5eecb3b97eaa9b07420a80 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | a=eval(input())
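# Boyer-Moore majority vote for elements appearing more than len(a)//3 times:
# the first pass keeps at most two candidates m and n with counters cm and cn,
# and the second pass re-counts those candidates to verify them.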
cm=0
cn=0
m=0
n=0
for i in a:
if(i==m):
cm+=1
elif(i==n):
cn+=1
elif(cm==0):
m=i
cm=1
elif(cn==0):
n=i
cn=1
else:
cm-=1
cn-=1
cm=0
cn=0
for i in a:
if(i==m):
cm+=1
elif(i==n):
cn+=1
res=[]
if(cm>len(a)//3):
    res.append(m)  # append the qualifying element itself, not its count
if(cn>len(a)//3):
    res.append(n)
print(res) | [
"[email protected]"
] | |
fe566ec84f6894c4e4ceed91b8fb7202418afba3 | 8952afe242c836b516c6236cf0987676cfb7abf7 | /TaobaoSdk/Request/SellercenterUserPermissionsGetRequest.py | c9f37ef7d916d816fc56b7ede1d4da4f103f2062 | [] | no_license | xieguanfu/TaobaoOpenPythonSDK | 2fc20df983811990a2d981379c9da6c1117f9f21 | 88cdab41ba19a2326aa4085c92455697bd37d8d7 | refs/heads/master | 2021-01-18T14:38:51.465614 | 2014-08-21T05:44:42 | 2014-08-21T05:44:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,182 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief Get the permission set of the specified user without assembling it into a tree. For a master account, the full permission list is returned; for a sub-account, all granted permissions are returned. Only accounts belonging to you can be queried (for a master account: the master account and its sub-accounts; for a sub-account: the corresponding master account and its sub-accounts)
# @author [email protected]
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Get the permission set of the specified user without assembling it into a tree. For a master account, the full permission list is returned; for a sub-account, all granted permissions are returned. Only accounts belonging to you can be queried (for a master account: the master account and its sub-accounts; for a sub-account: the corresponding master account and its sub-accounts)</SPAN>
# <UL>
# </UL>
class SellercenterUserPermissionsGetRequest(object):
def __init__(self):
super(self.__class__, self).__init__()
        ## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Get the API name</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">str</SPAN>
# </LI>
# </UL>
self.method = "taobao.sellercenter.user.permissions.get"
        ## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Timestamp; if not set, the time at which the request is sent will be used</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">int</SPAN>
# </LI>
# </UL>
self.timestamp = int(time.time())
        ## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">User identifier; this parameter must be a sub-account such as zhangsan:cool. Passing only the master account zhangsan will raise an error.</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.nick = None
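# Minimal usage sketch (illustrative; request signing and HTTP transport are
# handled elsewhere in this SDK, and the sub-account below is the docstring's
# own example):
#
#     req = SellercenterUserPermissionsGetRequest()
#     req.nick = "zhangsan:cool"
#     # `req` is then handed to the SDK's API client for signing and sending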
| [
"[email protected]"
] | |
a0256ae799eee5f88ab513b7a53e1e61b8802ff8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_304/ch30_2019_03_22_13_13_50_872840.py | 2dc4f4c0e8660f0b4ce64b93dc3369535be2c7b0 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | import math
v=int(input('qual é a velocidade? '))
a=int(input('qual é o angulo? '))
r=(a*math.pi)/180
d=(v**2 * (math.sin(2*r)))/9.8
if d<=98:
print('Muito perto')
elif d>=102:
print('Muito longe')
else:
print ('Acertou!')
| [
"[email protected]"
] | |
32aeab6cc7b86ecb32e647dae4e1b1f7b2d6b2b7 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /uugzpwJXKdiESZbjM_12.py | 0b959d83852aa9daf41c4758204d29c248f28006 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | """
Create a function that determines whether or not a player is holding a **Full
House** in their hand. A hand is represented as a list of 5 cards. A full
house is defined as a pair of cards and a three-of-a-kind.
To illustrate: `["A", "A", "A", "K", "K"]` would be a **Full House** , since
the player holds 3 aces and 2 kings.
### Examples
is_full_house(["A", "A", "A", "K", "K"]) ➞ True
is_full_house(["3", "J", "J", "3", "3"]) ➞ True
is_full_house(["10", "J", "10", "10", "10"]) ➞ False
is_full_house(["7", "J", "3", "4", "2"]) ➞ False
### Notes
N/A
"""
def is_full_house(hand):
return sorted(hand.count(card) for card in set(hand)) == [2, 3]
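# Counting each distinct rank's occurrences yields a multiset that equals
# [2, 3] exactly when the hand is one pair plus one three-of-a-kind.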
| [
"[email protected]"
] | |
496c48ec6b7c11f47ad5fbcf46c65d16d7712f7e | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2011.1/network/connection/openconnect/actions.py | 5b670dd034baf8d5d77b5d8818ec40931144eed4 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def build():
autotools.make("OPT_FLAGS='%s' openconnect" % get.CFLAGS())
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
autotools.rawInstall("DESTDIR=%s LIBDIR=/usr/lib" % get.installDIR(), "install-lib")
pisitools.doman("openconnect.8")
pisitools.dodoc("AUTHORS", "COPYING*", "README*")
| [
"[email protected]"
] | |
06c6745e6392d7d569d51badf6a793b8f4123198 | d552a3c92155d82ad146cd99ea9b8b4a3b65eab7 | /openstack/cloud/_accelerator.py | b28ac1f6b37ddd125b92ebcabd89945e5482d8e2 | [
"Apache-2.0"
] | permissive | jlyheden/openstacksdk | 600201d4fbf23fd8a4fa9a53b398b29811446051 | 7e0dcaaa4a69b17b97e746ce8de104689c60becc | refs/heads/master | 2022-11-30T19:15:16.113961 | 2020-06-07T18:02:22 | 2020-06-07T18:02:23 | 270,694,856 | 0 | 0 | Apache-2.0 | 2020-06-08T14:15:36 | 2020-06-08T14:15:35 | null | UTF-8 | Python | false | false | 5,707 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
from openstack.cloud import _normalize
class AcceleratorCloudMixin(_normalize.Normalizer):
def list_deployables(self, filters=None):
"""List all available deployables.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of deployable info.
"""
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
return list(self.accelerator.deployables(**filters))
def list_devices(self, filters=None):
"""List all devices.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of device info.
"""
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
return list(self.accelerator.devices(**filters))
def list_device_profiles(self, filters=None):
"""List all device_profiles.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of device profile info.
"""
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
return list(self.accelerator.device_profiles(**filters))
def create_device_profile(self, attrs):
"""Create a device_profile.
:param attrs: The info of device_profile to be created.
:returns: A ``munch.Munch`` of the created device_profile.
"""
return self.accelerator.create_device_profile(**attrs)
def delete_device_profile(self, name_or_id, filters):
"""Delete a device_profile.
:param name_or_id: The Name(or uuid) of device_profile to be deleted.
:returns: True if delete succeeded, False otherwise.
"""
device_profile = self.accelerator.get_device_profile(
name_or_id,
filters
)
if device_profile is None:
self.log.debug(
"device_profile %s not found for deleting",
name_or_id
)
return False
self.accelerator.delete_device_profile(name_or_id=name_or_id)
return True
def list_accelerator_requests(self, filters=None):
"""List all accelerator_requests.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of accelerator request info.
"""
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
return list(self.accelerator.accelerator_requests(**filters))
def delete_accelerator_request(self, name_or_id, filters):
"""Delete a accelerator_request.
:param name_or_id: The Name(or uuid) of accelerator_request.
:returns: True if delete succeeded, False otherwise.
"""
accelerator_request = self.accelerator.get_accelerator_request(
name_or_id,
filters
)
if accelerator_request is None:
self.log.debug(
"accelerator_request %s not found for deleting",
name_or_id
)
return False
self.accelerator.delete_accelerator_request(name_or_id=name_or_id)
return True
def create_accelerator_request(self, attrs):
"""Create an accelerator_request.
:param attrs: The info of accelerator_request to be created.
:returns: A ``munch.Munch`` of the created accelerator_request.
"""
return self.accelerator.create_accelerator_request(**attrs)
def bind_accelerator_request(self, uuid, properties):
"""Bind an accelerator to VM.
:param uuid: The uuid of the accelerator_request to be bound.
:param properties: The info of the VM that will bind the accelerator.
:returns: True if bind succeeded, False otherwise.
"""
accelerator_request = self.accelerator.get_accelerator_request(uuid)
if accelerator_request is None:
self.log.debug(
"accelerator_request %s not found for unbinding", uuid
)
return False
return self.accelerator.update_accelerator_request(uuid, properties)
def unbind_accelerator_request(self, uuid, properties):
"""Unbind an accelerator from VM.
:param uuid: The uuid of the accelerator_request to be unbound.
:param properties: The info of the VM that will unbind the accelerator.
:returns: True if unbind succeeded, False otherwise.
"""
accelerator_request = self.accelerator.get_accelerator_request(uuid)
if accelerator_request is None:
self.log.debug(
"accelerator_request %s not found for unbinding", uuid
)
return False
return self.accelerator.update_accelerator_request(uuid, properties)
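# --- Usage sketch (added; not part of the upstream module) ---
# A minimal example of how these mixin methods might be exercised through an
# openstack.connection.Connection. The cloud name "my-cloud" and the
# device-profile payload below are hypothetical assumptions, not upstream API
# guarantees.
#
# import openstack
#
# conn = openstack.connect(cloud='my-cloud')
# for dev in conn.list_devices():
#     print(dev)
# conn.create_device_profile({'name': 'fpga-profile',
#                             'groups': [{'resources:FPGA': '1'}]})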
| [
"[email protected]"
] | |
4941e8f118771aa5bc373cb3e248e556ca14e33a | c088967f6fcd2cfbae48ad9eb757935ba8783b8b | /nikola/data/themes/base/messages/messages_ja.py | 4a238ccc164b4b5bf3d58505c1e34be7a882403d | [
"MIT"
] | permissive | verbalshadow/nikola | 281bafcf52b725bc0e54f99691d17f2a7fd9d95c | 1e12b9525227ac223d80beb3e537a7b9eb637fc5 | refs/heads/master | 2021-01-18T08:26:36.867146 | 2013-09-18T15:41:39 | 2013-09-18T15:41:39 | 12,930,300 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | # -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"Also available in": "他の言語で読む",
"Archive": "過去の記事",
"Categories": "",
"LANGUAGE": "日本語",
"More posts about": "タグ",
"Newer posts": "新しい記事",
"Next post": "次の記事",
"Older posts": "過去の記事",
"Original site": "元のサイト",
"Posted": "投稿日時",
"Posts about %s": "%sについての記事",
"Posts for year %s": "%s年の記事",
"Posts for {month} {year}": "{year}年{month}月の記事",
"Previous post": "前の記事",
"Read in English": "日本語で読む",
"Read more": "続きを読む",
"Source": "ソース",
"Tags and Categories": "",
"Tags": "タグ",
"old posts page %d": "前の記事 %dページ目",
}
| [
"[email protected]"
] | |
d422609448dc381fa6c35856a2abe8a58b6fbd74 | d400c32010a414a2f536c5c0a3490c8b8e2e9d5a | /resources/upgrades/deploy/farmer.py | b7aa3982bd3e52615ce756b21c060c1ce309897c | [
"LicenseRef-scancode-public-domain"
] | permissive | CarlosCorreiaM16e/chirico_cms | 3e521eae8f38b732497a2b808950c6a534e69d4f | 73897cbddb230630e13f22333b9094d0a047acb3 | refs/heads/master | 2020-12-30T07:59:04.100330 | 2020-05-02T12:26:58 | 2020-05-02T12:26:58 | 238,917,321 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,682 | py | # -*- coding: utf-8 -*-
import sys
from fabric.contrib import console
from deploy import deploy_menu, db_utils, deploy_utils
from deploy.app import AppExistsException
from deploy.config import Config
from m16e import term
__author__ = '[email protected]'
__version__ = "0.1.0"
def init_app( cfg ):
app = cfg.get_app()
msg = app.init_remote_repo()
if msg:
print( msg )
ret = console.prompt( 'Delete Skip Abort', default='d' )
if ret == 'a':
return
app.init_remote_repo()
app.init_web_repo()
db_name = cfg.get_db_name( app.app_name )
server = cfg.get_server()
server.pg_ctl( 'restart' )
if deploy_utils.get_prompt_continue() and console.confirm( 'Drop DB (%s) and upload?' % db_name, default=True ):
server.push_db( db_name )
server.apache_ctl( 'restart' )
def init_app_child( cfg, child_name ):
app = cfg.get_app()
server = cfg.get_server()
db_name = cfg.get_db_name( child_name )
try:
app.init_child_app( child_name )
except AppExistsException as e:
print( e )
force = raw_input( 'Force (y/N)?' )
if force and force.lower().startswith( 'y' ):
app.purge_child_app( child_name )
db_utils.drop_db( server, db_name )
app.init_child_app( child_name )
if deploy_utils.get_prompt_continue() and console.confirm( 'Drop DB (%s) and upload?' % db_name, default=True ):
server.pg_ctl( 'restart' )
db_utils.drop_db( server, db_name )
db_utils.push_db( cfg, child_name )
server.apache_ctl( 'restart' )
def update_app( cfg, repo_only=False ):
app = cfg.get_app()
app.update_remote_repo()
app.upgrade_web_repo()
cfg.get_server().apache_ctl( 'restart' )
def purge_app( cfg ):
app = cfg.get_app()
if deploy_utils.get_prompt_continue() and console.confirm( 'Purge app "%s"' % app.app_name, default=False ):
msg = app.purge_web_folder()
if msg:
print( msg )
msg = app.purge_remote_repo()
if msg:
print( msg )
db_name = cfg.get_db_name( app.app_name )
db_utils.drop_db( cfg.get_server(), db_name )
def init_cluster( cfg ):
cluster = cfg.get_cluster()
msg = cluster.init_web2py_from_zip()
if msg:
print( msg )
# term.printDebug( 'msg: %s' % repr( msg ),
# prompt_continue=True )
msg = cluster.init_web2py()
if msg:
print( msg )
if msg.startswith( 'ERROR:->' ):
if console.confirm( 'Abort?', default=False ):
return
cluster.define_web2py_admin_password()
if console.confirm( 'Create site-available file?', default=False ):
cluster.create_site_available()
server = cfg.get_server()
server.apache_ctl( 'stop' )
server.apache_ctl( 'start' )
def clear_app_errors( cfg, age=5 ):
'''
clear_app_errors( )
'''
app = cfg.get_app()
f_list = app.list_error_files( age )
print( 'Delete app errors: ' + app.app_name )
if not f_list:
print( 'no files to remove' )
else:
for e in f_list.splitlines():
print(' - ' + e)
op = raw_input( 'Continue (Y/n)?' )
if op != 'n':
app.delete_error_files( age )
def clear_app_sessions( cfg ):
'''
clear_app_sessions()
'''
app = cfg.get_app()
op = raw_input( 'Continue (Y/n)?' )
if op != 'n':
app.remove_sessions()
def clear_app_old_releases( cfg, age=5, preserve=3 ):
'''
clear_app_old_releases( )
'''
app = cfg.get_app()
rel_list = app.clear_old_releases( age=age, preserve=preserve )
def get_app_i18n_files( cfg ):
app = cfg.get_app()
app.get_app_i18n_files()
def upgrade_app( cfg, v_from, v_to ):
app = cfg.get_app()
term.printDebug( 'v_from: %s, v_to: %s' % (repr( v_from ), repr( v_to ) ) )
app.upgrade_app( v_from, v_to )
def pull_db( cfg ):
app = cfg.get_app()
app.pull_db()
def push_db( cfg ):
app = cfg.get_app()
app.push_db()
def execute( cfg ):
s = "args (" + str( len( sys.argv ) ) + "): " + str( sys.argv )
print()
print( ">" * len( s ) )
print( s )
print( ">" * len( s ) )
print()
cfg.curr_section = deploy_menu.SEC_APP
op = deploy_menu.show_main_menu( cfg )
while op != deploy_menu.QUIT[ 0 ]:
args = [ a.strip() for a in op.split( "," ) ]
if args[0] == deploy_menu.INIT_APP[0 ]:
init_app( cfg )
elif args[ 0 ] == deploy_menu.UPDATE_APP[ 0 ]:
update_app( cfg )
elif args[0] == deploy_menu.PURGE_APP[0 ]:
purge_app( cfg )
elif args[0] == deploy_menu.INIT_CLUSTER[0 ]:
init_cluster( cfg )
elif args[0] == deploy_menu.CLEAR_APP_ERRORS[0 ]:
clear_app_errors( cfg )
elif args[0] == deploy_menu.CLEAR_APP_SESSIONS[0 ]:
clear_app_sessions( cfg )
elif args[ 0 ] == deploy_menu.CLEAR_APP_OLD_RELEASES[ 0 ]:
if len( args ) > 1:
clear_app_old_releases( cfg, age=int( args[ 1 ] ) )
else:
clear_app_old_releases( cfg )
elif args[ 0 ] == deploy_menu.UPGRADE_APP[ 0 ]:
upgrade_app( cfg, args[ 1 ], args[ 2 ] )
elif args[ 0 ] == deploy_menu.PULL_DB[ 0 ]:
pull_db( cfg )
elif args[ 0 ] == deploy_menu.PUSH_DB[ 0 ]:
push_db( cfg )
elif args[ 0 ] == deploy_menu.GET_APP_I18N_FILES[ 0 ]:
get_app_i18n_files( cfg )
op = deploy_menu.show_main_menu( cfg )
# term.printDebug( 'args: %s' % (repr(args) ) )
# term.printLog( 'op: %s' % (op ) )
# if args[ 0 ] == SRV_APACHE_RESTART[ 0 ]:
# apache_ctl( 'restart' )
#
# elif args[ 0 ] == SRV_POSTGRES_RESTART[ 0 ]:
# postgres_ctl( 'restart' )
#
# elif args[ 0 ] == SRV_LIST_DISK_FREE[ 0 ]:
# list_server_disk_free()
# list_server_disk_usage()
#
# elif args[ 0 ] == SRV_LIST_DISK_USE[ 0 ]:
# folder = args[ 1 ]
# # term.printDebug( 'folder: %s' % folder )
# list_server_disk_usage( folder=folder )
#
# elif args[ 0 ] == COMPILE_FOLDER_REPORTS[ 0 ]:
# folder = args[ 1 ]
# # term.printDebug( 'folder: %s' % folder )
# compile_folder_reports( folder, server_name=selection.server.srv_ctx )
#
# elif args[ 0 ] == SRV_UPDATE[ 0 ]:
# server_upgrade_pkg()
#
# elif args[ 0 ] == SRV_REBOOT[ 0 ]:
# server_reboot()
#
# elif args[0] == INIT_SERVER_LIST_PACKAGES[0]:
# list_server_packages()
#
# elif args[0] == INIT_SERVER_PACKAGES:
# init_server_packages( yes_to_all=yes_to_all )
#
# elif args[0] == INIT_SERVER_ENV[0]:
# init_server_env()
#
# elif args[0] == INIT_SERVER_GIT[0]:
# init_server_env_git()
#
# elif args[0] == INIT_SERVER_GITOLITE[0]:
# init_server_env_gitolite_step_1( yes_to_all=yes_to_all )
#
# elif args[0] == INIT_SERVER_GITOLITE_CONF[0]:
# init_server_env_gitolite_step_2( yes_to_all=yes_to_all )
#
# elif args[0] == INIT_SERVER_W2P_REPO[0]:
# init_server_web2py_repo()
# # init_server_env_gitweb( yes_to_all=yes_to_all )
#
# elif args[0] == INIT_SERVER[0]:
# init_server_packages( yes_to_all=yes_to_all )
# init_server_env()
#
# elif args[0] == PURGE_SERVER_PACKAGES[0]:
# purge_server_packages( yes_to_all=yes_to_all )
#
# elif args[0] == PURGE_SERVER_ENV[0]:
# purge_server_env( yes_to_all=yes_to_all )
#
# elif args[0] == PURGE_SERVER[0]:
# purge_server_packages( yes_to_all=yes_to_all )
# purge_server_env( yes_to_all=yes_to_all )
#
# elif args[0] == SET_CLUSTER[0]:
# cl_list = sorted( selection.server.srv_ctx.clusters.clusters.keys() )
# cluster = cl_list[ int( args[ 1 ] ) - 1 ]
# set_cluster( cluster )
#
# elif args[0] == INIT_CLUSTER[0]:
# init_cluster( yes_to_all=yes_to_all )
#
# elif args[ 0 ] == UPGRADE_CLUSTER[ 0 ]:
# upgrade_cluster( yes_to_all=yes_to_all )
#
# elif args[0] == PURGE_CLUSTER[0]:
# purge_cluster( yes_to_all=yes_to_all )
#
# elif args[0] == CLEAR_APP_ERRORS[0]:
# clear_app_errors()
#
# elif args[0] == CLEAR_APP_SESSIONS[0]:
# clear_app_sessions()
#
# elif args[0] == CLEAR_CLUSTER_ERRORS[0]:
# if len( args ) > 1:
# clear_cluster_errors( age=args[ 1 ] )
# else:
# clear_cluster_errors()
#
# elif args[0] == CLEAR_CLUSTER_SESSIONS[0]:
# if len( args ) > 1:
# clear_cluster_sessions( age=args[ 1 ] )
# else:
# clear_cluster_sessions()
#
# elif args[ 0 ] == CLEAR_CLUSTER_TMP_DIRS[ 0 ]:
# if len( args ) > 1:
# clear_cluster_tmp_dirs( age=args[ 1 ] )
# else:
# clear_cluster_tmp_dirs()
#
# elif args[ 0 ] == CLEAR_APP_OLD_RELEASES[ 0 ]:
# if len( args ) > 1:
# clear_app_old_releases( age=int( args[ 1 ] ) )
# else:
# clear_app_old_releases()
#
# elif args[ 0 ] == LIST_APPS[ 0 ]:
# children = None
# if len( args ) > 1:
# children = bool( args[ 1 ] == 'c' )
# list_cluster_apps( children=children, yes_to_all=yes_to_all )
#
# elif args[ 0 ] == UPGRADE_CLUSTER_APPS[ 0 ]:
# upgrade_cluster_apps( args[ 1 ], args[ 2 ] )
#
# elif args[ 0 ] == ADD_USERS_TO_CLUSTER[ 0 ]:
# u_list_file = None
# if len( args ) > 1:
# u_list_file = args[ 1 ]
# add_users_to_cluster( u_list_file )
#
# elif args[ 0 ] == CLUSTER_SET_DEFAULT_USERS[ 0 ]:
# add_default_users_to_cluster()
#
# elif args[ 0 ] == CLEAR_CLUSTER_SESSIONS[ 0 ]:
# clear_cluster_sessions()
#
# elif args[ 0 ] == CHANGE_CLUSTER_USER_MAIL[ 0 ]:
# change_cluster_user_mail( args[ 1 ], args[ 2 ] )
#
# elif args[ 0 ] == CHANGE_CLUSTER_USER_PASS[ 0 ]:
# change_cluster_user_pass( args[ 1 ], args[ 2 ] )
#
# elif args[ 0 ] == CLUSTER_ADD_USER_TO_GROUP[ 0 ]:
# cluster_add_user_to_group( args[ 1 ], args[ 2 ] )
#
# elif args[ 0 ] == ADD_GROUP_TO_CLUSTER[ 0 ]:
# add_group_to_cluster( args[ 1 ], args[ 2 ] )
#
# elif args[ 0 ] == CLUSTER_SYNC_FROM_PRODUCTION[ 0 ]:
# if selection.server.srv_ctx.test_server:
# sync_cluster_from_production()
#
# elif args[0] == SET_APP[0]:
# cluster = selection.server.srv_ctx.clusters.get_cluster( selection.cluster )
# term.printDebug( 'cluster: %s' % repr( cluster ) )
# app_list = cluster.get_sorted_cluster_app_list()
# app = app_list[ int( args[ 1 ] ) - 1 ]
# set_app( app )
#
# elif args[0] == 'iag':
# init_app_repo()
#
# elif args[0] == INIT_APP[0]:
# init_app( yes_to_all=yes_to_all )
#
# elif args[ 0 ] == UPDATE_APP[ 0 ]:
# compile_jasper = False
# if len( args ) > 1:
# compile_jasper = args[ 1 ]
# update_app( compile_jasper=compile_jasper,
# yes_to_all=yes_to_all )
#
# elif args[ 0 ] == COMPILE_APP[ 0 ]:
# compile_app( compile_reports=True )
#
# elif args[ 0 ] == UPDATE_APP_REPO[ 0 ]:
# update_app( repo_only=True )
#
# elif args[ 0 ] == UPLOAD_APP[ 0 ]:
# upload_app( yes_to_all=yes_to_all )
#
# # elif args[ 0 ] == UPLOAD_APP_PRIVATE_REPORTS[ 0 ]:
# # upload_app_private_reports( args[ 1 ] )
#
# elif args[ 0 ] == UPGRADE_BLM_APP[ 0 ]:
# org_db_name = args[ 1 ]
# upgrade_app_from_v2( org_db_name )
#
# elif args[ 0 ] == PURGE_APP[ 0 ]:
# purge_app()
#
# elif args[ 0 ] == PURGE_APP_CHILD[ 0 ]:
# purge_app_child( args[1] )
#
# elif args[ 0 ] == RESET_APP[ 0 ]:
# app_name = None
# reset_docs = False
# reset_ents = False
# if len( args ) > 1:
# app_name = args[ 1 ]
# if len( args ) > 2:
# r = args[ 2 ]
# term.printDebug( 'r: %s' % r )
# if 'd' in r:
# reset_docs = True
# if 'e' in r:
# reset_ents = True
# reset_app( app_name, reset_docs=reset_docs, reset_ents=reset_ents )
#
# elif args[0] == INIT_APP_CHILD[0]:
# init_app_child( args[1], yes_to_all=yes_to_all )
#
# elif args[ 0 ] == LIST_APP_RELEASES[ 0 ]:
# list_app_releases( yes_to_all=yes_to_all )
#
# elif args[ 0 ] == GET_APP_TARBALL[ 0 ]:
# app_name = None
# if len( args ) > 1:
# app_name = args[ 1 ]
# get_app_tarball( app_name=app_name )
#
# elif args[0] == LIST_ALL_DBS[0]:
# print( list_databases( 'local' ) )
# print( list_databases( 'remote' ) )
#
# elif args[0] == LIST_LOCAL_DBS[0]:
# print( list_databases( 'local' ) )
#
# elif args[0] == LIST_REMOTE_DBS[0]:
# print( list_databases( 'remote' ) )
#
# elif args[ 0 ] == GET_CUSTOMER_IP[ 0 ]:
# customer_name = args[ 1 ]
# get_customer_ip( customer_name )
#
# elif args[ 0 ] == SECTION_SERVER[ 0 ]:
# selection.curr_section = SEC_SERVER
#
# elif args[ 0 ] == SECTION_CLUSTER[ 0 ]:
# selection.curr_section = SEC_CLUSTER
#
# elif args[ 0 ] == SECTION_APP[ 0 ]:
# selection.curr_section = SEC_APP
#
# elif args[ 0 ] == SECTION_DATABASE[ 0 ]:
# selection.curr_section = SEC_DATABASE
#
# elif args[ 0 ] == SECTION_MISC[ 0 ]:
# selection.curr_section = SEC_MISC
#
# op = show_main_menu()
if op:
print( "op: " + op )
def usage():
print( "Usage for " + sys.argv[0] + " version " + __version__ + ":" )
print( sys.argv[0] + " [ -h ]" )
# print( sys.argv[0] + " -s: sync backup from <server> <cluster>" )
print( sys.argv[0] + " [--test] <config_file>" )
print( sys.argv[0] + " " + deploy_menu.INIT_CLUSTER[0] + " <config_file>" )
print( sys.argv[0] + " " + deploy_menu.INIT_APP[0] + " <config_file>" )
print( sys.argv[0] + " " + deploy_menu.INIT_APP_CHILD[0] + " <app_name> <config_file>" )
print( sys.argv[0] + " " + deploy_menu.UPDATE_APP[0] + " <config_file>" )
print( sys.argv[0] + " " + deploy_menu.UPGRADE_APP[0] + " <from> <to> <config_file>" )
print( sys.argv[0] + " " + deploy_menu.CLEAR_APP_SESSIONS[0] + " <config_file>" )
print( sys.argv[0] + " " + deploy_menu.CLEAR_APP_ERRORS[0] + " <config_file>" )
print( sys.argv[0] + " " + deploy_menu.CLEAR_APP_OLD_RELEASES[0] + " <config_file>" )
print( sys.argv[0] + " " + deploy_menu.GET_APP_I18N_FILES[0] + " <config_file>" )
def run_app():
# term.printDebug( 'args: %s' % repr( sys.argv ) )
if len( sys.argv ) > 1:
# help
if sys.argv[ 1 ] == '-h':
usage()
else:
cfg = Config( sys.argv[ -1 ] )
server = cfg.get_server()
server.get_ip()
idx = 1
test_server = False
if sys.argv[ idx ] == '--test':
test_server = True
idx += 1
if sys.argv[ idx ] == '-y':
deploy_utils.set_prompt_continue( False )
idx += 1
if sys.argv[ idx ] == deploy_menu.INIT_CLUSTER[0]:
init_cluster( cfg )
elif sys.argv[ idx ] == deploy_menu.INIT_APP[0]:
init_app( cfg )
elif sys.argv[ idx ] == deploy_menu.INIT_APP_CHILD[0]:
init_app_child( cfg, sys.argv[ idx + 1 ] )
elif sys.argv[ idx ] == deploy_menu.UPDATE_APP[ 0 ]:
update_app( cfg )
elif sys.argv[ idx ] == deploy_menu.UPGRADE_APP[ 0 ]:
upgrade_app( cfg, sys.argv[ idx + 1 ], sys.argv[ idx + 2 ] )
elif sys.argv[ idx ] == deploy_menu.CLEAR_APP_ERRORS[ 0 ]:
clear_app_errors( cfg )
elif sys.argv[ idx ] == deploy_menu.CLEAR_APP_SESSIONS[ 0 ]:
clear_app_sessions( cfg )
elif sys.argv[ idx ] == deploy_menu.CLEAR_APP_OLD_RELEASES[ 0 ]:
clear_app_old_releases( cfg )
elif sys.argv[ idx ] == deploy_menu.GET_APP_I18N_FILES[ 0 ]:
get_app_i18n_files( cfg )
else:
execute( cfg )
else:
usage()
if __name__ == "__main__":
run_app()
| [
"[email protected]"
] | |
abcf3cb3d023f19268c08f7a3479b54c66c941e0 | 83762584d226f2c9ccbf42d5f745cf95baa71247 | /Practice/Word palindrome.py | 9ddf33de50fdce2ac8d3557ad0f48c486b28adae | [] | no_license | anmolpanwar/python-practice | 85f420e57b8b3e4295b5759b451e6b2673731b6f | 831d8c6eeed8ff08a5d282bdac8c897f39dd4c6f | refs/heads/master | 2020-12-18T22:52:14.226202 | 2020-02-05T06:22:14 | 2020-02-05T06:22:14 | 235,538,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | def is_palindrome(word):
    newword = word.lower().strip()
    strn = newword[::-1]  # reversed copy of the word
    if strn == newword:
        print(True)
    else:
        # appending the reversed word to the original always yields a palindrome
        print("Palindrome of this will be: " + newword + strn)
is_palindrome('dfsbj')
| [
"[email protected]"
] | |
c79adfc9e9b5025797457c31a03c1ee87cd7922f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/96/usersdata/164/53660/submittedfiles/estatistica.py | 50b28060dc86c4a798dc697d40a8e01a37e12e8e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | # -*- coding: utf-8 -*-
n=int(input('Enter n: '))
a=[]
b=[]
somaA=0
difquadA=0
resultadoA=0
mediaA=0
for z in range (1, n+1, 1):
valorA=float(input('Value for list A: '))
a.append(valorA)
for i in range(0, len(a), 1):
somaA=somaA+a[i]
resultadoA=somaA/len(a)
for j in range (0, len(a), 1):
    difquadA=difquadA+(a[j]-resultadoA)**2
desvioA=difquadA/len(a)
print(resultadoA)
print(desvioA)
#Based on the function above, write the function to compute the standard deviation of a list
#Finally, write the main program, which reads the input and calls the functions created (a sketch follows below).
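# --- Hedged sketch (added; not part of the original submission) ---
# One possible completion of the exercise described above. Note that desvioA
# computed in this file is the mean squared deviation (variance); a standard
# deviation would additionally take the square root, as sketched here.
def media(lista):
    # arithmetic mean of a list
    return sum(lista) / len(lista)

def desvio_padrao(lista):
    # population standard deviation: square root of the mean squared deviation
    m = media(lista)
    return (sum((x - m) ** 2 for x in lista) / len(lista)) ** 0.5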
"[email protected]"
] | |
e74dde33ebc5b1089f1e3842a3e27d7443ec4650 | 7680dbfce22b31835107403514f1489a8afcf3df | Exercícios_parte_1/exercício__017.py | 7f8da7cc45144d76b466704e28a2b16706f28d93 | [] | no_license | EstephanoBartenski/Aprendendo_Python | c0022d545af00c14e6778f6a80f666de31a7659e | 69b4c2e07511a0bd91ac19df59aa9dafdf28fda3 | refs/heads/master | 2022-11-27T17:14:00.949163 | 2020-08-03T22:11:19 | 2020-08-03T22:11:19 | 284,564,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # legs and hypotenuse of a right triangle
import math
co = float(input('Length of the opposite leg:'))
ca = float(input('Length of the adjacent leg:'))
h = math.sqrt(co**2+ca**2)
print('The hypotenuse of this right triangle will measure {:.2f}'.format(h))
# the math module already provides a function for the hypotenuse
print('The hypotenuse of this right triangle will measure {:.2f}'.format(math.hypot(co, ca)))
| [
"[email protected]"
] | |
e6920e519350bed9d9f0b08101a7efa981861c19 | efb32799a616432b9cf90113d042bd45d889ef99 | /jel/test/test_ast.py | 4f9eb086fabaadcdc8389f4e4dde503ecf7900f0 | [] | no_license | cstawarz/jel | 859216fd0dc63a9a85810263d42b662967c00788 | b01ca2127c0c317aaf9142dcd6c9441154e8830e | refs/heads/master | 2020-04-29T14:58:58.448508 | 2014-03-31T18:18:41 | 2014-03-31T18:18:41 | 3,422,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,072 | py | from __future__ import division, print_function, unicode_literals
import unittest
from .. import ast
class Node(ast.AST):
_fields = ('foo', 'bar')
class DerivedNode(Node):
pass
class OtherNode(ast.AST):
pass
class TestAST(unittest.TestCase):
def test_lineno_and_lexpos(self):
n = OtherNode()
self.assertEqual(-1, n.lineno)
self.assertEqual(-1, n.lexpos)
n = OtherNode(lineno=12, lexpos=34)
self.assertEqual(12, n.lineno)
self.assertEqual(34, n.lexpos)
def test_missing_field(self):
with self.assertRaises(KeyError) as cm:
Node(foo=1)
self.assertEqual('bar', cm.exception.args[0])
def test_invalid_field(self):
with self.assertRaises(TypeError) as cm:
DerivedNode(foo=1, bar=2, blah=3)
self.assertEqual("DerivedNode has no field 'blah'",
cm.exception.args[0])
def test_repr(self):
self.assertEqual('Node(foo=1, bar=2)', repr(Node(foo=1, bar=2)))
self.assertEqual('DerivedNode(foo=3, bar=4)',
repr(DerivedNode(foo=3, bar=4)))
self.assertEqual('OtherNode()', repr(OtherNode()))
def test_equality(self):
n1 = Node(foo=1, bar=2)
n2 = Node(foo=1, bar=2)
n3 = Node(foo=1, bar=3)
self.assertTrue(n1 == n1)
self.assertTrue(n1 == n2)
self.assertFalse(n1 == n3)
self.assertFalse(n1 != n1)
self.assertFalse(n1 != n2)
self.assertTrue(n1 != n3)
d = DerivedNode(foo=n1.foo, bar=n1.bar)
self.assertEqual(n1.foo, d.foo)
self.assertEqual(n1.bar, d.bar)
self.assertFalse(n1 == d)
self.assertFalse(d == n1)
self.assertTrue(n1 != d)
self.assertTrue(d != n1)
o1 = OtherNode()
o2 = OtherNode()
self.assertTrue(o1 == o1)
self.assertTrue(o1 == o2)
self.assertFalse(o1 == n1)
self.assertFalse(o1 != o1)
self.assertFalse(o1 != o2)
self.assertTrue(o1 != n1)
| [
"[email protected]"
] | |
22c25bd8ee4081e8a410ce1a2069675d52ed0986 | bf397e60bba27b649084966aee686869c7df595d | /PythonNet/day04/code/fork_getpid_son.py | fcbdab4d16a7bc28a60e6498331d6ef65489308e | [] | no_license | demo112/1807 | 3783e37f7dab3945a3fc857ff8f77f4690012fbe | 9b921c90b3003226d919017d521a32da47e546ad | refs/heads/master | 2022-12-01T10:50:24.086828 | 2018-12-06T09:48:14 | 2018-12-06T09:48:14 | 150,758,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | import os
from time import sleep
pid = os.fork()
if pid < 0:
print('创建进程失败')
elif pid == 0:
sleep(0.1)
print('这是新的进程')
print('Child Get PID:', os.getpid())
print('Child Get Parent PID:', os.getppid())
else:
print("这是原有进程")
print('Parent get PID:', os.getpid())
print('Parent get Child PID:', pid)
print('演示完毕')
| [
"[email protected]"
] | |
57f85335ce9e48a956e6424ac6143b84d050b3d7 | 80e83dd69395312db092f7b0277310a29afb95b6 | /untitled1/doc-To-Excel/ResolveDocx_JieYing-jieying.py | f739c48a9bfdeb957f1633eb3c946ad4da258aae | [] | no_license | yif-zhu/Python-Project | 3a102695a7eab2e149e260ccee955de84685b6cb | d55edb652d66e6694a120eb329cd04abba57ba1e | refs/heads/master | 2023-01-21T00:59:27.743845 | 2020-12-04T08:42:56 | 2020-12-04T08:42:56 | 299,492,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,169 | py | # _*_ coding:utf-8 _*_
import os
import os.path
import sys
import xml.etree.ElementTree as XETree
import xml.etree.ElementTree as ET
from docx import Document
from openpyxl import load_workbook
cdfp = None
cwb = None
clws = None
DATANOTFOUND = 0
writeLog = 0
def arearMapSupInfoToExcelName(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode, dcolsNames):
sheet = cwb[dSheet]
sList = sCols.split(',')
dList = dCols.split(',')
dNameList = dcolsNames.split(',')
dRowIncress = 0
itemName = 1
while sBeginRow <= sEndRow:
for sCellI in range(len(sList)):
sCol = sList[sCellI]
value = table.rows[sBeginRow].cells[int(sCol)].text
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = dNameList[sCellI]
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = value
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
sBeginRow += 1
itemName += 1
def arearMapSupInfoToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode):
sheet = cwb[dSheet]
sList = sCols.split(',')
dList = dCols.split(',')
dRowIncress = 0
itemName = 1
while sBeginRow <= sEndRow:
for sCellI in range(len(sList)):
sCol = sList[sCellI]
value = table.rows[sBeginRow].cells[int(sCol)].text
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = 'ItemCode'+ str(sCellI)
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = value
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
sBeginRow += 1
itemName += 1
def arearMapExtractDataToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols):
sheet = cwb[dSheet]
sList = sCols.split(',')
dList = dCols.split(',')
dRowIncress = 0
while sBeginRow <= sEndRow:
for sCellI in range(len(sList)):
sCol = sList[sCellI]
value = table.rows[sBeginRow].cells[int(sCol)].text
dRow = dBeginRow + dRowIncress
if dSheet == '资产统计信息':
sheet['A' + str(dRow)] = 'ItemCode'+ str(dRowIncress)
if value != '':
sheet[dList[sCellI] + str(dRow)] = value
dRowIncress += 1
sBeginRow += 1
def cellMapExtractDataToExcel(table, dNode, dSheet):
sheet = cwb[dSheet]
for cell in dNode:
cUsing = cell.attrib['using']
cTag = cell.tag
cText = cell.text
if cUsing == 'replace':
r = int(cText.split(',')[0])
c = int(cText.split(',')[1])
v = table.rows[r].cells[c].text.strip()
if v != '':
sheet[cTag] = v
elif cUsing == 'sum':
dcs = cText.split(';')
sumv = 0
for i in range(len(dcs)):
r = int(dcs[i].split(',')[0])
c = int(dcs[i].split(',')[1])
v = table.rows[r].cells[c].text.strip()
v = v.replace(',', '').replace('-', '')
if v != '':
sumv += float(v)
sheet[cTag] = "{0:.2f}".format(sumv)
def arearMapExtract(table, cfgItem, itemIndex):
global DATANOTFOUND
itemDesc = cfgItem.attrib['desc']
sNode = cfgItem.find('source')
dNode = cfgItem.find('dest')
sAnchor = sNode.attrib['anchor'].strip()
sSkipRows = int(sNode.attrib['skiprows']) if 'skiprows' in sNode.attrib else 0
sAnchorEnd = sNode.attrib['anchorend'].strip()
dLimit = int(dNode.attrib['limited']) if 'limited' in dNode.attrib else 0
sAnchorEndArr = sAnchorEnd.split('$')
sBeginRow = -1
sEndRow = -1
for rIndex, row in enumerate(table.rows):
firstCellText = row.cells[0].text.strip()
if firstCellText == '':
continue
if sBeginRow == -1 and (firstCellText.startswith(sAnchor) or firstCellText.endswith(sAnchor)):
sBeginRow = rIndex + sSkipRows + 1;
elif sBeginRow != -1 and sAnchorEnd != '' and (
(sAnchorEnd.find('$') == -1 and firstCellText.startswith(sAnchorEnd)) or (
sAnchorEnd.find('$') != -1 and firstCellText in sAnchorEndArr)):
sEndRow = rIndex if dLimit == 0 or rIndex + 1 - sBeginRow <= dLimit else sBeginRow + dLimit - 1
break
if sBeginRow != -1 and sEndRow == -1:
rowsCount = len(table.rows)
if dLimit == 0 and sAnchorEnd == '':
sEndRow = rowsCount - 1
break
if dLimit != 0 and sAnchorEnd == '':
sEndRow = sBeginRow + dLimit if sBeginRow + dLimit <= rowsCount - 1 else rowsCount - 1
break
if dLimit != 0 and sAnchorEnd != '' and rIndex - sBeginRow == dLimit - 1:
sEndRow = rIndex
break
if sBeginRow != -1 and sEndRow != -1:
sCols = sNode.attrib['cols']
dCols = dNode.attrib['cols']
dSheet = dNode.attrib['sheet']
dBeginRow = int(dNode.attrib['beginrow'])
writeSheetLog('{0} extracting: [{1}]'.format(itemIndex + 1, itemDesc))
writeSheetLog(
'--------source table start row: {0}, source table end row: {1}, target sheet [{3}] start row: {2}'.format(sBeginRow, sEndRow, dBeginRow, dSheet))
if 'type' in cfgItem.attrib:
reportType = dNode.attrib['ReportType']
dataSource = dNode.attrib['DataSource']
tableCode = dNode.attrib['TableCode']
if 'colsNames' in sNode.attrib:
dcolsNames = sNode.attrib['colsNames']
arearMapSupInfoToExcelName(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode, dcolsNames)
else:
arearMapSupInfoToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode)
else:
arearMapExtractDataToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols)
writeSheetLog('--------[{0}] data extraction complete'.format(itemDesc))
if writeLog == 0:
cwb.save(cdfp)
if sBeginRow == -1 and sEndRow == -1:
DATANOTFOUND += 1
writeSheetLog('{1} [{0}] data not found; please check the source file and the config file'.format(itemDesc, itemIndex))
def arearMapExtractTable(tables, cfgItem, itemIndex):
global DATANOTFOUND
itemDesc = cfgItem.attrib['desc']
sNode = cfgItem.find('source')
dNode = cfgItem.find('dest')
sAnchor = sNode.attrib['anchor'].strip()
sSkipRows = int(sNode.attrib['skiprows']) if 'skiprows' in sNode.attrib else 0
sAnchorEnd = sNode.attrib['anchorend'].strip()
dLimit = int(dNode.attrib['limited']) if 'limited' in dNode.attrib else 0
sAnchorEndArr = sAnchorEnd.split('$')
sBeginRow = -1
sEndRow = -1
index = int(sNode.attrib['index'].strip())
for tbIndex, table in enumerate(tables):
if tbIndex >= index or index == -1:
for rIndex, row in enumerate(table.rows):
firstCellText = row.cells[0].text.strip()
if firstCellText == '':
continue
if sBeginRow == -1 and (firstCellText.startswith(sAnchor) or firstCellText.endswith(sAnchor)):
sBeginRow = rIndex + sSkipRows + 1;
elif sBeginRow != -1 and sAnchorEnd != '' and (
(sAnchorEnd.find('$') == -1 and firstCellText.startswith(sAnchorEnd)) or (
sAnchorEnd.find('$') != -1 and firstCellText in sAnchorEndArr)):
sEndRow = rIndex if dLimit == 0 or rIndex + 1 - sBeginRow <= dLimit else sBeginRow + dLimit - 1
break
if sBeginRow != -1 and sEndRow == -1:
rowsCount = len(table.rows)
if dLimit == 0 and sAnchorEnd == '':
sEndRow = rowsCount - 1
break
if dLimit != 0 and sAnchorEnd == '':
sEndRow = sBeginRow + dLimit if sBeginRow + dLimit <= rowsCount - 1 else rowsCount - 1
break
if dLimit != 0 and sAnchorEnd != '' and rIndex - sBeginRow == dLimit - 1:
sEndRow = rIndex
break
if sBeginRow != -1 and sEndRow != -1:
sCols = sNode.attrib['cols']
dCols = dNode.attrib['cols']
dSheet = dNode.attrib['sheet']
dBeginRow = int(dNode.attrib['beginrow'])
writeSheetLog('{0} extracting: [{1}]'.format(itemIndex + 1, itemDesc))
writeSheetLog(
'--------source table start row: {0}, source table end row: {1}, target sheet [{3}] start row: {2}'.format(sBeginRow, sEndRow, dBeginRow, dSheet))
if 'type' in cfgItem.attrib:
reportType = dNode.attrib['ReportType']
dataSource = dNode.attrib['DataSource']
tableCode = dNode.attrib['TableCode']
if 'colsNames' in sNode.attrib:
dcolsNames = sNode.attrib['colsNames']
arearMapSupInfoToExcelName(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode, dcolsNames)
else:
arearMapSupInfoToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode)
else:
arearMapExtractDataToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols)
writeSheetLog('--------[{0}] data extraction complete'.format(itemDesc))
if writeLog == 0:
cwb.save(cdfp)
break
if sBeginRow == -1 and sEndRow == -1:
DATANOTFOUND += 1
writeSheetLog('{1} [{0}] data not found; please check the source file and the config file'.format(itemDesc, itemIndex))
def cellMapExtract(tables, cfgItem, itemIndex):
global DATANOTFOUND
itemDesc = cfgItem.attrib['desc']
sNode = cfgItem.find('source')
dNode = cfgItem.find('dest')
foundTable = 0
sAnchor = sNode.attrib['anchor'].strip()
index = int(sNode.attrib['index'].strip())
for tbIndex, table in enumerate(tables):
if tbIndex >= index or index == -1:
for rIndex, row in enumerate(table.rows):
firstCellText = row.cells[0].text.strip()
if firstCellText == '' or firstCellText != sAnchor:
continue
if firstCellText == sAnchor:
foundTable = 1
break
if foundTable == 1:
dSheet = dNode.attrib['sheet']
writeSheetLog('{0} extracting: [{1}]'.format(itemIndex + 1, itemDesc))
writeSheetLog('--------starting cell-mapped data extraction')
cellMapExtractDataToExcel(table, dNode, dSheet)
writeSheetLog('--------[{0}] data extraction complete'.format(itemDesc))
if writeLog == 0:
cwb.save(cdfp)
break
if foundTable == 0:
DATANOTFOUND += 1
writeSheetLog('\033[1;31m {1} [{0}] data not found; please check the source file and the config file \033[0m!'.format(itemDesc, itemIndex + 1))
def extractDocFile(cfgItems, sourceFilePath):
doc = Document(sourceFilePath)
tables = doc.tables
for i in range(len(cfgItems)):
cfgItem = cfgItems[i]
if 'useTableName' not in cfgItem.attrib:
if 'type' in cfgItem.attrib:
itemType = cfgItem.attrib['type']
if itemType == 'cellmap':
cellMapExtract(tables, cfgItem, i)
elif itemType == 'supInfo':
arearMapExtractTable(tables, cfgItem, i)
else:
arearMapExtractTable(tables, cfgItem, i)
else:
if 'type' in cfgItem.attrib:
itemType = cfgItem.attrib['type']
if itemType == 'cellmap':
findTableName(tables, cfgItem, i, 1)
elif itemType == 'supInfo':
findTableName(tables, cfgItem, i, 0)
else:
findTableName(tables, cfgItem, i, 0)
def writeSheetLog(info):
if writeLog == 1 and clws is not None:
clws['A' + str(clws.max_row + 1)] = info
cwb.save(cdfp)
def findTableName(tables, cfgItem, index, typeId):
sNode = cfgItem.find('source')
dNode = cfgItem.find('dest')
itemDesc = cfgItem.attrib['desc']
tableName = sNode.attrib['tableName'].strip()
for tbIndex, table in enumerate(tables):
xml = table._tblPr.xml
root_elem = ET.fromstring(xml)
for ch in root_elem:
key = ch.tag.split('}')[1]
if key == 'tblCaption':
titleName = str(list(ch.attrib.values())).split('\'')[1]
if titleName == tableName:
if typeId == 1:
dSheet = dNode.attrib['sheet']
writeSheetLog('{0} extracting: [{1}]'.format(tbIndex + 1, itemDesc))
writeSheetLog('--------starting cell-mapped data extraction')
cellMapExtractDataToExcel(table, dNode, dSheet)
writeSheetLog('--------[{0}] data extraction complete'.format(itemDesc))
if writeLog == 0:
cwb.save(cdfp)
break
elif typeId == 0:
arearMapExtract(table, cfgItem, index)
for rIndex, row in enumerate(table.rows):
for cell in row.cells:
if len(cell.tables) > 0:
findTableName(cell.tables, cfgItem,index, typeId)
def main():
global DATANOTFOUND
global cdfp
global clws
global cwb
global writeLog
reload(sys)
sys.setdefaultencoding('utf-8')
sourceFilePath = sys.argv[1]
destFileName =sys.argv[3]
configFilePath = sys.argv[2]
mappingTree = XETree.parse(configFilePath)
cfgRoot = mappingTree.getroot()
destFolder = cfgRoot.attrib['destfolder']
templateFilePath = cfgRoot.attrib['template']
writeLog = int(cfgRoot.attrib['writelog']) if 'writelog' in cfgRoot.attrib else 0
cdfp = os.path.join(destFolder, destFileName)
if not os.path.exists(destFolder):
os.makedirs(destFolder)
if os.path.exists(cdfp):
os.remove(cdfp)
open(cdfp, "wb").write(open(templateFilePath, "rb").read())
cwb = load_workbook(cdfp)
if writeLog == 1:
clws = cwb.create_sheet("Extract Log")
cwb.save(cdfp)
extractDocFile(cfgRoot, sourceFilePath)
main()
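# --- Hedged sketch (added; not part of the original script) ---
# Illustrative shape of the mapping XML this script consumes, inferred from
# the attribute names read above (destfolder/template/writelog on the root;
# desc/type on items; anchor/anchorend/skiprows/cols/index/tableName on
# <source>; sheet/beginrow/cols/limited/ReportType/DataSource/TableCode on
# <dest>). Every tag name and value below is a made-up example.
#
# <root destfolder="out" template="template.xlsx" writelog="1">
#   <item desc="NAV table" type="supInfo">
#     <source anchor="Net Asset" anchorend="Total" skiprows="1" index="-1"
#             cols="0,1"/>
#     <dest sheet="Sheet1" beginrow="2" cols="A,B,C,D,E,F" limited="0"
#           ReportType="Q" DataSource="doc" TableCode="T01"/>
#   </item>
# </root>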
| [
"[email protected]"
] | |
c52935dd962d4d9ac6a315d564c60e248c2169ad | d3cc9db967b05c740db85ed31358701434900aaa | /code/python/caffe/draw.py | ae7ec76eb5f7e3484ec332370e47be1f48cc80ab | [
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | minar09/LIP-SSL-Caffe | 371134b4aaadae3371dbbe450fc9a44afa368b53 | 089d437844a7d15352199b55bf80e429f1d63e4a | refs/heads/master | 2020-04-13T10:43:27.677741 | 2019-01-26T14:10:23 | 2019-01-26T14:10:23 | 163,150,964 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,663 | py | """
Caffe network visualization: draw the NetParameter protobuffer.
.. note::
This requires pydot>=1.0.2, which is not included in requirements.txt since
it requires graphviz and other prerequisites outside the scope of the
Caffe.
"""
from caffe.proto import caffe_pb2
"""
pydot is not supported under python 3 and pydot2 doesn't work properly.
pydotplus works nicely (pip install pydotplus)
"""
try:
# Try to load pydotplus
import pydotplus as pydot
except ImportError:
import pydot
# Internal layer and blob styles.
LAYER_STYLE_DEFAULT = {'shape': 'record',
'fillcolor': '#6495ED',
'style': 'filled'}
NEURON_LAYER_STYLE = {'shape': 'record',
'fillcolor': '#90EE90',
'style': 'filled'}
BLOB_STYLE = {'shape': 'octagon',
'fillcolor': '#E0E0E0',
'style': 'filled'}
def get_pooling_types_dict():
"""Get dictionary mapping pooling type number to type name
"""
desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR
d = {}
for k, v in desc.values_by_name.items():
d[v.number] = k
return d
def get_edge_label(layer):
"""Define edge label based on layer type.
"""
if layer.type == 'Data':
edge_label = 'Batch ' + str(layer.data_param.batch_size)
elif layer.type == 'Convolution' or layer.type == 'Deconvolution':
edge_label = str(layer.convolution_param.num_output)
elif layer.type == 'InnerProduct':
edge_label = str(layer.inner_product_param.num_output)
else:
edge_label = '""'
return edge_label
def get_layer_label(layer, rankdir):
"""Define node label based on layer type.
Parameters
----------
layer : a caffe.proto.caffe_pb2.LayerParameter protocol buffer.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
Returns
-------
string :
A label for the current layer
"""
if rankdir in ('TB', 'BT'):
# If graph orientation is vertical, horizontal space is free and
# vertical space is not; separate words with spaces
separator = ' '
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
separator = '\\n'
if layer.type == 'Convolution' or layer.type == 'Deconvolution':
# Outer double quotes needed or else colon characters don't parse
# properly
node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
layer.type,
separator,
layer.convolution_param.kernel_size[0] if len(
layer.convolution_param.kernel_size._values) else 1,
separator,
layer.convolution_param.stride[0] if len(
layer.convolution_param.stride._values) else 1,
separator,
layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0)
elif layer.type == 'Pooling':
pooling_types_dict = get_pooling_types_dict()
node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
pooling_types_dict[layer.pooling_param.pool],
layer.type,
separator,
layer.pooling_param.kernel_size,
separator,
layer.pooling_param.stride,
separator,
layer.pooling_param.pad)
else:
node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type)
return node_label
def choose_color_by_layertype(layertype):
"""Define colors for nodes based on the layer type.
"""
color = '#6495ED' # Default
if layertype == 'Convolution' or layertype == 'Deconvolution':
color = '#FF5050'
elif layertype == 'Pooling':
color = '#FF9900'
elif layertype == 'InnerProduct':
color = '#CC33FF'
return color
def get_pydot_graph(caffe_net, rankdir, label_edges=True):
"""Create a data structure which represents the `caffe_net`.
Parameters
----------
caffe_net : object
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
label_edges : boolean, optional
Label the edges (default is True).
Returns
-------
pydot graph object
"""
pydot_graph = pydot.Dot(caffe_net.name,
graph_type='digraph',
rankdir=rankdir)
pydot_nodes = {}
pydot_edges = []
for layer in caffe_net.layer:
node_label = get_layer_label(layer, rankdir)
node_name = "%s_%s" % (layer.name, layer.type)
if (len(layer.bottom) == 1 and len(layer.top) == 1 and
layer.bottom[0] == layer.top[0]):
# We have an in-place neuron layer.
pydot_nodes[node_name] = pydot.Node(node_label,
**NEURON_LAYER_STYLE)
else:
layer_style = LAYER_STYLE_DEFAULT
layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
for bottom_blob in layer.bottom:
pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob,
**BLOB_STYLE)
edge_label = '""'
pydot_edges.append({'src': bottom_blob + '_blob',
'dst': node_name,
'label': edge_label})
for top_blob in layer.top:
pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob))
if label_edges:
edge_label = get_edge_label(layer)
else:
edge_label = '""'
pydot_edges.append({'src': node_name,
'dst': top_blob + '_blob',
'label': edge_label})
# Now, add the nodes and edges to the graph.
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edge in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edge['src']],
pydot_nodes[edge['dst']],
label=edge['label']))
return pydot_graph
def draw_net(caffe_net, rankdir, ext='png'):
"""Draws a caffe net and returns the image string encoded using the given
extension.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
ext : string, optional
The image extension (the default is 'png').
Returns
-------
string :
Postscript representation of the graph.
"""
return get_pydot_graph(caffe_net, rankdir).create(format=ext)
def draw_net_to_file(caffe_net, filename, rankdir='LR'):
"""Draws a caffe net, and saves it to file using the format given as the
file extension. Use '.raw' to output raw text that you can manually feed
to graphviz to draw graphs.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
filename : string
The path to a file where the networks visualization will be stored.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
"""
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
fid.write(draw_net(caffe_net, rankdir, ext))
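# --- Usage sketch (added; not part of the upstream module) ---
# Typical invocation, mirroring what caffe's draw_net.py tool does; the
# prototxt path is a placeholder.
#
# from google.protobuf import text_format
# from caffe.proto import caffe_pb2
# import caffe.draw
#
# net = caffe_pb2.NetParameter()
# with open('deploy.prototxt') as f:
#     text_format.Merge(f.read(), net)
# caffe.draw.draw_net_to_file(net, 'net.png', rankdir='LR')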
| [
"[email protected]"
] | |
c2b29616b93803713a5bd6a6203584e68b80e826 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02623/s631394286.py | e65dea6135c3f5910ed9740aa8f22592b2ef34ca | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | import sys
input = sys.stdin.readline
n, m, k = list(map(int, input().split()))
a = list(map(int, input().split()))
b = list(map(int, input().split()))
# max number of books readable from desk A alone
s = 0
j = 0
for i in range(n):
if s + a[i] > k:
break
else:
s += a[i]
j += 1
ans = [j]
for l in range(m):
flag = True
s += b[l]
while s > k:
s -= a[j-1]
j -= 1
if j < 0:
flag = False
break
if not flag:
break
else:
ans.append(l + 1 + j)
#print(ans)
print(max(ans))
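# --- Note (added) ---
# Two-pointer / prefix idea: greedily take books from desk A while the total
# time fits in k; then for each extra prefix of B, pop books from the end of
# the chosen A prefix until the total fits again, recording l + 1 + j books.
# Each element of A and B enters and leaves the running sum at most once, so
# the loop work is O(n + m).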
| [
"[email protected]"
] | |
7e78e0140b8c52cfeba359b20b98b72a72a0dcc1 | 60aa3bcf5ace0282210685e74ee8ed31debe1769 | /simulation/statistics/skill.py | cbea355ed3f7b1736b74d746ce506c074419def3 | [] | no_license | TheBreadGuy/sims4-ai-engine | 42afc79b8c02527353cc084117a4b8da900ebdb4 | 865212e841c716dc4364e0dba286f02af8d716e8 | refs/heads/master | 2023-03-16T00:57:45.672706 | 2016-05-01T17:26:01 | 2016-05-01T17:26:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,258 | py | from protocolbuffers import Commodities_pb2
import collections
import operator
from event_testing import test_events
from event_testing.resolver import SingleSimResolver
from sims import sim_info_types
from sims4.localization import TunableLocalizedString
from sims4.math import Threshold
from sims4.tuning.dynamic_enum import DynamicEnum
from sims4.tuning.geometric import TunableVector2, TunableCurve
from sims4.tuning.instances import HashedTunedInstanceMetaclass
from sims4.tuning.tunable import Tunable, TunableList, TunableEnumEntry, TunableMapping, TunableEntitlement, TunableSet, TunableResourceKey, TunableTuple, OptionalTunable, TunableInterval, TunableReference, TunableRange, HasTunableReference
from sims4.tuning.tunable_base import ExportModes, GroupNames
from sims4.utils import classproperty
from singletons import DEFAULT
from statistics.base_statistic import StatisticChangeDirection
from statistics.tunable import TunableStatAsmParam
from ui.ui_dialog import UiDialogResponse
from ui.ui_dialog_notification import UiDialogNotification
import caches
import enum
import gsi_handlers.sim_handlers_log
import mtx
import services.social_service
import sims4.log
import statistics.continuous_statistic_tuning
import tag
import telemetry_helper
import ui.screen_slam
logger = sims4.log.Logger('Skills')
TELEMETRY_GROUP_SKILLS = 'SKIL'
TELEMETRY_HOOK_SKILL_LEVEL_UP = 'SKLU'
TELEMETRY_HOOK_SKILL_INTERACTION = 'SKIA'
TELEMETRY_HOOK_SKILL_INTERACTION_FIRST_TIME = 'SKIF'
TELEMETRY_FIELD_SKILL_ID = 'skid'
TELEMETRY_FIELD_SKILL_LEVEL = 'sklv'
TELEMETRY_FIELD_SKILL_AFFORDANCE = 'skaf'
TELEMETRY_FIELD_SKILL_AFFORDANCE_SUCCESS = 'safs'
TELEMETRY_FIELD_SKILL_AFFORDANCE_VALUE_ADD = 'safv'
TELEMETRY_INTERACTION_NOT_AVAILABLE = 'not_available'
skill_telemetry_writer = sims4.telemetry.TelemetryWriter(TELEMETRY_GROUP_SKILLS)
class SkillLevelType(enum.Int):
__qualname__ = 'SkillLevelType'
MAJOR = 0
MINOR = 1
CHILD = 2
TEEN = 3
class SkillEffectiveness(DynamicEnum):
__qualname__ = 'SkillEffectiveness'
STANDARD = 0
class TunableSkillMultiplier(TunableTuple):
__qualname__ = 'TunableSkillMultiplier'
def __init__(self, **kwargs):
super().__init__(affordance_list=TunableList(description='\n List of affordances this multiplier will effect.\n ', tunable=TunableReference(manager=services.affordance_manager(), reload_dependent=True)), curve=TunableCurve(description='\n Tunable curve where the X-axis defines the skill level, and\n the Y-axis defines the associated multiplier.\n ', x_axis_name='Skill Level', y_axis_name='Multiplier'), use_effective_skill=Tunable(description='\n If checked, this modifier will look at the current\n effective skill value. If unchecked, this modifier will\n look at the actual skill value.\n ', tunable_type=bool, needs_tuning=True, default=True), **kwargs)
class Skill(HasTunableReference, statistics.continuous_statistic_tuning.TunedContinuousStatistic, metaclass=HashedTunedInstanceMetaclass, manager=services.get_instance_manager(sims4.resources.Types.STATISTIC)):
__qualname__ = 'Skill'
SKILL_LEVEL_LIST = TunableMapping(key_type=TunableEnumEntry(SkillLevelType, SkillLevelType.MAJOR), value_type=TunableList(Tunable(int, 0), description='The level boundaries for skill type, specified as a delta from the previous value'), export_modes=ExportModes.All)
SKILL_EFFECTIVENESS_GAIN = TunableMapping(key_type=TunableEnumEntry(SkillEffectiveness, SkillEffectiveness.STANDARD), value_type=TunableCurve(), description='Skill gain points based on skill effectiveness.')
DYNAMIC_SKILL_INTERVAL = TunableRange(description='\n Interval used when dynamic loot is used in a\n PeriodicStatisticChangeElement.\n ', tunable_type=float, default=1, minimum=1)
INSTANCE_TUNABLES = {'stat_name': TunableLocalizedString(description='\n Localized name of this Statistic\n ', export_modes=ExportModes.All), 'ad_data': TunableList(description='\n A list of Vector2 points that define the desire curve for this\n commodity.\n ', tunable=TunableVector2(description='\n Point on a Curve\n ', default=sims4.math.Vector2(0, 0))), 'weight': Tunable(description="\n The weight of the Skill with regards to autonomy. It's ignored \n for the purposes of sorting stats, but it's applied when scoring \n the actual statistic operation for the SI.\n ", tunable_type=float, default=0.5), 'skill_level_type': TunableEnumEntry(description='\n Skill level list to use.\n ', tunable_type=SkillLevelType, default=SkillLevelType.MAJOR, export_modes=ExportModes.All), 'locked_description': TunableLocalizedString(description="\n The skill description when it's locked.\n ", export_modes=ExportModes.All), 'skill_description': TunableLocalizedString(description="\n The skill's normal description.\n ", export_modes=ExportModes.All), 'is_default': Tunable(description='\n Whether Sim will default has this skill.\n ', tunable_type=bool, default=False), 'genders': TunableSet(description='\n Skill allowed gender, empty set means not specified\n ', tunable=TunableEnumEntry(tunable_type=sim_info_types.Gender, default=None, export_modes=ExportModes.All)), 'ages': TunableSet(description='\n Skill allowed ages, empty set means not specified\n ', tunable=TunableEnumEntry(tunable_type=sim_info_types.Age, default=None, export_modes=ExportModes.All)), 'entitlement': TunableEntitlement(description='\n Entitlement required to use this skill.\n '), 'icon': TunableResourceKey(description='\n Icon to be displayed for the Skill.\n ', default='PNG:missing_image', resource_types=sims4.resources.CompoundTypes.IMAGE, export_modes=ExportModes.All), 'tags': TunableList(description='\n The associated categories of the skill\n ', tunable=TunableEnumEntry(tunable_type=tag.Tag, default=tag.Tag.INVALID)), 'priority': Tunable(description='\n Skill priority. Higher priority skill will trump other skills when\n being displayed on the UI side. When a sim gains multiple skills at\n the same time only the highest priority one will display a progress\n bar over its head.\n ', tunable_type=int, default=1, export_modes=ExportModes.All), 'statistic_multipliers': TunableMapping(description='\n Multipliers this skill applies to other statistics based on its\n value.\n ', key_type=TunableReference(description='\n The statistic this multiplier will be applied to.\n ', manager=services.statistic_manager(), reload_dependent=True), value_type=TunableTuple(curve=TunableCurve(description='\n Tunable curve where the X-axis defines the skill level, and\n the Y-axis defines the associated multiplier.\n ', x_axis_name='Skill Level', y_axis_name='Multiplier'), direction=TunableEnumEntry(description="\n Direction where the multiplier should work on the\n statistic. For example, a tuned decrease for an object's\n brokenness rate will not also increase the time it takes to\n repair it.\n ", tunable_type=StatisticChangeDirection, default=StatisticChangeDirection.INCREASE), use_effective_skill=Tunable(description='\n If checked, this modifier will look at the current\n effective skill value. If unchecked, this modifier will\n look at the actual skill value.\n ', tunable_type=bool, needs_tuning=True, default=True)), tuning_group=GroupNames.MULTIPLIERS), 'success_chance_multipliers': TunableList(description='\n Multipliers this skill applies to the success chance of\n affordances.\n ', tunable=TunableSkillMultiplier(), tuning_group=GroupNames.MULTIPLIERS), 'monetary_payout_multipliers': TunableList(description='\n Multipliers this skill applies to the monetary payout amount of\n affordances.\n ', tunable=TunableSkillMultiplier(), tuning_group=GroupNames.MULTIPLIERS), 'next_level_teaser': TunableList(description='\n Tooltip which describes what the next level entails.\n ', tunable=TunableLocalizedString(), export_modes=(ExportModes.ClientBinary,)), 'level_data': TunableMapping(description='\n Level-specific information, such as notifications to be displayed to\n level up.\n ', key_type=int, value_type=TunableTuple(level_up_notification=UiDialogNotification.TunableFactory(description='\n The notification to display when the Sim obtains this level.\n The text will be provided two tokens: the Sim owning the\n skill and a number representing the 1-based skill level\n ', locked_args={'text_tokens': DEFAULT, 'icon': None, 'primary_icon_response': UiDialogResponse(text=None, ui_request=UiDialogResponse.UiDialogUiRequest.SHOW_SKILL_PANEL), 'secondary_icon': None}), level_up_screen_slam=OptionalTunable(description='\n Screen slam to show when reaches this skill level.\n Localization Tokens: Sim - {0.SimFirstName}, Skill Name - \n {1.String}, Skill Number - {2.Number}\n ', tunable=ui.screen_slam.TunableScreenSlamSnippet(), tuning_group=GroupNames.UI))), 'mood_id': TunableReference(description='\n When this mood is set and active sim matches mood, the UI will \n display a special effect on the skill bar to represent that this \n skill is getting a bonus because of the mood.\n ', manager=services.mood_manager(), export_modes=ExportModes.All), 'stat_asm_param': TunableStatAsmParam.TunableFactory(), 'tutorial': TunableReference(description='\n Tutorial instance for this skill. This will be used to bring up the \n skill lesson from the first notification for Sim to know this skill.\n ', manager=services.get_instance_manager(sims4.resources.Types.TUTORIAL), class_restrictions=('Tutorial',)), 'skill_unlocks_on_max': TunableList(description='\n A list of skills that become unlocked when this skill is maxed.\n ', tunable=TunableReference(description='\n A skill to unlock.\n ', manager=services.get_instance_manager(sims4.resources.Types.STATISTIC), class_restrictions=('Skill',)))}
REMOVE_INSTANCE_TUNABLES = ('min_value_tuning', 'max_value_tuning', 'decay_rate', '_default_convergence_value')
def __init__(self, tracker):
super().__init__(tracker, self.initial_value)
self._delta_enabled = True
self._callback_handle = None
if self.tracker.owner.is_simulating:
self.on_initial_startup()
self._max_level_update_sent = False
def on_initial_startup(self):
if self.tracker.owner.is_selectable:
self.refresh_level_up_callback()
def on_remove(self, on_destroy=False):
super().on_remove(on_destroy=on_destroy)
self._destory_callback_handle()
def _apply_multipliers_to_continuous_statistics(self):
for stat in self.statistic_multipliers:
while stat.continuous:
owner_stat = self.tracker.get_statistic(stat)
if owner_stat is not None:
owner_stat._recalculate_modified_decay_rate()
@caches.cached
def get_user_value(self):
return super(Skill, self).get_user_value()
def set_value(self, value, *args, from_load=False, interaction=None, **kwargs):
old_value = self.get_value()
super().set_value(value, *args, **kwargs)
self.get_user_value.cache.clear()
if not from_load:
new_value = self.get_value()
new_level = self.convert_to_user_value(value)
if old_value == self.initial_value and old_value != new_value:
sim_info = self._tracker._owner
services.get_event_manager().process_event(test_events.TestEvent.SkillLevelChange, sim_info=sim_info, statistic=self.stat_type)
old_level = self.convert_to_user_value(old_value)
if old_level < new_level:
self._apply_multipliers_to_continuous_statistics()
def add_value(self, add_amount, interaction=None, **kwargs):
old_value = self.get_value()
if old_value == self.initial_value:
telemhook = TELEMETRY_HOOK_SKILL_INTERACTION_FIRST_TIME
else:
telemhook = TELEMETRY_HOOK_SKILL_INTERACTION
super().add_value(add_amount, interaction=interaction)
self.get_user_value.cache.clear()
if interaction is not None:
self.on_skill_updated(telemhook, old_value, self.get_value(), interaction.affordance.__name__)
def _update_value(self):
old_value = self._value
if gsi_handlers.sim_handlers_log.skill_change_archiver.enabled:
last_update = self._last_update
time_delta = super()._update_value()
self.get_user_value.cache.clear()
new_value = self._value
if old_value == self.initial_value:
telemhook = TELEMETRY_HOOK_SKILL_INTERACTION_FIRST_TIME
self.on_skill_updated(telemhook, old_value, new_value, TELEMETRY_INTERACTION_NOT_AVAILABLE)
sim_info = self._tracker._owner
services.get_event_manager().process_event(test_events.TestEvent.SkillLevelChange, sim_info=sim_info, statistic=self.stat_type)
old_level = self.convert_to_user_value(old_value)
new_level = self.convert_to_user_value(new_value)
if gsi_handlers.sim_handlers_log.skill_change_archiver.enabled and self.tracker.owner.is_sim:
gsi_handlers.sim_handlers_log.archive_skill_change(self.tracker.owner, self, time_delta, old_value, new_value, new_level, last_update)
if old_value < new_value and old_level < new_level:
if self._tracker is not None:
self._tracker.notify_watchers(self.stat_type, self._value, self._value)
def on_skill_updated(self, telemhook, old_value, new_value, affordance_name):
owner_sim = self._tracker._owner
if owner_sim.is_selectable:
with telemetry_helper.begin_hook(skill_telemetry_writer, telemhook, sim=owner_sim) as hook:
hook.write_guid(TELEMETRY_FIELD_SKILL_ID, self.guid64)
hook.write_string(TELEMETRY_FIELD_SKILL_AFFORDANCE, affordance_name)
hook.write_bool(TELEMETRY_FIELD_SKILL_AFFORDANCE_SUCCESS, True)
hook.write_int(TELEMETRY_FIELD_SKILL_AFFORDANCE_VALUE_ADD, new_value - old_value)
if old_value == self.initial_value:
skill_level = self.convert_to_user_value(old_value)
self._show_level_notification(skill_level)
def _destory_callback_handle(self):
if self._callback_handle is not None:
self.remove_callback(self._callback_handle)
self._callback_handle = None
def refresh_level_up_callback(self):
self._destory_callback_handle()
def _on_level_up_callback(stat_inst):
new_level = stat_inst.get_user_value()
old_level = new_level - 1
stat_inst.on_skill_level_up(old_level, new_level)
stat_inst.refresh_level_up_callback()
self._callback_handle = self.add_callback(Threshold(self._get_next_level_bound(), operator.ge), _on_level_up_callback)
def on_skill_level_up(self, old_level, new_level):
tracker = self.tracker
sim_info = tracker._owner
if self.reached_max_level:
for skill in self.skill_unlocks_on_max:
skill_instance = tracker.add_statistic(skill, force_add=True)
skill_instance.set_value(skill.initial_value)
with telemetry_helper.begin_hook(skill_telemetry_writer, TELEMETRY_HOOK_SKILL_LEVEL_UP, sim=sim_info) as hook:
hook.write_guid(TELEMETRY_FIELD_SKILL_ID, self.guid64)
hook.write_int(TELEMETRY_FIELD_SKILL_LEVEL, new_level)
if sim_info.account is not None:
services.social_service.post_skill_message(sim_info, self, old_level, new_level)
self._show_level_notification(new_level)
services.get_event_manager().process_event(test_events.TestEvent.SkillLevelChange, sim_info=sim_info, statistic=self.stat_type)
def _show_level_notification(self, skill_level):
sim_info = self._tracker._owner
if not sim_info.is_npc:
level_data = self.level_data.get(skill_level)
if level_data is not None:
tutorial_id = None
if self.tutorial is not None and skill_level == 1:
tutorial_id = self.tutorial.guid64
notification = level_data.level_up_notification(sim_info, resolver=SingleSimResolver(sim_info))
notification.show_dialog(icon_override=(self.icon, None), secondary_icon_override=(None, sim_info), additional_tokens=(skill_level,), tutorial_id=tutorial_id)
if level_data.level_up_screen_slam is not None:
level_data.level_up_screen_slam.send_screen_slam_message(sim_info, sim_info, self.stat_name, skill_level)
@classproperty
def skill_type(cls):
return cls
@classproperty
def remove_on_convergence(cls):
return False
@classmethod
def can_add(cls, owner, force_add=False, **kwargs):
if force_add:
return True
if cls.genders and owner.gender not in cls.genders:
return False
if cls.ages and owner.age not in cls.ages:
return False
if cls.entitlement is None:
return True
if owner.is_npc:
return False
return mtx.has_entitlement(cls.entitlement)
@classmethod
def get_level_list(cls):
return cls.SKILL_LEVEL_LIST.get(cls.skill_level_type)
@classmethod
def get_max_skill_value(cls):
level_list = cls.get_level_list()
return sum(level_list)
@classmethod
def get_skill_value_for_level(cls, level):
level_list = cls.get_level_list()
if level > len(level_list):
logger.error('Level {} out of bounds', level)
return 0
return sum(level_list[:level])
@classmethod
def get_skill_effectiveness_points_gain(cls, effectiveness_level, level):
skill_gain_curve = cls.SKILL_EFFECTIVENESS_GAIN.get(effectiveness_level)
if skill_gain_curve is not None:
return skill_gain_curve.get(level)
logger.error('{} does not exist in SKILL_EFFECTIVENESS_GAIN mapping', effectiveness_level)
return 0
@classmethod
def _tuning_loaded_callback(cls):
super()._tuning_loaded_callback()
level_list = cls.get_level_list()
cls.max_level = len(level_list)
cls.min_value_tuning = 0
cls.max_value_tuning = sum(level_list)
cls._default_convergence_value = cls.min_value_tuning
cls._build_utility_curve_from_tuning_data(cls.ad_data)
for stat in cls.statistic_multipliers:
multiplier = cls.statistic_multipliers[stat]
curve = multiplier.curve
direction = multiplier.direction
use_effective_skill = multiplier.use_effective_skill
stat.add_skill_based_statistic_multiplier(cls, curve, direction, use_effective_skill)
for multiplier in cls.success_chance_multipliers:
curve = multiplier.curve
use_effective_skill = multiplier.use_effective_skill
for affordance in multiplier.affordance_list:
affordance.add_skill_multiplier(affordance.success_chance_multipliers, cls, curve, use_effective_skill)
for multiplier in cls.monetary_payout_multipliers:
curve = multiplier.curve
use_effective_skill = multiplier.use_effective_skill
for affordance in multiplier.affordance_list:
affordance.add_skill_multiplier(affordance.monetary_payout_multipliers, cls, curve, use_effective_skill)
@classmethod
def _verify_tuning_callback(cls):
success_multiplier_affordances = []
for multiplier in cls.success_chance_multipliers:
success_multiplier_affordances.extend(multiplier.affordance_list)
if len(success_multiplier_affordances) != len(set(success_multiplier_affordances)):
logger.error("The same affordance has been tuned more than once under {}'s success multipliers, and they will overwrite each other. Please fix in tuning.", cls, owner='tastle')
monetary_payout_multiplier_affordances = []
for multiplier in cls.monetary_payout_multipliers:
monetary_payout_multiplier_affordances.extend(multiplier.affordance_list)
if len(monetary_payout_multiplier_affordances) != len(set(monetary_payout_multiplier_affordances)):
logger.error("The same affordance has been tuned more than once under {}'s monetary payout multipliers, and they will overwrite each other. Please fix in tuning.", cls, owner='tastle')
@classmethod
def convert_to_user_value(cls, value):
if not cls.get_level_list():
return 0
current_value = value
for (level, level_threshold) in enumerate(cls.get_level_list()):
current_value -= level_threshold
            if current_value < 0:
                return level
return level + 1
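    # Worked example (hypothetical level list, not real tuning): with
    # get_level_list() == [100, 200, 300],
    #   convert_to_user_value(50)  -> 0   (50 - 100 < 0 at the first threshold)
    #   convert_to_user_value(150) -> 1   (150 - 100 = 50; 50 - 200 < 0)
    #   convert_to_user_value(600) -> 3   (meets the sum of all thresholds)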
@classmethod
def convert_from_user_value(cls, user_value):
(level_min, _) = cls._get_level_bounds(user_value)
return level_min
@classmethod
def _get_level_bounds(cls, level):
level_list = cls.get_level_list()
level_min = sum(level_list[:level])
if level < cls.max_level:
level_max = sum(level_list[:level + 1])
else:
level_max = sum(level_list)
return (level_min, level_max)
def _get_next_level_bound(self):
level = self.convert_to_user_value(self._value)
(_, level_max) = self._get_level_bounds(level)
return level_max
@property
def reached_max_level(self):
max_value = self.get_max_skill_value()
if self.get_value() >= max_value:
return True
return False
@property
def should_send_update(self):
if not self.reached_max_level:
return True
if not self._max_level_update_sent:
self._max_level_update_sent = True
return True
return False
@classproperty
def is_skill(cls):
return True
@classproperty
def autonomy_weight(cls):
return cls.weight
@classmethod
def create_skill_update_msg(cls, sim_id, stat_value):
if not cls.convert_to_user_value(stat_value) > 0:
return
skill_msg = Commodities_pb2.Skill_Update()
skill_msg.skill_id = cls.guid64
skill_msg.curr_points = int(stat_value)
skill_msg.sim_id = sim_id
return skill_msg
@property
def is_initial_value(self):
return self.initial_value == self.get_value()
@classproperty
def valid_for_stat_testing(cls):
return True
_SkillLootData = collections.namedtuple('_SkillLootData', ['level_range', 'stat', 'effectiveness'])
EMPTY_SKILL_LOOT_DATA = _SkillLootData(None, None, None)
class TunableSkillLootData(TunableTuple):
__qualname__ = 'TunableSkillLootData'
def __init__(self, **kwargs):
super().__init__(level_range=OptionalTunable(TunableInterval(description="\n Interval is used to clamp the sim's user facing\n skill level to determine how many point to give. If\n disabled, level passed to the dynamic skill loot\n will always be the current user facing skill level\n of sim. \n Example: if sim is level 7 in fitness but\n interaction skill level is only for 1 to 5 give the\n dynamic skill amount as if sim is level 5.\n ", tunable_type=int, default_lower=0, default_upper=1, minimum=0)), stat=TunableReference(description='\n The statistic we are operating on.\n ', manager=services.get_instance_manager(sims4.resources.Types.STATISTIC), class_restrictions=Skill), effectiveness=TunableEnumEntry(description='\n Enum to determine which curve to use when giving\n points to sim.\n ', tunable_type=SkillEffectiveness, needs_tuning=True, default=None), **kwargs)
| [
"[email protected]"
] | |
c55664a7817b6fa0a0b72e7a64a125a72c0afad7 | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/catapult/third_party/flask/flask/cli.py | c09b2cd0a0fa61c578127fc2fc39962b5e129169 | [
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 31,035 | py | # -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
from __future__ import print_function
import ast
import inspect
import os
import platform
import re
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock
from threading import Thread
import click
from werkzeug.utils import import_string
from ._compat import getargspec
from ._compat import itervalues
from ._compat import reraise
from ._compat import text_type
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_env
from .helpers import get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
try:
import ssl
except ImportError:
ssl = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in itervalues(module.__dict__) if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
'Detected multiple Flask applications in module "{module}". Use '
'"FLASK_APP={module}:name" to specify the correct '
"one.".format(module=module.__name__)
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
'Detected factory "{factory}" in module "{module}", but '
"could not call it without arguments. Use "
"\"FLASK_APP='{module}:{factory}(args)'\" to specify "
"arguments.".format(factory=attr_name, module=module.__name__)
)
raise NoAppException(
'Failed to find Flask application or factory in module "{module}". '
'Use "FLASK_APP={module}:name to specify one.'.format(module=module.__name__)
)
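# Usage sketch (assumed shell invocations, not part of this module): the errors
# above correspond to the common ways FLASK_APP is set, e.g.
#   FLASK_APP=hello                  -> import "hello", pick "app"/"application"
#   FLASK_APP=hello:myapp            -> use the variable "myapp" in "hello"
#   FLASK_APP="hello:create_app()"   -> call the "create_app" factory in "hello"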
def call_factory(script_info, app_factory, arguments=()):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
args_spec = getargspec(app_factory)
arg_names = args_spec.args
arg_defaults = args_spec.defaults
if "script_info" in arg_names:
return app_factory(*arguments, script_info=script_info)
elif arguments:
return app_factory(*arguments)
elif not arguments and len(arg_names) == 1 and arg_defaults is None:
return app_factory(script_info)
return app_factory()
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
# explicitly delete tb as it is circular referenced
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(script_info, module, app_name):
"""Checks if the given string is a variable name or a function. If it is a
function, it checks for specified arguments and whether it takes a
``script_info`` argument and calls the function with the appropriate
arguments.
"""
from . import Flask
match = re.match(r"^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$", app_name)
if not match:
raise NoAppException(
'"{name}" is not a valid variable name or function '
"expression.".format(name=app_name)
)
name, args = match.groups()
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(e.args[0])
if inspect.isfunction(attr):
if args:
try:
args = ast.literal_eval("({args},)".format(args=args))
except (ValueError, SyntaxError) as e:
raise NoAppException(
"Could not parse the arguments in "
'"{app_name}".'.format(e=e, app_name=app_name)
)
else:
args = ()
try:
app = call_factory(script_info, attr, args)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
'{e}\nThe factory "{app_name}" in module "{module}" could not '
"be called with the specified arguments.".format(
e=e, app_name=app_name, module=module.__name__
)
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from "
'"{module}:{app_name}".'.format(module=module.__name__, app_name=app_name)
)
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True # noqa: F841
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise NoAppException(
'While importing "{name}", an ImportError was raised:'
"\n\n{tb}".format(name=module_name, tb=traceback.format_exc())
)
elif raise_if_not_found:
raise NoAppException('Could not import "{name}".'.format(name=module_name))
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
import werkzeug
from . import __version__
message = "Python %(python)s\nFlask %(flask)s\nWerkzeug %(werkzeug)s"
click.echo(
message
% {
"python": platform.python_version(),
"flask": __version__,
"werkzeug": werkzeug.__version__,
},
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the flask version",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True # noqa: F841
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True # noqa: F841
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True # noqa: F841
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True # noqa: F841
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
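# Minimal usage sketch (hedged; "hello.py" is hypothetical). This mirrors how
# run_command below wires the pieces together rather than adding a new API.
def _example_dispatching_app():
    info = ScriptInfo(app_import_path="hello.py")  # ScriptInfo is defined below
    return DispatchingApp(info.load_app, use_eager_loading=False)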
class ScriptInfo(object):
"""Helper object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True # noqa: F841
if self._loaded_app is not None:
return self._loaded_app
app = None
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(self, import_name, None, raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
"Could not locate a Flask application. You did not provide "
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault("cls", AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
    For information on why this is useful see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self,
add_default_commands=True,
create_app=None,
add_version_option=True,
load_dotenv=True,
set_debug_flag=True,
**extra
):
params = list(extra.pop("params", None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points("flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
        # The commands available are those of the application (if
        # available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get("obj")
if obj is None:
obj = ScriptInfo(
create_app=self.create_app, set_debug_flag=self.set_debug_flag
)
kwargs["obj"] = obj
kwargs.setdefault("auto_envvar_prefix", "FLASK")
return super(FlaskGroup, self).main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
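# Behavior sketch on POSIX paths (derived from the join/lstrip logic above):
#   _path_is_ancestor("/a", "/a/b") -> True   ("/a" + "b" rejoins to "/a/b")
#   _path_is_ancestor("/a", "/ab")  -> False  ("/a" + "b" gives "/a/b", not "/ab")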
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
Changes the current working directory to the location of the first file
found, with the assumption that it is in the top level project directory
and will be where the Python path should import local packages from.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env or .flaskenv files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
err=True,
)
return False
# if the given path specifies the actual file then return True,
# else False
if path is not None:
if os.path.isfile(path):
return dotenv.load_dotenv(path)
return False
new_dir = None
for name in (".env", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path)
if new_dir and os.getcwd() != new_dir:
os.chdir(new_dir)
return new_dir is not None # at least one file was located and loaded
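# Precedence sketch (hypothetical file contents): if .env sets FLASK_ENV=production
# and .flaskenv sets FLASK_ENV=development plus FLASK_RUN_PORT=8000, then .env is
# found first and wins for FLASK_ENV (already-set variables are not overwritten),
# while FLASK_RUN_PORT still comes from .flaskenv.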
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
return
if app_import_path is not None:
message = ' * Serving Flask app "{0}"'.format(app_import_path)
if not eager_loading:
message += " (lazy loading)"
click.echo(message)
click.echo(" * Environment: {0}".format(env))
if env == "production":
click.secho(
" WARNING: This is a development server. "
"Do not use it in a production deployment.",
fg="red",
)
click.secho(" Use a production WSGI server instead.", dim=True)
if debug is not None:
click.echo(" * Debug mode: {0}".format("on" if debug else "off"))
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self):
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
if ssl is None:
raise click.BadParameter(
'Using "--cert" requires Python to be compiled with SSL support.',
ctx,
param,
)
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import OpenSSL # noqa: F401
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires pyOpenSSL.", ctx, param
)
return value
obj = import_string(value, silent=True)
if sys.version_info < (2, 7, 9):
if obj:
return obj
else:
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
if sys.version_info < (2, 7, 9):
is_context = cert and not isinstance(cert, (text_type, bytes))
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.', ctx, param
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
class SeparatedPathType(click.Path):
"""Click option type that accepts a list of values separated by the
OS's path separator (``:``, ``;`` on Windows). Each value is
validated as a :class:`click.Path` type.
"""
def convert(self, value, param, ctx):
items = self.split_envvar_value(value)
super_convert = super(SeparatedPathType, self).convert
return [super_convert(item, param, ctx) for item in items]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--eager-loading/--lazy-loader",
default=None,
help="Enable or disable eager loading. By default eager "
"loading is enabled if the reloader is disabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@click.option(
"--extra-files",
default=None,
type=SeparatedPathType(),
help=(
"Extra files that trigger a reload on change. Multiple paths"
" are separated by '{}'.".format(os.path.pathsep)
),
)
@pass_script_info
def run_command(
info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
if eager_loading is None:
eager_loading = not reload
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
extra_files=extra_files,
)
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command():
"""Run an interactive Python shell in the context of a given
Flask application. The application will populate the default
    namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from .globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = "Python %s on %s\nApp: %s [%s]\nInstance: %s" % (
sys.version,
sys.platform,
app.import_name,
app.env,
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup, "r") as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "rule", "match")),
default="endpoint",
help=(
'Method to sort routes by. "match" is the order that Flask will match '
"routes when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))
if sort in ("endpoint", "rule"):
rules = sorted(rules, key=attrgetter(sort))
elif sort == "methods":
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [", ".join(sorted(rule.methods - ignored_methods)) for rule in rules]
headers = ("Endpoint", "Methods", "Rule")
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*("-" * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main(as_module=False):
# TODO omit sys.argv once https://github.com/pallets/click/issues/536 is fixed
cli.main(args=sys.argv[1:], prog_name="python -m flask" if as_module else None)
if __name__ == "__main__":
main(as_module=True)
| [
"[email protected]"
] | |
d8f15f849c53f1756f8f4388019e73e0e3cbef1d | 87fbed6f08a01437ecfc31eec3eb8a6558721678 | /bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py | 62d6a55942f885c15da7aebb7a6ff622aeaafbe3 | [
"Apache-2.0",
"FreeBSD-DOC",
"MIT",
"DOC"
] | permissive | hdinsight/bigtop | 4427324380b3375741f816e9249f7fc910f80037 | 568252ea8fe5bd2c1bc50833501fef4d5a48bf0e | refs/heads/master | 2020-05-26T14:21:28.808326 | 2017-03-23T03:33:12 | 2017-03-23T03:33:12 | 82,482,997 | 0 | 1 | null | 2017-03-23T03:33:13 | 2017-02-19T19:15:23 | Java | UTF-8 | Python | false | false | 1,647 | py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import unittest
class TestDeploy(unittest.TestCase):
"""
Trivial deployment test for Apache Bigtop Kafka.
"""
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
cls.d.add('kafka', charm='kafka')
cls.d.add('zookeeper', charm='cs:xenial/zookeeper')
cls.d.relate('kafka:zookeeper', 'zookeeper:zookeeper')
cls.d.setup(timeout=1800)
cls.d.sentry.wait_for_messages({'kafka': 'ready'}, timeout=1800)
cls.kafka = cls.d.sentry['kafka'][0]
def test_deploy(self):
"""
Simple test to make sure the Kafka java process is running.
"""
output, retcode = self.kafka.run("pgrep -a java")
assert 'Kafka' in output, "Kafka daemon is not started"
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
aff7f9a5f0b21762a135c562ae38e61d20efde11 | 90bf2ffa7ee75ff266238bffd1b3edc6f83a2bbe | /WebApp_DataSupport/Pharmacy_store_database/RiteAids/step1_taskplan.py | 7a3f98b103ddd1e26984a095aa745ab90d3cc6ff | [] | no_license | MacHu-GWU/EFA-finished-projects | f7cf5e0f765aba78db2c1dd8729accff443aa6ee | 88c93b0e1c5880b710c11ef93254a732573c92ee | refs/heads/master | 2021-03-13T00:11:15.580259 | 2014-10-06T15:20:15 | 2014-10-06T15:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,081 | py | ##coding=utf8
''' THIS IS THE SCRIPT TO CRAWL RITEAID STORE LOCATION AND DETAIL INFORMATION
'''
from LinearSpider.crawler import Crawler, Taskplanner
from LinearSpider.jt import *
import bs4
import re, pprint
import jsontree
import itertools
'''
Level 1: the entrance page, which lists the URLs of all Rite Aid stores
https://www.riteaid.com/store-site-map
Level 2: an individual Rite Aid store page, which carries the detailed information
https://www.riteaid.com/store-details?storeNumber=01140
'''
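# Resulting task-tree sketch (the store number and link text are illustrative;
# the real "data" value is the anchor text scraped from the sitemap page):
# {
#   "https://www.riteaid.com/store-site-map": {
#     "data": None,
#     "https://www.riteaid.com/store-details?storeNumber=01140": {"data": "<storeID>, <address>"}
#   }
# }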
def step1_taskplan():
    '''Set up constants used inside this function'''
spider = Crawler()
TP = Taskplanner()
base_url = 'https://www.riteaid.com'
entrance_url = 'https://www.riteaid.com/store-site-map'
    TP.todo.setdefault(entrance_url, {'data': None} ) # reserve a slot for the next level; it is filled in once the current page has been crawled
    html = spider.html(entrance_url) # start crawling
if html:
soup = bs4.BeautifulSoup(html)
for a in soup.findAll(href = re.compile(r'https://www.riteaid.com/store-details\?storeNumber=\d*')):
TP.todo[entrance_url].setdefault( a['href'],
{'data': a.text} )
    TP._dump_todo('riteaid_task.json', replace = True)
def validate(phone, hours, additional_info, detail):
    # phone should be 14 characters long, e.g. (202)-001-1234, and hours must
    # have 4 entries: Mon-Thur, Fri, Sat, Sun. The phone check is relaxed for now.
    if len(hours) == 4:
    # if (len(phone) == 14) & (len(hours) == 4):
return True
else:
return False
def step2_download():
spider = Crawler()
TP = Taskplanner()
TP._load_todo('riteaid_task.json')
base_url = 'https://www.riteaid.com'
entrance_url = 'https://www.riteaid.com/store-site-map'
riteaid = load_jt('riteaid_data.json')
counter = itertools.count(0)
for store_url in ignore_iterkeys(TP.todo[entrance_url] ):
        ## First handle the reference data passed along with the store URL
        text = TP.todo[entrance_url][store_url]['data']
        storeID, address = text.split(',', 1)
        storeID, address = storeID.strip(), address.strip()
        ## Then process each store page
        if storeID not in riteaid: # skip stores that have already been crawled
html = spider.html(store_url)
if html:
try:
soup = bs4.BeautifulSoup(html)
''' phone number '''
phone = ''
for p in soup.findAll('p', attrs = {'class', 'padding-phone'}):
phone = p.text.replace(p.strong.text, '').strip().replace(' ', '-') # process Phone
''' hour '''
hours = list()
for ul in soup.findAll('ul', attrs = {'class', 'days'}):
hours.append( ul.text.split() ) # process Office Hour
''' additional information '''
additional_info = list()
for div in soup.findAll('div', attrs = {'id': 'eventListId'}):
for li in div.findAll('li'):
additional_info.append( li.text ) # process Additional Information
''' store detail '''
detail = {}
for div in soup.findAll('div', attrs = {'class': 'storeDetailsAttributeCategory'}):
storeDetailsAttributeCategory = div.strong.text.strip()
detail.setdefault(storeDetailsAttributeCategory, list())
for subdiv in div.findAll('div', attrs = {'class': 'storeDetailsAttribute'}):
detail[storeDetailsAttributeCategory].append(subdiv.text.strip()) # process Store Detail
## validate the information I crawled
if validate(phone, hours, additional_info, detail): # <=== validate, sometime error
print "CORRECT"
riteaid.setdefault(storeID,
{'address': address,
'phone': phone,
'hours': hours,
'additional_info': additional_info,
'detail': detail} )
dump_jt(riteaid, 'riteaid_data.json', replace = True)
                        print storeID, counter.next() ## only count the successful records
else:
print "ERROR!", (phone, hours, additional_info, detail)
print "\t%s" % store_url
print '%s.html' % (store_url[-5:],)
with open('%s.html' % store_url[-5:], 'wb') as f:
f.write(html)
except:
pass
def unit_test():
pass
if __name__ == '__main__':
    # step1_taskplan() # run the task-planning step first
step2_download()
# unit_test()
| [
"[email protected]"
] | |
68bbdbbe3be2d7e90ddbed3f4526c3b1b1d5c6b1 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit2900.py | 86361f453a3ce3e1037cbed649c72e635d83d62a | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,269 | py | # qubit number=4
# total number=44
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
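# Worked examples of the helpers above; note that bitwise_xor reverses the bit
# order via res[::-1], so "110" xor "000" comes back as "011".
def _demo_bitwise_helpers():
    assert bitwise_xor("110", "000") == "011"
    assert bitwise_dot("111", "101") == "0"  # (1*1 + 1*0 + 1*1) % 2 == 0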
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
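# Minimal sketch of calling the oracle builder; the marked bitstring "101" is
# made up for illustration and is not part of the benchmark below.
def _demo_build_oracle():
    f = lambda rep: "1" if rep == "101" else "0"  # oracle marks the input |101>
    return build_oracle(3, f)  # 3 control qubits plus 1 target qubit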
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.x(input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=34
prog.cz(input_qubit[0],input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=36
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[0]) # number=41
prog.cz(input_qubit[3],input_qubit[0]) # number=42
prog.h(input_qubit[0]) # number=43
prog.z(input_qubit[3]) # number=39
prog.cx(input_qubit[3],input_qubit[0]) # number=40
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.y(input_qubit[3]) # number=37
prog.h(input_qubit[0]) # number=14
prog.h(input_qubit[1]) # number=30
prog.cz(input_qubit[2],input_qubit[0]) # number=15
prog.h(input_qubit[0]) # number=16
prog.cx(input_qubit[0],input_qubit[2]) # number=20
prog.x(input_qubit[2]) # number=21
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.cx(input_qubit[0],input_qubit[2]) # number=17
prog.cx(input_qubit[0],input_qubit[2]) # number=23
prog.x(input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[2]) # number=25
prog.cx(input_qubit[0],input_qubit[2]) # number=19
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2900.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
b0beeccbba6eae48ddb4620af81cb6474b233249 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /abstract_nas/train/utils.py | 081b0270fe1231712222e899b511aca62b6b905e | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 5,379 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training utils."""
import math
from typing import Any, Callable, Optional
from big_vision import utils as bv_utils
from big_vision.utils import create_learning_rate_schedule as bv_create_learning_rate_schedule
import flax
from flax import struct
import jax
import jax.numpy as jnp
# pytype:disable=attribute-error
@struct.dataclass
class ExponentialMovingAverage:
"""Exponential Moving Average as implemented in Tensorflow."""
# Moving average of the parameters.
state: Any
# Decay to use for the update (typical values are 0.999, 0.9999, etc...).
decay: float
# For how many steps we should just keep the new parameters instead of an
# average (useful if we don't want the initial weights to be included in the
# average).
warmup_steps: int
def update_moving_average(self, new_target,
step):
"""Updates the moving average of the target.
Args:
new_target: New values of the target (example: weights of a network
after gradient step).
step: Current step (used only for warmup).
Returns:
The updated ExponentialMovingAverage.
"""
factor = jnp.float32(step >= self.warmup_steps)
delta = step - self.warmup_steps
decay = jnp.minimum(self.decay, (1. + delta) / (10. + delta))
decay *= factor
new_target = flax.core.FrozenDict(new_target)
state = flax.core.FrozenDict(self.state)
weight_ema = jax.tree_map(lambda a, b: (1 - decay) * a + decay * b,
new_target, state)
return self.replace(state=weight_ema)
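    # Worked example of the warmup-aware decay (made-up numbers): with
    # decay=0.999 and warmup_steps=100, step=109 gives delta=9 and
    # decay = min(0.999, (1 + 9) / (10 + 9)) ~= 0.526, so the fresh parameters
    # get weight ~0.47 instead of the asymptotic 0.001; before step 100 the
    # factor is 0, decay becomes 0, and the state simply copies the new target.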
# pytype:enable=attribute-error
def create_exponential_rate_schedule(global_batch_size,
total_steps,
steps_per_epoch = None,
base = 0.0,
scale_with_batchsize = False,
warmup_steps = 0,
cooldown_steps = 0,
warmup_epochs = 0,
cooldown_epochs = 0,
**kw):
"""Creates exponential learning rate schedule.
Args:
global_batch_size: The global batch-size optionally used for scaling.
total_steps: The total number of steps to run.
steps_per_epoch: How many steps form an epoch. Needed only if anything is
passed in terms of epochs.
base: The starting learning-rate (without warmup).
scale_with_batchsize: Whether or not to scale lr automatically.
warmup_steps: how many steps to warm up for.
cooldown_steps: how many steps to cool down for.
warmup_epochs: how many epochs to warm up for.
cooldown_epochs: how many epochs to cool down for.
**kw: extra arguments specific to individual decay_types.
Returns:
A function learning_rate(step): float -> {"learning_rate": float}.
"""
# For convenience, convert {warmup,cooldown}_epochs to _steps.
assert bool(warmup_epochs) + bool(warmup_steps) < 2, "Only one!"
assert bool(cooldown_epochs) + bool(cooldown_steps) < 2, "Only one!"
if warmup_epochs:
warmup_steps = warmup_epochs * steps_per_epoch
assert warmup_steps < total_steps, "warmup_steps is >= total_steps"
if cooldown_epochs:
cooldown_steps = cooldown_epochs * steps_per_epoch
def step_fn(step):
"""Step to learning rate function."""
lr = base
# This implements the linear scaling rule following
# Goyal et al. at arxiv.org/abs/1706.02677.
# The reference batch size in literature is 256, so we scale the lr to
# adjust to the literature lr when bach_size changes.
if scale_with_batchsize:
lr = lr * global_batch_size / 256.0
progress = (step - warmup_steps) / float(total_steps - warmup_steps)
progress = jnp.clip(progress, 0.0, 1.0)
# At the end of the training, lr should be 1.2% of original value.
# This mimic the behavior from the efficientnet paper.
end_lr_ratio = kw.get("end_lr_ratio", 0.012)
lr = lr * jnp.exp(progress * math.log(end_lr_ratio))
if warmup_steps:
lr = lr * jnp.minimum(1., step / warmup_steps)
if cooldown_steps:
lr = lr * jnp.minimum(1., (total_steps - step) / cooldown_steps)
return jnp.asarray(lr, dtype=jnp.float32)
return step_fn
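# Hedged sketch with made-up arguments: 100 total steps, base learning rate
# 1.0, no warmup or cooldown, and the default end_lr_ratio of 0.012.
def _demo_exponential_schedule():
    step_fn = create_exponential_rate_schedule(256, 100, base=1.0)
    return step_fn(0), step_fn(100)  # ~1.0 at step 0, ~0.012 at the final step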
def create_learning_rate_schedule(*args,
decay_type = "stair",
**kwargs):
if decay_type != "exponential":
return bv_create_learning_rate_schedule(*args, decay_type=decay_type,
**kwargs)
else:
return create_exponential_rate_schedule(*args, **kwargs)
bv_utils.create_learning_rate_schedule = create_learning_rate_schedule
| [
"[email protected]"
] | |
a431c38e40d5c992bad2de38414efcdd2faf009f | 9fdee128812956e1e1919a58c7f64561543abf56 | /test/lesson7.py | 0a2d77f3ac3db358fa074a6c257dcca8f8fe1555 | [] | no_license | OleksandrMyshko/python | 38139b72a75d52ca0a6a5787c5e6357432ec6799 | 1caed3c05d513c0dd62d6ff77910e9596c50969f | refs/heads/master | 2021-07-03T03:31:11.198438 | 2017-09-25T17:43:59 | 2017-09-25T17:43:59 | 104,762,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | class Quadrangle:
def __init__(self, line1, line2, line3, angle1, angle2):
self.line1 = line1
self.line2 = line2
self.line3 = line3
self.anle1 = angle1
self.anle2 = angle2
class Paralelogram(Quadrangle):
def __init__(self, line1, line2, angle):
Quadrangle.__init__(self, line1, line2, line1, angle, 180 - angle)
class Rectangle(Paralelogram):
def __init__(self, line1, line2):
Paralelogram.__init__(self, line1, line2, 90)
class Romb(Paralelogram):
def __init__(self, line, angle):
Paralelogram.__init__(self, line, angle)
class Squere(Rectangle, Romb):
def __init__(self, line):
Rectangle.__init__(self, line, line)
Romb.__init__(self, line, 90) | [
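# Usage sketch (the side length 4 is arbitrary): Square(4) runs both parent
# initialisers, so line1 == line2 == line3 == 4 and angle1 == angle2 == 90.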
"[email protected]"
] | |
bd56a0f44ae46f5794196f471684b9ae4761f9dc | 76e6d4f93078327fef8672133fc75a6f12abc240 | /ABC173/B.py | 6e91788c46fdff0b91536ff5bd3d7c1f1f6f90f1 | [] | no_license | adusa1019/atcoder | 1e8f33253f6f80a91d069b2f3b568ce7a2964940 | f7dbdfc021425160a072f4ce4e324953a376133a | refs/heads/master | 2021-08-08T04:41:36.098678 | 2021-02-01T07:34:34 | 2021-02-01T07:34:34 | 89,038,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from collections import Counter
def solve(string):
n, *s = string.split()
c = Counter(s)
return "\n".join(f"{k} x {c[k]}" for k in "AC WA TLE RE".split())
if __name__ == '__main__':
import sys
print(solve(sys.stdin.read().strip()))
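    # Example (made-up verdicts): solve("6\nAC TLE AC AC WA TLE") returns
    # "AC x 3\nWA x 1\nTLE x 2\nRE x 0".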
| [
"[email protected]"
] | |
a0768d99099f79c156b78ba507456b70a20cfef6 | 483424524c70852cc043e0d77bf1b757a61d797a | /deepspeed/runtime/data_pipeline/curriculum_scheduler.py | 23d747957dc4647e06fad0a94e5e4b071b6f6e23 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | microsoft/DeepSpeed | 810f1af320020718d0794f5a97cde6f1d17af122 | 55d9964c59c0c6e23158b5789a5c36c28939a7b0 | refs/heads/master | 2023-09-06T07:40:52.145692 | 2023-09-05T23:51:23 | 2023-09-05T23:51:23 | 235,860,204 | 27,557 | 3,347 | Apache-2.0 | 2023-09-14T21:38:46 | 2020-01-23T18:35:18 | Python | UTF-8 | Python | false | false | 10,025 | py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
from deepspeed.utils import logger
from .constants import *
class CurriculumScheduler(object):
def __init__(self, config):
super().__init__()
self.state = {}
assert CURRICULUM_LEARNING_MIN_DIFFICULTY in config, \
f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MIN_DIFFICULTY}'"
assert CURRICULUM_LEARNING_MAX_DIFFICULTY in config, \
f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MAX_DIFFICULTY}'"
assert CURRICULUM_LEARNING_SCHEDULE_TYPE in config, \
f"Curriculum learning requires the config '{CURRICULUM_LEARNING_SCHEDULE_TYPE}'"
self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY]
self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] = config[CURRICULUM_LEARNING_MAX_DIFFICULTY]
self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY]
self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] = config[CURRICULUM_LEARNING_SCHEDULE_TYPE]
self.first_step = True
if config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE:
"""
The schedule_config is a list of difficulty and a list of max
step belonging to each difficulty. Example json config:
"schedule_config": {
"difficulty": [1,2,3],
"max_step": [5,10]
}
The "max_step" has one less element than "difficulty", because
the last difficulty will be used for all following steps.
The self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] is a dictionary of
difficulty : [max step for this difficulty, next difficulty].
"""
assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY}'"
assert CURRICULUM_LEARNING_SCHEDULE_MAX_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_MAX_STEP}'"
assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) > 0
assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) > 0
assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) == len(
config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) + 1
self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT:
"""
The schedule_config includes:
total_curriculum_step: how many steps the curriculum learning takes to go
from min difficulty to max difficulty.
difficulty_step: the difficulty level determined every time must
be a multiple of this difficulty_step. This is used to determine
the step of difficulty increase, and to ensure the use of NVIDIA
Tensor Core acceleration (requires multiple of 8 (FP16) or
16 (INT8)).
root_degree: the degree of the root function. Degree of 2 means
square root and degree of 3 means cube root. Degree of 1 is
equivalent to linear.
"schedule_config": {
"total_curriculum_step": 30000,
"difficulty_step": 8,
"root_degree": 2
}
"""
assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'"
assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'"
assert CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE}'"
if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0:
logger.warning(
f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.'
)
self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR:
"""
The schedule_config is the same as CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT but without the
root_degree.
"schedule_config": {
"total_curriculum_step": 30000,
"difficulty_step": 8
}
"""
assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'"
assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'"
if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0:
logger.warning(
f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.'
)
self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM:
"""
            Fully customized schedule. The user needs to provide a custom
            schedule function via the set_custom_curriculum_learning_schedule
            API in deepspeed/runtime/engine.py
"""
self.custom_get_difficulty = None
else:
raise RuntimeError('Unsupported curriculum schedule type')
def get_current_difficulty(self):
return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY]
def set_current_difficulty(self, difficulty):
self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = difficulty
def set_custom_get_difficulty(self, schedule_function):
self.custom_get_difficulty = schedule_function
def get_state(self):
return self.state
def set_state(self, state):
self.state = state
def __fixed_discrete_get_difficulty(self, global_steps):
s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
if global_steps > s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][-1]:
return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][-1]
for i in range(len(s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP])):
if global_steps <= s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][i]:
return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][i]
def __fixed_root_get_difficulty(self, global_steps, root_degree=None):
s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
if root_degree is None:
root_degree = s_state[CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE]
next_difficulty = (float(global_steps) / s_state[CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP])**(1.0 / root_degree)
next_difficulty = math.floor(
next_difficulty *
(self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] - self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY]) +
self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY])
next_difficulty -= (next_difficulty % s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP])
next_difficulty = min(next_difficulty, self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY])
return next_difficulty
def get_difficulty(self, global_steps):
if self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE:
return self.__fixed_discrete_get_difficulty(global_steps)
elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR:
return self.__fixed_root_get_difficulty(global_steps, 1)
elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT:
return self.__fixed_root_get_difficulty(global_steps)
elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM:
return self.custom_get_difficulty(global_steps)
else:
raise RuntimeError('Unsupported curriculum schedule type')
def update_difficulty(self, global_steps):
if self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] < self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY]:
self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = self.get_difficulty(global_steps)
return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY]
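# --- Illustrative example (added; not part of the original file) ---
# How the fixed_root schedule advances, assuming min_difficulty=8,
# max_difficulty=1024, total_curriculum_step=10000, difficulty_step=8 and
# root_degree=2:
#   step  1000 -> floor(sqrt(0.1) * (1024 - 8) + 8) = 329, rounded down to 328
#   step  5000 -> floor(sqrt(0.5) * (1024 - 8) + 8) = 726, rounded down to 720
#   step 10000 -> 1024 (already a multiple of 8, capped at max_difficulty)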
| [
"[email protected]"
] | |
ff8f9fb12fc8a07674151d36db80f85f4f5c9b1a | 53c1de76b7959da4689b2be1c6508fc0d39f0e88 | /lv1_rf.py | 67ceb779a06ca6154fa363950114cf867d5aff64 | [
"MIT"
] | permissive | ak110/kaggle-otto | d1ada166f65752435ebe50ad292306eb00f91106 | 03d0bf045beeeed9754d872824c4b9649a3782a7 | refs/heads/master | 2021-04-12T22:55:18.201278 | 2020-06-17T02:40:54 | 2020-06-17T02:40:54 | 249,114,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,514 | py | #!/usr/bin/env python3
"""
acc: 0.809
"""
# region imports
# pylint: disable=unused-import
import functools # noqa: F401
import pathlib # noqa: F401
import random # noqa: F401
import albumentations as A # noqa: F401
import numpy as np # noqa: F401
import pandas as pd # noqa: F401
import sklearn.datasets # noqa: F401
import sklearn.ensemble # noqa: F401
import sklearn.linear_model # noqa: F401
import sklearn.metrics # noqa: F401
import sklearn.model_selection # noqa: F401
import sklearn.neighbors # noqa: F401
import tensorflow as tf # noqa: F401
import tensorflow_addons as tfa # noqa: F401
import _data
import pytoolkit as tk
# endregion
num_classes = 9
nfold = 5
split_seed = 1
models_dir = pathlib.Path(f"models/{pathlib.Path(__file__).stem}")
app = tk.cli.App(output_dir=models_dir)
logger = tk.log.get(__name__)
def create_model():
return tk.pipeline.SKLearnModel(
estimator=sklearn.ensemble.RandomForestClassifier(n_jobs=-1),
nfold=nfold,
models_dir=models_dir,
score_fn=score,
predict_method="predict_proba",
)
# region data/score
def load_train_data():
dataset = _data.load_train_data()
return dataset
def load_test_data():
dataset = _data.load_test_data()
return dataset
def score(
y_true: tk.data.LabelsType, y_pred: tk.models.ModelIOType
) -> tk.evaluations.EvalsType:
return tk.evaluations.evaluate_classification(y_true, y_pred)
# endregion
# region commands
@app.command(then="validate")
def train():
train_set = load_train_data()
folds = tk.validation.split(train_set, nfold, stratify=True, split_seed=split_seed)
model = create_model()
model.cv(train_set, folds)
@app.command(then="predict")
def validate():
train_set = load_train_data()
folds = tk.validation.split(train_set, nfold, stratify=True, split_seed=split_seed)
model = create_model().load()
pred = model.predict_oof(train_set, folds)
if tk.hvd.is_master():
tk.utils.dump(pred, models_dir / "pred_train.pkl")
tk.notifications.post_evals(score(train_set.labels, pred))
@app.command()
def predict():
test_set = load_test_data()
model = create_model().load()
pred_list = model.predict_all(test_set)
pred = np.mean(pred_list, axis=0)
if tk.hvd.is_master():
tk.utils.dump(pred_list, models_dir / "pred_test.pkl")
_data.save_prediction(models_dir, test_set, pred)
# endregion
if __name__ == "__main__":
app.run(default="train")
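# Stacking note (illustrative, not part of the original script): the OOF
# predictions dumped above are the level-1 features a level-2 model would
# consume, assuming pytoolkit's tk.utils.load mirrors tk.utils.dump:
#   oof_features = tk.utils.load(models_dir / "pred_train.pkl")   # (n_train, 9)
#   test_features = np.mean(tk.utils.load(models_dir / "pred_test.pkl"), axis=0)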
| [
"[email protected]"
] | |
0d36cabbeaf2eaf5b5c22ef55030ad917ec5946f | 85f5dff291acf1fe7ab59ca574ea9f4f45c33e3b | /api/tacticalrmm/checks/migrations/0029_alter_checkresult_alert_severity.py | 79d9fcd1af50910fe38897c0668156f57f23fe85 | [
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sadnub/tacticalrmm | a4ecaf994abe39244a6d75ed2166222abb00d4f4 | 0af95aa9b1084973642da80e9b01a18dcacec74a | refs/heads/develop | 2023-08-30T16:48:33.504137 | 2023-04-10T22:57:44 | 2023-04-10T22:57:44 | 243,405,684 | 0 | 2 | MIT | 2020-09-08T13:03:30 | 2020-02-27T01:43:56 | Python | UTF-8 | Python | false | false | 502 | py | # Generated by Django 4.0.3 on 2022-04-15 21:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checks', '0028_auto_20220401_2301'),
]
operations = [
migrations.AlterField(
model_name='checkresult',
name='alert_severity',
field=models.CharField(blank=True, choices=[('info', 'Informational'), ('warning', 'Warning'), ('error', 'Error')], max_length=15, null=True),
),
]
| [
"[email protected]"
] | |
73ff21278e42e7835eb250eb0db2d9850fa185fd | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Basic Programming/Implementation/Basics of Implementation/Strings/test.py | 8b64720b4744115d68bb975ca590626995608952 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 556 | py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch(
"builtins.input",
side_effect=[
"3",
"1 1",
"1 3",
"100 500",
],
)
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(), "YES\n" + "NO\n" + "NO\n")
if __name__ == "__main__":
unittest.main()
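# Harness note (added for clarity): @patch feeds the fixed input lines to the
# script, and redirect_stdout captures everything printed while
# "import solution" executes the solution module once, so the assertion can
# compare the captured output against the expected answer.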
| [
"[email protected]"
] | |
e243f32d77ececd6663d33dde58d642e62f814bd | ad689bcffd4694957a236cf95cb55d0522f81ddb | /passive/mod3d/SConstruct | 16e0c0d38121759a17a01abce812fa6534aa418d | [] | no_license | raypenper/reproducible_research | fdd83e9680b83395cdc1c7fee6136af6d6e570e5 | ca48fc14537bb5d77e1e5d55e65f967732f1f1cd | refs/heads/master | 2023-08-21T02:32:00.780948 | 2021-10-28T02:33:31 | 2021-10-28T02:33:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,300 | from rsf.proj import *
#### COPYRIGHT: Chen et al. (2021)
#### The University of Texas at Austin
#### Chen, Y., O.M. Saad, M. Bai, X. Liu, and S. Fomel, 2021, A compact program for 3D passive seismic source-location imaging, Seismological Research Letters, doi: 10.1785/0220210050.
#### Part I: Specifying parameters ####
nt=1501 #number of samples
dt=0.001 #temporal sampling
nb=30 #size of ABC layers
ct=0.01 #ABC parameter
jsnap=4 #output wavefield interval
ng=4 #number of groups
nz=141 #samples in Z
nx=141 #samples in X
ny=141 #samples in Y
dz=20 #sampling in Z
dx=20 #sampling in X
dy=20 #sampling in Y
ic2=1 #squared cross-correlation IC
#for synthetic test
ns=3
sz='50,70,90'
sx='50,60,70'
sy='50,60,70'
f='10,10,10'
t='0.2,0.35,0.5'
A='1,2,2'
#### Part II: Compiling the program ####
exe=Program('mod3d.c')
#### Part III: Creating/Inputing the velocity model ####
Flow('vel',None,'spike n1=%d n2=%d n3=%d d1=%g d2=%g d3=%g mag=4600 | math output="1500+1.2*x1"'%(nz,nx,ny,dz,dx,dy))
## predefining source locatons, plotting the sources onto the velocity model
Flow('src',None,
'''
spike n1=%d n2=%d n3=%d d1=%g d2=%g d3=%g nsp=%d
k1=%s
k2=%s
k3=%s
mag=200000 | smooth rect1=4 rect2=4 rect3=4 repeat=1
'''%(nz,nx,ny,dz,dx,dy,ns,sz,sx,sy))
Flow('sov','vel src','add mode=a ${SOURCES[1]} ')
## plotting the velocity models with highlights on source locations
Result('vel1','vel src','add mode=a ${SOURCES[1]} |byte bar=bar.rsf mean=y|grey3 flat=n allpos=y bias=1500 color=j scalebar=n maxval=2200 title="Source Location and Velocity Model" barlabel="V" barunit="m/s" label1=Depth label2="Distance in X" label3="Distance in Y" unit1=m unit2=m unit3=m frame1=49 frame2=49 frame3=49 scalebar=y point1=0.7 point2=0.6')
Result('vel2','vel src','add mode=a ${SOURCES[1]} |byte bar=bar.rsf mean=y|grey3 flat=n allpos=y bias=1500 color=j scalebar=n maxval=2200 title="Source Location and Velocity Model" barlabel="V" barunit="m/s" label1=Depth label2="Distance in X" label3="Distance in Y" unit1=m unit2=m unit3=m frame1=69 frame2=59 frame3=59 scalebar=y point1=0.7 point2=0.6')
Result('vel3','vel src','add mode=a ${SOURCES[1]} |byte bar=bar.rsf mean=y|grey3 flat=n allpos=y bias=1500 color=j scalebar=n maxval=2200 title="Source Location and Velocity Model" barlabel="V" barunit="m/s" label1=Depth label2="Distance in X" label3="Distance in Y" unit1=m unit2=m unit3=m frame1=89 frame2=69 frame3=69 scalebar=y point1=0.7 point2=0.6')
#### Part IV: Generating/Inputing the recorded passive seismic data ####
Flow('data0','vel %s'%exe[0],
'''
./${SOURCES[1]} verb=y cmplx=n ps=y nt=%d dt=%g jsnap=1 abc=y nbt=%d ct=%g src=0 ns=%d
spz=%s
spx=%s
spy=%s
f0=%s
t0=%s
A=%s
'''%(nt,dt,nb,ct,ns,sz,sx,sy,f,t,A))
## add noise and sub samples
Flow('data','data0','noise var=0.001 type=y seed=12005|window j2=1 j3=1')
Result('data','data','byte | grey3 flat=n frame1=400 frame2=30 frame3=10 clip=1.0 title="Synthetic data" unit2=m unit3=m point1=0.7 point2=0.6 point1=0.7 point2=0.6')
#### Part V: Backward propagating of the grouped receiver wavefield ####
dg = (nx - nb*2)//ng  # integer group width; the old C-style "(int)(...)/ng" yields a float on Python 3
#for python3 use
#print('Group interval is %d'%dg)
#for python2 use
# print 'Group interval is',dg
snaps_list = []
src_list = ''
for i in range(ng):
mask = 'mask%d' %i
data = 'data_mask%d' %i
img = 'img%d' %i
snaps = 'snaps%d' %i
Flow(mask,None,'spike n1=%d n2=%d mag=1 k1=%d l1=%d k2=%d l2=%d | sfdd type=int' %(nx-nb*2,ny-nb*2,dg*i+1,dg*i+dg,1,ny-nb*2))
Flow(data,['data',mask],'headercut mask=${SOURCES[1]}')
Flow([img,snaps],['vel',data,'%s'%exe[0]],
'''
./${SOURCES[2]} snaps=${TARGETS[1]} verb=y cmplx=n vref=1500 ps=y abc=y nbt=%d ct=%g tri=y dat=${SOURCES[1]} jsnap=%d
'''%(nb,ct,jsnap))
Result(snaps,'window j3=10 | grey gainpanel=a')
Result(img,'grey')
snaps_list += [snaps]
src_list += ' ${SOURCES[%d]}' %(i+1)
## view the grouped data
Result('data_mask0','byte | window max1=1.3 |grey3 flat=n frame1=400 frame2=10 frame3=40 clip=0.5 title="Group 1" label2="Distance in X" label3="Distance in Y" unit2=m unit3=m point1=0.7 point2=0.6')
Result('data_mask1','byte | window max1=1.3 |grey3 flat=n frame1=400 frame2=30 frame3=40 clip=0.5 title="Group 2" label2="Distance in X" label3="Distance in Y" unit2=m unit3=m point1=0.7 point2=0.6')
Result('data_mask2','byte | window max1=1.3 |grey3 flat=n frame1=400 frame2=50 frame3=40 clip=0.5 title="Group 3" label2="Distance in X" label3="Distance in Y" unit2=m unit3=m point1=0.7 point2=0.6')
Result('data_mask3','byte | window max1=1.3 |grey3 flat=n frame1=400 frame2=70 frame3=40 clip=0.5 title="Group 4" label2="Distance in X" label3="Distance in Y" unit2=m unit3=m point1=0.7 point2=0.6')
#### Part VI: Applying the cross-correlation imaging condition ####
if ic2:
if ng==2:
Flow('ccr0',snaps_list,'math a=${SOURCES[1]} output="input^2*a^2"')
elif ng==4:
Flow('ccr0',snaps_list,'math a=${SOURCES[1]} b=${SOURCES[2]} c=${SOURCES[3]} output="input^2*a^2*b^2*c^2"')
elif ng==6:
Flow('ccr0',snaps_list,'math a=${SOURCES[1]} b=${SOURCES[2]} c=${SOURCES[3]} d=${SOURCES[4]} e=${SOURCES[5]} output="input^2*a^2*b^2*c^2*d^2*e^2"')
elif ng==8:
Flow('ccr0',snaps_list,'math a=${SOURCES[1]} b=${SOURCES[2]} c=${SOURCES[3]} d=${SOURCES[4]} e=${SOURCES[5]} f=${SOURCES[6]} g=${SOURCES[7]} output="input^2*a^2*b^2*c^2*d^2*e^2*f^2*g^2"')
elif ng==10:
Flow('ccr0',snaps_list,'math a=${SOURCES[1]} b=${SOURCES[2]} c=${SOURCES[3]} d=${SOURCES[4]} e=${SOURCES[5]} f=${SOURCES[6]} g=${SOURCES[7]} h=${SOURCES[8]} i=${SOURCES[9]} output="input^2*a^2*b^2*c^2*d^2*e^2*f^2*g^2*h^2*i^2"')
else:
Flow('ccr0',snaps_list,'cat axis=5 ${SOURCES[1:%d]} | stack prod=y axis=5'%len(snaps_list))
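# Note: with ic2=1 the unrolled branches above square each group's wavefield
# before multiplying (squared cross-correlation imaging condition); the
# generic fallback ("cat axis=5 | stack prod=y") takes the plain product of
# the group wavefields and works for any number of groups ng.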
Flow('location0','ccr0','stack axis=4 |pad beg1=%d end1=%d beg2=%d end2=%d beg3=%d end3=%d| put o1=0 o2=0 o3=0'%(nb,nb,nb,nb,nb,nb))
#### Part VII: Plotting the source locations ####
Result('location1','location0','threshold1 thr=0.08|byte |grey3 flat=n frame1=50 frame2=50 frame3=49 pclip=99.999999 title="Source Location Image" label1=Depth label2="Distance in X" label3="Distance in Y" unit1=m unit2=m unit3=m point1=0.7 point2=0.6')
Result('location2','location0','threshold1 thr=0.08|byte |grey3 flat=n frame1=70 frame2=60 frame3=60 pclip=99.999999 title="Source Location Image" label1=Depth label2="Distance in X" label3="Distance in Y" unit1=m unit2=m unit3=m point1=0.7 point2=0.6')
Result('location3','location0','threshold1 thr=0.08|byte |grey3 flat=n frame1=85 frame2=70 frame3=70 pclip=99.999999 title="Source Location Image" label1=Depth label2="Distance in X" label3="Distance in Y" unit1=m unit2=m unit3=m point1=0.7 point2=0.6')
#### Traditional Time-reversal imaging
Flow(['img','snaps'],['vel','data','%s'%exe[0]],
'''
./${SOURCES[2]} snaps=${TARGETS[1]} verb=y cmplx=n vref=1500 ps=y abc=y nbt=%d ct=%g tri=y dat=${SOURCES[1]} jsnap=%d
'''%(nb,ct,jsnap))
Flow('snaps-abs0','snaps','math output="input*input"')
# >>> 376-0.2/0.004
# 326.0
# >>> 376-0.32/0.004
# 296.0
# >>> 376-0.5/0.004
# 251.0
Result('location-tr1','snaps-abs0','window n4=1 f4=50 |pad beg1=%d end1=%d beg2=%d end2=%d beg3=%d end3=%d| put o1=0 o2=0 o3=0 |threshold1 thr=0.08| byte pclip=100|grey3 flat=n frame1=50 frame2=50 frame3=49 pclip=99.999999 title="Source Location Image" label1=Depth label2="Distance in X" label3="Distance in Y" unit1=m unit2=m unit3=m point1=0.7 point2=0.6'%(nb,nb,nb,nb,nb,nb))
Result('location-tr2','snaps-abs0','window n4=1 f4=83 |pad beg1=%d end1=%d beg2=%d end2=%d beg3=%d end3=%d| put o1=0 o2=0 o3=0 |threshold1 thr=0.1| byte pclip=100|grey3 flat=n frame1=70 frame2=60 frame3=60 pclip=99.999999 title="Source Location Image" label1=Depth label2="Distance in X" label3="Distance in Y" unit1=m unit2=m unit3=m point1=0.7 point2=0.6'%(nb,nb,nb,nb,nb,nb))
Result('location-tr3','snaps-abs0','window n4=1 f4=130 | pad beg1=%d end1=%d beg2=%d end2=%d beg3=%d end3=%d| put o1=0 o2=0 o3=0 |threshold1 thr=0.1| byte clip=10|grey3 flat=n frame1=85 frame2=70 frame3=70 pclip=99.999999 title="Source Location Image" label1=Depth label2="Distance in X" label3="Distance in Y" unit1=m unit2=m unit3=m point1=0.7 point2=0.6'%(nb,nb,nb,nb,nb,nb))
End()
| [
"[email protected]"
] | ||
8f20957355e95f44ff9740c2e760e94183f95483 | 1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc | /venv/lib/python2.7/site-packages/ansible/modules/system/service_facts.py | ce61b0918e4e780cf16528dff8f8ae09ba698607 | [
"MIT"
] | permissive | otus-devops-2019-02/devopscourses_infra | 1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c | e42e5deafce395af869084ede245fc6cff6d0b2c | refs/heads/master | 2020-04-29T02:41:49.985889 | 2019-05-21T06:35:19 | 2019-05-21T06:35:19 | 175,780,457 | 0 | 1 | MIT | 2019-05-21T06:35:20 | 2019-03-15T08:35:54 | HCL | UTF-8 | Python | false | false | 10,382 | py | #!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# originally copied from AWX's scan_services module to bring this functionality
# into Core
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: service_facts
short_description: Return service state information as fact data
description:
- Return service state information as fact data for various service management utilities
version_added: "2.5"
requirements: ["Any of the following supported init systems: systemd, sysv, upstart"]
notes:
- When accessing the C(ansible_facts.services) facts collected by this module,
it is recommended to not use "dot notation" because services can have a C(-)
character in their name which would result in invalid "dot notation", such as
C(ansible_facts.services.zuul-gateway). It is instead recommended to
using the string value of the service name as the key in order to obtain
the fact data value like C(ansible_facts.services['zuul-gateway'])
author:
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: populate service facts
service_facts:
- debug:
var: ansible_facts.services
'''
RETURN = '''
ansible_facts:
description: Facts to add to ansible_facts about the services on the system
returned: always
type: complex
contains:
services:
description: States of the services with service name as key.
returned: always
type: complex
contains:
source:
description: Init system of the service. One of C(systemd), C(sysv), C(upstart).
returned: always
type: str
sample: sysv
state:
description: State of the service. Either C(running), C(stopped), or C(unknown).
returned: always
type: str
sample: running
status:
description: State of the service. Either C(enabled), C(disabled), or C(unknown).
returned: systemd systems or RedHat/SUSE flavored sysvinit/upstart
type: str
sample: enabled
name:
description: Name of the service.
returned: always
type: str
sample: arp-ethers.service
'''
import re
from ansible.module_utils.basic import AnsibleModule
class BaseService(object):
def __init__(self, module):
self.module = module
self.incomplete_warning = False
class ServiceScanService(BaseService):
def gather_services(self):
services = {}
service_path = self.module.get_bin_path("service")
if service_path is None:
return None
initctl_path = self.module.get_bin_path("initctl")
chkconfig_path = self.module.get_bin_path("chkconfig")
# sysvinit
if service_path is not None and chkconfig_path is None:
rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
for line in stdout.split("\n"):
line_data = line.split()
if len(line_data) < 4:
continue # Skipping because we expected more data
service_name = " ".join(line_data[3:])
if line_data[1] == "+":
service_state = "running"
else:
service_state = "stopped"
services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
# Upstart
if initctl_path is not None and chkconfig_path is None:
p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
real_stdout = stdout.replace("\r", "")
for line in real_stdout.split("\n"):
m = p.match(line)
if not m:
continue
service_name = m.group('name')
service_goal = m.group('goal')
service_state = m.group('state')
if m.group('pid'):
pid = m.group('pid')
else:
pid = None # NOQA
payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
services[service_name] = payload
# RH sysvinit
elif chkconfig_path is not None:
# print '%s --status-all | grep -E "is (running|stopped)"' % service_path
p = re.compile(
r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
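            # Example chkconfig line the pattern above is meant to match
            # (illustrative):
            #   "crond  0:off  1:off  2:on  3:on  4:on  5:on  6:off"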
rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
# Check for special cases where stdout does not fit pattern
match_any = False
for line in stdout.split('\n'):
if p.match(line):
match_any = True
if not match_any:
p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
match_any = False
for line in stdout.split('\n'):
if p_simple.match(line):
match_any = True
if match_any:
# Try extra flags " -l --allservices" needed for SLES11
rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
elif '--list' in stderr:
# Extra flag needed for RHEL5
rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
for line in stdout.split('\n'):
m = p.match(line)
if m:
service_name = m.group('service')
service_state = 'stopped'
service_status = "disabled"
if m.group('rl3') == 'on':
service_status = "enabled"
rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
service_state = rc
if rc in (0,):
service_state = 'running'
# elif rc in (1,3):
else:
if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
self.incomplete_warning = True
continue
else:
service_state = 'stopped'
service_data = {"name": service_name, "state": service_state, "status": service_status, "source": "sysv"}
services[service_name] = service_data
return services
class SystemctlScanService(BaseService):
def systemd_enabled(self):
# Check if init is the systemd command, using comm as cmdline could be symlink
        try:
            with open('/proc/1/comm', 'r') as f:
                for line in f:
                    if 'systemd' in line:
                        return True
        except IOError:
            # If comm doesn't exist, old kernel, no systemd
            return False
        return False
def gather_services(self):
services = {}
if not self.systemd_enabled():
return None
systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
if systemctl_path is None:
return None
rc, stdout, stderr = self.module.run_command("%s list-units --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line and 'not-found' not in svc_line]:
service_name = line.split()[0]
if "running" in line:
state_val = "running"
else:
if 'failed' in line:
service_name = line.split()[1]
state_val = "stopped"
services[service_name] = {"name": service_name, "state": state_val, "status": "unknown", "source": "systemd"}
rc, stdout, stderr = self.module.run_command("%s list-unit-files --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line and 'not-found' not in svc_line]:
try:
service_name, status_val = line.split()
except ValueError:
self.module.fail_json(msg="Malformed output discovered from systemd list-unit-files: {0}".format(line))
if service_name not in services:
services[service_name] = {"name": service_name, "state": "unknown", "status": status_val, "source": "systemd"}
else:
services[service_name]["status"] = status_val
return services
def main():
module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
module.run_command_environ_update = dict(LANG="C", LC_ALL="C")
service_modules = (ServiceScanService, SystemctlScanService)
all_services = {}
incomplete_warning = False
for svc_module in service_modules:
svcmod = svc_module(module)
svc = svcmod.gather_services()
if svc is not None:
all_services.update(svc)
if svcmod.incomplete_warning:
incomplete_warning = True
if len(all_services) == 0:
results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
else:
results = dict(ansible_facts=dict(services=all_services))
if incomplete_warning:
results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
module.exit_json(**results)
if __name__ == '__main__':
main()
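# Example of consuming the collected facts from a playbook (illustrative;
# bracket access is the style recommended in the module notes above):
#
#   - service_facts:
#   - debug:
#       msg: "{{ ansible_facts.services['sshd.service'].state }}"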
| [
"[email protected]"
] | |
a538b99d09fb6b9ae2bcc408777ddcae02867ce4 | 6db7b8f3bba1fa4e9aec470373f86ef5077ed169 | /degroofpetercam/settings.py | e72b50dfff81424384fd874e148b1c442854b600 | [] | no_license | hristo-grudev/degroofpetercam | 810dbf26d36e08c890fc03bd2fdf3e77f791133d | f8cd6cbe665fa1817887965117f154ede9f6376d | refs/heads/main | 2023-03-04T13:28:04.152094 | 2021-02-19T10:19:27 | 2021-02-19T10:19:27 | 340,332,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | BOT_NAME = 'degroofpetercam'
SPIDER_MODULES = ['degroofpetercam.spiders']
NEWSPIDER_MODULE = 'degroofpetercam.spiders'
FEED_EXPORT_ENCODING = 'utf-8'
LOG_LEVEL = 'ERROR'
DOWNLOAD_DELAY = 0
ROBOTSTXT_OBEY = True
ITEM_PIPELINES = {
'degroofpetercam.pipelines.DegroofpetercamPipeline': 100,
}
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'
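# Illustrative run command (the spider name is whatever is declared under
# degroofpetercam/spiders/):
#   scrapy crawl <spider_name> -o output.json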
| [
"[email protected]"
] | |
fb0a573ec2a5f68e2ae5aa4aca36759cefd5d86f | 473035074bd546694d5e3dbe6decb900ba79e034 | /traffic fluid simulator/backend/env_3/matrices.py | 730beac8f975054a3ac9fc8b35d692736de800ac | [] | no_license | johny1614/magazyn | 35424203036191fb255c410412c195c8f41f0ba5 | a170fea3aceb20f59716a7b5088ccdcb6eea472f | refs/heads/master | 2022-03-26T01:10:04.472374 | 2019-09-19T16:34:22 | 2019-09-19T16:34:22 | 171,033,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | import numpy as np
# To trzeba bedzie zmieniac tutaj - bo na razie jest dla 1 enva
# Na froncie jest to net3
def hash_(action):
return tuple([tuple(a) for a in action])
x0 = np.array([1, 2, 3, 4, 5, 6]).transpose()  # note: .transpose() is a no-op on a 1-D array
#
# -A-B-
# -E-F
# -c_D-
T = np.array([[0, 0, 0, 0, 0, 0], # A
[1, 1, 0, 0, 0, 0], # B
[0, 0, 0, 0, 0, 0], # C
[0, 0, 1, 1, 0, 0], # D
[0, 1, 0, 1, 0, 0], # E
[0, 0, 0, 0, 1, 0]]) # F
# A B C D E F
A_ORANGE = hash_(np.array([[0, 0, 0, 0, 0, 0], # A
[1, 1, 0, 0, 0, 0], # B
[0, 0, 0, 0, 0, 0], # C
[0, 0, 1, 1, 0, 0], # D
[0, 0, 0, 0, 0, 0], # E
[0, 0, 0, 0, 1, 0]]))# F
# A B C D E F
UP_A_green = hash_(np.array([[0, 0, 0, 0, 0, 0], # A
[1, 0, 0, 0, 0, 0], # B
[0, 0, 0, 0, 0, 0], # C
[0, 0, 1, 1, 0, 0], # D
[0, 1, 0, 0, 0, 0], # E
[0, 0, 0, 0, 1, 0]])) # F
# A B C D E F
DOWN_A_green = hash_(np.array([[0, 0, 0, 0, 0, 0], # A
[1, 1, 0, 0, 0, 0], # B
[0, 0, 0, 0, 0, 0], # C
[0, 0, 1, 0, 0, 0], # D
[0, 0, 0, 1, 0, 0], # E
[0, 0, 0, 0, 1, 0]])) # F
u = np.array([[2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6,
8, 10, 2, 4, 6, 8, 10],
[1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9,
1, 3, 5, 7, 9]]).transpose()
# turns = [["", "", ""],
# ["", "", ""],
# ["right_down_slightly_", "right_up_slightly_", ""], ]
| [
"[email protected]"
] | |
411c0adba6ce69bb6c765f142c29d111fdd4a152 | 71a91cac814ec167c4194d8446fe4f94a222a10c | /cems/src/main/python/usecase02_avhrr_n07_n06.py | c06214382200e84880e94ffefae90335d20d6f4b | [] | no_license | bcdev/fiduceo | dab0b3ae6d708d7b74d4c9f17c7dedf2e68472a6 | aea0d74c38e0f503dfe10ddc392f9e36ad420b94 | refs/heads/master | 2021-01-23T15:51:06.518930 | 2017-01-19T13:47:55 | 2017-01-19T13:47:55 | 40,123,794 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from workflow import Workflow
w = Workflow('usecase02_avhrr_n07_n06', 7, '/group_workspaces/cems2/fiduceo/Software/mms/config')
w.add_primary_sensor('avhrr-n07', '1981-09-01', '1982-03-17', 'v01.2')
w.add_secondary_sensor('avhrr-n06', '1981-09-01', '1982-03-17', 'v01.2')
w.set_usecase_config('usecase-02.xml')
w.run_matchup(hosts=[('localhost', 24)]) | [
"[email protected]"
] | |
a87d65508e1eaf7f08e8b04b37df8871dfdf944f | fa148881657508f485936dd93ac9ca36072a6e87 | /setup.py | 35bf288f8dd73fffc4c28f395373c36f43b01f69 | [] | no_license | mwang87/qtp-mass-spec | c63eb04bee6581570cb4028fd34b178988ef5115 | 0e55d6187602ffaf074517a0a3672590b29365c6 | refs/heads/master | 2020-06-03T15:34:57.010394 | 2016-08-01T20:14:38 | 2016-08-01T20:14:38 | 64,163,383 | 0 | 0 | null | 2016-07-25T19:50:39 | 2016-07-25T19:50:38 | null | UTF-8 | Python | false | false | 1,709 | py | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Ming Wang.
#
# Distributed under the terms of the BSD 3-clause License License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from setuptools import setup
from glob import glob
__version__ = "0.1.0-dev"
classes = """
Development Status :: 3 - Alpha
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""
with open('README.rst') as f:
long_description = f.read()
classifiers = [s.strip() for s in classes.split('\n') if s]
setup(name='mass-spec Qiita Type Plugin',
version=__version__,
long_description=long_description,
license="BSD",
description='Qiita Type Plugin: mass-spec',
author="Ming Wang",
author_email="[email protected]",
url='https://github.com/qiita-spots/qtp-mass-spec',
test_suite='nose.collector',
packages=['qtp_mass_spec'],
package_data={'qtp_mass_spec': ['support_files/config_file.cfg']},
scripts=glob('scripts/*'),
extras_require={'test': ["nose >= 0.10.1", "pep8"]},
install_requires=['click >= 3.3', 'qiita_client'],
classifiers=classifiers
)
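# Illustrative install/test commands (assuming a standard setuptools checkout):
#   pip install .             # installs the plugin plus click/qiita_client
#   pip install ".[test]"     # also pulls in nose and pep8 via extras_require
#   python setup.py test      # runs the nose collector declared above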
| [
"[email protected]"
] | |
983d970ccf1b28206a311df1cc69e69010b1b3a4 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_papered.py | 2e58967760d55c4fb726221badf00dc0b2145964 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._paper import _PAPER
#calss header
class _PAPERED(_PAPER, ):
def __init__(self,):
_PAPER.__init__(self)
self.name = "PAPERED"
self.specie = 'verbs'
self.basic = "paper"
self.jsondata = {}
| [
"[email protected]"
] | |
8c69df3edbd1c4b38ffab45797db6c04d37e8b3e | 0386591b51fdbf5759faef6afb8729b64a3f1589 | /imageserver/urls.py | 1088fcd312dee73113cfe3bcc60a5b36990fc8d2 | [
"BSD-3-Clause"
] | permissive | giscube/giscube-admin | 1e155402e094eb4db1f7ca260a8d1402e27a31df | 4ce285a6301f59a8e48ecf78d58ef83c3827b5e0 | refs/heads/main | 2023-07-11T17:23:56.531443 | 2023-02-06T15:12:31 | 2023-02-06T15:12:31 | 94,087,469 | 7 | 1 | BSD-3-Clause | 2023-07-07T13:22:09 | 2017-06-12T11:12:56 | Python | UTF-8 | Python | false | false | 1,069 | py | from django.conf import settings
from django.urls import path, re_path
from django.views.decorators.csrf import csrf_exempt
from .admin_views import RasterOptimizerView
from .views import (ImageServerMapViewerView, ImageServerTileCacheTilesView, ImageServerTileCacheView,
ImageServerWMSView)
if not settings.GISCUBE_IMAGE_SERVER_DISABLED:
urlpatterns = [
path('services/<str:service_name>/map/',
ImageServerMapViewerView.as_view(), name='imageserver-map-view'),
path('services/<str:service_name>/tilecache/',
ImageServerTileCacheView.as_view(), name='imageserver-tilecache'),
path('services/<str:service_name>/tilecache/<int:z>/<int:x>/<int:y>.<str:image_format>',
ImageServerTileCacheTilesView.as_view(), name='imageserver-tilecache-tiles'),
re_path(r'^services/(?P<service_name>[^/]+)(.*)',
csrf_exempt(ImageServerWMSView.as_view()), name='imageserver'),
path('raster_optimizer/', RasterOptimizerView.as_view(), name='raster_optimizer'),
    ]
else:
    # Keep the module importable even when the image server is disabled.
    urlpatterns = []
| [
"[email protected]"
] | |
e3370fef915aee2f02a498d78b11c74e92d1404e | 1780cb2ba112f05f94d725b6cdab5ada09d89259 | /backend/home/migrations/0002_load_initial_data.py | 50373280982e1b13110eecb9c509a6ec7ac3961d | [] | no_license | crowdbotics-apps/medical-o-19581 | 257cf66392f01c760414ce0e25e12d41d831b7b0 | 84bd5e97699d2320f516627b0e950a2e6775b468 | refs/heads/master | 2022-11-28T16:03:58.515158 | 2020-08-15T16:58:09 | 2020-08-15T16:58:09 | 287,788,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "medical O"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">medical O</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "medical-o-19581.botics.co"
site_params = {
"name": "medical O",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
9a92432ffbb0a50fb5c847a399426c5b4fc5c0bb | ae87b11560c543cb678c52a28916ea2252d7aa52 | /tests/frontend/preg.py | a8e833e7f06543ee5209828ad3b7b34f0f1a4468 | [
"Apache-2.0"
] | permissive | CNR-ITTIG/plasodfaxp | 19ccf77d0be62cfa8a9b246eb6797cf64a480d80 | 923797fc00664fa9e3277781b0334d6eed5664fd | refs/heads/master | 2016-09-13T11:14:08.877399 | 2016-04-11T15:01:42 | 2016-04-11T15:01:42 | 55,975,921 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,673 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the preg front-end."""
import unittest
from dfvfs.helpers import source_scanner
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.engine import knowledge_base
from plaso.frontend import preg
from tests.frontend import test_lib
class PregFrontendTest(test_lib.FrontendTestCase):
"""Tests for the preg front-end."""
def _ConfigureSingleFileTest(self, knowledge_base_values=None):
"""Configure a single file test.
Args:
knowledge_base_values: optional dict containing the knowledge base
values.
"""
self._front_end = preg.PregFrontend()
self._front_end.SetSingleFile(True)
registry_file_path = self._GetTestFilePath([u'SYSTEM'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=registry_file_path)
self._front_end.SetSourcePath(registry_file_path)
self._front_end.SetSourcePathSpecs([path_spec])
self._knowledge_base_object = knowledge_base.KnowledgeBase()
if knowledge_base_values:
for identifier, value in knowledge_base_values.iteritems():
self._knowledge_base_object.SetValue(identifier, value)
self._front_end.SetKnowledgeBase(self._knowledge_base_object)
def _ConfigureStorageMediaFileTest(self):
"""Configure a test against a storage media file."""
self._front_end = preg.PregFrontend()
self._front_end.SetSingleFile(False)
self._knowledge_base_object = knowledge_base.KnowledgeBase()
self._front_end.SetKnowledgeBase(self._knowledge_base_object)
storage_media_path = self._GetTestFilePath([u'registry_test.dd'])
test_source_scanner = source_scanner.SourceScanner()
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(storage_media_path)
test_source_scanner.Scan(scan_context)
# Getting the most upper node.
scan_node = scan_context.GetRootScanNode()
while scan_node.sub_nodes:
scan_node = scan_node.sub_nodes[0]
self._front_end.SetSourcePath(storage_media_path)
self._front_end.SetSourcePathSpecs([scan_node.path_spec])
def testExpandKeysRedirect(self):
"""Tests the ExpandKeysRedirect function."""
self._ConfigureSingleFileTest()
registry_key_paths = [
u'\\Software\\Foobar',
u'\\Software\\Key\\SubKey\\MagicalKey',
u'\\Canons\\Blast\\Night',
u'\\EvilCorp\\World Plans\\Takeover']
self._front_end.ExpandKeysRedirect(registry_key_paths)
added_key_paths = [
u'\\Software\\Wow6432Node\\Foobar',
u'\\Software\\Wow6432Node\\Key\\SubKey\\MagicalKey']
for added_key_path in added_key_paths:
self.assertIn(added_key_path, registry_key_paths)
def testGetRegistryFilePaths(self):
"""Tests the GetRegistryFilePaths function."""
self._ConfigureSingleFileTest()
expected_paths = [u'%UserProfile%\\NTUSER.DAT']
registry_file_types = [u'NTUSER']
paths = self._front_end.GetRegistryFilePaths(registry_file_types)
self.assertEqual(sorted(paths), sorted(expected_paths))
expected_paths = [u'%SystemRoot%\\System32\\config\\SOFTWARE']
registry_file_types = [u'SOFTWARE']
paths = self._front_end.GetRegistryFilePaths(registry_file_types)
self.assertEqual(sorted(paths), sorted(expected_paths))
def testGetRegistryHelpers(self):
"""Test the GetRegistryHelpers function."""
self._ConfigureSingleFileTest()
with self.assertRaises(ValueError):
_ = self._front_end.GetRegistryHelpers()
registry_helpers = self._front_end.GetRegistryHelpers(
registry_file_types=[u'SYSTEM'])
    self.assertEqual(len(registry_helpers), 1)
registry_helper = registry_helpers[0]
file_path = self._GetTestFilePath([u'SYSTEM'])
    self.assertEqual(registry_helper.path, file_path)
self._ConfigureStorageMediaFileTest()
registry_helpers = self._front_end.GetRegistryHelpers(
registry_file_types=[u'NTUSER'])
    self.assertEqual(len(registry_helpers), 3)
registry_helper = registry_helpers[0]
registry_helper.Open()
expected_file_type = preg.REGISTRY_FILE_TYPE_NTUSER
    self.assertEqual(registry_helper.file_type, expected_file_type)
    self.assertEqual(registry_helper.name, u'NTUSER.DAT')
    self.assertEqual(registry_helper.collector_name, u'TSK')
registry_helper.Close()
registry_helpers = self._front_end.GetRegistryHelpers(
plugin_names=[u'userassist'])
    self.assertEqual(len(registry_helpers), 3)
registry_helpers = self._front_end.GetRegistryHelpers(
registry_file_types=[u'SAM'])
    self.assertEqual(len(registry_helpers), 1)
# TODO: Add a test for getting Registry helpers from a storage media file
# that contains VSS stores.
def testGetRegistryPlugins(self):
"""Test the GetRegistryPlugin function."""
self._ConfigureSingleFileTest()
usb_plugins = self._front_end.GetRegistryPlugins(u'usb')
self.assertIsNotNone(usb_plugins)
usb_plugin_names = [plugin.NAME for plugin in usb_plugins]
self.assertIn(u'windows_usb_devices', usb_plugin_names)
self.assertIn(u'windows_usbstor_devices', usb_plugin_names)
other_plugins = self._front_end.GetRegistryPlugins(u'user')
self.assertIsNotNone(other_plugins)
other_plugin_names = [plugin.NAME for plugin in other_plugins]
self.assertIn(u'userassist', other_plugin_names)
def testParseRegistry(self):
"""Test the ParseRegistryFile and ParseRegistryKey functions."""
self._ConfigureSingleFileTest()
registry_helpers = self._front_end.GetRegistryHelpers(
registry_file_types=[u'SYSTEM'])
registry_helper = registry_helpers[0]
plugins = self._front_end.GetRegistryPluginsFromRegistryType(u'SYSTEM')
key_list = []
plugin_list = []
for plugin in plugins:
plugin_list.append(plugin.NAME)
key_list.extend(plugin.GetKeyPaths())
self._front_end.ExpandKeysRedirect(key_list)
parsed_data = self._front_end.ParseRegistryFile(
registry_helper, key_paths=key_list, use_plugins=plugin_list)
for key_parsed in parsed_data:
self.assertIn(key_parsed, key_list)
usb_parsed_data = parsed_data.get(
u'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Enum\\USBSTOR', None)
self.assertIsNotNone(usb_parsed_data)
usb_key = usb_parsed_data.get(u'key', None)
self.assertIsNotNone(usb_key)
expected_key_path = (
u'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Enum\\USBSTOR')
self.assertEquals(usb_key.path, expected_key_path)
data = usb_parsed_data.get(u'data', None)
self.assertIsNotNone(data)
plugin_names = [plugin.NAME for plugin in data.keys()]
self.assertIn(u'windows_usbstor_devices', plugin_names)
usb_plugin = None
for plugin in data.keys():
if plugin.NAME == u'windows_usbstor_devices':
usb_plugin = plugin
break
event_objects = data.get(usb_plugin, [])
    self.assertEqual(len(event_objects), 3)
event_object = event_objects[2]
    self.assertEqual(event_object.data_type, u'windows:registry:key_value')
parse_key_data = self._front_end.ParseRegistryKey(
usb_key, registry_helper, use_plugins=u'windows_usbstor_devices')
    self.assertEqual(len(parse_key_data), 1)
parsed_key_value = parse_key_data.values()[0]
for index, event_object in enumerate(event_objects):
parsed_key_event = parsed_key_value[index]
      self.assertEqual(
          event_object.EqualityString(), parsed_key_event.EqualityString())
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
b35fe4f83cad4155a1a9b795961c33cce5c14570 | 7a527060afabd2e0867d5dcf4b75592b43ef5005 | /Leetcode/二叉树/226. 翻转二叉树.py | dabbaea2e39fd8bfd026b47ad19dbeb63fd57c87 | [] | no_license | Stevenzzz1996/MLLCV | ff01a276cf40142c1b28612cb5b43e563ad3a24a | 314953b759212db5ad07dcb18854bf6d120ba172 | refs/heads/master | 2023-02-10T18:11:30.399042 | 2021-01-05T12:05:21 | 2021-01-05T12:05:21 | 267,804,954 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | #!usr/bin/env python
# -*- coding:utf-8 -*-
# author: sfhong2020 time:2020/5/7 20:47
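# Definition for a binary tree node, as provided by LeetCode (assumed here):
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right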
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root: return
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root | [
"[email protected]"
] | |
47421e349ecc14e6f4f3ea1699804de3ccc0655a | 46b086b8cd119f9067e6ab50ba0038e4703d6728 | /nlp/hotel_review_enc_dec_rnn.py | 5f0e1658e8244cd814034527718d56be30048751 | [] | no_license | wulfebw/rnn | 874ec8d1d53efe25ff7ab36c1cdc53019123f83a | 7cdba1ad581c61d08d5e8c4b22f7b952da3a64e1 | refs/heads/master | 2021-01-21T13:11:38.211329 | 2015-09-17T21:05:47 | 2015-09-17T21:05:47 | 42,426,369 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,212 | py | """
:description: This file contains 3 classes:
(1) An encoder-decoder recurrent neural net that contains an encoder and a deocder and orchestrates forward and backward propagation through them
(2) An encoder, which takes a input sequence and generates a hidden state aka representation of that sequence
(3) A decoder, which takes a hidden state as input and generates output
"""
import numpy as np
import theano
from theano import scan
import theano.tensor as T
from pylearn2.expr.nnet import arg_of_softmax
from pylearn2.utils import sharedX
class EncoderDecoderRNN(object):
    def __init__(self,
                 encoder,
                 decoder,
                 cost=None,
                 return_indices=None):
"""
:description: A model that contains an encoder and decoder and orchestrates their combined usage and training
"""
self.encoder = encoder
self.decoder = decoder
self.cost = cost
self.return_indices = return_indices
def fprop(self, input, mask):
return self.decoder.fprop(self.encoder.fprop(input, mask))
def get_cost_updates(self, inputs, targets, mask, learning_rate=0.001, momentum=0.2):
predictions = self.fprop(inputs, mask)
if self.cost is not None:
cost = self.cost(predictions, targets)
else:
cost = T.mean(T.sqr(targets - predictions))
        params = self.get_params()
        gparams = T.grad(cost, params)
        # Classical momentum needs persistent per-parameter velocity state;
        # the old try/except always raised (a Python list cannot be scaled)
        # and silently dropped the momentum term.
        if not hasattr(self, 'velocities'):
            self.velocities = [
                theano.shared(np.zeros(p.get_value(borrow=True).shape,
                                       dtype=theano.config.floatX))
                for p in params]
        updates = []
        for param, gparam, velocity in zip(params, gparams, self.velocities):
            new_velocity = momentum * velocity + (1. - momentum) * gparam
            updates.append((velocity, new_velocity))
            updates.append((param, param - learning_rate * new_velocity))
        return (cost, updates)
def get_params(self):
return self.encoder.params + self.decoder.params
class DecoderLSTM(object):
"""
:description: A decoder class. Takes a hidden state and generates an output sequence.
"""
def __init__(self,
n_hid,
n_classes,
layer_name,
rng=None,
return_indices=None,
param_init_range=0.02,
forget_gate_init_bias=0.05,
input_gate_init_bias=0.,
output_gate_init_bias=0.,
dropout_prob=0.0):
        self.n_hid = n_hid
        self.n_classes = n_classes
        self.layer_name = layer_name
        self.param_init_range = param_init_range
        self.return_indices = return_indices
        self.forget_gate_init_bias = forget_gate_init_bias
        self.input_gate_init_bias = input_gate_init_bias
        self.output_gate_init_bias = output_gate_init_bias
        # rng was accepted but never stored, which made self.rng below a
        # NameError; fall back to a default numpy RandomState (seed assumed).
        self.rng = rng if rng is not None else np.random.RandomState(1234)
# only create random arrays once and reuse via copy()
irange = self.param_init_range
# input-to-hidden array, used for incorporating the generated output (conditioned on output)
init_Wxh = self.rng.uniform(-irange, irange, (self.n_classes, self.n_hid))
# hidden-to-hidden array
init_Whh = self.rng.uniform(-irange, irange, (self.n_hid, self.n_hid))
# hidden-to-output array, used only by the 'softmax' portion of the decoder
init_Whx = self.rng.uniform(-irange, irange, (self.n_hid, self.n_classes))
# input-to-hidden array, used for incorporating the generated output
self.Wxh = theano.shared(value=init_Wxh, name=self.layer_name + '_Wxh', borrow=True)
self.bxh = theano.shared(value=np.zeros(self.n_hid), name='bhx', borrow=True)
# hidden-to-hidden (rows, cols) = (n_hidden, n_hidden)
self.Whh = theano.shared(value=init_Whh, name=self.layer_name + '_Whh', borrow=True)
# hidden-to-output (rows, cols) = (n_hidden, n_classes)
self.Whx = theano.shared(value=init_Whx, name=self.layer_name + '_Whx', borrow=True)
self.bhx = theano.shared(value=np.zeros(self.n_classes), name='bhx', borrow=True)
# lstm parameters
# Output gate switch
self.O_b = sharedX(np.zeros((self.n_hid,)) + self.output_gate_init_bias, name=(self.layer_name + '_O_b'))
self.O_x = sharedX(init_Wxh, name=(self.layer_name + '_O_x'))
self.O_h = sharedX(init_Whh, name=(self.layer_name + '_O_h'))
self.O_c = sharedX(init_Whh.copy(), name=(self.layer_name + '_O_c'))
# Input gate switch
self.I_b = sharedX(np.zeros((self.n_hid,)) + self.input_gate_init_bias, name=(self.layer_name + '_I_b'))
self.I_x = sharedX(init_Wxh.copy(), name=(self.layer_name + '_I_x'))
self.I_h = sharedX(init_Whh.copy(), name=(self.layer_name + '_I_h'))
self.I_c = sharedX(init_Whh.copy(), name=(self.layer_name + '_I_c'))
# Forget gate switch
self.F_b = sharedX(np.zeros((self.n_hid,)) + self.forget_gate_init_bias, name=(self.layer_name + '_F_b'))
self.F_x = sharedX(init_Wxh.copy(), name=(self.layer_name + '_F_x'))
self.F_h = sharedX(init_Whh.copy(), name=(self.layer_name + '_F_h'))
self.F_c = sharedX(init_Whh.copy(), name=(self.layer_name + '_F_c'))
self.params = [self.Wxh, self.bxh, self.Whh, self.Whx, self.bhx, self.O_b, self.O_x, self.O_h, self.O_c, self.I_b, self.I_x, self.I_h, self.I_c, self.F_b, self.F_x, self.F_h, self.F_c]
def fprop(self, encoding):
"""
:description: calls decode function. Just here for some consistency.
"""
return self.decode(encoding)
def decode(self, encoding):
"""
:description: decodes an encoding into an output sequence.
:type encoding: tensor3
:param encoding: a batch of encodings with the shape (n_time_steps, n_batches, n_hidden). The reason n_time_steps takes the first dimension spot is that this allows for processing with the theano.scan function.
"""
pass
    def decode_step(self):
pass
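# --- Illustrative sketch (added; not from the original repo) ---
# One plausible decode step consistent with the gate parameters initialised
# above; the exact wiring (peephole terms, recurrences) is an assumption.
#
#     def decode_step(self, x_t, h_tm1, c_tm1):
#         i = T.nnet.sigmoid(T.dot(x_t, self.I_x) + T.dot(h_tm1, self.I_h)
#                            + T.dot(c_tm1, self.I_c) + self.I_b)
#         f = T.nnet.sigmoid(T.dot(x_t, self.F_x) + T.dot(h_tm1, self.F_h)
#                            + T.dot(c_tm1, self.F_c) + self.F_b)
#         c_t = f * c_tm1 + i * T.tanh(T.dot(x_t, self.Wxh)
#                                      + T.dot(h_tm1, self.Whh) + self.bxh)
#         o = T.nnet.sigmoid(T.dot(x_t, self.O_x) + T.dot(h_tm1, self.O_h)
#                            + T.dot(c_t, self.O_c) + self.O_b)
#         h_t = o * T.tanh(c_t)
#         y_t = T.nnet.softmax(T.dot(h_t, self.Whx) + self.bhx)
#         return h_t, c_t, y_t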
| [
"[email protected]"
] | |
125b74638bf1b8a47bbe87218c2f4e240f9982c0 | b6a84594f8c29d968014faaddd49abeb7537a5fc | /python/529.minesweeper.py | 45ee8e1b8e473f48e0b9a9c63955466d1e6c4ffa | [] | no_license | nickyfoto/lc | 8a6af3df114e693e265d0ede03f4d4e1283e010e | 3633b4df3e24968057c7d684689b931c5a8032d3 | refs/heads/master | 2020-09-16T19:23:07.765917 | 2020-06-07T17:18:06 | 2020-06-07T17:18:06 | 223,866,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,137 | py | #
# @lc app=leetcode id=529 lang=python3
#
# [529] Minesweeper
#
# https://leetcode.com/problems/minesweeper/description/
#
# algorithms
# Medium (54.13%)
# Total Accepted: 38.5K
# Total Submissions: 71.1K
# Testcase Example: '[["E","E","E","E","E"],["E","E","M","E","E"],["E","E","E","E","E"],["E","E","E","E","E"]]\n[3,0]'
#
# Let's play the minesweeper game (Wikipedia, online game)!
#
# You are given a 2D char matrix representing the game board. 'M' represents an
# unrevealed mine, 'E' represents an unrevealed empty square, 'B' represents a
# revealed blank square that has no adjacent (above, below, left, right, and
# all 4 diagonals) mines, digit ('1' to '8') represents how many mines are
# adjacent to this revealed square, and finally 'X' represents a revealed
# mine.
#
# Now given the next click position (row and column indices) among all the
# unrevealed squares ('M' or 'E'), return the board after revealing this
# position according to the following rules:
#
#
# If a mine ('M') is revealed, then the game is over - change it to 'X'.
# If an empty square ('E') with no adjacent mines is revealed, then change it
# to revealed blank ('B') and all of its adjacent unrevealed squares should be
# revealed recursively.
# If an empty square ('E') with at least one adjacent mine is revealed, then
# change it to a digit ('1' to '8') representing the number of adjacent
# mines.
# Return the board when no more squares will be revealed.
#
#
#
#
# Example 1:
#
#
# Input:
#
# [['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'M', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E']]
#
# Click : [3,0]
#
# Output:
#
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'M', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Explanation:
#
#
#
# Example 2:
#
#
# Input:
#
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'M', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Click : [1,2]
#
# Output:
#
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'X', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Explanation:
#
#
#
#
#
# Note:
#
#
# The range of the input matrix's height and width is [1,50].
# The click position will only be an unrevealed square ('M' or 'E'), which also
# means the input board contains at least one clickable square.
# The input board won't be a stage when game is over (some mines have been
# revealed).
# For simplicity, not mentioned rules should be ignored in this problem. For
# example, you don't need to reveal all the unrevealed mines when the game is
# over, consider any cases that you will win the game or flag any squares.
#
#
#
from collections import defaultdict
from pprint import pprint
class Solution:
# def updateBoard(self, board: List[List[str]], click: List[int]) -> List[List[str]]:
def updateBoard(self, board, click):
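        # Approach (summary): reveal the clicked cell; if it borders no mines
        # mark it 'B' and DFS-reveal all 8 neighbours (rule 2), otherwise
        # write its adjacent-mine count as a digit (rule 3). Mutating the
        # board doubles as the "visited" marker, so the recursion terminates.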
class Node:
def __init__(self, r,c):
self.r = r
self.c = c
mine = self.count_mine_around()
if not mine:
board[self.r][self.c] = 'B'
else:
board[self.r][self.c] = str(mine)
def mine(self, r,c):
if board[r][c] == 'M':
return 1
else:
return 0
def border_valid(self, r,c):
return r >= 0 and r < n_rows and c >= 0 and c < n_cols
def check(self, r,c, func):
if not self.border_valid(r,c):
return 0
return func(r,c)
def count_mine_around(self):
res = 0
res += self.check(*self.up_left(),self.mine)
res += self.check(*self.up(),self.mine)
res += self.check(*self.up_right(),self.mine)
res += self.check(*self.left(),self.mine)
res += self.check(*self.right(),self.mine)
res += self.check(*self.down_left(),self.mine)
res += self.check(*self.down(),self.mine)
res += self.check(*self.down_right(),self.mine)
return res
def valid(self, r,c):
if not explored[(r,c)] and board[r][c] == 'E':
return Node(r,c)
else:
return None
def up_left(self):
return self.r - 1, self.c - 1
def up_right(self):
return self.r - 1, self.c + 1
def up(self):
return self.r - 1, self.c
def left(self):
return self.r, self.c - 1
def right(self):
return self.r, self.c + 1
def down_left(self):
return self.r + 1, self.c -1
def down(self):
return self.r + 1, self.c
def down_right(self):
return self.r + 1, self.c + 1
def explore(self, r,c):
node = self.check(r,c, self.valid)
if node:
node.dfs()
def dfs(self):
if board[self.r][self.c] == 'B':
# up_left = self.check(*self.up_left(), self.valid)
# if up_left:
# up_left.dfs()
self.explore(*self.up_left())
self.explore(*self.up())
self.explore(*self.up_right())
self.explore(*self.left())
self.explore(*self.right())
self.explore(*self.down())
self.explore(*self.down_left())
self.explore(*self.down_right())
n_rows = len(board)
n_cols = len(board[0])
r, c = tuple(click)
if board[r][c] == 'M':
board[r][c] = 'X'
return board
else:
explored = defaultdict(lambda: False)
explored[(r,c)] = True
node = Node(r,c)
node.dfs()
# pprint(board)
return board
# return board
# s = Solution()
# board = [['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'M', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E']]
# click = [3,0]
# print(s.updateBoard(board, click))
# board = [["E"]]
# click = [0,0]
# print(s.updateBoard(board, click))
# board = [["E","E","E","E","E","E","E","E"],
# ["E","E","E","E","E","E","E","M"],
# ["E","E","M","E","E","E","E","E"],
# ["M","E","E","E","E","E","E","E"],
# ["E","E","E","E","E","E","E","E"],["E","E","E","E","E","E","E","E"],["E","E","E","E","E","E","E","E"],["E","E","M","M","E","E","E","E"]]
# click = [0,0]
# print(s.updateBoard(board, click) == [["B","B","B","B","B","B","1","E"],
# ["B","1","1","1","B","B","1","M"],
# ["1","2","M","1","B","B","1","1"],
# ["M","2","1","1","B","B","B","B"],
# ["1","1","B","B","B","B","B","B"],
# ["B","B","B","B","B","B","B","B"],
# ["B","1","2","2","1","B","B","B"],
# ["B","1","M","M","1","B","B","B"]]) | [
"[email protected]"
] | |
b47c5764a7a3b6aeb63a374751b25c7caca6fbf4 | 456433ac78b70cb8ae076ae166a85e349f181d7f | /systems/KURSSKLAD/KURSTERM/SITEVERIFY/templates/palIWares.py | e78510698b4923b989120349376a9c0c642113ff | [] | no_license | shybkoi/WMS-Demo | 854c1679b121c68323445b60f3992959f922be8d | 2525559c4f56654acfbc21b41b3f5e40387b89e0 | refs/heads/master | 2021-01-23T01:51:20.074825 | 2017-03-23T11:51:18 | 2017-03-23T11:51:18 | 85,937,726 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 24,456 | py | #!/usr/bin/env python
# -*- coding: cp1251 -*-
##################################################
## DEPENDENCIES
import sys
import os
import os.path
from os.path import getmtime, exists
import time
import types
import __builtin__
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import DummyTransaction
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from systems.KURSSKLAD.KURSTERM.templates.main import main
from systems.KURSSKLAD.cheetahutils import viewQuantity
##################################################
## MODULE CONSTANTS
try:
True, False
except NameError:
True, False = (1==1), (1==0)
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.0rc8'
__CHEETAH_versionTuple__ = (2, 0, 0, 'candidate', 8)
__CHEETAH_genTime__ = 1482336170.8629999
__CHEETAH_genTimestamp__ = 'Wed Dec 21 18:02:50 2016'
__CHEETAH_src__ = 'systems\\KURSSKLAD\\KURSTERM\\SITEVERIFY\\templates\\palIWares.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Dec 21 09:10:13 2016'
__CHEETAH_docstring__ = 'Autogenerated by CHEETAH: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class palIWares(main):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
main.__init__(self, *args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def mainData(self, **KWS):
## CHEETAH: generated from #def mainData at line 5, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(''' <form action="palScan">
<input type="hidden" name="id" value="''')
_v = VFFSL(SL,"PAL_ID",True) # '$PAL_ID' on line 7, col 47
if _v is not None: write(_filter(_v, rawExpr='$PAL_ID')) # from line 7, col 47.
write('''">
<input type="hidden" name="wid" value="''')
_v = VFFSL(SL,"WID",True) # '$WID' on line 8, col 48
if _v is not None: write(_filter(_v, rawExpr='$WID')) # from line 8, col 48.
write('''">
''')
if False:
_('ШК')
_v = VFFSL(SL,"_",False)('ШК') # "$_('\xd8\xca')" on line 9, col 9
if _v is not None: write(_filter(_v, rawExpr="$_('\xd8\xca')")) # from line 9, col 9.
write(''': <input type="text" id=":scan:text" name="barcode" title="''')
if False:
_('МП')
_v = VFFSL(SL,"_",False)('МП') # "$_('\xcc\xcf')" on line 9, col 76
if _v is not None: write(_filter(_v, rawExpr="$_('\xcc\xcf')")) # from line 9, col 76.
write(''', ''')
if False:
_('Поддон')
_v = VFFSL(SL,"_",False)('Поддон') # "$_('\xcf\xee\xe4\xe4\xee\xed')" on line 9, col 86
if _v is not None: write(_filter(_v, rawExpr="$_('\xcf\xee\xe4\xe4\xee\xed')")) # from line 9, col 86.
write(''', ''')
if False:
_('Товар')
_v = VFFSL(SL,"_",False)('Товар') # "$_('\xd2\xee\xe2\xe0\xf0')" on line 9, col 100
if _v is not None: write(_filter(_v, rawExpr="$_('\xd2\xee\xe2\xe0\xf0')")) # from line 9, col 100.
write('''"><br>
\t</form><br>
''')
if VFFSL(SL,"varExists",False)('$PAL_NUM') and VFFSL(SL,"PAL_NUM",True): # generated from line 12, col 2
write('''\t\t<b><u>''')
_v = VFFSL(SL,"PAL_NUM",True) # '$PAL_NUM' on line 13, col 9
if _v is not None: write(_filter(_v, rawExpr='$PAL_NUM')) # from line 13, col 9.
write('''</u></b>(''')
_v = VFFSL(SL,"PAL_SNAME",True) # '$PAL_SNAME' on line 13, col 26
if _v is not None: write(_filter(_v, rawExpr='$PAL_SNAME')) # from line 13, col 26.
write(''')
''')
else: # generated from line 14, col 2
write('''\t <b>''')
_v = VFFSL(SL,"PAL_SNAME",True) # '$PAL_SNAME' on line 15, col 9
if _v is not None: write(_filter(_v, rawExpr='$PAL_SNAME')) # from line 15, col 9.
write('''</b>
''')
write('''
<br><b><u>(''')
_v = VFFSL(SL,"WCODE",True) # '$WCODE' on line 18, col 16
if _v is not None: write(_filter(_v, rawExpr='$WCODE')) # from line 18, col 16.
write(''')</u></b>''')
_v = VFFSL(SL,"WNAME",True) # '$WNAME' on line 18, col 31
if _v is not None: write(_filter(_v, rawExpr='$WNAME')) # from line 18, col 31.
write('''<br>
''')
if VFFSL(SL,"VWUID",True): # generated from line 19, col 5
write(''' <b>''')
_v = VFFSL(SL,"VWUCODE",True) # '$VWUCODE' on line 20, col 12
if _v is not None: write(_filter(_v, rawExpr='$VWUCODE')) # from line 20, col 12.
write(''' = ''')
_orig_filter_77433890 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"VWUFACTOR",True) # '$VWUFACTOR' on line 20, col 39
if _v is not None: write(_filter(_v, rawExpr='$VWUFACTOR')) # from line 20, col 39.
_filter = _orig_filter_77433890
write(''' ''')
_v = VFFSL(SL,"MWUCODE",True) # '$MWUCODE' on line 20, col 62
if _v is not None: write(_filter(_v, rawExpr='$MWUCODE')) # from line 20, col 62.
write(''' </b><br>
''')
if VFFSL(SL,"varExists",False)('$PALSELECT_TASKID') and VFFSL(SL,"PALSELECT_TASKID",True): # generated from line 22, col 5
write('''\t<hr>
''')
if VFFSL(SL,"varExists",False)('$PALSELECT_CLIENTNAME') and VFFSL(SL,"PALSELECT_CLIENTNAME",True): # generated from line 24, col 6
write('''\t <b><u>''')
_v = VFFSL(SL,"PALSELECT_CLIENTNAME",True) # '$PALSELECT_CLIENTNAME' on line 25, col 16
if _v is not None: write(_filter(_v, rawExpr='$PALSELECT_CLIENTNAME')) # from line 25, col 16.
write('''</u></b><br>
''')
if VFFSL(SL,"varExists",False)('$PALSELECT_DOCID') and VFFSL(SL,"PALSELECT_DOCID",True): # generated from line 27, col 6
write('''\t ''')
_orig_filter_89431690 = _filter
filterName = 'DateFilter2'
if self._CHEETAH__filters.has_key("DateFilter2"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"PALSELECT_DOCDATE",True) # '$PALSELECT_DOCDATE' on line 28, col 29
if _v is not None: write(_filter(_v, rawExpr='$PALSELECT_DOCDATE')) # from line 28, col 29.
_filter = _orig_filter_89431690
write(''' ''')
if False:
_('№')
_v = VFFSL(SL,"_",False)('№') # "$_('\xb9')" on line 28, col 60
if _v is not None: write(_filter(_v, rawExpr="$_('\xb9')")) # from line 28, col 60.
write(''' ''')
_v = VFFSL(SL,"PALSELECT_DOCNUM",True) # '$PALSELECT_DOCNUM' on line 28, col 68
if _v is not None: write(_filter(_v, rawExpr='$PALSELECT_DOCNUM')) # from line 28, col 68.
write(''' <br>
''')
if VFFSL(SL,"varExists",False)('$PALSELECT_TASKID') and VFFSL(SL,"PALSELECT_TASKID",True): # generated from line 30, col 6
write('''\t ''')
_v = VFFSL(SL,"PALSELECT_TASKID",True) # '$PALSELECT_TASKID' on line 31, col 11
if _v is not None: write(_filter(_v, rawExpr='$PALSELECT_TASKID')) # from line 31, col 11.
write('''
''')
if VFFSL(SL,"varExists",False)('$PALSELECT_MANFIO') and VFFSL(SL,"PALSELECT_MANFIO",True): # generated from line 32, col 10
write('''\t - <b>''')
_v = VFFSL(SL,"PALSELECT_MANFIO",True) # '$PALSELECT_MANFIO' on line 33, col 19
if _v is not None: write(_filter(_v, rawExpr='$PALSELECT_MANFIO')) # from line 33, col 19.
write('''</b>
''')
write(''' <hr>
''')
else: # generated from line 37, col 5
write(''' <br>
''')
write('''
''')
if VFFSL(SL,"varExists",False)('$datalist') and VFFSL(SL,"datalist",True) and len(VFFSL(SL,"datalist",True))>0: # generated from line 41, col 5
wuamount = 0
amount = 0
write(''' <form action=palletWaresSave method=post>
<input type=hidden name=waresid value=''')
_v = VFFSL(SL,"wid",True) # '$wid' on line 45, col 47
if _v is not None: write(_filter(_v, rawExpr='$wid')) # from line 45, col 47.
write('''>
<input type=hidden name=palletid value=''')
_v = VFFSL(SL,"pal_id",True) # '$pal_id' on line 46, col 48
if _v is not None: write(_filter(_v, rawExpr='$pal_id')) # from line 46, col 48.
write('''>
<input type=hidden name=dbeg value="''')
_v = VFFSL(SL,"CURRENTDATETIME",True) # '$CURRENTDATETIME' on line 47, col 45
if _v is not None: write(_filter(_v, rawExpr='$CURRENTDATETIME')) # from line 47, col 45.
write('''">
''')
if VFFSL(SL,"varExists",False)('$backurl') and VFFSL(SL,"backurl",True): # generated from line 48, col 9
write(''' <input type=hidden name=backurl value="''')
_v = VFFSL(SL,"backurl",True) # '$backurl' on line 49, col 52
if _v is not None: write(_filter(_v, rawExpr='$backurl')) # from line 49, col 52.
write('''">
''')
write(''' <table>
<thead>
<tr>
<th>''')
if False:
_('Дата')
_v = VFFSL(SL,"_",False)('Дата') # "$_('\xc4\xe0\xf2\xe0')" on line 54, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xc4\xe0\xf2\xe0')")) # from line 54, col 25.
write('''</th>
<th>
<select name=wuid id=":focus:">
<option value=''')
_v = VFFSL(SL,"MWUID",True) # '$MWUID' on line 57, col 43
if _v is not None: write(_filter(_v, rawExpr='$MWUID')) # from line 57, col 43.
write(''' selected>''')
_v = VFFSL(SL,"MWUCODE",True) # '$MWUCODE' on line 57, col 59
if _v is not None: write(_filter(_v, rawExpr='$MWUCODE')) # from line 57, col 59.
write('''</option>
''')
if VFFSL(SL,"VWUID",True): # generated from line 58, col 27
write(''' <option value=''')
_v = VFFSL(SL,"VWUID",True) # '$VWUID' on line 59, col 43
if _v is not None: write(_filter(_v, rawExpr='$VWUID')) # from line 59, col 43.
write('''>''')
_v = VFFSL(SL,"VWUCODE",True) # '$VWUCODE' on line 59, col 50
if _v is not None: write(_filter(_v, rawExpr='$VWUCODE')) # from line 59, col 50.
write('''</option>
''')
write(''' </select>
</th>
<th>''')
if False:
_('Кол-во')
_v = VFFSL(SL,"_",False)('Кол-во') # "$_('\xca\xee\xeb-\xe2\xee')" on line 63, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xca\xee\xeb-\xe2\xee')")) # from line 63, col 25.
write('''</th>
<th>''')
if False:
_('Резерв')
_v = VFFSL(SL,"_",False)('Резерв') # "$_('\xd0\xe5\xe7\xe5\xf0\xe2')" on line 64, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xd0\xe5\xe7\xe5\xf0\xe2')")) # from line 64, col 25.
write('''</th>
<tr>
</thead>
<tbody>
''')
for item in VFFSL(SL,"datalist",True): # generated from line 68, col 13
if VFFSL(SL,"item.canedit",True) == '0': # generated from line 69, col 17
trClass = 'class="inactive"'
else: # generated from line 71, col 17
trClass = ''
write(''' <tr ''')
_v = VFFSL(SL,"trClass",True) # '$trClass' on line 74, col 21
if _v is not None: write(_filter(_v, rawExpr='$trClass')) # from line 74, col 21.
write('''>
<td>''')
_orig_filter_98946014 = _filter
filterName = 'DateFilter2'
if self._CHEETAH__filters.has_key("DateFilter2"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.productdate",True) # '$item.productdate' on line 75, col 44
if _v is not None: write(_filter(_v, rawExpr='$item.productdate')) # from line 75, col 44.
_filter = _orig_filter_98946014
write('''</td>
''')
if VFFSL(SL,"item.canedit",True) == '1': # generated from line 76, col 19
write(''' <td><input type="text" name="WL_''')
_v = VFFSL(SL,"item.WLOTID",True) # '$item.WLOTID' on line 77, col 53
if _v is not None: write(_filter(_v, rawExpr='$item.WLOTID')) # from line 77, col 53.
write('''" id="::float" title="''')
_v = VFFSL(SL,"item.WLNUMBER",True) # '$item.WLNUMBER' on line 77, col 87
if _v is not None: write(_filter(_v, rawExpr='$item.WLNUMBER')) # from line 77, col 87.
write('''" value="''')
_orig_filter_69437137 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.AMOUNT",True) # '$item.AMOUNT' on line 77, col 126
if _v is not None: write(_filter(_v, rawExpr='$item.AMOUNT')) # from line 77, col 126.
_filter = _orig_filter_69437137
write('''" size="4"></td>
''')
else: # generated from line 78, col 19
write(''' <td><a href=\'#\' title="''')
_v = VFFSL(SL,"item.WLNUMBER",True) # '$item.WLNUMBER' on line 79, col 44
if _v is not None: write(_filter(_v, rawExpr='$item.WLNUMBER')) # from line 79, col 44.
write('''">''')
_orig_filter_65958789 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.AMOUNT",True) # '$item.AMOUNT' on line 79, col 76
if _v is not None: write(_filter(_v, rawExpr='$item.AMOUNT')) # from line 79, col 76.
_filter = _orig_filter_65958789
write('''</a></td>
''')
# <td>$viewQuantity($item.AMOUNT,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)(<b><u>#filter Quantity$item.AMOUNT#end filter#</u></b>)</td>
write(''' <td>''')
_orig_filter_48149684 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.AMOUNT",True) # '$item.AMOUNT' on line 82, col 41
if _v is not None: write(_filter(_v, rawExpr='$item.AMOUNT')) # from line 82, col 41.
_filter = _orig_filter_48149684
write('''</td>
<td>''')
_orig_filter_76050613 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.RESERVE",True) # '$item.RESERVE' on line 83, col 41
if _v is not None: write(_filter(_v, rawExpr='$item.RESERVE')) # from line 83, col 41.
_filter = _orig_filter_76050613
write('''</td>
</tr>
''')
amount += float(VFFSL(SL,"item.AMOUNT",True))
write(''' </tbody>
<tfoot>
<tr>
<th>''')
if False:
_('Итого')
_v = VFFSL(SL,"_",False)('Итого') # "$_('\xc8\xf2\xee\xe3\xee')" on line 90, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xc8\xf2\xee\xe3\xee')")) # from line 90, col 25.
write(''':</th>
<th colspan=2>''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"amount",True),VFFSL(SL,"VWUFACTOR",True),VFFSL(SL,"VWUCODE",True),VFFSL(SL,"MWUFACTOR",True),VFFSL(SL,"MWUCODE",True)) # '$viewQuantity($amount,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)' on line 91, col 35
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($amount,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)')) # from line 91, col 35.
write('''(<b><u>''')
_orig_filter_25388359 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"amount",True) # '$amount' on line 91, col 120
if _v is not None: write(_filter(_v, rawExpr='$amount')) # from line 91, col 120.
_filter = _orig_filter_25388359
write('''</u></b>)</th>
\t\t\t\t\t<th></th>
</tr>
</tfoot>
</table>
<input type="submit" value="''')
if False:
_('Сохранить')
_v = VFFSL(SL,"_",False)('Сохранить') # "$_('\xd1\xee\xf5\xf0\xe0\xed\xe8\xf2\xfc')" on line 96, col 37
if _v is not None: write(_filter(_v, rawExpr="$_('\xd1\xee\xf5\xf0\xe0\xed\xe8\xf2\xfc')")) # from line 96, col 37.
write('''">
</form>
''')
write(''' <br>
<a href="palWaresAdd?id=''')
_v = VFFSL(SL,"PAL_ID",True) # '$PAL_ID' on line 100, col 29
if _v is not None: write(_filter(_v, rawExpr='$PAL_ID')) # from line 100, col 29.
write('''&wid=''')
_v = VFFSL(SL,"WID",True) # '$WID' on line 100, col 41
if _v is not None: write(_filter(_v, rawExpr='$WID')) # from line 100, col 41.
write('''&dbeg=''')
_v = VFFSL(SL,"CURRENTDATETIME",True) # '$CURRENTDATETIME' on line 100, col 51
if _v is not None: write(_filter(_v, rawExpr='$CURRENTDATETIME')) # from line 100, col 51.
write('''">''')
if False:
_('Новая партия')
_v = VFFSL(SL,"_",False)('Новая партия') # "$_('\xcd\xee\xe2\xe0\xff \xef\xe0\xf0\xf2\xe8\xff')" on line 100, col 69
if _v is not None: write(_filter(_v, rawExpr="$_('\xcd\xee\xe2\xe0\xff \xef\xe0\xf0\xf2\xe8\xff')")) # from line 100, col 69.
write('''</a>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def writeBody(self, **KWS):
## CHEETAH: main method generated for this template
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('''
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_palIWares= 'writeBody'
## END CLASS DEFINITION
if not hasattr(palIWares, '_initCheetahAttributes'):
templateAPIClass = getattr(palIWares, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(palIWares)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=palIWares()).run()
| [
"[email protected]"
] | |
d9f7024ed4779cb28010988aab7423be9857cb89 | b6c09a1b87074d6e58884211ce24df8ec354da5c | /344. 反转字符串.py | 8b92858d6a5fe0caaf3151c087ae5c3b43bb1d38 | [] | no_license | fengxiaolong886/leetcode | a0ee12d67c4a10fb12d6ca4369762ab5b090cab1 | 4c0897bc06a297fa9225a0c46d8ec9217d876db8 | refs/heads/master | 2023-03-18T22:16:29.212016 | 2021-03-07T03:48:16 | 2021-03-07T03:48:16 | 339,604,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | """
Write a function that reverses a string. The input string is given as an array of characters char[].
Do not allocate extra space for another array; you must modify the input array in place using O(1) extra memory.
You may assume all of the characters are printable ASCII characters.
Example 1:
Input: ["h","e","l","l","o"]
Output: ["o","l","l","e","h"]
Example 2:
Input: ["H","a","n","n","a","h"]
Output: ["h","a","n","n","a","H"]
"""
def reverseString(s):
    n = len(s)
    # Two-pointer swap: walk from both ends toward the middle.
    for i in range(n // 2):
        s[i], s[n - i - 1] = s[n - i - 1], s[i]
    return s
print(reverseString(["h","e","l","l","o"]))
print(reverseString(["H","a","n","n","a","h"])) | [
"[email protected]"
] | |
d77e8c94bb919365da58341726e79fd06355fd80 | 6dc716bbaf2e63da9153ff72e3c43364a1fcb5ff | /src/pyWebdriverAPI/19get_attribute.py | 9ff4350ae107f31f7565d6a64690d2bb49deb222 | [] | no_license | Fangziqiang/PythonAutoTest | bfa1d583a21768bcce45ac2348cd4913934b1703 | c9084004b6964fc0d59b98586d2986d0d7f938b1 | refs/heads/master | 2020-04-29T03:01:36.450658 | 2019-03-29T10:10:01 | 2019-03-29T10:10:01 | 175,793,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | #coding= utf-8
#<input type="checkbox" data-node="594434499" data-convert="1" data-type="file">
#<input type="checkbox" data-node="594434498" data-convert="1" data-type="file">
#<input type="checkbox" data-node="594434493" data-convert="1" data-type="file">
#<input type="checkbox" data-node="594434497" data-convert="1" data-type="file">
from selenium import webdriver

driver = webdriver.Firefox()
# (The page containing the checkboxes above is assumed to be loaded already;
# the original file never calls driver.get().)
# Select all elements on the page whose tag name is 'input' --
# note find_elements_by_tag_name (plural), which returns a list.
inputs = driver.find_elements_by_tag_name('input')
# Loop through them and click (tick) the checkbox whose data-node is 594434493.
for input in inputs:
    if input.get_attribute('data-node') == '594434493':
        input.click()
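# Alternative sketch (illustration only): locate the same checkbox directly
# with a CSS attribute selector instead of filtering in Python.
checkbox = driver.find_element_by_css_selector('input[data-node="594434493"]')
checkbox.click()
 | [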
"[email protected]"
] | |
6dc108d5aa1cddf54041d834cab22c1668506c85 | ca39938bcc1c04476bd33a52fcfeadd45a21192d | /classes1.py | 2def90a7910b1f31d712cb44baa9c7520f501713 | [] | no_license | ImayaDismas/python-programs | 57c2f2e633e3e10e42cfbfb873af60892041978d | 06102f505603220b5411d5777ceb2dd1f38c3f5d | refs/heads/master | 2021-01-10T16:29:42.530118 | 2016-02-16T17:01:33 | 2016-02-16T17:01:33 | 50,907,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | #!/usr/bin/python3
class Duck:
def __init__(self, value):
        # self._v is attached to the object, not the class:
        # the value is part of each instance (encapsulation).
        self._v = value
def quack(self):
print('Quaaack!', self._v)
def walki(self):
print('Walks like a duck', self._v)
def main():
donald = Duck(52)
frank = Duck(151)
donald.quack()
donald.walki()
frank.quack()
frank.walki()
if __name__ == "__main__": main()
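# Illustrative sketch (not part of the original file): contrast the
# per-instance attribute above with a class attribute shared by all ducks.
class CountedDuck:
    population = 0  # class attribute: one counter shared by every instance

    def __init__(self, value):
        self._v = value              # instance attribute: unique per object
        CountedDuck.population += 1

d1, d2 = CountedDuck(52), CountedDuck(151)
assert CountedDuck.population == 2
assert d1._v != d2._v
 | [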
"[email protected]"
] | |
1d19dfcaa94d515c7b8e7eb32d38bfffef735c0b | ac0894b411507bfd027696b6bf11b5e384ed68fc | /need-to-do/python3------download-problem--of--leetcode/216.combination-sum-iii.py | 94404265e1d831a8428bfc30325a91714f84809d | [] | no_license | mkzpd/leetcode-solution | 1d19554628c34c74012fa52582c225e6dccb345c | 60c9b218683bcdee86477a910c58ec702185c726 | refs/heads/master | 2020-05-31T05:56:48.985529 | 2019-09-20T09:10:49 | 2019-09-20T09:10:49 | 190,128,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #
# @lc app=leetcode id=216 lang=python3
#
# [216] Combination Sum III
#
# https://leetcode.com/problems/combination-sum-iii/description/
#
# algorithms
# Medium (53.03%)
# Total Accepted: 134.8K
# Total Submissions: 254.1K
# Testcase Example: '3\n7'
#
#
# Find all possible combinations of k numbers that add up to a number n, given
# that only numbers from 1 to 9 can be used and each combination should be a
# unique set of numbers.
#
# Note:
#
#
# All numbers will be positive integers.
# The solution set must not contain duplicate combinations.
#
#
# Example 1:
#
#
# Input: k = 3, n = 7
# Output: [[1,2,4]]
#
#
# Example 2:
#
#
# Input: k = 3, n = 9
# Output: [[1,2,6], [1,3,5], [2,3,4]]
#
#
#
from typing import List  # needed for the List[...] annotation below


class Solution:
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        # Backtracking: pick strictly increasing digits from 1..9, each used
        # at most once, until exactly k digits sum to n.
        result = []

        def backtrack(start, remaining, combo):
            if len(combo) == k:
                if remaining == 0:
                    result.append(combo[:])
                return
            for digit in range(start, 10):
                if digit > remaining:
                    break  # digits only grow, so no later digit can fit
                combo.append(digit)
                backtrack(digit + 1, remaining - digit, combo)
                combo.pop()

        backtrack(1, n, [])
        return result
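# Hypothetical quick check, mirroring the examples in the header comment:
if __name__ == '__main__':
    print(Solution().combinationSum3(3, 7))  # expected [[1, 2, 4]]
    print(Solution().combinationSum3(3, 9))  # expected [[1, 2, 6], [1, 3, 5], [2, 3, 4]]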
| [
"[email protected]"
] | |
29838aa991d0e07bfc6098581bf37e44233a71f9 | 2368797b51548c0f6393d63bf4973898ac99d528 | /stack/hard/q772.py | 1bb2df7a709fdba2408d3fadf6b0799535b2bf3b | [] | no_license | pengzhefu/LeetCodePython | 595887d1625666962e7e959ffa148580f9b89ada | 59eff778a5fd5cff3b5b6b88c6c7e76dd213dfb0 | refs/heads/master | 2021-06-08T19:44:52.487031 | 2021-06-01T15:44:29 | 2021-06-01T15:44:29 | 175,763,155 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 00:17:51 2019
@author: pengz
"""
'''
Implement a basic calculator to evaluate a simple expression string.
The expression string may contain open ( and closing parentheses ), the plus + or minus sign -,
non-negative integers and empty spaces .
The expression string contains only non-negative integers, +, -, *, / operators , open ( and
closing parentheses ) and empty spaces . The integer division should truncate toward zero.
You may assume that the given expression is always valid. All intermediate results will be in the
range of [-2147483648, 2147483647].
Some examples:
"1 + 1" = 2
" 6-4 / 2 " = 4
"2*(5+5*2)/3+(6/2+8)" = 21
"(2+6* 3+5- (3*14/7+2)*5)+3"=-12
'''
## https://medium.com/@CalvinChankf/solving-basic-calculator-i-ii-iii-on-leetcode-74d926732437
def calculate3(s): ## another author's solution; time is O(n^2) because each "(" recurses over a copied substring
if len(s) ==0:
return 0
stack = []
sign = '+' ## previous sign
num =0 ## tmp result
i =0 ## the index of s
while i <len(s):
c = s[i]
if c.isdigit():
num = 10*num + int(c)
        if c == '(':  # recurse on the sub-expression up to the matching ")"
            # find the corresponding ")"
pCnt = 0
end = 0
clone = s[i:]
while end < len(clone):
if clone[end] == '(':
pCnt += 1
elif clone[end] == ')':
pCnt -= 1
if pCnt == 0:
break
end += 1
            # do recursion to calculate the sum within the next (...)
            # (the original called self.calculate, but this is a standalone
            # function, so it must recurse into calculate3 itself)
            num = calculate3(s[i+1:i+end])
            i += end
        if i == len(s)-1 or (c == '+' or c == '-' or c == '*' or c == '/'): ## this part is the same as Basic Calculator II
if sign == '+':
stack.append(num)
elif sign == '-':
stack.append(-num)
elif sign == '*':
stack[-1] = stack[-1]*num
elif sign == '/':
stack[-1] = int(stack[-1]/float(num))
sign = c
num = 0
i +=1
    return sum(stack)
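# Hypothetical quick check against the examples in the docstring above:
if __name__ == '__main__':
    assert calculate3("1 + 1") == 2
    assert calculate3(" 6-4 / 2 ") == 4
    assert calculate3("2*(5+5*2)/3+(6/2+8)") == 21
    assert calculate3("(2+6* 3+5- (3*14/7+2)*5)+3") == -12
 | [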
"[email protected]"
] | |
b0df441e56adb0a05833822a484bf3b07dc5ab63 | f3af143bada7f79db1e15b4386e5107bc99eb212 | /ProjectBaseTest1/工具练习/01-微信消息撤回/01-微信撤回消息.py | 5acc6172948fb9993db5ff078b58c9afe7ab8de5 | [] | no_license | xxxfly/PythonStudy | a5ceae1d2b16cfdba19871507458154fc292bca0 | 478d89ccefc91a84f935aebdca796c9d4c23ef61 | refs/heads/master | 2022-04-08T13:45:03.405768 | 2020-03-09T03:47:50 | 2020-03-09T03:47:50 | 45,187,749 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,497 | py | #-*-encoding:utf-8-*-
import os
import re
import shutil
import time
import itchat
from itchat.content import *
# References:
# Install itchat: pip3 install itchat
# itchat: https://github.com/liduanwei/ItChat
# Chinese API docs: http://itchat.readthedocs.io/zh/latest/
# Note: the recallable message types are text, voice, video, picture,
# location, business card, sharing and attachment.
# {msg_id:(msg_from,msg_to,msg_time,msg_time_rec,msg_type,msg_content,msg_share_url)}
msg_dict={}
# Temporary directory for storing received files
rev_tmp_dir="RevDir/"
if not os.path.exists(rev_tmp_dir):
os.mkdir(rev_tmp_dir)
# Emoticons are a special case: the msg_id of the received message and of the
# recall NOTE differ, so this variable serves as a workaround.
face_bug=None
# Store received messages in a dict; stale entries could be cleaned out when a
# new message arrives. Message types that cannot be recalled are not registered.
# [TEXT, PICTURE, MAP, CARD, SHARING, RECORDING, ATTACHMENT, VIDEO, FRIENDS, NOTE]
@itchat.msg_register([TEXT, PICTURE, MAP, CARD, SHARING, RECORDING, ATTACHMENT, VIDEO])
def handler_receive_msg(msg):
global face_bug
    # Get the local timestamp and format it, e.g. 2018-04-21 13:08:21
msg_time_rec=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())
    # Message id
msg_id=msg['MsgId']
    # Message time
msg_time=msg['CreateTime']
    # Sender's nickname. RemarkName (the friend's remark) could be used instead,
    # but it is None for yourself and for friends without a remark.
msg_from=(itchat.search_friends(userName=msg['FromUserName']))['NickName']
    # Message content
msg_content=None
    # Shared link
msg_share_url=None
print("-->"+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())+"--接收消息:"+str(msg))
if msg['Type'] == 'Text' or msg['Type'] == 'Friends':
msg_content = msg['Text']
elif msg['Type']=='Recording' or msg['Type']=='Attachment' or msg['Type']=='Video' or msg['Type']=='Picture':
msg_content=r''+msg['FileName']
        # Save the file
msg['Text'](rev_tmp_dir+msg['FileName'])
elif msg['Type'] == 'Card':
        msg_content=msg['RecommendInfo']['NickName']+r"'s business card"
elif msg['Type'] == 'Map':
x,y,location=re.search('<location x="(.*?)" y="(.*?)".*label="(.*?)".*', msg['OriContent']).group(1, 2, 3)
if location is None:
msg_content=r"维度->"+x.__str__()+"经度->"+y.__str__()
else:
msg_content=r""+location
elif msg['Type']=='Sharing':
msg_content=msg['Text']
msg_share_url = msg['Url']
face_bug=msg_content
    # Update the dict
msg_dict.update({
msg_id:{
"msg_from":msg_from,"msg_time":msg_time,"msg_time_rec":msg_time_rec,
"msg_type": msg["Type"],
"msg_content": msg_content, "msg_share_url": msg_share_url
}
})
@itchat.msg_register([NOTE])
def send_msg_helper(msg):
global face_bug
print("-->"+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())+"--撤回消息:"+str(msg))
if re.search(r"<!\[CDATA\[.*撤回了一条消息\]\]>",msg["Content"]) is not None:
        # Get the id of the recalled message
old_msg_id=re.search("<msgid>(.*?)</msgid>",msg['Content']).group(1)
old_msg=msg_dict.get(old_msg_id,{})
        if len(old_msg_id)<11:  # emoticon workaround: short ids correspond to the face_bug case above
itchat.send_file(rev_tmp_dir+face_bug,toUserName='filehelper')
os.remove(rev_tmp_dir+face_bug)
else:
msg_body="告诉你一个秘密~"+""+old_msg.get("msg_from")+" 撤回了 "+old_msg.get("msg_type")+" 消息 "+" "+old_msg.get("msg_time_rec")+" "+"撤回了什么 ⇣"+" "+r""+old_msg.get("msg_content")
            # If it was a share, append the original link
if old_msg['msg_type']=='Sharing':
msg_body+= " 就是这个链接➣ " + old_msg.get('msg_share_url')
            # Forward the recall notice to the file helper
itchat.send(msg_body,toUserName='filehelper')
            # If a file was attached, send the file back as well
if old_msg['msg_type']=='Picture' or old_msg['msg_type']=='Recording' or old_msg['msg_type']=='Video' or old_msg['msg_type']=='Attachment':
file='@file@%s'%(rev_tmp_dir+old_msg['msg_content'])
itchat.send(msg=file,toUserName='filehelper')
os.remove(rev_tmp_dir+old_msg['msg_content'])
            # Remove the old message from the dict
msg_dict.pop(old_msg_id)
if __name__ == '__main__':
itchat.auto_login(hotReload=True,enableCmdQR=2)
itchat.run()
# itchat.send('hello',toUserName='filehelper')
| [
"[email protected]"
] | |
758943cd58a34d2d241f98b466204942032ae6d2 | 9f26975c02f7a10ce23c5f6217fc1f4a80c5134c | /crawling_2.py | 9ec35f60cfda4ecb628605bbfbb835441996677f | [] | no_license | ravi4all/PythonReg2_30_2020 | d0ae837da4c19f1bff9a938e5a91c561c9288f36 | ccb85325e0efce60c771697f8d07dc5e63cfaa5b | refs/heads/master | 2020-12-18T04:16:27.399221 | 2020-03-20T15:05:30 | 2020-03-20T15:05:30 | 235,315,428 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import bs4
import urllib.request as url
path = "https://www.flipkart.com/search?q=tv&otracker=search&otracker1=search&marketplace=FLIPKART&as-show=on&as=off&as-pos=0&as-type=HISTORY&as-backfill=on"
response = url.urlopen(path)
page = bs4.BeautifulSoup(response, 'lxml')
# NOTE: these are Flipkart's generated CSS class names (product title,
# price, rating); they are brittle and change when the site is redeployed.
div = page.findAll('div', class_='_3wU53n')
price = page.findAll('div', class_='_1vC4OE _2rQ-NK')
rating = page.findAll('div', class_='hGSR34')
for i in range(len(div)):
print(div[i].text)
print("Price :",price[i].text)
print("Rating :",rating[i].text)
print("="*20)
| [
"[email protected]"
] | |
7b7c0d425f0ee62c452f9a22495e459f13383251 | dffda5fa2233eee725ed33be87f1d607fcd444f6 | /python/ccxt/eterbase.py | fa4c11fde8f4c5be6325fcdbf9762fe22325f400 | [
"MIT"
] | permissive | samholt/ccxt | 90a60cc0173705878987aa6ce70e5305eecfcc8b | ca08b5c9d23dea98ffd4191609ec3d7d9f129fd3 | refs/heads/master | 2023-03-28T06:21:30.918201 | 2021-04-01T17:16:02 | 2021-04-01T17:16:02 | 106,024,622 | 4 | 3 | MIT | 2021-04-01T17:09:54 | 2017-10-06T16:08:46 | JavaScript | UTF-8 | Python | false | false | 40,774 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InvalidOrder
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class eterbase(Exchange):
def describe(self):
return self.deep_extend(super(eterbase, self).describe(), {
'id': 'eterbase',
'name': 'Eterbase',
'countries': ['SK'], # Slovakia
'rateLimit': 500,
'version': 'v1',
'certified': True,
'has': {
'CORS': False,
'publicAPI': True,
'privateAPI': True,
'cancelOrder': True,
'createOrder': True,
'deposit': False,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': False,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchOrderTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'withdraw': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'1h': '60',
'4h': '240',
'1d': '1440',
'1w': '10080',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/82067900-faeb0f80-96d9-11ea-9f22-0071cfcb9871.jpg',
'api': 'https://api.eterbase.exchange',
'www': 'https://www.eterbase.com',
'doc': 'https://developers.eterbase.exchange',
'fees': 'https://www.eterbase.com/exchange/fees',
'referral': 'https://eterbase.exchange/invite/1wjjh4Pe',
},
'api': {
'markets': {
'get': [
'{id}/order-book',
],
},
'public': {
'get': [
'ping',
'assets',
'markets',
'tickers',
'tickers/{id}/ticker',
'markets/{id}/trades',
'markets/{id}/ohlcv',
'wstoken',
],
},
'private': {
'get': [
'accounts/{id}/balances',
'accounts/{id}/orders',
'accounts/{id}/fills',
'orders/{id}/fills',
'orders/{id}',
],
'post': [
'orders',
'accounts/{id}/withdrawals',
],
'delete': [
'orders/{id}',
],
},
'feed': {
'get': [
'feed',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.35 / 100,
'maker': 0.35 / 100,
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'uid': True,
},
'precisionMode': SIGNIFICANT_DIGITS,
'options': {
'createMarketBuyOrderRequiresPrice': True,
},
'exceptions': {
'exact': {
'Invalid cost': InvalidOrder, # {"message":"Invalid cost","_links":{"self":{"href":"/orders","templated":false}}}
'Invalid order ID': InvalidOrder, # {"message":"Invalid order ID","_links":{"self":{"href":"/orders/4a151805-d594-4a96-9d64-e3984f2441f7","templated":false}}}
'Invalid market !': BadSymbol, # {"message":"Invalid market !","_links":{"self":{"href":"/markets/300/order-book","templated":false}}}
},
'broad': {
'Failed to convert argument': BadRequest,
},
},
})
def fetch_time(self, params={}):
response = self.publicGetPing(params)
#
# {"pong": 1556354416582}
#
return self.safe_integer(response, 'pong')
def fetch_markets(self, params={}):
response = self.publicGetMarkets(params)
#
# [
# {
# "id":33,
# "symbol":"ETHUSDT",
# "base":"ETH",
# "quote":"USDT",
# "priceSigDigs":5,
# "qtySigDigs":8,
# "costSigDigs":8,
# "verificationLevelUser":1,
# "verificationLevelCorporate":11,
# "group":"USD",
# "tradingRules":[
# {"attribute":"Qty","condition":"Min","value":0.006},
# {"attribute":"Qty","condition":"Max","value":1000},
# {"attribute":"Cost","condition":"Min","value":1},
# {"attribute":"Cost","condition":"Max","value":210000}
# ],
# "allowedOrderTypes":[1,2,3,4],
# "state":"Trading"
# }
# ]
#
result = []
for i in range(0, len(response)):
market = self.parse_market(response[i])
result.append(market)
return result
def parse_market(self, market):
#
# {
# "id":33,
# "symbol":"ETHUSDT",
# "base":"ETH",
# "quote":"USDT",
# "priceSigDigs":5,
# "qtySigDigs":8,
# "costSigDigs":8,
# "verificationLevelUser":1,
# "verificationLevelCorporate":11,
# "group":"USD",
# "tradingRules":[
# {"attribute":"Qty","condition":"Min","value":0.006},
# {"attribute":"Qty","condition":"Max","value":1000},
# {"attribute":"Cost","condition":"Min","value":1},
# {"attribute":"Cost","condition":"Max","value":210000}
# ],
# "allowedOrderTypes":[1,2,3,4],
# "state":"Trading"
# }
#
id = self.safe_string(market, 'id')
# numericId = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
state = self.safe_string(market, 'state')
active = (state == 'Trading')
precision = {
'price': self.safe_integer(market, 'priceSigDigs'),
'amount': self.safe_integer(market, 'qtySigDigs'),
'cost': self.safe_integer(market, 'costSigDigs'),
}
rules = self.safe_value(market, 'tradingRules', [])
minAmount = None
maxAmount = None
minCost = None
maxCost = None
for i in range(0, len(rules)):
rule = rules[i]
attribute = self.safe_string(rule, 'attribute')
condition = self.safe_string(rule, 'condition')
value = self.safe_number(rule, 'value')
if (attribute == 'Qty') and (condition == 'Min'):
minAmount = value
elif (attribute == 'Qty') and (condition == 'Max'):
maxAmount = value
elif (attribute == 'Cost') and (condition == 'Min'):
minCost = value
elif (attribute == 'Cost') and (condition == 'Max'):
maxCost = value
return {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': maxAmount,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': minCost,
'max': maxCost,
},
},
}
def fetch_currencies(self, params={}):
response = self.publicGetAssets(params)
#
# [
# {
# "id":"LINK",
# "name":"ChainLink Token",
# "precisionDisplay":8,
# "precisionMax":18,
# "precisionBasis":1000000000000000000,
# "precisionStep":1,
# "verificationLevelMin":"null",
# "cmcId":"LINK",
# "txnUrl":"https://etherscan.io/tx/{txnId}",
# "state":"Active",
# "type":"Crypto",
# "isReference":false,
# "withdrawalMin":"0",
# "withdrawalMax":"50587",
# "withdrawalFee":"0.55",
# "depositEnabled":true,
# "withdrawalEnabled":true,
# "description":"",
# "coingeckoUrl":"https://www.coingecko.com/en/coins/chainlink",
# "coinmarketcapUrl":"https://coinmarketcap.com/currencies/chainlink",
# "eterbaseUrl":"https://www.eterbase.com/system-status/LINK",
# "explorerUrl":"https://etherscan.io/token/0x514910771af9ca656af840dff83e8264ecf986ca",
# "withdrawalMemoAllowed":false,
# "countries":[],
# "networks":[]
# }
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'id')
precision = self.safe_integer(currency, 'precisionDisplay')
code = self.safe_currency_code(id)
depositEnabled = self.safe_value(currency, 'depositEnabled')
withdrawalEnabled = self.safe_value(currency, 'withdrawalEnabled')
state = self.safe_string(currency, 'state')
active = depositEnabled and withdrawalEnabled and (state == 'Active')
type = self.safe_string_lower(currency, 'type')
name = self.safe_string(currency, 'name')
result[code] = {
'id': id,
'info': currency,
'code': code,
'type': type,
'name': name,
'active': active,
'fee': self.safe_number(currency, 'withdrawalFee'),
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawalMin'),
'max': self.safe_number(currency, 'withdrawalMax'),
},
},
}
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "time":1588778516608,
# "marketId":250,
# "symbol": "ETHUSDT",
# "price":0.0,
# "change":0.0,
# "volumeBase":0.0,
# "volume":0.0,
# "low":0.0,
# "high":0.0,
# }
#
marketId = self.safe_string(ticker, 'marketId')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(ticker, 'time')
last = self.safe_number(ticker, 'price')
baseVolume = self.safe_number(ticker, 'volumeBase')
quoteVolume = self.safe_number(ticker, 'volume')
vwap = self.vwap(baseVolume, quoteVolume)
percentage = self.safe_number(ticker, 'change')
result = {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
response = self.publicGetTickersIdTicker(self.extend(request, params))
#
# {
# "time":1588778516608,
# "marketId":250,
# "price":0.0,
# "change":0.0,
# "volumeBase":0.0,
# "volume":0.0,
# "low":0.0,
# "high":0.0,
# }
#
return self.parse_ticker(response, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {
# 'quote': 'USDT', # identifier of a quote asset to filter the markets
}
response = self.publicGetTickers(self.extend(request, params))
#
# [
# {
# "time":1588831771698,
# "marketId":33,
# "price":204.54,
# "change":-1.03,
# "volumeBase":544.9801776699998,
# "volume":111550.433735,
# "low":200.33,
# "high":209.51
# },
# ]
#
return self.parse_tickers(response, symbols)
def parse_trade(self, trade, market):
#
# fetchTrades(public)
#
# {
# "id":251199246,
# "side":2,
# "price":0.022044,
# "executedAt":1588830682664,
# "qty":0.13545846,
# "makerId":"67ed6ef3-33d8-4389-ba70-5c68d9db9f6c",
# "takerId":"229ef0d6-fe67-4b5d-9733-824142fab8f3"
# }
#
# fetchMyTrades, fetchOrderTrades(private)
#
# {
# "id": 123,
# "marketId": 123,
# "side": 1,
# "qty": "1.23456",
# "price": "1.23456",
# "cost": "1.23456",
# "fee": "1.23456",
# "feeAsset": "XBASE",
# "liquidity": 1,
# "orderId": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
# "tradeId": 123,
# "filledAt": 1556355722341
# }
#
price = self.safe_number(trade, 'price')
amount = self.safe_number(trade, 'qty')
fee = None
feeCost = self.safe_number(trade, 'fee')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'feeAsset')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
        cost = self.safe_number(trade, 'cost')
if (cost is None) and (price is not None) and (amount is not None):
cost = price * amount
timestamp = self.safe_integer_2(trade, 'executedAt', 'filledAt')
tradeSide = self.safe_string(trade, 'side')
side = 'buy' if (tradeSide == '1') else 'sell'
liquidity = self.safe_string(trade, 'liquidity')
takerOrMaker = None
if liquidity is not None:
takerOrMaker = 'maker' if (liquidity == '1') else 'taker'
orderId = self.safe_string(trade, 'orderId')
id = self.safe_string(trade, 'id')
marketId = self.safe_string(trade, 'marketId')
symbol = self.safe_symbol(marketId, market)
return {
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
# 'offset': 0 # the number of records to skip
}
if limit is not None:
request['limit'] = limit
response = self.publicGetMarketsIdTrades(self.extend(request, params))
#
# [
# {
# "id":251199246,
# "side":2,
# "price":0.022044,
# "executedAt":1588830682664,
# "qty":0.13545846,
# "makerId":"67ed6ef3-33d8-4389-ba70-5c68d9db9f6c",
# "takerId":"229ef0d6-fe67-4b5d-9733-824142fab8f3"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'id': self.market_id(symbol),
}
response = self.marketsGetIdOrderBook(self.extend(request, params))
#
# {
# "type":"ob_snapshot",
# "marketId":3,
# "timestamp":1588836429847,
# "bids":[
# [0.021694,8.8793688,1], # price, amount, count
# [0.01937,7.1340473,1],
# [0.020774,3.314881,1],
# ],
# "asks":[
# [0.02305,8.8793688,1],
# [0.028022,3.314881,1],
# [0.022598,3.314881,1],
# ]
# }
#
timestamp = self.safe_integer(response, 'timestamp')
return self.parse_order_book(response, timestamp)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "time":1588807500000,
# "open":0.022077,
# "high":0.022077,
# "low":0.022051,
# "close":0.022051,
# "volume":10.532025119999997
# }
#
return [
self.safe_integer(ohlcv, 'time'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
# 'id': market['id'],
'interval': self.timeframes[timeframe],
# 'start': 1588830682664, # milliseconds
# 'end': 1588830682664, # milliseconds
}
duration = self.parse_timeframe(timeframe)
now = self.milliseconds()
if since is not None:
request['start'] = since
if limit is None:
request['end'] = now
else:
request['end'] = self.sum(since, duration * limit * 1000)
elif limit is not None:
request['start'] = now - duration * limit * 1000
request['end'] = now
else:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a since argument, or a limit argument, or both')
self.load_markets()
market = self.market(symbol)
request['id'] = market['id']
response = self.publicGetMarketsIdOhlcv(self.extend(request, params))
#
# [
# {"time":1588807500000,"open":0.022077,"high":0.022077,"low":0.022051,"close":0.022051,"volume":10.532025119999997},
# {"time":1588807800000,"open":0.022051,"high":0.022051,"low":0.022044,"close":0.022044,"volume":0.655987},
# {"time":1588808400000,"open":0.022044,"high":0.022044,"low":0.022044,"close":0.022044,"volume":3.9615545499999993},
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
request = {
'id': self.uid,
}
response = self.privateGetAccountsIdBalances(self.extend(request, params))
#
# [
# {
# "assetId":"USDT",
# "available":"25",
# "balance":"25",
# "reserved":"0",
# "balanceBtc":"0.0",
# "balanceRef":"0.0",
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'assetId')
code = self.safe_currency_code(currencyId)
account = {
'free': self.safe_number(balance, 'available'),
'used': self.safe_number(balance, 'reserved'),
'total': self.safe_number(balance, 'balance'),
}
result[code] = account
return self.parse_balance(result)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privateGetOrdersId(self.extend(request, params))
#
# {
# "id": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
# "accountId": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
# "marketId": 123,
# "type": 1,
# "side": 1,
# "qty": "1.23456",
# "cost": "1.23456",
# "remainingQty": "1.23456",
# "remainingCost": "1.23456",
# "limitPrice": "1.23456",
# "stopPrice": "1.23456",
# "postOnly": False,
# "timeInForce": "GTC",
# "state": 1,
# "closeReason": "FILLED",
# "placedAt": 1556355722341,
# "closedAt": 1556355722341
# }
#
return self.parse_order(response)
def parse_order_status(self, status):
statuses = {
'1': None, # pending
'2': 'open', # open
'3': 'open', # partially filled
'4': 'closed', # closed
'FILLED': 'closed',
'USER_REQUESTED_CANCEL': 'canceled',
'ADMINISTRATIVE_CANCEL': 'canceled',
'NOT_ENOUGH_LIQUIDITY': 'canceled',
'EXPIRED': 'expired',
'ONE_CANCELS_OTHER': 'canceled',
}
return self.safe_string(statuses, status)
def parse_order(self, order, market=None):
#
# fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# {
# "id": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
# "accountId": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
# "marketId": 123,
# "type": 1,
# "side": 1,
# "qty": "1.23456",
# "cost": "1.23456",
# "remainingQty": "1.23456",
# "remainingCost": "1.23456",
# "limitPrice": "1.23456",
# "stopPrice": "1.23456",
# "postOnly": False,
# "timeInForce": "GTC",
# "state": 1,
# "closeReason": "FILLED",
# "placedAt": 1556355722341,
# "closedAt": 1556355722341
# }
#
# createOrder
#
# market buy
#
# {
# "id":"ff81127c-8fd5-4846-b683-110639dcd322",
# "accountId":"6d445378-d8a3-4932-91cd-545d0a4ad2a2",
# "marketId":33,
# "type":1,
# "side":1,
# "cost":"25",
# "postOnly":false,
# "timeInForce":"GTC",
# "state":1,
# "placedAt":1589510846735
# }
#
# market sell, limit buy, limit sell
#
# {
# "id":"042a38b0-e369-4ad2-ae73-a18ff6b1dcf1",
# "accountId":"6d445378-d8a3-4932-91cd-545d0a4ad2a2",
# "marketId":33,
# "type":2,
# "side":1,
# "qty":"1000",
# "limitPrice":"100",
# "postOnly":false,
# "timeInForce":"GTC",
# "state":1,
# "placedAt":1589403938682,
# }
#
id = self.safe_string(order, 'id')
timestamp = self.safe_integer(order, 'placedAt')
marketId = self.safe_integer(order, 'marketId')
symbol = self.safe_symbol(marketId, market)
status = self.parse_order_status(self.safe_string(order, 'state'))
if status == 'closed':
status = self.parse_order_status(self.safe_string(order, 'closeReason'))
orderSide = self.safe_string(order, 'side')
side = 'buy' if (orderSide == '1') else 'sell'
orderType = self.safe_string(order, 'type')
type = None
if orderType == '1':
type = 'market'
elif orderType == '2':
type = 'limit'
elif orderType == '3':
type = 'stopmarket'
else:
type = 'stoplimit'
price = self.safe_number(order, 'limitPrice')
amount = self.safe_number(order, 'qty')
remaining = self.safe_number(order, 'remainingQty')
filled = None
remainingCost = self.safe_number(order, 'remainingCost')
if (remainingCost is not None) and (remainingCost == 0.0):
remaining = 0
if (amount is not None) and (remaining is not None):
filled = max(0, amount - remaining)
cost = self.safe_number(order, 'cost')
if type == 'market':
if price == 0.0:
if (cost is not None) and (filled is not None):
if (cost > 0) and (filled > 0):
price = cost / filled
average = None
if cost is not None:
if filled:
average = cost / filled
timeInForce = self.safe_string(order, 'timeInForce')
stopPrice = self.safe_number(order, 'stopPrice')
postOnly = self.safe_value(order, 'postOnly')
return {
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
}
def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
now = self.milliseconds()
ninetyDays = 90 * 24 * 60 * 60 * 1000 # 90 days timerange max
request = {
'id': self.uid,
'state': state,
# 'side': Integer, # 1 = buy, 2 = sell
# 'offset': 0, # the number of records to skip
}
if since is None:
request['from'] = now - ninetyDays
request['to'] = now
else:
request['from'] = since
request['to'] = self.sum(since, ninetyDays)
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request['marketId'] = market['id']
if limit is not None:
request['limit'] = limit # default 50
response = self.privateGetAccountsIdOrders(self.extend(request, params))
#
# [
# {
# "id": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
# "accountId": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
# "marketId": 123,
# "type": 1,
# "side": 1,
# "qty": "1.23456",
# "cost": "1.23456",
# "remainingQty": "1.23456",
# "remainingCost": "1.23456",
# "limitPrice": "1.23456",
# "stopPrice": "1.23456",
# "postOnly": False,
# "timeInForce": "GTC",
# "state": 1,
# "closeReason": "FILLED",
# "placedAt": 1556355722341,
# "closedAt": 1556355722341
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_state('INACTIVE', symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_state('ACTIVE', symbol, since, limit, params)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
now = self.milliseconds()
ninetyDays = 90 * 24 * 60 * 60 * 1000 # 90 days timerange max
request = {
'id': self.uid,
# 'side': Integer, # 1 = buy, 2 = sell
# 'offset': 0, # the number of records to skip
}
if since is None:
request['from'] = now - ninetyDays
request['to'] = now
else:
request['from'] = since
request['to'] = self.sum(since, ninetyDays)
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request['marketId'] = market['id']
if limit is not None:
request['limit'] = limit # default 50, max 200
response = self.privateGetAccountsIdFills(self.extend(request, params))
#
# [
# {
# "id": 123,
# "marketId": 123,
# "side": 1,
# "qty": "1.23456",
# "price": "1.23456",
# "cost": "1.23456",
# "fee": "1.23456",
# "feeAsset": "XBASE",
# "liquidity": 1,
# "orderId": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
# "tradeId": 123,
# "filledAt": 1556355722341
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'id': id,
}
trades = self.privateGetOrdersIdFills(self.extend(request, params))
return self.parse_trades(trades)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
uppercaseType = type.upper()
if uppercaseType == 'MARKET':
type = 1
elif uppercaseType == 'LIMIT':
type = 2
elif uppercaseType == 'STOPMARKET':
type = 3
elif uppercaseType == 'STOPLIMIT':
type = 4
uppercaseSide = side.upper()
        side = 1 if (uppercaseSide == 'BUY') else 2
request = {
'accountId': self.uid,
'marketId': market['id'],
'type': type,
'side': side,
# 'postOnly': False,
# 'timeInForce': 'GTC',
}
clientOrderId = self.safe_value_2(params, 'refId', 'clientOrderId')
query = params
if clientOrderId is not None:
request['refId'] = clientOrderId
query = self.omit(params, ['refId', 'clientOrderId'])
if (uppercaseType == 'MARKET') and (uppercaseSide == 'BUY'):
# for market buy it requires the amount of quote currency to spend
cost = self.safe_number(params, 'cost')
if self.options['createMarketBuyOrderRequiresPrice']:
if cost is None:
if price is not None:
cost = amount * price
else:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
cost = amount if (cost is None) else cost
precision = market['precision']['price']
request['cost'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
else:
request['qty'] = self.amount_to_precision(symbol, amount)
if uppercaseType == 'LIMIT':
request['limitPrice'] = self.price_to_precision(symbol, price)
response = self.privatePostOrders(self.extend(request, query))
#
# market buy
#
# {
# "id":"ff81127c-8fd5-4846-b683-110639dcd322",
# "accountId":"6d445378-d8a3-4932-91cd-545d0a4ad2a2",
# "marketId":33,
# "type":1,
# "side":1,
# "cost":"25",
# "postOnly":false,
# "timeInForce":"GTC",
# "state":1,
# "placedAt":1589510846735
# }
#
# market sell, limit buy, limit sell
#
# {
# "id":"042a38b0-e369-4ad2-ae73-a18ff6b1dcf1",
# "accountId":"6d445378-d8a3-4932-91cd-545d0a4ad2a2",
# "marketId":33,
# "type":2,
# "side":1,
# "qty":"1000",
# "limitPrice":"100",
# "postOnly":false,
# "timeInForce":"GTC",
# "state":1,
# "placedAt":1589403938682,
# }
#
return self.parse_order(response, market)
def cancel_order(self, id, symbol=None, params={}):
request = {
'id': id,
}
return self.privateDeleteOrdersId(self.extend(request, params))
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'id': self.uid,
'accountId': self.uid,
'assetId': currency['id'],
'amount': amount,
# 'cryptoAddress': address,
# 'accountNumber': 'IBAN', # IBAN account number
# 'networkId': 'XBASE', # underlying network
}
if address is not None:
request['cryptoAddress'] = address
if tag is not None:
request['memo'] = tag
response = self.privatePostAccountsIdWithdrawals(self.extend(request, params))
#
# {
# "id": "98b62dde-a87f-45f0-8db8-80ae2d312fa6"
# }
#
return {
'info': response,
'id': self.safe_string(response, 'id'),
}
def sign(self, path, api='public', method='GET', params={}, httpHeaders=None, body=None):
query = self.omit(params, self.extract_params(path))
request = '/'
if api == 'public':
request += 'api/' + self.version
elif api == 'private':
request += 'api/' + self.version
elif api == 'markets':
request += 'api/' + api
request += '/' + self.implode_params(path, params)
if method == 'GET':
if query:
request += '?' + self.urlencode(query)
url = self.urls['api'] + request
if api == 'private':
self.check_required_credentials()
payload = ''
if method != 'GET':
if query:
body = self.json(query)
payload = body
# construct signature
hasBody = (method == 'POST') or (method == 'PUT') or (method == 'PATCH')
# date = 'Mon, 30 Sep 2019 13:57:23 GMT'
date = self.rfc2616(self.milliseconds())
headersCSV = 'date' + ' ' + 'request-line'
message = 'date' + ':' + ' ' + date + "\n" + method + ' ' + request + ' HTTP/1.1' # eslint-disable-line quotes
digest = ''
if hasBody:
digest = 'SHA-256=' + self.hash(payload, 'sha256', 'base64')
message += "\ndigest" + ':' + ' ' + digest # eslint-disable-line quotes
headersCSV += ' ' + 'digest'
signature = self.hmac(self.encode(message), self.encode(self.secret), hashlib.sha256, 'base64')
authorizationHeader = 'hmac username="' + self.apiKey + '",algorithm="hmac-sha256",headers="' + headersCSV + '",' + 'signature="' + signature + '"'
httpHeaders = {
'Date': date,
'Authorization': authorizationHeader,
'Content-Type': 'application/json',
}
if hasBody:
httpHeaders['Digest'] = digest
return {'url': url, 'method': method, 'body': body, 'headers': httpHeaders}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"message":"Invalid cost","_links":{"self":{"href":"/orders","templated":false}}}
#
message = self.safe_string(response, 'message')
if message is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
| [
"[email protected]"
] | |
5c1f9344d0bff5044ed0e8a71e5cabace9acb666 | 4166821e5d4cff87a3f178a0e3047ddd5d62bccf | /brigitte/accounts/migrations/0004_auto__add_field_sshpublickey_key_parsed.py | 6b9d6bc6e6a005d42af354722feb1eae262ef973 | [
"BSD-3-Clause"
] | permissive | stephrdev/brigitte | 0df208c797c4e26832fd30f0fdd6ec2db5212b4f | 473d3c30af728292693f4e94b3c9b34d2d784b41 | refs/heads/master | 2021-06-04T05:16:59.683660 | 2013-07-08T08:17:05 | 2013-07-08T08:17:05 | 1,267,927 | 12 | 4 | BSD-3-Clause | 2021-03-29T16:44:29 | 2011-01-18T18:00:42 | Python | UTF-8 | Python | false | false | 4,847 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SshPublicKey.key_parsed'
db.add_column('accounts_sshpublickey', 'key_parsed',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SshPublicKey.key_parsed'
db.delete_column('accounts_sshpublickey', 'key_parsed')
models = {
'accounts.profile': {
'Meta': {'object_name': 'Profile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'short_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'accounts.sshpublickey': {
'Meta': {'object_name': 'SshPublicKey'},
'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'key_parsed': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts'] | [
"[email protected]"
] | |
226f93b97333e8f4e387f28b0bc99298333d003a | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /accessanalyzer_write_2/resource_tag.py | ef443db399fe96ce9f2ed16a0e7b0822b37cb0c7 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/accessanalyzer/tag-resource.html
if __name__ == '__main__':
"""
untag-resource : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/accessanalyzer/untag-resource.html
"""
parameter_display_string = """
# resource-arn : The ARN of the resource to add the tag to.
# tags : The tags to add to the resource.
key -> (string)
value -> (string)
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_two_parameter("accessanalyzer", "tag-resource", "resource-arn", "tags", add_option_dict)
| [
"[email protected]"
] | |
459255c5aacaa2dc24b3cef00468477a0911bbdb | 80a7cd7958eae28af24c4cf2be167e425c7edaca | /utils/authors.py | 8030b5d1d40950b5b36ed33eec87a4b001f56ed7 | [
"MIT"
] | permissive | prophile/srweb-jekyll | 18918ea82bdf6a8850a20ef9be5e78a876d078b4 | faa1727e343d01fe4fa7b75c39a106ca895c8f2b | refs/heads/master | 2021-01-01T05:38:40.764190 | 2015-09-13T16:57:07 | 2015-09-13T16:57:07 | 27,548,340 | 0 | 2 | MIT | 2022-06-11T13:03:12 | 2014-12-04T16:09:02 | CSS | UTF-8 | Python | false | false | 946 | py | import subprocess
import re
import json
from collections import namedtuple
def is_valid(author):
if '[email protected]' in author.email:
return False
if 'BRIAN' in author.name:
return False
return True
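# `git shortlog -sne` emits lines like "    42\tJane Doe <jane@example.org>";
# capture the commit count, author name and email address.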
DATA_REGEX = re.compile('^\s*(\d+)\t(.*) <(.*)>$')
Author = namedtuple('Author', 'commits name email')
authors = subprocess.check_output(('git', 'shortlog', '-sne'),
universal_newlines=True).splitlines()
authors = [DATA_REGEX.match(line).groups() for line in authors]
authors = [Author(int(commits), name, email)
for commits, name, email in authors]
authors = [author for author in authors if is_valid(author)]
with open('AUTHORS', 'w') as f:
for author in authors:
f.write("{} <{}>\n".format(author.name, author.email))
with open('_data/authors.yml', 'w') as f:
json.dump([dict(author._asdict()) for author in authors],
f)
| [
"[email protected]"
] | |
5ab5b4929ae8e1de1476ed79ba97ad12e4a9f58a | 4752d3379a536b491367665b10dbb9724b8804b2 | /ras_party/controllers/pending_survey_controller.py | a5af42ea9c59cf9fbcc88f7b3ce9da9730f69168 | [] | no_license | ONSdigital/ras-party | 6d050b068a0a424cbc3d701f1b62910c4393af3b | 7f69b8a664135918e9c55416379da05c8a0d81a8 | refs/heads/main | 2023-08-18T01:38:18.564259 | 2023-07-14T12:11:20 | 2023-07-14T12:11:20 | 92,487,741 | 2 | 2 | null | 2023-09-08T12:59:48 | 2017-05-26T08:10:47 | Python | UTF-8 | Python | false | false | 21,587 | py | import logging
import uuid
from datetime import datetime, timedelta
from urllib.error import HTTPError
import structlog
from flask import current_app
from itsdangerous import BadData, BadSignature, SignatureExpired
from sqlalchemy import and_
from sqlalchemy.exc import SQLAlchemyError
from werkzeug.exceptions import BadRequest, Conflict, InternalServerError, NotFound
from ras_party.clients.oauth_client import OauthClient
from ras_party.controllers.account_controller import (
get_single_respondent_by_email,
set_user_verified,
)
from ras_party.controllers.business_controller import get_business_by_id
from ras_party.controllers.notify_gateway import NotifyGateway
from ras_party.controllers.queries import (
delete_pending_survey_by_batch_no,
query_business_by_party_uuid,
query_business_respondent_by_respondent_id_and_business_id,
query_enrolment_by_business_and_survey_and_status,
query_pending_survey_by_batch_no,
query_pending_survey_by_shared_by,
query_pending_surveys_by_business_and_survey,
query_respondent_by_party_uuid,
)
from ras_party.controllers.respondent_controller import (
get_respondent_by_email,
get_respondent_by_id,
)
from ras_party.controllers.validate import Exists, Validator
from ras_party.models.models import (
BusinessRespondent,
Enrolment,
EnrolmentStatus,
PendingSurveys,
Respondent,
RespondentStatus,
)
from ras_party.support.session_decorator import (
with_db_session,
with_query_only_db_session,
with_quiet_db_session,
)
from ras_party.support.verification import decode_email_token
logger = structlog.wrap_logger(logging.getLogger(__name__))
@with_query_only_db_session
def get_users_enrolled_and_pending_survey_against_business_and_survey(
business_id: str, survey_id: str, is_transfer: bool, session
) -> int:
"""
Get total users count who are already enrolled and pending share survey against business id and survey id
Returns total user count
:param business_id: business party id
:param survey_id: survey id
:param is_transfer: if the request is to transfer share
:param session: db session
:rtype: int
"""
bound_logger = logger.bind(business_id=business_id, survey_id=survey_id)
bound_logger.info("Attempting to get enrolled users")
enrolled_users = query_enrolment_by_business_and_survey_and_status(business_id, survey_id, session)
bound_logger.info("Attempting to get pending survey users")
pending_survey_users = query_pending_surveys_by_business_and_survey(business_id, survey_id, session, is_transfer)
total_users = enrolled_users.count() + pending_survey_users.count()
bound_logger.info(f"total users count {total_users}")
return total_users
@with_db_session
def pending_survey_deletion(batch_no: str, session):
"""
Delete pending survey record against batch no
:param batch_no: batch_no
:param session:
"""
logger.info("Retrieving pending share record against batch number", batch_no=batch_no)
pending_surveys = session.query(PendingSurveys).filter(PendingSurveys.batch_no == batch_no)
if pending_surveys.count() > 0:
try:
pending_surveys.delete()
return "pending surveys successfully deleted", 202
        except Exception as error:  # SQLAlchemyError is a subclass of Exception
            logger.error("Error while deleting pending surveys", error=error)
            return "something went wrong", 500
else:
return "pending surveys does not exists.", 404
@with_db_session
def pending_survey_create(
business_id: str, survey_id: str, email_address: str, shared_by, batch_number, is_transfer, session
):
"""
    Creates a new pending survey record
:param business_id: business party id
:param survey_id: survey id
:param email_address: email_address
:param shared_by: respondent_party_uuid
:type shared_by: uuid
:param session: db session
:param batch_number: batch_number
:type batch_number: uuid
:param is_transfer: True if the record is to transfer survey
:type is_transfer: bool
:rtype: void
"""
pending_share = PendingSurveys(
business_id=business_id,
survey_id=survey_id,
email_address=email_address,
shared_by=shared_by,
batch_no=batch_number,
is_transfer=is_transfer,
)
session.add(pending_share)
@with_db_session
def delete_pending_surveys(session):
"""
Deletes all the existing pending surveys which have expired
    :param session: A db session
"""
_expired_hrs = datetime.utcnow() - timedelta(seconds=float(current_app.config["EMAIL_TOKEN_EXPIRY"]))
pending_shares = session.query(PendingSurveys).filter(PendingSurveys.time_shared < _expired_hrs)
pending_shares.delete()
logger.info("Deletion complete")
@with_db_session
def get_unique_pending_surveys(is_transfer, session):
"""
    Gets unique pending shares (one per batch_no) which have passed the expiration duration
    :param is_transfer: if the request is to transfer surveys
    :type is_transfer: bool
    :param session: A db session
"""
_expired_hrs = datetime.utcnow() - timedelta(seconds=float(current_app.config["EMAIL_TOKEN_EXPIRY"]))
pending_shares_ready_for_deletion = (
session.query(PendingSurveys)
.filter(PendingSurveys.time_shared < _expired_hrs)
.filter(PendingSurveys.is_transfer == is_transfer)
.distinct(PendingSurveys.batch_no)
)
    return [record.to_pending_surveys_dict() for record in pending_shares_ready_for_deletion]
def validate_pending_survey_token(token):
"""
Validates the share survey token and returns the pending surveys against the batch number
    :param token: email verification token containing the batch number
    :return: list of pending surveys
"""
logger.info("Attempting to verify share/transfer survey", token=token)
try:
duration = current_app.config["EMAIL_TOKEN_EXPIRY"]
batch_no = uuid.UUID(decode_email_token(token, duration))
except SignatureExpired:
logger.info("Expired share/transfer survey token")
raise Conflict("Expired share/transfer survey token")
except (BadSignature, BadData):
logger.exception("Bad token in validate_pending_survey_token")
raise NotFound("Unknown batch number in token")
return get_pending_survey_by_batch_number(batch_no)
@with_db_session
def confirm_pending_survey(batch_no, session):
"""
    Confirms a share/transfer survey batch by creating a new db session
:param batch_no: share_survey batch number
:type batch_no: uuid
:param session: db session
:type session: session
"""
accept_pending_survey(session, batch_no)
def accept_pending_survey(session, batch_no, new_respondent=None):
"""
    Confirms share and transfer surveys:
      - creates Enrolment records
      - creates BusinessRespondent records
      - removes pending shares
      - for transfers, removes the originator's existing enrolment records and business association
:param: batch_no
:param: session
"""
logger.info("Attempting to confirm pending share survey", batch_no=batch_no)
pending_surveys = query_pending_survey_by_batch_no(batch_no, session)
if len(pending_surveys) == 0:
raise NotFound("Batch number does not exist")
pending_surveys_list = [pending_survey.to_pending_surveys_dict() for pending_survey in pending_surveys]
pending_surveys_is_transfer = pending_surveys_list[0].get("is_transfer", False)
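    # When no newly registered respondent is supplied, resolve the account from the email address on the batch.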
if not new_respondent:
respondent = get_respondent_by_email(pending_surveys_list[0]["email_address"])
new_respondent = query_respondent_by_party_uuid(respondent["id"], session)
for pending_survey in pending_surveys_list:
business_id = pending_survey["business_id"]
survey_id = pending_survey["survey_id"]
business_respondent = query_business_respondent_by_respondent_id_and_business_id(
business_id, new_respondent.id, session
)
if not business_respondent:
# Associate respondent with new business
business = query_business_by_party_uuid(business_id, session)
if not business:
logger.error("Could not find business", business_id=business_id)
raise InternalServerError("Could not locate business when creating business association")
business_respondent = BusinessRespondent(business=business, respondent=new_respondent)
session.add(business_respondent)
if not is_already_enrolled(survey_id, new_respondent.id, business_id, session):
try:
with session.begin_nested():
enrolment = Enrolment(
business_respondent=business_respondent,
survey_id=pending_survey["survey_id"],
status=EnrolmentStatus.ENABLED,
)
session.add(enrolment)
            except SQLAlchemyError:
logger.exception("Unable to confirm pending survey", batch_no=batch_no)
else:
logger.info(
"Ignoring respondent as already enrolled",
business_id=business_id,
survey_id=survey_id,
email=pending_surveys_list[0]["email_address"],
)
delete_pending_survey_by_batch_no(batch_no, session)
session.commit()
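    # For transfer requests the originator gives up access: drop their enrolment (and the business
    # association, if no other surveys remain) for every transferred survey.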
if pending_surveys_is_transfer:
try:
logger.info(
"About to remove the originator association to the business",
business_id=business_id,
party_uuid=pending_surveys_list[0]["shared_by"],
)
remove_transfer_originator_business_association(pending_surveys_list)
except SQLAlchemyError as e:
logger.exception(
"Unable to remove previous enrolment for originator",
batch_no=batch_no,
party_uuid=pending_surveys_list[0]["shared_by"],
)
raise e
if pending_surveys_is_transfer:
send_pending_surveys_confirmation_email(pending_surveys_list, "transfer_survey_access_confirmation")
else:
send_pending_surveys_confirmation_email(pending_surveys_list, "share_survey_access_confirmation")
@with_db_session
def remove_transfer_originator_business_association(pending_surveys_list, session):
"""
De-register transfer originator from existing business association.
:param pending_surveys_list: Pending surveys list
:type pending_surveys_list: List
    :param session: A db session
:return: On success it returns None, on failure will raise one of many different exceptions
"""
party_id = pending_surveys_list[0]["shared_by"]
logger.info("Starting to de register transfer originator from business", party_id=party_id)
transferred_by_respondent = get_respondent_by_id(str(party_id))
respondent = get_single_respondent_by_email(transferred_by_respondent["emailAddress"], session)
for pending_survey in pending_surveys_list:
business_id = pending_survey["business_id"]
survey_id = pending_survey["survey_id"]
        logger.info(
            "De-registering transfer originator from business",
            party_id=party_id,
            business_id=business_id,
            survey_id=survey_id,
        )
existing_enrolment = (
session.query(Enrolment)
.filter(Enrolment.respondent_id == respondent.id)
.filter(Enrolment.business_id == business_id)
.filter(Enrolment.survey_id == survey_id)
)
if existing_enrolment:
existing_enrolment.delete()
# check if there is existing enrolment on a different survey with the same business
additional_enrolment_on_business = (
session.query(Enrolment)
.filter(Enrolment.respondent_id == respondent.id)
.filter(Enrolment.business_id == business_id)
.filter(Enrolment.survey_id != survey_id)
.all()
)
if not additional_enrolment_on_business:
session.query(BusinessRespondent).filter(BusinessRespondent.respondent_id == respondent.id).filter(
BusinessRespondent.business_id == business_id
).delete()
logger.info(
"Un enrolled transfer originator for the surveys transferred",
party_id=party_id,
business_id=business_id,
survey_id=survey_id,
)
def is_already_enrolled(survey_id, respondent_pk, business_id, session):
"""
returns if enrollment already exists
:param survey_id
:param respondent_pk
:param business_id
:param session
:return bool
"""
enrolment = (
session.query(Enrolment)
.filter(
and_(
Enrolment.survey_id == survey_id,
Enrolment.business_id == business_id,
Enrolment.respondent_id == respondent_pk,
)
)
.first()
)
    return enrolment is not None
@with_db_session
def get_pending_survey_by_batch_number(batch_number, session):
"""
gets list of share surveys against the batch number
:param batch_number: share survey batch number
:type batch_number: uuid
:param session: db session
:type session: db session
:return: list of pending share surveys
:rtype: list
"""
pending_surveys = query_pending_survey_by_batch_no(batch_number, session)
if len(pending_surveys) == 0:
raise NotFound("Batch number does not exist")
    return [pending_survey.to_pending_surveys_dict() for pending_survey in pending_surveys]
@with_db_session
def get_pending_survey_by_originator_respondent_id(respondent_party_id: str, session):
"""
gets list of pending surveys against the respondent party id
:param respondent_party_id: respondent party id
:type respondent_party_id: str
:param session: db session
:type session: db session
:return: list of pending share surveys
:rtype: list
"""
pending_surveys = query_pending_survey_by_shared_by(respondent_party_id, session)
    return [pending_survey.to_pending_surveys_dict() for pending_survey in pending_surveys]
# flake8: noqa: C901
@with_quiet_db_session
def post_pending_survey_respondent(party, session):
"""
Register respondent for share survey/transfer survey.
This will not create a pending enrolment and will make the respondent active
:param party: respondent to be created details
:param session
:return: created respondent
"""
# Validation, curation and checks
expected = ("emailAddress", "firstName", "lastName", "password", "telephone", "batch_no")
v = Validator(Exists(*expected))
if "id" in party:
# Note: there's not strictly a requirement to be able to pass in a UUID, this is currently supported to
# aid with testing.
logger.info("'id' in respondent post message")
try:
uuid.UUID(party["id"])
except ValueError:
logger.info("Invalid respondent id type", respondent_id=party["id"])
raise BadRequest(f"'{party['id']}' is not a valid UUID format for property 'id'")
if not v.validate(party):
logger.debug(v.errors)
raise BadRequest(v.errors)
# Chain of enrolment processes
translated_party = {
"party_uuid": party.get("id") or str(uuid.uuid4()),
"email_address": party["emailAddress"].lower(),
"first_name": party["firstName"],
"last_name": party["lastName"],
"telephone": party["telephone"],
"status": RespondentStatus.ACTIVE,
}
# This might look odd but it's done in the interest of keeping the code working in the same way.
# If raise_for_status in the function raises an error, it would've been caught by @with_db_session,
# rolled back the db and raised it. Whether that's something we want is another question.
try:
# create new share/transfer survey respondent
respondent = _add_pending_survey_respondent(session, translated_party, party)
respondent_dict = respondent.to_respondent_dict()
# Verify created user so that if the accept share fails the account is not in inconsistent state
set_user_verified(respondent.email_address)
# Accept share/transfer surveys surveys
accept_pending_survey(session, uuid.UUID(party["batch_no"]), respondent)
except HTTPError:
logger.error("adding new share survey/transfer survey respondent raised an HTTPError", exc_info=True)
session.rollback()
raise
except SQLAlchemyError:
logger.exception("adding new share survey/transfer survey respondent raise an SQL Error")
session.rollback()
raise
return respondent_dict
def _add_pending_survey_respondent(session, translated_party, party):
"""
Create and persist new party entities and attempt to register with auth service.
    Auth failures lead to party entities being rolled back.
    The context manager commits the sub-session.
    If the final commit fails an account will exist in auth but not in party; this is unlikely but possible.
:param session: db session
:type session: db session
:param translated_party: respondent party dict
:type translated_party: dict
:param party: respondent party
:type party: dict
"""
try:
with session.begin_nested():
# Use a sub transaction to store party data
# Context manager will manage commits/rollback
# Create the enrolment respondent-business-survey associations
respondent = Respondent(**translated_party)
session.add(respondent)
    except SQLAlchemyError:
        logger.exception("Party service db post respondent caused exception", party_uuid=translated_party["party_uuid"])
        raise  # re-raise the exception, aimed at the generic handler
else:
# Register user to auth server after successful commit
oauth_response = OauthClient().create_account(party["emailAddress"].lower(), party["password"])
if not oauth_response.status_code == 201:
logger.info(
"Registering respondent auth service responded with",
status=oauth_response.status_code,
content=oauth_response.content,
)
session.rollback() # Rollback to SAVEPOINT
oauth_response.raise_for_status()
logger.info("New user has been registered via the auth-service", party_uuid=translated_party["party_uuid"])
return respondent
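# Standalone variant of the auth-registration step performed inside _add_pending_survey_respondent.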
def _register_respondent_to_auth(party, session, translated_party):
# Register user to auth server after successful commit
oauth_response = OauthClient().create_account(party["emailAddress"].lower(), party["password"])
if not oauth_response.status_code == 201:
logger.info(
"Registering respondent auth service responded with",
status=oauth_response.status_code,
content=oauth_response.content,
)
session.rollback() # Rollback to SAVEPOINT
oauth_response.raise_for_status()
logger.info("New user has been registered via the auth-service", party_uuid=translated_party["party_uuid"])
def send_pending_surveys_confirmation_email(pending_surveys_list, confirmation_email_template):
"""
    Sends a confirmation email to the originator of a share/transfer request
    :param pending_surveys_list: list of pending survey dicts sharing the same batch_no
    :type pending_surveys_list: list
    :param confirmation_email_template: name of the notify email template to use
    :type confirmation_email_template: str
"""
batch_no = str(pending_surveys_list[0]["batch_no"])
logger.info("sending confirmation email for pending share", batch_no=batch_no)
pending_surveys_is_transfer = pending_surveys_list[0].get("is_transfer", False)
try:
respondent = get_respondent_by_id(str(pending_surveys_list[0]["shared_by"]))
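        # NOTE: the template name passed in is overridden here according to whether the batch is a transfer.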
if pending_surveys_is_transfer:
confirmation_email_template = "transfer_survey_access_confirmation"
else:
confirmation_email_template = "share_survey_access_confirmation"
business_list = []
business_id_list = {pending_surveys["business_id"] for pending_surveys in pending_surveys_list}
for business_id in business_id_list:
business = get_business_by_id(str(business_id))
business_list.append(business["name"])
personalisation = {
"NAME": respondent["firstName"],
"COLLEAGUE_EMAIL_ADDRESS": pending_surveys_list[0]["email_address"],
"BUSINESSES": business_list,
}
NotifyGateway(current_app.config).request_to_notify(
email=respondent["emailAddress"], template_name=confirmation_email_template, personalisation=personalisation
)
logger.info("confirmation email for pending share send successfully", batch_no=batch_no)
    # A broad Exception is caught to abide by the notify controller. At this point the pending share has already
    # been accepted, so a failure in the email phase should not disrupt the flow.
    except Exception:  # noqa
logger.error(
"Error sending confirmation email for pending share",
batch_no=batch_no,
email=pending_surveys_list[0]["shared_by"],
)
| [
"[email protected]"
] |