| Column | Dtype | Lengths / values |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
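The records below follow this schema. As a hedged sketch, records of this shape can be streamed with the `datasets` library instead of downloading every shard; the dataset identifier used here is a placeholder, not the real one.

```python
# Placeholder dataset id below - substitute the real identifier. Streaming
# avoids materialising the full `content` column (up to 10.2M chars) locally.
from datasets import load_dataset

ds = load_dataset("org/source-code-dataset", split="train", streaming=True)

for row in ds.take(2):
    # Each record carries repository metadata plus the full file text in `content`.
    print(row["repo_name"], row["path"], row["license_type"], len(row["content"]))
```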
6e612a774a20e51feed223e0a74a18ebcf53f4a2 | 76fa4bc242502bcd9dfe1053c964318b94acc6d8 | /matplotlib bar chart/df_barplot.py | fc8ef89725b545217214b8af713ce4b4e05eb56a | [] | no_license | phani-1995/Week3-python_libraries | 720156098ccab5301a58e39a4dd7af5a19a08008 | 1347b8dfd4980b37471a54ce991c967fdcb32e2b | refs/heads/master | 2021-04-01T17:42:54.855954 | 2020-03-23T06:50:18 | 2020-03-23T06:50:18 | 248,204,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Five cities (rows) by five categories (columns) of sample values.
data = np.array([[2,4,6,8,10],[4,2,4,2,2],[8,3,7,6,4],[5,4,4,4,3],[6,6,8,6,2]])
dataFrame = pd.DataFrame(data, columns=['a','b','c','d','e'], index=["Delhi",'Mumbai','Hyderabad','Pune','Bengaluru'])
dataFrame.plot(kind='bar')  # one group of bars per city (index label)
plt.show() | [
"[email protected]"
] | |
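A slightly expanded sketch of the same idea, assuming only pandas and matplotlib: `DataFrame.plot(kind='bar')` returns a matplotlib axes object, so axis labels and the legend can be set explicitly.

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

data = np.array([[2, 4, 6, 8, 10],
                 [4, 2, 4, 2, 2],
                 [8, 3, 7, 6, 4]])
df = pd.DataFrame(data, columns=list("abcde"), index=["Delhi", "Mumbai", "Hyderabad"])

ax = df.plot(kind="bar", rot=0)   # one group of bars per city (index label)
ax.set_xlabel("City")
ax.set_ylabel("Value")
ax.legend(title="Column")
plt.tight_layout()
plt.show()
```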
d700bfe0470ed942dca42727b21481b2d69a4bcd | 5e324af46c554b88b97ee26886b05c88457ff0f5 | /franchises/models/franchise.py | 8d73000fdaadc7d85bb373e0c6cadd7335661a11 | [] | no_license | doubleclickdetroit/dindintonight | 1bda8851e49782d4dc16ca77d46e4b1f431c2b52 | 9769e1a96730b02511d25af8828b075dff5c35b5 | refs/heads/master | 2016-08-04T22:01:08.083566 | 2014-07-26T18:58:58 | 2014-07-26T18:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | from autoslug import AutoSlugField
from django.db import models
from django.db.models.signals import post_save
from core.models import BaseModel
class Franchise(BaseModel):
id = models.AutoField(primary_key=True)
owner = models.OneToOneField('users.User', related_name='franchise_owners')
slug = AutoSlugField(populate_from='name', unique=True, db_index=True)
name = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
app_label = 'franchises'
db_table = 'franchises'
verbose_name = 'Franchise'
verbose_name_plural = 'Franchises'
def __unicode__(self):
return '{0} {1}'.format(self.owner.first_name, self.owner.last_name)
def franchise_post_save_handler(sender, instance, **kwargs):
pass
post_save.connect(franchise_post_save_handler, sender=Franchise)
| [
"[email protected]"
] | |
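The file above wires `franchise_post_save_handler` to Django's `post_save` signal. A framework-free sketch of that mechanism (illustrative only, not Django's implementation) shows why the handler receives `sender` and `instance` keyword arguments.

```python
class Signal:
    """Tiny stand-in for django.db.models.signals.post_save."""

    def __init__(self):
        self._receivers = []

    def connect(self, receiver, sender=None):
        self._receivers.append((receiver, sender))

    def send(self, sender, **kwargs):
        for receiver, wanted_sender in self._receivers:
            if wanted_sender is None or wanted_sender is sender:
                receiver(sender=sender, **kwargs)


post_save = Signal()


class Franchise:
    def save(self):
        # ... persist the row, then notify listeners, as Django's Model.save() does ...
        post_save.send(sender=Franchise, instance=self)


def franchise_post_save_handler(sender, instance, **kwargs):
    print("saved:", instance)


post_save.connect(franchise_post_save_handler, sender=Franchise)
Franchise().save()
```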
fc0b1a61451fe1c4b893d8ea586e3c6d8e04d357 | 7b2a3ea853dc44aea204f02abedaad6a2029f4ff | /inv_test.py | 46e208002c5331c95094449e682798e59a78e53a | [] | no_license | NoisyLeon/SW4Py | 7d45503282dc988b5f886c039706bd79fdd6b339 | 7029f18eb526bcb46b4aa244da1e088ca57a56aa | refs/heads/master | 2020-12-22T14:57:11.265397 | 2016-12-20T18:27:18 | 2016-12-20T18:27:18 | 56,792,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | # import obspy
#
# net=obspy.core.inventory.network.Network('SW4', ftanparams=[])
# inv=obspy.core.inventory.inventory.Inventory(networks=[net],source='CU')
# sta=obspy.core.inventory.ftanparam.Station('aa',13,132.4214,0.0)
| [
"[email protected]"
] | |
af2e9492dc28f8de8f275110fb743e9b78dbf797 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/vcs/__init__.py | 9cba76464ca1a26eb69d5c9dbf37f46eb9dc78f4 | [
"MIT"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 17,278 | py | """Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
import sys
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.exceptions import BadCommand
from pip._internal.utils.misc import (
display_path, backup_dir, call_subprocess, rmtree, ask_path_exists,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import ( # noqa: F401
Any, Dict, Iterable, List, Mapping, Optional, Text, Tuple, Type
)
from pip._internal.utils.ui import SpinnerInterface # noqa: F401
AuthInfo = Tuple[Optional[str], Optional[str]]
__all__ = ['vcs']
logger = logging.getLogger(__name__)
class RemoteNotFoundError(Exception):
pass
class RevOptions(object):
"""
Encapsulates a VCS-specific revision to install, along with any VCS
install options.
Instances of this class should be treated as if immutable.
"""
def __init__(self, vcs, rev=None, extra_args=None):
# type: (VersionControl, Optional[str], Optional[List[str]]) -> None
"""
Args:
vcs: a VersionControl object.
rev: the name of the revision to install.
extra_args: a list of extra options.
"""
if extra_args is None:
extra_args = []
self.extra_args = extra_args
self.rev = rev
self.vcs = vcs
def __repr__(self):
return '<RevOptions {}: rev={!r}>'.format(self.vcs.name, self.rev)
@property
def arg_rev(self):
# type: () -> Optional[str]
if self.rev is None:
return self.vcs.default_arg_rev
return self.rev
def to_args(self):
# type: () -> List[str]
"""
Return the VCS-specific command arguments.
"""
args = [] # type: List[str]
rev = self.arg_rev
if rev is not None:
args += self.vcs.get_base_rev_args(rev)
args += self.extra_args
return args
def to_display(self):
# type: () -> str
if not self.rev:
return ''
return ' (to revision {})'.format(self.rev)
def make_new(self, rev):
# type: (str) -> RevOptions
"""
Make a copy of the current instance, but with a new rev.
Args:
rev: the name of the revision for the new object.
"""
return self.vcs.make_rev_options(rev, extra_args=self.extra_args)
class VcsSupport(object):
_registry = {} # type: Dict[str, Type[VersionControl]]
schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
def __init__(self):
# type: () -> None
# Register more schemes with urlparse for various version control
# systems
urllib_parse.uses_netloc.extend(self.schemes)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(self.schemes)
super(VcsSupport, self).__init__()
def __iter__(self):
return self._registry.__iter__()
@property
def backends(self):
# type: () -> List[Type[VersionControl]]
return list(self._registry.values())
@property
def dirnames(self):
# type: () -> List[str]
return [backend.dirname for backend in self.backends]
@property
def all_schemes(self):
# type: () -> List[str]
schemes = [] # type: List[str]
for backend in self.backends:
schemes.extend(backend.schemes)
return schemes
def register(self, cls):
# type: (Type[VersionControl]) -> None
if not hasattr(cls, 'name'):
logger.warning('Cannot register VCS %s', cls.__name__)
return
if cls.name not in self._registry:
self._registry[cls.name] = cls
logger.debug('Registered VCS backend: %s', cls.name)
def unregister(self, cls=None, name=None):
# type: (Optional[Type[VersionControl]], Optional[str]) -> None
if name in self._registry:
del self._registry[name]
elif cls in self._registry.values():
del self._registry[cls.name]
else:
logger.warning('Cannot unregister because no class or name given')
def get_backend_type(self, location):
# type: (str) -> Optional[Type[VersionControl]]
"""
Return the type of the version control backend if found at given
location, e.g. vcs.get_backend_type('/path/to/vcs/checkout')
"""
for vc_type in self._registry.values():
if vc_type.controls_location(location):
logger.debug('Determine that %s uses VCS: %s',
location, vc_type.name)
return vc_type
return None
def get_backend(self, name):
# type: (str) -> Optional[Type[VersionControl]]
name = name.lower()
if name in self._registry:
return self._registry[name]
return None
vcs = VcsSupport()
class VersionControl(object):
name = ''
dirname = ''
repo_name = ''
# List of supported schemes for this Version Control
schemes = () # type: Tuple[str, ...]
# Iterable of environment variable names to pass to call_subprocess().
unset_environ = () # type: Tuple[str, ...]
default_arg_rev = None # type: Optional[str]
def __init__(self, url=None, *args, **kwargs):
self.url = url
super(VersionControl, self).__init__(*args, **kwargs)
def get_base_rev_args(self, rev):
"""
Return the base revision arguments for a vcs command.
Args:
rev: the name of a revision to install. Cannot be None.
"""
raise NotImplementedError
def make_rev_options(self, rev=None, extra_args=None):
# type: (Optional[str], Optional[List[str]]) -> RevOptions
"""
Return a RevOptions object.
Args:
rev: the name of a revision to install.
extra_args: a list of extra options.
"""
return RevOptions(self, rev, extra_args=extra_args)
@classmethod
def _is_local_repository(cls, repo):
# type: (str) -> bool
"""
posix absolute paths start with os.path.sep,
win32 ones start with drive (like c:\\folder)
"""
drive, tail = os.path.splitdrive(repo)
return repo.startswith(os.path.sep) or bool(drive)
def export(self, location):
"""
Export the repository at the url to the destination location
        i.e. only download the files, without vcs information
"""
raise NotImplementedError
def get_netloc_and_auth(self, netloc, scheme):
"""
Parse the repository URL's netloc, and return the new netloc to use
along with auth information.
Args:
netloc: the original repository URL netloc.
scheme: the repository URL's scheme without the vcs prefix.
This is mainly for the Subversion class to override, so that auth
information can be provided via the --username and --password options
instead of through the URL. For other subclasses like Git without
such an option, auth information must stay in the URL.
Returns: (netloc, (username, password)).
"""
return netloc, (None, None)
def get_url_rev_and_auth(self, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
"""
Parse the repository URL to use, and return the URL, revision,
and auth info to use.
Returns: (url, rev, (username, password)).
"""
scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
if '+' not in scheme:
raise ValueError(
"Sorry, {!r} is a malformed VCS url. "
"The format is <vcs>+<protocol>://<url>, "
"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
)
# Remove the vcs prefix.
scheme = scheme.split('+', 1)[1]
netloc, user_pass = self.get_netloc_and_auth(netloc, scheme)
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev, user_pass
def make_rev_args(self, username, password):
"""
Return the RevOptions "extra arguments" to use in obtain().
"""
return []
def get_url_rev_options(self, url):
# type: (str) -> Tuple[str, RevOptions]
"""
Return the URL and RevOptions object to use in obtain() and in
some cases export(), as a tuple (url, rev_options).
"""
url, rev, user_pass = self.get_url_rev_and_auth(url)
username, password = user_pass
extra_args = self.make_rev_args(username, password)
rev_options = self.make_rev_options(rev, extra_args=extra_args)
return url, rev_options
def normalize_url(self, url):
# type: (str) -> str
"""
Normalize a URL for comparison by unquoting it and removing any
trailing slash.
"""
return urllib_parse.unquote(url).rstrip('/')
def compare_urls(self, url1, url2):
# type: (str, str) -> bool
"""
Compare two repo URLs for identity, ignoring incidental differences.
"""
return (self.normalize_url(url1) == self.normalize_url(url2))
def fetch_new(self, dest, url, rev_options):
"""
Fetch a revision from a repository, in the case that this is the
first fetch from the repository.
Args:
dest: the directory to fetch the repository to.
rev_options: a RevOptions object.
"""
raise NotImplementedError
def switch(self, dest, url, rev_options):
"""
Switch the repo at ``dest`` to point to ``URL``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
def update(self, dest, url, rev_options):
"""
Update an already-existing repo to the given ``rev_options``.
Args:
rev_options: a RevOptions object.
"""
raise NotImplementedError
def is_commit_id_equal(self, dest, name):
"""
Return whether the id of the current commit equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
raise NotImplementedError
def obtain(self, dest):
# type: (str) -> None
"""
Install or update in editable mode the package represented by this
VersionControl object.
Args:
dest: the repository directory in which to install or update.
"""
url, rev_options = self.get_url_rev_options(self.url)
if not os.path.exists(dest):
self.fetch_new(dest, url, rev_options)
return
rev_display = rev_options.to_display()
if self.is_repository_directory(dest):
existing_url = self.get_remote_url(dest)
if self.compare_urls(existing_url, url):
logger.debug(
'%s in %s exists, and has correct URL (%s)',
self.repo_name.title(),
display_path(dest),
url,
)
if not self.is_commit_id_equal(dest, rev_options.rev):
logger.info(
'Updating %s %s%s',
display_path(dest),
self.repo_name,
rev_display,
)
self.update(dest, url, rev_options)
else:
logger.info('Skipping because already up-to-date.')
return
logger.warning(
'%s %s in %s exists with URL %s',
self.name,
self.repo_name,
display_path(dest),
existing_url,
)
prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
('s', 'i', 'w', 'b'))
else:
logger.warning(
'Directory %s already exists, and is not a %s %s.',
dest,
self.name,
self.repo_name,
)
# https://github.com/python/mypy/issues/1174
prompt = ('(i)gnore, (w)ipe, (b)ackup ', # type: ignore
('i', 'w', 'b'))
logger.warning(
'The plan is to install the %s repository %s',
self.name,
url,
)
response = ask_path_exists('What to do? %s' % prompt[0], prompt[1])
if response == 'a':
sys.exit(-1)
if response == 'w':
logger.warning('Deleting %s', display_path(dest))
rmtree(dest)
self.fetch_new(dest, url, rev_options)
return
if response == 'b':
dest_dir = backup_dir(dest)
logger.warning(
'Backing up %s to %s', display_path(dest), dest_dir,
)
shutil.move(dest, dest_dir)
self.fetch_new(dest, url, rev_options)
return
# Do nothing if the response is "i".
if response == 's':
logger.info(
'Switching %s %s to %s%s',
self.repo_name,
display_path(dest),
url,
rev_display,
)
self.switch(dest, url, rev_options)
def unpack(self, location):
# type: (str) -> None
"""
Clean up current location and download the url repository
(and vcs infos) into location
"""
if os.path.exists(location):
rmtree(location)
self.obtain(location)
@classmethod
def get_src_requirement(cls, location, project_name):
"""
Return a string representing the requirement needed to
redownload the files currently present in location, something
like:
{repository_url}@{revision}#egg={project_name}-{version_identifier}
"""
raise NotImplementedError
@classmethod
def get_remote_url(cls, location):
"""
Return the url used at location
Raises RemoteNotFoundError if the repository does not have a remote
url configured.
"""
raise NotImplementedError
@classmethod
def get_revision(cls, location):
"""
Return the current commit id of the files at the given location.
"""
raise NotImplementedError
@classmethod
def run_command(
cls,
cmd, # type: List[str]
show_stdout=True, # type: bool
cwd=None, # type: Optional[str]
on_returncode='raise', # type: str
extra_ok_returncodes=None, # type: Optional[Iterable[int]]
command_desc=None, # type: Optional[str]
extra_environ=None, # type: Optional[Mapping[str, Any]]
spinner=None # type: Optional[SpinnerInterface]
):
# type: (...) -> Optional[Text]
"""
Run a VCS subcommand
This is simply a wrapper around call_subprocess that adds the VCS
command name, and checks that the VCS is available
"""
cmd = [cls.name] + cmd
try:
return call_subprocess(cmd, show_stdout, cwd,
on_returncode=on_returncode,
extra_ok_returncodes=extra_ok_returncodes,
command_desc=command_desc,
extra_environ=extra_environ,
unset_environ=cls.unset_environ,
spinner=spinner)
except OSError as e:
# errno.ENOENT = no such file or directory
# In other words, the VCS executable isn't available
if e.errno == errno.ENOENT:
raise BadCommand(
'Cannot find command %r - do you have '
'%r installed and in your '
'PATH?' % (cls.name, cls.name))
else:
raise # re-raise exception if a different error occurred
@classmethod
def is_repository_directory(cls, path):
# type: (str) -> bool
"""
Return whether a directory path is a repository directory.
"""
logger.debug('Checking in %s for %s (%s)...',
path, cls.dirname, cls.name)
return os.path.exists(os.path.join(path, cls.dirname))
@classmethod
def controls_location(cls, location):
# type: (str) -> bool
"""
Check if a location is controlled by the vcs.
It is meant to be overridden to implement smarter detection
mechanisms for specific vcs.
This can do more than is_repository_directory() alone. For example,
the Git override checks that Git is actually available.
"""
return cls.is_repository_directory(location)
| [
"[email protected]"
] | |
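The module above registers `VersionControl` subclasses in a `VcsSupport` registry and picks a backend either by name or by probing a directory for a marker like `.git`. A stand-alone sketch of that registry pattern (illustrative only, not pip's real classes):

```python
import os


class FakeGit:
    """Toy backend exposing the two attributes the registry below relies on."""
    name = "git"
    dirname = ".git"

    @classmethod
    def controls_location(cls, location):
        return os.path.exists(os.path.join(location, cls.dirname))


class Registry:
    def __init__(self):
        self._registry = {}

    def register(self, cls):
        self._registry[cls.name] = cls

    def get_backend(self, name):
        return self._registry.get(name.lower())

    def get_backend_type(self, location):
        # Probe each backend, mirroring VcsSupport.get_backend_type() above.
        for cls in self._registry.values():
            if cls.controls_location(location):
                return cls
        return None


registry = Registry()
registry.register(FakeGit)
print(registry.get_backend("git"))       # <class '__main__.FakeGit'> when run as a script
print(registry.get_backend_type("."))    # FakeGit if ./.git exists, otherwise None
```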
9b52f8728284f014f32195d6f50595415bcec9bb | cf54adda6874a4256401e9e4eb28f353b28ae74b | /python-modules/python_call_django_view.py | f56832338684b861081db955189ae868d9eae874 | [] | no_license | oraant/study | c0ea4f1a7a8c3558c0eac4b4108bc681a54e8ebf | 7bce20f2ea191d904b4e932c8d0abe1b70a54f7e | refs/heads/master | 2020-09-23T02:08:07.279705 | 2016-11-21T06:30:26 | 2016-11-21T06:30:26 | 66,995,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | # coding:utf-8
# # tree /home/oraant/test/django_celery/|grep -v .pyc
# /home/oraant/test/django_celery/
# ├── django_celery
# │ ├── __init__.py
# │ ├── settings.py
# │ ├── urls.py
# │ ├── wsgi.py
# ├── manage.py
# └── myapp
# ├── admin.py
# ├── apps.py
# ├── __init__.py
# ├── migrations
# │ ├── __init__.py
# ├── models.py
# ├── tests.py
# └── views.py
#
# 3 directories, 25 files
import sys
sys.path.append('/home/oraant/test/django_celery/')
from myapp.views import test_add
print(test_add(1, 2))
| [
"[email protected]"
] | |
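The snippet above targets an old Django/Python 2 setup. A hedged sketch of the same idea for Django 1.7 and later (the paths and module names reuse the hypothetical project layout shown in the comments above): the settings module must be configured and `django.setup()` called before importing project code.

```python
import os
import sys

import django

sys.path.append('/home/oraant/test/django_celery/')   # hypothetical project path from above
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_celery.settings')
django.setup()

from myapp.views import test_add  # imported after setup() on purpose

print(test_add(1, 2))
```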
33a26a9eff1d85003c886ec1259d2874765ba03b | a2b6bc9bdd2bdbe5871edb613065dd2397175cb3 | /中等/旋转图像.py | 239a028395365d7e1f8543fcf746f87fc6437301 | [] | no_license | Asunqingwen/LeetCode | ed8d2043a31f86e9e256123439388d7d223269be | b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee | refs/heads/master | 2022-09-26T01:46:59.790316 | 2022-09-01T08:20:37 | 2022-09-01T08:20:37 | 95,668,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | '''
You are given an n x n 2D matrix representing an image.
Rotate the image clockwise by 90 degrees.
Note:
You must rotate the image in place, i.e. modify the input 2D matrix directly. Do not allocate another matrix to perform the rotation.
Example 1:
Given matrix =
[
[1,2,3],
[4,5,6],
[7,8,9]
],
rotate the input matrix in place so that it becomes:
[
[7,4,1],
[8,5,2],
[9,6,3]
]
Example 2:
Given matrix =
[
[ 5, 1, 9,11],
[ 2, 4, 8,10],
[13, 3, 6, 7],
[15,14,12,16]
],
rotate the input matrix in place so that it becomes:
[
[15,13, 2, 5],
[14, 3, 4, 1],
[12, 6, 8, 9],
[16, 7,10,11]
]
from typing import List
class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
row = len(matrix)
for i in range(row // 2):
matrix[i][:], matrix[row - i - 1][:] = matrix[row - i - 1][:], matrix[i][:]
for i in range(row):
for j in range(i):
matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
if __name__ == '__main__':
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
sol = Solution()
sol.rotate(matrix)
| [
"[email protected]"
] | |
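A quick cross-check of the in-place rotation above (it flips the rows vertically and then transposes), assuming the `Solution` class is in scope: build the rotated matrix out of place with `zip()` and compare.

```python
def rotated_copy(matrix):
    """Rotate 90 degrees clockwise out of place: reverse the rows, then zip columns."""
    return [list(row) for row in zip(*matrix[::-1])]


original = [[5, 1, 9, 11], [2, 4, 8, 10], [13, 3, 6, 7], [15, 14, 12, 16]]
expected = rotated_copy(original)

m = [row[:] for row in original]
Solution().rotate(m)
assert m == expected, (m, expected)
print(expected)   # [[15, 13, 2, 5], [14, 3, 4, 1], [12, 6, 8, 9], [16, 7, 10, 11]]
```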
df4bc3c52cb2cc13ff6155431b8a111077115ef7 | da6d44b06f631387739d04471920037e8541d6c0 | /problems/014.py | 8753c9f24c8c00abf2eddba5325e948652a085c7 | [
"MIT"
] | permissive | JoshKarpel/euler-python | f6d5d5551a0d77565c852e3eb1e89522675824ec | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | refs/heads/master | 2021-09-01T09:07:46.378352 | 2017-12-26T05:39:35 | 2017-12-26T05:39:35 | 64,712,642 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from problems import mymath, utils
@utils.memoize
def collatz_length(n):
if n == 1:
return 1
if n % 2 == 0:
        return 1 + collatz_length(n // 2)
else:
return 1 + collatz_length((3 * n) + 1)
def solve():
collatz_lengths = {x: collatz_length(x) for x in range(1, 1000001)}
return mymath.key_of_max_value(collatz_lengths)
if __name__ == '__main__':
print(solve())
| [
"[email protected]"
] | |
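An iterative variant of the same search, sketched without the repository's `mymath`/`utils` helpers: it avoids Python's recursion limit by walking each Collatz chain until it meets a known value and memoizing on the way back.

```python
def solve_iterative(limit=1_000_000):
    lengths = {1: 1}   # memo: starting number -> chain length

    def chain_length(n):
        path = []
        while n not in lengths:
            path.append(n)
            n = n // 2 if n % 2 == 0 else 3 * n + 1
        base = lengths[n]
        for i, value in enumerate(reversed(path), start=1):
            lengths[value] = base + i
        return lengths[path[0]] if path else base

    return max(range(1, limit), key=chain_length)


print(solve_iterative())   # 837799 for starting numbers under one million
```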
fc0dfd542cb1fc87198d882b23f32e2a923cb059 | 8822149855c27522b54b05f796e292c1c63dbdf6 | /mnist.py | 105022e5d79ea5317478d7612e35b04793373105 | [] | no_license | jaythaceo/TensorFlow-Tutorial | 3c33844b473e67c63bfa9992c124e22ac2a394c3 | b4eca4f3f25eeedd868ee2a0645eb617c1b3208a | refs/heads/master | 2021-06-27T01:38:49.942255 | 2017-02-04T23:09:51 | 2017-02-04T23:09:51 | 59,586,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,675 | py | # Copyright 2016 Jason "jaythaceo" Brooks. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Builds the MNIST network
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as is required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
batch_size = 128
test_size = 256
def init_weights(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.01))
def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
l1a = tf.nn.relu(tf.nn.conv2d(X, w, # l1a shape=(?, 28, 28, 32)
strides=[1, 1, 1, 1], padding='SAME'))
l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1], # l1 shape=(?, 14, 14, 32)
strides=[1, 2, 2, 1], padding='SAME')
l1 = tf.nn.dropout(l1, p_keep_conv)
l2a = tf.nn.relu(tf.nn.conv2d(l1, w2, # l2a shape=(?, 14, 14, 64)
strides=[1, 1, 1, 1], padding='SAME'))
l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1], # l2 shape=(?, 7, 7, 64)
strides=[1, 2, 2, 1], padding='SAME')
l2 = tf.nn.dropout(l2, p_keep_conv)
l3a = tf.nn.relu(tf.nn.conv2d(l2, w3, # l3a shape=(?, 7, 7, 128)
strides=[1, 1, 1, 1], padding='SAME'))
l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1], # l3 shape=(?, 4, 4, 128)
strides=[1, 2, 2, 1], padding='SAME')
l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]]) # reshape to (?, 2048)
l3 = tf.nn.dropout(l3, p_keep_conv)
l4 = tf.nn.relu(tf.matmul(l3, w4))
l4 = tf.nn.dropout(l4, p_keep_hidden)
pyx = tf.matmul(l4, w_o)
return pyx
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
trX = trX.reshape(-1, 28, 28, 1) # 28x28x1 input img
teX = teX.reshape(-1, 28, 28, 1) # 28x28x1 input img
X = tf.placeholder("float", [None, 28, 28, 1])
Y = tf.placeholder("float", [None, 10])
w = init_weights([3, 3, 1, 32]) # 3x3x1 conv, 32 outputs
w2 = init_weights([3, 3, 32, 64]) # 3x3x32 conv, 64 outputs
w3 = init_weights([3, 3, 64, 128]) # 3x3x32 conv, 128 outputs
w4 = init_weights([128 * 4 * 4, 625]) # FC 128 * 4 * 4 inputs, 625 outputs
w_o = init_weights([625, 10]) # FC 625 inputs, 10 outputs (labels)
p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)
# Launch the graph in a session
with tf.Session() as sess:
# you need to initialize all variables
tf.global_variables_initializer().run()
for i in range(100):
training_batch = zip(range(0, len(trX), batch_size),
range(batch_size, len(trX)+1, batch_size))
for start, end in training_batch:
sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
p_keep_conv: 0.8, p_keep_hidden: 0.5})
test_indices = np.arange(len(teX)) # Get A Test Batch
np.random.shuffle(test_indices)
test_indices = test_indices[0:test_size]
print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
sess.run(predict_op, feed_dict={X: teX[test_indices],
p_keep_conv: 1.0,
p_keep_hidden: 1.0})))
| [
"[email protected]"
] | |
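The reshape to `128 * 4 * 4` in the model above follows from SAME-padding size bookkeeping: each stride-1 convolution keeps the spatial size, and each 2x2, stride-2 max-pool halves it, rounding up. A small sketch of that calculation:

```python
import math


def same_pool(size, stride=2):
    # Output size of a SAME-padded pooling layer: ceil(input / stride).
    return math.ceil(size / stride)


size = 28
for layer in range(3):
    size = same_pool(size)
    print(f"after pool {layer + 1}: {size}x{size}")   # 14, 7, 4

print("flattened features:", 128 * size * size)       # 2048 == 128 * 4 * 4
```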
8a7fcd602ce6c36bfe796131b87836fae4d82507 | 04ebcbce9e6ba1329a080f6a970c92fa38bfd3ad | /wxVTKRenderWindowInteractor.py | b082593f5eab958cc247a4efb72b0cbd38ccb342 | [] | no_license | Garyfallidis/trn | a9400dfa6cf38887e8ba33a03bfdbc65222a82f6 | 558086a2c0c360dba9c204be35e9e206750fda5d | refs/heads/master | 2020-03-26T17:03:38.476085 | 2013-02-01T03:19:34 | 2013-02-01T03:19:34 | 2,917,853 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,333 | py | """
A VTK RenderWindowInteractor widget for wxPython.
Find wxPython info at http://wxPython.org
Created by Prabhu Ramachandran, April 2002
Based on wxVTKRenderWindow.py
Fixes and updates by Charl P. Botha 2003-2008
Updated to new wx namespace and some cleaning up by Andrea Gavana,
December 2006
"""
"""
Please see the example at the end of this file.
----------------------------------------
Creation:
wxVTKRenderWindowInteractor(parent, ID, stereo=0, [wx keywords]):
You should create a wx.PySimpleApp() or some other wx**App before
creating the window.
Behaviour:
Uses __getattr__ to make the wxVTKRenderWindowInteractor behave just
like a vtkGenericRenderWindowInteractor.
----------------------------------------
"""
# import usual libraries
import math, os, sys
import wx
import vtk
# wxPython 2.4.0.4 and newer prefers the use of True and False, standard
# booleans in Python 2.2 but not earlier. Here we define these values if
# they don't exist so that we can use True and False in the rest of the
# code. At the time of this writing, that happens exactly ONCE in
# CreateTimer()
try:
True
except NameError:
True = 1
False = 0
# a few configuration items, see what works best on your system
# Use GLCanvas as base class instead of wx.Window.
# This is sometimes necessary under wxGTK or the image is blank.
# (in wxWindows 2.3.1 and earlier, the GLCanvas had scroll bars)
baseClass = wx.Window
if wx.Platform == "__WXGTK__":
import wx.glcanvas
baseClass = wx.glcanvas.GLCanvas
# Keep capturing mouse after mouse is dragged out of window
# (in wxGTK 2.3.2 there is a bug that keeps this from working,
# but it is only relevant in wxGTK if there are multiple windows)
_useCapture = (wx.Platform == "__WXMSW__")
# end of configuration items
class EventTimer(wx.Timer):
"""Simple wx.Timer class.
"""
def __init__(self, iren):
"""Default class constructor.
@param iren: current render window
"""
wx.Timer.__init__(self)
self.iren = iren
def Notify(self):
""" The timer has expired.
"""
self.iren.TimerEvent()
class wxVTKRenderWindowInteractor(baseClass):
"""
A wxRenderWindow for wxPython.
Use GetRenderWindow() to get the vtkRenderWindow.
Create with the keyword stereo=1 in order to
generate a stereo-capable window.
"""
# class variable that can also be used to request instances that use
# stereo; this is overridden by the stereo=1/0 parameter. If you set
# it to True, the NEXT instantiated object will attempt to allocate a
# stereo visual. E.g.:
# wxVTKRenderWindowInteractor.USE_STEREO = True
# myRWI = wxVTKRenderWindowInteractor(parent, -1)
USE_STEREO = False
def __init__(self, parent, ID, *args, **kw):
"""Default class constructor.
@param parent: parent window
@param ID: window id
@param **kw: wxPython keywords (position, size, style) plus the
'stereo' keyword
"""
# private attributes
self.__RenderWhenDisabled = 0
# First do special handling of some keywords:
# stereo, position, size, style
stereo = 0
if kw.has_key('stereo'):
if kw['stereo']:
stereo = 1
del kw['stereo']
elif self.USE_STEREO:
stereo = 1
position, size = wx.DefaultPosition, wx.DefaultSize
if kw.has_key('position'):
position = kw['position']
del kw['position']
if kw.has_key('size'):
size = kw['size']
del kw['size']
# wx.WANTS_CHARS says to give us e.g. TAB
# wx.NO_FULL_REPAINT_ON_RESIZE cuts down resize flicker under GTK
style = wx.WANTS_CHARS | wx.NO_FULL_REPAINT_ON_RESIZE
if kw.has_key('style'):
style = style | kw['style']
del kw['style']
# the enclosing frame must be shown under GTK or the windows
# don't connect together properly
if wx.Platform != '__WXMSW__':
l = []
p = parent
while p: # make a list of all parents
l.append(p)
p = p.GetParent()
l.reverse() # sort list into descending order
for p in l:
p.Show(1)
# code added by cpbotha to enable stereo correctly where the user
# requests this; remember that the glXContext in this case is NOT
# allocated by VTK, but by WX, hence all of this.
if stereo and baseClass.__name__ == 'GLCanvas':
# initialize GLCanvas with correct attriblist for stereo
attribList = [wx.glcanvas.WX_GL_RGBA,
wx.glcanvas.WX_GL_MIN_RED, 1,
wx.glcanvas.WX_GL_MIN_GREEN, 1,
wx.glcanvas.WX_GL_MIN_BLUE, 1,
wx.glcanvas.WX_GL_DEPTH_SIZE, 1,
wx.glcanvas.WX_GL_DOUBLEBUFFER,
wx.glcanvas.WX_GL_STEREO]
try:
baseClass.__init__(self, parent, ID, position, size, style,
attribList=attribList)
except wx.PyAssertionError:
# stereo visual couldn't be allocated, so we go back to default
baseClass.__init__(self, parent, ID, position, size, style)
# and make sure everyone knows about it
stereo = 0
else:
baseClass.__init__(self, parent, ID, position, size, style)
# create the RenderWindow and initialize it
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow( vtk.vtkRenderWindow() )
self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
self.CursorChangedEvent)
try:
self._Iren.GetRenderWindow().SetSize(size.width, size.height)
except AttributeError:
self._Iren.GetRenderWindow().SetSize(size[0], size[1])
if stereo:
self._Iren.GetRenderWindow().StereoCapableWindowOn()
self._Iren.GetRenderWindow().SetStereoTypeToCrystalEyes()
self.__handle = None
self.BindEvents()
# with this, we can make sure that the reparenting logic in
# Render() isn't called before the first OnPaint() has
# successfully been run (and set up the VTK/WX display links)
self.__has_painted = False
# set when we have captured the mouse.
self._own_mouse = False
# used to store WHICH mouse button led to mouse capture
self._mouse_capture_button = 0
# A mapping for cursor changes.
self._cursor_map = {0: wx.CURSOR_ARROW, # VTK_CURSOR_DEFAULT
1: wx.CURSOR_ARROW, # VTK_CURSOR_ARROW
2: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZENE
3: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZENWSE
4: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZESW
5: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZESE
6: wx.CURSOR_SIZENS, # VTK_CURSOR_SIZENS
7: wx.CURSOR_SIZEWE, # VTK_CURSOR_SIZEWE
8: wx.CURSOR_SIZING, # VTK_CURSOR_SIZEALL
9: wx.CURSOR_HAND, # VTK_CURSOR_HAND
10: wx.CURSOR_CROSS, # VTK_CURSOR_CROSSHAIR
}
def BindEvents(self):
"""Binds all the necessary events for navigation, sizing,
drawing.
"""
# refresh window by doing a Render
self.Bind(wx.EVT_PAINT, self.OnPaint)
# turn off background erase to reduce flicker
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None)
# Bind the events to the event converters
self.Bind(wx.EVT_RIGHT_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_LEFT_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_MIDDLE_DOWN, self.OnButtonDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnButtonUp)
self.Bind(wx.EVT_LEFT_UP, self.OnButtonUp)
self.Bind(wx.EVT_MIDDLE_UP, self.OnButtonUp)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
# If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions
# of all characters are always returned. EVT_CHAR also performs
# other necessary keyboard-dependent translations.
self.Bind(wx.EVT_CHAR, self.OnKeyDown)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.Bind(wx.EVT_SIZE, self.OnSize)
# the wx 2.8.7.1 documentation states that you HAVE to handle
# this event if you make use of CaptureMouse, which we do.
if _useCapture and hasattr(wx, 'EVT_MOUSE_CAPTURE_LOST'):
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST,
self.OnMouseCaptureLost)
def __getattr__(self, attr):
"""Makes the object behave like a
vtkGenericRenderWindowInteractor.
"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError, self.__class__.__name__ + \
" has no attribute named " + attr
def CreateTimer(self, obj, evt):
""" Creates a timer.
"""
self._timer = EventTimer(self)
self._timer.Start(10, True)
def DestroyTimer(self, obj, evt):
"""The timer is a one shot timer so will expire automatically.
"""
return 1
def _CursorChangedEvent(self, obj, evt):
"""Change the wx cursor if the renderwindow's cursor was
changed.
"""
cur = self._cursor_map[obj.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
def CursorChangedEvent(self, obj, evt):
"""Called when the CursorChangedEvent fires on the render
window."""
# This indirection is needed since when the event fires, the
# current cursor is not yet set so we defer this by which time
# the current cursor should have been set.
wx.CallAfter(self._CursorChangedEvent, obj, evt)
def HideCursor(self):
"""Hides the cursor."""
c = wx.StockCursor(wx.CURSOR_BLANK)
self.SetCursor(c)
def ShowCursor(self):
"""Shows the cursor."""
rw = self._Iren.GetRenderWindow()
cur = self._cursor_map[rw.GetCurrentCursor()]
c = wx.StockCursor(cur)
self.SetCursor(c)
def GetDisplayId(self):
"""Function to get X11 Display ID from WX and return it in a format
that can be used by VTK Python.
We query the X11 Display with a new call that was added in wxPython
2.6.0.1. The call returns a SWIG object which we can query for the
address and subsequently turn into an old-style SWIG-mangled string
representation to pass to VTK.
"""
d = None
try:
d = wx.GetXDisplay()
except NameError:
# wx.GetXDisplay was added by Robin Dunn in wxPython 2.6.0.1
# if it's not available, we can't pass it. In general,
# things will still work; on some setups, it'll break.
pass
else:
# wx returns None on platforms where wx.GetXDisplay is not relevant
if d:
d = hex(d)
# On wxPython-2.6.3.2 and above there is no leading '0x'.
if not d.startswith('0x'):
d = '0x' + d
# we now have 0xdeadbeef
# VTK wants it as: _deadbeef_void_p (pre-SWIG-1.3 style)
d = '_%s_%s' % (d[2:], 'void_p')
return d
def OnMouseCaptureLost(self, event):
"""This is signalled when we lose mouse capture due to an
external event, such as when a dialog box is shown. See the
wx documentation.
"""
# the documentation seems to imply that by this time we've
# already lost capture. I have to assume that we don't need
# to call ReleaseMouse ourselves.
if _useCapture and self._own_mouse:
self._own_mouse = False
def OnPaint(self,event):
"""Handles the wx.EVT_PAINT event for
wxVTKRenderWindowInteractor.
"""
# wx should continue event processing after this handler.
# We call this BEFORE Render(), so that if Render() raises
# an exception, wx doesn't re-call OnPaint repeatedly.
event.Skip()
dc = wx.PaintDC(self)
# make sure the RenderWindow is sized correctly
self._Iren.GetRenderWindow().SetSize(self.GetSizeTuple())
# Tell the RenderWindow to render inside the wx.Window.
if not self.__handle:
# on relevant platforms, set the X11 Display ID
d = self.GetDisplayId()
if d:
self._Iren.GetRenderWindow().SetDisplayId(d)
# store the handle
self.__handle = self.GetHandle()
# and give it to VTK
self._Iren.GetRenderWindow().SetWindowInfo(str(self.__handle))
# now that we've painted once, the Render() reparenting logic
# is safe
self.__has_painted = True
self.Render()
def OnSize(self,event):
"""Handles the wx.EVT_SIZE event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue (we call this before the
# Render(), in case it raises an exception)
event.Skip()
try:
width, height = event.GetSize()
except:
width = event.GetSize().width
height = event.GetSize().height
self._Iren.SetSize(width, height)
self._Iren.ConfigureEvent()
# this will check for __handle
self.Render()
def OnMotion(self,event):
"""Handles the wx.EVT_MOTION event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
# we call this early in case any of the VTK code raises an
# exception.
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.MouseMoveEvent()
def OnEnter(self,event):
"""Handles the wx.EVT_ENTER_WINDOW event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.EnterEvent()
def OnLeave(self,event):
"""Handles the wx.EVT_LEAVE_WINDOW event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
event.ControlDown(),
event.ShiftDown(),
chr(0), 0, None)
self._Iren.LeaveEvent()
def OnButtonDown(self,event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_DOWN events for
wxVTKRenderWindowInteractor.
"""
# allow wx event processing to continue
# on wxPython 2.6.0.1, omitting this will cause problems with
# the initial focus, resulting in the wxVTKRWI ignoring keypresses
# until we focus elsewhere and then refocus the wxVTKRWI frame
# we do it this early in case any of the following VTK code
# raises an exception.
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
button = 0
if event.RightDown():
self._Iren.RightButtonPressEvent()
button = 'Right'
elif event.LeftDown():
self._Iren.LeftButtonPressEvent()
button = 'Left'
elif event.MiddleDown():
self._Iren.MiddleButtonPressEvent()
button = 'Middle'
# save the button and capture mouse until the button is released
# we only capture the mouse if it hasn't already been captured
if _useCapture and not self._own_mouse:
self._own_mouse = True
self._mouse_capture_button = button
self.CaptureMouse()
def OnButtonUp(self,event):
"""Handles the wx.EVT_LEFT/RIGHT/MIDDLE_UP events for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
button = 0
if event.RightUp():
button = 'Right'
elif event.LeftUp():
button = 'Left'
elif event.MiddleUp():
button = 'Middle'
# if the same button is released that captured the mouse, and
# we have the mouse, release it.
# (we need to get rid of this as soon as possible; if we don't
# and one of the event handlers raises an exception, mouse
# is never released.)
if _useCapture and self._own_mouse and \
button==self._mouse_capture_button:
self.ReleaseMouse()
self._own_mouse = False
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
if button == 'Right':
self._Iren.RightButtonReleaseEvent()
elif button == 'Left':
self._Iren.LeftButtonReleaseEvent()
elif button == 'Middle':
self._Iren.MiddleButtonReleaseEvent()
def OnMouseWheel(self,event):
"""Handles the wx.EVT_MOUSEWHEEL event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, chr(0), 0, None)
if event.GetWheelRotation() > 0:
self._Iren.MouseWheelForwardEvent()
else:
self._Iren.MouseWheelBackwardEvent()
def OnKeyDown(self,event):
"""Handles the wx.EVT_KEY_DOWN event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
keycode, keysym = event.GetKeyCode(), None
if keycode == wx.WXK_LEFT:
print('Left')
if keycode == wx.WXK_RIGHT:
print('Right')
if keycode == wx.WXK_UP:
print('Up')
if keycode == wx.WXK_DOWN:
print('Down')
key = chr(0)
if keycode < 256:
key = chr(keycode)
print(key)
# wxPython 2.6.0.1 does not return a valid event.Get{X,Y}()
# for this event, so we use the cached position.
(x,y)= self._Iren.GetEventPosition()
self._Iren.SetEventInformation(x, y,
ctrl, shift, key, 0,
keysym)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
def OnKeyUp(self,event):
"""Handles the wx.EVT_KEY_UP event for
wxVTKRenderWindowInteractor.
"""
# event processing should continue
event.Skip()
ctrl, shift = event.ControlDown(), event.ShiftDown()
keycode, keysym = event.GetKeyCode(), None
key = chr(0)
if keycode < 256:
key = chr(keycode)
self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
ctrl, shift, key, 0,
keysym)
self._Iren.KeyReleaseEvent()
def GetRenderWindow(self):
"""Returns the render window (vtkRenderWindow).
"""
return self._Iren.GetRenderWindow()
def Render(self):
"""Actually renders the VTK scene on screen.
"""
RenderAllowed = 1
'''
if not self.__RenderWhenDisabled:
# the user doesn't want us to render when the toplevel frame
# is disabled - first find the top level parent
topParent = wx.GetTopLevelParent(self)
if topParent:
# if it exists, check whether it's enabled
                # if it's not enabled, RenderAllowed will be false
RenderAllowed = topParent.IsEnabled()
'''
if RenderAllowed:
if self.__handle and self.__handle == self.GetHandle():
self._Iren.GetRenderWindow().Render()
elif self.GetHandle() and self.__has_painted:
# this means the user has reparented us; let's adapt to the
# new situation by doing the WindowRemap dance
self._Iren.GetRenderWindow().SetNextWindowInfo(
str(self.GetHandle()))
# make sure the DisplayId is also set correctly
d = self.GetDisplayId()
if d:
self._Iren.GetRenderWindow().SetDisplayId(d)
# do the actual remap with the new parent information
self._Iren.GetRenderWindow().WindowRemap()
# store the new situation
self.__handle = self.GetHandle()
self._Iren.GetRenderWindow().Render()
def SetRenderWhenDisabled(self, newValue):
"""Change value of __RenderWhenDisabled ivar.
If __RenderWhenDisabled is false (the default), this widget will not
call Render() on the RenderWindow if the top level frame (i.e. the
containing frame) has been disabled.
This prevents recursive rendering during wx.SafeYield() calls.
wx.SafeYield() can be called during the ProgressMethod() callback of
a VTK object to have progress bars and other GUI elements updated -
it does this by disabling all windows (disallowing user-input to
prevent re-entrancy of code) and then handling all outstanding
GUI events.
However, this often triggers an OnPaint() method for wxVTKRWIs,
resulting in a Render(), resulting in Update() being called whilst
still in progress.
"""
self.__RenderWhenDisabled = bool(newValue)
#--------------------------------------------------------------------
def wxVTKRenderWindowInteractorConeExample():
"""Like it says, just a simple example
"""
# every wx app needs an app
app = wx.PySimpleApp()
# create the top-level frame, sizer and wxVTKRWI
frame = wx.Frame(None, -1, "wxVTKRenderWindowInteractor", size=(600,400))
widget = wxVTKRenderWindowInteractor(frame, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(widget, 1, wx.EXPAND)
frame.SetSizer(sizer)
frame.Layout()
# It would be more correct (API-wise) to call widget.Initialize() and
# widget.Start() here, but Initialize() calls RenderWindow.Render().
# That Render() call will get through before we can setup the
# RenderWindow() to render via the wxWidgets-created context; this
# causes flashing on some platforms and downright breaks things on
# other platforms. Instead, we call widget.Enable(). This means
# that the RWI::Initialized ivar is not set, but in THIS SPECIFIC CASE,
# that doesn't matter.
widget.Enable(1)
widget.AddObserver("ExitEvent", lambda o,e,f=frame: f.Close())
#widget.AddObserver("ExitEvent2", lambda o,e,f=frame: f.Close())
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the window
frame.Show()
app.MainLoop()
if __name__ == "__main__":
wxVTKRenderWindowInteractorConeExample()
| [
"[email protected]"
] | |
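The widget above relies on `__getattr__` to forward unknown attribute lookups to the wrapped `vtkGenericRenderWindowInteractor`, which is what lets it "behave just like" that object. A minimal sketch of that delegation trick, using stand-in classes rather than the real VTK/wx ones:

```python
class Interactor:
    def Render(self):
        return "rendered"


class Wrapper:
    def __init__(self):
        self._iren = Interactor()

    def __getattr__(self, attr):
        # Only consulted when normal attribute lookup on Wrapper fails.
        if hasattr(self._iren, attr):
            return getattr(self._iren, attr)
        raise AttributeError(
            "%s has no attribute named %s" % (self.__class__.__name__, attr)
        )


w = Wrapper()
print(w.Render())   # delegated to Interactor.Render
```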
c68040c95d47cb1dbdc67e5ffca73df49529010b | dec108426231384227c39fd83adbd196f9149329 | /forge/ethyr/io/__init__.py | 53417336b1d41cf6bd2eb3f817ada3db9621aa51 | [
"MIT"
] | permissive | Justin-Yuan/neural-mmo | 591495d32e20142f8156e09aa725dd124285fd9e | cde2c666225d1382abb33243735f60e37113a267 | refs/heads/master | 2020-09-06T19:55:26.249578 | 2019-11-09T17:49:18 | 2019-11-09T17:49:18 | 220,532,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from .stimulus import Stimulus
from .action import Action
from .serial import Serial
from .io import IO
| [
"[email protected]"
] | |
74fdcfd69840950e1b3e336b45fef12d98d7d355 | 91ff6fdf7b2ccc58869d6ad41842f230644952c1 | /requirements/venky_task/String/7.py | 4f1a8999bfbb7bfa6d11aac952ba9d77b5cfcd61 | [] | no_license | KONASANI-0143/Dev | dd4564f54117f54ccfa003d1fcec4220e6cbe1f9 | 23d31fbeddcd303a7dc90ac9cfbe2c762d61c61e | refs/heads/master | 2023-08-14T15:59:59.012414 | 2021-10-13T14:54:49 | 2021-10-13T15:10:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | def venky(s):
n=s.find("not")
m=s.find("poor")
for i in s.split():
if i=="not":
c=s.replace(i,"poor")
print(s[n:]+str(c))
n=input("enter a string :")
venky(n)
| [
"[email protected]"
] | |
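For comparison, a sketch of the classic string exercise this snippet appears to be based on: when "not" appears before "poor", the whole "not ... poor" span is replaced with "good", otherwise the string is left alone.

```python
def not_poor(s):
    n = s.find("not")
    p = s.find("poor")
    if n != -1 and p != -1 and p > n:
        return s[:n] + "good" + s[p + len("poor"):]
    return s


print(not_poor("The lyrics are not that poor!"))   # The lyrics are good!
print(not_poor("The lyrics are poor!"))            # unchanged
```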
1f8c10416376d98fd9647224d5f6e4826a12517b | cd0cf1c75c715a67502ff7f164bb070da78956de | /calculation/migrations/0046_auto_20160310_0927.py | 2a0fc34d174875b0400e1f1c7e5a69becb00158e | [] | no_license | nustarnuclear/orient_linux | 9792fb4319007708861d619dac081fa32206d3f6 | 95082ea56a0dfc248024f9bf54897a017985ccdf | refs/heads/master | 2020-03-28T03:17:02.629719 | 2017-01-04T08:38:16 | 2017-01-04T08:38:16 | 43,117,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import calculation.models
class Migration(migrations.Migration):
dependencies = [
('calculation', '0045_server_queue'),
]
operations = [
migrations.RemoveField(
model_name='server',
name='status',
),
migrations.AddField(
model_name='robintask',
name='log_file',
field=models.FileField(upload_to=calculation.models.get_robintask_upload_path, blank=True, null=True),
),
migrations.AddField(
model_name='robintask',
name='output_file',
field=models.FileField(upload_to=calculation.models.get_robintask_upload_path, blank=True, null=True),
),
migrations.AlterField(
model_name='prerobintask',
name='server',
field=models.ForeignKey(to='calculation.Server', default=calculation.models.server_default, related_name='pre_robin_inputs'),
),
]
| [
"[email protected]"
] | |
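If a data backfill were needed alongside the schema changes above, it could go in a follow-up migration using `RunPython`. This is a hypothetical sketch, not part of the generated migration; the field and model names are taken from the operations above.

```python
from django.db import migrations


def backfill_log_files(apps, schema_editor):
    RobinTask = apps.get_model('calculation', 'RobinTask')
    for task in RobinTask.objects.filter(log_file=''):
        task.log_file = None
        task.save(update_fields=['log_file'])


class Migration(migrations.Migration):

    dependencies = [
        ('calculation', '0046_auto_20160310_0927'),
    ]

    operations = [
        migrations.RunPython(backfill_log_files, migrations.RunPython.noop),
    ]
```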
5faf40bbcb2caaa7edd850c568952b71d9a6de70 | 05c22017cde07bb9fdff2c7f03f2602b1cd15323 | /src/textual/widget.py | e43372e6a165b2ec0153e5c91c32e700bf39cf10 | [
"MIT"
] | permissive | ramiro/textual | 00b0a7fc6fea95d327455c8328248cd926f3eaff | a6a912ab2713b0e1cb668224f7a38f31b1c9939c | refs/heads/main | 2023-06-14T01:22:40.975706 | 2021-07-05T14:06:16 | 2021-07-05T14:06:16 | 383,201,815 | 0 | 0 | MIT | 2021-07-05T16:25:54 | 2021-07-05T16:25:53 | null | UTF-8 | Python | false | false | 6,353 | py | from __future__ import annotations
from logging import getLogger
from typing import (
Callable,
cast,
ClassVar,
Generic,
Iterable,
NewType,
TypeVar,
TYPE_CHECKING,
)
from rich.align import Align
from rich.console import Console, RenderableType
from rich.pretty import Pretty
from rich.panel import Panel
import rich.repr
from rich.segment import Segment
from rich.style import Style
from . import events
from ._animator import BoundAnimator
from ._context import active_app
from ._loop import loop_last
from ._line_cache import LineCache
from .message import Message
from .messages import UpdateMessage, LayoutMessage
from .message_pump import MessagePump
from .geometry import Point, Dimensions
from .reactive import Reactive
if TYPE_CHECKING:
from .app import App
from .view import View
WidgetID = NewType("WidgetID", int)
log = getLogger("rich")
@rich.repr.auto
class Widget(MessagePump):
_id: ClassVar[int] = 0
_counts: ClassVar[dict[str, int]] = {}
can_focus: bool = False
def __init__(self, name: str | None = None) -> None:
class_name = self.__class__.__name__
Widget._counts.setdefault(class_name, 0)
Widget._counts[class_name] += 1
_count = self._counts[class_name]
self.id: WidgetID = cast(WidgetID, Widget._id)
Widget._id += 1
self.name = name or f"{class_name}#{_count}"
self.size = Dimensions(0, 0)
self.size_changed = False
self._repaint_required = False
self._layout_required = False
self._animate: BoundAnimator | None = None
super().__init__()
visible: Reactive[bool] = Reactive(True, layout=True)
layout_size: Reactive[int | None] = Reactive(None)
layout_fraction: Reactive[int] = Reactive(1)
layout_minimim_size: Reactive[int] = Reactive(1)
layout_offset_x: Reactive[float] = Reactive(0, layout=True)
layout_offset_y: Reactive[float] = Reactive(0, layout=True)
def __init_subclass__(cls, can_focus: bool = True) -> None:
super().__init_subclass__()
cls.can_focus = can_focus
def __rich_repr__(self) -> rich.repr.RichReprResult:
yield "name", self.name
def __rich__(self) -> RenderableType:
return self.render()
@property
def is_visual(self) -> bool:
return True
@property
def app(self) -> "App":
"""Get the current app."""
return active_app.get()
@property
def console(self) -> Console:
"""Get the current console."""
return active_app.get().console
@property
def root_view(self) -> "View":
"""Return the top-most view."""
return active_app.get().view
@property
def animate(self) -> BoundAnimator:
if self._animate is None:
self._animate = self.app.animator.bind(self)
assert self._animate is not None
return self._animate
@property
def layout_offset(self) -> tuple[int, int]:
"""Get the layout offset as a tuple."""
return (round(self.layout_offset_x), round(self.layout_offset_y))
def require_repaint(self) -> None:
"""Mark widget as requiring a repaint.
Actual repaint is done by parent on idle.
"""
self._repaint_required = True
self.post_message_no_wait(events.Null(self))
def require_layout(self) -> None:
self._layout_required = True
self.post_message_no_wait(events.Null(self))
def check_repaint(self) -> bool:
return self._repaint_required
def check_layout(self) -> bool:
return self._layout_required
def reset_check_repaint(self) -> None:
self._repaint_required = False
def reset_check_layout(self) -> None:
self._layout_required = False
def get_style_at(self, x: int, y: int) -> Style:
offset_x, offset_y = self.root_view.get_offset(self)
return self.root_view.get_style_at(x + offset_x, y + offset_y)
async def forward_event(self, event: events.Event) -> None:
await self.post_message(event)
async def refresh(self) -> None:
"""Re-render the window and repaint it."""
self.require_repaint()
await self.repaint()
async def repaint(self) -> None:
"""Instructs parent to repaint this widget."""
await self.emit(UpdateMessage(self, self))
async def update_layout(self) -> None:
await self.emit(LayoutMessage(self))
def render(self) -> RenderableType:
"""Get renderable for widget.
Returns:
RenderableType: Any renderable
"""
return Panel(
Align.center(Pretty(self), vertical="middle"), title=self.__class__.__name__
)
async def action(self, action: str, *params) -> None:
await self.app.action(action, self)
async def post_message(self, message: Message) -> bool:
if not self.check_message_enabled(message):
return True
return await super().post_message(message)
async def on_event(self, event: events.Event) -> None:
if isinstance(event, events.Resize):
new_size = Dimensions(event.width, event.height)
if self.size != new_size:
self.size = new_size
self.require_repaint()
await super().on_event(event)
async def on_idle(self, event: events.Idle) -> None:
if self.check_layout():
self.reset_check_repaint()
self.reset_check_layout()
await self.update_layout()
elif self.check_repaint():
self.reset_check_repaint()
self.reset_check_layout()
await self.repaint()
async def focus(self) -> None:
await self.app.set_focus(self)
async def capture_mouse(self, capture: bool = True) -> None:
await self.app.capture_mouse(self if capture else None)
async def on_mouse_move(self, event: events.MouseMove) -> None:
style_under_cursor = self.get_style_at(event.x, event.y)
log.debug("%r", style_under_cursor)
async def on_mouse_up(self, event: events.MouseUp) -> None:
style = self.get_style_at(event.x, event.y)
if "@click" in style.meta:
log.debug(style._link_id)
await self.app.action(style.meta["@click"], default_namespace=self)
| [
"[email protected]"
] | |
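`visible`, `layout_offset_x`, and the other class-level attributes above are `Reactive` descriptors: assigning to them both stores the value and schedules repaint/layout work. A stand-alone sketch of that descriptor idea (not Textual's real implementation):

```python
class Reactive:
    def __init__(self, default, layout=False):
        self.default = default
        self.layout = layout

    def __set_name__(self, owner, name):
        self._name = "_" + name

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return getattr(obj, self._name, self.default)

    def __set__(self, obj, value):
        setattr(obj, self._name, value)
        obj.dirty = True   # stand-in for require_repaint()/require_layout()


class Widget:
    visible = Reactive(True, layout=True)

    def __init__(self):
        self.dirty = False


w = Widget()
print(w.visible, w.dirty)   # True False
w.visible = False
print(w.visible, w.dirty)   # False True
```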
ad8df248427f7098d6463b39e0c10612baf026cc | 807305b8aefbd7aac4f44c67deed06c059ca02d9 | /src/stk/molecular/topology_graphs/polymer/linear/vertices.py | 95e4ae66c4c80eab25400a4a05c7c5504fb3b81f | [
"MIT"
] | permissive | supramolecular-toolkit/stk | c40103b4820c67d110cbddc7be30d9b58d85f7af | 46f70cd000890ca7c2312cc0fdbab306565f1400 | refs/heads/master | 2022-11-27T18:22:25.187588 | 2022-11-16T13:23:11 | 2022-11-16T13:23:11 | 129,884,045 | 22 | 5 | MIT | 2019-08-19T18:16:41 | 2018-04-17T09:58:28 | Python | UTF-8 | Python | false | false | 6,060 | py | """
Linear Polymer Vertices
=======================
"""
import logging
from ...topology_graph import Vertex
logger = logging.getLogger(__name__)
class LinearVertex(Vertex):
"""
Represents a vertex in the middle of a linear polymer chain.
"""
def __init__(self, id, position, flip):
"""
Initialize a :class:`.LinearVertex` instance.
Parameters
----------
id : :class:`int`
The id of the vertex.
position : :class:`numpy.ndarray`
The position of the vertex.
flip : :class:`bool`
If ``True`` any building block placed by the vertex will
have its orientation along the chain flipped.
"""
super().__init__(id, position)
self._flip = flip
def get_flip(self):
"""
Return ``True`` if the vertex flips building blocks it places.
Returns
-------
:class:`bool`
``True`` if the vertex flips building blocks it places.
"""
return self._flip
def clone(self):
clone = super().clone()
clone._flip = self._flip
return clone
def place_building_block(self, building_block, edges):
assert building_block.get_num_functional_groups() == 2, (
f"{building_block} needs to have exactly 2 functional "
"groups but has "
f"{building_block.get_num_functional_groups()}."
)
building_block = building_block.with_centroid(
position=self._position,
atom_ids=building_block.get_placer_ids(),
)
fg1, fg2 = building_block.get_functional_groups()
fg1_position = building_block.get_centroid(
atom_ids=fg1.get_placer_ids(),
)
fg2_position = building_block.get_centroid(
atom_ids=fg2.get_placer_ids(),
)
return building_block.with_rotation_between_vectors(
start=fg2_position - fg1_position,
target=[-1 if self._flip else 1, 0, 0],
origin=self._position,
).get_position_matrix()
def map_functional_groups_to_edges(self, building_block, edges):
fg1_id, fg2_id = self._sort_functional_groups(building_block)
edge1_id, edge2_id = self._sort_edges(edges)
return {
fg1_id: edge1_id,
fg2_id: edge2_id,
}
@staticmethod
def _sort_functional_groups(building_block):
fg1, fg2 = building_block.get_functional_groups()
x1, y1, z1 = building_block.get_centroid(
atom_ids=fg1.get_placer_ids(),
)
x2, y2, z2 = building_block.get_centroid(
atom_ids=fg2.get_placer_ids(),
)
return (0, 1) if x1 < x2 else (1, 0)
@staticmethod
def _sort_edges(edges):
edge1, edge2 = edges
x1, y1, z1 = edge1.get_position()
x2, y2, z2 = edge2.get_position()
if x1 < x2:
return edge1.get_id(), edge2.get_id()
else:
return edge2.get_id(), edge1.get_id()
def __str__(self):
return (
f"Vertex(id={self._id}, "
f"position={self._position.tolist()}, "
f"flip={self._flip})"
)
class TerminalVertex(LinearVertex):
"""
Represents a vertex at the end of a polymer chain.
Do not instantiate this class directly, use :class:`.HeadVertex`
or :class:`.TailVertex` instead.
"""
def place_building_block(self, building_block, edges):
if (
building_block.get_num_functional_groups() != 1
and building_block.get_num_placers() > 1
):
return super().place_building_block(building_block, edges)
building_block = building_block.with_centroid(
position=self._position,
atom_ids=building_block.get_placer_ids(),
)
fg, *_ = building_block.get_functional_groups()
fg_centroid = building_block.get_centroid(
atom_ids=fg.get_placer_ids(),
)
core_centroid = building_block.get_centroid(
atom_ids=building_block.get_core_atom_ids(),
)
return building_block.with_rotation_between_vectors(
start=fg_centroid - core_centroid,
# _cap_direction is defined by a subclass.
target=[self._cap_direction, 0, 0],
origin=self._position,
).get_position_matrix()
def map_functional_groups_to_edges(self, building_block, edges):
if building_block.get_num_functional_groups() == 2:
functional_groups = self._sort_functional_groups(
building_block=building_block,
)
index = 1 if self._cap_direction == 1 else 0
return {functional_groups[index]: edges[0].get_id()}
elif building_block.get_num_functional_groups() == 1:
return {0: edges[0].get_id()}
else:
raise ValueError(
"The building block of a polymer "
"must have 1 or 2 functional groups."
)
class HeadVertex(TerminalVertex):
"""
Represents a vertex at the head of a polymer chain.
"""
# The direction to use if the building block placed on the
# vertex only has 1 FunctionalGroup.
_cap_direction = 1
class TailVertex(TerminalVertex):
"""
Represents a vertex at the tail of a polymer chain.
"""
# The direction to use if the building block placed on the
# vertex only has 1 FunctionalGroup.
_cap_direction = -1
class UnaligningVertex(LinearVertex):
"""
Just places a building block, does not align.
"""
def place_building_block(self, building_block, edges):
return building_block.with_centroid(
position=self._position,
atom_ids=building_block.get_placer_ids(),
).get_position_matrix()
def map_functional_groups_to_edges(self, building_block, edges):
return {
fg_id: edge.get_id() for fg_id, edge in enumerate(edges)
}
| [
"[email protected]"
] | |
1d803ef9b328061175dc929664d4498660153ef4 | ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7 | /tools/verified-boot/signing/gen-op-cert.py | 44f970940d9dbbd127253a9616fb4ac0335aa2bd | [] | no_license | facebook/openbmc | bef10604ced226288600f55248b7f1be9945aea4 | 32777c66a8410d767eae15baabf71c61a0bef13c | refs/heads/helium | 2023-08-17T03:13:54.729494 | 2023-08-16T23:24:18 | 2023-08-16T23:24:18 | 31,917,712 | 684 | 331 | null | 2023-07-25T21:19:08 | 2015-03-09T19:18:35 | C | UTF-8 | Python | false | false | 8,662 | py | #!/usr/bin/env python3
# Copyright (c) 2023-present, META, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import argparse
import io
import mmap
import os
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
from typing import Optional
from image_meta import FBOBMCImageMeta
from measure_func import get_uboot_hash_algo_and_size
from pyfdt import pyfdt
from sh import cpp, dtc
GEN_OP_CERT_VERSION = 1
EC_SUCCESS = 0
EC_EXCEPT = 255
OP_CERT_DTS = """
/dts-v1/;
#define OP_CERT_DTS
#include "op-cert-binding.h"
/ {{
timestamp = <{gen_time}>;
no-fallback;
PROP_CERT_VER = <{cert_ver}>;
PROP_GIU_MODE = <{giu_mode}>;
PROP_UBOOT_HASH = [{uboot_hash}];
PROP_UBOOT_HASH_LEN = <{uboot_hash_len}>;
}};
"""
def save_tmp_src_file(fn_tmp: str, mid_ext: str) -> None:
if not args.debug:
return
dst_dir = os.path.dirname(args.output)
dst_base_name = os.path.basename(args.output).split(".")[0]
dst = os.path.join(dst_dir, f"{dst_base_name}.{mid_ext}.tmp")
shutil.copy2(fn_tmp, dst)
def extract_uboot_hash(fn_img: str) -> bytearray:
image_meta = FBOBMCImageMeta(fn_img)
fit = image_meta.get_part_info("u-boot-fit")
uboot_hash, uboot_hash_algo, _ = get_uboot_hash_algo_and_size(
image_meta.image, fit["offset"], 0x4000
)
return bytearray(uboot_hash)
def create_cert_dtb(fn_img: str, giu_mode: str, injerr: Optional[str] = None) -> str:
uboot_hash = extract_uboot_hash(fn_img)
if injerr == "hash":
uboot_hash[0] ^= 1
with tempfile.NamedTemporaryFile() as tmp_cert_dts_raw, \
tempfile.NamedTemporaryFile() as tmp_cert_dts, \
tempfile.NamedTemporaryFile(delete=False) as tmp_cert_dtb: # fmt:skip
cert_dts = OP_CERT_DTS.format(
gen_time=hex(int(time.time())),
cert_ver=(
"VBOOT_OP_CERT_VER"
if injerr != "ver"
else "VBOOT_OP_CERT_UNSUPPORT_VER"
),
giu_mode=(giu_mode if injerr != "mode" else 0xEE),
uboot_hash=bytes(uboot_hash).hex(),
uboot_hash_len=len(uboot_hash),
)
tmp_cert_dts_raw.write(cert_dts.encode("utf-8"))
tmp_cert_dts_raw.flush()
save_tmp_src_file(tmp_cert_dts_raw.name, "raw")
cpp(
"-nostdinc",
"-undef",
"-x",
"assembler-with-cpp",
"-I",
os.path.dirname(os.path.realpath(__file__)),
tmp_cert_dts_raw.name,
tmp_cert_dts.name,
)
save_tmp_src_file(tmp_cert_dts.name, "cpp")
dtc(
"-I",
"dts",
"-O",
"dtb",
"-o",
tmp_cert_dtb.name,
tmp_cert_dts.name,
)
return tmp_cert_dtb.name
OP_CERT_ITS = """
/dts-v1/;
/ {{
description = "vboot op-cert file";
images {{
fdt@1 {{
description = "vboot operation certificate";
data = /incbin/("{cert_dtb}");
hash@1 {{
algo = "{hash_algo}";
}};
signature@1 {{
algo = "sha256,rsa4096";
key-name-hint = "{key_name}";
}};
}};
}};
configurations {{
default = "conf@1";
conf@1 {{
firmware = "fdt@1";
}};
}};
}};
"""
CERT_SIGN_PATH = "/images/fdt@1/signature@1/value"
def create_cert_itb(
mkimage: str,
hsmkey: Optional[str],
keyfile: Optional[str],
hash_algo: str,
tmp_cert_dtb_name: str,
fn_cert: str,
) -> None:
if hsmkey:
requested_key_name = os.path.basename(hsmkey)
keydir = os.path.dirname(hsmkey)
else:
keybase, keyext = os.path.splitext(keyfile)
if keyext != ".key":
raise ValueError(f"private key file {keyfile} must be .key ext")
requested_key_name = os.path.basename(keybase)
keydir = os.path.dirname(os.path.abspath(keyfile))
with tempfile.NamedTemporaryFile() as tmp_cert_its:
cert_its = OP_CERT_ITS.format(
cert_dtb=tmp_cert_dtb_name,
hash_algo=hash_algo,
key_name=requested_key_name,
)
print(cert_its)
tmp_cert_its.write(cert_its.encode("utf-8"))
tmp_cert_its.flush()
save_tmp_src_file(tmp_cert_its.name, "its")
cmd = [
mkimage,
"-f",
tmp_cert_its.name,
"-k",
keydir,
"-r",
fn_cert,
]
if hsmkey:
cmd += ["-N", "FB-HSM"]
print(" ".join(cmd))
subprocess.run(cmd, check=True)
def decompile_dtb_file(fn_dtb: str, fn_src: str) -> None:
dtc("-I", "dtb", "-O", "dts", "-o", fn_src, fn_dtb)
def get_cert_sign(fn_cert: str) -> bytes:
with open(fn_cert, "rb") as fh:
cert_io = io.BytesIO(fh.read())
cert_fdt = pyfdt.FdtBlobParse(cert_io).to_fdt()
return cert_fdt.resolve_path(CERT_SIGN_PATH).to_raw()
class CertSignatureNotFind(Exception):
pass
def flip_bit_of_sign(fn_cert: str) -> None:
cert_sig = get_cert_sign(fn_cert)
with open(fn_cert, "r+b") as fh:
with mmap.mmap(fh.fileno(), 0) as mm:
sig_idx = mm.find(cert_sig)
if sig_idx < 0:
raise CertSignatureNotFind
mm[sig_idx] ^= 1
mm.flush()
def main(args: argparse.Namespace) -> int:
# Create the certificate data from OP_CERT_DTS
tmp_cert_dtb_name = None
try:
tmp_cert_dtb_name = create_cert_dtb(args.image, args.giu_mode, args.injerr)
create_cert_itb(
args.mkimage,
args.hsmkey,
args.keyfile,
args.hash_algo,
tmp_cert_dtb_name,
args.output,
)
dump_dir = os.path.dirname(args.output)
dump_base_name = os.path.basename(args.output).split(".")[0]
dump_base_path = os.path.join(dump_dir, dump_base_name)
if args.injerr == "sig":
if args.debug:
shutil.copy2(args.output, f"{dump_base_path}.orig.itb")
decompile_dtb_file(args.output, f"{dump_base_path}.orig.its")
flip_bit_of_sign(args.output)
if args.debug:
decompile_dtb_file(tmp_cert_dtb_name, f"{dump_base_path}.dts")
decompile_dtb_file(args.output, f"{dump_base_path}.its")
finally:
if tmp_cert_dtb_name:
os.unlink(tmp_cert_dtb_name)
return EC_SUCCESS
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a vboot operation certificate file."
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s-v{}".format(GEN_OP_CERT_VERSION),
)
parser.add_argument(
"--mkimage",
required=True,
metavar="MKIMAGE",
help="Required path to mkimage, use openbmc built mkimage for HSM sign",
)
parser.add_argument(
"-i",
"--image",
required=True,
        help="Required openbmc image the certificate is bound to",
)
parser.add_argument(
"output",
metavar="CERT_FILE",
help="Output path of signed certificate file",
)
parser.add_argument(
"-m",
"--giu-mode",
default="GIU_CERT",
choices=["GIU_NONE", "GIU_CERT", "GIU_OPEN"],
help="Golden image mode",
)
parser.add_argument(
"--hash-algo",
default="sha256",
help="Specify hashing algorithm, default(sha256)",
)
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="save dts and its in same dir of output cert with same basename",
)
parser.add_argument(
"--injerr",
choices=["sig", "mode", "ver", "hash"],
help="generate bad certificate with errors for testing",
)
pkey = parser.add_mutually_exclusive_group(required=True)
pkey.add_argument(
"--keyfile",
        help="certificate signing private key file; must have a .key ext",
)
pkey.add_argument(
"--hsmkey",
help="Use HSM based key to sign",
)
args = parser.parse_args()
# sanity check and normalize the input keydir
try:
sys.exit(main(args))
except Exception as e:
print("Exception: %s" % (str(e)))
traceback.print_exc()
sys.exit(EC_EXCEPT)
| [
"[email protected]"
] | |
9afca773cc3e575a5e99270fc96821846e41becd | 1eb7fa8b1745d4e51cefb4eceb44621862516aa6 | /Company Interview/FB/BiggestKValuesInBST.py | fd6ddc0d98fe0ee5e2f7a5090dd8918d9e3db922 | [] | no_license | geniousisme/CodingInterview | bd93961d728f1fe266ad5edf91adc5d024e5ca48 | a64bca9c07a7be8d4060c4b96e89d8d429a7f1a3 | refs/heads/master | 2021-01-10T11:15:31.305787 | 2017-03-06T00:03:13 | 2017-03-06T00:03:13 | 43,990,453 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution1(object):
def biggestKthValues(self, root):
res = []; stack = []
while root or stack:
if root:
stack.append(root)
root = root.right
else:
top = stack.pop()
res.append(top.val)
root = top.left
return res
class Solution(object): # iterative
def biggestKthValues(self, root, k):
res = count = 0; stack = [];
while root or stack:
if root:
stack.append(root)
root = root.right
else:
top = stack.pop()
if count == k - 1:
return top.val
else:
count += 1
root = top.left
return res
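# For the tree built under __main__ below (values 1, 5, 7, 9, 11, 13, 15), the
# reverse in-order walk visits 15, 13, 11, ... so biggestKthValues(t9, 3)
# returns 11, the third-largest value.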
if __name__ == "__main__":
s = Solution()
t9 = TreeNode(9)
t1 = TreeNode(1)
t5 = TreeNode(5)
t7 = TreeNode(7)
t13 = TreeNode(13)
t11 = TreeNode(11)
t15 = TreeNode(15)
t9.left = t5
t9.right = t13
t5.left = t1
t5.right = t7
t13.left = t11
t13.right = t15
print s.biggestKthValues(t9, 3)
| [
"[email protected]"
] | |
5f52fdc03f0db7fb339060a70be115388bb1d11a | ed2d96ead522dd4dbd1dfdf4a6a776617f7dbcaf | /tutorial/settings.py | 2ab243e3f117195473def28fa8017680ee721604 | [] | no_license | Alexmhack/django_rest_quickstart | ff83f435b09f6e279d17c87ea53ad5719276d1f9 | b44be0cb8fd07d00ac8715934b1fe480e833e344 | refs/heads/master | 2020-04-01T06:45:04.591779 | 2018-10-14T12:22:00 | 2018-10-14T12:22:00 | 152,962,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,417 | py | """
Django settings for tutorial project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("PROJECT_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# django app
'quickstart',
# dependencies
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# REST FRAMEWORK
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10,
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
70bc0f58d1a7260e8aa0c009c423467c33acd8a0 | e6611443e946d1129985a95bc2dd2afc610f8292 | /CMS/apps/task_status/migrations/0003_taskstatus_category.py | 53b4aaa95708f0d4641fb077232356c169f2ceb3 | [] | no_license | Indus-Action/Campaign-Management-System | a761dd9bbc7967f8302bb3283230f87ccc2bd2a6 | 9c6f1193ff897b8cc53f2a1c3bca8d70a890e70f | refs/heads/master | 2020-03-12T19:49:19.329764 | 2018-05-15T06:37:41 | 2018-05-15T06:37:41 | 130,792,314 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-09-28 06:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('task_status_categories', '0001_initial'),
('task_status', '0002_taskstatus_desc'),
]
operations = [
migrations.AddField(
model_name='taskstatus',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_status', to='task_status_categories.TaskStatusCategory', null=True),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
fcdc3c6425304d12927eedb5366284da5f8f22cc | 67b04bf2bdfdfc8de4189a52fe431aa482c375ac | /example/app.py | 531fdcb75688c64a06ec95e82a0c65b0fe75b7d9 | [
"MIT"
] | permissive | d0ugal/aioauth-client | 2de6eeb25fd6582a34c8b144fff066d817b011db | 6fce61642c974ede8d800e476a4a5661778a180d | refs/heads/develop | 2023-04-10T03:59:04.766587 | 2020-01-22T19:46:26 | 2020-01-22T19:46:26 | 235,654,658 | 1 | 0 | MIT | 2023-04-04T01:21:32 | 2020-01-22T19:55:56 | Python | UTF-8 | Python | false | false | 5,114 | py | """ Aioauth-client example. """
import asyncio
from aiohttp import web
import html
from pprint import pformat
from aioauth_client import (
BitbucketClient,
FacebookClient,
GithubClient,
GoogleClient,
OAuth1Client,
TwitterClient,
YandexClient,
)
app = web.Application()
clients = {
'twitter': {
'class': TwitterClient,
'init': {
'consumer_key': 'oUXo1M7q1rlsPXm4ER3dWnMt8',
'consumer_secret': 'YWzEvXZJO9PI6f9w2FtwUJenMvy9SPLrHOvnNkVkc5LdYjKKup',
},
},
'github': {
'class': GithubClient,
'init': {
'client_id': 'b6281b6fe88fa4c313e6',
'client_secret': '21ff23d9f1cad775daee6a38d230e1ee05b04f7c',
},
},
'google': {
'class': GoogleClient,
'init': {
'client_id': '150775235058-9fmas709maee5nn053knv1heov12sh4n.apps.googleusercontent.com', # noqa
'client_secret': 'df3JwpfRf8RIBz-9avNW8Gx7',
'scope': 'email profile',
},
},
'yandex': {
'class': YandexClient,
'init': {
'client_id': 'e19388a76a824b3385f38beec67f98f1',
'client_secret': '1d2e6fdcc23b45849def6a34b43ac2d8',
},
},
'facebook': {
'class': FacebookClient,
'init': {
'client_id': '384739235070641',
'client_secret': '8e3374a4e1e91a2bd5b830a46208c15a',
'scope': 'email'
},
},
'bitbucket': {
'class': BitbucketClient,
'init': {
'consumer_key': '4DKzbyW8JSbnkFyRS5',
'consumer_secret': 'AvzZhtvRJhrEJMsGAMsPEuHTRWdMPX9z',
},
},
}
@asyncio.coroutine
def index(request):
return web.Response(text="""
<ul>
<li><a href="/oauth/bitbucket">Login with Bitbucket</a></li>
<li><a href="/oauth/facebook">Login with Facebook</a></li>
<li><a href="/oauth/github">Login with Github</a></li>
<li><a href="/oauth/google">Login with Google</a></li>
<li><a href="/oauth/twitter">Login with Twitter</a></li>
</ul>
""", content_type="text/html")
# Simple Github (OAuth2) example (not connected to app)
@asyncio.coroutine
def github(request):
github = GithubClient(
client_id='b6281b6fe88fa4c313e6',
client_secret='21ff23d9f1cad775daee6a38d230e1ee05b04f7c',
)
if 'code' not in request.query:
return web.HTTPFound(github.get_authorize_url(scope='user:email'))
# Get access token
code = request.query['code']
token, _ = yield from github.get_access_token(code)
assert token
# Get a resource `https://api.github.com/user`
response = yield from github.request('GET', 'user')
body = yield from response.read()
return web.Response(body=body, content_type='application/json')
@asyncio.coroutine
def oauth(request):
provider = request.match_info.get('provider')
if provider not in clients:
raise web.HTTPNotFound(reason='Unknown provider')
# Create OAuth1/2 client
Client = clients[provider]['class']
params = clients[provider]['init']
client = Client(**params)
client.params['oauth_callback' if issubclass(Client, OAuth1Client) else 'redirect_uri'] = \
'http://%s%s' % (request.host, request.path)
# Check if is not redirect from provider
if client.shared_key not in request.query:
# For oauth1 we need more work
if isinstance(client, OAuth1Client):
token, secret, _ = yield from client.get_request_token()
            # Quick-and-dirty save of the token_secret on the app object.
            # Don't do this in production.
request.app.secret = secret
request.app.token = token
# Redirect client to provider
return web.HTTPFound(client.get_authorize_url(access_type='offline'))
# For oauth1 we need more work
if isinstance(client, OAuth1Client):
client.oauth_token_secret = request.app.secret
client.oauth_token = request.app.token
_, meta = yield from client.get_access_token(request.query)
user, info = yield from client.user_info()
text = (
"<a href='/'>back</a><br/><br/>"
"<ul>"
"<li>ID: {u.id}</li>"
"<li>Username: {u.username}</li>"
"<li>First, last name: {u.first_name}, {u.last_name}</li>"
"<li>Gender: {u.gender}</li>"
"<li>Email: {u.email}</li>"
"<li>Link: {u.link}</li>"
"<li>Picture: {u.picture}</li>"
"<li>Country, city: {u.country}, {u.city}</li>"
"</ul>"
).format(u=user)
text += "<pre>%s</pre>" % html.escape(pformat(info))
text += "<pre>%s</pre>" % html.escape(pformat(meta))
return web.Response(text=text, content_type='text/html')
app.router.add_route('GET', '/', index)
app.router.add_route('GET', '/oauth/{provider}', oauth)
loop = asyncio.get_event_loop()
f = loop.create_server(app.make_handler(), '127.0.0.1', 5000)
srv = loop.run_until_complete(f)
print('serving on', srv.sockets[0].getsockname())
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# pylama:ignore=D
| [
"[email protected]"
] | |
c422911d66d3c472a423daa9aae44836f52b2fba | 7add1f8fc31b09bb79efd2b25cc15e23666c1d1d | /tfx/orchestration/portable/tfx_runner.py | c431cf3f36a66e468fcc48190bbaac4331fed4f7 | [
"Apache-2.0"
] | permissive | twitter-forks/tfx | b867e9fee9533029ca799c4a4c5d1c5430ba05fe | cb3561224c54a5dad4d5679165d5b3bafc8b451b | refs/heads/master | 2021-11-19T18:45:09.157744 | 2021-10-19T00:02:34 | 2021-10-19T00:02:34 | 205,426,993 | 2 | 1 | Apache-2.0 | 2021-10-18T21:03:50 | 2019-08-30T17:21:03 | Python | UTF-8 | Python | false | false | 1,333 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of TFX runner base class."""
import abc
from typing import Any, Optional, Union
from tfx.orchestration import pipeline as pipeline_py
from tfx.proto.orchestration import pipeline_pb2
class TfxRunner(metaclass=abc.ABCMeta):
"""Base runner class for TFX.
This is the base class for every TFX runner.
"""
@abc.abstractmethod
def run(
self, pipeline: Union[pipeline_pb2.Pipeline,
pipeline_py.Pipeline]) -> Optional[Any]:
"""Runs a TFX pipeline on a specific platform.
Args:
pipeline: a pipeline_pb2.Pipeline message or pipeline.Pipeline instance
representing a pipeline definition.
Returns:
Optional platform-specific object.
"""
pass
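# A minimal sketch of a concrete runner (LocalDemoRunner is a hypothetical
# name, not an actual TFX runner):
#
#   class LocalDemoRunner(TfxRunner):
#
#     def run(self, pipeline):
#       # Platform-specific orchestration of `pipeline` would go here.
#       return None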
| [
"[email protected]"
] | |
2adb5dfa09418d7c569c7c8a6bebcfa114e65261 | 4652840c8fa0d701aaca8de426bf64c340a5e831 | /third_party/WebKit/Tools/Scripts/webkitpy/w3c/test_importer.py | 3649c2d42293998faef2f42ce2b30daf3f057f7c | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] | permissive | remzert/BraveBrowser | de5ab71293832a5396fa3e35690ebd37e8bb3113 | aef440e3d759cb825815ae12bd42f33d71227865 | refs/heads/master | 2022-11-07T03:06:32.579337 | 2017-02-28T23:02:29 | 2017-02-28T23:02:29 | 84,563,445 | 1 | 5 | BSD-3-Clause | 2022-10-26T06:28:58 | 2017-03-10T13:38:48 | null | UTF-8 | Python | false | false | 20,758 | py | # Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""This script imports a directory of W3C tests into Blink.
This script takes a source repository directory, which it searches for files,
then converts and copies files over to a destination directory.
Rules for importing:
 * By default, only reference tests and JS tests are imported (because pixel
tests take longer to run). This can be overridden with the --all flag.
* By default, if test files by the same name already exist in the destination
directory, they are overwritten. This is because this script is used to
refresh files periodically. This can be overridden with the --no-overwrite flag.
* All files are converted to work in Blink:
1. All CSS properties requiring the -webkit- vendor prefix are prefixed
(the list of what needs prefixes is read from Source/core/css/CSSProperties.in).
2. Each reftest has its own copy of its reference file following
the naming conventions new-run-webkit-tests expects.
3. If a reference files lives outside the directory of the test that
uses it, it is checked for paths to support files as it will be
imported into a different relative position to the test file
(in the same directory).
4. Any tags with the class "instructions" have style="display:none" added
to them. Some w3c tests contain instructions to manual testers which we
want to strip out (the test result parser only recognizes pure testharness.js
output and not those instructions).
 * Upon completion, the script outputs the total number of tests imported,
broken down by test type.
* Also upon completion, if we are not importing the files in place, each
directory where files are imported will have a w3c-import.log file written with
a timestamp, the list of CSS properties used that require prefixes, the list
of imported files, and guidance for future test modification and maintenance.
On subsequent imports, this file is read to determine if files have been
removed in the newer changesets. The script removes these files accordingly.
"""
import logging
import mimetypes
import optparse
import os
import sys
from webkitpy.common.host import Host
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.models.test_expectations import TestExpectationParser
from webkitpy.w3c.test_parser import TestParser
from webkitpy.w3c.test_converter import convert_for_webkit
# Maximum length of import path starting from top of source repository.
# This limit is here because the Windows builders cannot create paths that are
# longer than the Windows max path length (260). See http://crbug.com/609871.
MAX_PATH_LENGTH = 125
_log = logging.getLogger(__name__)
def main(_argv, _stdout, _stderr):
options, args = parse_args()
host = Host()
source_repo_path = host.filesystem.normpath(os.path.abspath(args[0]))
if not host.filesystem.exists(source_repo_path):
sys.exit('Repository directory %s not found!' % source_repo_path)
configure_logging()
test_importer = TestImporter(host, source_repo_path, options)
test_importer.do_import()
def configure_logging():
class LogHandler(logging.StreamHandler):
def format(self, record):
if record.levelno > logging.INFO:
return "%s: %s" % (record.levelname, record.getMessage())
return record.getMessage()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = LogHandler()
handler.setLevel(logging.INFO)
logger.addHandler(handler)
return handler
def parse_args():
parser = optparse.OptionParser(usage='usage: %prog [options] source_repo_path')
parser.add_option('-n', '--no-overwrite', dest='overwrite', action='store_false', default=True,
help=('Flag to prevent duplicate test files from overwriting existing tests. '
'By default, they will be overwritten.'))
parser.add_option('-a', '--all', action='store_true', default=False,
help=('Import all tests including reftests, JS tests, and manual/pixel tests. '
'By default, only reftests and JS tests are imported.'))
parser.add_option('-d', '--dest-dir', dest='destination', default='w3c',
help=('Import into a specified directory relative to the LayoutTests root. '
'By default, files are imported under LayoutTests/w3c.'))
parser.add_option('--ignore-expectations', action='store_true', default=False,
help='Ignore the W3CImportExpectations file and import everything.')
parser.add_option('--dry-run', action='store_true', default=False,
help='Dryrun only (don\'t actually write any results).')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Incorrect number of arguments; source repo path is required.')
return options, args
class TestImporter(object):
def __init__(self, host, source_repo_path, options):
self.host = host
self.source_repo_path = source_repo_path
self.options = options
self.filesystem = self.host.filesystem
self.webkit_finder = WebKitFinder(self.filesystem)
self._webkit_root = self.webkit_finder.webkit_base()
self.layout_tests_dir = self.webkit_finder.path_from_webkit_base('LayoutTests')
self.destination_directory = self.filesystem.normpath(
self.filesystem.join(
self.layout_tests_dir,
options.destination,
self.filesystem.basename(self.source_repo_path)))
self.import_in_place = (self.source_repo_path == self.destination_directory)
self.dir_above_repo = self.filesystem.dirname(self.source_repo_path)
self.import_list = []
def do_import(self):
_log.info("Importing %s into %s", self.source_repo_path, self.destination_directory)
self.find_importable_tests()
self.import_tests()
def find_importable_tests(self):
"""Walks through the source directory to find what tests should be imported.
This function sets self.import_list, which contains information about how many
tests are being imported, and their source and destination paths.
"""
paths_to_skip = self.find_paths_to_skip()
for root, dirs, files in self.filesystem.walk(self.source_repo_path):
cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
_log.info(' scanning ' + cur_dir + '...')
total_tests = 0
reftests = 0
jstests = 0
# Files in 'tools' are not for browser testing, so we skip them.
# See: http://testthewebforward.org/docs/test-format-guidelines.html#tools
DIRS_TO_SKIP = ('.git', 'test-plan', 'tools')
# We copy all files in 'support', including HTML without metadata.
# See: http://testthewebforward.org/docs/test-format-guidelines.html#support-files
DIRS_TO_INCLUDE = ('resources', 'support')
if dirs:
for d in DIRS_TO_SKIP:
if d in dirs:
dirs.remove(d)
for path in paths_to_skip:
path_base = path.replace(self.options.destination + '/', '')
path_base = path_base.replace(cur_dir, '')
path_full = self.filesystem.join(root, path_base)
if path_base in dirs:
dirs.remove(path_base)
if not self.options.dry_run and self.import_in_place:
_log.info(" pruning %s", path_base)
self.filesystem.rmtree(path_full)
else:
_log.info(" skipping %s", path_base)
copy_list = []
for filename in files:
path_full = self.filesystem.join(root, filename)
path_base = path_full.replace(self.source_repo_path + '/', '')
path_base = self.destination_directory.replace(self.layout_tests_dir + '/', '') + '/' + path_base
if path_base in paths_to_skip:
if not self.options.dry_run and self.import_in_place:
_log.info(" pruning %s", path_base)
self.filesystem.remove(path_full)
continue
else:
continue
# FIXME: This block should really be a separate function, but the early-continues make that difficult.
if filename.startswith('.') or filename.endswith('.pl'):
# The w3cs repos may contain perl scripts, which we don't care about.
continue
if filename == 'OWNERS' or filename == 'reftest.list':
# These files fail our presubmits.
# See http://crbug.com/584660 and http://crbug.com/582838.
continue
fullpath = self.filesystem.join(root, filename)
mimetype = mimetypes.guess_type(fullpath)
if ('html' not in str(mimetype[0]) and
'application/xhtml+xml' not in str(mimetype[0]) and
'application/xml' not in str(mimetype[0])):
copy_list.append({'src': fullpath, 'dest': filename})
continue
if self.filesystem.basename(root) in DIRS_TO_INCLUDE:
copy_list.append({'src': fullpath, 'dest': filename})
continue
test_parser = TestParser(fullpath, self.host)
test_info = test_parser.analyze_test()
if test_info is None:
copy_list.append({'src': fullpath, 'dest': filename})
continue
if self.path_too_long(path_full):
_log.warning('%s skipped due to long path. '
'Max length from repo base %d chars; see http://crbug.com/609871.',
path_full, MAX_PATH_LENGTH)
continue
if 'reference' in test_info.keys():
test_basename = self.filesystem.basename(test_info['test'])
# Add the ref file, following WebKit style.
# FIXME: Ideally we'd support reading the metadata
# directly rather than relying on a naming convention.
# Using a naming convention creates duplicate copies of the
# reference files (http://crrev.com/268729).
ref_file = self.filesystem.splitext(test_basename)[0] + '-expected'
# Make sure to use the extension from the *reference*, not
# from the test, because at least flexbox tests use XHTML
# references but HTML tests.
ref_file += self.filesystem.splitext(test_info['reference'])[1]
if not self.filesystem.exists(test_info['reference']):
_log.warning('%s skipped because ref file %s was not found.',
path_full, ref_file)
continue
if self.path_too_long(path_full.replace(filename, ref_file)):
_log.warning('%s skipped because path of ref file %s would be too long. '
'Max length from repo base %d chars; see http://crbug.com/609871.',
path_full, ref_file, MAX_PATH_LENGTH)
continue
reftests += 1
total_tests += 1
copy_list.append({'src': test_info['reference'], 'dest': ref_file,
'reference_support_info': test_info['reference_support_info']})
copy_list.append({'src': test_info['test'], 'dest': filename})
elif 'jstest' in test_info.keys():
jstests += 1
total_tests += 1
copy_list.append({'src': fullpath, 'dest': filename, 'is_jstest': True})
elif self.options.all:
total_tests += 1
copy_list.append({'src': fullpath, 'dest': filename})
if copy_list:
# Only add this directory to the list if there's something to import
self.import_list.append({'dirname': root, 'copy_list': copy_list,
'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})
def find_paths_to_skip(self):
if self.options.ignore_expectations:
return set()
paths_to_skip = set()
port = self.host.port_factory.get()
w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base('LayoutTests', 'W3CImportExpectations')
w3c_import_expectations = self.filesystem.read_text_file(w3c_import_expectations_path)
parser = TestExpectationParser(port, all_tests=(), is_lint_mode=False)
expectation_lines = parser.parse(w3c_import_expectations_path, w3c_import_expectations)
for line in expectation_lines:
if 'SKIP' in line.expectations:
if line.specifiers:
_log.warning("W3CImportExpectations:%s should not have any specifiers", line.line_numbers)
continue
paths_to_skip.add(line.name)
return paths_to_skip
def import_tests(self):
"""Reads |self.import_list|, and converts and copies files to their destination."""
total_imported_tests = 0
total_imported_reftests = 0
total_imported_jstests = 0
total_prefixed_properties = {}
for dir_to_copy in self.import_list:
total_imported_tests += dir_to_copy['total_tests']
total_imported_reftests += dir_to_copy['reftests']
total_imported_jstests += dir_to_copy['jstests']
prefixed_properties = []
if not dir_to_copy['copy_list']:
continue
orig_path = dir_to_copy['dirname']
subpath = self.filesystem.relpath(orig_path, self.source_repo_path)
new_path = self.filesystem.join(self.destination_directory, subpath)
if not self.filesystem.exists(new_path):
self.filesystem.maybe_make_directory(new_path)
copied_files = []
for file_to_copy in dir_to_copy['copy_list']:
# FIXME: Split this block into a separate function.
orig_filepath = self.filesystem.normpath(file_to_copy['src'])
if self.filesystem.isdir(orig_filepath):
# FIXME: Figure out what is triggering this and what to do about it.
_log.error('%s refers to a directory', orig_filepath)
continue
if not self.filesystem.exists(orig_filepath):
_log.error('%s not found. Possible error in the test.', orig_filepath)
continue
new_filepath = self.filesystem.join(new_path, file_to_copy['dest'])
if 'reference_support_info' in file_to_copy.keys() and file_to_copy['reference_support_info'] != {}:
reference_support_info = file_to_copy['reference_support_info']
else:
reference_support_info = None
if not self.filesystem.exists(self.filesystem.dirname(new_filepath)):
if not self.import_in_place and not self.options.dry_run:
self.filesystem.maybe_make_directory(self.filesystem.dirname(new_filepath))
relpath = self.filesystem.relpath(new_filepath, self.layout_tests_dir)
if not self.options.overwrite and self.filesystem.exists(new_filepath):
_log.info(' skipping %s', relpath)
else:
# FIXME: Maybe doing a file diff is in order here for existing files?
# In other words, there's no sense in overwriting identical files, but
# there's no harm in copying the identical thing.
_log.info(' %s', relpath)
# Only HTML, XML, or CSS should be converted.
# FIXME: Eventually, so should JS when support is added for this type of conversion.
mimetype = mimetypes.guess_type(orig_filepath)
if 'is_jstest' not in file_to_copy and (
'html' in str(mimetype[0]) or 'xml' in str(mimetype[0]) or 'css' in str(mimetype[0])):
converted_file = convert_for_webkit(
new_path, filename=orig_filepath,
reference_support_info=reference_support_info,
host=self.host)
if not converted_file:
if not self.import_in_place and not self.options.dry_run:
self.filesystem.copyfile(orig_filepath, new_filepath) # The file was unmodified.
else:
for prefixed_property in converted_file[0]:
total_prefixed_properties.setdefault(prefixed_property, 0)
total_prefixed_properties[prefixed_property] += 1
prefixed_properties.extend(set(converted_file[0]) - set(prefixed_properties))
if not self.options.dry_run:
self.filesystem.write_text_file(new_filepath, converted_file[1])
else:
if not self.import_in_place and not self.options.dry_run:
self.filesystem.copyfile(orig_filepath, new_filepath)
if self.filesystem.read_binary_file(orig_filepath)[:2] == '#!':
self.filesystem.make_executable(new_filepath)
copied_files.append(new_filepath.replace(self._webkit_root, ''))
_log.info('')
_log.info('Import complete')
_log.info('')
_log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
_log.info('Imported %d reftests', total_imported_reftests)
_log.info('Imported %d JS tests', total_imported_jstests)
_log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
_log.info('')
if total_prefixed_properties:
_log.info('Properties needing prefixes (by count):')
for prefixed_property in sorted(total_prefixed_properties, key=lambda p: total_prefixed_properties[p]):
_log.info(' %s: %s', prefixed_property, total_prefixed_properties[prefixed_property])
def path_too_long(self, source_path):
"""Checks whether a source path is too long to import.
Args:
Absolute path of file to be imported.
Returns:
True if the path is too long to import, False if it's OK.
"""
path_from_repo_base = os.path.relpath(source_path, self.source_repo_path)
return len(path_from_repo_base) > MAX_PATH_LENGTH
| [
"[email protected]"
] | |
f180ab018656525fd2a3a2e6f419270586db4dd0 | 947e71b34d21f3c9f5c0a197d91a880f346afa6c | /ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py | 3bdec79c1f636e892ef6b9ed0a639903073d5fc8 | [
"MIT",
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"OFL-1.1",
"MS-PL",
"AFL-2.1",
"GPL-2.0-only",
"Python-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | liuwenru/Apache-Ambari-ZH | 4bc432d4ea7087bb353a6dd97ffda0a85cb0fef0 | 7879810067f1981209b658ceb675ac76e951b07b | refs/heads/master | 2023-01-14T14:43:06.639598 | 2020-07-28T12:06:25 | 2020-07-28T12:06:25 | 223,551,095 | 38 | 44 | Apache-2.0 | 2023-01-02T21:55:10 | 2019-11-23T07:43:49 | Java | UTF-8 | Python | false | false | 10,468 | py | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# System imports
import os
import sys
import logging
from mock.mock import patch
# Local imports
from stacks.utils.RMFTestCase import *
import resource_management.libraries.functions.file_system
COMMON_SERVICES_ALERTS_DIR = "HDFS/2.1.0.2.0/package/alerts"
DATA_DIR_MOUNT_HIST_FILE_PATH = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
file_path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(file_path)))))
file_path = os.path.join(file_path, "main", "resources", "common-services", COMMON_SERVICES_ALERTS_DIR)
RESULT_STATE_OK = "OK"
RESULT_STATE_WARNING = "WARNING"
RESULT_STATE_CRITICAL = "CRITICAL"
RESULT_STATE_UNKNOWN = "UNKNOWN"
class TestAlertDataNodeUnmountedDataDir(RMFTestCase):
def setUp(self):
"""
Import the class under test.
Because the class is present in a different folder, append its dir to the system path.
Also, shorten the import name and make it a global so the test functions can access it.
:return:
"""
self.logger = logging.getLogger()
sys.path.append(file_path)
global alert
import alert_datanode_unmounted_data_dir as alert
@patch("resource_management.libraries.functions.file_system.get_and_cache_mount_points")
def test_missing_configs(self, get_and_cache_mount_points_mock):
"""
Check that the status is UNKNOWN when configs are missing.
"""
configs = {}
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_UNKNOWN)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue('is a required parameter for the script' in messages[0])
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": ""
}
[status, messages] = alert.execute(configurations=configs)
self.assertNotEqual(status, RESULT_STATE_UNKNOWN)
@patch("resource_management.libraries.functions.file_system.get_and_cache_mount_points")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_mount_history_file_does_not_exist(self, is_dir_mock, exists_mock, get_mount_mock, get_and_cache_mount_points_mock):
"""
Test that the status is WARNING when the data dirs are mounted on root, but the mount history file
does not exist.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data"
}
# Mock calls
exists_mock.return_value = False
is_dir_mock.return_value = True
get_mount_mock.return_value = "/"
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_WARNING)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("{0} was not found".format(DATA_DIR_MOUNT_HIST_FILE_PATH) in messages[0])
@patch("resource_management.libraries.functions.file_system.get_and_cache_mount_points")
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_all_dirs_on_root(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock, get_and_cache_mount_points_mock):
"""
Test that the status is OK when all drives are mounted on the root partition
and this coincides with the expected values.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data"
}
# Mock calls
exists_mock.return_value = True
is_dir_mock.return_value = True
get_mount_mock.return_value = "/"
get_data_dir_to_mount_from_file_mock.return_value = {"/grid/0/data": "/",
"/grid/1/data": "/",
"/grid/2/data": "/"}
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_OK)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("The following data dir(s) are valid" in messages[0])
@patch("resource_management.libraries.functions.file_system.get_and_cache_mount_points")
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_match_expected(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock, get_and_cache_mount_points_mock):
"""
Test that the status is OK when the mount points match the expected values.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data"
}
# Mock calls
exists_mock.return_value = True
is_dir_mock.return_value = True
get_mount_mock.side_effect = ["/device1", "/device2", "/"]
get_data_dir_to_mount_from_file_mock.return_value = {"/grid/0/data": "/device1",
"/grid/1/data": "/device2",
"/grid/2/data": "/"}
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_OK)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("The following data dir(s) are valid" in messages[0])
@patch("resource_management.libraries.functions.file_system.get_and_cache_mount_points")
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_critical_one_root_one_mounted(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock, get_and_cache_mount_points_mock):
"""
Test that the status is CRITICAL when the history file is missing
and at least one data dir is on a mount and at least one data dir is on the root partition.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data"
}
# Mock calls
exists_mock.return_value = False
is_dir_mock.return_value = True
# The first 2 data dirs will report an error.
get_mount_mock.side_effect = ["/", "/", "/device1", "/device2"]
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_CRITICAL)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("Detected at least one data dir on a mount point, but these are writing to the root partition:\n/grid/0/data\n/grid/1/data" in messages[0])
@patch("resource_management.libraries.functions.file_system.get_and_cache_mount_points")
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_critical_unmounted(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock, get_and_cache_mount_points_mock):
"""
Test that the status is CRITICAL when the history file exists and one of the dirs
became unmounted.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data"
}
# Mock calls
exists_mock.return_value = True
is_dir_mock.return_value = True
get_mount_mock.side_effect = ["/", "/", "/device3", "/device4"]
get_data_dir_to_mount_from_file_mock.return_value = {"/grid/0/data": "/", # remained on /
"/grid/1/data": "/device2", # became unmounted
"/grid/2/data": "/", # became mounted
"/grid/3/data": "/device4"} # remained mounted
[status, messages] = alert.execute(configurations=configs)
self.assertEqual(status, RESULT_STATE_CRITICAL)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertTrue("Detected data dir(s) that became unmounted and are now writing to the root partition:\n/grid/1/data" in messages[0])
@patch("resource_management.libraries.functions.file_system.get_and_cache_mount_points")
@patch("resource_management.libraries.functions.mounted_dirs_helper.get_dir_to_mount_from_file")
@patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir")
@patch("os.path.exists")
@patch("os.path.isdir")
def test_file_uri_and_meta_tags(self, is_dir_mock, exists_mock, get_mount_mock, get_data_dir_to_mount_from_file_mock, get_and_cache_mount_points_mock):
"""
Test that the status is OK when the locations include file:// schemes and meta tags.
"""
configs = {
"{{hdfs-site/dfs.datanode.data.dir}}":"[SSD]file:///grid/0/data"
}
# Mock calls
exists_mock.return_value = True
is_dir_mock.return_value = True
get_mount_mock.return_value = "/"
get_data_dir_to_mount_from_file_mock.return_value = {"/grid/0/data":"/"}
[status, messages] = alert.execute(configurations = configs)
self.assertEqual(status, RESULT_STATE_OK)
self.assertTrue(messages is not None and len(messages) == 1)
self.assertEqual("The following data dir(s) are valid:\n/grid/0/data", messages[0]) | [
"[email protected]"
] | |
1dc0356232f83b9f82596add14362a858c4e3774 | 1678abd4c1efb74993745b55bf5a5536c2205417 | /forum/migrations/0010_auto_20200414_2322.py | 59d3916c979c2e05ef688ccf22bdbfbb16dbbdc9 | [] | no_license | samosky123/Django-Forum | 7b8868338d09d4a02b61717454adb2297cafc44e | c1ee3c9261479ebf8039c3a6fc9a3aba06d2c870 | refs/heads/master | 2023-07-29T09:45:19.810265 | 2020-11-13T16:05:42 | 2020-11-13T16:05:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # Generated by Django 2.2 on 2020-04-14 17:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forum', '0009_auto_20200414_2313'),
]
operations = [
migrations.RenameField(
model_name='answer',
old_name='downvote',
new_name='downvotes',
),
migrations.RenameField(
model_name='answer',
old_name='upvote',
new_name='upvotes',
),
]
| [
"[email protected]"
] | |
682e6e9bf096cd8bc9cecd1f5476499372f6c040 | 61dcd9b485bc5e6d07c4adf14f138eabaa9a23b5 | /Own practice/2.2-2.10/2.8.py | a4281af24752c551736fa11d1a5726365e91a315 | [] | no_license | bong1915016/Introduction-to-Programming-Using-Python | d442d2252d13b731f6cd9c6356032e8b90aba9a1 | f23e19963183aba83d96d9d8a9af5690771b62c2 | refs/heads/master | 2020-09-25T03:09:34.384693 | 2019-11-28T17:33:28 | 2019-11-28T17:33:28 | 225,904,132 | 1 | 0 | null | 2019-12-04T15:56:55 | 2019-12-04T15:56:54 | null | UTF-8 | Python | false | false | 872 | py | """
Programming exercises 2.2-2.10, 2.8: Computing energy.
Write a program that calculates the energy needed to heat water from an initial
temperature to a final temperature. The program prompts the user to enter the
amount of water in kilograms, the initial temperature, and the final
temperature. The formula to compute the energy is:
Q = M * (finalTemperature - initialTemperature) * 4184
where M is the amount of water in kilograms, the temperatures are given in
degrees Celsius, and Q is the energy measured in joules.
Here is a sample run:
```
Enter the amount of water in kilograms: 55.5
Enter the initial temperature: 3.5
Enter the final Temperature:10.5
The energy needed is 1625484.0
```
"""
M = eval(input("Enter the amount of water in kilograms:"))
initialTemperature = eval(input("Enter the initial temperature:"))
finalTemperature = eval(input("Enter the final Temperature:"))
Q = M * (finalTemperature - initialTemperature) * 4184
print("The energy needed is", Q)
| [
"[email protected]"
] | |
722f6cccafabb3e43a45d23835ec7dc65f373228 | f54e2067a0eb04540a925b4a22db1c341a964dac | /src/pyiem/nws/gini.py | 4bdc3f0357741772a951ee1a8f5f6c92de1a3761 | [
"MIT"
] | permissive | xoxo143/pyIEM | 4b4000acadce17720ccbe4ecf9cd8c54e0775b8d | 0dcf8ee65ac4b1acc11d7be61c62df815e3854f0 | refs/heads/master | 2023-01-24T11:17:32.450242 | 2020-12-02T04:06:09 | 2020-12-02T04:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,239 | py | """
Processing of GINI formatted data found on NOAAPORT
"""
import struct
import math
import zlib
from datetime import timezone, datetime
import os
import pyproj
import numpy as np
from pyiem.util import LOG
DATADIR = os.sep.join([os.path.dirname(__file__), "../data"])
M_PI_2 = 1.57079632679489661923
M_PI = 3.14159265358979323846
RE_METERS = 6371200.0
ENTITIES = [
"UNK",
"UNK",
"MISC",
"JERS",
"ERS",
"POES",
"COMP",
"DMSP",
"GMS",
"METEOSAT",
"GOES7",
"GOES8",
"GOES9",
"GOES10",
"GOES11",
"GOES12",
"GOES13",
"GOES14",
"GOES15",
]
LABELS = [
"UNK",
"UNK",
"MISC",
"JERS",
"ERS",
"POES",
"COMP",
"DMSP",
"GMS",
"METEOSAT",
"GOES",
"GOES",
"GOES",
"GOES",
"GOES",
"GOES",
"GOES",
"GOES",
"GOES",
]
CHANNELS = [
"",
"VIS",
"3.9",
"WV",
"IR",
"12",
"13.3",
"1.3",
"U8",
"U9",
"U10",
"U11",
"U12",
"LI",
"PW",
"SKIN",
"CAPE",
"TSURF",
"WINDEX",
]
for _u in range(22, 100):
CHANNELS.append(f"U{_u}")
SECTORS = [
"NHCOMP",
"EAST",
"WEST",
"AK",
"AKNAT",
"HI",
"HINAT",
"PR",
"PRNAT",
"SUPER",
"NHCOMP",
"CCONUS",
"EFLOAT",
"WFLOAT",
"CFLOAT",
"PFLOAT",
]
AWIPS_GRID_GUESS = {
"A": 207,
"B": 203,
"E": 211,
"F": 0,
"H": 208,
"I": 204,
"N": 0,
"P": 210,
"Q": 205,
"W": 211,
}
AWIPS_GRID = {
"TIGB": 203,
"TIGE": 211,
"TIGW": 211,
"TIGH": 208,
"TIGP": 210,
"TIGA": 207,
"TIGI": 204,
"TIGQ": 205,
"TICF": 201,
}
def uint24(data):
"""convert three byte data that represents an unsigned int"""
u = int(struct.unpack(">B", data[0:1])[0]) << 16
u += int(struct.unpack(">B", data[1:2])[0]) << 8
u += int(struct.unpack(">B", data[2:3])[0])
return u
def int24(data):
"""Convert to int."""
u = int(struct.unpack(">B", data[0:1])[0] & 127) << 16
u += int(struct.unpack(">B", data[1:2])[0]) << 8
u += int(struct.unpack(">B", data[2:3])[0])
if (struct.unpack(">B", data[0:1])[0] & 128) != 0:
u *= -1
return u
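# Illustrative values: uint24(b"\x01\x00\x00") == 65536, while
# int24(b"\x81\x00\x00") == -65536 -- the high bit of the first byte is a sign
# flag (sign-magnitude), not two's complement.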
def get_ir_ramp():
""" Return a np 256x3 array of colors to use for IR """
fn = "%s/gini_ir_ramp.txt" % (DATADIR,)
data = np.zeros((256, 3), np.uint8)
    with open(fn) as fh:
        for i, line in enumerate(fh):
            tokens = line.split()
            data[i, :] = [int(tokens[0]), int(tokens[1]), int(tokens[2])]
return data
class GINIZFile:
"""
Deal with compressed GINI files, which are the standard on NOAAPORT
"""
def __init__(self, fobj):
"""Create a GNIFile instance with a compressed file object
Args:
fobj (file): A fileobject
"""
fobj.seek(0)
# WMO HEADER
self.wmo = (fobj.read(21)).strip().decode("utf-8")
d = zlib.decompressobj()
hdata = d.decompress(fobj.read())
self.metadata = self.read_header(hdata[21:])
self.init_projection()
totsz = len(d.unused_data)
# 5120 value chunks, so we need to be careful!
sdata = b""
chunk = b"x\xda"
i = 0
for part in d.unused_data.split(b"x\xda"):
if part == b"" and i == 0:
continue
chunk += part
try:
sdata += zlib.decompress(chunk)
i += 1
totsz -= len(chunk)
chunk = b"x\xda"
except Exception:
chunk += b"x\xda"
if totsz != 0:
LOG.info("Totalsize left: %s", totsz)
self.data = np.reshape(
np.fromstring(sdata, np.int8),
(self.metadata["numlines"] + 1, self.metadata["linesize"]),
)
def __str__(self):
"""return a string representation"""
text = "%s Line Size: %s Num Lines: %s" % (
self.wmo,
self.metadata["linesize"],
self.metadata["numlines"],
)
return text
def awips_grid(self):
"""
Return the awips grid number based on the WMO header
"""
try1 = AWIPS_GRID.get(self.wmo[:4], None)
if try1:
return try1
return AWIPS_GRID_GUESS.get(self.wmo[3], None)
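    # awips_grid() example: a product whose WMO header starts with "TIGE" maps
    # through AWIPS_GRID above to grid 211; an unlisted header falls back to
    # AWIPS_GRID_GUESS keyed on the 4th character of the header.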
def current_filename(self):
"""
Return a filename for this product, we'll use the format
{SOURCE}_{SECTOR}_{CHANNEL}_{VALID}.png
"""
return "%s_%s_%s.png" % (
LABELS[self.metadata["creating_entity"]],
SECTORS[self.metadata["sector"]],
CHANNELS[self.metadata["channel"]],
)
def get_bird(self):
"""
Return a string label for this satellite
"""
return ENTITIES[self.metadata["creating_entity"]]
def get_sector(self):
"""Return the sector."""
return SECTORS[self.metadata["sector"]]
def get_channel(self):
"""Return the channel."""
return CHANNELS[self.metadata["channel"]]
def archive_filename(self):
"""
Return a filename for this product, we'll use the format
{SOURCE}_{SECTOR}_{CHANNEL}_{VALID}.png
"""
return ("%s_%s_%s_%s.png") % (
LABELS[self.metadata["creating_entity"]],
SECTORS[self.metadata["sector"]],
CHANNELS[self.metadata["channel"]],
self.metadata["valid"].strftime("%Y%m%d%H%M"),
)
def init_llc(self):
"""
Initialize Lambert Conic Comformal
"""
self.metadata["proj"] = pyproj.Proj(
proj="lcc",
lat_0=self.metadata["latin"],
lat_1=self.metadata["latin"],
lat_2=self.metadata["latin"],
lon_0=self.metadata["lov"],
a=6371200.0,
b=6371200.0,
)
# s = 1.0
# if self.metadata['proj_center_flag'] != 0:
# s = -1.0
psi = M_PI_2 - abs(math.radians(self.metadata["latin"]))
cos_psi = math.cos(psi)
# r_E = RE_METERS / cos_psi
alpha = math.pow(math.tan(psi / 2.0), cos_psi) / math.sin(psi)
x0, y0 = self.metadata["proj"](
self.metadata["lon1"], self.metadata["lat1"]
)
self.metadata["x0"] = x0
self.metadata["y0"] = y0
# self.metadata['dx'] *= alpha
# self.metadata['dy'] *= alpha
self.metadata["y1"] = y0 + (self.metadata["dy"] * self.metadata["ny"])
(self.metadata["lon_ul"], self.metadata["lat_ul"]) = self.metadata[
"proj"
](self.metadata["x0"], self.metadata["y1"], inverse=True)
LOG.info(
(
"lat1: %.5f y0: %5.f y1: %.5f lat_ul: %.3f "
"lat_ur: %.3f lon_ur: %.3f alpha: %.5f dy: %.3f"
),
self.metadata["lat1"],
y0,
self.metadata["y1"],
self.metadata["lat_ul"],
self.metadata["lat_ur"],
self.metadata["lon_ur"],
alpha,
self.metadata["dy"],
)
def init_mercator(self):
"""
        Compute Mercator projection parameters
"""
self.metadata["proj"] = pyproj.Proj(
proj="merc",
lat_ts=self.metadata["latin"],
x_0=0,
y_0=0,
a=6371200.0,
b=6371200.0,
)
x0, y0 = self.metadata["proj"](
self.metadata["lon1"], self.metadata["lat1"]
)
self.metadata["x0"] = x0
self.metadata["y0"] = y0
x1, y1 = self.metadata["proj"](
self.metadata["lon2"], self.metadata["lat2"]
)
self.metadata["x1"] = x1
self.metadata["y1"] = y1
self.metadata["dx"] = (x1 - x0) / self.metadata["nx"]
self.metadata["dy"] = (y1 - y0) / self.metadata["ny"]
(self.metadata["lon_ul"], self.metadata["lat_ul"]) = self.metadata[
"proj"
](self.metadata["x0"], self.metadata["y1"], inverse=True)
LOG.info(
(
"latin: %.2f lat_ul: %.3f lon_ul: %.3f "
"y0: %5.f y1: %.5f dx: %.3f dy: %.3f"
),
self.metadata["latin"],
self.metadata["lat_ul"],
self.metadata["lon_ul"],
y0,
y1,
self.metadata["dx"],
self.metadata["dy"],
)
def init_stereo(self):
"""
        Compute Polar Stereographic projection parameters
"""
self.metadata["proj"] = pyproj.Proj(
proj="stere",
lat_ts=60,
lat_0=90,
lon_0=self.metadata["lov"],
x_0=0,
y_0=0,
a=6371200.0,
b=6371200.0,
)
# First point!
x0, y0 = self.metadata["proj"](
self.metadata["lon1"], self.metadata["lat1"]
)
self.metadata["x0"] = x0
self.metadata["y0"] = y0
self.metadata["y1"] = y0 + (self.metadata["dy"] * self.metadata["ny"])
(self.metadata["lon_ul"], self.metadata["lat_ul"]) = self.metadata[
"proj"
](x0, self.metadata["y1"], inverse=True)
LOG.info(
(
"lon_ul: %.2f lat_ul: %.2f "
"lon_ll: %.2f lat_ll: %.2f "
" lov: %.2f latin: %.2f lat1: %.2f lat2: %.2f "
"y0: %5.f y1: %.5f dx: %.3f dy: %.3f"
),
self.metadata["lon_ul"],
self.metadata["lat_ul"],
self.metadata["lon1"],
self.metadata["lat1"],
self.metadata["lov"],
self.metadata["latin"],
self.metadata["lat1"],
self.metadata["lat2"],
y0,
self.metadata["y1"],
self.metadata["dx"],
self.metadata["dy"],
)
def init_projection(self):
"""
Setup Grid and projection details
"""
if self.metadata["map_projection"] == 3:
self.init_llc()
elif self.metadata["map_projection"] == 1:
self.init_mercator()
elif self.metadata["map_projection"] == 5:
self.init_stereo()
else:
LOG.info("Unknown Projection: %s", self.metadata["map_projection"])
def read_header(self, hdata):
"""read the header!"""
meta = {}
meta["source"] = struct.unpack("> B", hdata[0:1])[0]
meta["creating_entity"] = struct.unpack("> B", hdata[1:2])[0]
meta["sector"] = struct.unpack("> B", hdata[2:3])[0]
meta["channel"] = struct.unpack("> B", hdata[3:4])[0]
meta["numlines"] = struct.unpack(">H", hdata[4:6])[0]
meta["linesize"] = struct.unpack(">H", hdata[6:8])[0]
yr = 1900 + struct.unpack("> B", hdata[8:9])[0]
mo = struct.unpack("> B", hdata[9:10])[0]
dy = struct.unpack("> B", hdata[10:11])[0]
hh = struct.unpack("> B", hdata[11:12])[0]
mi = struct.unpack("> B", hdata[12:13])[0]
ss = struct.unpack("> B", hdata[13:14])[0]
# hs = struct.unpack("> B", hdata[14:15] )[0]
meta["valid"] = datetime(yr, mo, dy, hh, mi, ss).replace(
tzinfo=timezone.utc
)
meta["map_projection"] = struct.unpack("> B", hdata[15:16])[0]
meta["proj_center_flag"] = struct.unpack("> B", hdata[36:37])[0] >> 7
meta["scan_mode"] = struct.unpack("> B", hdata[37:38])[0]
meta["nx"] = struct.unpack(">H", hdata[16:18])[0]
meta["ny"] = struct.unpack(">H", hdata[18:20])[0]
meta["res"] = struct.unpack(">B", hdata[41:42])[0]
# Is Calibration Info included?
# http://www.nws.noaa.gov/noaaport/document/ICD%20CH5-2005-1.pdf
# page24
# Mercator
if meta["map_projection"] == 1:
meta["lat1"] = int24(hdata[20:23])
meta["lon1"] = int24(hdata[23:26])
meta["lov"] = 0
meta["dx"] = struct.unpack(">H", hdata[33:35])[0]
meta["dy"] = struct.unpack(">H", hdata[35:37])[0]
meta["latin"] = int24(hdata[38:41])
meta["lat2"] = int24(hdata[27:30])
meta["lon2"] = int24(hdata[30:33])
meta["lat_ur"] = int24(hdata[55:58])
meta["lon_ur"] = int24(hdata[58:61])
# lambert == 3, polar == 5
else:
meta["lat1"] = int24(hdata[20:23])
meta["lon1"] = int24(hdata[23:26])
meta["lov"] = int24(hdata[27:30])
meta["dx"] = uint24(hdata[30:33])
meta["dy"] = uint24(hdata[33:36])
meta["latin"] = int24(hdata[38:41])
meta["lat2"] = 0
meta["lon2"] = 0
meta["lat_ur"] = int24(hdata[55:58])
meta["lon_ur"] = int24(hdata[58:61])
meta["dx"] = meta["dx"] / 10.0
meta["dy"] = meta["dy"] / 10.0
meta["lat1"] = meta["lat1"] / 10000.0
meta["lon1"] = meta["lon1"] / 10000.0
meta["lov"] = meta["lov"] / 10000.0
meta["latin"] = meta["latin"] / 10000.0
meta["lat2"] = meta["lat2"] / 10000.0
meta["lon2"] = meta["lon2"] / 10000.0
meta["lat_ur"] = meta["lat_ur"] / 10000.0
meta["lon_ur"] = meta["lon_ur"] / 10000.0
return meta
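# Minimal usage sketch (illustrative only; the filename below is hypothetical):
#
#   with open("TIGE05_KNES.gini.z", "rb") as fobj:
#       gini = GINIZFile(fobj)
#       print(gini)                      # WMO header, line size, number of lines
#       print(gini.archive_filename())   # {SOURCE}_{SECTOR}_{CHANNEL}_{VALID}.png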
| [
"[email protected]"
] | |
4b4722dc364c71697c33815091831aec2badb373 | 0115cfe0ca89264d3e25616943c3437d24ac0497 | /pyx/finance/finance.py | 56f41561431419e6dbb05819a4c64021703e836c | [] | no_license | shakfu/polylab | 9024918681fe4807b4e5e2da4bba04453566bae1 | 9dce4d30120981e34bbbbc6f2caaff6e16a6cfbd | refs/heads/master | 2023-08-18T05:41:01.786936 | 2023-07-30T22:36:52 | 2023-07-30T22:36:52 | 62,841,098 | 3 | 0 | null | 2022-04-21T22:25:43 | 2016-07-07T22:08:47 | C | UTF-8 | Python | false | false | 4,309 | py | #!/usr/bin/env python
'''
A set of functions for quick financial analysis of an investment
opportunity and a series of projected cashflows.
For further details and pros/cons of each function please refer
to the respective wikipedia page:
payback_period
http://en.wikipedia.org/wiki/Payback_period
net present value
http://en.wikipedia.org/wiki/Net_present_value
internal rate of return
http://en.wikipedia.org/wiki/Internal_rate_of_return
'''
import sys
def payback_of_investment(investment, cashflows):
"""The payback period refers to the length of time required
for an investment to have its initial cost recovered.
>>> payback_of_investment(200.0, [60.0, 60.0, 70.0, 90.0])
3.1111111111111112
"""
total, years, cumulative = 0.0, 0, []
if not cashflows or (sum(cashflows) < investment):
raise Exception("insufficient cashflows")
for cashflow in cashflows:
total += cashflow
if total < investment:
years += 1
cumulative.append(total)
A = years
B = investment - cumulative[years-1]
C = cumulative[years] - cumulative[years-1]
return A + (B/C)
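# Worked example (not in the original source), matching the doctest above:
# for an investment of 200 and cashflows [60, 60, 70, 90] the cumulative
# totals are [60, 120, 190, 280], so A = 3 full years, B = 200 - 190 = 10
# left to recover, and C = 280 - 190 = 90 recovered during the final year,
# giving 3 + 10/90 = 3.111... years.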
def payback(cashflows):
"""The payback period refers to the length of time required
for an investment to have its initial cost recovered.
(This version accepts a list of cashflows)
>>> payback([-200.0, 60.0, 60.0, 70.0, 90.0])
3.1111111111111112
"""
investment, cashflows = cashflows[0], cashflows[1:]
if investment < 0 : investment = -investment
return payback_of_investment(investment, cashflows)
def npv(rate, cashflows):
"""The total present value of a time series of cash flows.
>>> npv(0.1, [-100.0, 60.0, 60.0, 60.0])
49.211119459053322
"""
total = 0.0
for i, cashflow in enumerate(cashflows):
total += cashflow / (1 + rate)**i
return total
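# Illustrative note (not in the original source): the loop evaluates
# NPV = sum(CF_i / (1 + rate)**i). For the doctest values:
#   -100/1.1**0 + 60/1.1**1 + 60/1.1**2 + 60/1.1**3
#   = -100 + 54.545 + 49.587 + 45.079 ~= 49.211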
def irr(cashflows, iterations=100):
"""The IRR or Internal Rate of Return is the annualized effective
compounded return rate which can be earned on the invested
capital, i.e., the yield on the investment.
>>> irr([-100.0, 60.0, 60.0, 60.0])
0.36309653947517645
"""
rate = 1.0
investment = cashflows[0]
for i in range(1, iterations+1):
rate *= (1 - npv(rate, cashflows) / investment)
return rate
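# Illustrative note (not in the original source): this is a simple fixed-point
# iteration rather than a bracketing root finder. Each pass scales `rate` by
# (1 - npv/investment), which drives npv(rate, cashflows) toward zero; it
# assumes cashflows[0] holds the (negative) initial investment.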
def investment_analysis(discount_rate, cashflows):
"""Provides summary investment analysis on a list of cashflows
and a discount_rate.
Assumes that the first element of the list (i.e. at period 0)
is the initial investment with a negative float value.
"""
_npv = npv(discount_rate, cashflows)
ts = [('year', 'cashflow')] + [(str(x), str(y)) for (x,y) in zip(
range(len(cashflows)), cashflows)]
print "-" * 70
for y,c in ts:
print y + (len(c) - len(y) + 1)*' ',
print
for y,c in ts:
print c + ' ',
print
print
print "Discount Rate: %.1f%%" % (discount_rate * 100)
print
print "Payback: %.2f years" % payback(cashflows)
print " IRR: %.2f%%" % (irr(cashflows) * 100)
print " NPV: %s" % _npv
print
print "==> %s investment of %s" % (
("Approve" if _npv > 0 else "Do Not Approve"), str(-cashflows[0]))
print "-" * 70
def main(inputs):
"""commandline entry point
"""
usage = '''Provides analysis of an investment and a series of cashflows.
usage: invest discount_rate [cashflow0, cashflow1, ..., cashflowN]
where
discount_rate is the rate used to discount future cashflows
to their present values
cashflow0 is the investment amount (always a negative value)
cashflow1 .. cashflowN values can be positive (net inflows)
or
negative (net outflows)
for example:
invest 0.05 -10000 6000 6000 6000
'''
try:
rate, cashflows = inputs[0], inputs[1:]
investment_analysis(float(rate), [float(c) for c in cashflows])
except IndexError:
print usage
sys.exit()
main(sys.argv[1:])
| [
"[email protected]"
] | |
ec63b954fd448cd482cec2bfb15b88afbea89cc4 | c3ff891e0e23c5f9488508d30349259cc6b64b4d | /python练习/基础代码/Demo33.py | 20ad574ee408810bd6658c854a8dd2e8ce4e4a44 | [] | no_license | JacksonMike/python_exercise | 2af2b8913ec8aded8a17a98aaa0fc9c6ccd7ba53 | 7698f8ce260439abb3cbdf478586fa1888791a61 | refs/heads/master | 2020-07-14T18:16:39.265372 | 2019-08-30T11:56:29 | 2019-08-30T11:56:29 | 205,370,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | infor = {"name":"Jim"}
infor["age"] = 19#添加
infor["QQ"] = 10086
infor["QQ"] = 10085#修改
del infor["QQ"] #删除
print(infor.get("name"))#查询
a = {} | [
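# Illustrative addition (not part of the original exercise): .get() accepts a
# default value, so looking up the key deleted above does not raise KeyError:
#   infor.get("QQ", "no QQ on file")  ->  'no QQ on file'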
"[email protected]"
] | |
a2397630ed41926dd03f160daaf34fd7b95a8670 | 45ab4c22d918dc4390572f53c267cf60de0d68fb | /src/Analysis/Engine/Impl/Typeshed/third_party/2and3/werkzeug/_compat.pyi | 74981331c7de5221f3fe7114e32b5f8d3c300296 | [
"MIT",
"Apache-2.0"
] | permissive | sourcegraph/python-language-server | 580a24fd15fe9d4abeb95e9333d61db1c11a2670 | 64eae156f14aa14642afcac0e7edaf5d7c6d1a1c | refs/heads/master | 2023-04-09T21:17:07.555979 | 2018-12-06T23:25:05 | 2018-12-06T23:25:05 | 155,174,256 | 2 | 2 | Apache-2.0 | 2018-10-29T08:06:49 | 2018-10-29T08:06:49 | null | UTF-8 | Python | false | false | 1,280 | pyi | import sys
from typing import Any
if sys.version_info < (3,):
import StringIO as BytesIO
else:
from io import StringIO as BytesIO
PY2 = ... # type: Any
WIN = ... # type: Any
unichr = ... # type: Any
text_type = ... # type: Any
string_types = ... # type: Any
integer_types = ... # type: Any
iterkeys = ... # type: Any
itervalues = ... # type: Any
iteritems = ... # type: Any
iterlists = ... # type: Any
iterlistvalues = ... # type: Any
int_to_byte = ... # type: Any
iter_bytes = ... # type: Any
def fix_tuple_repr(obj): ...
def implements_iterator(cls): ...
def implements_to_string(cls): ...
def native_string_result(func): ...
def implements_bool(cls): ...
range_type = ... # type: Any
NativeStringIO = ... # type: Any
def make_literal_wrapper(reference): ...
def normalize_string_tuple(tup): ...
def try_coerce_native(s): ...
wsgi_get_bytes = ... # type: Any
def wsgi_decoding_dance(s, charset='', errors=''): ...
def wsgi_encoding_dance(s, charset='', errors=''): ...
def to_bytes(x, charset=..., errors=''): ...
def to_native(x, charset=..., errors=''): ...
def reraise(tp, value, tb=None): ...
imap = ... # type: Any
izip = ... # type: Any
ifilter = ... # type: Any
def to_unicode(x, charset=..., errors='', allow_none_charset=False): ...
| [
"[email protected]"
] | |
3baa5490caeaee6f4b3444ff8bdbe2023f78f045 | 8dcd3ee098b4f5b80879c37a62292f42f6b2ae17 | /venv/Lib/site-packages/pandas/core/internals/blocks.py | 8cd8524762a8f465d7b25de9b5374adb5fbcf3d3 | [] | no_license | GregVargas1999/InfinityAreaInfo | 53fdfefc11c4af8f5d2b8f511f7461d11a3f7533 | 2e4a7c6a2424514ca0ec58c9153eb08dc8e09a4a | refs/heads/master | 2022-12-01T20:26:05.388878 | 2020-08-11T18:37:05 | 2020-08-11T18:37:05 | 286,821,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104,426 | py | import functools
import inspect
import re
import warnings
from datetime import datetime, timedelta
from typing import Any, List
import numpy as np
import pandas._libs.internals as libinternals
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.core.missing as missing
from pandas._libs import NaT, Timestamp, algos as libalgos, lib, tslib, writers
from pandas._libs.index import convert_scalar
from pandas._libs.tslibs import Timedelta, conversion
from pandas._libs.tslibs.timezones import tz_compare
from pandas.core.arrays import (
Categorical,
DatetimeArray,
ExtensionArray,
PandasArray,
PandasDtype,
TimedeltaArray,
)
from pandas.core.base import PandasObject
from pandas.core.construction import extract_array
from pandas.core.dtypes.cast import (
astype_nansafe,
find_common_type,
infer_dtype_from,
infer_dtype_from_scalar,
maybe_downcast_numeric,
maybe_downcast_to_dtype,
maybe_infer_dtype_type,
maybe_promote,
maybe_upcast,
soft_convert_objects,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
_TD_DTYPE,
ensure_platform_int,
is_bool_dtype,
is_categorical,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_re,
is_re_compilable,
is_sparse,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.concat import concat_categorical, concat_datetime
from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCExtensionArray,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import (
_isna_compat,
array_equivalent,
is_valid_nat_for_dtype,
isna,
)
from pandas.core.indexers import (
check_setitem_lengths,
is_empty_indexer,
is_scalar_indexer,
)
from pandas.core.nanops import nanpercentile
from pandas.io.formats.printing import pprint_thing
from pandas.util._validators import validate_bool_kwarg
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ["_mgr_locs", "values", "ndim"]
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_extension = False
_can_hold_na = False
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = "dense"
_concatenator = staticmethod(np.concatenate)
def __init__(self, values, placement, ndim=None):
self.ndim = self._check_ndim(values, ndim)
self.mgr_locs = placement
self.values = values
if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values):
raise ValueError(
f"Wrong number of items passed {len(self.values)}, "
f"placement implies {len(self.mgr_locs)}"
)
def _check_ndim(self, values, ndim):
"""
ndim inference and validation.
Infers ndim from 'values' if not provided to __init__.
Validates that values.ndim and ndim are consistent if and only if
the class variable '_validate_ndim' is True.
Parameters
----------
values : array-like
ndim : int or None
Returns
-------
ndim : int
Raises
------
ValueError : the number of dimensions do not match
"""
if ndim is None:
ndim = values.ndim
if self._validate_ndim and values.ndim != ndim:
raise ValueError(
"Wrong number of dimensions. "
f"values.ndim != ndim [{values.ndim} != {ndim}]"
)
return ndim
@property
def _holder(self):
"""The array-like that can hold the underlying values.
None for 'Block', overridden by subclasses that don't
use an ndarray.
"""
return None
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if dtype is Categorical or dtype is CategoricalDtype:
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError(f"invalid type {dtype} for astype")
elif is_categorical_dtype(dtype):
return True
return False
def external_values(self, dtype=None):
"""
The array that Series.values returns (public attribute).
This has some historical constraints, and is overridden in block
subclasses to return the correct array (e.g. period returns
object ndarray and datetimetz a datetime64[ns] ndarray instead of
proper extension array).
"""
return self.values
def internal_values(self, dtype=None):
""" return an internal format, currently just the ndarray
this should be the pure internal API format
"""
return self.values
def array_values(self) -> ExtensionArray:
"""
The array that Series.array returns. Always an ExtensionArray.
"""
return PandasArray(self.values)
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
this is often overridden to handle to_dense like operations
"""
if is_object_dtype(dtype):
return self.values.astype(object)
return self.values
def get_block_values(self, dtype=None):
"""
This is used in the JSON C code
"""
return self.get_values(dtype=dtype)
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, libinternals.BlockPlacement):
new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return self.dtype
def make_block(self, values, placement=None) -> "Block":
"""
Create a new block, with type inference propagate any values that are
not specified
"""
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, ndim=self.ndim)
def make_block_same_class(self, values, placement=None, ndim=None):
""" Wrap given values in a block of same type as self. """
if placement is None:
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
return make_block(values, placement=placement, ndim=ndim, klass=type(self))
def __repr__(self) -> str:
# don't want to print out all of the items here
name = type(self).__name__
if self._is_single_block:
result = f"{name}: {len(self)} dtype: {self.dtype}"
else:
shape = " x ".join(pprint_thing(s) for s in self.shape)
result = (
f"{name}: {pprint_thing(self.mgr_locs.indexer)}, "
f"{shape}, dtype: {self.dtype}"
)
return result
def __len__(self) -> int:
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = libinternals.BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
if getattr(self.values, "_pandas_ftype", False):
dtype = self.dtype.subtype
else:
dtype = self.dtype
return f"{dtype}:{self._ftype}"
def merge(self, other):
return _merge_blocks([self, other])
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._concatenator(
[blk.values for blk in to_concat], axis=self.ndim - 1
)
return self.make_block_same_class(
values, placement=placement or slice(0, len(values), 1)
)
def iget(self, i):
return self.values[i]
def set(self, locs, values):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not
one
"""
with np.errstate(all="ignore"):
result = func(self.values, **kwargs)
if is_extension_array_dtype(result) and result.ndim > 1:
# if we get a 2D ExtensionArray, we need to split it into 1D pieces
nbs = []
for i, loc in enumerate(self.mgr_locs):
vals = result[i]
nv = _block_shape(vals, ndim=self.ndim)
block = self.make_block(values=nv, placement=[loc])
nbs.append(block)
return nbs
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result, ndim=self.ndim))
return result
def fillna(self, value, limit=None, inplace=False, downcast=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
inplace = validate_bool_kwarg(inplace, "inplace")
mask = isna(self.values)
if limit is not None:
limit = libalgos._validate_limit(None, limit=limit)
mask[mask.cumsum(self.ndim - 1) > limit] = False
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
if self._can_hold_element(value):
# equivalent: _try_coerce_args(value) would not raise
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
# operate column-by-column
def f(mask, val, idx):
block = self.coerce_to_target_dtype(value)
# slice out our block
if idx is not None:
# i.e. self.ndim == 2
block = block.getitem_block(slice(idx, idx + 1))
return block.fillna(value, limit=limit, inplace=inplace, downcast=None)
return self.split_and_operate(None, f, inplace)
def split_and_operate(self, mask, f, inplace: bool):
"""
split the block per-column, and apply the callable f
per-column, return a new block for each. Handle
masking which will not change a block unless needed.
Parameters
----------
mask : 2-d boolean mask
f : callable accepting (1d-mask, 1d values, indexer)
inplace : boolean
Returns
-------
list of blocks
"""
if mask is None:
mask = np.broadcast_to(True, shape=self.shape)
new_values = self.values
def make_a_block(nv, ref_loc):
if isinstance(nv, list):
assert len(nv) == 1, nv
assert isinstance(nv[0], Block)
block = nv[0]
else:
# Put back the dimension that was taken from it and make
# a block out of the result.
nv = _block_shape(nv, ndim=self.ndim)
block = self.make_block(values=nv, placement=ref_loc)
return block
# ndim == 1
if self.ndim == 1:
if mask.any():
nv = f(mask, new_values, None)
else:
nv = new_values if inplace else new_values.copy()
block = make_a_block(nv, self.mgr_locs)
return [block]
# ndim > 1
new_blocks = []
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
nv = f(m, v, i)
else:
nv = v if inplace else v.copy()
block = make_a_block(nv, [ref_loc])
new_blocks.append(block)
return new_blocks
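    # Illustrative note (not in the original source): the callable contract is
    # f(mask_1d, values_1d, idx) -> new values for that column (or a
    # single-element list holding a Block); fillna, downcast and putmask in
    # this class all funnel their per-column work through this helper.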
def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
# no need to downcast our float
# unless indicated
if downcast is None and (
self.is_float or self.is_timedelta or self.is_datetime
):
return blocks
return _extend_blocks([b.downcast(downcast) for b in blocks])
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = "infer"
nv = maybe_downcast_to_dtype(values, dtypes)
return self.make_block(nv)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == "infer" or isinstance(dtypes, dict)):
raise ValueError(
"downcast must have a dictionary or 'infer' as its argument"
)
elif dtypes != "infer":
raise AssertionError("dtypes as dict is not supported yet")
# operate column-by-column
# this is expensive as it splits the blocks items-by-item
def f(mask, val, idx):
val = maybe_downcast_to_dtype(val, dtype="infer")
return val
return self.split_and_operate(None, f, False)
def astype(self, dtype, copy: bool = False, errors: str = "raise"):
"""
Coerce to the new dtype.
Parameters
----------
dtype : str, dtype convertible
copy : bool, default False
copy if indicated
errors : str, {'raise', 'ignore'}, default 'ignore'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Returns
-------
Block
"""
errors_legal_values = ("raise", "ignore")
if errors not in errors_legal_values:
invalid_arg = (
"Expected value of kwarg 'errors' to be one of "
f"{list(errors_legal_values)}. Supplied value is '{errors}'"
)
raise ValueError(invalid_arg)
if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):
msg = (
f"Expected an instance of {dtype.__name__}, "
"but got the class instead. Try instantiating 'dtype'."
)
raise TypeError(msg)
# may need to convert to categorical
if self.is_categorical_astype(dtype):
if is_categorical_dtype(self.values):
# GH 10696/18593: update an existing categorical efficiently
return self.make_block(self.values.astype(dtype, copy=copy))
return self.make_block(Categorical(self.values, dtype=dtype))
dtype = pandas_dtype(dtype)
# astype processing
if is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
# force the copy here
if self.is_extension:
# TODO: Should we try/except this astype?
values = self.values.astype(dtype)
else:
if issubclass(dtype.type, str):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.get_values()
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
vals1d = values.ravel()
try:
values = astype_nansafe(vals1d, dtype, copy=True)
except (ValueError, TypeError):
# e.g. astype_nansafe can fail on object-dtype of strings
# trying to convert to float
if errors == "raise":
raise
newb = self.copy() if copy else self
return newb
# TODO(extension)
# should we make this attribute?
if isinstance(values, np.ndarray):
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError(
f"cannot set astype for copy = [{copy}] for dtype "
f"({self.dtype.name} [{self.shape}]) to different shape "
f"({newb.dtype.name} [{newb.shape}])"
)
return newb
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
coerce: bool = False,
):
""" attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
"""
return self.copy() if copy else self
def _can_hold_element(self, element: Any) -> bool:
""" require the same dtype as ourselves """
dtype = self.values.dtype.type
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, dtype)
return isinstance(element, dtype)
def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.get_values()
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
itemsize = writers.word_len(na_rep)
if not self.is_object and not quoting and itemsize:
values = values.astype(str)
if values.dtype.itemsize / np.dtype("U1").itemsize < itemsize:
# enlarge for the na_rep
values = values.astype(f"<U{itemsize}")
else:
values = np.array(values, dtype="object")
values[mask] = na_rep
return values
# block actions #
def copy(self, deep=True):
""" copy constructor """
values = self.values
if deep:
values = values.copy()
return self.make_block_same_class(values, ndim=self.ndim)
def replace(
self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
):
"""replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API compatibility.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
original_to_replace = to_replace
# If we cannot replace with own dtype, convert to ObjectBlock and
# retry
if not self._can_hold_element(to_replace):
if not isinstance(to_replace, list):
if inplace:
return [self]
return [self.copy()]
to_replace = [x for x in to_replace if self._can_hold_element(x)]
if not len(to_replace):
# GH#28084 avoid costly checks since we can infer
# that there is nothing to replace in this block
if inplace:
return [self]
return [self.copy()]
if len(to_replace) == 1:
# _can_hold_element checks have reduced this back to the
# scalar case and we can avoid a costly object cast
return self.replace(
to_replace[0],
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
if is_object_dtype(self):
raise AssertionError
# try again with a compatible block
block = self.astype(object)
return block.replace(
to_replace=to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
values = self.values
if lib.is_scalar(to_replace) and isinstance(values, np.ndarray):
# The only non-DatetimeLike class that also has a non-trivial
# try_coerce_args is ObjectBlock, but that overrides replace,
# so does not get here.
to_replace = convert_scalar(values, to_replace)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
try:
blocks = self.putmask(mask, value, inplace=inplace)
# Note: it is _not_ the case that self._can_hold_element(value)
# is always true at this point. In particular, that can fail
# for:
# "2u" with bool-dtype, float-dtype
# 0.5 with int64-dtype
# np.nan with int64-dtype
except (TypeError, ValueError):
# GH 22083, TypeError or ValueError occurred within error handling
# causes infinite loop. Cast and retry only if not objectblock.
if is_object_dtype(self):
raise
if not self.is_extension:
# TODO: https://github.com/pandas-dev/pandas/issues/32586
# Need an ExtensionArray._can_hold_element to indicate whether
# a scalar value can be placed in the array.
assert not self._can_hold_element(value), value
# try again with a compatible block
block = self.astype(object)
return block.replace(
to_replace=original_to_replace,
value=value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
if convert:
blocks = [b.convert(numeric=False, copy=not inplace) for b in blocks]
return blocks
def _replace_single(self, *args, **kwargs):
""" no-op on a non-ObjectBlock """
return self if kwargs["inplace"] else self.copy()
def setitem(self, indexer, value):
"""
Set the value inplace, returning a a maybe different typed block.
Parameters
----------
indexer : tuple, list-like, array-like, slice
The subset of self.values to set
value : object
The value being set
Returns
-------
Block
Notes
-----
`indexer` is a direct slice/positional indexer. `value` must
be a compatible shape.
"""
transpose = self.ndim == 2
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce if block dtype can store value
values = self.values
if self._can_hold_element(value):
# We only get here for non-Extension Blocks, so _try_coerce_args
# is only relevant for DatetimeBlock and TimedeltaBlock
if lib.is_scalar(value):
value = convert_scalar(values, value)
else:
# current dtype cannot store value, coerce to common dtype
find_dtype = False
if hasattr(value, "dtype"):
dtype = value.dtype
find_dtype = True
elif lib.is_scalar(value) and not isna(value):
dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
find_dtype = True
if find_dtype:
dtype = find_common_type([values.dtype, dtype])
if not is_dtype_equal(self.dtype, dtype):
b = self.astype(dtype)
return b.setitem(indexer, value)
# value must be storeable at this moment
if is_extension_array_dtype(getattr(value, "dtype", None)):
# We need to be careful not to allow through strings that
# can be parsed to EADtypes
is_ea_value = True
arr_value = value
else:
is_ea_value = False
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = maybe_promote(arr_value.dtype)
values = values.astype(dtype)
if transpose:
values = values.T
# length checking
check_setitem_lengths(indexer, value, values)
exact_match = (
len(arr_value.shape)
and arr_value.shape[0] == values.shape[0]
and arr_value.size == values.size
)
if is_empty_indexer(indexer, arr_value):
# GH#8669 empty indexers
pass
elif is_scalar_indexer(indexer, arr_value):
# setting a single element for each dim and with a rhs that could
# be e.g. a list; see GH#6043
values[indexer] = value
elif (
exact_match
and is_categorical_dtype(arr_value.dtype)
and not is_categorical_dtype(values)
):
# GH25495 - If the current dtype is not categorical,
# we need to create a new categorical block
values[indexer] = value
return self.make_block(Categorical(self.values, dtype=arr_value.dtype))
elif exact_match and is_ea_value:
# GH#32395 if we're going to replace the values entirely, just
# substitute in the new array
return self.make_block(arr_value)
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif exact_match:
values[indexer] = value
try:
values = values.astype(arr_value.dtype)
except ValueError:
pass
# set
else:
values[indexer] = value
if transpose:
values = values.T
block = self.make_block(values)
return block
def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a list of new blocks, the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new = getattr(new, "values", new)
mask = getattr(mask, "values", mask)
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isna(new) and not self.is_object:
# FIXME: make sure we have compatible NA
new = self.fill_value
if self._can_hold_element(new):
# We only get here for non-Extension Blocks, so _try_coerce_args
# is only relevant for DatetimeBlock and TimedeltaBlock
if lib.is_scalar(new):
new = convert_scalar(new_values, new)
if transpose:
new_values = new_values.T
# If the default repeat behavior in np.putmask would go in the
# wrong direction, then explicitly repeat and reshape new instead
if getattr(new, "ndim", 0) >= 1:
if self.ndim - 1 == new.ndim and axis == 1:
new = np.repeat(new, new_values.shape[-1]).reshape(self.shape)
new = new.astype(new_values.dtype)
# we require exact matches between the len of the
# values we are setting (or is compat). np.putmask
# doesn't check this and will simply truncate / pad
# the output, but we want sane error messages
#
# TODO: this prob needs some better checking
# for 2D cases
if (
is_list_like(new)
and np.any(mask[mask])
and getattr(new, "ndim", 1) == 1
):
if mask[mask].shape[-1] == len(new):
# GH 30567
# If length of ``new`` is less than the length of ``new_values``,
# `np.putmask` would first repeat the ``new`` array and then
# assign the masked values hence produces incorrect result.
# `np.place` on the other hand uses the ``new`` values at it is
# to place in the masked locations of ``new_values``
np.place(new_values, mask, new)
elif mask.shape[-1] == len(new) or len(new) == 1:
np.putmask(new_values, mask, new)
else:
raise ValueError("cannot assign mismatch length to masked array")
else:
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
if transpose:
mask = mask.T
if isinstance(new, np.ndarray):
new = new.T
axis = new_values.ndim - axis - 1
# Pseudo-broadcast
if getattr(new, "ndim", 0) >= 1:
if self.ndim - 1 == new.ndim:
new_shape = list(new.shape)
new_shape.insert(axis, 1)
new = new.reshape(tuple(new_shape))
# operate column-by-column
def f(mask, val, idx):
if idx is None:
# ndim==1 case.
n = new
else:
if isinstance(new, np.ndarray):
n = np.squeeze(new[idx % new.shape[0]])
else:
n = np.array(new)
# type of the new block
dtype, _ = maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(val, mask, n)
return nv
new_blocks = self.split_and_operate(mask, f, inplace)
return new_blocks
if inplace:
return [self]
if transpose:
new_values = new_values.T
return [self.make_block(new_values)]
def coerce_to_target_dtype(self, other):
"""
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block
"""
# if we cannot then coerce to object
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if is_dtype_equal(self.dtype, dtype):
return self
if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
return self.astype(object)
elif (self.is_float or self.is_complex) and (
is_integer_dtype(dtype) or is_float_dtype(dtype)
):
# don't coerce float/complex to int
return self
elif (
self.is_datetime
or is_datetime64_dtype(dtype)
or is_datetime64tz_dtype(dtype)
):
# not a datetime
if not (
(is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype))
and self.is_datetime
):
return self.astype(object)
# don't upcast timezone with different timezone or no timezone
mytz = getattr(self.dtype, "tz", None)
othertz = getattr(dtype, "tz", None)
if not tz_compare(mytz, othertz):
return self.astype(object)
raise AssertionError(
f"possible recursion in coerce_to_target_dtype: {self} {other}"
)
elif self.is_timedelta or is_timedelta64_dtype(dtype):
# not a timedelta
if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
return self.astype(object)
raise AssertionError(
f"possible recursion in coerce_to_target_dtype: {self} {other}"
)
try:
return self.astype(dtype)
except (ValueError, TypeError, OverflowError):
return self.astype(object)
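    # Illustrative note (not in the original source): e.g. an integer block
    # asked to hold a string falls through to ``self.astype(object)`` above,
    # and a tz-aware datetime block offered a value with a mismatched timezone
    # is likewise coerced to object.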
def interpolate(
self,
method="pad",
axis=0,
index=None,
values=None,
inplace=False,
limit=None,
limit_direction="forward",
limit_area=None,
fill_value=None,
coerce=False,
downcast=None,
**kwargs,
):
inplace = validate_bool_kwarg(inplace, "inplace")
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = missing.clean_fill_method(method)
except ValueError:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(
method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast,
)
# validate the interp method
m = missing.clean_interp_method(method, **kwargs)
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(
method=m,
index=index,
values=values,
axis=axis,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs,
)
def _interpolate_with_fill(
self,
method="pad",
axis=0,
inplace=False,
limit=None,
fill_value=None,
coerce=False,
downcast=None,
):
""" fillna but using the interpolate machinery """
inplace = validate_bool_kwarg(inplace, "inplace")
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
# We only get here for non-ExtensionBlock
fill_value = convert_scalar(self.values, fill_value)
values = missing.interpolate_2d(
values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype,
)
blocks = [self.make_block_same_class(values, ndim=self.ndim)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(
self,
method=None,
index=None,
values=None,
fill_value=None,
axis=0,
limit=None,
limit_direction="forward",
limit_area=None,
inplace=False,
downcast=None,
**kwargs,
):
""" interpolate using scipy wrappers """
inplace = validate_bool_kwarg(inplace, "inplace")
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ("krogh", "piecewise_polynomial", "pchip"):
if not index.is_monotonic:
raise ValueError(
f"{method} interpolation requires that the index be monotonic."
)
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to missing.interpolate_1d
return missing.interpolate_1d(
index,
x,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
bounds_error=False,
**kwargs,
)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [self.make_block_same_class(interp_values)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if fill_tuple is None:
fill_value = self.fill_value
allow_fill = False
else:
fill_value = fill_tuple[0]
allow_fill = True
new_values = algos.take_nd(
values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value
)
# Called from three places in managers, all of which satisfy
# this assertion
assert not (axis == 0 and new_mgr_locs is None)
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def diff(self, n: int, axis: int = 1) -> List["Block"]:
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis, stacklevel=7)
# We use block_shape for ExtensionBlock subclasses, which may call here
# via a super.
new_values = _block_shape(new_values, ndim=self.ndim)
return [self.make_block(values=new_values)]
def shift(self, periods, axis=0, fill_value=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = maybe_upcast(self.values, fill_value)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values)]
def where(
self,
other,
cond,
align=True,
errors="raise",
try_cast: bool = False,
axis: int = 0,
) -> List["Block"]:
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
axis : int
Returns
-------
a new block(s), the result of the func
"""
import pandas.core.computation.expressions as expressions
assert errors in ["raise", "ignore"]
transpose = self.ndim == 2
values = self.values
orig_other = other
if transpose:
values = values.T
other = getattr(other, "_values", getattr(other, "values", other))
cond = getattr(cond, "values", cond)
# If the default broadcasting would go in the wrong direction, then
# explicitly reshape other instead
if getattr(other, "ndim", 0) >= 1:
if values.ndim - 1 == other.ndim and axis == 1:
other = other.reshape(tuple(other.shape + (1,)))
elif transpose and values.ndim == self.ndim - 1:
cond = cond.T
if not hasattr(cond, "shape"):
raise ValueError("where must have a condition that is ndarray like")
# our where function
def func(cond, values, other):
if not (
(self.is_integer or self.is_bool)
and lib.is_float(other)
and np.isnan(other)
):
# np.where will cast integer array to floats in this case
if not self._can_hold_element(other):
raise TypeError
if lib.is_scalar(other) and isinstance(values, np.ndarray):
other = convert_scalar(values, other)
fastres = expressions.where(cond, values, other)
return fastres
if cond.ravel().all():
result = values
else:
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
try:
result = func(cond, values, other)
except TypeError:
# we cannot coerce, return a compat dtype
# we are explicitly ignoring errors
block = self.coerce_to_target_dtype(other)
blocks = block.where(
orig_other,
cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=axis,
)
return self._maybe_downcast(blocks, "infer")
if self._can_hold_na or self.ndim == 1:
if transpose:
result = result.T
return [self.make_block(result)]
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
taken = result.take(m.nonzero()[0], axis=axis)
r = maybe_downcast_numeric(taken, self.dtype)
nb = self.make_block(r.T, placement=self.mgr_locs[m])
result_blocks.append(nb)
return result_blocks
def equals(self, other) -> bool:
if self.dtype != other.dtype or self.shape != other.shape:
return False
return array_equivalent(self.values, other.values)
def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
"""Return a list of unstacked blocks of self
Parameters
----------
unstacker_func : callable
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
n_rows : int
Only used in ExtensionBlock._unstack
fill_value : int
Only used in ExtensionBlock._unstack
Returns
-------
blocks : list of Block
New blocks of unstacked values.
mask : array_like of bool
The mask of columns of `blocks` we should keep.
"""
unstacker = unstacker_func(self.values.T)
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
new_values = new_values.T[mask]
new_placement = new_placement[mask]
blocks = [make_block(new_values, placement=new_placement)]
return blocks, mask
def quantile(self, qs, interpolation="linear", axis=0):
"""
        compute the quantiles of the block along the given axis.
Parameters
----------
qs: a scalar or list of the quantiles to be computed
interpolation: type of interpolation, default 'linear'
axis: axis to compute, default 0
Returns
-------
Block
"""
# We should always have ndim == 2 because Series dispatches to DataFrame
assert self.ndim == 2
values = self.get_values()
is_empty = values.shape[axis] == 0
orig_scalar = not is_list_like(qs)
if orig_scalar:
# make list-like, unpack later
qs = [qs]
if is_empty:
# create the array of na_values
# 2d len(values) * len(qs)
result = np.repeat(
np.array([self.fill_value] * len(qs)), len(values)
).reshape(len(values), len(qs))
else:
# asarray needed for Sparse, see GH#24600
mask = np.asarray(isna(values))
result = nanpercentile(
values,
np.array(qs) * 100,
axis=axis,
na_value=self.fill_value,
mask=mask,
ndim=values.ndim,
interpolation=interpolation,
)
result = np.array(result, copy=False)
result = result.T
if orig_scalar and not lib.is_scalar(result):
# result could be scalar in case with is_empty and self.ndim == 1
assert result.shape[-1] == 1, result.shape
result = result[..., 0]
result = lib.item_from_zerodim(result)
ndim = np.ndim(result)
return make_block(result, placement=np.arange(len(result)), ndim=ndim)
def _replace_coerce(
self, to_replace, value, inplace=True, regex=False, convert=False, mask=None
):
"""
Replace value corresponding to the given boolean array with another
value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
True indicate corresponding element is ignored.
Returns
-------
A new block if there is anything to replace or the original block.
"""
if mask.any():
if not regex:
self = self.coerce_to_target_dtype(value)
return self.putmask(mask, value, inplace=inplace)
else:
return self._replace_single(
to_replace,
value,
inplace=inplace,
regex=regex,
convert=convert,
mask=mask,
)
return self
class NonConsolidatableMixIn:
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
def __init__(self, values, placement, ndim=None):
"""Initialize a non-consolidatable block.
'ndim' may be inferred from 'placement'.
        This will continue to call __init__ for the other base
classes mixed in with this Mixin.
"""
# Placement must be converted to BlockPlacement so that we can check
# its length
if not isinstance(placement, libinternals.BlockPlacement):
placement = libinternals.BlockPlacement(placement)
# Maybe infer ndim from placement
if ndim is None:
if len(placement) != 1:
ndim = 1
else:
ndim = 2
super().__init__(values, placement, ndim=ndim)
@property
def shape(self):
if self.ndim == 1:
return ((len(self.values)),)
return (len(self.mgr_locs), len(self.values))
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if not com.is_null_slice(col) and col != 0:
raise IndexError(f"{self} only contains one item")
elif isinstance(col, slice):
if col != slice(None):
raise NotImplementedError(col)
return self.values[[loc]]
return self.values[loc]
else:
if col != 0:
raise IndexError(f"{self} only contains one item")
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):
"""
putmask the data to the block; we must be a single block and not
generate other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block, the result of the putmask
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# use block's copy logic.
# .values may be an Index which does shallow copy by default
new_values = self.values if inplace else self.copy().values
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
mask = _safe_reshape(mask, new_values.shape)
new_values[mask] = new
return [self.make_block(values=new_values)]
def _get_unstack_items(self, unstacker, new_columns):
"""
Get the placement, values, and mask for a Block unstack.
This is shared between ObjectBlock and ExtensionBlock. They
differ in that ObjectBlock passes the values, while ExtensionBlock
passes the dummy ndarray of positions to be used by a take
later.
Parameters
----------
unstacker : pandas.core.reshape.reshape._Unstacker
new_columns : Index
All columns of the unstacked BlockManager.
Returns
-------
new_placement : ndarray[int]
The placement of the new columns in `new_columns`.
new_values : Union[ndarray, ExtensionArray]
The first return value from _Unstacker.get_new_values.
mask : ndarray[bool]
The second return value from _Unstacker.get_new_values.
"""
# shared with ExtensionBlock
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
return new_placement, new_values, mask
class ExtensionBlock(NonConsolidatableMixIn, Block):
"""Block for holding extension types.
Notes
-----
This holds all 3rd-party extension array types. It's also the immediate
parent class for our internal extension types' blocks, CategoricalBlock.
ExtensionArrays are limited to 1-D.
"""
is_extension = True
def __init__(self, values, placement, ndim=None):
values = self._maybe_coerce_values(values)
super().__init__(values, placement, ndim)
def _maybe_coerce_values(self, values):
"""
Unbox to an extension array.
This will unbox an ExtensionArray stored in an Index or Series.
ExtensionArrays pass through. No dtype coercion is done.
Parameters
----------
values : Index, Series, ExtensionArray
Returns
-------
ExtensionArray
"""
return extract_array(values)
@property
def _holder(self):
# For extension blocks, the holder is values-dependent.
return type(self.values)
@property
def fill_value(self):
# Used in reindex_indexer
return self.values.dtype.na_value
@property
def _can_hold_na(self):
# The default ExtensionArray._can_hold_na is True
return self._holder._can_hold_na
@property
def is_view(self):
"""Extension arrays are never treated as views."""
return False
@property
def is_numeric(self):
return self.values.dtype._is_numeric
def setitem(self, indexer, value):
"""Set the value inplace, returning a same-typed block.
This differs from Block.setitem by not allowing setitem to change
the dtype of the Block.
Parameters
----------
indexer : tuple, list-like, array-like, slice
The subset of self.values to set
value : object
The value being set
Returns
-------
Block
Notes
-----
`indexer` is a direct slice/positional indexer. `value` must
be a compatible shape.
"""
if isinstance(indexer, tuple):
# we are always 1-D
indexer = indexer[0]
check_setitem_lengths(indexer, value, self.values)
self.values[indexer] = value
return self
def get_values(self, dtype=None):
# ExtensionArrays must be iterable, so this works.
values = np.asarray(self.values)
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def array_values(self) -> ExtensionArray:
return self.values
def to_dense(self):
return np.asarray(self.values)
def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
"""override to use ExtensionArray astype for the conversion"""
values = self.values
if slicer is not None:
values = values[slicer]
mask = isna(values)
values = np.asarray(values.astype(object))
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
# but are passed the axis depending on the calling routing
# if its REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True)
# Called from three places in managers, all of which satisfy
# this assertion
assert not (self.ndim == 1 and new_mgr_locs is None)
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def _can_hold_element(self, element: Any) -> bool:
# XXX: We may need to think about pushing this onto the array.
# We're doing the same as CategoricalBlock here.
return True
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
slicer = slicer[1]
return self.values[slicer]
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._holder._concat_same_type([blk.values for blk in to_concat])
placement = placement or slice(0, len(values), 1)
return self.make_block_same_class(values, ndim=self.ndim, placement=placement)
def fillna(self, value, limit=None, inplace=False, downcast=None):
values = self.values if inplace else self.values.copy()
values = values.fillna(value=value, limit=limit)
return [
self.make_block_same_class(
values=values, placement=self.mgr_locs, ndim=self.ndim
)
]
def interpolate(
self, method="pad", axis=0, inplace=False, limit=None, fill_value=None, **kwargs
):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(
values=values.fillna(value=fill_value, method=method, limit=limit),
placement=self.mgr_locs,
)
def diff(self, n: int, axis: int = 1) -> List["Block"]:
if axis == 1:
# we are by definition 1D.
axis = 0
return super().diff(n, axis)
def shift(
self, periods: int, axis: int = 0, fill_value: Any = None,
) -> List["ExtensionBlock"]:
"""
Shift the block by `periods`.
Dispatches to underlying ExtensionArray and re-boxes in an
ExtensionBlock.
"""
return [
self.make_block_same_class(
self.values.shift(periods=periods, fill_value=fill_value),
placement=self.mgr_locs,
ndim=self.ndim,
)
]
def where(
self,
other,
cond,
align=True,
errors="raise",
try_cast: bool = False,
axis: int = 0,
) -> List["Block"]:
if isinstance(other, ABCDataFrame):
# ExtensionArrays are 1-D, so if we get here then
# `other` should be a DataFrame with a single column.
assert other.shape[1] == 1
other = other.iloc[:, 0]
other = extract_array(other, extract_numpy=True)
if isinstance(cond, ABCDataFrame):
assert cond.shape[1] == 1
cond = cond.iloc[:, 0]
cond = extract_array(cond, extract_numpy=True)
if lib.is_scalar(other) and isna(other):
# The default `other` for Series / Frame is np.nan
# we want to replace that with the correct NA value
# for the type
other = self.dtype.na_value
if is_sparse(self.values):
# TODO(SparseArray.__setitem__): remove this if condition
# We need to re-infer the type of the data after doing the
# where, for cases where the subtypes don't match
dtype = None
else:
dtype = self.dtype
result = self.values.copy()
icond = ~cond
if lib.is_scalar(other):
set_other = other
else:
set_other = other[icond]
try:
result[icond] = set_other
except (NotImplementedError, TypeError):
# NotImplementedError for class not implementing `__setitem__`
# TypeError for SparseArray, which implements just to raise
# a TypeError
result = self._holder._from_sequence(
np.where(cond, self.values, other), dtype=dtype
)
return [self.make_block_same_class(result, placement=self.mgr_locs)]
@property
def _ftype(self):
return getattr(self.values, "_pandas_ftype", Block._ftype)
def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
# ExtensionArray-safe unstack.
# We override ObjectBlock._unstack, which unstacks directly on the
# values of the array. For EA-backed blocks, this would require
# converting to a 2-D ndarray of objects.
# Instead, we unstack an ndarray of integer positions, followed by
# a `take` on the actual values.
dummy_arr = np.arange(n_rows)
dummy_unstacker = functools.partial(unstacker_func, fill_value=-1)
unstacker = dummy_unstacker(dummy_arr)
new_placement, new_values, mask = self._get_unstack_items(
unstacker, new_columns
)
blocks = [
self.make_block_same_class(
self.values.take(indices, allow_fill=True, fill_value=fill_value),
[place],
)
for indices, place in zip(new_values.T, new_placement)
]
return blocks, mask
class ObjectValuesExtensionBlock(ExtensionBlock):
"""
Block providing backwards-compatibility for `.values`.
Used by PeriodArray and IntervalArray to ensure that
Series[T].values is an ndarray of objects.
"""
def external_values(self, dtype=None):
return self.values.astype(object)
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other) -> bool:
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(
tipo.type, (np.datetime64, np.timedelta64)
)
return isinstance(
element, (float, int, np.floating, np.int_)
) and not isinstance(
element,
(bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64),
)
def to_native_types(
self,
slicer=None,
na_rep="",
float_format=None,
decimal=".",
quoting=None,
**kwargs,
):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
# see gh-13418: no special formatting is desired at the
# output (important for appropriate 'quoting' behaviour),
# so do not pass it through the FloatArrayFormatter
if float_format is None and decimal == ".":
mask = isna(values)
if not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype="object")
values[mask] = na_rep
return values
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(
values,
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
fixed_width=False,
)
return formatter.get_result_as_array()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating))
return isinstance(
element, (float, int, complex, np.float_, np.int_)
) and not isinstance(element, (bool, np.bool_))
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return (
issubclass(tipo.type, np.integer)
and not issubclass(tipo.type, (np.datetime64, np.timedelta64))
and self.dtype.itemsize >= tipo.itemsize
)
return is_integer(element)
def should_store(self, value):
return is_integer_dtype(value) and value.dtype == self.dtype
class DatetimeLikeBlockMixin:
"""Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock."""
@property
def _holder(self):
return DatetimeArray
@property
def fill_value(self):
return np.datetime64("NaT", "ns")
def get_values(self, dtype=None):
"""
return object dtype as boxed values, such as Timestamps/Timedelta
"""
if is_object_dtype(dtype):
values = self.values.ravel()
result = self._holder(values).astype(object)
return result.reshape(self.values.shape)
return self.values
def iget(self, key):
# GH#31649 we need to wrap scalars in Timestamp/Timedelta
# TODO(EA2D): this can be removed if we ever have 2D EA
result = super().iget(key)
if isinstance(result, np.datetime64):
result = Timestamp(result)
elif isinstance(result, np.timedelta64):
result = Timedelta(result)
return result
def shift(self, periods, axis=0, fill_value=None):
# TODO(EA2D) this is unnecessary if these blocks are backed by 2D EAs
values = self.array_values()
new_values = values.shift(periods, fill_value=fill_value, axis=axis)
return self.make_block_same_class(new_values)
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
is_datetime = True
def __init__(self, values, placement, ndim=None):
values = self._maybe_coerce_values(values)
super().__init__(values, placement=placement, ndim=ndim)
@property
def _can_hold_na(self):
return True
def _maybe_coerce_values(self, values):
"""
Input validation for values passed to __init__. Ensure that
we have datetime64ns, coercing if necessary.
Parameters
----------
values : array-like
Must be convertible to datetime64
Returns
-------
values : ndarray[datetime64ns]
Overridden by DatetimeTZBlock.
"""
if values.dtype != _NS_DTYPE:
values = conversion.ensure_datetime64ns(values)
if isinstance(values, DatetimeArray):
values = values._data
assert isinstance(values, np.ndarray), type(values)
return values
def astype(self, dtype, copy: bool = False, errors: str = "raise"):
"""
these automatically copy, so copy=True has no effect
raise on an except if raise == True
"""
dtype = pandas_dtype(dtype)
# if we are passed a datetime64[ns, tz]
if is_datetime64tz_dtype(dtype):
values = self.values
if copy:
# this should be the only copy
values = values.copy()
if getattr(values, "tz", None) is None:
values = DatetimeArray(values).tz_localize("UTC")
values = values.tz_convert(dtype.tz)
return self.make_block(values)
# delegate
return super().astype(dtype=dtype, copy=copy, errors=errors)
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
if self.is_datetimetz:
# require exact match, since non-nano does not exist
return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype(
element, self.dtype
)
# GH#27419 if we get a non-nano datetime64 object
return is_datetime64_dtype(tipo)
elif element is NaT:
return True
elif isinstance(element, datetime):
if self.is_datetimetz:
return tz_compare(element.tzinfo, self.dtype.tz)
return element.tzinfo is None
return is_valid_nat_for_dtype(element, self.dtype)
def to_native_types(
self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs
):
""" convert to our native types format, slicing if desired """
values = self.values
i8values = self.values.view("i8")
if slicer is not None:
values = values[..., slicer]
i8values = i8values[..., slicer]
from pandas.io.formats.format import _get_format_datetime64_from_values
fmt = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(
i8values.ravel(),
tz=getattr(self.values, "tz", None),
format=fmt,
na_rep=na_rep,
).reshape(i8values.shape)
return np.atleast_2d(result)
def should_store(self, value):
return (
issubclass(value.dtype.type, np.datetime64)
and not is_datetime64tz_dtype(value)
and not is_extension_array_dtype(value)
)
def set(self, locs, values):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
values = conversion.ensure_datetime64ns(values, copy=False)
self.values[locs] = values
def external_values(self):
return np.asarray(self.values.astype("datetime64[ns]", copy=False))
def array_values(self) -> ExtensionArray:
return DatetimeArray._simple_new(self.values)
class DatetimeTZBlock(ExtensionBlock, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
__slots__ = ()
is_datetimetz = True
is_extension = True
_can_hold_element = DatetimeBlock._can_hold_element
to_native_types = DatetimeBlock.to_native_types
fill_value = np.datetime64("NaT", "ns")
@property
def _holder(self):
return DatetimeArray
def _maybe_coerce_values(self, values):
"""Input validation for values passed to __init__. Ensure that
we have datetime64TZ, coercing if necessary.
Parameters
----------
values : array-like
Must be convertible to datetime64
Returns
-------
values : DatetimeArray
"""
if not isinstance(values, self._holder):
values = self._holder(values)
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
return values
@property
def is_view(self):
""" return a boolean if I am possibly a view """
# check the ndarray values of the DatetimeIndex values
return self.values._data.base is not None
def get_values(self, dtype=None):
"""
Returns an ndarray of values.
Parameters
----------
dtype : np.dtype
Only `object`-like dtypes are respected here (not sure
why).
Returns
-------
values : ndarray
            When ``dtype=object``, an object-dtype ndarray of
boxed values is returned. Otherwise, an M8[ns] ndarray
is returned.
DatetimeArray is always 1-d. ``get_values`` will reshape
the return value to be the same dimensionality as the
block.
"""
values = self.values
if is_object_dtype(dtype):
values = values.astype(object)
values = np.asarray(values)
if self.ndim == 2:
# Ensure that our shape is correct for DataFrame.
# ExtensionArrays are always 1-D, even in a DataFrame when
# the analogous NumPy-backed column would be a 2-D ndarray.
values = values.reshape(1, -1)
return values
def to_dense(self):
# we request M8[ns] dtype here, even though it discards tzinfo,
# as lots of code (e.g. anything using values_from_object)
# expects that behavior.
return np.asarray(self.values, dtype=_NS_DTYPE)
def _slice(self, slicer):
""" return a slice of my values """
if isinstance(slicer, tuple):
col, loc = slicer
if not com.is_null_slice(col) and col != 0:
raise IndexError(f"{self} only contains one item")
return self.values[loc]
return self.values[slicer]
def diff(self, n: int, axis: int = 0) -> List["Block"]:
"""
1st discrete difference.
Parameters
----------
n : int
Number of periods to diff.
axis : int, default 0
Axis to diff upon.
Returns
-------
A list with a new TimeDeltaBlock.
Notes
-----
The arguments here are mimicking shift so they are called correctly
by apply.
"""
if axis == 0:
# Cannot currently calculate diff across multiple blocks since this
# function is invoked via apply
raise NotImplementedError
new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8
# Reshape the new_values like how algos.diff does for timedelta data
new_values = new_values.reshape(1, len(new_values))
new_values = new_values.astype("timedelta64[ns]")
return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]
def concat_same_type(self, to_concat, placement=None):
# need to handle concat([tz1, tz2]) here, since DatetimeArray
# only handles cases where all the tzs are the same.
# Instead of placing the condition here, it could also go into the
# is_uniform_join_units check, but I'm not sure what is better.
if len({x.dtype for x in to_concat}) > 1:
values = concat_datetime([x.values for x in to_concat])
placement = placement or slice(0, len(values), 1)
if self.ndim > 1:
values = np.atleast_2d(values)
return ObjectBlock(values, ndim=self.ndim, placement=placement)
return super().concat_same_type(to_concat, placement)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# We support filling a DatetimeTZ with a `value` whose timezone
# is different by coercing to object.
if self._can_hold_element(value):
return super().fillna(value, limit, inplace, downcast)
# different timezones, or a non-tz
return self.astype(object).fillna(
value, limit=limit, inplace=inplace, downcast=downcast
)
def setitem(self, indexer, value):
# https://github.com/pandas-dev/pandas/issues/24020
# Need a dedicated setitem until #24020 (type promotion in setitem
# for extension arrays) is designed and implemented.
if self._can_hold_element(value) or (
isinstance(indexer, np.ndarray) and indexer.size == 0
):
return super().setitem(indexer, value)
obj_vals = self.values.astype(object)
newb = make_block(
obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim
)
return newb.setitem(indexer, value)
def equals(self, other) -> bool:
# override for significant performance improvement
if self.dtype != other.dtype or self.shape != other.shape:
return False
return (self.values.view("i8") == other.values.view("i8")).all()
def quantile(self, qs, interpolation="linear", axis=0):
naive = self.values.view("M8[ns]")
# kludge for 2D block with 1D values
naive = naive.reshape(self.shape)
blk = self.make_block(naive)
res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis)
# ravel is kludge for 2D block with 1D values, assumes column-like
aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)
return self.make_block_same_class(aware, ndim=res_blk.ndim)
class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
fill_value = np.timedelta64("NaT", "ns")
def __init__(self, values, placement, ndim=None):
if values.dtype != _TD_DTYPE:
values = conversion.ensure_timedelta64ns(values)
if isinstance(values, TimedeltaArray):
values = values._data
assert isinstance(values, np.ndarray), type(values)
super().__init__(values, placement=placement, ndim=ndim)
@property
def _holder(self):
return TimedeltaArray
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, np.timedelta64)
elif element is NaT:
return True
elif isinstance(element, (timedelta, np.timedelta64)):
return True
return is_valid_nat_for_dtype(element, self.dtype)
def fillna(self, value, **kwargs):
# allow filling with integers to be
# interpreted as nanoseconds
if is_integer(value):
# Deprecation GH#24694, GH#19233
raise TypeError(
"Passing integers to fillna for timedelta64[ns] dtype is no "
"longer supported. To obtain the old behavior, pass "
"`pd.Timedelta(seconds=n)` instead."
)
return super().fillna(value, **kwargs)
def should_store(self, value):
return issubclass(
value.dtype.type, np.timedelta64
) and not is_extension_array_dtype(value)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = "NaT"
rvalues[mask] = na_rep
imask = (~mask).ravel()
# FIXME:
# should use the formats.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array(
[Timedelta(val)._repr_base(format="all") for val in values.ravel()[imask]],
dtype=object,
)
return rvalues
def external_values(self, dtype=None):
return np.asarray(self.values.astype("timedelta64[ns]", copy=False))
def array_values(self) -> ExtensionArray:
return TimedeltaArray._simple_new(self.values)
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, np.bool_)
return isinstance(element, (bool, np.bool_))
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_) and not is_extension_array_dtype(
value
)
def replace(
self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
):
inplace = validate_bool_kwarg(inplace, "inplace")
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super().replace(
to_replace,
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, placement=None, ndim=2):
if issubclass(values.dtype.type, str):
values = np.array(values, dtype=object)
super().__init__(values, ndim=ndim, placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
coerce: bool = False,
):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# operate column-by-column
def f(mask, val, idx):
shape = val.shape
values = soft_convert_objects(
val.ravel(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
if isinstance(values, np.ndarray):
# TODO: allow EA once reshape is supported
values = values.reshape(shape)
values = _block_shape(values, ndim=self.ndim)
return values
if self.ndim == 2:
blocks = self.split_and_operate(None, f, False)
else:
values = f(None, self.values.ravel(), None)
blocks = [make_block(values, ndim=self.ndim, placement=self.mgr_locs)]
return blocks
def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]:
if downcast is not None:
return blocks
# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])
def _can_hold_element(self, element: Any) -> bool:
return True
def should_store(self, value):
return not (
issubclass(
value.dtype.type,
(np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_),
)
or is_extension_array_dtype(value)
)
def replace(
self, to_replace, value, inplace=False, filter=None, regex=False, convert=True
):
to_rep_is_list = is_list_like(to_replace)
value_is_list = is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
result_blocks = []
blocks = [self]
if not either_list and is_re(to_replace):
return self._replace_single(
to_replace,
value,
inplace=inplace,
filter=filter,
regex=True,
convert=convert,
)
elif not (either_list or regex):
return super().replace(
to_replace,
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(
to_rep,
v,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
elif to_rep_is_list and regex:
for to_rep in to_replace:
result_blocks = []
for b in blocks:
result = b._replace_single(
to_rep,
value,
inplace=inplace,
filter=filter,
regex=regex,
convert=convert,
)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(
to_replace,
value,
inplace=inplace,
filter=filter,
convert=convert,
regex=regex,
)
def _replace_single(
self,
to_replace,
value,
inplace=False,
filter=None,
regex=False,
convert=True,
mask=None,
):
"""
Replace elements by the given value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
filter : list, optional
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
            True indicates that the corresponding element is ignored.
Returns
-------
a new block, the result after replacing
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# to_replace is regex compilable
to_rep_re = regex and is_re_compilable(to_replace)
# regex is regex compilable
regex_re = is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError(
"only one of to_replace and regex can be regex compilable"
)
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
if is_re(to_replace):
pattern = to_replace.pattern
else:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
return super().replace(
to_replace, value, inplace=inplace, filter=filter, regex=regex
)
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isna(value) or not isinstance(value, str):
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return value if rx.search(s) is not None else s
else:
return s
else:
# value is guaranteed to be a string here, s can be either a string
            # or null; if it's null it gets returned
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return rx.sub(value, s)
else:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
if mask is None:
new_values[filt] = f(new_values[filt])
else:
new_values[filt][mask] = f(new_values[filt][mask])
# convert
block = self.make_block(new_values)
if convert:
block = block.convert(numeric=False)
return block
def _replace_coerce(
self, to_replace, value, inplace=True, regex=False, convert=False, mask=None
):
"""
Replace value corresponding to the given boolean array with another
value.
Parameters
----------
to_replace : object or pattern
Scalar to replace or regular expression to match.
value : object
Replacement object.
inplace : bool, default False
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
convert : bool, default True
If true, try to coerce any object types to better types.
mask : array-like of bool, optional
            True indicates that the corresponding element is ignored.
Returns
-------
A new block if there is anything to replace or the original block.
"""
if mask.any():
block = super()._replace_coerce(
to_replace=to_replace,
value=value,
inplace=inplace,
regex=regex,
convert=convert,
mask=mask,
)
if convert:
block = [b.convert(numeric=False, copy=True) for b in block]
return block
if convert:
return [self.convert(numeric=False, copy=True)]
return self
class CategoricalBlock(ExtensionBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
_concatenator = staticmethod(concat_categorical)
def __init__(self, values, placement, ndim=None):
# coerce to categorical if we can
values = extract_array(values)
assert isinstance(values, Categorical), type(values)
super().__init__(values, placement=placement, ndim=ndim)
@property
def _holder(self):
return Categorical
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return np.object_
def to_dense(self):
# Categorical.get_values returns a DatetimeIndex for datetime
# categories, so we can't simply use `np.asarray(self.values)` like
# other types.
return self.values._internal_get_values()
def to_native_types(self, slicer=None, na_rep="", quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isna(values)
values = np.array(values, dtype="object")
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
        Note that this CategoricalBlock.concat_same_type *may* not
return a CategoricalBlock. When the categories in `to_concat`
differ, this will return an object ndarray.
If / when we decide we don't like that behavior:
1. Change Categorical._concat_same_type to use union_categoricals
2. Delete this method.
"""
values = self._concatenator(
[blk.values for blk in to_concat], axis=self.ndim - 1
)
# not using self.make_block_same_class as values can be object dtype
return make_block(
values, placement=placement or slice(0, len(values), 1), ndim=self.ndim
)
def replace(
self,
to_replace,
value,
inplace: bool = False,
filter=None,
regex: bool = False,
convert: bool = True,
):
inplace = validate_bool_kwarg(inplace, "inplace")
result = self if inplace else self.copy()
if filter is None: # replace was called on a series
result.values.replace(to_replace, value, inplace=True)
if convert:
return result.convert(numeric=False, copy=not inplace)
else:
return result
else: # replace was called on a DataFrame
if not isna(value):
result.values.add_categories(value, inplace=True)
return super(CategoricalBlock, result).replace(
to_replace, value, inplace, filter, regex, convert
)
# -----------------------------------------------------------------
# Constructor Helpers
def get_block_type(values, dtype=None):
"""
Find the appropriate Block subclass to use for the given values and dtype.
Parameters
----------
values : ndarray-like
dtype : numpy or pandas dtype
Returns
-------
cls : class, subclass of Block
"""
dtype = dtype or values.dtype
vtype = dtype.type
if is_sparse(dtype):
# Need this first(ish) so that Sparse[datetime] is sparse
cls = ExtensionBlock
elif is_categorical(values):
cls = CategoricalBlock
elif issubclass(vtype, np.datetime64):
assert not is_datetime64tz_dtype(values)
cls = DatetimeBlock
elif is_datetime64tz_dtype(values):
cls = DatetimeTZBlock
elif is_interval_dtype(dtype) or is_period_dtype(dtype):
cls = ObjectValuesExtensionBlock
elif is_extension_array_dtype(values):
cls = ExtensionBlock
elif issubclass(vtype, np.floating):
cls = FloatBlock
elif issubclass(vtype, np.timedelta64):
assert issubclass(vtype, np.integer)
cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
cls = ComplexBlock
elif issubclass(vtype, np.integer):
cls = IntBlock
elif dtype == np.bool_:
cls = BoolBlock
else:
cls = ObjectBlock
return cls
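# Illustrative sketch (not part of the original source): rough mapping from
# input values to Block subclasses, as implemented by get_block_type above.
# Behaviour is specific to this vintage of the pandas internals.
#
#     >>> import numpy as np
#     >>> get_block_type(np.array([1, 2, 3])) is IntBlock
#     True
#     >>> get_block_type(np.array([1.5])) is FloatBlock
#     True
#     >>> get_block_type(np.array(["2020"], dtype="M8[ns]")) is DatetimeBlock
#     True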
def make_block(values, placement, klass=None, ndim=None, dtype=None):
# Ensure that we don't allow PandasArray / PandasDtype in internals.
# For now, blocks should be backed by ndarrays when possible.
if isinstance(values, ABCPandasArray):
values = values.to_numpy()
if ndim and ndim > 1:
values = np.atleast_2d(values)
if isinstance(dtype, PandasDtype):
dtype = dtype.numpy_dtype
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(values, dtype)
elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values):
# TODO: This is no longer hit internally; does it need to be retained
# for e.g. pyarrow?
values = DatetimeArray._simple_new(values, dtype=dtype)
return klass(values, ndim=ndim, placement=placement)
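# Illustrative sketch (not part of the original source): building a block for
# two integer columns of three rows each; `placement` gives the column
# positions the block occupies inside its BlockManager.
#
#     >>> import numpy as np
#     >>> blk = make_block(np.arange(6).reshape(2, 3), placement=[0, 1], ndim=2)
#     >>> type(blk).__name__, blk.shape
#     ('IntBlock', (2, 3))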
# -----------------------------------------------------------------
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, given the result """
from pandas.core.internals import BlockManager
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
elif isinstance(result, BlockManager):
blocks.extend(result.blocks)
else:
blocks.append(result)
return blocks
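# Illustrative sketch (not part of the original source), using plain integers
# as stand-ins for blocks to show the flattening behaviour:
#
#     >>> _extend_blocks([1, [2, 3]])
#     [1, 2, 3]
#     >>> _extend_blocks(4, blocks=[1, 2, 3])
#     [1, 2, 3, 4]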
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim < ndim:
if shape is None:
shape = values.shape
if not is_extension_array_dtype(values):
# TODO: https://github.com/pandas-dev/pandas/issues/23023
# block.shape is incorrect for "2D" ExtensionArrays
# We can't, and don't need to, reshape.
values = values.reshape(tuple((1,) + shape))
return values
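# Illustrative sketch (not part of the original source): a 1-D ndarray is
# promoted to the (1, N) layout that 2-D blocks expect, while ExtensionArrays
# pass through unchanged because they cannot be reshaped to 2-D.
#
#     >>> import numpy as np
#     >>> _block_shape(np.arange(3), ndim=2).shape
#     (1, 3)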
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len({b.dtype for b in blocks}) != 1:
raise AssertionError("_merge_blocks are invalid!")
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = np.vstack([b.values for b in blocks])
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values, placement=new_mgr_locs)
# no merge
return blocks
def _safe_reshape(arr, new_shape):
"""
If possible, reshape `arr` to have shape `new_shape`,
with a couple of exceptions (see gh-13012):
    1) If `arr` is an ExtensionArray or Index, `arr` will be
returned as is.
2) If `arr` is a Series, the `_values` attribute will
be reshaped and returned.
Parameters
----------
arr : array-like, object to be reshaped
new_shape : int or tuple of ints, the new shape
"""
if isinstance(arr, ABCSeries):
arr = arr._values
if not isinstance(arr, ABCExtensionArray):
arr = arr.reshape(new_shape)
return arr
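# Illustrative sketch (not part of the original source): ndarrays are reshaped,
# a Series is first unboxed to its underlying values, and ExtensionArray-backed
# inputs are returned as-is (see gh-13012 referenced above).
#
#     >>> import numpy as np
#     >>> _safe_reshape(np.arange(6), (2, 3)).shape
#     (2, 3)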
def _putmask_smart(v, mask, n):
"""
Return a new ndarray, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
mask : np.ndarray
Applies to both sides (array like).
n : `new values` either scalar or an array like aligned with `values`
Returns
-------
values : ndarray with updated values
this *may* be a copy of the original
See Also
--------
ndarray.putmask
"""
# we cannot use np.asarray() here as we cannot have conversions
# that numpy does when numeric are mixed with strings
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.repeat(n, len(mask))
    # see if we are only masking values that, if put,
# will work in the current dtype
try:
nn = n[mask]
except TypeError:
# TypeError: only integer scalar arrays can be converted to a scalar index
pass
else:
# make sure that we have a nullable type
# if we have nulls
if not _isna_compat(v, nn[0]):
pass
elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):
# only compare integers/floats
pass
elif not (is_float_dtype(v.dtype) or is_integer_dtype(v.dtype)):
# only compare integers/floats
pass
else:
# we ignore ComplexWarning here
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", np.ComplexWarning)
nn_at = nn.astype(v.dtype)
comp = nn == nn_at
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[mask] = nn_at
return nv
n = np.asarray(n)
def _putmask_preserve(nv, n):
try:
nv[mask] = n[mask]
except (IndexError, ValueError):
nv[mask] = n
return nv
# preserves dtype if possible
if v.dtype.kind == n.dtype.kind:
return _putmask_preserve(v, n)
# change the dtype if needed
dtype, _ = maybe_promote(n.dtype)
if is_extension_array_dtype(v.dtype) and is_object_dtype(dtype):
v = v._internal_get_values(dtype)
else:
v = v.astype(dtype)
return _putmask_preserve(v, n)
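# Illustrative sketch (not part of the original source): the original dtype is
# preserved when the new values cast losslessly, and promoted otherwise.
#
#     >>> import numpy as np
#     >>> v = np.array([1, 2, 3]); mask = np.array([True, False, False])
#     >>> _putmask_smart(v, mask, 7.0)
#     array([7, 2, 3])
#     >>> _putmask_smart(v, mask, 7.5)
#     array([7.5, 2. , 3. ])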
| [
"[email protected]"
] | |
d911b045663d565be92524dcbdeb0dee537c4ee8 | a72106acf426859b49be66ec7a1d209d8ffb59d1 | /importer/indico_importer/converter.py | f8b138c0a1ca528382992688f5347a4d08c1ba43 | [
"MIT"
] | permissive | indico/indico-plugins-attic | 12502c891805e092b936c42a779fa9c667ee23d6 | 64a6bffe4dc7e30e2874dd4d6aac9908038910f1 | refs/heads/master | 2021-06-23T03:51:21.500524 | 2021-03-17T10:35:24 | 2021-03-17T10:35:24 | 201,440,329 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,001 | py | # This file is part of the Indico plugins.
# Copyright (C) 2002 - 2020 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from __future__ import unicode_literals
APPEND = object()
class RecordConverter(object):
"""
    Converts a dictionary or a list of dictionaries into another list of dictionaries. The goal
    is to alter data fetched from a connector class into a format that can be easily read by an
    importer plugin. The way dictionaries are converted depends on the 'conversion' variable.
    conversion = [ (sourceKey, destinationKey, conversionFunction(optional), converter(optional))... ]
    It's a list of tuples, each of which describes a single translation to be made. Every tuple
    consists of one to four entries.
    The first entry is the key name in the source dictionary; the value stored under this key will
    be the subject of the translation. The second is the key in the destination dictionary at which
    the translated value will be put. If not specified, it defaults to the value of the first
    entry. If the second entry equals APPEND and the converted element is a dictionary or a
    list of dictionaries, the destination dictionary will be updated with the converted element.
    The third, optional, entry is the function that will take the value from the source dictionary
    and return the value to be inserted into the result dictionary. If the third entry is empty,
    default_conversion_method will be called. The fourth, optional, entry is a RecordConverter
    class which will be executed with the converted value as an argument.
"""
conversion = []
@staticmethod
def default_conversion_method(attr):
"""
Method that will be used to convert an entry in dictionary unless other method is specified.
"""
return attr
@classmethod
def convert(cls, record):
"""
Converts a single dictionary or list of dictionaries into converted list of dictionaries.
"""
if isinstance(record, list):
return [cls._convert(r) for r in record]
else:
return [cls._convert(record)]
@classmethod
def _convert_internal(cls, record):
"""
Converts a single dictionary into converted dictionary or list of dictionaries into converted
list of dictionaries. Used while passing dictionaries to another converter.
"""
if isinstance(record, list):
return [cls._convert(r) for r in record]
else:
return cls._convert(record)
@classmethod
def _convert(cls, record):
"""
Core method of the converter. Converts a single dictionary into another dictionary.
"""
if not record:
return {}
converted_dict = {}
for field in cls.conversion:
key = field[0]
if len(field) >= 2 and field[1]:
converted_key = field[1]
else:
converted_key = key
if len(field) >= 3 and field[2]:
conversion_method = field[2]
else:
conversion_method = cls.default_conversion_method
if len(field) >= 4:
converter = field[3]
else:
converter = None
try:
value = conversion_method(record[key])
except KeyError:
continue
if converter:
value = converter._convert_internal(value)
if converted_key is APPEND:
if isinstance(value, list):
for v in value:
converted_dict.update(v)
else:
converted_dict.update(value)
else:
converted_dict[converted_key] = value
return converted_dict
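# Illustrative usage sketch (the subclass below is hypothetical and not part of
# the plugin): a converter subclass declares
# (source_key, destination_key, conversion_function, nested_converter) tuples.
#
#     class PersonConverter(RecordConverter):
#         conversion = [
#             ('fullName', 'name'),
#             ('yearOfBirth', 'age', lambda year: 2016 - year),
#         ]
#
#     PersonConverter.convert({'fullName': 'Ada', 'yearOfBirth': 1815})
#     # -> [{'name': 'Ada', 'age': 201}]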
| [
"[email protected]"
] | |
62a430d7658748dc827ca7a1a71a21975277174b | 2e70b3ce93762c5b66fba57f8b9cba37aacf0702 | /new/account/migrations/0005_auto_20190528_1610.py | 184bec8ce2a97ea9516a3f76e5495f0cfbb17c49 | [] | no_license | mahidul-islam/jamah | 02be511fe119e8934ec7d5aa1eaa8e2b24fad246 | c8ddf9a8094d33e8b1d6cb834eab3d9f18b1a9ea | refs/heads/master | 2022-05-13T15:11:38.609550 | 2019-06-08T04:52:09 | 2019-06-08T04:52:09 | 184,331,276 | 2 | 0 | null | 2022-04-22T21:27:18 | 2019-04-30T21:04:06 | Python | UTF-8 | Python | false | false | 541 | py | # Generated by Django 2.2.1 on 2019-05-28 16:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0004_account_mother_account'),
]
operations = [
migrations.AlterField(
model_name='transaction',
name='comes_from',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transaction_outs', to='account.Account'),
),
]
| [
"[email protected]"
] | |
1110f7e0dacac9ef0b6b69c736d03aa57d46b364 | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/zope.component-Zope-3.2.1/zope.component/bbb/utility.py | f626f6c3e1a4329b351d849e1924758ce526722a | [
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,507 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""utility service
$Id: utility.py 38178 2005-08-30 21:50:19Z mj $
"""
from zope.component.exceptions import Invalid, ComponentLookupError
from zope.component.interfaces import IUtilityService, IRegistry
from zope.component.service import GlobalService, IService, IServiceDefinition
from zope.component.site import UtilityRegistration
import zope.interface
class IGlobalUtilityService(IUtilityService, IRegistry):
def provideUtility(providedInterface, component, name='', info=''):
"""Provide a utility
A utility is a component that provides an interface.
"""
class UtilityService(object):
"""Provide IUtilityService
Mixin that superimposes utility management on adapter registery
implementation
"""
def __init__(self, sitemanager=None):
self.__parent__ = None
if sitemanager is None:
from zope.component.site import GlobalSiteManager
sitemanager = GlobalSiteManager()
self.sm = sitemanager
def __getattr__(self, name):
attr = getattr(self.sm, name)
if attr is not None:
return attr
attr = getattr(self.sm.utilities, name)
if attr is not None:
return attr
raise AttributeError(name)
class GlobalUtilityService(UtilityService, GlobalService):
zope.interface.implementsOnly(IGlobalUtilityService)
def __init__(self, sitemanager=None):
super(GlobalUtilityService, self).__init__(sitemanager)
def provideUtility(self, providedInterface, component, name='', info=''):
self.sm.provideUtility(providedInterface, component, name, info)
def registrations(self):
for reg in self.sm.registrations():
if isinstance(reg, UtilityRegistration):
if not reg.provided in (IService, IServiceDefinition):
yield reg
| [
"[email protected]"
] | |
244ecb3d7cda2b212c28968b72151583aa73ab22 | 7fb87945b77d3adaedd8a155c981e97946734e41 | /packstack/plugins/amqp_002.py | bc822100bc810d982c6734ba0f87cfae7797e907 | [
"Apache-2.0"
] | permissive | Tony910517/openstack | 916b36368ea9f17958e4eb04bd1f9daf3aba9213 | 4c1380a03c37e7950dcf2bba794e75b7e4a8dfd0 | refs/heads/master | 2020-05-20T01:05:22.499224 | 2019-05-07T01:11:05 | 2019-05-07T01:11:05 | 185,292,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,981 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Installs and configures AMQP
"""
from packstack.installer import basedefs
from packstack.installer import validators
from packstack.installer import processors
from packstack.installer import utils
from packstack.modules.common import filtered_hosts
from packstack.modules.documentation import update_params_usage
from packstack.modules.ospluginutils import appendManifestFile
from packstack.modules.ospluginutils import createFirewallResources
from packstack.modules.ospluginutils import getManifestTemplate
from packstack.modules.ospluginutils import generate_ssl_cert
# ------------- AMQP Packstack Plugin Initialization --------------
PLUGIN_NAME = "AMQP"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
params = [
{"CMD_OPTION": "amqp-backend",
"PROMPT": "Set the AMQP service backend",
"OPTION_LIST": ["rabbitmq"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "rabbitmq",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_AMQP_BACKEND",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False,
"DEPRECATES": ['CONFIG_AMQP_SERVER']},
{"CMD_OPTION": "amqp-host",
"PROMPT": "Enter the host for the AMQP service",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_ssh],
"DEFAULT_VALUE": utils.get_localhost_ip(),
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_AMQP_HOST",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "amqp-enable-ssl",
"PROMPT": "Enable SSL for the AMQP service?",
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "n",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_AMQP_ENABLE_SSL",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "amqp-enable-auth",
"PROMPT": "Enable Authentication for the AMQP service?",
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_options],
"DEFAULT_VALUE": "n",
"MASK_INPUT": False,
"LOOSE_VALIDATION": False,
"CONF_NAME": "CONFIG_AMQP_ENABLE_AUTH",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
]
update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
group = {"GROUP_NAME": "AMQP",
"DESCRIPTION": "AMQP Config parameters",
"PRE_CONDITION": False,
"PRE_CONDITION_MATCH": True,
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True}
controller.addGroup(group, params)
params = [
{"CMD_OPTION": "amqp-nss-certdb-pw",
"PROMPT": "Enter the password for NSS certificate database",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "PW_PLACEHOLDER",
"PROCESSORS": [processors.process_password],
"MASK_INPUT": True,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_AMQP_NSS_CERTDB_PW",
"USE_DEFAULT": False,
"NEED_CONFIRM": True,
"CONDITION": False},
]
update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
group = {"GROUP_NAME": "AMQPSSL",
"DESCRIPTION": "AMQP Config SSL parameters",
"PRE_CONDITION": "CONFIG_AMQP_ENABLE_SSL",
"PRE_CONDITION_MATCH": "y",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True}
controller.addGroup(group, params)
params = [
{"CMD_OPTION": "amqp-auth-user",
"PROMPT": "Enter the user for amqp authentication",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "amqp_user",
"MASK_INPUT": False,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_AMQP_AUTH_USER",
"USE_DEFAULT": False,
"NEED_CONFIRM": False,
"CONDITION": False},
{"CMD_OPTION": "amqp-auth-password",
"PROMPT": "Enter the password for user authentication",
"OPTION_LIST": ["y", "n"],
"VALIDATORS": [validators.validate_not_empty],
"PROCESSORS": [processors.process_password],
"DEFAULT_VALUE": "PW_PLACEHOLDER",
"MASK_INPUT": True,
"LOOSE_VALIDATION": True,
"CONF_NAME": "CONFIG_AMQP_AUTH_PASSWORD",
"USE_DEFAULT": False,
"NEED_CONFIRM": True,
"CONDITION": False},
]
update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
group = {"GROUP_NAME": "AMQPAUTH",
"DESCRIPTION": "AMQP Config Athentication parameters",
"PRE_CONDITION": "CONFIG_AMQP_ENABLE_AUTH",
"PRE_CONDITION_MATCH": "y",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True}
controller.addGroup(group, params)
def initSequences(controller):
amqpsteps = [
{'title': 'Adding AMQP manifest entries',
'functions': [create_manifest]}
]
controller.addSequence("Installing AMQP", [], [], amqpsteps)
# ------------------------ step functions -------------------------
def create_manifest(config, messages):
server = utils.ScriptRunner(config['CONFIG_AMQP_HOST'])
if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
config['CONFIG_AMQP_SSL_ENABLED'] = True
config['CONFIG_AMQP_PROTOCOL'] = 'ssl'
config['CONFIG_AMQP_CLIENTS_PORT'] = "5671"
amqp_host = config['CONFIG_AMQP_HOST']
service = 'AMQP'
ssl_key_file = '/etc/pki/tls/private/ssl_amqp.key'
ssl_cert_file = '/etc/pki/tls/certs/ssl_amqp.crt'
cacert = config['CONFIG_AMQP_SSL_CACERT_FILE'] = (
config['CONFIG_SSL_CACERT']
)
generate_ssl_cert(config, amqp_host, service, ssl_key_file,
ssl_cert_file)
else:
# Set default values
config['CONFIG_AMQP_CLIENTS_PORT'] = "5672"
config['CONFIG_AMQP_SSL_ENABLED'] = False
config['CONFIG_AMQP_PROTOCOL'] = 'tcp'
if config['CONFIG_AMQP_ENABLE_AUTH'] == 'n':
config['CONFIG_AMQP_AUTH_PASSWORD'] = 'guest'
config['CONFIG_AMQP_AUTH_USER'] = 'guest'
manifestfile = "%s_amqp.pp" % config['CONFIG_AMQP_HOST']
manifestdata = getManifestTemplate('amqp')
if config['CONFIG_IP_VERSION'] == 'ipv6':
config['CONFIG_AMQP_HOST_URL'] = "[%s]" % config['CONFIG_AMQP_HOST']
else:
config['CONFIG_AMQP_HOST_URL'] = config['CONFIG_AMQP_HOST']
fw_details = dict()
# All hosts should be able to talk to amqp
for host in filtered_hosts(config, exclude=False):
key = "amqp_%s" % host
fw_details.setdefault(key, {})
fw_details[key]['host'] = "%s" % host
fw_details[key]['service_name'] = "amqp"
fw_details[key]['chain'] = "INPUT"
fw_details[key]['ports'] = ['5671', '5672']
fw_details[key]['proto'] = "tcp"
config['FIREWALL_AMQP_RULES'] = fw_details
manifestdata += createFirewallResources('FIREWALL_AMQP_RULES')
appendManifestFile(manifestfile, manifestdata, 'pre')
| [
"[email protected]"
] | |
e71f0a615ae491bb9857459804dafdee895970ae | fd5bc0e8a3ac2b7ba793287084f725a8cd10b5ef | /tests/bench/loadrelated.py | 9942ed0b1e2b9a0ae4d8a8c7c923e95f0e30e58e | [
"BSD-3-Clause"
] | permissive | moyaya/python-stdnet | 404cb645b80c59b08ce4506480ce897c24032dcd | 8d6c41ba1ddb8024e6bfab859f99bf96966d04cf | refs/heads/master | 2021-01-24T01:00:18.203118 | 2012-01-13T18:23:20 | 2012-01-13T18:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | '''Benchmark load related.'''
from stdnet import test, transaction
from stdnet.utils import populate, zip
from examples.data import FinanceTest, Instrument, Fund, Position
class LoadRelatedTest(FinanceTest):
def setUp(self):
self.data.makePositions()
def testLoad(self):
for p in Position.objects.all():
self.assertTrue(p.instrument.name)
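    # Note (added comment, not in the original benchmark): the plain loop above
    # presumably triggers one backend lookup per position to fetch `instrument`,
    # while `load_related('instrument')` below prefetches the related
    # instruments in bulk -- which is the comparison this benchmark measures.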
def testLoadRelated(self):
for p in Position.objects.all().load_related('instrument'):
self.assertTrue(p.instrument.name)
| [
"[email protected]"
] | |
a5ae930e8fe263663440b7fda29bd5a056e44d78 | b589f3997e790c3760ab6ddce1dd1b7813cfab3a | /232.py | e2c5834920299064245bc4ccf2a5c4e5fe64f1ff | [] | no_license | higsyuhing/leetcode_easy | 56ceb2aab31f7c11671d311552aaf633aadd14a8 | 48d516fdbb086d697e2593a9ce1dbe6f40c3c701 | refs/heads/master | 2022-12-04T00:49:33.894066 | 2022-11-15T20:44:36 | 2022-11-15T20:44:36 | 135,224,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | class MyQueue(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.stack1 = []
self.stack2 = []
self.slen = 0
        self.curr = 1  # 1: elements stored in stack1 (push order); 2: elements stored in stack2 (pop order)
def push(self, x):
"""
Push element x to the back of queue.
:type x: int
:rtype: void
"""
if self.curr == 1:
self.stack1.append(x)
self.slen = self.slen + 1
pass
else:
for index in range(self.slen):
self.stack1.append(self.stack2.pop())
pass
self.stack1.append(x)
self.slen = self.slen + 1
self.curr = 1
pass
return
def pop(self):
"""
Removes the element from in front of queue and returns that element.
:rtype: int
"""
if self.slen == 0:
print "Error! "
return 0
if self.curr == 1:
for index in range(self.slen-1):
self.stack2.append(self.stack1.pop())
pass
self.slen = self.slen - 1
self.curr = 2
return self.stack1.pop()
else:
self.slen = self.slen - 1
return self.stack2.pop()
pass
def peek(self):
"""
Get the front element.
:rtype: int
"""
if self.slen == 0:
print "Error! "
return 0
if self.curr == 1:
for index in range(self.slen):
self.stack2.append(self.stack1.pop())
pass
self.curr = 2
return self.stack2[self.slen-1]
else:
return self.stack2[self.slen-1]
pass
def empty(self):
"""
Returns whether the queue is empty.
:rtype: bool
"""
if self.slen == 0:
return True
else:
return False
# Your MyQueue object will be instantiated and called as such:
# obj = MyQueue()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.peek()
# param_4 = obj.empty()
| [
"[email protected]"
] | |
57cd82ee8cf61947cac176ab1e3935c3582c06d2 | bc2a96e8b529b0c750f6bc1d0424300af9743904 | /acapy_client/models/credential_definition_send_request.py | 8d1879bd302faeaabbeac1ec17a5fbce0eddf4c4 | [
"Apache-2.0"
] | permissive | TimoGlastra/acapy-client | d091fd67c97a57f2b3462353459780281de51281 | d92ef607ba2ff1152ec15429f2edb20976991424 | refs/heads/main | 2023-06-29T22:45:07.541728 | 2021-08-03T15:54:48 | 2021-08-03T15:54:48 | 396,015,854 | 1 | 0 | Apache-2.0 | 2021-08-14T13:22:28 | 2021-08-14T13:22:27 | null | UTF-8 | Python | false | false | 2,441 | py | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="CredentialDefinitionSendRequest")
@attr.s(auto_attribs=True)
class CredentialDefinitionSendRequest:
""" """
revocation_registry_size: Union[Unset, int] = UNSET
schema_id: Union[Unset, str] = UNSET
support_revocation: Union[Unset, bool] = UNSET
tag: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
revocation_registry_size = self.revocation_registry_size
schema_id = self.schema_id
support_revocation = self.support_revocation
tag = self.tag
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if revocation_registry_size is not UNSET:
field_dict["revocation_registry_size"] = revocation_registry_size
if schema_id is not UNSET:
field_dict["schema_id"] = schema_id
if support_revocation is not UNSET:
field_dict["support_revocation"] = support_revocation
if tag is not UNSET:
field_dict["tag"] = tag
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
revocation_registry_size = d.pop("revocation_registry_size", UNSET)
schema_id = d.pop("schema_id", UNSET)
support_revocation = d.pop("support_revocation", UNSET)
tag = d.pop("tag", UNSET)
credential_definition_send_request = cls(
revocation_registry_size=revocation_registry_size,
schema_id=schema_id,
support_revocation=support_revocation,
tag=tag,
)
credential_definition_send_request.additional_properties = d
return credential_definition_send_request
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| [
"[email protected]"
] | |
0bf464fb6204343b71d383b81c94bf835f6e6d58 | 58c34c597e825634fb5833b22e178df4fe570d39 | /lib/adapter/cheat_cheat.py | 9844008c6be19c9654ed8c373292de2a9e5132c6 | [
"MIT",
"CC-BY-SA-3.0"
] | permissive | sullivant/cheat.sh | 2eb731eb1d7c6b03d65b2dd5f9b6a325b167c005 | e2e69b61a361751a145b977ca2f58ae4a50d756e | refs/heads/master | 2020-05-30T09:36:58.834850 | 2019-05-31T19:47:53 | 2019-05-31T19:47:53 | 189,649,817 | 1 | 0 | MIT | 2019-05-31T19:45:23 | 2019-05-31T19:45:22 | null | UTF-8 | Python | false | false | 549 | py | """
Adapter for https://github.com/cheat/cheat
Cheatsheets are located in `cheat/cheatsheets/`
Each cheat sheet is a separate file without extension
"""
# pylint: disable=relative-import,abstract-method
from .git_adapter import GitRepositoryAdapter
class Cheat(GitRepositoryAdapter):
"""
cheat/cheat adapter
"""
_adapter_name = "cheat"
_output_format = "code"
_cache_needed = True
_repository_url = "https://github.com/cheat/cheat"
_cheatsheet_files_prefix = "cheat/cheatsheets/"
_cheatsheet_file_mask = "*"
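    # The attributes above are presumably consumed by GitRepositoryAdapter:
    # the repository is cloned from _repository_url and every file under
    # _cheatsheet_files_prefix matching _cheatsheet_file_mask is exposed as a
    # cheat sheet (comment added for exposition; not in the original adapter).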
| [
"[email protected]"
] | |
ade0b23d401c7a201eec94e034a7bb38e413996e | 9abc2f4fbf1b31b5a56507437b4a8d9c3f3db7e6 | /users/urls.py | 98678c65848e0cd95f42b0434063e6edf15da19f | [] | no_license | odbalogun/ticketr | e9fe8461d66dabe395f0e1af8fbecc67dbb16e97 | 94f24c82f407f861f1614a151feb3fdd62b283e5 | refs/heads/master | 2022-11-30T22:40:30.931160 | 2019-08-09T14:34:38 | 2019-08-09T14:34:38 | 188,833,600 | 0 | 0 | null | 2022-11-22T03:50:30 | 2019-05-27T11:50:07 | Python | UTF-8 | Python | false | false | 595 | py | from django.urls import path
# from .views import CustomLoginView, UserCreateView, UserListView, CustomLogoutView
from .views import CustomLoginView, CustomLogoutView, ProfileView
from django.contrib.auth.decorators import login_required
app_name = 'users'
urlpatterns = [
# path('', UserListView.as_view(), name='list'),
path('login/', CustomLoginView.as_view(), name='login'),
path('logout/', CustomLogoutView.as_view(), name='logout'),
path('profile/', login_required(ProfileView.as_view()), name='profile')
# path('create/', UserCreateView.as_view(), name='create'),
]
| [
"[email protected]"
] | |
27282a88578e3530b456399cac4b11018cde5044 | a8e2c66b3ebadfc17ee9aee197b3f466534cee16 | /ytn11/wh/wh/items.py | 0c48330df2942d96bbb37a839696a43850e30629 | [] | no_license | yintiannong/98kar | 49b6db186a4543a7c50671df990bb491846c1a98 | 3863529f57e9d2d9bc1bdf8188916e25ad289db0 | refs/heads/master | 2022-01-07T05:49:31.566453 | 2019-05-22T07:04:45 | 2019-05-22T07:04:45 | 187,794,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class WhItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
company = scrapy.Field()
company_id = scrapy.Field()
postion_id = scrapy.Field()
company_type = scrapy.Field()
company_size = scrapy.Field()
url = scrapy.Field()
postion = scrapy.Field()
salary = scrapy.Field()
education=scrapy.Field()
address = scrapy.Field()
exe = scrapy.Field()
job_type = scrapy.Field()
update_time = scrapy.Field()
data_from = scrapy.Field()
desc_job = scrapy.Field()
salary2 = scrapy.Field()
conpany_address = scrapy.Field()
phone_num = scrapy.Field()
hr_name = scrapy.Field()
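# Illustrative usage sketch (not part of the original file): inside a spider,
# a WhItem is filled like a dict and yielded, for example:
#   item = WhItem()
#   item['company'] = 'Example Co.'
#   item['postion'] = 'Data Engineer'
#   yield item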
| [
"[email protected]"
] | |
e8ae67da9e630730ae1df9ffca7fa2d4296f1d26 | 24dac117231c9ca39e09e1fd27db8de295a7fe45 | /Trident/settings.py | 2c943c27e79ee0020f4fe655aed1df9d616f3972 | [] | no_license | rohitrajput-42/Trident | 784f23b9fa02d405d55715ded627c274a1c887f2 | 0d75ef954c5d6f88d3b4937e90ab9aace120bdb9 | refs/heads/main | 2023-06-13T06:10:19.172276 | 2021-07-10T16:25:22 | 2021-07-10T16:25:22 | 384,705,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9ew388dr7b@prao9gium)@&@r0ma0dze5%-1fg!1jiwe)@hcpg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
'product',
'accounts',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Trident.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Trident.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
MEDIA_URL = "image/download/"
MEDIA_ROOT = BASE_DIR
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
CRISPY_TEMPLATE_PACK = 'bootstrap4' | [
"[email protected]"
] | |
311b64e499752b8f19be4d85c59a5b14455ada39 | a1a57977131ea917a3f3094dae4a3d18846103c0 | /unittests/pytests/problems/TestTimeStepUser.py | c53c39a3337525c4aa9aa5702ae8367062012113 | [
"MIT"
] | permissive | rwalkerlewis/pylith | cef02d5543e99a3e778a1c530967e6b5f1d5dcba | 8d0170324d3fcdc5e6c4281759c680faa5dd8d38 | refs/heads/master | 2023-08-24T18:27:30.877550 | 2020-08-05T16:37:28 | 2020-08-05T16:37:28 | 154,047,591 | 0 | 0 | MIT | 2018-10-21T20:05:59 | 2018-10-21T20:05:59 | null | UTF-8 | Python | false | false | 4,095 | py | #!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file unittests/pytests/problems/TestTimeStepUser.py
## @brief Unit testing of TimeStepUser object.
import unittest
from pylith.problems.TimeStepUser import TimeStepUser
from pyre.units.time import second,year
stepsE = [2*1.0, 2*2.0, 2*3.0]
# ----------------------------------------------------------------------
class Integrator:
def __init__(self, dt):
self.dt = dt
def stableTimeStep(self, mesh):
return self.dt
# ----------------------------------------------------------------------
class TestTimeStepUser(unittest.TestCase):
"""
Unit testing of TimeStepUser object.
"""
def setUp(self):
from spatialdata.units.Nondimensional import Nondimensional
normalizer = Nondimensional()
normalizer._configure()
normalizer.setTimeScale(0.5*year)
tstep = TimeStepUser()
tstep._configure()
tstep.filename = "data/timesteps.txt"
tstep.preinitialize()
tstep.initialize(normalizer)
self.tstep = tstep
return
def test_constructor(self):
"""
Test constructor.
"""
tstep = TimeStepUser()
tstep._configure()
return
def test_initialize(self):
"""
Test initialize().
"""
tstep = self.tstep
for stepE, step in zip(stepsE, tstep.steps):
self.assertEqual(stepE, step)
return
def test_numTimeSteps(self):
"""
Test numTimeSteps().
"""
tstep = self.tstep
self.assertEqual(1, tstep.numTimeSteps())
tstep.totalTimeN = 12.0 / 0.5 # nondimensionalize
self.assertEqual(6, tstep.numTimeSteps())
tstep.loopSteps = True
tstep.totalTimeN = 7.0 / 0.5 # nondimensionalize
self.assertEqual(5, tstep.numTimeSteps())
return
def test_timeStep(self):
"""
Test timeStep().
"""
tstep = self.tstep
step1 = 1.0 / 0.5 # nondimensionalize
step2 = 2.0 / 0.5 # nondimensionalize
step3 = 3.0 / 0.5 # nondimensionalize
integrators = [Integrator(40.0),
Integrator(80.0)]
from pylith.topology.Mesh import Mesh
mesh = Mesh()
self.assertEqual(step1, tstep.timeStep(mesh, integrators))
self.assertEqual(step2, tstep.timeStep(mesh, integrators))
self.assertEqual(step3, tstep.timeStep(mesh, integrators))
self.assertEqual(step3, tstep.timeStep(mesh, integrators))
self.assertEqual(step3, tstep.timeStep(mesh, integrators))
tstep.index = 0
tstep.loopSteps = True
self.assertEqual(step1, tstep.timeStep(mesh, integrators))
self.assertEqual(step2, tstep.timeStep(mesh, integrators))
self.assertEqual(step3, tstep.timeStep(mesh, integrators))
self.assertEqual(step1, tstep.timeStep(mesh, integrators))
self.assertEqual(step2, tstep.timeStep(mesh, integrators))
integrators = [Integrator(0.01),
Integrator(8.0)]
caught = False
try:
tstep.timeStep(mesh, integrators)
except RuntimeError:
caught = True
self.failUnless(caught)
return
def test_currentStep(self):
"""
Test currentStep().
"""
tstep = self.tstep
integrators = [Integrator(4.0),
Integrator(8.0)]
from pylith.topology.Mesh import Mesh
from pylith.mpi.Communicator import petsc_comm_world
mesh = Mesh()
#mesh.setComm(petsc_comm_world())
tstep.timeStep(mesh, integrators)
stepE = 1.0 / 0.5 # Nondimensionalize
self.assertEqual(stepE, tstep.currentStep())
return
def test_factory(self):
"""
Test factory method.
"""
from pylith.problems.TimeStepUser import time_step
ts = time_step()
return
# End of file
| [
"[email protected]"
] | |
05aa5d78f1a77c1849dde9dff4856a79eddc89a7 | c1c87cd334972c01935dbb72769064e5d0066ac8 | /pickpack/robots/scratchpad.py | 2d469bd1d6d84f59c8bd0ec2db7949dd53ec5962 | [] | no_license | defgsus/pypickpack | 576e9471c9cc7cce60c1010d51b4ea85ec00ecfc | 8a604ec1502c615bf24d77f09d564962c3d04930 | refs/heads/master | 2022-12-28T13:17:18.306748 | 2020-10-09T00:50:41 | 2020-10-09T00:50:41 | 269,505,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,253 | py | import random
from .base import RobotBase
from .._2d import direction_int
from ..astar import astar_search
from ..log import log
from ..static_map import StaticMap
from ..items import Article, PickOrder
class RandomRobot(RobotBase):
def __init__(self, id):
super().__init__(id)
def process(self, world, time_delta):
if self.is_next_move_frame(world):
if random.randrange(10) == 0:
self.dir_x, self.dir_y = random.choice(((-1, 0), (1, 0), (0, -1), (0, 1)))
if not world.agent_move(self, self.direction):
self.dir_x, self.dir_y = random.choice(((-1, 0), (1, 0), (0, -1), (0, 1)))
class RobotFollowPlayer(RobotBase):
def __init__(self, id):
super().__init__(id)
def process(self, world, time_delta):
if self.is_next_move_frame(world):
way_to_player = astar_search(
self.position, world.player.position,
lambda pos: self.get_adjacent_nodes(world, pos, exclude_agents={self})
)
self.debug_way = None
if way_to_player:
next_pos = way_to_player[1]
dirx, diry = direction_int(self.position, next_pos)
if dirx or diry:
world.agent_move(self, (dirx, diry))
self.debug_way = way_to_player
class Robot(RobotBase):
def __init__(self, id):
super().__init__(id)
self.performance = 0
def copy(self):
c = super().copy()
c.performance = self.performance
return c
def on_has_put(self, item, position=None, other_agent=None):
from ..agents import Package
from ..items import Article
if isinstance(item, Article):
if isinstance(other_agent, Package):
self.performance += 1
def process(self, world, time_delta):
possible_actions = self.get_possible_actions(world)
evaluated_actions = self.evaluate_actions(world, possible_actions)
#possible_actions.sort(key=lambda action: action.get_estimated_cost(world, self))
#log(possible_actions)
#log(evaluated_actions)
if evaluated_actions:
log(evaluated_actions[0])
action = evaluated_actions[0]["action"]
#action = random.choice(possible_actions)
action.execute(world, self)
def get_possible_actions(self, world):
from ..actions import MoveTo, MoveBefore, PickDirection, PutDirection
from ..agents import Player, Package, Shelf, Computer
from ..items import Article, PickOrder
classes_to_approach = (Computer, PickOrder, Player, Robot, Package, Shelf, Article)
possible_actions = [
# MoveBefore(world.player.position),
PickDirection((-1, 0)),
PickDirection((1, 0)),
PickDirection((0, -1)),
PickDirection((0, 1)),
]
for item in self.items:
possible_actions += [
PutDirection((-1, 0), item.id),
PutDirection((1, 0), item.id),
PutDirection((0, -1), item.id),
PutDirection((0, 1), item.id),
]
for klass in classes_to_approach:
agent = world.get_closest_agent(self.position, klass, exclude_agents=[self])
if agent:
possible_actions.append(MoveBefore(agent.position))
return possible_actions
def evaluate_actions(self, world, actions):
ret_actions = []
for action in actions:
value = self._evaluate_action(world, action, depth=1)
if value is not None:
ret_actions.append({
"action": action,
"value": value,
})
ret_actions.sort(key=lambda a: -a["value"])
return ret_actions
def _evaluate_action(self, world, action, depth):
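        # Simulate the action on copies of the world and agent, giving it up to
        # 100 ticks to finish; if it completes, recursively score the follow-up
        # actions down to `depth` and return the best heuristic value reachable
        # from here (returning None means the action never finished).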
action = action.copy()
world_copy = world.copy()
self_copy = world_copy.agents.get_by_id(self.id)
action_passed = False
for i in range(100):
if not action.execute(world_copy, self_copy):
break
if action.is_finished(world_copy, self_copy):
action_passed = True
break
if not action_passed:
return
cur_value = self_copy.get_heuristic_value(world_copy)
if depth < 1:
return cur_value
best_action, best_value = None, None
new_actions = self_copy.get_possible_actions(world_copy)
for new_action in new_actions:
value = self._evaluate_action(world_copy, new_action, depth - 1)
if value is not None:
if best_value is None or value > best_value:
best_action, best_value = new_action, value
return max(best_value, cur_value) if best_value is not None else cur_value
def get_heuristic_value(self, world):
value = 0
value += min(0, self.max_items - len(self.items) * 4)
value += len(self.items_by_class(Article)) * 2
value += len(self.items_by_class(PickOrder)) * 3
value += self.performance * 5
return value
| [
"[email protected]"
] | |
b0ebf56863454ffb4571867555552aad6d06569d | 6527b66fd08d9e7f833973adf421faccd8b765f5 | /yuancloud/addons/hw_proxy/controllers/main.py | 1a934348be4f3f21a928d20583d78d39b10c4c17 | [] | no_license | cash2one/yuancloud | 9a41933514e57167afb70cb5daba7f352673fb4d | 5a4fd72991c846d5cb7c5082f6bdfef5b2bca572 | refs/heads/master | 2021-06-19T22:11:08.260079 | 2017-06-29T06:26:15 | 2017-06-29T06:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,759 | py | # -*- coding: utf-8 -*-
import logging
import commands
import json
import os
import os.path
import yuancloud
import time
import random
import subprocess
import json
import werkzeug
import werkzeug.wrappers
_logger = logging.getLogger(__name__)
from yuancloud import http
from yuancloud.http import request
# Those are the builtin raspberry pi USB modules, they should
# not appear in the list of connected devices.
BANNED_DEVICES = set([
"0424:9514", # Standard Microsystem Corp. Builtin Ethernet module
"1d6b:0002", # Linux Foundation 2.0 root hub
"0424:ec00", # Standard Microsystem Corp. Other Builtin Ethernet module
])
# drivers modules must add to drivers an object with a get_status() method
# so that 'status' can return the status of all active drivers
drivers = {}
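# Illustrative sketch only (not part of the original module): a hardware driver
# module would typically register itself along these lines, returning the
# {'status': ..., 'messages': [...]} shape that status_http() below expects.
# The name 'demo_driver' is made up for the example.
#
# class DemoDriver(object):
#     def get_status(self):
#         return {'status': 'connected', 'messages': ['demo driver ready']}
#
# drivers['demo_driver'] = DemoDriver()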
class Proxy(http.Controller):
def get_status(self):
statuses = {}
for driver in drivers:
statuses[driver] = drivers[driver].get_status()
return statuses
@http.route('/hw_proxy/hello', type='http', auth='none', cors='*')
def hello(self):
return "ping"
@http.route('/hw_proxy/handshake', type='json', auth='none', cors='*')
def handshake(self):
return True
@http.route('/hw_proxy/status', type='http', auth='none', cors='*')
def status_http(self):
resp = """
<!DOCTYPE HTML>
<html>
<head>
<title>YuanCloud's PosBox</title>
<style>
body {
width: 480px;
margin: 60px auto;
font-family: sans-serif;
text-align: justify;
color: #6B6B6B;
}
.device {
border-bottom: solid 1px rgb(216,216,216);
padding: 9px;
}
.device:nth-child(2n) {
background:rgb(240,240,240);
}
</style>
</head>
<body>
<h1>Hardware Status</h1>
<p>The list of enabled drivers and their status</p>
"""
statuses = self.get_status()
for driver in statuses:
status = statuses[driver]
if status['status'] == 'connecting':
color = 'black'
elif status['status'] == 'connected':
color = 'green'
else:
color = 'red'
resp += "<h3 style='color:"+color+";'>"+driver+' : '+status['status']+"</h3>\n"
resp += "<ul>\n"
for msg in status['messages']:
resp += '<li>'+msg+'</li>\n'
resp += "</ul>\n"
resp += """
<h2>Connected Devices</h2>
<p>The list of connected USB devices as seen by the posbox</p>
"""
devices = commands.getoutput("lsusb").split('\n')
count = 0
resp += "<div class='devices'>\n"
for device in devices:
device_name = device[device.find('ID')+2:]
device_id = device_name.split()[0]
if not (device_id in BANNED_DEVICES):
resp+= "<div class='device' data-device='"+device+"'>"+device_name+"</div>\n"
count += 1
if count == 0:
resp += "<div class='device'>No USB Device Found</div>"
resp += "</div>\n</body>\n</html>\n\n"
return request.make_response(resp,{
'Cache-Control': 'no-cache',
'Content-Type': 'text/html; charset=utf-8',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
})
@http.route('/hw_proxy/status_json', type='json', auth='none', cors='*')
def status_json(self):
return self.get_status()
@http.route('/hw_proxy/scan_item_success', type='json', auth='none', cors='*')
def scan_item_success(self, ean):
"""
A product has been scanned with success
"""
print 'scan_item_success: ' + str(ean)
@http.route('/hw_proxy/scan_item_error_unrecognized', type='json', auth='none', cors='*')
def scan_item_error_unrecognized(self, ean):
"""
A product has been scanned without success
"""
print 'scan_item_error_unrecognized: ' + str(ean)
@http.route('/hw_proxy/help_needed', type='json', auth='none', cors='*')
def help_needed(self):
"""
        The user wants help (e.g. light is on)
"""
print "help_needed"
@http.route('/hw_proxy/help_canceled', type='json', auth='none', cors='*')
def help_canceled(self):
"""
The user stops the help request
"""
print "help_canceled"
@http.route('/hw_proxy/payment_request', type='json', auth='none', cors='*')
def payment_request(self, price):
"""
The PoS will activate the method payment
"""
print "payment_request: price:"+str(price)
return 'ok'
@http.route('/hw_proxy/payment_status', type='json', auth='none', cors='*')
def payment_status(self):
print "payment_status"
return { 'status':'waiting' }
@http.route('/hw_proxy/payment_cancel', type='json', auth='none', cors='*')
def payment_cancel(self):
print "payment_cancel"
@http.route('/hw_proxy/transaction_start', type='json', auth='none', cors='*')
def transaction_start(self):
print 'transaction_start'
@http.route('/hw_proxy/transaction_end', type='json', auth='none', cors='*')
def transaction_end(self):
print 'transaction_end'
@http.route('/hw_proxy/cashier_mode_activated', type='json', auth='none', cors='*')
def cashier_mode_activated(self):
print 'cashier_mode_activated'
@http.route('/hw_proxy/cashier_mode_deactivated', type='json', auth='none', cors='*')
def cashier_mode_deactivated(self):
print 'cashier_mode_deactivated'
@http.route('/hw_proxy/open_cashbox', type='json', auth='none', cors='*')
def open_cashbox(self):
print 'open_cashbox'
@http.route('/hw_proxy/print_receipt', type='json', auth='none', cors='*')
def print_receipt(self, receipt):
print 'print_receipt' + str(receipt)
@http.route('/hw_proxy/is_scanner_connected', type='json', auth='none', cors='*')
def is_scanner_connected(self, receipt):
print 'is_scanner_connected?'
return False
@http.route('/hw_proxy/scanner', type='json', auth='none', cors='*')
def scanner(self, receipt):
print 'scanner'
time.sleep(10)
return ''
@http.route('/hw_proxy/log', type='json', auth='none', cors='*')
def log(self, arguments):
_logger.info(' '.join(str(v) for v in arguments))
@http.route('/hw_proxy/print_pdf_invoice', type='json', auth='none', cors='*')
def print_pdf_invoice(self, pdfinvoice):
print 'print_pdf_invoice' + str(pdfinvoice)
| [
"[email protected]"
] | |
f7b3b2faa86e0a9a1ac895411d5a0ba761b172ea | 9907134b0da8e5391c51b00c426c648eece7b4b9 | /Unidad 2/pfijo.py | a7a60477a2df6797926949881bacf7f7f695a593 | [] | no_license | hectorrdz98/metodos-numericos | 1fd21593c8c324f0e0e643cc08a8d930ea2e8cf3 | dab8e9425f454be60a74d30c985a643bcb915ce6 | refs/heads/master | 2022-01-22T07:26:48.566615 | 2019-05-29T12:26:43 | 2019-05-29T12:26:43 | 167,975,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import math
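# Fixed-point iteration: starting from p0, repeatedly compute p = g(p0) and
# stop when two successive iterates differ by at most tol or after n iterations.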
p0 = 3.8
n = 3
tol = 0.0001
def g(p):
return -4 + (4*p) - (0.5 * p * p)
flag = False
for i in range(n):
p = g(p0)
    print('\nIteration {}: g(p0)={} and p0={}'.format(i+1,g(p0),p0))
    print('abs={}'.format(math.fabs(p-p0)))
if math.fabs(p-p0) <= tol:
        print('\nThe value p0={} is already within the tolerance {} after {} iterations'.format(p0,tol,i+1))
flag = True
break
p0 = p
if not flag:
    print('\nCompleted all {} iterations without reaching the tolerance {}'.format(n,tol))
print('Reached p0={}'.format(p0)) | [
"="
] | = |
32fe115b47214dd5d925bc1419747dfcf52e0871 | 150d9e4cee92be00251625b7f9ff231cc8306e9f | /NextGreaterElement.py | eba1f8d0ae08308ff8e272cffeec6304822d027f | [] | no_license | JerinPaulS/Python-Programs | 0d3724ce277794be597104d9e8f8becb67282cb0 | d0778178d89d39a93ddb9b95ca18706554eb7655 | refs/heads/master | 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,114 | py | '''
496. Next Greater Element I
The next greater element of some element x in an array is the first greater element that is to the right of x in the same array.
You are given two distinct 0-indexed integer arrays nums1 and nums2, where nums1 is a subset of nums2.
For each 0 <= i < nums1.length, find the index j such that nums1[i] == nums2[j] and determine the next greater element of nums2[j] in nums2. If there is no next greater element, then the answer for this query is -1.
Return an array ans of length nums1.length such that ans[i] is the next greater element as described above.
Example 1:
Input: nums1 = [4,1,2], nums2 = [1,3,4,2]
Output: [-1,3,-1]
Explanation: The next greater element for each value of nums1 is as follows:
- 4 is underlined in nums2 = [1,3,4,2]. There is no next greater element, so the answer is -1.
- 1 is underlined in nums2 = [1,3,4,2]. The next greater element is 3.
- 2 is underlined in nums2 = [1,3,4,2]. There is no next greater element, so the answer is -1.
Example 2:
Input: nums1 = [2,4], nums2 = [1,2,3,4]
Output: [3,-1]
Explanation: The next greater element for each value of nums1 is as follows:
- 2 is underlined in nums2 = [1,2,3,4]. The next greater element is 3.
- 4 is underlined in nums2 = [1,2,3,4]. There is no next greater element, so the answer is -1.
Constraints:
1 <= nums1.length <= nums2.length <= 1000
0 <= nums1[i], nums2[i] <= 104
All integers in nums1 and nums2 are unique.
All the integers of nums1 also appear in nums2.
Follow up: Could you find an O(nums1.length + nums2.length) solution?
'''
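# Approach: one pass over nums2 with a monotonically decreasing stack; when a
# larger value arrives it becomes the next greater element of every smaller
# value popped off the stack, which answers the follow-up in
# O(len(nums1) + len(nums2)) time.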
class Solution(object):
def nextGreaterElement(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
        next_great = {}  # maps each value in nums2 to its next greater element
        stack = []       # values still waiting for a greater element (kept decreasing)
        result = []
        for val in nums2:
            # val is the next greater element of everything smaller on the stack
            while len(stack) > 0 and stack[len(stack) - 1] < val:
                next_great[stack.pop()] = val
            stack.append(val)
        print(next_great)
        for val in nums1:
            if val in next_great:
                result.append(next_great[val])
            else:
                result.append(-1)
        return result
obj = Solution()
print(obj.nextGreaterElement([4,1,2],[1,3,4,8,7,6,5,10,2]))
print(obj.nextGreaterElement([137,59,92,122,52,131,79,236,94,171,141,86,169,199,248,120,196,168,77,71,5,198,215,230,176,87,189,206,115,76,13,216,197,26,183,54,250,27,109,140,147,25,96,105,30,207,241,8,217,40,0,35,221,191,83,132,9,144,12,91,175,65,170,149,174,82,102,167,62,70,44,143,10,153,160,142,188,81,146,212,15,162,103,163,123,48,245,116,192,14,211,126,63,180,88,155,224,148,134,158,119,165,130,112,166,93,125,1,11,208,150,100,106,194,124,2,184,75,113,104,18,210,202,111,84,223,173,238,41,33,154,47,244,232,249,60,164,227,253,56,157,99,179,6,203,110,127,152,252,55,185,73,67,219,22,156,118,234,37,193,90,187,181,23,220,72,255,58,204,7,107,239,42,139,159,95,45,242,145,172,209,121,24,21,218,246,49,46,243,178,64,161,117,20,214,17,114,69,182,85,229,32,129,29,226,136,39,36,233,43,240,254,57,251,78,51,195,98,205,108,61,66,16,213,19,68,237,190,3,200,133,80,177,97,74,138,38,235,135,186,89,201,4,101,151,31,228,231,34,225,28,222,128,53,50,247],
[137,59,92,122,52,131,79,236,94,171,141,86,169,199,248,120,196,168,77,71,5,198,215,230,176,87,189,206,115,76,13,216,197,26,183,54,250,27,109,140,147,25,96,105,30,207,241,8,217,40,0,35,221,191,83,132,9,144,12,91,175,65,170,149,174,82,102,167,62,70,44,143,10,153,160,142,188,81,146,212,15,162,103,163,123,48,245,116,192,14,211,126,63,180,88,155,224,148,134,158,119,165,130,112,166,93,125,1,11,208,150,100,106,194,124,2,184,75,113,104,18,210,202,111,84,223,173,238,41,33,154,47,244,232,249,60,164,227,253,56,157,99,179,6,203,110,127,152,252,55,185,73,67,219,22,156,118,234,37,193,90,187,181,23,220,72,255,58,204,7,107,239,42,139,159,95,45,242,145,172,209,121,24,21,218,246,49,46,243,178,64,161,117,20,214,17,114,69,182,85,229,32,129,29,226,136,39,36,233,43,240,254,57,251,78,51,195,98,205,108,61,66,16,213,19,68,237,190,3,200,133,80,177,97,74,138,38,235,135,186,89,201,4,101,151,31,228,231,34,225,28,222,128,53,50,247])) | [
"[email protected]"
] | |
7274d5d6acd06026bb0e3945cca73daf74e06bf3 | 4e163aa4aa0f4c4ddc22f74ae21b6fb1c85a7a09 | /134.加油站.py | 2f5d07a462e88ceb77dbdc591efd179e9402385b | [] | no_license | dxc19951001/Everyday_LeetCode | 72f46a0ec2fc651168129720ad0b1e7b5c372b0b | 3f7b2ea959308eb80f4c65be35aaeed666570f80 | refs/heads/master | 2023-08-03T09:22:08.467100 | 2023-07-23T17:08:27 | 2023-07-23T17:08:27 | 270,723,436 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | # coding=utf-8
"""
@project: Everyday_LeetCode
@Author:Charles
@file: 134.加油站.py
@date:2023/1/10 15:35
"""
class Solution(object):
def canCompleteCircuit(self, gas, cost):
"""
:type gas: List[int]
:type cost: List[int]
:rtype: int
"""
        # Greedy algorithm
        # 1. If sum(gas) < sum(cost), the total gas cannot cover the total cost,
        #    so the circuit cannot be completed.
        # 2. To complete the circuit, there must be gas left when driving to the
        #    next station, so the running total cur_sum += gas[i] - cost[i] must
        #    stay non-negative; if it turns negative, the next station cannot be
        #    reached from the current starting point.
        # The problem guarantees a unique answer, so the starting point from which
        # cur_sum never goes negative is the answer.
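        # Worked example (LeetCode's sample): gas = [1,2,3,4,5], cost = [3,4,5,1,2]
        # gives differences [-2,-2,-2,3,3]; cur_sum goes negative at i = 0, 1 and 2,
        # pushing start to 3, after which cur_sum stays non-negative, so the answer is 3.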
if sum(gas) < sum(cost):
return -1
start = 0
cur_sum = 0
for i in range(len(gas)):
print(gas[i] - cost[i])
cur_sum += gas[i] - cost[i]
            print("cur sum", cur_sum)
if cur_sum < 0:
cur_sum = 0
start = i + 1
return start | [
"[email protected]"
] | |
e1dcd2a11d7423ba518efc1697c3a148293ffa2a | 5456502f97627278cbd6e16d002d50f1de3da7bb | /components/google/core/browser/DEPS | 26e9743a04d2db628f4a7357a7d73e4ad5cf843a | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/Chromium_7C66 | 72d108a413909eb3bd36c73a6c2f98de1573b6e5 | c8649ab2a0f5a747369ed50351209a42f59672ee | refs/heads/master | 2023-03-16T12:51:40.231959 | 2017-12-20T10:38:26 | 2017-12-20T10:38:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | include_rules = [
"+components/data_use_measurement/core",
"+components/keyed_service/core",
"+components/pref_registry",
]
| [
"[email protected]"
] | ||
bfe6d113c6248860931cd8d1870126fdd8a59693 | 2194b6c17f3153c5976d6ac4a9ab78211027adab | /otoroshi_admin_api_client/models/otoroshimodels_rs_algo_settings.py | d3bb0d40170f95f52921aa7d6749dcfb1d4114f7 | [] | no_license | krezreb/otoroshi-admin-api-client | 7fab5e873c9c5950d77fffce6bcf80d3fdf4c319 | 9b3156c11eac227024cfe4a26c0129618deb2c4d | refs/heads/master | 2023-05-08T08:32:00.982987 | 2021-05-27T09:55:00 | 2021-05-27T09:55:00 | 371,324,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,732 | py | from typing import Any, Dict, List, Type, TypeVar, Union, cast
import attr
from ..models.null import Null
from ..models.otoroshimodels_rs_algo_settings_type import OtoroshimodelsRSAlgoSettingsType
from ..types import UNSET, Unset
T = TypeVar("T", bound="OtoroshimodelsRSAlgoSettings")
@attr.s(auto_attribs=True)
class OtoroshimodelsRSAlgoSettings:
"""Settings to use RSA signing algorithm"""
private_key: Union[Null, Unset, str] = UNSET
size: Union[Unset, int] = UNSET
public_key: Union[Unset, str] = UNSET
type: Union[Unset, OtoroshimodelsRSAlgoSettingsType] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
private_key: Union[Dict[str, Any], Unset, str]
if isinstance(self.private_key, Unset):
private_key = UNSET
elif isinstance(self.private_key, Null):
private_key = UNSET
if not isinstance(self.private_key, Unset):
private_key = self.private_key.to_dict()
else:
private_key = self.private_key
size = self.size
public_key = self.public_key
type: Union[Unset, str] = UNSET
if not isinstance(self.type, Unset):
type = self.type.value
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if private_key is not UNSET:
field_dict["privateKey"] = private_key
if size is not UNSET:
field_dict["size"] = size
if public_key is not UNSET:
field_dict["publicKey"] = public_key
if type is not UNSET:
field_dict["type"] = type
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
def _parse_private_key(data: object) -> Union[Null, Unset, str]:
if isinstance(data, Unset):
return data
try:
if not isinstance(data, dict):
raise TypeError()
_private_key_type_0 = data
private_key_type_0: Union[Unset, Null]
if isinstance(_private_key_type_0, Unset):
private_key_type_0 = UNSET
else:
private_key_type_0 = Null.from_dict(_private_key_type_0)
return private_key_type_0
except: # noqa: E722
pass
return cast(Union[Null, Unset, str], data)
private_key = _parse_private_key(d.pop("privateKey", UNSET))
size = d.pop("size", UNSET)
public_key = d.pop("publicKey", UNSET)
_type = d.pop("type", UNSET)
type: Union[Unset, OtoroshimodelsRSAlgoSettingsType]
if isinstance(_type, Unset):
type = UNSET
else:
type = OtoroshimodelsRSAlgoSettingsType(_type)
otoroshimodels_rs_algo_settings = cls(
private_key=private_key,
size=size,
public_key=public_key,
type=type,
)
otoroshimodels_rs_algo_settings.additional_properties = d
return otoroshimodels_rs_algo_settings
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| [
"[email protected]"
] | |
f38e52cda7f5a3f771a65f7eeb92d6375981bb4a | f25440c9f9fd470ba44394a36d5659dd47ee8800 | /tests/conftest.py | 6ee0b688b6b226162706d75c8e1acd7eadcb3541 | [] | no_license | kqf/hubmap | 75010d9109f8b8656e244179de5de226be584d5b | 37b3d839f0ad3f47dc39c1b9b036cb1acc27ca2c | refs/heads/master | 2023-02-20T04:06:00.145932 | 2021-01-23T07:56:13 | 2021-01-23T07:56:13 | 317,635,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | import pytest
import tempfile
from pathlib import Path
from models.preprocess import write
from models.mc import make_blob, blob2image
@pytest.fixture
def size():
return 256
@pytest.fixture
def fake_dataset(size=256, nfiles=5):
with tempfile.TemporaryDirectory() as dirname:
path = Path(dirname)
for i in range(nfiles):
mask = make_blob(size, size)
write(mask, path / str(i) / "mask.png")
tile = blob2image(mask)
write(tile, path / str(i) / "tile.png")
yield path
| [
"[email protected]"
] | |
5fd5f69280f7e2c8dfa60b2c1d5a770471cc61ab | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2520/60790/274856.py | 18e4e0a2884251d28a2c4c3bc79d6f4d2f5ba4c8 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | R=int(input())
C=int(input())
r0=int(input())
c0=int(input())
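# Print every cell [i, j] of the R x C grid sorted by its Manhattan distance
# |i - r0| + |j - c0| from the starting cell (r0, c0).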
print(sorted([[i, j] for i in range(R) for j in range(C)], key=lambda x: abs(x[0] - r0) + abs(x[1] - c0))) | [
"[email protected]"
] | |
b903d4dafdaad69917379130429923b552115ff8 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-workbench-ide/aliyunsdkworkbench_ide/request/v20210121/AddEnvironmentRequest.py | 0d20b6717b9971baad3c4aba3f7c1bdd0b316b36 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,700 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class AddEnvironmentRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Workbench-ide', '2021-01-21', 'AddEnvironment')
self.set_method('POST')
def get_ProductId(self): # Long
return self.get_query_params().get('ProductId')
def set_ProductId(self, ProductId): # Long
self.add_query_param('ProductId', ProductId)
def get_EnvName(self): # String
return self.get_query_params().get('EnvName')
def set_EnvName(self, EnvName): # String
self.add_query_param('EnvName', EnvName)
def get_CurrentOrgId(self): # String
return self.get_query_params().get('CurrentOrgId')
def set_CurrentOrgId(self, CurrentOrgId): # String
self.add_query_param('CurrentOrgId', CurrentOrgId)
def get_SecurityGroupId(self): # String
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self, SecurityGroupId): # String
self.add_query_param('SecurityGroupId', SecurityGroupId)
def get_EnvDescription(self): # String
return self.get_query_params().get('EnvDescription')
def set_EnvDescription(self, EnvDescription): # String
self.add_query_param('EnvDescription', EnvDescription)
def get_SupportComputeTypess(self): # RepeatList
return self.get_query_params().get('SupportComputeTypes')
def set_SupportComputeTypess(self, SupportComputeTypes): # RepeatList
for depth1 in range(len(SupportComputeTypes)):
self.add_query_param('SupportComputeTypes.' + str(depth1 + 1), SupportComputeTypes[depth1])
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_IsOpenNatEip(self): # Boolean
return self.get_query_params().get('IsOpenNatEip')
def set_IsOpenNatEip(self, IsOpenNatEip): # Boolean
self.add_query_param('IsOpenNatEip', IsOpenNatEip)
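# Illustrative usage sketch (not part of the generated SDK file); it assumes the
# standard aliyunsdkcore AcsClient and made-up credential/VPC values:
#
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = AddEnvironmentRequest()
# request.set_EnvName('dev-environment')
# request.set_VpcId('vpc-xxxxxxxx')
# response = client.do_action_with_exception(request)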
| [
"[email protected]"
] | |
e79dca9531ee613ea930b7be4c7871b1eac88c18 | d608c2b9fbfcd142fa82875f01f70e1db95cecef | /FlaskAppVenv/Lib/site-packages/pymysql/tests/test_connection.py | c626a0d39468fc0249dbdd719881a28872564b48 | [
"MIT"
] | permissive | nidheekamble/SponsCentral | 9b30918006b98f242de86920a550f8e072ba093f | b8189993cb87cc2d83e36c9d72df7a3b7d620bd7 | refs/heads/master | 2022-12-21T11:14:36.565494 | 2021-01-31T16:15:33 | 2021-01-31T16:15:33 | 135,418,522 | 1 | 2 | MIT | 2022-12-08T07:57:59 | 2018-05-30T09:16:30 | Python | UTF-8 | Python | false | false | 24,709 | py | import datetime
import sys
import time
import unittest2
import pymysql
from pymysql.tests import base
from pymysql._compat import text_type
from pymysql.constants import CLIENT
class TempUser:
def __init__(self, c, user, db, auth=None, authdata=None, password=None):
self._c = c
self._user = user
self._db = db
create = "CREATE USER " + user
if password is not None:
create += " IDENTIFIED BY '%s'" % password
elif auth is not None:
create += " IDENTIFIED WITH %s" % auth
if authdata is not None:
create += " AS '%s'" % authdata
try:
c.execute(create)
self._created = True
except pymysql.err.InternalError:
# already exists - TODO need to check the same plugin applies
self._created = False
try:
c.execute("GRANT SELECT ON %s.* TO %s" % (db, user))
self._grant = True
except pymysql.err.InternalError:
self._grant = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._grant:
self._c.execute("REVOKE SELECT ON %s.* FROM %s" % (self._db, self._user))
if self._created:
self._c.execute("DROP USER %s" % self._user)
class TestAuthentication(base.PyMySQLTestCase):
socket_auth = False
socket_found = False
two_questions_found = False
three_attempts_found = False
pam_found = False
mysql_old_password_found = False
sha256_password_found = False
import os
osuser = os.environ.get('USER')
# socket auth requires the current user and for the connection to be a socket
# rest do grants @localhost due to incomplete logic - TODO change to @% then
db = base.PyMySQLTestCase.databases[0].copy()
socket_auth = db.get('unix_socket') is not None \
and db.get('host') in ('localhost', '127.0.0.1')
cur = pymysql.connect(**db).cursor()
del db['user']
cur.execute("SHOW PLUGINS")
for r in cur:
if (r[1], r[2]) != (u'ACTIVE', u'AUTHENTICATION'):
continue
if r[3] == u'auth_socket.so':
socket_plugin_name = r[0]
socket_found = True
elif r[3] == u'dialog_examples.so':
if r[0] == 'two_questions':
two_questions_found = True
elif r[0] == 'three_attempts':
three_attempts_found = True
elif r[0] == u'pam':
pam_found = True
pam_plugin_name = r[3].split('.')[0]
if pam_plugin_name == 'auth_pam':
pam_plugin_name = 'pam'
# MySQL: authentication_pam
# https://dev.mysql.com/doc/refman/5.5/en/pam-authentication-plugin.html
# MariaDB: pam
# https://mariadb.com/kb/en/mariadb/pam-authentication-plugin/
# Names differ but functionality is close
elif r[0] == u'mysql_old_password':
mysql_old_password_found = True
elif r[0] == u'sha256_password':
sha256_password_found = True
#else:
# print("plugin: %r" % r[0])
def test_plugin(self):
if not self.mysql_server_is(self.connections[0], (5, 5, 0)):
raise unittest2.SkipTest("MySQL-5.5 required for plugins")
cur = self.connections[0].cursor()
cur.execute("select plugin from mysql.user where concat(user, '@', host)=current_user()")
for r in cur:
self.assertIn(self.connections[0]._auth_plugin_name, (r[0], 'mysql_native_password'))
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipIf(socket_found, "socket plugin already installed")
def testSocketAuthInstallPlugin(self):
# needs plugin. lets install it.
cur = self.connections[0].cursor()
try:
cur.execute("install plugin auth_socket soname 'auth_socket.so'")
TestAuthentication.socket_found = True
self.socket_plugin_name = 'auth_socket'
self.realtestSocketAuth()
except pymysql.err.InternalError:
try:
cur.execute("install soname 'auth_socket'")
TestAuthentication.socket_found = True
self.socket_plugin_name = 'unix_socket'
self.realtestSocketAuth()
except pymysql.err.InternalError:
TestAuthentication.socket_found = False
raise unittest2.SkipTest('we couldn\'t install the socket plugin')
finally:
if TestAuthentication.socket_found:
cur.execute("uninstall plugin %s" % self.socket_plugin_name)
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(socket_found, "no socket plugin")
def testSocketAuth(self):
self.realtestSocketAuth()
def realtestSocketAuth(self):
with TempUser(self.connections[0].cursor(), TestAuthentication.osuser + '@localhost',
self.databases[0]['db'], self.socket_plugin_name) as u:
c = pymysql.connect(user=TestAuthentication.osuser, **self.db)
class Dialog(object):
fail=False
def __init__(self, con):
self.fail=TestAuthentication.Dialog.fail
pass
def prompt(self, echo, prompt):
if self.fail:
self.fail=False
return b'bad guess at a password'
return self.m.get(prompt)
class DialogHandler(object):
def __init__(self, con):
self.con=con
def authenticate(self, pkt):
while True:
flag = pkt.read_uint8()
echo = (flag & 0x06) == 0x02
last = (flag & 0x01) == 0x01
prompt = pkt.read_all()
if prompt == b'Password, please:':
self.con.write_packet(b'stillnotverysecret\0')
else:
self.con.write_packet(b'no idea what to do with this prompt\0')
pkt = self.con._read_packet()
pkt.check_error()
if pkt.is_ok_packet() or last:
break
return pkt
class DefectiveHandler(object):
def __init__(self, con):
self.con=con
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipIf(two_questions_found, "two_questions plugin already installed")
def testDialogAuthTwoQuestionsInstallPlugin(self):
# needs plugin. lets install it.
cur = self.connections[0].cursor()
try:
cur.execute("install plugin two_questions soname 'dialog_examples.so'")
TestAuthentication.two_questions_found = True
self.realTestDialogAuthTwoQuestions()
except pymysql.err.InternalError:
raise unittest2.SkipTest('we couldn\'t install the two_questions plugin')
finally:
if TestAuthentication.two_questions_found:
cur.execute("uninstall plugin two_questions")
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(two_questions_found, "no two questions auth plugin")
def testDialogAuthTwoQuestions(self):
self.realTestDialogAuthTwoQuestions()
def realTestDialogAuthTwoQuestions(self):
TestAuthentication.Dialog.fail=False
TestAuthentication.Dialog.m = {b'Password, please:': b'notverysecret',
b'Are you sure ?': b'yes, of course'}
with TempUser(self.connections[0].cursor(), 'pymysql_2q@localhost',
self.databases[0]['db'], 'two_questions', 'notverysecret') as u:
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_2q', **self.db)
pymysql.connect(user='pymysql_2q', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipIf(three_attempts_found, "three_attempts plugin already installed")
def testDialogAuthThreeAttemptsQuestionsInstallPlugin(self):
# needs plugin. lets install it.
cur = self.connections[0].cursor()
try:
cur.execute("install plugin three_attempts soname 'dialog_examples.so'")
TestAuthentication.three_attempts_found = True
self.realTestDialogAuthThreeAttempts()
except pymysql.err.InternalError:
raise unittest2.SkipTest('we couldn\'t install the three_attempts plugin')
finally:
if TestAuthentication.three_attempts_found:
cur.execute("uninstall plugin three_attempts")
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(three_attempts_found, "no three attempts plugin")
def testDialogAuthThreeAttempts(self):
self.realTestDialogAuthThreeAttempts()
def realTestDialogAuthThreeAttempts(self):
TestAuthentication.Dialog.m = {b'Password, please:': b'stillnotverysecret'}
TestAuthentication.Dialog.fail=True # fail just once. We've got three attempts after all
with TempUser(self.connections[0].cursor(), 'pymysql_3a@localhost',
self.databases[0]['db'], 'three_attempts', 'stillnotverysecret') as u:
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.DialogHandler}, **self.db)
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': object}, **self.db)
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.DefectiveHandler}, **self.db)
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'notdialogplugin': TestAuthentication.Dialog}, **self.db)
TestAuthentication.Dialog.m = {b'Password, please:': b'I do not know'}
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
TestAuthentication.Dialog.m = {b'Password, please:': None}
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipIf(pam_found, "pam plugin already installed")
@unittest2.skipIf(os.environ.get('PASSWORD') is None, "PASSWORD env var required")
@unittest2.skipIf(os.environ.get('PAMSERVICE') is None, "PAMSERVICE env var required")
def testPamAuthInstallPlugin(self):
# needs plugin. lets install it.
cur = self.connections[0].cursor()
try:
cur.execute("install plugin pam soname 'auth_pam.so'")
TestAuthentication.pam_found = True
self.realTestPamAuth()
except pymysql.err.InternalError:
raise unittest2.SkipTest('we couldn\'t install the auth_pam plugin')
finally:
if TestAuthentication.pam_found:
cur.execute("uninstall plugin pam")
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(pam_found, "no pam plugin")
@unittest2.skipIf(os.environ.get('PASSWORD') is None, "PASSWORD env var required")
@unittest2.skipIf(os.environ.get('PAMSERVICE') is None, "PAMSERVICE env var required")
def testPamAuth(self):
self.realTestPamAuth()
def realTestPamAuth(self):
db = self.db.copy()
import os
db['password'] = os.environ.get('PASSWORD')
cur = self.connections[0].cursor()
try:
cur.execute('show grants for ' + TestAuthentication.osuser + '@localhost')
grants = cur.fetchone()[0]
cur.execute('drop user ' + TestAuthentication.osuser + '@localhost')
except pymysql.OperationalError as e:
# assuming the user doesn't exist which is ok too
self.assertEqual(1045, e.args[0])
grants = None
with TempUser(cur, TestAuthentication.osuser + '@localhost',
self.databases[0]['db'], 'pam', os.environ.get('PAMSERVICE')) as u:
try:
c = pymysql.connect(user=TestAuthentication.osuser, **db)
db['password'] = 'very bad guess at password'
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user=TestAuthentication.osuser,
auth_plugin_map={b'mysql_cleartext_password': TestAuthentication.DefectiveHandler},
**self.db)
except pymysql.OperationalError as e:
self.assertEqual(1045, e.args[0])
# we had 'bad guess at password' work with pam. Well at least we get a permission denied here
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user=TestAuthentication.osuser,
auth_plugin_map={b'mysql_cleartext_password': TestAuthentication.DefectiveHandler},
**self.db)
if grants:
# recreate the user
cur.execute(grants)
# select old_password("crummy p\tassword");
#| old_password("crummy p\tassword") |
#| 2a01785203b08770 |
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(mysql_old_password_found, "no mysql_old_password plugin")
def testMySQLOldPasswordAuth(self):
if self.mysql_server_is(self.connections[0], (5, 7, 0)):
raise unittest2.SkipTest('Old passwords aren\'t supported in 5.7')
# pymysql.err.OperationalError: (1045, "Access denied for user 'old_pass_user'@'localhost' (using password: YES)")
# from login in MySQL-5.6
if self.mysql_server_is(self.connections[0], (5, 6, 0)):
raise unittest2.SkipTest('Old passwords don\'t authenticate in 5.6')
db = self.db.copy()
db['password'] = "crummy p\tassword"
with self.connections[0] as c:
# deprecated in 5.6
if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(self.connections[0], (5, 6, 0)):
with self.assertWarns(pymysql.err.Warning) as cm:
c.execute("SELECT OLD_PASSWORD('%s')" % db['password'])
else:
c.execute("SELECT OLD_PASSWORD('%s')" % db['password'])
v = c.fetchone()[0]
self.assertEqual(v, '2a01785203b08770')
# only works in MariaDB and MySQL-5.6 - can't separate out by version
#if self.mysql_server_is(self.connections[0], (5, 5, 0)):
# with TempUser(c, 'old_pass_user@localhost',
# self.databases[0]['db'], 'mysql_old_password', '2a01785203b08770') as u:
# cur = pymysql.connect(user='old_pass_user', **db).cursor()
# cur.execute("SELECT VERSION()")
c.execute("SELECT @@secure_auth")
secure_auth_setting = c.fetchone()[0]
c.execute('set old_passwords=1')
# pymysql.err.Warning: 'pre-4.1 password hash' is deprecated and will be removed in a future release. Please use post-4.1 password hash instead
if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(self.connections[0], (5, 6, 0)):
with self.assertWarns(pymysql.err.Warning) as cm:
c.execute('set global secure_auth=0')
else:
c.execute('set global secure_auth=0')
with TempUser(c, 'old_pass_user@localhost',
self.databases[0]['db'], password=db['password']) as u:
cur = pymysql.connect(user='old_pass_user', **db).cursor()
cur.execute("SELECT VERSION()")
c.execute('set global secure_auth=%r' % secure_auth_setting)
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(sha256_password_found, "no sha256 password authentication plugin found")
def testAuthSHA256(self):
c = self.connections[0].cursor()
with TempUser(c, 'pymysql_sha256@localhost',
self.databases[0]['db'], 'sha256_password') as u:
if self.mysql_server_is(self.connections[0], (5, 7, 0)):
c.execute("SET PASSWORD FOR 'pymysql_sha256'@'localhost' ='Sh@256Pa33'")
else:
c.execute('SET old_passwords = 2')
c.execute("SET PASSWORD FOR 'pymysql_sha256'@'localhost' = PASSWORD('Sh@256Pa33')")
db = self.db.copy()
db['password'] = "Sh@256Pa33"
            # not implemented yet so it throws an error
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_256', **db)
class TestConnection(base.PyMySQLTestCase):
def test_utf8mb4(self):
"""This test requires MySQL >= 5.5"""
arg = self.databases[0].copy()
arg['charset'] = 'utf8mb4'
conn = pymysql.connect(**arg)
def test_largedata(self):
"""Large query and response (>=16MB)"""
cur = self.connections[0].cursor()
cur.execute("SELECT @@max_allowed_packet")
if cur.fetchone()[0] < 16*1024*1024 + 10:
print("Set max_allowed_packet to bigger than 17MB")
return
t = 'a' * (16*1024*1024)
cur.execute("SELECT '" + t + "'")
assert cur.fetchone()[0] == t
def test_autocommit(self):
con = self.connections[0]
self.assertFalse(con.get_autocommit())
cur = con.cursor()
cur.execute("SET AUTOCOMMIT=1")
self.assertTrue(con.get_autocommit())
con.autocommit(False)
self.assertFalse(con.get_autocommit())
cur.execute("SELECT @@AUTOCOMMIT")
self.assertEqual(cur.fetchone()[0], 0)
def test_select_db(self):
con = self.connections[0]
current_db = self.databases[0]['db']
other_db = self.databases[1]['db']
cur = con.cursor()
cur.execute('SELECT database()')
self.assertEqual(cur.fetchone()[0], current_db)
con.select_db(other_db)
cur.execute('SELECT database()')
self.assertEqual(cur.fetchone()[0], other_db)
def test_connection_gone_away(self):
"""
http://dev.mysql.com/doc/refman/5.0/en/gone-away.html
http://dev.mysql.com/doc/refman/5.0/en/error-messages-client.html#error_cr_server_gone_error
"""
con = self.connect()
cur = con.cursor()
cur.execute("SET wait_timeout=1")
time.sleep(2)
with self.assertRaises(pymysql.OperationalError) as cm:
cur.execute("SELECT 1+1")
        # the error occurs while reading, not writing, because of the socket buffer.
#self.assertEqual(cm.exception.args[0], 2006)
self.assertIn(cm.exception.args[0], (2006, 2013))
def test_init_command(self):
conn = self.connect(
init_command='SELECT "bar"; SELECT "baz"',
client_flag=CLIENT.MULTI_STATEMENTS)
c = conn.cursor()
c.execute('select "foobar";')
self.assertEqual(('foobar',), c.fetchone())
conn.close()
with self.assertRaises(pymysql.err.Error):
conn.ping(reconnect=False)
def test_read_default_group(self):
conn = self.connect(
read_default_group='client',
)
self.assertTrue(conn.open)
def test_context(self):
with self.assertRaises(ValueError):
c = self.connect()
with c as cur:
cur.execute('create table test ( a int ) ENGINE=InnoDB')
c.begin()
cur.execute('insert into test values ((1))')
raise ValueError('pseudo abort')
c.commit()
c = self.connect()
with c as cur:
cur.execute('select count(*) from test')
self.assertEqual(0, cur.fetchone()[0])
cur.execute('insert into test values ((1))')
with c as cur:
cur.execute('select count(*) from test')
self.assertEqual(1,cur.fetchone()[0])
cur.execute('drop table test')
def test_set_charset(self):
c = self.connect()
c.set_charset('utf8mb4')
# TODO validate setting here
def test_defer_connect(self):
import socket
d = self.databases[0].copy()
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(d['unix_socket'])
except KeyError:
sock.close()
sock = socket.create_connection(
(d.get('host', 'localhost'), d.get('port', 3306)))
for k in ['unix_socket', 'host', 'port']:
try:
del d[k]
except KeyError:
pass
c = pymysql.connect(defer_connect=True, **d)
self.assertFalse(c.open)
c.connect(sock)
c.close()
sock.close()
@unittest2.skipUnless(sys.version_info[0:2] >= (3,2), "required py-3.2")
def test_no_delay_warning(self):
current_db = self.databases[0].copy()
current_db['no_delay'] = True
with self.assertWarns(DeprecationWarning) as cm:
conn = pymysql.connect(**current_db)
# A custom type and function to escape it
class Foo(object):
value = "bar"
def escape_foo(x, d):
return x.value
class TestEscape(base.PyMySQLTestCase):
def test_escape_string(self):
con = self.connections[0]
cur = con.cursor()
self.assertEqual(con.escape("foo'bar"), "'foo\\'bar'")
# added NO_AUTO_CREATE_USER as not including it in 5.7 generates warnings
# mysql-8.0 removes the option however
if self.mysql_server_is(con, (8, 0, 0)):
cur.execute("SET sql_mode='NO_BACKSLASH_ESCAPES'")
else:
cur.execute("SET sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'")
self.assertEqual(con.escape("foo'bar"), "'foo''bar'")
def test_escape_builtin_encoders(self):
con = self.connections[0]
cur = con.cursor()
val = datetime.datetime(2012, 3, 4, 5, 6)
self.assertEqual(con.escape(val, con.encoders), "'2012-03-04 05:06:00'")
def test_escape_custom_object(self):
con = self.connections[0]
cur = con.cursor()
mapping = {Foo: escape_foo}
self.assertEqual(con.escape(Foo(), mapping), "bar")
def test_escape_fallback_encoder(self):
con = self.connections[0]
cur = con.cursor()
class Custom(str):
pass
mapping = {text_type: pymysql.escape_string}
self.assertEqual(con.escape(Custom('foobar'), mapping), "'foobar'")
def test_escape_no_default(self):
con = self.connections[0]
cur = con.cursor()
self.assertRaises(TypeError, con.escape, 42, {})
def test_escape_dict_value(self):
con = self.connections[0]
cur = con.cursor()
mapping = con.encoders.copy()
mapping[Foo] = escape_foo
self.assertEqual(con.escape({'foo': Foo()}, mapping), {'foo': "bar"})
def test_escape_list_item(self):
con = self.connections[0]
cur = con.cursor()
mapping = con.encoders.copy()
mapping[Foo] = escape_foo
self.assertEqual(con.escape([Foo()], mapping), "(bar)")
def test_previous_cursor_not_closed(self):
con = self.connect(
init_command='SELECT "bar"; SELECT "baz"',
client_flag=CLIENT.MULTI_STATEMENTS)
cur1 = con.cursor()
cur1.execute("SELECT 1; SELECT 2")
cur2 = con.cursor()
cur2.execute("SELECT 3")
self.assertEqual(cur2.fetchone()[0], 3)
def test_commit_during_multi_result(self):
con = self.connect(client_flag=CLIENT.MULTI_STATEMENTS)
cur = con.cursor()
cur.execute("SELECT 1; SELECT 2")
con.commit()
cur.execute("SELECT 3")
self.assertEqual(cur.fetchone()[0], 3)
| [
"[email protected]"
] | |
62c20ca9fb15d381b187ac793e03b1b5242e6d37 | 495b0b8de3ecc341511cdb10f11368b35b585bea | /SoftLayer/CLI/modules/filters.py | 1e4274ac04ae064468c5d1d0736b540b8f35416c | [] | no_license | hugomatic/softlayer-api-python-client | cf6c1e6bfa32e559e72f8b0b069339ae8edd2ede | 9c115f0912ee62763b805941593f6dd50de37068 | refs/heads/master | 2021-01-18T11:09:19.122162 | 2013-04-09T01:44:51 | 2013-04-09T01:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | """
usage: sl help filters
Filters are used to limit the number of results. Some commands will accept a
filter operation for certain fields. Filters can be applied across multiple
fields in most cases.
Available Operations:
Case Insensitive
'value' Exact value match
'value*' Begins with value
'*value' Ends with value
'*value*' Contains value
Case Sensitive
'~ value' Exact value match
'> value' Greater than value
'< value' Less than value
'>= value' Greater than or equal to value
'<= value' Less than or equal to value
Examples:
sl cci list --datacenter=dal05
sl cci list --hostname='prod*'
sl cci list --network=100 --cpu=2
sl cci list --network='< 100' --cpu=2
sl cci list --memory='>= 2048'
Note: Comparison operators (>, <, >=, <=) can be used with integers, floats,
and strings.
"""
# :copyright: (c) 2013, SoftLayer Technologies, Inc. All rights reserved.
# :license: BSD, see LICENSE for more details.
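# Illustrative sketch only -- not part of the SoftLayer CLI itself. It shows one
# way the operations documented above could be interpreted client-side; the helper
# name `matches` and its exact semantics are assumptions for illustration.
def matches(spec, value):
    """Return True when `value` satisfies a filter spec such as '>= 2048' or '*value*'."""
    spec = spec.strip()
    # Case-sensitive comparison operators, longest prefix checked first.
    for op, test in ((">=", lambda a, b: a >= b), ("<=", lambda a, b: a <= b),
                     (">", lambda a, b: a > b), ("<", lambda a, b: a < b)):
        if spec.startswith(op + " "):
            target = spec[len(op):].strip()
            try:
                return test(float(value), float(target))
            except (TypeError, ValueError):
                return test(str(value), target)
    if spec.startswith("~ "):                  # case-sensitive exact match
        return str(value) == spec[2:]
    s, v = spec.lower(), str(value).lower()
    if s.startswith("*") and s.endswith("*"):  # contains value
        return s.strip("*") in v
    if s.startswith("*"):                      # ends with value
        return v.endswith(s[1:])
    if s.endswith("*"):                        # begins with value
        return v.startswith(s[:-1])
    return v == s                              # case-insensitive exact match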
| [
"[email protected]"
] | |
af585888517df64c46a62653fa6ff3912e6b9f0d | 508c5e01aa7dce530093d5796250eff8d74ba06c | /code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tests/test_schema_get.py | d39692be0008269bf1791e585f1e0e92b09181fa | [
"MIT",
"PostgreSQL"
] | permissive | jhkuang11/UniTrade | f220b0d84db06ff17626b3daa18d4cb8b72a5d3f | 5f68b853926e167936b58c8543b8f95ebd6f5211 | refs/heads/master | 2022-12-12T15:58:30.013516 | 2019-02-01T21:07:15 | 2019-02-01T21:07:15 | 166,479,655 | 0 | 0 | MIT | 2022-12-07T03:59:47 | 2019-01-18T22:19:45 | Python | UTF-8 | Python | false | false | 2,132 | py | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.browser.server_groups.servers.tests import utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
class SchemaGetTestCase(BaseTestGenerator):
""" This class will add new schema under database node. """
scenarios = [
# Fetching default URL for extension node.
('Check Schema Node URL', dict(url='/browser/schema/obj/'))
]
def runTest(self):
""" This function will delete schema under database node. """
schema = parent_node_dict["schema"][-1]
db_id = schema["db_id"]
server_id = schema["server_id"]
server_response = server_utils.connect_server(self, server_id)
if not server_response["data"]["connected"]:
raise Exception("Could not connect to server to connect the"
" database.")
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
server_id,
db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database to get the schema.")
schema_id = schema["schema_id"]
schema_response = self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(server_id) + '/' + str(db_id) +
'/' + str(schema_id),
content_type='html/json')
self.assertEquals(schema_response.status_code, 200)
# Disconnect the database
database_utils.disconnect_database(self, server_id, db_id)
| [
"[email protected]"
] | |
11aa915574de5fc4f11f5c7671205cfbaa964fe2 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/copp/lacpallowhist5min.py | 2d5afaedb106d24fcc43463d8548e0ce36b681e4 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,598 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LacpAllowHist5min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.copp.LacpAllowHist5min", "Per Interface Allow Counters for Lacp")
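    # Each CounterMeta below declares one statistic (allowed bytes/packets and their rates) and wires its derived readings -- cumulative, periodic, min, max, avg, suspect, thresholded, trend, rate -- to the implicit properties registered further down in this generated class.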
counter = CounterMeta("bytesRate", CounterCategory.GAUGE, "bytes-per-second", "LacpAllowed Bytes rate")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesRateSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesRateTr"
meta._counters.append(counter)
counter = CounterMeta("bytes", CounterCategory.COUNTER, "bytes", "LacpAllowed Bytes")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "bytesCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "bytesPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "bytesRate"
meta._counters.append(counter)
counter = CounterMeta("pktsRate", CounterCategory.GAUGE, "packets-per-second", "LacpAllowed Packets rate")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsRateSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsRateTr"
meta._counters.append(counter)
counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "LacpAllowed Packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate"
meta._counters.append(counter)
meta.moClassName = "coppLacpAllowHist5min"
meta.rnFormat = "HDcoppLacpAllow5min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Per Interface Allow Counters for Lacp stats in 5 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.svi.If")
meta.parentClasses.add("cobra.model.pc.AggrIf")
meta.parentClasses.add("cobra.model.l1.PhysIf")
meta.parentClasses.add("cobra.model.l3.RtdIf")
meta.parentClasses.add("cobra.model.l3.EncRtdIf")
meta.superClasses.add("cobra.model.copp.LacpAllowHist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDcoppLacpAllow5min-', True),
]
prop = PropMeta("str", "bytesAvg", "bytesAvg", 32068, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Bytes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesAvg", prop)
prop = PropMeta("str", "bytesCum", "bytesCum", 32064, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LacpAllowed Bytes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesCum", prop)
prop = PropMeta("str", "bytesMax", "bytesMax", 32067, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Bytes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMax", prop)
prop = PropMeta("str", "bytesMin", "bytesMin", 32066, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Bytes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMin", prop)
prop = PropMeta("str", "bytesPer", "bytesPer", 32065, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LacpAllowed Bytes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesPer", prop)
prop = PropMeta("str", "bytesRate", "bytesRate", 32072, PropCategory.IMPLICIT_RATE)
prop.label = "LacpAllowed Bytes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRate", prop)
prop = PropMeta("str", "bytesRateAvg", "bytesRateAvg", 32084, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Bytes rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateAvg", prop)
prop = PropMeta("str", "bytesRateMax", "bytesRateMax", 32083, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Bytes rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMax", prop)
prop = PropMeta("str", "bytesRateMin", "bytesRateMin", 32082, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Bytes rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMin", prop)
prop = PropMeta("str", "bytesRateSpct", "bytesRateSpct", 32085, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Bytes rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateSpct", prop)
prop = PropMeta("str", "bytesRateThr", "bytesRateThr", 32086, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Bytes rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesRateThr", prop)
prop = PropMeta("str", "bytesRateTr", "bytesRateTr", 32087, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Bytes rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateTr", prop)
prop = PropMeta("str", "bytesSpct", "bytesSpct", 32069, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Bytes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesSpct", prop)
prop = PropMeta("str", "bytesThr", "bytesThr", 32070, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Bytes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesThr", prop)
prop = PropMeta("str", "bytesTr", "bytesTr", 32071, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Bytes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesTr", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 31203, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pktsAvg", "pktsAvg", 32104, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsAvg", prop)
prop = PropMeta("str", "pktsCum", "pktsCum", 32100, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LacpAllowed Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsCum", prop)
prop = PropMeta("str", "pktsMax", "pktsMax", 32103, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMax", prop)
prop = PropMeta("str", "pktsMin", "pktsMin", 32102, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMin", prop)
prop = PropMeta("str", "pktsPer", "pktsPer", 32101, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LacpAllowed Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsPer", prop)
prop = PropMeta("str", "pktsRate", "pktsRate", 32108, PropCategory.IMPLICIT_RATE)
prop.label = "LacpAllowed Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRate", prop)
prop = PropMeta("str", "pktsRateAvg", "pktsRateAvg", 32120, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Packets rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateAvg", prop)
prop = PropMeta("str", "pktsRateMax", "pktsRateMax", 32119, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Packets rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMax", prop)
prop = PropMeta("str", "pktsRateMin", "pktsRateMin", 32118, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Packets rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMin", prop)
prop = PropMeta("str", "pktsRateSpct", "pktsRateSpct", 32121, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Packets rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateSpct", prop)
prop = PropMeta("str", "pktsRateThr", "pktsRateThr", 32122, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Packets rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsRateThr", prop)
prop = PropMeta("str", "pktsRateTr", "pktsRateTr", 32123, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Packets rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateTr", prop)
prop = PropMeta("str", "pktsSpct", "pktsSpct", 32105, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsSpct", prop)
prop = PropMeta("str", "pktsThr", "pktsThr", 32106, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsThr", prop)
prop = PropMeta("str", "pktsTr", "pktsTr", 32107, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l1EthIfToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
6c16d977d5da188d8203250fd478cfac76c891cc | 85c9d6fdff58b9cb40f5fdb9f01ff1a0dd386113 | /bot_tests/reminder.py | ef7aa772e1bbf39b40113c0d3d7e94d3036748d1 | [] | no_license | jmccormac01/karmafleet | 5874644c496b0bbcb2037404ad7ed43a1e4caaae | 57ebefbbc6ec3aae634cd9196950f103d48eae95 | refs/heads/master | 2020-03-25T17:24:39.187176 | 2019-04-20T18:17:05 | 2019-04-20T18:17:05 | 143,976,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | """
Bot for registering reminders and sending them back to the author when they fall due
"""
from datetime import datetime
from pytz import timezone
import discord
from discord.ext import commands
import asyncio
# pylint: disable=invalid-name
Client = discord.Client()
client = commands.Bot(command_prefix="!")
reminders = {}
async def reminder_handler(reminders):
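    # Background task: every 10 seconds, scan each author's pending reminders and send the note back once its time has passed, then drop it from the dict.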
await client.wait_until_ready()
while not client.is_closed:
broke = False
print('Checking reminders...')
print(reminders)
now = datetime.utcnow()
for a in reminders:
print('Checking for author {}'.format(a))
for t in reminders[a]:
if now > t:
print(a, reminders[a][t])
await client.send_message(a, reminders[a][t])
# remove the reminder from the list
del reminders[a][t]
broke = True
break
if broke:
break
await asyncio.sleep(10)
@client.event
async def on_ready():
"""
Simple print to say we're ready
"""
print('Ready for remembering stuff...')
@client.event
async def on_message(message):
"""
    Handle incoming messages and register reminder requests
"""
sp = message.content.split()
return_message = ""
error_count = 0
# check we want time conversion from eve time
if len(sp) >= 3 and sp[0].lower() == '!reminder':
author = message.author
await client.delete_message(message)
# split the command up
reminder_time = datetime.strptime(sp[1], '%Y-%m-%dT%H:%M')
note = ' '.join(sp[2:])
if author not in reminders.keys():
reminders[author] = {}
reminders[author][reminder_time] = note
print(reminders)
client.loop.create_task(reminder_handler(reminders))
client.run('NDk0OTQ2Mzg3ODM5MDI1MTYz.Do66Yw.nsleHS3S8UvbWdBugiDtPWHrIKY')
| [
"[email protected]"
] | |
98239088c3b4a53c50df2bc9f8bf239942107bf9 | a36d54fb56bc2898089d6ad407bc2039a55271d4 | /zdevicemanager/base/tools.py | 8385f630bed268e1b477abec92e22fe0662faa58 | [] | no_license | zerynth/core-zerynth-toolchain | 443e5180d87b3b783c2b3ec69f24918761715b63 | d27b0d6ee47b9c4f320f518705074f1032fedf8a | refs/heads/master | 2021-07-25T00:28:00.192322 | 2021-05-17T14:53:20 | 2021-05-17T14:53:20 | 122,219,458 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,153 | py | from .base import *
from .fs import *
from .cfg import *
from .pygtrie import *
__all__ = ["tools"]
class Tools():
def __init__(self):
self.tools = {}
self.installed = {}
def init(self):
#register platform tools
if env.is_windows():
self.tools["stty"]="mode"
elif env.is_linux():
self.tools["stty"]="/bin/stty -F"
else:
self.tools["stty"]="/bin/stty -f"
for tooldir in fs.dirs(env.sys):
self.add_tool(tooldir)
for tooldir in fs.dirs(fs.path(env.dist,"sys")):
self.add_tool(tooldir)
ifile = fs.path(env.dist,"installed.json")
self.installed = fs.get_json(ifile)
def get_package(self,fullname):
return env.repo["packs"][env.repo["byname"][fullname]]
def get_packages_by_tag(self,tag):
idx = env.repo["bytag"][tag]
res = set()
for i in idx:
pack = env.repo["packs"][i]
if pack.get("sys") and pack.get("sys")!=env.platform:
# skip other platforms
continue
res.add(pack["fullname"])
return sorted(list(res))
def get_package_deps(self,fullname):
try:
pack = self.get_package(fullname)
except:
pack = {}
res = []
for dep in pack.get("deps",[]):
res.extend(self.get_packages_by_tag(dep))
res = sorted(list(set(res)))
return res
def has_all_deps(self,fullname):
deps = self.get_package_deps(fullname)
for fname in deps:
if fname not in self.installed:
return False
return True
def get_pack_info(self,packdir):
pfiles = [fs.path(packdir,"z.yml"), fs.path(packdir,"package.json")]
for pfile in pfiles:
if fs.exists(pfile):
pkg = fs.get_yaml_or_json(pfile)
return pkg
return None
def add_tool(self,tooldir):
if fs.basename(tooldir) in ["browser","newbrowser","newpython"]:
# ignore some sys packages
return
try:
pkg = self.get_pack_info(tooldir)
if pkg is None:
warning("Can't load tool package",tooldir)
return
else:
fullname = pkg["fullname"]
toolname = pkg.get("tool")
pkg = pkg["sys"]
except Exception as e:
warning("Can't load tool",tooldir,e)
return
if toolname:
self.tools[toolname]={}
addto = self.tools[toolname]
else:
addto = self.tools
if isinstance(pkg,dict):
for k,v in pkg.items():
addto[k]=fs.path(env.sys,tooldir,v)
elif isinstance(pkg,list) or isinstance(pkg,tuple):
for k,v in pkg:
addto[k]=fs.path(env.sys,tooldir,v)
else:
warning("Can't load tool info",tooldir,err=True)
#print(self.tools)
def get_tool_dir(self,toolname):
for tooldir in fs.dirs(env.sys):
if fs.basename(tooldir)==toolname:
return tooldir
for tooldir in fs.dirs(fs.path(env.dist,"sys")):
if fs.basename(tooldir)==toolname:
return tooldir
return None
def __getattr__(self,attr):
if attr in self.tools:
return self.tools[attr]
raise AttributeError
def __getitem__(self,attr):
if attr in self.tools:
return self.tools[attr]
raise KeyError
def get_vm(self,vmuid,version,chipid,target):
vmpath = fs.path(env.vms,target,chipid)
vmfs = fs.glob(vmpath,"*.vm")
vm = None
for vmf in vmfs:
vmm = fs.basename(vmf)
if vmm.startswith(vmuid+"_"+version+"_"):
vm=vmf
return vm
def get_vm_by_uid(self,vmuid):
#for root,dirnames,files in os.walk(fs.path(env.vms)):
for target in fs.dirs(env.vms):
for chid in fs.dirs(fs.path(env.vms,target)):
for ff in fs.files(fs.path(env.vms,target,chid)):
path_splitted = ff.split('/')
ff_ = fs.basename(ff)
if ff_.startswith(vmuid+"_"):
return fs.path(ff)
return None
def get_vms(self,target,chipid=None,full_info=False):
vms = {}
targetpath = fs.path(env.vms,target)
if not fs.exists(targetpath):
return vms
for chid in fs.dirs(targetpath):
chid=fs.basename(chid)
if chipid and chipid!=chid:
continue
vmfs = fs.glob(fs.path(targetpath,chid),"*.vm")
for vmf in vmfs:
vmbf = fs.basename(vmf)
rpos = vmbf.rfind("_") #rtos
hpos = vmbf.rfind("_",0,rpos-1) #hash
vpos = vmbf.rfind("_",0,hpos-1) #version
vmrtos = vmbf[rpos+1:-3]
vmhash = vmbf[hpos+1:rpos]
vmversion = vmbf[vpos+1:hpos]
vmuid = vmbf[0:vpos] #TODO: add check
if full_info:
vms[vmuid]=(vmf,vmversion,vmrtos,vmhash)
else:
vms[vmuid]=vmf
return vms
def get_vm_by_prefix(self,vmuid):
#for root,dirnames,files in os.walk(fs.path(env.vms)):
res = []
for target in fs.dirs(env.vms):
for chid in fs.dirs(fs.path(env.vms,target)):
for ff in fs.files(fs.path(env.vms,target,chid)):
path_splitted = ff.split('/')
ff_ = fs.basename(ff)
if ff_.startswith(vmuid):
res.append(fs.path(ff))
return res
def _parse_order(self,path):
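        # order.txt layout: a line of '#' characters plus a label sets the current tag path (nesting depth = number of '#'); every other line names an example directory filed under the current tag stack.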
        try:
            order = fs.readfile(fs.path(path,"order.txt"))
        except:
            debug("Can't open order.txt at",path)
            return []
lines = order.split("\n")
stack = []
rs = []
for line in lines:
line = line.strip()
if not line or len(line)<4 or line.startswith(";"):
continue
pos = line.count("#")
if pos>0:
label = line[pos:]
while (len(stack)>=(pos)): stack.pop()
stack.append(label)
else:
try:
ex = {
"tag":list(stack),
"name":line.replace("_"," "),
"path":fs.path(path,line),
"desc":fs.readfile(fs.path(path,line,"project.md")),
"code":fs.readfile(fs.path(path,line,"main.py")),
}
rs.append(ex)
except:
pass
return rs
def _get_examples(self,path):
return self._parse_order(path)
def get_examples(self):
exs = {}
exr = []
srcs = [(fs.path(env.stdlib,"examples"),"core.zerynth.stdlib")]
repos = fs.dirs(env.libs)
if "official" in repos: #put official on top
repos.remove("official")
repos = ["official"]+repos
for repo in repos:
nms = fs.dirs(repo)
for nm in nms:
libs = fs.dirs(nm)
for lib in libs:
srcs.append((fs.path(lib,"examples"),"lib."+fs.basename(nm)+"."+fs.basename(lib)))
for exlib,lib in srcs:
if fs.exists(exlib):
ee = self._get_examples(exlib)
for eee in ee:
eee["lib"]=lib
exr.extend(ee)
return exr
def get_devices(self):
bdirs = fs.dirs(env.devices)
for bdir in bdirs:
try:
pkg = self.get_pack_info(bdir)
if pkg is None:
continue
bj = fs.get_json(fs.path(bdir,"device.json"))
bj["path"] = bdir
bj["deps"] = self.get_package_deps(pkg["fullname"])
bj["has_all_deps"] = self.has_all_deps(pkg["fullname"])
bj["fullname"] = pkg["fullname"]
yield bj
except Exception as e:
warning(e)
#load custom devices
cdirs = fs.dirs(env.cvm)
for cdir in cdirs:
if not fs.exists(fs.path(cdir,"active")):
#not compiled yet, skip
continue
try:
pkg = self.get_pack_info(bdir)
if pkg is None:
continue
bj = fs.get_json(fs.path(cdir,"device.json"))
bj["path"] = cdir
bj["deps"] = self.get_package_deps(pkg["fullname"])
bj["has_all_deps"] = self.has_all_deps(pkg["fullname"])
bj["fullname"] = pkg["fullname"]
yield bj
except Exception as e:
warning(e)
def get_specs(self,specs):
options = {}
for spec in specs:
pc = spec.find(":")
if pc<0:
fatal("invalid spec format. Give key:value")
thespec = spec[pc+1:]
if thespec=="null":
thespec=None
options[spec[:pc]]=thespec
return options
def get_target(self,target,options={}):
import devices
_dsc = devices.Discover()
return _dsc.get_target(target,options)
def get_modules(self):
res = {}
# libraries
rdirs = fs.dirs(env.libs)
for r in rdirs:
repo = fs.basename(r)
nsdirs = fs.dirs(r)
for ns in nsdirs:
namespace = fs.basename(ns)
lbdirs = fs.dirs(ns)
for l in lbdirs:
lib = fs.basename(l)
if repo=="official":
if namespace=="zerynth":
module = lib
else:
module = namespace+"."+lib
else:
module = repo+"."+namespace+"."+lib
imports = []
for f in fs.files(l):
fl = fs.basename(f)
if fl.endswith(".py") and fl!="main.py":
imports.append(fl[0:-3])
res[module]=imports
return res
def get_vhal(self):
vhal = {}
arch_dirs = fs.dirs(env.vhal)
for ad in arch_dirs:
fmdirs = fs.dirs(ad)
for fm in fmdirs:
vhal_file = fs.path(fm,"vhal.json")
if fs.exists(vhal_file):
vj = fs.get_json(vhal_file)
vhal.update(vj)
return vhal
def disk_usage(self):
bytes = fs.dir_size(env.home)
return bytes
#fs.set_json(rj["data"], fs.path(vmpath,uid+"_"+version+"_"+rj["data"]["hash_features"]+"_"+rj["data"]["rtos"]+".vm"))
tools = Tools()
# add_init(tools.init)
| [
"[email protected]"
] | |
41dfb043debbb31d564d9bdcdda0dd997a4a98a5 | dca5705c291da76cbfaf3897680eb0ae2eb56e2b | /aayushg_assgn/myauth/views.py | face35c4566395dead6248d30c8430cf8b2fedf8 | [] | no_license | gadia-aayush/Django-API-1 | 41a40598653009def8ca5bda9a578a26b8bf9115 | 307202ad0aa4357408e756cd74f3723e74fca253 | refs/heads/master | 2022-12-13T23:09:45.960562 | 2020-08-30T19:36:16 | 2020-08-30T19:36:16 | 273,763,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,723 | py | from django.shortcuts import render
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.contrib.auth import authenticate, login, logout
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework import views
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
import re
def user_login(request):
if request.method == 'POST':
username = request.POST.get('phone')
password = request.POST.get('password')
user = authenticate(username = username, password = password)
if user :
if user.is_active:
login(request,user)
data = {"code" : 200, "status" : "OK", "message" : "LogIn Successfull"}
return JsonResponse(data)
else:
data = {"code" : 403, "status" : "Forbidden", "message" : "User Disabled"}
return JsonResponse(data)
else:
data = {"code" : 401, "status" : "Unauthorized", "message" : "Invalid Login Credentials"}
return JsonResponse(data)
else:
return render(request,'login.html')
# Django Rest Framework used
class logout(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request):
user = request.user
token = Token.objects.get(user=user)
if token:
token.delete()
data = {"code" : 200, "status" : "OK", "message" : "Log Out Successfull"}
return Response(data)
def user_signup(request):
if request.method == 'POST':
username = request.POST.get('phone')
password = request.POST.get('password')
name = request.POST.get('name')
email = request.POST.get('email')
#validate whether the phone number is registered or not
try:
if User.objects.get(username = username):
data = {"code" : 403, "status" : "Forbidden", "message" : "Entered Mobile Number is already registered. Try loggin-in"}
return JsonResponse(data)
except:
pass
        #validate mobile number [must be 10 digits; Indian numbers are assumed, so country-code prefixes are ignored]
phoneregex = re.compile(r'^[1-9]\d{9}$')
if phoneregex.search(str(username)):
pass
else:
data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Mobile Number should be of 10 digits- ^[1-9]\d{9}$"}
return JsonResponse(data)
#validate name, making sure it is not empty
firstregex = re.compile(r"^[A-Za-z][A-Za-z,.'].*$")
if firstregex.search(str(name)):
pass
else:
data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Name should start with an alphabet- ^[A-Za-z][A-Za-z,.']*$"}
return JsonResponse(data)
#validate email address
emailregex = re.compile(r"^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$")
if str(email) != "":
if emailregex.search(str(email)):
pass
else:
data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Enter a valid email address- ^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$"}
return JsonResponse(data)
#validate password
passregex = re.compile(r"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$")
if passregex.search(str(password)):
pass
else:
data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Password should be between 8 to 15 characters which contain at least one lowercase letter, one uppercase letter, one numeric digit, and one special character- ^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$"}
return JsonResponse(data)
authobj = User.objects.create_user(username = username, password = password, first_name = name, email = email)
authobj.save()
data = {"code" : 201, "status" : "Created", "message" : "Sign-Up Successfull"}
return JsonResponse(data)
else:
return render(request,'user_signup.html')
# Django Rest Framework used
@api_view(['POST', ])
def get_token(request):
if request.method == 'POST':
username = request.data.get('phone')
password = request.data.get('password')
user = authenticate(username = username, password = password)
if user :
if user.is_active:
tokened = Token.objects.filter(user=user)
data = {}
if tokened.count()>0:
data["code"] = 200
data["status"] = "OK"
data["message"] = "Token already Exists"
data["phone"] = username
data["Token"] = tokened[0].key
return Response(data)
else:
token = Token.objects.create(user=user)
data["code"] = 201
data["status"] = "Created"
data["message"] = "Token Created"
data["Token"] = token.key
data["phone"] = username
return Response(data)
else:
data = {"code" : 403, "status" : "Forbidden", "message" : "User Disabled"}
return Response(data)
else:
data = {"code" : 401, "status" : "Unauthorized", "message" : "Invalid Login Credentials"}
return Response(data)
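# Illustrative client flow (not part of this module): POST phone/password to the
# get_token endpoint, then send the returned key as "Authorization: Token <key>"
# on later requests -- that is what lets the token-protected logout view above
# resolve request.user (standard DRF TokenAuthentication behaviour).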
| [
"[email protected]"
] | |
ab04985a81690a29fc99f93e08d4a4ec4e364ad5 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_004_20180618143456.py | c999da2e6ae97112548cc81b5e4e3de4c117dc62 | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | from random import randint
# Sudoku1 almost solved
sudoku1 = [
[5, 9, 8, 6, 1, 2, 3, 4, 7],
[2, 1, 7, 9, 3, 4, 8, 6, 5],
[6, 4, 3, 5, 8, 7, 1, 2, 9],
[1, 6, 5, 4, 9, 8, 2, 7, 3],
[3, 2, 9, 7, 6, 5, 4, 1, 8],
[7, 8, 4, 3, 2, 1, 5, 9, 6],
[8, 3, 1, 2, 7, 6, 9, 5, 4],
[4, 7, 2, 8, 5, 9, 6, 3, 1],
[9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2]
]
# Sudoku 2 almost solved
# row1 = [9,8,7,4,3,2,5,6,1]
# row2 = [2,4,3,5,1,6,8,7,9]
# row3 = [5,6,1,7,9,8,4,3,2]
# row4 = [3,9,5,6,4,7,2,1,8]
# row5 = [8,2,4,3,5,1,6,9,7]
# row6 = [1,7,6,2,8,9,3,4,5]
# row7 = [7,1,2,8,6,3,9,5,4]
# row8 = [4,3,8,9,7,5,1,2,6]
# row9 = [' ',5,' ',' ',2,' ',7,' ',' ']
def printSudoku():
i = 0
while i < 10:
if i == 0:
print(" 1 2 3 4 5 6 7 8 9")
print(" -------------------------")
elif i == 3 or i == 6 or i == 9:
print(" -------------------------")
spaceBar = "|"
if i < 9:
print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku1[i], spaceBar,i+1))
i = i + 1
while True: # prints Sudoku until it is solved
print("Your sudoku to solve:")
printSudoku()
print("Input 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
# vprint(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
numbers = " 0123456789" # conditions of entering the numbers !
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
if x == "r": # reset
print(" Function reset() will be ready in Next Week")
else:
print("Error - wrong number format \n ")
continue
    sudoku1[int(x[0])-1][int(x[2])-1] = int(x[4])
try:
i = 0
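        # a row summing to 45 (1+2+...+9) counts as complete; sum() raises TypeError while any cell still holds the ' ' placeholder string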
for item in sudoku1:
if sum(item) == 45:
i = i + 1
if i == 9:
print("YOU WIN")
break
except TypeError:
print()
'''
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
'''
| [
"[email protected]"
] | |
c75ea51b954cef8081502d553948e07b0487abe9 | bf813d2b877fb8ba62feb4263484db3d0f26d5cd | /early-phd/map_to_flux.py | 1c2d0eab20e2c6fa5e1fe3228a8f9507a9b7ba48 | [] | no_license | 9217392354A/astro-scripts | 1e8e8c827097a877518d1f3e10870a5c2609417c | cd7a175bd504b4e291020b551db3077b067bc632 | refs/heads/master | 2021-01-13T00:40:57.481755 | 2016-03-25T17:04:28 | 2016-03-25T17:04:28 | 54,730,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | #Program created by Chris Fuller to test a function for extracting flux's from a fits file using appature photomotry
#import stuff
from numpy import *
import numpy
import scipy
import math
import sys
import os
from os.path import join as pj
#File stuff
cat = "bigcoma.csv"
catfolder = "/Users/chrisfuller/Dropbox/coma/Catalogues"
catout ="comaTEST.csv"
folder = "/Users/chrisfuller/Dropbox/coma/flux2/" | [
"[email protected]"
] | |
8d8b46573115c470483434c30bc2fd15efceb159 | 73785aea08895d0fc15e914ce329716712f057ec | /recipes/errorAnal.py | 9208c6a48ac906004212b9520360e38dbc9b8806 | [] | no_license | Peder2911/ModelComp | 5e93e6db7fbc809e7444448729a91ff7a762b0cc | 91ee3835ddc560adeb4af457953905aaeca79cd6 | refs/heads/master | 2020-05-20T05:09:01.877547 | 2019-05-18T13:37:34 | 2019-05-18T13:37:34 | 185,397,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py |
def ppSentences(sentences, y, x):
    # Pretty-print each sentence with its predicted and actual label.
    # Assumption: y holds the predictions and x the true labels for `sentences`;
    # the original fragment was missing `def` and referenced undefined names.
    for i, s in enumerate(sentences):
        print('#' * 38)
        print(f'{s} - pred: {y[i]} | actual: {x[i]}')
        print('\n')
| [
"[email protected]"
] | |
99cd43a8c940db281d4db4d33d06b1cee795bc61 | c5291e50a3c72c885922378573a0ad423fcedf05 | /analysis/data/urls.py | e7638f31b2b04491d30e6f29d5a4d9826f2a05c3 | [] | no_license | raghurammanyam/django-projects | bcc3ed6285882af437a2995514cef33760fb063e | dd20ae354f7f111a0176a1cc047c099bd23e9f05 | refs/heads/master | 2022-12-12T19:22:31.698114 | 2018-12-09T09:41:45 | 2018-12-09T09:41:45 | 137,443,359 | 0 | 0 | null | 2022-11-22T03:01:07 | 2018-06-15T05:08:15 | Python | UTF-8 | Python | false | false | 196 | py |
from django.conf.urls import url
from django.urls import path
from .views import test,get
from django.http import HttpResponse
urlpatterns = [
url(r'^date/',test),
url(r'^get/',get)
]
| [
"[email protected]"
] | |
f231f73dec833a474cefcee2707d8742f92f9d51 | 125bc51efb95f383257e7bdb50ae74e5dc05b7f7 | /src/belajarIntegerString.py | f28765c84ddfefc5911c0710cd851199053fcd21 | [] | no_license | frestea09/learn_ch1_python | f9688fffda5f0fa312b82bd25081b986fa0779e9 | 510ea59bf85ec024ebc473db2533e92becaefbf3 | refs/heads/master | 2020-05-26T18:22:31.171688 | 2019-05-26T05:42:08 | 2019-05-26T05:42:08 | 188,334,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from __future__ import print_function
def main():
    variabelNama = input('Name: ')
    variabelInteger = int(input('Age: '))
    print('Your name is %s and your age is %d' % (variabelNama, variabelInteger))
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
a6d6d50572836ba4614154dce36cf5e2c21f9c51 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02679/s613915096.py | fec86a56bc93ae2efcf62264eb570f7a448a4ed4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | import math, collections
N = int(input())
AB = [[int(_) for _ in input().split()] for _ in range(N)]
mod = 10**9 + 7
C = collections.Counter()
gcd = math.gcd
a0 = 0
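# Counting scheme: two pairs (a1, b1) and (a2, b2) clash when they are orthogonal as
# 2-D vectors (a1*a2 + b1*b2 == 0). Each non-zero vector is reduced to a canonical
# direction (divide by the gcd, force a > 0, map purely vertical vectors to (0, -1)).
# A direction class and its perpendicular class cannot be mixed, so each such pair of
# classes with sizes c1 and c2 contributes 2**c1 + 2**c2 - 1 selections, an unpaired
# class contributes 2**c, the empty selection is subtracted at the end, and every
# (0, 0) vector (counted in a0) can only ever be chosen on its own.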
for a, b in AB:
if a == b == 0:
a0 += 1
elif a == 0:
C[0, -1] += 1
else:
g = gcd(a, b)
a //= g
b //= g
if a < 0:
a *= -1
b *= -1
C[a, b] += 1
ans = 1
for a, b in C:
if C[b, -a]:
continue
elif C[-b, a]:
ans *= (pow(2, C[a, b], mod) + pow(2, C[-b, a], mod) - 1) % mod
ans %= mod
else:
ans *= pow(2, C[a, b], mod)
ans %= mod
ans += a0 - 1
ans %= mod
print(ans)
| [
"[email protected]"
] | |
8951afe2b51d654fd469ed7fd936879e3610aa30 | 35894bca47cf0c9a51a05caf7b56a0d69c05b033 | /04_subrotinas_numpy/25_fibonacci.py | 1067f8b8abc1c15bc44a985e9b4f892471d34f46 | [] | no_license | alcebytes/Phyton-Estudo | 0a2d33f5f3e668e6ab2f99e5e4499545a3bc1273 | a3f9a0b3e0a91d71a9359480d6ec17e692572694 | refs/heads/master | 2023-01-14T17:24:16.486956 | 2020-10-08T02:02:02 | 2020-10-08T02:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | import time as time
num_iter = int(input("Digitar o valor do número máximo para a sequência de Fibonacci = "))
tempo_inicio = time.time()
#tempo_inicio_CPU = time.clock() # OBSOLETE (time.clock() was removed in Python 3.8)
tempo_inicio_CPU = time.process_time()
tempo_inicio_CPU_2 = time.perf_counter()
# f(0)
f = []
f.append(0)
print(f)
# f(1)
f.append(1)
print(f)
"""
f(n + 2) = f(n) + f(n + 1)
for n in range(0, num_iter - 2, 1)
f.append(f[n] + f[n + 1] )
"""
n = 0
while n <= num_iter - 3:
f.append(f[n] + f[n + 1])
n = n + 1
print(f)
# Print the last term of f
print(f[-1])
# Another way:
print(f[len(f) - 1])
tempo_fim = time.time() - tempo_inicio
print("The application execution time is", tempo_fim, "s")
tempo_fim_CPU_2 = time.perf_counter() - tempo_inicio_CPU_2
print("The CPU execution time is", tempo_fim_CPU_2)
tempo_fim_CPU = time.process_time() - tempo_inicio_CPU
print("The CPU execution time is", tempo_fim_CPU)
| [
"[email protected]"
] | |
9a0f0433298aaf2b0b0aa33f5a64b0273f639e93 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2669-2722/left-trunk-2722/twisted/internet/iocpreactor/udp.py | 3bf7a5bba392de8252482bdf0e1ba0600cfe27fa | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,644 | py | import socket
from twisted.internet import interfaces, defer, error, protocol, address
from twisted.internet.abstract import isIPAddress
from twisted.persisted import styles
from twisted.python import log, failure, reflect
from ops import ReadFileOp, WriteFileOp, WSARecvFromOp, WSASendToOp
from util import StateEventMachineType
from zope.interface import implements
ERROR_PORT_UNREACHABLE = 1234
class Port(log.Logger, styles.Ephemeral, object):
__metaclass__ = StateEventMachineType
implements(interfaces.IUDPTransport)
events = ["startListening", "stopListening", "write", "readDone", "readErr", "writeDone", "writeErr", "connect"]
sockinfo = (socket.AF_INET, socket.SOCK_DGRAM, 0)
read_op_class = WSARecvFromOp
write_op_class = WSASendToOp
reading = False
_realPortNumber = None
disconnected = property(lambda self: self.state == "disconnected")
def __init__(self, bindAddress, proto, maxPacketSize=8192):
assert isinstance(proto, protocol.DatagramProtocol)
self.state = "disconnected"
from twisted.internet import reactor
self.bindAddress = bindAddress
self._connectedAddr = None
self.protocol = proto
self.maxPacketSize = maxPacketSize
self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
self.read_op = self.read_op_class(self)
self.readbuf = reactor.AllocateReadBuffer(maxPacketSize)
self.reactor = reactor
def __repr__(self):
if self._realPortNumber is not None:
return "<%s on %s>" % (self.protocol.__class__, self._realPortNumber)
else:
return "<%s not connected>" % (self.protocol.__class__,)
def handle_listening_connect(self, host, port):
if not isIPAddress(host):
raise ValueError, "please pass only IP addresses, not domain names"
self.state = "connecting"
return defer.maybeDeferred(self._connectDone, host, port)
def handle_connecting_connect(self, host, port):
raise RuntimeError, "already connected, reconnecting is not currently supported (talk to itamar if you want this)"
handle_connected_connect = handle_connecting_connect
def _connectDone(self, host, port):
self._connectedAddr = (host, port)
self.state = "connected"
self.socket.connect((host, port))
return self._connectedAddr
def handle_disconnected_startListening(self):
self._bindSocket()
host, port = self.bindAddress
if isIPAddress(host):
return defer.maybeDeferred(self._connectSocket, host)
else:
d = self.reactor.resolve(host)
d.addCallback(self._connectSocket)
return d
def _bindSocket(self):
try:
skt = socket.socket(*self.sockinfo)
skt.bind(self.bindAddress)
except socket.error, le:
raise error.CannotListenError, (None, None, le)
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s"%(self.protocol.__class__, self._realPortNumber))
self.socket = skt
def _connectSocket(self, host):
self.bindAddress = (host, self.bindAddress[1])
self.protocol.makeConnection(self)
self.startReading()
self.state = "listening"
def startReading(self):
self.reading = True
try:
self.read_op.initiateOp(self.socket.fileno(), self.readbuf)
except WindowsError, we:
log.msg("initiating read failed with args %s" % (we,))
def stopReading(self):
self.reading = False
def handle_listening_readDone(self, bytes, addr = None):
if addr:
self.protocol.datagramReceived(self.readbuf[:bytes], addr)
else:
self.protocol.datagramReceived(self.readbuf[:bytes])
if self.reading:
self.startReading()
handle_connecting_readDone = handle_listening_readDone
handle_connected_readDone = handle_listening_readDone
def handle_listening_readErr(self, ret, bytes):
log.msg("read failed with err %s" % (ret,))
if ret == 1234: # ERROR_PORT_UNREACHABLE
self.protocol.connectionRefused()
if self.reading:
self.startReading()
handle_connecting_readErr = handle_listening_readErr
handle_connected_readErr = handle_listening_readErr
def handle_disconnected_readErr(self, ret, bytes):
pass # no kicking the dead horse
def handle_disconnected_readDone(self, bytes, addr = None):
pass # no kicking the dead horse
def handle_listening_write(self, data, addr):
self.performWrite(data, addr)
def handle_connected_write(self, data, addr = None):
assert addr in (None, self._connectedAddr)
self.performWrite(data, addr)
def performWrite(self, data, addr = None):
self.writing = True
try:
write_op = self.write_op_class(self)
if not addr:
addr = self._connectedAddr
write_op.initiateOp(self.socket.fileno(), data, addr)
except WindowsError, we:
log.msg("initiating write failed with args %s" % (we,))
def handle_listening_writeDone(self, bytes):
log.msg("write success with bytes %s" % (bytes,))
handle_connecting_writeDone = handle_listening_writeDone
handle_connected_writeDone = handle_listening_writeDone
def handle_listening_writeErr(self, ret, bytes):
log.msg("write failed with err %s" % (ret,))
if ret == ERROR_PORT_UNREACHABLE:
self.protocol.connectionRefused()
handle_connecting_writeErr = handle_listening_writeErr
handle_connected_writeErr = handle_listening_writeErr
def handle_disconnected_writeErr(self, ret, bytes):
pass # no kicking the dead horse
def handle_disconnected_writeDone(self, bytes):
pass # no kicking the dead horse
def writeSequence(self, seq, addr):
self.write("".join(seq), addr)
def handle_listening_stopListening(self):
self.stopReading()
self.connectionLost()
handle_connecting_stopListening = handle_listening_stopListening
handle_connected_stopListening = handle_listening_stopListening
def connectionLost(self, reason=None):
log.msg('(Port %s Closed)' % self._realPortNumber)
self._realPortNumber = None
self.protocol.doStop()
self.socket.close()
del self.socket
self.state = "disconnected"
def logPrefix(self):
return self.logstr
def getHost(self):
return address.IPv4Address('UDP', *(self.socket.getsockname() + ('INET_UDP',)))
| [
"[email protected]"
] | |
1d539066706ca4f69d3130d49688deb922c477b3 | 98311c7b2b2257f14f0f4a0657363e893872798e | /project/src/python/practicum.py | e3f1dfcf9ef76f4b71a4dd1106d26832dc48802f | [
"MIT"
] | permissive | aslupin/Yak-Ngaen-Project | fed9a264a863e1174c00ec8ad360f1c03422f393 | c91b3cc83d2eda22b62fe877276bdd1a8a1b24fd | refs/heads/master | 2022-01-28T02:44:39.385903 | 2019-05-09T13:36:04 | 2019-05-09T13:36:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,616 | py | import usb
# RQ_SET_LED = 0
# RQ_SET_LED_VALUE = 1
# RQ_GET_SWITCH = 2
# RQ_GET_LIGHT = 3
RQ_GET_SOUND_PLAYER_I = 1
RQ_GET_SOUND_PLAYER_II = 2
####################################
def find_mcu_boards():
'''
Find all Practicum MCU boards attached to the machine, then return a list
of USB device handles for all the boards
>>> devices = find_mcu_boards()
>>> first_board = McuBoard(devices[0])
'''
boards = [dev for bus in usb.busses()
for dev in bus.devices
if (dev.idVendor,dev.idProduct) == (0x16c0,0x05dc)]
return boards
####################################
class McuBoard:
'''
Generic class for accessing Practicum MCU board via USB connection.
'''
################################
def __init__(self, dev):
self.device = dev
self.handle = dev.open()
################################
def usb_write(self, request, data=[], index=0, value=0):
'''
Send data output to the USB device (i.e., MCU board)
request: request number to appear as bRequest field on the USB device
index: 16-bit value to appear as wIndex field on the USB device
value: 16-bit value to appear as wValue field on the USB device
'''
reqType = usb.TYPE_VENDOR | usb.RECIP_DEVICE | usb.ENDPOINT_OUT
self.handle.controlMsg(
reqType, request, data, value=value, index=index)
################################
def usb_read(self, request, length=1, index=0, value=0):
'''
Request data input from the USB device (i.e., MCU board)
request: request number to appear as bRequest field on the USB device
length: number of bytes to read from the USB device
index: 16-bit value to appear as wIndex field on the USB device
value: 16-bit value to appear as wValue field on the USB device
If successful, the method returns a tuple of length specified
containing data returned from the MCU board.
'''
reqType = usb.TYPE_VENDOR | usb.RECIP_DEVICE | usb.ENDPOINT_IN
buf = self.handle.controlMsg(
reqType, request, length, value=value, index=index)
return buf
####################################
class PeriBoard:
################################
def __init__(self, mcu):
self.mcu = mcu
################################
# def get_sound_playeri(self):
# sound = self.mcu.usb_read(request=RQ_GET_SOUND_PLAYER_I, length=2)
# return sound[0]
# def get_sound_playerii(self):
# sound = self.mcu.usb_read(request=RQ_GET_SOUND_PLAYER_II, length=2)
# return sound[0]
def get_sound(self, player):
'''
        Return the current sound reading for the given player on the peripheral board
'''
if(player == RQ_GET_SOUND_PLAYER_I):
sound = self.mcu.usb_read(request=RQ_GET_SOUND_PLAYER_I, length=2)
return sound[0]
# return sound[0]
elif(player == RQ_GET_SOUND_PLAYER_II):
sound = self.mcu.usb_read(request=RQ_GET_SOUND_PLAYER_II, length=2)
# return sound[0]
return sound[0]
# light[1] *= 256
# result = light[1] + light[0]
# return (sound[1] * 256 ) + sound[0]
# ################################
# def set_led(self, led_no, led_state):
# '''
# Set status of LED led_no on peripheral board to led_state
# (0 => off, 1 => on)
# '''
# self.mcu.usb_write(request=RQ_SET_LED, index=led_no, value=led_state)
# # return
# ################################
# def set_led_value(self, value):
# '''
# Display right 3 bits of value on peripheral board's LEDs
# '''
# self.mcu.usb_write(request=RQ_SET_LED_VALUE, value=value)
# # return
# ################################
# def get_switch(self):
# '''
# Return a boolean value indicating whether the switch on the peripheral
# board is currently pressed
# '''
# state = self.mcu.usb_read(request=RQ_GET_SWITCH, length=1)
# return state[0] == 1
# ################################
# def get_light(self):
# '''
# Return the current reading of light sensor on peripheral board
# '''
# light = self.mcu.usb_read(request=RQ_GET_LIGHT, length=2)
# # light[1] *= 256
# # result = light[1] + light[0]
# return ( light[1] * 256 ) + light[0]
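# Minimal usage sketch (illustrative, not part of the original module; it assumes at least
# one Practicum board is attached and reads player 1's sound level):
# if __name__ == '__main__':
#     boards = find_mcu_boards()
#     peri = PeriBoard(McuBoard(boards[0]))
#     print(peri.get_sound(RQ_GET_SOUND_PLAYER_I))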
| [
"[email protected]"
] | |
5dcf1531f3266b5a1c867bd6a62ba36a36b2bbc2 | 7b08ceb8c901a09e41d4a67804e2adf94142cb17 | /description2process/__init__.py | 2f99a8019b7c0dace78658a646cc5d28bfb7d318 | [] | no_license | simrit1/Description2Process | 1e7cfcc4dc6bb762d69f27bbe1eedd4e0cef6a38 | 223372f3588f7ac67537eae3012667951b5543e0 | refs/heads/master | 2023-08-25T23:12:50.838804 | 2019-05-16T16:51:51 | 2019-05-16T16:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,370 | py | import tensorflow as tf
# We need to enable eager execution for inference at the end of this notebook.
tfe = tf.contrib.eager
tfe.enable_eager_execution()
TFVERSION='1.13'
import os
os.environ['TFVERSION'] = TFVERSION
# Import library
from description2process import data_generation
from description2process import contraction_expansion
from description2process import coreference_resolution
from description2process import clause_extraction
from description2process import activity_recognition
from description2process import activity_extraction
from description2process import structured_description
from description2process import xml_model
from description2process import visualization
from description2process import evaluation
# Returns the visualisation of a process description
# INPUT: process description in string format
def description2model(description, png = False):
# step1 : contraction expansion
description = contraction_expansion.expand_contractions(description)
print("Step 1/8 DONE: contraction expansion")
# step2 : coreference resolution
description = coreference_resolution.resolve_coreferences(description)
print("Step 2/8 DONE: coreference resolution")
# step3 : clause extraction
subsentences = clause_extraction.get_clauses(description)
print("Step 3/8 DONE: extracted clauses ")
# step4: label clauses
labeled_clauses_df = activity_recognition.contains_activity_list(subsentences)
print("Step 4/8 DONE: labeled clauses ")
# step5: activity extraction
df_activities = activity_extraction.get_activity_df(labeled_clauses_df)
print("Step 5/8 DONE: extracted activities ")
# step6: get a structured_descriptions
str_descr = structured_description.get_structured_description(description, df_activities)
print("Step 6/8 DONE: semi-structured descriptions")
# step7: get XML format of models
xml = xml_model.structured2xml(str_descr)
print("Step 7/8 DONE: model in XML")
# step8: Visualize the model in xml
model = visualization.xml2model(xml, png)
print("Step 8/8 DONE: Visualize model")
return model
# Returns the xml format of the process description
# INPUT: process description in string format
def description2xml(description):
# step1 : contraction expansion
description = contraction_expansion.expand_contractions(description)
print("Step 1/7 DONE: contraction expansion")
# step2 : coreference resolution
description = coreference_resolution.resolve_coreferences(description)
print("Step 2/7 DONE: coreference resolution")
# step3 : clause extraction
subsentences = clause_extraction.get_clauses(description)
print("Step 3/7 DONE: extracted clauses ")
# step4: label clauses
labeled_clauses_df = activity_recognition.contains_activity_list(subsentences)
print("Step 4/7 DONE: labeled clauses ")
# step5: activity extraction
df_activities = activity_extraction.get_activity_df(labeled_clauses_df)
print("Step 5/7 DONE: extracted activities ")
# step6: get a structured_descriptions
str_descr = structured_description.get_structured_description(description, df_activities)
print("Step 6/7 DONE: semi-structured descriptions")
# step7: get XML format of models
xml = xml_model.structured2xml(str_descr)
print("Step 7/7 DONE: model in XML")
return xml
# returns the structured description of raw process descriptions
# Input: pandas dataframe of process descriptions
def description2structured_df(description_df):
# step1 : contraction expansion
description_df = contraction_expansion.expand_contractions_df(description_df)
print("Step 1/6 DONE: contraction expansion")
# step2 : coreference resolution
description_df = coreference_resolution.resolve_coreferences_df(description_df)
print("Step 2/6 DONE: coreference resolution")
# step3 : clause extraction
description_df = clause_extraction.get_clauses_df(description_df)
print("Step 3/6 DONE: extracted clauses ")
# step4: label clauses
labeled_clauses = activity_recognition.contains_activity_df(description_df)
print("Step 4/6 DONE: labeled clauses ")
# step5: activity extraction
df_activities = activity_extraction.get_activity_df(labeled_clauses)
print("Step 5/6 DONE: extracted activities ")
# step6: get a structured_descriptions
str_descr = structured_description.get_structured_description_df(description_df, df_activities)
print("Step 6/6 DONE: returned structured descriptions")
return str_descr
# return the description after contraction expansion and coreference resolution.
# This type of description can be seen as a cleaned version of the original one.
# Input: pandas dataframe of process descriptions
def description2referenceresolved_df(description_df):
# step1 : contraction expansion
description_df = contraction_expansion.expand_contractions_df(description_df)
# step2 : coreference resolution
description_df = coreference_resolution.resolve_coreferences_df(description_df)
return description_df
# Return the description with a list containing the description's extracted clauses
# Input: pandas dataframe of process description
def description2clauses_df(description_df):
# step1 : contraction expansion
description_df = contraction_expansion.expand_contractions_df(description_df)
# step2 : coreference resolution
description_df = coreference_resolution.resolve_coreferences_df(description_df)
# step3 : clause extraction
description_df = clause_extraction.get_clauses_df(description_df)
return description_df
# Return the description with a list containing the description's extracted clauses
# + an extra dataframe with all its labeled clauses
# Input: pandas dataframe of process descriptions
def description2labeledclauses_df(description_df):
# step1 : contraction expansion
description_df = contraction_expansion.expand_contractions_df(description_df)
# step2 : coreference resolution
description_df = coreference_resolution.resolve_coreferences_df(description_df)
# step3 : clause extraction
description_df = clause_extraction.get_clauses_df(description_df)
# step4: label clauses
labeled_clauses = activity_recognition.contains_activity_df(description_df)
return labeled_clauses, description_df
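# Minimal usage sketch (illustrative; the example sentence is made up):
# model = description2model("A customer submits an order. The clerk checks the order.")
# xml = description2xml("A customer submits an order. The clerk checks the order.")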
| [
"[email protected]"
] | |
d008e616c943f18e5f7f5c090bc112e713db99cf | c4b7b5a9c56a9b6394a14704d2faf76754175473 | /rooms/templatetags/is_booked.py | da615b5d82465d9cb146e16beb8eeaefaf53bbc4 | [] | no_license | seungjinhan/airbnb_clone_django | 71a15e5242bad28fd96d5f47652a049a77f12f61 | 4c38780746409ea1ed9b4f5b02abca60326752c2 | refs/heads/master | 2022-12-02T15:14:39.341441 | 2020-08-23T13:50:42 | 2020-08-23T13:50:42 | 280,878,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | import datetime
from django import template
from reservations import models as reservation_model
register = template.Library()
@register.simple_tag
def is_booked(room, day):
if day.number == 0:
return False
try:
date = datetime.datetime(
year=day.year, month=day.month, day=day.number)
reservation_model.BookedDay.objects.get(
day=date, reservation__room=room)
print(date)
print(room)
return True
except reservation_model.BookedDay.DoesNotExist:
return False
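# Template usage sketch (illustrative; assumes the tag library is loaded under this module's name):
# {% load is_booked %} ... {% is_booked room day %}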
| [
"[email protected]"
] | |
e5f7852757d20d9217562defb3d22da0c1893cb6 | 5e809acc62b080f1adad2c34e647241cdc5ad297 | /myenv/bin/markdown_py | fa2c63491a1647ccda5e1725538898c521cfc6a8 | [
"MIT"
] | permissive | RootenberG/My-blog-project | f520af79a2f3eb416b3dadee46813a812ce9d53d | 7ef4670cfa9d54d9345d52ca008aae5fed5605bc | refs/heads/master | 2020-08-15T20:04:29.478049 | 2020-02-08T21:57:46 | 2020-02-08T21:57:46 | 215,400,930 | 0 | 0 | MIT | 2019-10-30T20:54:38 | 2019-10-15T21:34:30 | Python | UTF-8 | Python | false | false | 255 | #!/home/den/devspace/My-blog-project/myenv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from markdown.__main__ import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
98ae73f5af580dce3fc708af8516af5e1c67bbf3 | 50e03dae243af6bfab19f8cf42494284ff70fbd3 | /BIG-BIRD/RelGAN.py | 05e0634536e46c4d7140e7c904e0f5d7773baeb5 | [] | no_license | BritneyMuller/Summarization-Lab | bf2d79abe724e999e4017d4ffe6220863fe7f162 | 4b40f5ac7a629f509c323bf426d3058268628186 | refs/heads/master | 2021-01-25T23:13:13.669487 | 2019-09-30T14:38:13 | 2019-09-30T14:38:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,010 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import matplotlib.pyplot as plt
import os
import torch.autograd as autograd
from RelationalMemory import *
from Transformer import *
class BigBird():
#generator is translator here
def __init__(self, generator, discriminator, reconstructor, dictionary, gamma = 0.99, clip_value = 0.1, lr_G = 5e-5, lr_D = 5e-5, lr_R = 1e-4, LAMBDA = 10, TEMP_END = 0.5, vq_coef =0.8, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
super(BigBird, self).__init__()
self.device = device
self.dictionary = dictionary
self.generator = generator.to(self.device)
self.reconstructor = reconstructor.to(self.device)
self.discriminator = discriminator.to(self.device)
self.gamma = gamma
self.eps = np.finfo(np.float32).eps.item()
self.optimizer_R = torch.optim.Adam(list(self.generator.parameters()) + list(self.reconstructor.parameters()), lr=lr_R)
#normal WGAN
self.optimizer_G = torch.optim.RMSprop(self.generator.parameters(), lr=lr_G)
self.optimizer_D = torch.optim.RMSprop(self.discriminator.parameters(), lr=lr_D)
#WGAN GP
#self.LAMBDA = LAMBDA # Gradient penalty lambda hyperparameter
#self.optimizer_G = torch.optim.Adam(self.generator.parameters(), lr=lr_G, betas=(0.0, 0.9))
#self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(), lr=lr_D, betas=(0.0, 0.9))
self.clip_value = clip_value
self.TEMP_END = TEMP_END
self.lr_G = lr_G
self.lr_D = lr_D
self.lr_R = lr_R
self.total_steps = 0
        self.vq_coef = vq_coef
self.epoch = 0
def calc_gradient_penalty(self, netD, real_data, fake_data):
#print real_data.size()
BATCH_SIZE = real_data.shape[0]
dim_1 = real_data.shape[1]
dim_2 = real_data.shape[2]
alpha = torch.rand(BATCH_SIZE, dim_1)
alpha = alpha.view(-1,1).expand(dim_1 * BATCH_SIZE, dim_2).view(BATCH_SIZE, dim_1, dim_2)
alpha = alpha.to(self.device)
#print(real_data.shape) #[BATCH_SIZE, 19, vocab_sz]
#print(fake_data.shape) #[BATCH_SIZE, 19, vocab_sz]
interpolates_data = ( alpha * real_data.float() + ((1 - alpha) * fake_data.float()) )
interpolates = interpolates_data.to(self.device)
#interpolates = netD.disguised_embed(interpolates_data)
interpolates = autograd.Variable(interpolates, requires_grad=True)
src_mask = (interpolates_data.argmax(-1) != netD.padding_index).type_as(interpolates_data).unsqueeze(-2)
disc_interpolates = netD.transformer_encoder( interpolates, src_mask )
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(self.device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.LAMBDA
return gradient_penalty
def _to_one_hot(self, y, n_dims):
scatter_dim = len(y.size())
y_tensor = y.to(self.device).long().view(*y.size(), -1)
zeros = torch.zeros(*y.size(), n_dims).to(self.device)
return zeros.scatter(scatter_dim, y_tensor, 1)
def train_D(self, fake_datas, real_datas):
## train discriminator
# print("real")
# print(real_datas[:10])
real_score = torch.mean(self.discriminator(real_datas))
# print("fake")
# print(fake_datas[:10])
fake_score = torch.mean(self.discriminator(fake_datas))
batch_d_loss = -real_score + fake_score #+ self.calc_gradient_penalty(self.discriminator, real_datas, fake_datas)
return batch_d_loss, real_score.item(), fake_score.item()
def train_G(self, fake_datas):
self.optimizer_G.zero_grad()
batch_g_loss = -torch.mean(self.discriminator(fake_datas))
batch_g_loss.backward(retain_graph=True)
self.optimizer_G.step()
return batch_g_loss.item()
def indicies2string(self, indices):
inv_map = {v: k for k, v in self.dictionary.items()}
return ' '.join([inv_map[i.item()] for i in indices])
def train(self):
self.generator.train()
self.reconstructor.train()
self.discriminator.train()
def eval(self):
self.generator.eval()
self.reconstructor.eval()
self.discriminator.eval()
def load(self, load_path):
print('load Bird from', load_path)
loader = torch.load(load_path)
self.generator.load_state_dict(loader['generator'])
self.discriminator.load_state_dict(loader['discriminator'])
self.reconstructor.load_state_dict(loader['reconstructor'])
self.total_steps = loader['total_steps']
self.epoch = loader['epoch']
self.gumbel_temperature = loader['gumbel_temperature']
def save(self, save_path):
print('lay egg to ./Nest ... save as', save_path)
torch.save({'generator':self.generator.state_dict(),
'reconstructor':self.reconstructor.state_dict(),
'discriminator':self.discriminator.state_dict(),
'total_steps':self.total_steps,
'epoch':self.epoch,
'gumbel_temperature':self.gumbel_temperature
},save_path)
def eval_iter(self, src, src_mask, max_len, real_data, ct, verbose = 1):
with torch.no_grad():
batch_size = src.shape[0]
memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature = self.gumbel_temperature)
memory = self.reconstructor.initial_state(batch_size, trainable=True).to(self.device)
CE_loss, acc, out = self.reconstructor.reconstruct_forward(gumbel_one_hot, src, memory, self.dictionary['[CLS]'])
if verbose == 1 and ct % 1 == 0:
print("origin:")
print(self.indicies2string(src[0]))
print("summary:")
print(self.indicies2string(summary_sample[0]))
print("real summary:")
print(self.indicies2string(real_data[0]))
print("reconsturct out:")
print(self.indicies2string(out[0]))
print("")
return acc, CE_loss.item()
def pretrainGAN_run_iter(self, src, src_mask, max_len, real_data, D_iters = 5, D_toggle = 'On', verbose = 1):
batch_size = src.shape[0]
memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4*self.total_steps))
summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature = self.gumbel_temperature)
batch_G_loss = 0
NNcriterion = nn.NLLLoss().to(self.device)
batch_G_loss = NNcriterion(summary_probs.log().contiguous().view(batch_size * max_len, -1), real_data.contiguous().view(-1))
self.optimizer_G.zero_grad()
batch_G_loss.backward()
self.optimizer_G.step()
self.total_steps += 1
if self.total_steps % 500 == 0:
if not os.path.exists("./Nest"):
os.makedirs("./Nest")
self.save("./Nest/Pretrain_RelGAN")
if verbose == 1 and self.total_steps % 1000 == 0:
print("origin:")
print(self.indicies2string(src[0]))
print("summary:")
print(self.indicies2string(summary_sample[0]))
print("real summary:")
print(self.indicies2string(real_data[0]))
print("")
distrib = summary_probs[0,0, :100].cpu().detach().numpy()
one_hot_out = gumbel_one_hot[0,0, :100].cpu().detach().numpy()
return [batch_G_loss, 0], [0], [0, 0, 0], [self.indicies2string(src[0]), self.indicies2string(summary_sample[0]), 0], distrib, one_hot_out
def run_iter(self, src, src_mask, max_len, real_data, D_iters = 5, D_toggle = 'On', verbose = 1, writer = None):
        #summary_logits has some problems
#summary = self.generator(src, src_mask, max_len, self.dictionary['[CLS]'])
batch_size = src.shape[0]
memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4*self.total_steps))
summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature = self.gumbel_temperature)
batch_D_loss = 0
if(D_toggle == 'On'):
for i in range(D_iters):
self.optimizer_D.zero_grad()
batch_d_loss, real_score, fake_score = self.train_D(gumbel_one_hot, self._to_one_hot(real_data, len(self.dictionary)))
batch_D_loss += batch_d_loss
batch_d_loss.backward(retain_graph=True);
#Clip critic weights
for p in self.discriminator.parameters():
p.data.clamp_(-self.clip_value, self.clip_value)
self.optimizer_D.step();
batch_D_loss = batch_D_loss.item()/D_iters
batch_G_loss = 0
if(D_toggle == 'On'):
#print(gumbel_one_hot.shape)
batch_G_loss = self.train_G(gumbel_one_hot)
self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4*self.total_steps))
memory = self.reconstructor.initial_state(batch_size, trainable=True).to(self.device)
CE_loss, acc, out = self.reconstructor.reconstruct_forward(gumbel_one_hot, src, memory, self.dictionary['[CLS]'])
rec_loss = CE_loss #+ self.vq_coef * vq_loss + 0.25 * self.vq_coef * commit_loss
self.optimizer_R.zero_grad()
rec_loss.backward()
nn.utils.clip_grad_norm_(list(self.generator.parameters()) + list(self.reconstructor.parameters()), 0.1)
self.optimizer_R.step()
self.total_steps += 1
if self.total_steps % 500 == 0:
if not os.path.exists("./Nest"):
os.makedirs("./Nest")
self.save("./Nest/DoubleRelationMEM_GAN")
#for i in range(5):
#plt.plot(range(1000),summary_probs.cpu().detach().numpy()[0,i,:1000] )
# wandb.log({"prob {}".format(i): wandb.Histogram(summary_probs.cpu().detach().numpy()[0,i,:1000])},step=step)
if verbose == 1 and self.total_steps % 100 == 0:
print("origin:")
print(self.indicies2string(src[0]))
print("summary:")
print(self.indicies2string(summary_sample[0]))
print("real summary:")
print(self.indicies2string(real_data[0]))
print("reconsturct out:")
print(self.indicies2string(out[0]))
# print("sentiment:",label[0].item())
# print("y:",sentiment_label[0].item())
# print("reward:",rewards[0].item())
print("")
# for name, param in self.generator.named_parameters():
# writer.add_histogram(name, param.clone().cpu().data.numpy(), self.total_steps)
# for name, param in self.reconstructor.named_parameters():
# writer.add_histogram(name, param.clone().cpu().data.numpy(), self.total_steps)
distrib = summary_probs.cpu().detach().numpy()[0,0, :100]
one_hot_out = gumbel_one_hot.cpu().detach().numpy()[0,0, :100]
return [batch_G_loss, batch_D_loss], [CE_loss.item()], [real_score, fake_score, acc], [self.indicies2string(src[0]), self.indicies2string(summary_sample[0]), self.indicies2string(out[0])], distrib, one_hot_out
class LSTMEncoder(nn.Module):
def __init__(self, vocab_sz, hidden_dim, padding_index):
super().__init__()
self.src_embed = nn.Embedding(vocab_sz, hidden_dim)
self.rnn_cell = nn.LSTM(hidden_dim, hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
self.padding_index = padding_index
self.outsize = hidden_dim*2
def forward(self, x):
#src_mask = (x != self.padding_index).type_as(x).unsqueeze(-2)
out, (h,c) = self.rnn_cell( self.src_embed(x))
return out
# class LSTM_Gumbel_Encoder_Decoder(nn.Module):
# def __init__(self, hidden_dim, emb_dim, input_len, output_len, voc_size, device, eps=1e-8, num_layers = 2):
# super().__init__()
# self.hidden_dim = hidden_dim
# self.emb_dim = emb_dim
# #self.input_len = input_len
# #self.output_len = output_len
# #self.voc_size = voc_size
# #self.teacher_prob = 1.
# #self.epsilon = eps
# self.emb_layer = nn.Embedding(voc_size, emb_dim)
# self.num_layers = num_layers
# self.encoder = nn.LSTM(emb_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=True)
# self.decoder = nn.LSTM(emb_dim, hidden_dim*2, num_layers=num_layers, batch_first=True)
# self.device = device
# self.attention_softmax = nn.Softmax(dim=1)
# # self.pro_layer = nn.Sequential(
# # nn.Linear(hidden_dim*4, voc_size, bias=True)
# # )
# self.adaptive_softmax = torch.nn.AdaptiveLogSoftmaxWithLoss(hidden_dim*4, voc_size, [100, 1000, 10000], div_value=4.0, head_bias=False)
# def forward(self, x, src_mask, max_len, start_symbol, mode = 'argmax', temp = 2.0):
# batch_size = x.shape[0]
# input_len = x.shape[1]
# device = x.device
# # encoder
# x_emb = self.emb_layer(x)
# memory, (h, c) = self.encoder(x_emb)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# h = h.view(batch_size, self.num_layers, h.shape[-1]*2)
# c = c.view(batch_size, self.num_layers, c.shape[-1]*2)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# ## decoder
# out_h, out_c = (h, c)
# ys = torch.ones(batch_size, 1).fill_(start_symbol).type_as(x.data)
# values = []
# all_probs = []
# gumbel_one_hots = []
# for i in range(max_len-1):
# ans_emb = self.emb_layer(ys[:,-1]).view(batch_size, 1, self.emb_dim)
# out, (out_h, out_c) = self.decoder(ans_emb, (out_h, out_c))
# attention = torch.bmm(memory, out.transpose(1, 2)).view(batch_size, input_len)
# attention = self.attention_softmax(attention)
# context_vector = torch.bmm(attention.view(batch_size, 1, input_len), memory)
# logits = torch.cat((out, context_vector), -1).view(batch_size, -1)
# one_hot, next_words, value, prob = self.gumbel_softmax(logits, temp)
# # print(feature.shape)
# # print(one_hot.shape)
# # print(next_words.shape)
# # print(values.shape)
# # print(log_probs.shape)
# # input("")
# ys = torch.cat((ys, next_words.view(batch_size, 1)), dim=1)
# values.append(value)
# all_probs.append(prob)
# gumbel_one_hots.append(one_hot)
# values = torch.stack(values,1)
# all_probs = torch.stack(all_probs,1)
# gumbel_one_hots = torch.stack(gumbel_one_hots, 1)
# return ys, values, all_probs, gumbel_one_hots
# def sample_gumbel(self, shape, eps=1e-20):
# U = torch.rand(shape).to(self.device)
# return -Variable(torch.log(-torch.log(U + eps) + eps))
# def gumbel_softmax_sample(self, logits, temperature):
# y = logits + self.sample_gumbel(logits.size())
# #the formula should be prob not logprob, I guess it still works
# return self.adaptive_softmax.log_prob(logits).exp()
# #return F.softmax(y / temperature, dim=-1)
# def gumbel_softmax(self, logits, temperature):
# """
# ST-gumple-softmax
# input: [*, n_class]
# return: flatten --> [*, n_class] an one-hot vector
# """
# y = self.gumbel_softmax_sample(logits, temperature)
# shape = y.size()
# values, ind = y.max(dim=-1)
# y_hard = torch.zeros_like(y).view(-1, shape[-1])
# y_hard.scatter_(1, ind.view(-1, 1), 1)
# y_hard = y_hard.view(*shape)
# y_hard = (y_hard - y).detach() + y
# return y_hard.view(logits.shape[0], -1), ind, values, y
# class LSTM_Normal_Encoder_Decoder(nn.Module):
# def __init__(self, hidden_dim, emb_dim, input_len, output_len, voc_size, pad_index, device, eps=1e-8, num_layers = 2):
# super().__init__()
# self.hidden_dim = hidden_dim
# self.emb_dim = emb_dim
# self.device = device
# #self.input_len = input_len
# #self.output_len = output_len
# #self.voc_size = voc_size
# #self.teacher_prob = 1.
# #self.epsilon = eps
# self.num_layers = num_layers
# #self.emb_layer = nn.Embedding(voc_size, emb_dim)
# self.disguise_embed = nn.Linear(voc_size, emb_dim)
# self.encoder = nn.LSTM(emb_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=True)
# self.decoder = nn.LSTM(emb_dim, hidden_dim*2, num_layers=num_layers, batch_first=True)
# self.attention_softmax = nn.Softmax(dim=1)
# self.vocab_sz = voc_size
# self.criterion = torch.nn.AdaptiveLogSoftmaxWithLoss(hidden_dim*4, voc_size, [1000, 5000, 20000], div_value=4.0, head_bias=False)
# def forward(self, x, src_mask, max_len, start_symbol, y, mode = 'argmax', temp = 2.0):
# batch_size = x.shape[0]
# input_len = x.shape[1]
# device = x.device
# # encoder
# x_emb = self.disguise_embed(x)
# memory, (h, c) = self.encoder(x_emb)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# h = h.view(batch_size, self.num_layers, h.shape[-1]*2)
# c = c.view(batch_size, self.num_layers, c.shape[-1]*2)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# ## decoder
# out_h, out_c = (h, c)
# logits = []
# for i in range(max_len):
# ans_emb = self.disguise_embed(self._to_one_hot(y[:,i], self.vocab_sz)).view(batch_size, 1, self.emb_dim)
# out, (out_h, out_c) = self.decoder(ans_emb, (out_h, out_c))
# attention = torch.bmm(memory, out.transpose(1, 2)).view(batch_size, input_len)
# attention = self.attention_softmax(attention)
# context_vector = torch.bmm(attention.view(batch_size, 1, input_len), memory)
# logit = torch.cat((out, context_vector), -1).view(batch_size, -1)
# # if mode == 'argmax':
# # values, next_words = torch.max(log_probs, dim=-1, keepdim=True)
# # if mode == 'sample':
# # m = torch.distributions.Categorical(logits=log_probs)
# # next_words = m.sample()
# # values = m.log_prob(next_words)
# logits.append(logit)
# logits = torch.stack(logits, 1)
# _ ,loss = self.criterion(logits[:,:-1].contiguous().view(batch_size * (max_len - 1), -1), y[:,1:].contiguous().view(batch_size * (max_len-1)))
# #y from one to get rid of [CLS]
# log_argmaxs = self.criterion.predict(logits[:,:-1].contiguous().view(batch_size * (max_len - 1), -1)).view(batch_size, max_len-1)
# acc = ( log_argmaxs== y[:,1:]).float().mean()
# return loss, acc, log_argmaxs
# def _to_one_hot(self, y, n_dims):
# scatter_dim = len(y.size())
# y_tensor = y.to(self.device).long().view(*y.size(), -1)
# zeros = torch.zeros(*y.size(), n_dims).to(self.device)
# return zeros.scatter(scatter_dim, y_tensor, 1)
class Discriminator(nn.Module):
def __init__(self, transformer_encoder, hidden_dim, vocab_sz, padding_index):
super(Discriminator, self).__init__()
self.padding_index = padding_index
self.disguise_embed = nn.Linear(vocab_sz, hidden_dim)
self.transformer_encoder = transformer_encoder
self.linear = nn.Linear(self.transformer_encoder.layers[-1].size, 1)
#self.sigmoid = nn.Sigmoid()
def forward(self, x):
src_mask = (x.argmax(-1) != self.padding_index).type_as(x).unsqueeze(-2)
x = self.transformer_encoder(self.disguise_embed(x), src_mask)
score = self.linear(x)
return score
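# Illustrative sketch (not used by the classes above): the straight-through Gumbel-Softmax
# estimator that the generator's gumbel_one_hot output is assumed to rely on -- draw Gumbel
# noise, soften with a temperature, then forward a hard one-hot while backpropagating
# through the soft probabilities.
def _st_gumbel_softmax_sketch(logits, temperature=1.0, eps=1e-20):
    gumbel = -torch.log(-torch.log(torch.rand_like(logits) + eps) + eps)  # Gumbel(0, 1) noise
    soft = F.softmax((logits + gumbel) / temperature, dim=-1)             # differentiable sample
    hard = torch.zeros_like(soft).scatter_(-1, soft.argmax(dim=-1, keepdim=True), 1.0)
    return (hard - soft).detach() + soft                                  # hard forward, soft gradient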
| [
"[email protected]"
] | |
cb87f2390f4328b284144e4fa1564341cb8bdcf7 | c27c51f5c33e0431dbe7db6e18c21b249d476cfa | /OpenSource_Python_Code/nova-2013.2/nova/tests/api/ec2/test_faults.py | 36cee0663bf4ff4b4c640f0b081a869d016d26a6 | [
"Apache-2.0"
] | permissive | bopopescu/Python_Stuff | 9bef74e0db17bb5e3ba2d908ced01ee744820d80 | 9aa94a0fa5e4e802090c7b29ec88b840e304d9e5 | refs/heads/master | 2022-11-20T06:54:36.581623 | 2017-12-04T18:56:02 | 2017-12-04T18:56:02 | 282,171,169 | 0 | 0 | null | 2020-07-24T08:54:37 | 2020-07-24T08:54:36 | null | UTF-8 | Python | false | false | 1,914 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import webob
from nova.api.ec2 import faults
from nova import test
from nova import wsgi
class TestFaults(test.NoDBTestCase):
"""Tests covering ec2 Fault class."""
def test_fault_exception(self):
# Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPBadRequest(
explanation='test'))
self.assertTrue(isinstance(fault.wrapped_exc,
webob.exc.HTTPBadRequest))
def test_fault_exception_status_int(self):
# Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
self.assertEquals(fault.wrapped_exc.status_int, 404)
def test_fault_call(self):
# Ensure proper EC2 response on faults.
message = 'test message'
ex = webob.exc.HTTPNotFound(explanation=message)
fault = faults.Fault(ex)
req = wsgi.Request.blank('/test')
req.GET['AWSAccessKeyId'] = "test_user_id:test_project_id"
self.mox.StubOutWithMock(faults, 'ec2_error_response')
faults.ec2_error_response(mox.IgnoreArg(), 'HTTPNotFound',
message=message, status=ex.status_int)
self.mox.ReplayAll()
fault(req)
| [
"[email protected]"
] | |
8de31727528745859574b0a71d4d7f4265c46740 | 2718b6f68a717b24cd6238a20d4116b3dea3201b | /BlogTemplate/mysite_env/mysite/apps/blog/views.py | 39b584eea388bcf248d6a6d595bae4840b4bf60b | [] | no_license | tminlun/BlogTemplate | e94654e01e170f27c97c197c898c102518ad13ab | d475587fdd9e111961bbfa56666255d38cfdc056 | refs/heads/master | 2022-12-11T00:51:53.019391 | 2018-12-05T14:54:04 | 2018-12-05T14:54:04 | 138,825,320 | 0 | 0 | null | 2022-12-08T02:25:29 | 2018-06-27T03:30:20 | Python | UTF-8 | Python | false | false | 4,519 | py | from django.shortcuts import render,get_object_or_404
from django.core.paginator import Paginator
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db.models.aggregates import Count
from read_statistics.utils import read_statistics_once_read
from comment.models import Comment
from .models import Blog,BlogType
# Get the data shared by all blog-list views; the blog_all_list parameter holds the full set of blogs, because each view fetches them differently
def get_blog_list_common_data(request, blog_all_list):
    paginator = Paginator(blog_all_list, settings.EACH_PAGE_BLOG_NUMBER) # 10 blogs per page
    page_num = request.GET.get('page', 1) # get the page number parameter from the GET request
    page_of_blogs = paginator.get_page(page_num) # get the current page
    current_page_num = page_of_blogs.number # get the current page number
    # max(current_page_num - 2, 1) only compares 1 with current_page_num - 2;
    # the range is still current_page_num - 2 .. current_page_num
page_range = list(range(max(current_page_num - 2, 1), current_page_num)) + \
list(range(current_page_num, min(current_page_num + 2, paginator.num_pages) + 1))
    # add the '...' ellipsis markers
    if page_range[0] - 1 >= 2:
        page_range.insert(0, '...')
    # if the total page count minus the last displayed page number is >= 2
    if paginator.num_pages - page_range[-1] >= 2:
        page_range.append('...')
    # add the first and the last page
    if page_range[0] != 1:
        page_range.insert(0, 1) # turn the first displayed page number into 1 (insert puts it at the front)
    if page_range[-1] != paginator.num_pages:
        page_range.append(paginator.num_pages) # append the total page count as the last displayed page number (append adds at the end)
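    # Worked example (illustrative): with paginator.num_pages == 9 and current_page_num == 5,
    # page_range starts as [3, 4, 5, 6, 7], gets '...' added on both sides, and finally
    # becomes [1, '...', 3, 4, 5, 6, 7, '...', 9].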
blog_dates = Blog.objects.dates('created_time','month',order="DESC")
blog_dates_dict = {}
for blog_date in blog_dates:
date_count = Blog.objects.filter(created_time__year=blog_date.year,created_time__month=blog_date.month).count()
blog_dates_dict[blog_date] = date_count
context = {}
    context['page_of_blogs'] = page_of_blogs # the current page
    context['page_range'] = page_range # return all the page numbers to the template
    context['blogs'] = page_of_blogs.object_list # all the blogs on the current page
    # annotate automatically returns all of the BlogType data
    context['blog_types']=BlogType.objects.annotate(type_count = Count('blog')).filter(type_count__gt=0)
    # get all of the years and months
    context['blog_dates'] = blog_dates_dict # gotcha here: remember to map each date to its count
    return context # returned to the template via render(request, '?.html', context)
def blog_list(request):
    blog_all_list = Blog.objects.all() # the full blog list
    context = get_blog_list_common_data(request,blog_all_list) # pass it into the context
return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request,blog_with_type_pk):
    blog_type = get_object_or_404(BlogType,pk = blog_with_type_pk) # get the blog type
    blog_all_list = Blog.objects.filter(blog_type=blog_type) # get all blogs of the selected type
    context = get_blog_list_common_data(request, blog_all_list)
    context['blog_type'] = blog_type # the type name
return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request,year,month):
    # get the blogs for the matching year and month
blog_all_list = Blog.objects.filter(created_time__year=year, created_time__month=month)
context = get_blog_list_common_data(request, blog_all_list)
    context['blog_with_date'] = "%s年%s月" %(year,month) # the current year and month
return render(request, 'blog/blogs_with_date.html', context)
# blog detail view
def blog_detail(request,blog_pk):
context = {}
blog = get_object_or_404(Blog, pk = blog_pk)
    # check whether the browser already has a cookie record: if it does, do not add to the read count; if not, add to it. get fetches the dict key
read_cookie_key = read_statistics_once_read(request, blog)
blog_content_type = ContentType.objects.get_for_model(blog)
comments = Comment.objects.filter(content_type=blog_content_type,object_id=blog.pk)
context['blog'] = blog
    # the previous blog; greater than: __gt=
    context['previous_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()
    # the next blog; less than: __lt=
    context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()
context['user'] = request.user
context['comments'] = comments
response=render(request, 'blog/blog_detail.html', context)
    response.set_cookie(read_cookie_key, 'true') # gotcha: remember to fill in the value
return response
| [
"[email protected]"
] | |
4da9c1e6ca004b93d1f275e2bd86ea3be8e69b31 | 52bb1d25a8c146b81b876343f861025e034fa964 | /roglick/dungeon/utils.py | fcf6a2a864c5ae7cc6c50f2c302b33b63529bf23 | [
"MIT"
] | permissive | Kromey/roglick | b3fc7a6bce7e60a150c9a9ed1cc825ef3988cf8a | b76202af71df0c30be0bd5f06a3428c990476e0e | refs/heads/master | 2020-12-14T15:49:53.163385 | 2016-05-24T16:29:06 | 2016-05-24T16:29:06 | 21,549,421 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,519 | py | from roglick.engine import random
from roglick.utils import clamp
def smoothstep(a, b, x):
"""Basic S-curve interpolation function.
Based on reference implementation available at
https://en.wikipedia.org/wiki/Smoothstep
"""
x = clamp((x - a)/(b - a), 0.0, 1.0)
return x*x*(3 - 2*x)
def smootherstep(a, b, x):
"""Improved S-curve interpolation function.
Based on reference implementation of the improved algorithm proposed by
Ken Perlin that is available at https://en.wikipedia.org/wiki/Smoothstep
"""
x = clamp((x - a)/(b - a), 0.0, 1.0)
return x*x*x*(x*(x*6 - 15) + 10);
def lerp(a, b, x):
"""Linear interpolation function."""
return a + x * (b - a)
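# Quick sanity checks for the helpers above: lerp(0.0, 10.0, 0.25) == 2.5,
# smoothstep(0.0, 1.0, 0.5) == 0.5 and smootherstep(0.0, 1.0, 0.5) == 0.5, while away from
# the midpoint the smooth variants ease in and out (e.g. smoothstep(0.0, 1.0, 0.25) == 0.15625).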
class PerlinNoise2D(object):
def __init__(self, seed=None):
self.p = [x for x in range(256)]
if seed is None:
seed = random.get_int()
rand = random.Random(seed)
rand.shuffle(self.p)
def octave(self, x, y, octaves=5, persistence=0.5):
total = 0
frequency = 1
amplitude = 1
max_val = 0
for i in range(octaves):
total += self.noise(x*frequency, y*frequency) * amplitude
max_val += amplitude
amplitude *= persistence
frequency *= 2
return total / max_val
def noise(self, x, y):
xi = int(x)
yi = int(y)
xf = x - xi
yf = y - yi
u = self.fade(xf)
v = self.fade(yf)
aa = self.p_hash(self.p_hash( xi )+ yi )
ab = self.p_hash(self.p_hash( xi )+ yi+1)
ba = self.p_hash(self.p_hash(xi+1)+ yi )
bb = self.p_hash(self.p_hash(xi+1)+ yi+1)
x1 = lerp(self.grad(aa, xf , yf), self.grad(ba, xf-1, yf), u)
x2 = lerp(self.grad(ab, xf , yf-1), self.grad(bb, xf-1, yf-1), u)
return (lerp(x1, x2, v) + 1) / 2 # Constrain to [0,1] rather than [-1,1]
def fade(self, t):
return smootherstep(0.0, 1.0, t)
def p_hash(self, i):
i = i & 255
return self.p[i]
def grad(self, h, x, y):
"""This gradient function is based on Riven's optimization
Source: http://riven8192.blogspot.com/2010/08/calculate-perlinnoise-twice-as-fast.html
"""
h = h % 4
if h == 0:
return x + y
elif h == 1:
return -x + y
elif h == 2:
return x - y
elif h == 3:
return -x - y
else:
# Never happens
return 0
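# Minimal usage sketch (illustrative; the seed and the 0.1 sampling scale are arbitrary choices):
# noise = PerlinNoise2D(seed=42)
# heightmap = [[noise.octave(x * 0.1, y * 0.1) for x in range(8)] for y in range(8)]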
| [
"[email protected]"
] | |
113b1426d9036aee80c7202882206d1f33646a46 | fa1e90dedb7f9b84cd210420215ff6a9bf7e6f2d | /airmozilla/suggest/forms.py | 605254a63fff168bd1e667a2ed8a5f5f55e9866b | [] | no_license | sara-mansouri/airmozilla | f7bdf6aeafa9a7a299fc69c506e186ba47be7ccb | 8f93162be46044798df1e6d0ce80c8407fc41995 | refs/heads/master | 2021-01-16T18:28:35.569244 | 2014-03-28T02:59:31 | 2014-03-28T02:59:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,080 | py | from django import forms
from django.conf import settings
from django.template.defaultfilters import filesizeformat
from django.utils.timesince import timesince
from django.utils.safestring import mark_safe
from django.db.models import Q
from slugify import slugify
import requests
from funfactory.urlresolvers import reverse
from airmozilla.base.forms import BaseModelForm
from airmozilla.main.models import (
SuggestedEvent,
Event,
Tag,
Channel,
SuggestedEventComment
)
from airmozilla.comments.models import SuggestedDiscussion
from airmozilla.uploads.models import Upload
from . import utils
class StartForm(BaseModelForm):
event_type = forms.ChoiceField(
label='',
choices=[
('upcoming', 'Upcoming'),
('pre-recorded', 'Pre-recorded'),
('popcorn', 'Popcorn')
],
widget=forms.widgets.RadioSelect()
)
class Meta:
model = SuggestedEvent
fields = ('title',)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(StartForm, self).__init__(*args, **kwargs)
# self.fields['upcoming'].label = ''
# self.fields['upcoming'].widget = forms.widgets.RadioSelect(
# choices=[(True, 'Upcoming'), (False, 'Pre-recorded')]
# )
def clean_title(self):
value = self.cleaned_data['title']
if Event.objects.filter(title__iexact=value):
raise forms.ValidationError("Event title already used")
if SuggestedEvent.objects.filter(title__iexact=value, user=self.user):
raise forms.ValidationError(
"You already have a suggest event with this title"
)
return value
class TitleForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('title', 'slug')
def clean_slug(self):
value = self.cleaned_data['slug']
if value:
if Event.objects.filter(slug__iexact=value):
raise forms.ValidationError('Already taken')
return value
def clean_title(self):
value = self.cleaned_data['title']
if Event.objects.filter(title__iexact=value):
raise forms.ValidationError("Event title already used")
return value
def clean(self):
cleaned_data = super(TitleForm, self).clean()
if 'slug' in cleaned_data and 'title' in cleaned_data:
if not cleaned_data['slug']:
cleaned_data['slug'] = slugify(cleaned_data['title']).lower()
if Event.objects.filter(slug=cleaned_data['slug']):
raise forms.ValidationError('Slug already taken')
return cleaned_data
class ChooseFileForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('upload',)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(ChooseFileForm, self).__init__(*args, **kwargs)
this_or_nothing = (
Q(suggested_event__isnull=True) |
Q(suggested_event=self.instance)
)
uploads = (
Upload.objects
.filter(user=self.user)
.filter(this_or_nothing)
.order_by('created')
)
self.fields['upload'].widget = forms.widgets.RadioSelect(
choices=[(x.pk, self.describe_upload(x)) for x in uploads]
)
@staticmethod
def describe_upload(upload):
html = (
'%s <br><span class="metadata">(%s) uploaded %s ago</span>' % (
upload.file_name,
filesizeformat(upload.size),
timesince(upload.created)
)
)
return mark_safe(html)
class PopcornForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('popcorn_url',)
def __init__(self, *args, **kwargs):
super(PopcornForm, self).__init__(*args, **kwargs)
self.fields['popcorn_url'].label = 'Popcorn URL'
def clean_popcorn_url(self):
url = self.cleaned_data['popcorn_url']
if '://' not in url:
url = 'http://' + url
response = requests.get(url)
if response.status_code != 200:
raise forms.ValidationError('URL can not be found')
return url
class DescriptionForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('description', 'short_description')
def __init__(self, *args, **kwargs):
super(DescriptionForm, self).__init__(*args, **kwargs)
self.fields['description'].help_text = (
"Write a description of your event that will entice viewers to "
"watch.<br>"
"An interesting description improves the chances of your "
"presentation being picked up by bloggers and other websites."
"<br>"
"Please phrase your description in the present tense. "
)
self.fields['short_description'].help_text = (
"This Short Description is used in public feeds and tweets. "
"<br>If your event is non-public be careful "
"<b>not to "
"disclose sensitive information here</b>."
"<br>If left blank the system will use the first few "
"words of the description above."
)
class DetailsForm(BaseModelForm):
tags = forms.CharField(required=False)
enable_discussion = forms.BooleanField(required=False)
class Meta:
model = SuggestedEvent
fields = (
'location',
'start_time',
'privacy',
'category',
'tags',
'channels',
'additional_links',
'remote_presenters',
)
def __init__(self, *args, **kwargs):
super(DetailsForm, self).__init__(*args, **kwargs)
self.fields['channels'].required = False
if not self.instance.upcoming:
del self.fields['location']
del self.fields['start_time']
del self.fields['remote_presenters']
else:
self.fields['location'].required = True
self.fields['start_time'].required = True
self.fields['location'].help_text = (
"Choose an Air Mozilla origination point. <br>"
"If the location of your event isn't on the list, "
"choose Live Remote. <br>"
"Note that live remote dates and times are UTC."
)
self.fields['remote_presenters'].help_text = (
"If there will be presenters who present remotely, please "
"enter email addresses, names and locations about these "
"presenters."
)
self.fields['remote_presenters'].widget.attrs['rows'] = 3
if 'instance' in kwargs:
event = kwargs['instance']
if event.pk:
tag_format = lambda objects: ','.join(map(unicode, objects))
tags_formatted = tag_format(event.tags.all())
self.initial['tags'] = tags_formatted
self.fields['tags'].help_text = (
"Enter some keywords to help viewers find the recording of your "
"event. <br>Press return between keywords"
)
self.fields['channels'].help_text = (
"Should your event appear in one or more particular "
"Air Mozilla Channels? <br>If in doubt, select Main."
)
self.fields['additional_links'].help_text = (
"If you have links to slides, the presenter's blog, or other "
"relevant links, list them here and they will appear on "
"the event page."
)
self.fields['additional_links'].widget.attrs['rows'] = 3
def clean_tags(self):
tags = self.cleaned_data['tags']
split_tags = [t.strip() for t in tags.split(',') if t.strip()]
final_tags = []
for tag_name in split_tags:
t, __ = Tag.objects.get_or_create(name=tag_name)
final_tags.append(t)
return final_tags
def clean_channels(self):
channels = self.cleaned_data['channels']
if not channels:
return Channel.objects.filter(slug=settings.DEFAULT_CHANNEL_SLUG)
return channels
class DiscussionForm(BaseModelForm):
emails = forms.CharField(required=False, label="Moderators")
class Meta:
model = SuggestedDiscussion
fields = ('enabled', 'moderate_all')
def __init__(self, *args, **kwargs):
super(DiscussionForm, self).__init__(*args, **kwargs)
event = self.instance.event
self.fields['moderate_all'].help_text = (
'That every comment has to be approved before being shown '
'publically. '
)
self.fields['emails'].widget.attrs.update({
'data-autocomplete-url': reverse('suggest:autocomplete_emails')
})
if event.privacy != Event.PRIVACY_COMPANY:
self.fields['moderate_all'].widget.attrs.update(
{'disabled': 'disabled'}
)
self.fields['moderate_all'].help_text += (
'<br>If the event is not MoCo private you have to have '
'full moderation on '
'all the time.'
)
def clean_emails(self):
value = self.cleaned_data['emails']
emails = list(set([
x.lower().strip() for x in value.split(',') if x.strip()
]))
for email in emails:
if not utils.is_valid_email(email):
raise forms.ValidationError(
'%s is not a valid email address' % (email,)
)
return emails
class PlaceholderForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('placeholder_img',)
def __init__(self, *args, **kwargs):
super(PlaceholderForm, self).__init__(*args, **kwargs)
self.fields['placeholder_img'].help_text = (
"We need a placeholder image for your event. <br>"
"A recent head-shot of the speaker is preferred. <br>"
"Placeholder images should be 200 x 200 px or larger."
)
#class ParticipantsForm(BaseModelForm):
#
# participants = forms.CharField(required=False)
#
# class Meta:
# model = SuggestedEvent
# fields = ('participants',)
#
# def clean_participants(self):
# participants = self.cleaned_data['participants']
# split_participants = [p.strip() for p in participants.split(',')
# if p.strip()]
# final_participants = []
# for participant_name in split_participants:
# p = Participant.objects.get(name=participant_name)
# final_participants.append(p)
# return final_participants
#
class SuggestedEventCommentForm(BaseModelForm):
class Meta:
model = SuggestedEventComment
fields = ('comment',)
| [
"[email protected]"
] | |
c2a6d24f20bb1c2478b4feea8182623aca53bac4 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_14413.py | 5e67c83692878ae8becbb59fe8019e05781959d1 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | # changing type of a entry in dictionary throws error
from datetime import datetime  # import added so the snippet runs as-is
d = {'today': datetime.today()}
d['today'] = d['today'].strftime('%Y-%m-%d')  # '%Y-%m-%d' is only an example format; substitute your own
| [
"[email protected]"
] | |
eeb85c0763b4b58838c030ceccd1de9ec42a82e6 | 5cea11c9373d997430b523227ce81b61972ad1e3 | /tests/test_client_events.py | bd3bc8ac4bf3a96cd62673408ee09427626646ff | [
"BSD-3-Clause"
] | permissive | tinylambda/grpclib | fcc0d4f5723fe36359ceb9655764e9a37c87ebc1 | 948e32a29a4ad82ebbfdbb681f7a797f6233bff3 | refs/heads/master | 2023-07-15T16:19:59.776603 | 2021-08-25T19:56:10 | 2021-08-25T19:56:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | import pytest
from multidict import MultiDict
from google.rpc.error_details_pb2 import ResourceInfo
from grpclib.const import Status
from grpclib.events import listen, SendRequest, SendMessage, RecvMessage
from grpclib.events import RecvInitialMetadata, RecvTrailingMetadata
from grpclib.testing import ChannelFor
from grpclib._compat import nullcontext
from grpclib.exceptions import GRPCError
from dummy_pb2 import DummyRequest, DummyReply
from dummy_grpc import DummyServiceStub, DummyServiceBase
class DummyService(DummyServiceBase):
def __init__(self, fail=False):
self.fail = fail
async def UnaryUnary(self, stream):
await stream.recv_message()
await stream.send_initial_metadata(metadata={'initial': 'true'})
await stream.send_message(DummyReply(value='pong'))
if self.fail:
await stream.send_trailing_metadata(
status=Status.NOT_FOUND,
status_message="Everything is not OK",
status_details=[ResourceInfo()],
metadata={'trailing': 'true'},
)
else:
await stream.send_trailing_metadata(metadata={'trailing': 'true'})
async def UnaryStream(self, stream):
raise GRPCError(Status.UNIMPLEMENTED)
async def StreamUnary(self, stream):
raise GRPCError(Status.UNIMPLEMENTED)
async def StreamStream(self, stream):
raise GRPCError(Status.UNIMPLEMENTED)
async def _test(event_type, *, fail=False):
service = DummyService(fail)
events = []
async def callback(event_):
events.append(event_)
async with ChannelFor([service]) as channel:
listen(channel, event_type, callback)
stub = DummyServiceStub(channel)
ctx = pytest.raises(GRPCError) if fail else nullcontext()
with ctx:
reply = await stub.UnaryUnary(DummyRequest(value='ping'),
timeout=1,
metadata={'request': 'true'})
assert reply == DummyReply(value='pong')
event, = events
return event
@pytest.mark.asyncio
async def test_send_request():
event = await _test(SendRequest)
assert event.metadata == MultiDict({'request': 'true'})
assert event.method_name == '/dummy.DummyService/UnaryUnary'
assert event.deadline.time_remaining() > 0
assert event.content_type == 'application/grpc'
@pytest.mark.asyncio
async def test_send_message():
event = await _test(SendMessage)
assert event.message == DummyRequest(value='ping')
@pytest.mark.asyncio
async def test_recv_message():
event = await _test(RecvMessage)
assert event.message == DummyReply(value='pong')
@pytest.mark.asyncio
async def test_recv_initial_metadata():
event = await _test(RecvInitialMetadata)
assert event.metadata == MultiDict({'initial': 'true'})
@pytest.mark.asyncio
async def test_recv_trailing_metadata():
event = await _test(RecvTrailingMetadata, fail=True)
assert event.metadata == MultiDict({'trailing': 'true'})
assert event.status is Status.NOT_FOUND
assert event.status_message == "Everything is not OK"
assert isinstance(event.status_details[0], ResourceInfo)
| [
"[email protected]"
] | |
31dd5fd0705bfebccf299f10eb6ba594038b885d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /5ejvPTQeiioTTA9xZ_0.py | 9b5d0b04aa8e5dca2af5037100305f74b9f4c108 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
Create a function that checks if the argument is an integer or a string.
Return `"int"` if it's an integer and `"str"` if it's a string.
### Examples
int_or_string(8) ➞ "int"
int_or_string("Hello") ➞ "str"
int_or_string(9843532) ➞ "int"
### Notes
Input will either be an integer or a string.
"""
def int_or_string(var):
return var.__class__.__name__
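# A quick self-check mirroring the docstring examples above (illustrative only,
# not part of the original kata solution).
if __name__ == "__main__":
    assert int_or_string(8) == "int"
    assert int_or_string("Hello") == "str"
    assert int_or_string(9843532) == "int"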
| [
"[email protected]"
] | |
7b288b67b9fa3473f2fb3c72085b6de7ea893109 | 6cecdc007a3aafe0c0d0160053811a1197aca519 | /apps/receiver/management/commands/generate_submissions.py | ae672ba20a318c1fc46d7ecce22a17363b20c062 | [] | no_license | commtrack/temp-aquatest | 91d678c927cc4b2dce6f709afe7faf2768b58157 | 3b10d179552b1e9d6a0e4ad5e91a92a05dba19c7 | refs/heads/master | 2016-08-04T18:06:47.582196 | 2010-09-29T13:20:13 | 2010-09-29T13:20:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,172 | py | """ This script generates all the necessary data to
synchronize with a remote CommCareHQ server, with the data generated on that remote server.
This is only really useful if you intend to manually
scp/rsync data to your local server, which requires a
login to the remote server. So this is not the standard
synchronization workflow (but is necessary for low-connectivity
settings)
"""
import bz2
import sys
import urllib2
import httplib
import cStringIO
from urlparse import urlparse
from optparse import make_option
from django.core.management.base import LabelCommand, CommandError
from django_rest_interface import util as rest_util
from receiver.models import Submission
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('-a','--all', action='store_true', dest='all', \
default=False, help='Generate all files'),
make_option('-?','--debug', action='store_true', dest='debug', \
default=False, help='Generate some files'),
make_option('-d','--download', action='store_true', dest='download', \
default=False, help='Download files.'),
)
help = "Generate synchronization files on a CommCareHQ remote server."
args = "<remote_url username password>"
label = 'IP address of the remote server (including port), username, and password'
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Please specify %s.' % self.label)
remote_url = args[0]
username = args[1]
password = args[2]
print "Generating synchronization data from %s" % remote_url
all = options.get('all', False)
debug = options.get('debug', False)
download = options.get('download', False)
generate_submissions(remote_url, username, password, not all, debug, download)
def __del__(self):
pass
def generate_submissions(remote_url, username, password, latest=True, debug=False, download=False, to='submissions.tar'):
""" Generate sync data from remote server
remote_url: url of remote server (ip:port)
username, password: credentials for logging in
"""
status = rest_util.login(remote_url, username, password)
if not status:
print "Sorry. Your credentials were not accepted."
sys.exit()
url = 'http://%s/api/submissions/' % remote_url
if latest:
MD5_buffer = rest_util.get_field_as_bz2(Submission, 'checksum', debug)
response = rest_util.request(url, username, password, MD5_buffer)
print "Generated latest remote submissions"
else:
response = urllib2.urlopen(url)
print "Generated all remote submissions archive"
if download:
fout = open(to, 'w+b')
fout.write(response.read())
fout.close()
print "Submissions downloaded to %s" % to
else:
# Check for status messages
# (i think tar payloads always begin 'BZ'...)
response = response.read(255)
if response[:2] != "BZ":
print response
return response
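# Illustrative invocation (host and credentials below are placeholders, not real values):
#
#   python manage.py generate_submissions 192.0.2.10:8000 someuser somepassword --download
#
# This logs in, asks the remote server to generate the submission data and, with
# --download, writes the resulting archive to submissions.tar locally.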
| [
"[email protected]"
] | |
5a5a5583911ddb9db5402f6b3d6030070b115f57 | 1e50f1643376039ca988d909e79f528e01fa1371 | /leetcode/editor/cn/292.nim-游戏.py | 174da887a6b080c9b99b41e140bf445662a9f611 | [] | no_license | mahatmaWM/leetcode | 482a249e56e2121f4896e34c58d9fa44d6d0034b | 4f41dad6a38d3cac1c32bc1f157e20aa14eab9be | refs/heads/master | 2022-09-04T17:53:54.832210 | 2022-08-06T07:29:46 | 2022-08-06T07:29:46 | 224,415,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | #
# @lc app=leetcode.cn id=292 lang=python3
#
# [292] Nim Game
#
# https://leetcode-cn.com/problems/nim-game/description/
#
# algorithms
# Easy (69.45%)
# Likes: 326
# Dislikes: 0
# Total Accepted: 50K
# Total Submissions: 71.9K
# Testcase Example: '4'
#
# You and your friend play the Nim game together: there is a pile of stones on the table,
# and you take turns removing 1 - 3 stones at a time. Whoever removes the last stone wins.
# You move first.
#
# Both of you are smart and always play optimally. Write a function to determine whether
# you can win the game given the number of stones.
#
# Example:
#
# Input: 4
# Output: false
# Explanation: if there are 4 stones in the pile, you can never win the game;
# no matter whether you remove 1, 2, or 3 stones, the last stone will always be taken by your friend.
#
#
#
# @lc code=start
class Solution:
def canWinNim(self, n: int) -> bool:
return False if n % 4 == 0 else True
# @lc code=end
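# A small self-check of the "multiples of 4 lose" rule above (illustrative, not part of
# the original submission): compare the O(1) answer against a brute-force game search.
from functools import lru_cache

@lru_cache(maxsize=None)
def _can_win_bruteforce(n):
    if n <= 3:
        return True
    # You win if some move of 1-3 stones leaves your friend in a losing position.
    return any(not _can_win_bruteforce(n - k) for k in (1, 2, 3))

if __name__ == '__main__':
    s = Solution()
    assert all(s.canWinNim(n) == _can_win_bruteforce(n) for n in range(1, 50))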
| [
"[email protected]"
] | |
22f4ffa79f304c929e6c0680c0a2228d0e15dd2b | dbf2d3f8eb11d04123894e398446b56ca791c9f6 | /examples/02.py | c9847666ba51a1574e379280d847d651e7982b21 | [] | no_license | podhmo/nendo | ed8d9a62ab23f7409a8ce519f28deff7d3642942 | 841ec7a990019596c769a2f581a1190aeb8cbd56 | refs/heads/master | 2021-01-22T17:47:58.964323 | 2015-06-28T11:37:38 | 2015-06-28T11:37:38 | 37,828,656 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding:utf-8 -*-
import logging
logger = logging.getLogger(__name__)
"""
-- select explicitly
SELECT open_emp_id, product_cd
FROM account
ORDER BY open_emp_id, product_cd;
"""
from nendo import Query, make_record, render
from nendo.value import List
Account = make_record("account", "account_id product_cd open_date avail_balance open_emp_id")
query = (Query()
.from_(Account)
.order_by(List([Account.open_emp_id, Account.product_cd]).desc())
.select(Account.open_emp_id, Account.product_cd))
print(render(query))
| [
"[email protected]"
] | |
e5ae86739f26139d2a56b19277ea7832e21d41bd | f74dd098c3e665d8f605af5ebe7e2874ac31dd2f | /aiogithubapi/namespaces/user.py | 1d1bd8b8cab4928b70f10f1d9836568e6cc2db64 | [
"MIT"
] | permissive | ludeeus/aiogithubapi | ce87382698827939aaa127b378b9a11998f13c06 | 90f3fc98e5096300269763c9a5857481b2dec4d2 | refs/heads/main | 2023-08-20T19:30:05.309844 | 2023-08-14T20:24:21 | 2023-08-14T20:24:21 | 198,505,021 | 21 | 20 | MIT | 2023-09-11T06:12:10 | 2019-07-23T20:39:53 | Python | UTF-8 | Python | false | false | 2,993 | py | """
Methods for the authenticated user namespace
https://docs.github.com/en/rest/reference/users#get-the-authenticated-user
"""
from __future__ import annotations
from typing import Any, Dict
from ..const import GitHubRequestKwarg
from ..models.organization import GitHubOrganizationMinimalModel
from ..models.repository import GitHubRepositoryModel
from ..models.response import GitHubResponseModel
from ..models.user import GitHubAuthenticatedUserModel
from .base import BaseNamespace
from .projects import GitHubUserProjectsNamespace
class GitHubUserNamespace(BaseNamespace):
"""Methods for the user namespace"""
def __post_init__(self) -> None:
self._projects = GitHubUserProjectsNamespace(self._client)
@property
def projects(self) -> GitHubUserProjectsNamespace:
"""Property to access the users projects namespace"""
return self._projects
async def get(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[GitHubAuthenticatedUserModel]:
"""
Get the authenticated user
https://docs.github.com/en/rest/reference/users#get-a-user
"""
response = await self._client.async_call_api(
endpoint="/user",
**kwargs,
)
response.data = GitHubAuthenticatedUserModel(response.data)
return response
async def starred(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[list[GitHubRepositoryModel]]:
"""
Get the authenticated user's starred repositories
https://docs.github.com/en/rest/reference/users#get-a-user
"""
response = await self._client.async_call_api(
endpoint="/user/starred",
**kwargs,
)
response.data = [GitHubRepositoryModel(data) for data in response.data]
return response
async def repos(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[list[GitHubRepositoryModel]]:
"""
Get the repositories for the authenticated user
https://docs.github.com/en/rest/reference/repos#list-repositories-for-a-user
"""
response = await self._client.async_call_api(
endpoint="/user/repos",
**kwargs,
)
response.data = [GitHubRepositoryModel(data) for data in response.data]
return response
async def orgs(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[list[GitHubOrganizationMinimalModel]]:
"""
List public organization memberships for the specified user.
https://docs.github.com/en/rest/reference/orgs#list-organizations-for-the-authenticated-user
"""
response = await self._client.async_call_api(endpoint="/user/orgs", **kwargs)
response.data = [GitHubOrganizationMinimalModel(data) for data in response.data or []]
return response
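# Usage sketch (an assumption, not taken from this file): the package's top-level
# GitHubAPI client is expected to expose this namespace as `client.user`, so typical
# read-only calls would look roughly like:
#
#     response = await client.user.get()            # response.data: GitHubAuthenticatedUserModel
#     starred = (await client.user.starred()).data   # list of GitHubRepositoryModel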
| [
"[email protected]"
] | |
d556f5c5b3363e7fc2bbc713413256455f6f53d3 | 7b6e3c5e6b963c749da9f946275661ae0e67dbd2 | /src/model/test/yolo_v2_test.py | f12f453a1da9ecff535acc2209d498da9c687322 | [] | no_license | WeiZongqi/yolo-tensorflow | c8237295b41beb61943207d8511c80a0f33507f2 | 53eaa2ad779918ced2ded2834e09abf2e0ed7202 | refs/heads/master | 2021-01-25T14:26:58.371334 | 2017-12-28T08:18:59 | 2017-12-28T08:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,688 | py | # -*- coding: utf8 -*-
# author: ronniecao
from __future__ import print_function
import sys
import os
import time
import numpy
import matplotlib.pyplot as plt
import cv2  # required by the drawing code in test_get_box_pred below
import tensorflow as tf
from src.data.image import ImageProcessor
from src.model.yolo_v2 import TinyYolo
class TinyYoloTestor:
def test_calculate_loss(self):
self.batch_size = 1
self.cell_size = 2
self.n_boxes = 2
self.max_objects = 3
self.n_classes = 5
coord_pred = numpy.zeros((1, 2, 2, 2, 4))
coord_pred[0,0,0,0,:] = [0.4, 0.4, 0.1, 0.1]
coord_pred[0,0,0,1,:] = [0.1, 0.1, 0.1, 0.1]
coord_pred[0,0,1,0,:] = [0.75, 0.25, 0.1, 0.1]
coord_pred[0,0,1,1,:] = [0.7, 0.2, 0.1, 0.1]
coord_pred[0,1,0,0,:] = [0.3, 0.8, 0.1, 0.1]
coord_pred[0,1,0,1,:] = [0.25, 0.75, 0.1, 0.1]
coord_pred[0,1,1,0,:] = [0.75, 0.75, 0.1, 0.1]
coord_pred[0,1,1,1,:] = [0.7, 0.8, 0.1, 0.1]
conf_pred = numpy.zeros((1, 2, 2, 2, 1))
conf_pred[0,0,0,0,0] = 1.0
conf_pred[0,0,0,1,0] = 1.0
conf_pred[0,0,1,0,0] = 1.0
conf_pred[0,0,1,1,0] = 0.2
conf_pred[0,1,0,0,0] = 0.1
conf_pred[0,1,0,1,0] = 0.9
conf_pred[0,1,1,0,0] = 1.0
class_pred = numpy.zeros((1, 2, 2, 2, 5))
class_pred[0,0,0,0,0] = 0.9
class_pred[0,0,0,0,1] = 0.1
class_pred[0,0,0,1,1] = 1.0
class_pred[0,0,1,0,4] = 0.8
class_pred[0,0,1,0,3] = 0.1
class_pred[0,0,1,0,2] = 0.1
class_pred[0,1,0,1,2] = 1.0
class_pred[0,1,1,0,3] = 0.8
class_pred[0,1,1,0,0] = 0.05
class_pred[0,1,1,0,1] = 0.05
class_pred[0,1,1,0,2] = 0.05
class_pred[0,1,1,0,4] = 0.05
coord_true = numpy.zeros((1, 2, 2, 3, 4))
coord_true[0,0,0,0,:] = [0.1, 0.1, 0.1, 0.1]
coord_true[0,0,0,1,:] = [0.4, 0.4, 0.1, 0.1]
coord_true[0,0,1,0,:] = [0.75, 0.25, 0.1, 0.1]
coord_true[0,1,0,0,:] = [0.25, 0.75, 0.1, 0.1]
coord_true[0,1,1,0,:] = [0.75, 0.75, 0.1, 0.1]
class_true = numpy.zeros((1, 2, 2, 3, 5))
class_true[0,0,0,0,1] = 1.0
class_true[0,0,0,1,0] = 1.0
class_true[0,0,1,0,4] = 1.0
class_true[0,1,0,0,2] = 1.0
class_true[0,1,1,0,3] = 1.0
object_mask = numpy.zeros((1, 2, 2, 3))
object_mask[0,0,0,0] = 1
object_mask[0,0,0,1] = 1
object_mask[0,0,1,0] = 1
object_mask[0,1,0,0] = 1
object_mask[0,1,1,0] = 1
coord_true_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 3, 4], name='coord_true_tf')
coord_pred_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 2, 4], name='coord_pred_tf')
conf_pred_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 2, 1], name='conf_pred_tf')
class_true_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 3, 5], name='class_true_tf')
class_pred_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 2, 5], name='class_pred_tf')
object_mask_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 3], name='object_mask_tf')
coord_pred_iter = tf.tile(
tf.reshape(coord_pred_tf, shape=[
self.batch_size, self.cell_size, self.cell_size, self.n_boxes, 1, 4]),
[1, 1, 1, 1, self.max_objects, 1])
coord_true_iter = tf.reshape(coord_true_tf, shape=[
self.batch_size, self.cell_size, self.cell_size, 1, self.max_objects, 4])
coord_true_iter = tf.tile(coord_true_iter, [1, 1, 1, self.n_boxes, 1, 1])
iou_tensor = self.calculate_iou_tf(coord_pred_iter, coord_true_iter)
iou_tensor_max = tf.reduce_max(iou_tensor, 3, keep_dims=True)
iou_tensor_mask = tf.cast(
(iou_tensor >= iou_tensor_max), dtype=tf.float32) * tf.reshape(
object_mask_tf, shape=(
self.batch_size, self.cell_size, self.cell_size, 1, self.max_objects, 1))
iou_tensor_pred_mask = tf.reduce_sum(iou_tensor_mask, axis=4)
coord_label = tf.reduce_max(iou_tensor_mask * coord_true_iter, axis=4)
coord_loss = tf.nn.l2_loss((coord_pred_tf - coord_label) * iou_tensor_pred_mask) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
iou_value = tf.reduce_sum(
tf.reduce_max(iou_tensor, axis=4) * iou_tensor_pred_mask, axis=[0,1,2,3]) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
conf_label = tf.reduce_max(iou_tensor_mask * tf.ones(shape=(
self.batch_size, self.cell_size, self.cell_size,
self.n_boxes, self.max_objects, 1)), axis=4)
object_loss = tf.nn.l2_loss(
(conf_pred_tf - conf_label) * iou_tensor_pred_mask) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
object_value = tf.reduce_sum(
conf_pred_tf * iou_tensor_pred_mask, axis=[0,1,2,3]) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
inv_iou_tensor_pred_mask = tf.ones(shape=(
self.batch_size, self.cell_size, self.cell_size,
self.n_boxes, 1)) - iou_tensor_pred_mask
noobject_loss = tf.nn.l2_loss(
(conf_pred_tf - conf_label) * inv_iou_tensor_pred_mask) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
noobject_value = tf.reduce_sum(
conf_pred_tf * inv_iou_tensor_pred_mask, axis=[0,1,2,3]) / (
tf.reduce_sum(inv_iou_tensor_pred_mask, axis=[0,1,2,3]))
class_true_iter = tf.reshape(class_true_tf, shape=[
self.batch_size, self.cell_size, self.cell_size, 1, self.max_objects, self.n_classes])
class_true_iter = tf.tile(class_true_iter, [1, 1, 1, self.n_boxes, 1, 1])
class_label = tf.reduce_max(iou_tensor_mask * class_true_iter, axis=4)
class_loss = tf.nn.l2_loss(
(class_pred_tf - class_label) * iou_tensor_pred_mask) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
class_value = tf.reduce_sum(
class_pred_tf * class_label * iou_tensor_pred_mask, axis=[0,1,2,3,4]) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
sess = tf.Session()
[output] = sess.run(
fetches=[class_value],
feed_dict={coord_true_tf: coord_true, coord_pred_tf: coord_pred,
conf_pred_tf: conf_pred,
class_true_tf: class_true, class_pred_tf: class_pred,
object_mask_tf: object_mask})
print(output)
def calculate_iou_tf(self, box_pred, box_true):
box1 = tf.stack([
box_pred[:,:,:,:,:,0] - box_pred[:,:,:,:,:,2] / 2.0,
box_pred[:,:,:,:,:,1] - box_pred[:,:,:,:,:,3] / 2.0,
box_pred[:,:,:,:,:,0] + box_pred[:,:,:,:,:,2] / 2.0,
box_pred[:,:,:,:,:,1] + box_pred[:,:,:,:,:,3] / 2.0])
box1 = tf.transpose(box1, perm=[1, 2, 3, 4, 5, 0])
box2 = tf.stack([
box_true[:,:,:,:,:,0] - box_true[:,:,:,:,:,2] / 2.0,
box_true[:,:,:,:,:,1] - box_true[:,:,:,:,:,3] / 2.0,
box_true[:,:,:,:,:,0] + box_true[:,:,:,:,:,2] / 2.0,
box_true[:,:,:,:,:,1] + box_true[:,:,:,:,:,3] / 2.0])
box2 = tf.transpose(box2, perm=[1, 2, 3, 4, 5, 0])
left_top = tf.maximum(box1[:,:,:,:,:,0:2], box2[:,:,:,:,:,0:2])
right_bottom = tf.minimum(box1[:,:,:,:,:,2:4], box2[:,:,:,:,:,2:4])
intersection = right_bottom - left_top
inter_area = intersection[:,:,:,:,:,0] * intersection[:,:,:,:,:,1]
mask = tf.cast(intersection[:,:,:,:,:,0] > 0, tf.float32) * \
tf.cast(intersection[:,:,:,:,:,1] > 0, tf.float32)
inter_area = inter_area * mask
box1_area = (box1[:,:,:,:,:,2]-box1[:,:,:,:,:,0]) * (box1[:,:,:,:,:,3]-box1[:,:,:,:,:,1])
box2_area = (box2[:,:,:,:,:,2]-box2[:,:,:,:,:,0]) * (box2[:,:,:,:,:,3]-box2[:,:,:,:,:,1])
iou = inter_area / (box1_area + box2_area - inter_area + 1e-6)
return tf.reshape(iou, shape=[
self.batch_size, self.cell_size, self.cell_size, self.n_boxes, self.max_objects, 1])
def test_get_box_pred(self):
label = [[0, 0, 0, 0, 0]] * 5
label[0] = [0.5, 0.15, 0.8, 0.2, 1]
label[1] = [0.5, 0.7, 0.1, 0.2, 1]
label[2] = [0.5, 0.9, 0.6, 0.1, 1]
pred = numpy.zeros(shape=(3,3,6,5))
pred[0,1,4,:] = [-1.6, -1.73, 0.09, -0.09, 1.0]
# pred[1,0,4,:] = [0.0, 0.0, 0.0, 0.0, 1.0]
image_processor = ImageProcessor(
'Z:', image_size=96, max_objects_per_image=5, cell_size=3, n_classes=1)
class_label, class_mask, box_label, object_num = \
image_processor.process_label(label)
tiny_yolo = TinyYolo(
n_channel=3, n_classes=1, image_size=96, max_objects_per_image=5,
box_per_cell=6, object_scala=10, nobject_scala=5,
coord_scala=10, class_scala=1, batch_size=1)
box_pred = tf.placeholder(
dtype=tf.float32, shape=[3, 3, 6, 4], name='box_pred')
box_truth = tf.placeholder(
dtype=tf.float32, shape=[3, 3, 1, 4], name='box_truth')
iou_matrix = tiny_yolo.get_box_pred(box_pred)
sess = tf.Session()
[output] = sess.run(
fetches=[iou_matrix],
feed_dict={box_pred: pred[:,:,:,0:4]})
sess.close()
print(output, output.shape)
# draw the visualization
image = numpy.zeros(shape=(256, 256, 3), dtype='uint8') + 255
cv2.line(image, (0, int(256/3.0)), (256, int(256/3.0)), (100, 149, 237), 1)
cv2.line(image, (0, int(256*2.0/3.0)), (256, int(256*2.0/3.0)), (100, 149, 237), 1)
cv2.line(image, (int(256/3.0), 0), (int(256/3.0), 256), (100, 149, 237), 1)
cv2.line(image, (int(256*2.0/3.0), 0), (int(256*2.0/3.0), 256), (100, 149, 237), 1)
for center_x, center_y, w, h, prob in label:
if prob != 1.0:
continue
# draw the center point
cv2.circle(image, (int(center_x*256), int(center_y*256)), 2, (255, 99, 71), 0)
# draw the ground-truth box
xmin = int((center_x - w / 2.0) * 256)
xmax = int((center_x + w / 2.0) * 256)
ymin = int((center_y - h / 2.0) * 256)
ymax = int((center_y + h / 2.0) * 256)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 99, 71), 0)
for x in range(3):
for y in range(3):
for n in range(2):
[center_x, center_y, w, h, prob] = pred[x, y, n, :]
# draw the center point
cv2.circle(image, (int(center_x*256), int(center_y*256)), 2, (238, 130, 238), 0)
# draw the predicted box
xmin = int((center_x - w / 2.0) * 256)
xmax = int((center_x + w / 2.0) * 256)
ymin = int((center_y - h / 2.0) * 256)
ymax = int((center_y + h / 2.0) * 256)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (238, 130, 238), 0)
plt.imshow(image)
plt.show() | [
"[email protected]"
] | |
4f7ae60a8596d2b441a4ff0da86b405f6c80aba6 | ad5d38fce4785037c108186f17eb1c64380355ef | /sddsd/google-cloud-sdk.staging/lib/googlecloudsdk/calliope/arg_parsers.py | 106bfe82ce32e1f5504ba759ff9f2da633c36cc4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | saranraju90/multik8s | 75864b605a139ddb7947ed4de4ae8466bdd49acb | 428576dedef7bb9cd6516e2c1ab2714581e1137c | refs/heads/master | 2023-03-03T21:56:14.383571 | 2021-02-20T14:56:42 | 2021-02-20T14:56:42 | 339,665,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,962 | py | # -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module that provides parsing utilities for argparse.
For details of how argparse argument parsers work, see:
http://docs.python.org/dev/library/argparse.html#type
Example usage:
import argparse
import arg_parsers
parser = argparse.ArgumentParser()
parser.add_argument(
'--metadata',
type=arg_parsers.ArgDict())
parser.add_argument(
'--delay',
default='5s',
type=arg_parsers.Duration(lower_bound='1s', upper_bound='10s'))
parser.add_argument(
'--disk-size',
default='10GB',
type=arg_parsers.BinarySize(lower_bound='1GB', upper_bound='10TB'))
res = parser.parse_args(
'--metadata x=y,a=b,c=d --delay 1s --disk-size 10gb'.split())
assert res.metadata == {'a': 'b', 'c': 'd', 'x': 'y'}
assert res.delay == 1
assert res.disk_size == 10737418240
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import collections
import copy
import re
from dateutil import tz
from googlecloudsdk.calliope import parser_errors
from googlecloudsdk.core import log
from googlecloudsdk.core import yaml
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import times
import six
from six.moves import zip # pylint: disable=redefined-builtin
__all__ = ['Duration', 'BinarySize']
class Error(Exception):
"""Exceptions that are defined by this module."""
class ArgumentTypeError(Error, argparse.ArgumentTypeError):
"""Exceptions for parsers that are used as argparse types."""
class ArgumentParsingError(Error, argparse.ArgumentError):
"""Raised when there is a problem with user input.
argparse.ArgumentError takes both the action and a message as constructor
parameters.
"""
def _GenerateErrorMessage(error, user_input=None, error_idx=None):
"""Constructs an error message for an exception.
Args:
error: str, The error message that should be displayed. This
message should not end with any punctuation--the full error
message is constructed by appending more information to error.
user_input: str, The user input that caused the error.
error_idx: int, The index at which the error occurred. If None,
the index will not be printed in the error message.
Returns:
str: The message to use for the exception.
"""
if user_input is None:
return error
elif not user_input: # Is input empty?
return error + '; received empty string'
elif error_idx is None:
return error + '; received: ' + user_input
return ('{error_message} at index {error_idx}: {user_input}'
.format(error_message=error, user_input=user_input,
error_idx=error_idx))
_VALUE_PATTERN = r"""
^ # Beginning of input marker.
(?P<amount>\d+) # Amount.
((?P<suffix>[-/a-zA-Z]+))? # Optional scale and type abbr.
$ # End of input marker.
"""
_RANGE_PATTERN = r'^(?P<start>[0-9]+)(-(?P<end>[0-9]+))?$'
_SECOND = 1
_MINUTE = 60 * _SECOND
_HOUR = 60 * _MINUTE
_DAY = 24 * _HOUR
# The units are adopted from sleep(1):
# http://linux.die.net/man/1/sleep
_DURATION_SCALES = {
's': _SECOND,
'm': _MINUTE,
'h': _HOUR,
'd': _DAY,
}
_BINARY_SIZE_SCALES = {
'': 1,
'K': 1 << 10,
'M': 1 << 20,
'G': 1 << 30,
'T': 1 << 40,
'P': 1 << 50,
'Ki': 1 << 10,
'Mi': 1 << 20,
'Gi': 1 << 30,
'Ti': 1 << 40,
'Pi': 1 << 50,
}
def GetMultiCompleter(individual_completer):
"""Create a completer to handle completion for comma separated lists.
Args:
individual_completer: A function that completes an individual element.
Returns:
A function that completes the last element of the list.
"""
def MultiCompleter(prefix, parsed_args, **kwargs):
start = ''
lst = prefix.rsplit(',', 1)
if len(lst) > 1:
start = lst[0] + ','
prefix = lst[1]
matches = individual_completer(prefix, parsed_args, **kwargs)
return [start + match for match in matches]
return MultiCompleter
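# A minimal sketch (illustrative only, not from the upstream module) of how
# GetMultiCompleter wraps a per-element completer so that only the element after the
# last comma gets completed.
def _ExampleMultiCompleterUsage():
  words = ['alpha', 'beta', 'gamma']
  def WordCompleter(prefix, parsed_args, **kwargs):
    del parsed_args, kwargs  # Unused in this sketch.
    return [w for w in words if w.startswith(prefix)]
  completer = GetMultiCompleter(WordCompleter)
  return completer('alpha,be', None)  # -> ['alpha,beta']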
def _DeleteTypeAbbr(suffix, type_abbr='B'):
"""Returns suffix with trailing type abbreviation deleted."""
if not suffix:
return suffix
s = suffix.upper()
i = len(s)
for c in reversed(type_abbr.upper()):
if not i:
break
if s[i - 1] == c:
i -= 1
return suffix[:i]
def GetBinarySizePerUnit(suffix, type_abbr='B'):
"""Returns the binary size per unit for binary suffix string.
Args:
suffix: str, A case insensitive unit suffix string with optional type
abbreviation.
type_abbr: str, The optional case insensitive type abbreviation following
the suffix.
Raises:
ValueError for unknown units.
Returns:
The binary size per unit for a unit+type_abbr suffix.
"""
unit = _DeleteTypeAbbr(suffix.upper(), type_abbr)
return _BINARY_SIZE_SCALES.get(unit)
def _ValueParser(scales, default_unit, lower_bound=None, upper_bound=None,
strict_case=True, type_abbr='B',
suggested_binary_size_scales=None):
"""A helper that returns a function that can parse values with units.
Casing for all units matters.
Args:
scales: {str: int}, A dictionary mapping units to their magnitudes in
relation to the lowest magnitude unit in the dict.
default_unit: str, The default unit to use if the user's input is
missing a unit.
lower_bound: str, An inclusive lower bound.
upper_bound: str, An inclusive upper bound.
strict_case: bool, whether to be strict on case-checking
type_abbr: str, the type suffix abbreviation, e.g., B for bytes, b/s for
bits/sec.
suggested_binary_size_scales: list, A list of strings with units that will
be recommended to user.
Returns:
A function that can parse values.
"""
def UnitsByMagnitude(suggested_binary_size_scales=None):
"""Returns a list of the units in scales sorted by magnitude."""
scale_items = sorted(six.iteritems(scales),
key=lambda value: (value[1], value[0]))
if suggested_binary_size_scales is None:
return [key + type_abbr for key, _ in scale_items]
return [key + type_abbr for key, _ in scale_items
if key + type_abbr in suggested_binary_size_scales]
def Parse(value):
"""Parses value that can contain a unit and type avvreviation."""
match = re.match(_VALUE_PATTERN, value, re.VERBOSE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'given value must be of the form INTEGER[UNIT] where units '
'can be one of {0}'
.format(', '.join(UnitsByMagnitude(suggested_binary_size_scales))),
user_input=value))
amount = int(match.group('amount'))
suffix = match.group('suffix') or ''
unit = _DeleteTypeAbbr(suffix, type_abbr)
if strict_case:
unit_case = unit
default_unit_case = _DeleteTypeAbbr(default_unit, type_abbr)
scales_case = scales
else:
unit_case = unit.upper()
default_unit_case = _DeleteTypeAbbr(default_unit.upper(), type_abbr)
scales_case = dict([(k.upper(), v) for k, v in scales.items()])
if not unit and unit == suffix:
return amount * scales_case[default_unit_case]
elif unit_case in scales_case:
return amount * scales_case[unit_case]
else:
raise ArgumentTypeError(_GenerateErrorMessage(
'unit must be one of {0}'.format(', '.join(UnitsByMagnitude())),
user_input=unit))
if lower_bound is None:
parsed_lower_bound = None
else:
parsed_lower_bound = Parse(lower_bound)
if upper_bound is None:
parsed_upper_bound = None
else:
parsed_upper_bound = Parse(upper_bound)
def ParseWithBoundsChecking(value):
"""Same as Parse except bound checking is performed."""
if value is None:
return None
else:
parsed_value = Parse(value)
if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
elif parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
else:
return parsed_value
return ParseWithBoundsChecking
def RegexpValidator(pattern, description):
"""Returns a function that validates a string against a regular expression.
For example:
>>> alphanumeric_type = RegexpValidator(
... r'[a-zA-Z0-9]+',
... 'must contain one or more alphanumeric characters')
>>> parser.add_argument('--foo', type=alphanumeric_type)
>>> parser.parse_args(['--foo', '?'])
>>> # SystemExit raised and the error "error: argument foo: Bad value [?]:
>>> # must contain one or more alphanumeric characters" is displayed
Args:
pattern: str, the pattern to compile into a regular expression to check
description: an error message to show if the argument doesn't match
Returns:
function: str -> str, usable as an argparse type
"""
def Parse(value):
if not re.match(pattern + '$', value):
raise ArgumentTypeError('Bad value [{0}]: {1}'.format(value, description))
return value
return Parse
def CustomFunctionValidator(fn, description, parser=None):
"""Returns a function that validates the input by running it through fn.
For example:
>>> def isEven(val):
... return val % 2 == 0
>>> even_number_parser = arg_parsers.CustomFunctionValidator(
isEven, 'This is not even!', parser=arg_parsers.BoundedInt(0))
>>> parser.add_argument('--foo', type=even_number_parser)
>>> parser.parse_args(['--foo', '3'])
>>> # SystemExit raised and the error "error: argument foo: Bad value [3]:
>>> # This is not even!" is displayed
Args:
fn: str -> boolean
description: an error message to show if boolean function returns False
parser: an arg_parser that is applied to to value before validation. The
value is also returned by this parser.
Returns:
function: str -> str, usable as an argparse type
"""
def Parse(value):
"""Validates and returns a custom object from an argument string value."""
try:
parsed_value = parser(value) if parser else value
except ArgumentTypeError:
pass
else:
if fn(parsed_value):
return parsed_value
encoded_value = console_attr.SafeText(value)
formatted_err = 'Bad value [{0}]: {1}'.format(encoded_value, description)
raise ArgumentTypeError(formatted_err)
return Parse
def Duration(default_unit='s',
lower_bound='0',
upper_bound=None,
parsed_unit='s'):
"""Returns a function that can parse time durations.
See times.ParseDuration() for details. If the unit is omitted, seconds is
assumed. The parsed unit is assumed to be seconds, but can be specified as
ms or us.
For example:
parser = Duration()
assert parser('10s') == 10
parser = Duration(parsed_unit='ms')
assert parser('10s') == 10000
parser = Duration(parsed_unit='us')
assert parser('10s') == 10000000
Args:
default_unit: str, The default duration unit.
lower_bound: str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
parsed_unit: str, The unit that the result should be returned as. Can be
's', 'ms', or 'us'.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single time duration as input to be
parsed.
"""
def Parse(value):
"""Parses a duration from value and returns integer of the parsed_unit."""
if parsed_unit == 'ms':
multiplier = 1000
elif parsed_unit == 'us':
multiplier = 1000000
elif parsed_unit == 's':
multiplier = 1
else:
raise ArgumentTypeError(
_GenerateErrorMessage('parsed_unit must be one of s, ms, us.'))
try:
duration = times.ParseDuration(value, default_suffix=default_unit)
return int(duration.total_seconds * multiplier)
except times.Error as e:
message = six.text_type(e).rstrip('.')
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse duration: {0}'.format(message, user_input=value)))
parsed_lower_bound = Parse(lower_bound)
if upper_bound is None:
parsed_upper_bound = None
else:
parsed_upper_bound = Parse(upper_bound)
def ParseWithBoundsChecking(value):
"""Same as Parse except bound checking is performed."""
if value is None:
return None
parsed_value = Parse(value)
if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return parsed_value
return ParseWithBoundsChecking
def BinarySize(lower_bound=None, upper_bound=None,
suggested_binary_size_scales=None, default_unit='G',
type_abbr='B'):
"""Returns a function that can parse binary sizes.
Binary sizes are defined as base-2 values representing number of
bytes.
Input to the parsing function must be a string of the form:
INTEGER[UNIT]
The integer must be non-negative. Valid units are "B", "KB", "MB",
"GB", "TB", "KiB", "MiB", "GiB", "TiB", "PiB". If the unit is
omitted then default_unit is assumed.
The result is parsed in bytes. For example:
parser = BinarySize()
assert parser('10GB') == 10737418240
Args:
lower_bound: str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
suggested_binary_size_scales: list, A list of strings with units that will
be recommended to user.
default_unit: str, unit used when user did not specify unit.
type_abbr: str, the type suffix abbreviation, e.g., B for bytes, b/s for
bits/sec.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single binary size as input to be
parsed.
"""
return _ValueParser(
_BINARY_SIZE_SCALES, default_unit=default_unit, lower_bound=lower_bound,
upper_bound=upper_bound, strict_case=False, type_abbr=type_abbr,
suggested_binary_size_scales=suggested_binary_size_scales)
_KV_PAIR_DELIMITER = '='
class Range(object):
"""Range of integer values."""
def __init__(self, start, end):
self.start = start
self.end = end
@staticmethod
def Parse(string_value):
"""Creates Range object out of given string value."""
match = re.match(_RANGE_PATTERN, string_value)
if not match:
raise ArgumentTypeError('Expected a non-negative integer value or a '
'range of such values instead of "{0}"'
.format(string_value))
start = int(match.group('start'))
end = match.group('end')
if end is None:
end = start
else:
end = int(end)
if end < start:
raise ArgumentTypeError('Expected range start {0} smaller or equal to '
'range end {1} in "{2}"'.format(
start, end, string_value))
return Range(start, end)
def Combine(self, other):
"""Combines two overlapping or adjacent ranges, raises otherwise."""
if self.end + 1 < other.start or self.start > other.end + 1:
raise Error('Cannot combine non-overlapping or non-adjacent ranges '
'{0} and {1}'.format(self, other))
return Range(min(self.start, other.start), max(self.end, other.end))
def __eq__(self, other):
if isinstance(other, Range):
return self.start == other.start and self.end == other.end
return False
def __lt__(self, other):
if self.start == other.start:
return self.end < other.end
return self.start < other.start
def __str__(self):
if self.start == self.end:
return six.text_type(self.start)
return '{0}-{1}'.format(self.start, self.end)
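# A minimal usage sketch for Range (illustrative only, not from the upstream module).
def _ExampleRangeUsage():
  """Returns '2-8': '2-5' and '6-8' are adjacent, so Combine merges them."""
  combined = Range.Parse('2-5').Combine(Range.Parse('6-8'))
  return str(combined)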
class HostPort(object):
"""A class for holding host and port information."""
IPV4_OR_HOST_PATTERN = r'^(?P<address>[\w\d\.-]+)?(:|:(?P<port>[\d]+))?$'
# includes hostnames
IPV6_PATTERN = r'^(\[(?P<address>[\w\d:]+)\])(:|:(?P<port>[\d]+))?$'
def __init__(self, host, port):
self.host = host
self.port = port
@staticmethod
def Parse(s, ipv6_enabled=False):
"""Parse the given string into a HostPort object.
This can be used as an argparse type.
Args:
s: str, The string to parse. If ipv6_enabled and host is an IPv6 address,
it should be placed in square brackets: e.g.
[2001:db8:0:0:0:ff00:42:8329]
or
[2001:db8:0:0:0:ff00:42:8329]:8080
ipv6_enabled: boolean, If True then accept IPv6 addresses.
Raises:
ArgumentTypeError: If the string is not valid.
Returns:
HostPort, The parsed object.
"""
if not s:
return HostPort(None, None)
match = re.match(HostPort.IPV4_OR_HOST_PATTERN, s, re.UNICODE)
if ipv6_enabled and not match:
match = re.match(HostPort.IPV6_PATTERN, s, re.UNICODE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'or\n\n'
' [IPv6_ADDRESS]:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
elif not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
return HostPort(match.group('address'), match.group('port'))
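# A minimal usage sketch for HostPort.Parse (illustrative only, not from the upstream
# module). The port is returned as a string, or None when omitted.
def _ExampleHostPortUsage():
  """Returns ('localhost', '8080')."""
  hp = HostPort.Parse('localhost:8080')
  return hp.host, hp.port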
class Day(object):
"""A class for parsing a datetime object for a specific day."""
@staticmethod
def Parse(s):
if not s:
return None
try:
return times.ParseDateTime(s, '%Y-%m-%d').date()
except times.Error as e:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse date: {0}'.format(six.text_type(e)),
user_input=s))
class Datetime(object):
"""A class for parsing a datetime object."""
@staticmethod
def Parse(s):
"""Parses a string value into a Datetime object in local timezone."""
if not s:
return None
try:
return times.ParseDateTime(s)
except times.Error as e:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse date/time: {0}'.format(six.text_type(e)),
user_input=s))
@staticmethod
def ParseUtcTime(s):
"""Parses a string representing a time in UTC into a Datetime object."""
if not s:
return None
try:
return times.ParseDateTime(s, tzinfo=tz.tzutc())
except times.Error as e:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse UTC time: {0}'.format(six.text_type(e)),
user_input=s))
class DayOfWeek(object):
"""A class for parsing a day of the week."""
DAYS = ['SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT']
@staticmethod
def Parse(s):
"""Validates and normalizes a string as a day of the week."""
if not s:
return None
fixed = s.upper()[:3]
if fixed not in DayOfWeek.DAYS:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse day of week. Value should be one of {0}'.format(
', '.join(DayOfWeek.DAYS)),
user_input=s))
return fixed
def _BoundedType(type_builder, type_description,
lower_bound=None, upper_bound=None, unlimited=False):
"""Returns a function that can parse given type within some bound.
Args:
type_builder: A callable for building the requested type from the value
string.
type_description: str, Description of the requested type (for verbose
messages).
lower_bound: of type compatible with type_builder,
The value must be >= lower_bound.
upper_bound: of type compatible with type_builder,
The value must be <= upper_bound.
unlimited: bool, If True then a value of 'unlimited' means no limit.
Returns:
A function that can parse given type within some bound.
"""
def Parse(value):
"""Parses value as a type constructed by type_builder.
Args:
value: str, Value to be converted to the requested type.
Raises:
ArgumentTypeError: If the provided value is out of bounds or unparsable.
Returns:
Value converted to the requested type.
"""
if unlimited and value == 'unlimited':
return None
try:
v = type_builder(value)
except ValueError:
raise ArgumentTypeError(
_GenerateErrorMessage('Value must be {0}'.format(type_description),
user_input=value))
if lower_bound is not None and v < lower_bound:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if upper_bound is not None and upper_bound < v:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return v
return Parse
def BoundedInt(*args, **kwargs):
return _BoundedType(int, 'an integer', *args, **kwargs)
def BoundedFloat(*args, **kwargs):
return _BoundedType(float, 'a floating point number', *args, **kwargs)
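# A minimal usage sketch for the bounded parsers (illustrative only, not from the
# upstream module): values are bound-checked and, with unlimited=True, the literal
# string 'unlimited' maps to None.
def _ExampleBoundedIntUsage():
  """Returns (8080, None)."""
  parse = BoundedInt(1, 65535, unlimited=True)
  return parse('8080'), parse('unlimited')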
def _TokenizeQuotedList(arg_value, delim=','):
"""Tokenize an argument into a list.
Args:
arg_value: str, The raw argument.
delim: str, The delimiter on which to split the argument string.
Returns:
[str], The tokenized list.
"""
if arg_value:
if not arg_value.endswith(delim):
arg_value += delim
return arg_value.split(delim)[:-1]
return []
class ArgType(object):
"""Base class for arg types."""
class ArgBoolean(ArgType):
"""Interpret an argument value as a bool."""
def __init__(
self, truthy_strings=None, falsey_strings=None, case_sensitive=False):
self._case_sensitive = case_sensitive
if truthy_strings:
self._truthy_strings = truthy_strings
else:
self._truthy_strings = ['true', 'yes']
if falsey_strings:
self._falsey_strings = falsey_strings
else:
self._falsey_strings = ['false', 'no']
def __call__(self, arg_value):
if not self._case_sensitive:
normalized_arg_value = arg_value.lower()
else:
normalized_arg_value = arg_value
if normalized_arg_value in self._truthy_strings:
return True
if normalized_arg_value in self._falsey_strings:
return False
raise ArgumentTypeError(
'Invalid flag value [{0}], expected one of [{1}]'.format(
arg_value,
', '.join(self._truthy_strings + self._falsey_strings)
)
)
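# A minimal usage sketch for ArgBoolean (illustrative only, not from the upstream
# module): matching is case-insensitive unless case_sensitive=True is passed.
def _ExampleArgBooleanUsage():
  """Returns (True, False)."""
  parse = ArgBoolean()
  return parse('Yes'), parse('false')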
class ArgList(ArgType):
"""Interpret an argument value as a list.
Intended to be used as the type= for a flag argument. Splits the string on
commas or another delimiter and returns a list.
By default, splits on commas:
'a,b,c' -> ['a', 'b', 'c']
There is an available syntax for using an alternate delimiter:
'^:^a,b:c' -> ['a,b', 'c']
'^::^a:b::c' -> ['a:b', 'c']
'^,^^a^,b,c' -> ['^a^', ',b', 'c']
"""
DEFAULT_DELIM_CHAR = ','
ALT_DELIM_CHAR = '^'
def __init__(self,
element_type=None,
min_length=0,
max_length=None,
choices=None,
custom_delim_char=None,
visible_choices=None):
"""Initialize an ArgList.
Args:
element_type: (str)->str, A function to apply to each of the list items.
min_length: int, The minimum size of the list.
max_length: int, The maximum size of the list.
choices: [element_type], a list of valid possibilities for elements. If
None, then no constraints are imposed.
custom_delim_char: char, A customized delimiter character.
visible_choices: [element_type], a list of valid possibilities for
elements to be shown to the user. If None, defaults to choices.
Returns:
(str)->[str], A function to parse the list of values in the argument.
Raises:
ArgumentTypeError: If the list is malformed.
"""
self.element_type = element_type
self.choices = choices
self.visible_choices = (
visible_choices if visible_choices is not None else choices)
if self.visible_choices:
def ChoiceType(raw_value):
if element_type:
typed_value = element_type(raw_value)
else:
typed_value = raw_value
if typed_value not in choices:
raise ArgumentTypeError('{value} must be one of [{choices}]'.format(
value=typed_value,
choices=', '.join(
[six.text_type(choice) for choice in self.visible_choices])))
return typed_value
self.element_type = ChoiceType
self.min_length = min_length
self.max_length = max_length
self.custom_delim_char = custom_delim_char
def __call__(self, arg_value): # pylint:disable=missing-docstring
if isinstance(arg_value, list):
arg_list = arg_value
elif not isinstance(arg_value, six.string_types):
raise ArgumentTypeError('Invalid type [{}] for flag value [{}]'.format(
type(arg_value).__name__, arg_value))
else:
delim = self.custom_delim_char or self.DEFAULT_DELIM_CHAR
if (arg_value.startswith(self.ALT_DELIM_CHAR) and
self.ALT_DELIM_CHAR in arg_value[1:]):
delim, arg_value = arg_value[1:].split(self.ALT_DELIM_CHAR, 1)
if not delim:
raise ArgumentTypeError(
'Invalid delimiter. Please see `gcloud topic flags-file` or '
'`gcloud topic escaping` for information on providing list or '
'dictionary flag values with special characters.')
arg_list = _TokenizeQuotedList(arg_value, delim=delim)
# TODO(b/35944028): These exceptions won't present well to the user.
if len(arg_list) < self.min_length:
raise ArgumentTypeError('not enough args')
if self.max_length is not None and len(arg_list) > self.max_length:
raise ArgumentTypeError('too many args')
if self.element_type:
arg_list = [self.element_type(arg) for arg in arg_list]
return arg_list
_MAX_METAVAR_LENGTH = 30 # arbitrary, but this is pretty long
def GetUsageMsg(self, is_custom_metavar, metavar):
"""Get a specially-formatted metavar for the ArgList to use in help.
An example is worth 1,000 words:
>>> ArgList().GetUsageMsg(False, 'FOO')
'[FOO,...]'
>>> ArgList(min_length=1).GetUsageMsg(False, 'FOO')
'FOO,[FOO,...]'
>>> ArgList(max_length=2).GetUsageMsg(False, 'FOO')
'[FOO,[FOO]]'
>>> ArgList(max_length=3).GetUsageMsg(False, 'FOO') # One, two, many...
'[FOO,...]'
>>> ArgList(min_length=2, max_length=2).GetUsageMsg(False, 'FOO')
'FOO,FOO'
>>> ArgList().GetUsageMsg(False, 'REALLY_VERY_QUITE_LONG_METAVAR')
'[REALLY_VERY_QUITE_LONG_METAVAR,...]'
Args:
is_custom_metavar: unused in GetUsageMsg
metavar: string, the base metavar to turn into an ArgList metavar
Returns:
string, the ArgList usage metavar
"""
del is_custom_metavar # Unused in GetUsageMsg
delim_char = self.custom_delim_char or self.DEFAULT_DELIM_CHAR
required = delim_char.join([metavar] * self.min_length)
if self.max_length:
num_optional = self.max_length - self.min_length
else:
num_optional = None
# Use the "1, 2, many" approach to counting
if num_optional == 0:
optional = ''
elif num_optional == 1:
optional = '[{}]'.format(metavar)
elif num_optional == 2:
optional = '[{0}{1}[{0}]]'.format(metavar, delim_char)
else:
optional = '[{}{}...]'.format(metavar, delim_char)
msg = delim_char.join([x for x in [required, optional] if x])
if len(msg) < self._MAX_METAVAR_LENGTH:
return msg
# With long metavars, only put it in once.
if self.min_length == 0:
return '[{}{}...]'.format(metavar, delim_char)
if self.min_length == 1:
return '{}{}[...]'.format(metavar, delim_char)
else:
return '{0}{1}...{1}[...]'.format(metavar, delim_char)
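# A minimal usage sketch for ArgList (illustrative only, not from the upstream module),
# including the alternate-delimiter syntax described in the class docstring.
def _ExampleArgListUsage():
  """Returns ([1, 2, 3], ['a,b', 'c'])."""
  ints = ArgList(element_type=int, min_length=1)('1,2,3')
  custom_delimiter = ArgList()('^:^a,b:c')
  return ints, custom_delimiter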
class ArgDict(ArgList):
"""Interpret an argument value as a dict.
Intended to be used as the type= for a flag argument. Splits the string on
commas to get a list, and then splits the items on equals to get a set of
key-value pairs to get a dict.
"""
def __init__(self, key_type=None, value_type=None, spec=None, min_length=0,
max_length=None, allow_key_only=False, required_keys=None,
operators=None):
"""Initialize an ArgDict.
Args:
key_type: (str)->str, A function to apply to each of the dict keys.
value_type: (str)->str, A function to apply to each of the dict values.
spec: {str: (str)->str}, A mapping of expected keys to functions.
The functions are applied to the values. If None, an arbitrary
set of keys will be accepted. If not None, it is an error for the
user to supply a key that is not in the spec. If the function specified
is None, then accept a key only without '=value'.
min_length: int, The minimum number of keys in the dict.
max_length: int, The maximum number of keys in the dict.
allow_key_only: bool, Allow empty values.
required_keys: [str], Required keys in the dict.
operators: operator_char -> value_type, Define multiple single character
operators, each with its own value_type converter. Use value_type==None
for no conversion. The default value is {'=': value_type}
Returns:
(str)->{str:str}, A function to parse the dict in the argument.
Raises:
ArgumentTypeError: If the list is malformed.
ValueError: If both value_type and spec are provided.
"""
super(ArgDict, self).__init__(min_length=min_length, max_length=max_length)
if spec and value_type:
raise ValueError('cannot have both spec and value_type')
self.key_type = key_type
self.spec = spec
self.allow_key_only = allow_key_only
self.required_keys = required_keys or []
if not operators:
operators = {'=': value_type}
for op in operators.keys():
if len(op) != 1:
raise ArgumentTypeError(
'Operator [{}] must be one character.'.format(op))
ops = ''.join(six.iterkeys(operators))
key_op_value_pattern = '([^{ops}]+)([{ops}]?)(.*)'.format(
ops=re.escape(ops))
self.key_op_value = re.compile(key_op_value_pattern, re.DOTALL)
self.operators = operators
def _ApplySpec(self, key, value):
if key in self.spec:
if self.spec[key] is None:
if value:
raise ArgumentTypeError('Key [{0}] does not take a value'.format(key))
return None
return self.spec[key](value)
else:
raise ArgumentTypeError(
_GenerateErrorMessage(
'valid keys are [{0}]'.format(
', '.join(sorted(self.spec.keys()))),
user_input=key))
def _ValidateKeyValue(self, key, value, op='='):
"""Converts and validates <key,value> and returns (key,value)."""
if (not op or value is None) and not self.allow_key_only:
raise ArgumentTypeError(
'Bad syntax for dict arg: [{0}]. Please see '
'`gcloud topic flags-file` or `gcloud topic escaping` for '
'information on providing list or dictionary flag values with '
'special characters.'.format(key))
if self.key_type:
try:
key = self.key_type(key)
except ValueError:
raise ArgumentTypeError('Invalid key [{0}]'.format(key))
convert_value = self.operators.get(op, None)
if convert_value:
try:
value = convert_value(value)
except ValueError:
raise ArgumentTypeError('Invalid value [{0}]'.format(value))
if self.spec:
value = self._ApplySpec(key, value)
return key, value
def __call__(self, arg_value): # pylint:disable=missing-docstring
if isinstance(arg_value, dict):
raw_dict = arg_value
arg_dict = collections.OrderedDict()
for key, value in six.iteritems(raw_dict):
key, value = self._ValidateKeyValue(key, value)
arg_dict[key] = value
elif not isinstance(arg_value, six.string_types):
raise ArgumentTypeError('Invalid type [{}] for flag value [{}]'.format(
type(arg_value).__name__, arg_value))
else:
arg_list = super(ArgDict, self).__call__(arg_value)
arg_dict = collections.OrderedDict()
for arg in arg_list:
match = self.key_op_value.match(arg)
# TODO(b/35944028): These exceptions won't present well to the user.
if not match:
raise ArgumentTypeError('Invalid flag value [{0}]'.format(arg))
key, op, value = match.group(1), match.group(2), match.group(3)
key, value = self._ValidateKeyValue(key, value, op=op)
arg_dict[key] = value
for required_key in self.required_keys:
if required_key not in arg_dict:
raise ArgumentTypeError(
'Key [{0}] required in dict arg but not provided'.format(
required_key))
return arg_dict
def GetUsageMsg(self, is_custom_metavar, metavar):
# If we're not using a spec to limit the key values or if metavar
# has been overridden, then use the normal ArgList formatting
if not self.spec or is_custom_metavar:
return super(ArgDict, self).GetUsageMsg(is_custom_metavar, metavar)
msg_list = []
spec_list = sorted(six.iteritems(self.spec))
# First put the spec keys with no value followed by those that expect a
# value
for spec_key, spec_function in spec_list:
if spec_function is None:
if not self.allow_key_only:
raise ArgumentTypeError(
'Key [{0}] specified in spec without a function but '
'allow_key_only is set to False'.format(spec_key))
msg_list.append(spec_key)
for spec_key, spec_function in spec_list:
if spec_function is not None:
msg_list.append('{0}={1}'.format(spec_key, spec_key.upper()))
msg = '[' + '],['.join(msg_list) + ']'
return msg
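# A minimal usage sketch for ArgDict (illustrative only, not from the upstream module):
# each KEY=VALUE pair is split out and the value is converted by value_type.
def _ExampleArgDictUsage():
  """Returns OrderedDict([('a', 1), ('b', 2)])."""
  parse = ArgDict(value_type=int)
  return parse('a=1,b=2')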
class UpdateAction(argparse.Action):
r"""Create a single dict value from delimited or repeated flags.
This class is intended to be a more flexible version of
argparse._AppendAction.
For example, with the following flag definition:
parser.add_argument(
'--inputs',
type=arg_parsers.ArgDict(),
action='append')
a caller can specify on the command line flags such as:
--inputs k1=v1,k2=v2
and the result will be a list of one dict:
[{ 'k1': 'v1', 'k2': 'v2' }]
Specifying two separate command line flags such as:
--inputs k1=v1 \
--inputs k2=v2
will produce a list of dicts:
[{ 'k1': 'v1'}, { 'k2': 'v2' }]
The UpdateAction class allows for both of the above user inputs to result
in the same: a single dictionary:
{ 'k1': 'v1', 'k2': 'v2' }
This gives end-users a lot more flexibility in constructing their command
lines, especially when scripting calls.
Note that this class will raise an exception if a key value is specified
more than once. To allow for a key value to be specified multiple times,
use UpdateActionWithAppend.
"""
def OnDuplicateKeyRaiseError(self, key, existing_value=None, new_value=None):
if existing_value is None:
user_input = None
else:
user_input = ', '.join([existing_value, new_value])
raise argparse.ArgumentError(self, _GenerateErrorMessage(
'"{0}" cannot be specified multiple times'.format(key),
user_input=user_input))
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None, # pylint:disable=redefined-builtin
choices=None,
required=False,
help=None, # pylint:disable=redefined-builtin
metavar=None,
onduplicatekey_handler=OnDuplicateKeyRaiseError):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != argparse.OPTIONAL:
raise ValueError('nargs must be %r to supply const' % argparse.OPTIONAL)
self.choices = choices
if isinstance(choices, dict):
choices = sorted(choices.keys())
super(UpdateAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
self.onduplicatekey_handler = onduplicatekey_handler
def _EnsureValue(self, namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, dict):
# Get the existing arg value (if any)
items = copy.copy(self._EnsureValue(
namespace, self.dest, collections.OrderedDict()))
# Merge the new key/value pair(s) in
for k, v in six.iteritems(values):
if k in items:
v = self.onduplicatekey_handler(self, k, items[k], v)
items[k] = v
else:
# Get the existing arg value (if any)
items = copy.copy(self._EnsureValue(namespace, self.dest, []))
# Merge the new key/value pair(s) in
for k in values:
if k in items:
self.onduplicatekey_handler(self, k)
else:
items.append(k)
# Saved the merged dictionary
setattr(namespace, self.dest, items)
class UpdateActionWithAppend(UpdateAction):
"""Create a single dict value from delimited or repeated flags.
This class provides a variant of UpdateAction, which allows for users to
append, rather than reject, duplicate key values. For example, the user
can specify:
--inputs k1=v1a --inputs k1=v1b --inputs k2=v2
and the result will be:
{ 'k1': ['v1a', 'v1b'], 'k2': 'v2' }
"""
def OnDuplicateKeyAppend(self, key, existing_value=None, new_value=None):
if existing_value is None:
return key
elif isinstance(existing_value, list):
return existing_value + [new_value]
else:
return [existing_value, new_value]
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None, # pylint:disable=redefined-builtin
choices=None,
required=False,
help=None, # pylint:disable=redefined-builtin
metavar=None,
onduplicatekey_handler=OnDuplicateKeyAppend):
super(UpdateActionWithAppend, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar,
onduplicatekey_handler=onduplicatekey_handler)
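# A minimal sketch (illustrative only, not from the upstream module) wiring UpdateAction
# into argparse so that repeated --inputs flags merge into one dict, as described in the
# UpdateAction docstring above.
def _ExampleUpdateActionUsage():
  """Returns OrderedDict([('k1', 'v1'), ('k2', 'v2')])."""
  example_parser = argparse.ArgumentParser()
  example_parser.add_argument('--inputs', type=ArgDict(), action=UpdateAction)
  namespace = example_parser.parse_args(['--inputs', 'k1=v1', '--inputs', 'k2=v2'])
  return namespace.inputs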
class RemainderAction(argparse._StoreAction): # pylint: disable=protected-access
"""An action with a couple of helpers to better handle --.
argparse on its own does not properly handle -- implementation args.
argparse.REMAINDER greedily steals valid flags before a --, and nargs='*' will
bind to [] and not parse args after --. This Action represents arguments to
be passed through to a subcommand after --.
Primarily, this Action provides two utility parsers to help a modified
ArgumentParser parse -- properly.
There is one additional property kwarg:
example: A usage statement used to construct nice additional help.
"""
def __init__(self, *args, **kwargs):
if kwargs['nargs'] is not argparse.REMAINDER:
raise ValueError(
'The RemainderAction should only be used when '
'nargs=argparse.REMAINDER.')
# Create detailed help.
self.explanation = (
"The '--' argument must be specified between gcloud specific args on "
'the left and {metavar} on the right.'
).format(metavar=kwargs['metavar'])
if 'help' in kwargs:
kwargs['help'] += '\n+\n' + self.explanation
if 'example' in kwargs:
kwargs['help'] += ' Example:\n\n' + kwargs['example']
del kwargs['example']
super(RemainderAction, self).__init__(*args, **kwargs)
def _SplitOnDash(self, args):
split_index = args.index('--')
# Remove -- before passing through
return args[:split_index], args[split_index + 1:]
def ParseKnownArgs(self, args, namespace):
"""Binds all args after -- to the namespace."""
# Not [], so that we can distinguish between empty remainder args and
# absent remainder args.
remainder_args = None
if '--' in args:
args, remainder_args = self._SplitOnDash(args)
self(None, namespace, remainder_args)
return namespace, args
def ParseRemainingArgs(self, remaining_args, namespace, original_args):
"""Parses the unrecognized args from the end of the remaining_args.
This method identifies all unrecognized arguments after the last argument
recognized by a parser (but before --). It then either logs a warning and
binds them to the namespace or raises an error, depending on strictness.
Args:
remaining_args: A list of arguments that the parsers did not recognize.
namespace: The Namespace to bind to.
      original_args: The full list of arguments given to the top parser.
Raises:
ArgumentError: If there were remaining arguments after the last recognized
argument and this action is strict.
Returns:
A tuple of the updated namespace and unrecognized arguments (before the
last recognized argument).
"""
# Only parse consecutive unknown args from the end of the original args.
# Strip out everything after '--'
if '--' in original_args:
original_args, _ = self._SplitOnDash(original_args)
# Find common suffix between remaining_args and original_args
split_index = 0
for i, (arg1, arg2) in enumerate(
zip(reversed(remaining_args), reversed(original_args))):
if arg1 != arg2:
split_index = len(remaining_args) - i
break
pass_through_args = remaining_args[split_index:]
remaining_args = remaining_args[:split_index]
if pass_through_args:
msg = ('unrecognized args: {args}\n' + self.explanation).format(
args=' '.join(pass_through_args))
raise parser_errors.UnrecognizedArgumentsError(msg)
self(None, namespace, pass_through_args)
return namespace, remaining_args
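# Illustrative sketch (assumption, not original code): a parser that owns a
# RemainderAction argument is expected to call ParseKnownArgs() first to strip
# everything after '--', run its normal parsing, and then hand the leftovers to
# ParseRemainingArgs() so stray trailing args raise UnrecognizedArgumentsError.
#
#   namespace, argv = remainder_action.ParseKnownArgs(argv, namespace)
#   # ...normal argparse parsing of argv produces `unknown`...
#   namespace, unknown = remainder_action.ParseRemainingArgs(unknown, namespace, argv)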
class StoreOnceAction(argparse.Action):
r"""Create a single dict value from delimited flags.
For example, with the following flag definition:
parser.add_argument(
'--inputs',
type=arg_parsers.ArgDict(),
action=StoreOnceAction)
a caller can specify on the command line flags such as:
--inputs k1=v1,k2=v2
and the result will be a list of one dict:
[{ 'k1': 'v1', 'k2': 'v2' }]
Specifying two separate command line flags such as:
--inputs k1=v1 \
--inputs k2=v2
will raise an exception.
Note that this class will raise an exception if a key value is specified
more than once. To allow for a key value to be specified multiple times,
use UpdateActionWithAppend.
"""
def OnSecondArgumentRaiseError(self):
raise argparse.ArgumentError(self, _GenerateErrorMessage(
'"{0}" argument cannot be specified multiple times'.format(self.dest)))
def __init__(self, *args, **kwargs):
self.dest_is_populated = False
super(StoreOnceAction, self).__init__(*args, **kwargs)
# pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
# Make sure no existing arg value exist
if self.dest_is_populated:
self.OnSecondArgumentRaiseError()
self.dest_is_populated = True
setattr(namespace, self.dest, values)
class _HandleNoArgAction(argparse.Action):
"""This class should not be used directly, use HandleNoArgAction instead."""
def __init__(self, none_arg, deprecation_message, **kwargs):
super(_HandleNoArgAction, self).__init__(**kwargs)
self.none_arg = none_arg
self.deprecation_message = deprecation_message
def __call__(self, parser, namespace, value, option_string=None):
if value is None:
log.warning(self.deprecation_message)
if self.none_arg:
setattr(namespace, self.none_arg, True)
setattr(namespace, self.dest, value)
def HandleNoArgAction(none_arg, deprecation_message):
"""Creates an argparse.Action that warns when called with no arguments.
This function creates an argparse action which can be used to gracefully
deprecate a flag using nargs=?. When a flag is created with this action, it
simply log.warning()s the given deprecation_message and then sets the value of
the none_arg to True.
This means if you use the none_arg no_foo and attach this action to foo,
`--foo` (no argument), it will have the same effect as `--no-foo`.
Args:
none_arg: a boolean argument to write to. For --no-foo use "no_foo"
deprecation_message: msg to tell user to stop using with no arguments.
Returns:
An argparse action.
"""
def HandleNoArgActionInit(**kwargs):
return _HandleNoArgAction(none_arg, deprecation_message, **kwargs)
return HandleNoArgActionInit
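# Minimal usage sketch (assumption): deprecating a bare `--foo` in favour of
# `--no-foo`, mirroring the docstring above. The flag names are illustrative only.
#
#   parser.add_argument(
#       '--foo', nargs='?',
#       action=HandleNoArgAction('no_foo', 'Use --no-foo instead of a bare --foo.'))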
class FileContents(object):
"""Creates an argparse type that reads the contents of a file or stdin.
This is similar to argparse.FileType, but unlike FileType it does not leave
a dangling file handle open. The argument stored in the argparse Namespace
is the file's contents.
Attributes:
binary: bool, If True, the contents of the file will be returned as bytes.
Returns:
A function that accepts a filename, or "-" representing that stdin should be
used as input.
"""
def __init__(self, binary=False):
self.binary = binary
def __call__(self, name):
"""Return the contents of the file with the specified name.
If name is "-", stdin is read until EOF. Otherwise, the named file is read.
Args:
name: str, The file name, or '-' to indicate stdin.
Returns:
The contents of the file.
Raises:
ArgumentTypeError: If the file cannot be read or is too large.
"""
try:
return console_io.ReadFromFileOrStdin(name, binary=self.binary)
except files.Error as e:
raise ArgumentTypeError(e)
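# Usage sketch (assumption): storing a file's text directly on the namespace. Passing
# "-" on the command line reads stdin instead of a file.
#
#   parser.add_argument('--message-body', type=FileContents())
#   # args.message_body now holds the file's contents as a string.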
class YAMLFileContents(object):
"""Creates an argparse type that reads the contents of a YAML or JSON file.
This is similar to argparse.FileType, but unlike FileType it does not leave
a dangling file handle open. The argument stored in the argparse Namespace
is the file's contents parsed as a YAML object.
Attributes:
validator: function, Function that will validate the provided input
file contents.
Returns:
A function that accepts a filename that should be parsed as a YAML
or JSON file.
"""
def __init__(self, validator=None):
if validator and not callable(validator):
raise ArgumentTypeError('Validator must be callable')
self.validator = validator
def _AssertJsonLike(self, yaml_data):
if not (yaml.dict_like(yaml_data) or yaml.list_like(yaml_data)):
raise ArgumentTypeError('Invalid YAML/JSON Data [{}]'.format(yaml_data))
def _LoadSingleYamlDocument(self, name):
"""Returns the yaml data for a file or from stdin for a single document.
YAML allows multiple documents in a single file by using `---` as a
separator between documents. See https://yaml.org/spec/1.1/#id857577.
However, some YAML-generating tools generate a single document followed by
this separator before ending the file.
This method supports the case of a single document in a file that contains
superfluous document separators, but still throws if multiple documents are
actually found.
Args:
name: str, The file path to the file or "-" to read from stdin.
Returns:
The contents of the file parsed as a YAML data object.
"""
if name == '-':
stdin = console_io.ReadStdin() # Save to potentially reuse below
yaml_data = yaml.load_all(stdin)
else:
yaml_data = yaml.load_all_path(name)
yaml_data = [d for d in yaml_data if d is not None] # Remove empty docs
# Return the single document if only 1 is found.
if len(yaml_data) == 1:
return yaml_data[0]
# Multiple (or 0) documents found. Try to parse again with single-document
# loader so its error is propagated rather than creating our own.
if name == '-':
return yaml.load(stdin)
else:
return yaml.load_path(name)
def __call__(self, name):
"""Load YAML data from file path (name) or stdin.
If name is "-", stdin is read until EOF. Otherwise, the named file is read.
If self.validator is set, call it on the yaml data once it is loaded.
Args:
name: str, The file path to the file.
Returns:
The contents of the file parsed as a YAML data object.
Raises:
ArgumentTypeError: If the file cannot be read or is not a JSON/YAML like
object.
ValueError: If file content fails validation.
"""
try:
yaml_data = self._LoadSingleYamlDocument(name)
self._AssertJsonLike(yaml_data)
if self.validator:
if not self.validator(yaml_data):
raise ValueError('Invalid YAML/JSON content [{}]'.format(yaml_data))
return yaml_data
except (yaml.YAMLParseError, yaml.FileLoadError) as e:
raise ArgumentTypeError(e)
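# Usage sketch (assumption): parsing a YAML/JSON flag value with an optional validator.
# The `kind` key check is a made-up example, not an API requirement.
#
#   parser.add_argument(
#       '--spec', type=YAMLFileContents(validator=lambda data: 'kind' in data))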
class StoreTrueFalseAction(argparse._StoreTrueAction): # pylint: disable=protected-access
"""Argparse action that acts as a combination of store_true and store_false.
Calliope already gives any bool-type arguments the standard and `--no-`
variants. In most cases we only want to document the option that does
something---if we have `default=False`, we don't want to show `--no-foo`,
since it won't do anything.
But in some cases we *do* want to show both variants: one example is when
`--foo` means "enable," `--no-foo` means "disable," and neither means "do
nothing." The obvious way to represent this is `default=None`; however, (1)
the default value of `default` is already None, so most boolean actions would
have this setting by default (not what we want), and (2) we still want an
option to have this True/False/None behavior *without* the flag documentation.
To get around this, we have an opt-in version of the same thing that documents
both the flag and its inverse.
"""
def __init__(self, *args, **kwargs):
super(StoreTrueFalseAction, self).__init__(*args, default=None, **kwargs)
def StoreFilePathAndContentsAction(binary=False):
"""Returns Action that stores both file content and file path.
Args:
binary: boolean, whether or not this is a binary file.
Returns:
An argparse action.
"""
class Action(argparse.Action):
"""Stores both file content and file path.
Stores file contents under original flag DEST and stores file path under
DEST_path.
"""
def __init__(self, *args, **kwargs):
super(Action, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, value, option_string=None):
"""Stores the contents of the file and the file name in namespace."""
try:
content = console_io.ReadFromFileOrStdin(value, binary=binary)
except files.Error as e:
raise ArgumentTypeError(e)
setattr(namespace, self.dest, content)
new_dest = '{}_path'.format(self.dest)
setattr(namespace, new_dest, value)
return Action
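# Usage sketch (assumption): the returned Action class stores the file contents under
# the flag's DEST and the original path under DEST_path, e.g.
#
#   parser.add_argument('--key-file', action=StoreFilePathAndContentsAction(binary=True))
#   # args.key_file      -> bytes read from the file
#   # args.key_file_path -> the path string passed on the command line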
| [
"[email protected]"
] | |
88c38efa8ff0a8056b6fc309011e034888426fa0 | 26acc7e23024098661a42da37e2cb4ed56c21b44 | /dgp/genera/load/loader.py | daf5ca8acee012f9dd328fd48ef0fb2baf85a38a | [
"MIT"
] | permissive | dataspot/dgp | 80536c0e296570c109511de3dae6e0297bb8b0fd | e86d604c8af5534985f9b788ba809facbc325152 | refs/heads/master | 2023-03-16T05:15:38.362702 | 2023-03-09T07:07:28 | 2023-03-09T07:07:28 | 169,378,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | import os
import json
import requests
from hashlib import md5
from dataflows import Flow, load, dump_to_path
from dataflows.base.schema_validator import ignore
from ...core import BaseDataGenusProcessor, Required, Validator, ConfigurableDGP
from .analyzers import FileFormatDGP, StructureDGP
from ...config.consts import CONFIG_URL, CONFIG_PUBLISH_ALLOWED, RESOURCE_NAME
from ...config.log import logger
class LoaderDGP(BaseDataGenusProcessor):
PRE_CHECKS = Validator(
Required(CONFIG_URL, 'Source data URL or path')
)
def init(self):
self.steps = self.init_classes([
FileFormatDGP,
StructureDGP,
])
def hash_key(self, *args):
data = json.dumps(args, sort_keys=True, ensure_ascii=False)
return md5(data.encode('utf8')).hexdigest()
def flow(self):
if len(self.errors) == 0:
config = self.config._unflatten()
source = config['source']
ref_hash = self.hash_key(source, config['structure'], config.get('publish'))
cache_path = os.path.join('.cache', ref_hash)
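            # The cache key above hashes the source definition, structure params and
            # publish flag, so any configuration change points at a fresh cache dir.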
datapackage_path = os.path.join(cache_path, 'datapackage.json')
structure_params = self.context._structure_params()
http_session = self.context.http_session()
loader = load(source.pop('path'), validate=False,
name=RESOURCE_NAME,
**source, **structure_params,
http_session=http_session,
http_timeout=120,
infer_strategy=load.INFER_PYTHON_TYPES,
cast_strategy=load.CAST_DO_NOTHING,
limit_rows=(
None
if self.config.get(CONFIG_PUBLISH_ALLOWED)
else 5000
))
if self.config.get(CONFIG_PUBLISH_ALLOWED):
return Flow(
loader,
)
else:
if not os.path.exists(datapackage_path):
logger.info('Caching source data into %s', cache_path)
Flow(
loader,
dump_to_path(cache_path, validator_options=dict(on_error=ignore)),
# printer(),
).process()
logger.info('Using cached source data from %s', cache_path)
return Flow(
load(datapackage_path, resources=RESOURCE_NAME),
)
class PostLoaderDGP(ConfigurableDGP):
def init(self):
super().init('loading', per_taxonomy=False)
self._flows = None
class PreLoaderDGP(ConfigurableDGP):
def init(self):
super().init('preloading', per_taxonomy=False)
self._flows = None
| [
"[email protected]"
] | |
449a5b4d464ce12c138b35ee87635fe1817540fc | 13d3a44447f6a7d8b0d61c2fb445fa6aa76c2f95 | /stackdio/core/viewsets.py | 3708da69f32348e2a5e6effb26d7be236dfe77f5 | [
"Apache-2.0"
] | permissive | stackdio/stackdio | 6ba4ad6c2ef10a323cbd955e6d6d5bd7917c17c2 | 84be621705031d147e104369399b872d5093ef64 | refs/heads/master | 2021-04-09T16:36:38.220557 | 2018-08-13T18:25:29 | 2018-08-13T18:25:29 | 17,679,603 | 9 | 11 | Apache-2.0 | 2020-03-19T17:21:45 | 2014-03-12T19:02:06 | Python | UTF-8 | Python | false | false | 13,461 | py | # -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.models import Group
from django.http import Http404
from guardian.shortcuts import get_groups_with_perms, get_users_with_perms, remove_perm
from rest_framework import viewsets
from rest_framework.serializers import ListField, SlugRelatedField, ValidationError
from stackdio.api.users.models import get_user_queryset
from stackdio.core import fields, mixins, serializers
from stackdio.core.config import StackdioConfigException
from stackdio.core.permissions import StackdioPermissionsModelPermissions
from stackdio.core.shortcuts import get_groups_with_model_perms, get_users_with_model_perms
try:
from django_auth_ldap.backend import LDAPBackend
except ImportError:
LDAPBackend = None
logger = logging.getLogger(__name__)
def _filter_perms(available_perms, perms):
ret = []
for perm in perms:
if perm in available_perms:
ret.append(perm)
return ret
class UserSlugRelatedField(SlugRelatedField):
def to_internal_value(self, data):
try:
return super(UserSlugRelatedField, self).to_internal_value(data)
except ValidationError:
if settings.LDAP_ENABLED:
if LDAPBackend is None:
raise StackdioConfigException('LDAP is enabled, but django_auth_ldap isn\'t '
'installed. Please install django_auth_ldap')
# Grab the ldap user and try again
user = LDAPBackend().populate_user(data)
if user is not None:
return super(UserSlugRelatedField, self).to_internal_value(data)
# Nothing worked, just re-raise the exception
raise
class StackdioBasePermissionsViewSet(mixins.BulkUpdateModelMixin, viewsets.ModelViewSet):
"""
Viewset for creating permissions endpoints
"""
user_or_group = None
model_or_object = None
lookup_value_regex = r'[\w.@+-]+'
parent_lookup_field = 'pk'
parent_lookup_url_kwarg = None
def get_model_name(self):
raise NotImplementedError('`get_model_name()` must be implemented.')
def get_app_label(self):
raise NotImplementedError('`get_app_label()` must be implemented.')
def get_serializer_class(self):
user_or_group = self.get_user_or_group()
model_or_object = self.get_model_or_object()
model_name = self.get_model_name()
app_label = self.get_app_label()
super_cls = self.switch_model_object(serializers.StackdioModelPermissionsSerializer,
serializers.StackdioObjectPermissionsSerializer)
default_parent_lookup_url_kwarg = 'parent_{}'.format(self.parent_lookup_field)
url_field_kwargs = {
'view_name': 'api:{0}:{1}-{2}-{3}-permissions-detail'.format(
app_label,
model_name,
model_or_object,
user_or_group
),
'permission_lookup_field': self.lookup_field,
'permission_lookup_url_kwarg': self.lookup_url_kwarg or self.lookup_field,
'lookup_field': self.parent_lookup_field,
'lookup_url_kwarg': self.parent_lookup_url_kwarg or default_parent_lookup_url_kwarg,
}
url_field_cls = self.switch_model_object(
fields.HyperlinkedModelPermissionsField,
fields.HyperlinkedObjectPermissionsField,
)
# Create a class
class StackdioUserPermissionsSerializer(super_cls):
user = UserSlugRelatedField(slug_field='username', queryset=get_user_queryset())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'user'
class StackdioGroupPermissionsSerializer(super_cls):
group = SlugRelatedField(slug_field='name', queryset=Group.objects.all())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'group'
return self.switch_user_group(StackdioUserPermissionsSerializer,
StackdioGroupPermissionsSerializer)
def get_user_or_group(self):
assert self.user_or_group in ('user', 'group'), (
"'%s' should include a `user_or_group` attribute that is one of 'user' or 'group'."
% self.__class__.__name__
)
return self.user_or_group
def switch_user_group(self, if_user, if_group):
return {
'user': if_user,
'group': if_group,
}.get(self.get_user_or_group())
def get_model_or_object(self):
assert self.model_or_object in ('model', 'object'), (
"'%s' should include a `model_or_object` attribute that is one of 'model' or 'object'."
% self.__class__.__name__
)
return self.model_or_object
def switch_model_object(self, if_model, if_object):
return {
'model': if_model,
'object': if_object,
}.get(self.get_model_or_object())
def _transform_perm(self, model_name):
def do_tranform(item):
# pylint: disable=unused-variable
perm, sep, empty = item.partition('_' + model_name)
return perm
return do_tranform
def get_object(self):
queryset = self.get_queryset()
url_kwarg = self.lookup_url_kwarg or self.lookup_field
name_attr = self.switch_user_group('username', 'name')
for obj in queryset:
auth_obj = obj[self.get_user_or_group()]
if self.kwargs[url_kwarg] == getattr(auth_obj, name_attr):
return obj
raise Http404('No permissions found for %s' % self.kwargs[url_kwarg])
class StackdioModelPermissionsViewSet(StackdioBasePermissionsViewSet):
model_cls = None
model_or_object = 'model'
permission_classes = (StackdioPermissionsModelPermissions,)
def get_model_cls(self):
assert self.model_cls, (
"'%s' should include a `model_cls` attribute or override the `get_model_cls()` method."
% self.__class__.__name__
)
return self.model_cls
def get_model_name(self):
return self.get_model_cls()._meta.model_name
def get_app_label(self):
ret = self.get_model_cls()._meta.app_label
if ret == 'auth':
# one-off thing, since users/groups are in the `users` app, not `auth`
return 'users'
return ret
def get_model_permissions(self):
return getattr(self.get_model_cls(),
'model_permissions',
getattr(self, 'model_permissions', ()))
def get_permissions(self):
"""
Instantiates and returns the list of permissions that this view requires.
"""
ret = []
for permission_cls in self.permission_classes:
permission = permission_cls()
# Inject our model_cls into the permission
if isinstance(permission, StackdioPermissionsModelPermissions) \
and permission.model_cls is None:
permission.model_cls = self.model_cls
ret.append(permission)
return ret
def get_queryset(self): # pylint: disable=method-hidden
model_cls = self.get_model_cls()
model_name = model_cls._meta.model_name
model_perms = self.get_model_permissions()
# Grab the perms for either the users or groups
perm_map_func = self.switch_user_group(
lambda: get_users_with_model_perms(model_cls, attach_perms=True,
with_group_users=False),
lambda: get_groups_with_model_perms(model_cls, attach_perms=True),
)
# Do this as a function so we don't fetch both the user AND group permissions on each
# request
perm_map = perm_map_func()
ret = []
sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
for auth_obj, perms in sorted_perms:
new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
ret.append({
self.get_user_or_group(): auth_obj,
'permissions': _filter_perms(model_perms, new_perms),
})
return ret
def list(self, request, *args, **kwargs):
response = super(StackdioModelPermissionsViewSet, self).list(request, *args, **kwargs)
# add available permissions to the response
response.data['available_permissions'] = sorted(self.get_model_permissions())
return response
def perform_create(self, serializer):
serializer.save(model_cls=self.get_model_cls())
def perform_update(self, serializer):
serializer.save(model_cls=self.get_model_cls())
def perform_destroy(self, instance):
model_cls = self.get_model_cls()
app_label = model_cls._meta.app_label
model_name = model_cls._meta.model_name
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name),
instance[self.get_user_or_group()])
class StackdioModelUserPermissionsViewSet(StackdioModelPermissionsViewSet):
user_or_group = 'user'
lookup_field = 'username'
lookup_url_kwarg = 'username'
class StackdioModelGroupPermissionsViewSet(StackdioModelPermissionsViewSet):
user_or_group = 'group'
lookup_field = 'name'
lookup_url_kwarg = 'groupname'
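# Illustrative sketch (assumption, not part of this module): a concrete API would
# typically subclass these viewsets and point them at its own model, e.g.
#
#   class StackModelUserPermissionsViewSet(StackdioModelUserPermissionsViewSet):
#       model_cls = Stack  # hypothetical model that defines `model_permissions`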
class StackdioObjectPermissionsViewSet(StackdioBasePermissionsViewSet):
"""
Viewset for creating permissions endpoints
"""
model_or_object = 'object'
def get_permissioned_object(self):
raise NotImplementedError('`get_permissioned_object()` must be implemented.')
def get_model_name(self):
return self.get_permissioned_object()._meta.model_name
def get_app_label(self):
ret = self.get_permissioned_object()._meta.app_label
if ret == 'auth':
# one-off thing, since users/groups are in the `users` app, not `auth`
return 'users'
return ret
def get_object_permissions(self):
return getattr(self.get_permissioned_object(),
'object_permissions',
getattr(self, 'object_permissions', ()))
def get_queryset(self): # pylint: disable=method-hidden
obj = self.get_permissioned_object()
model_name = obj._meta.model_name
object_perms = self.get_object_permissions()
# Grab the perms for either the users or groups
perm_map_func = self.switch_user_group(
lambda: get_users_with_perms(obj, attach_perms=True,
with_superusers=False, with_group_users=False),
lambda: get_groups_with_perms(obj, attach_perms=True),
)
perm_map = perm_map_func()
ret = []
sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
for auth_obj, perms in sorted_perms:
new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
ret.append({
self.get_user_or_group(): auth_obj,
'permissions': _filter_perms(object_perms, new_perms),
})
return ret
def list(self, request, *args, **kwargs):
response = super(StackdioObjectPermissionsViewSet, self).list(request, *args, **kwargs)
# add available permissions to the response
response.data['available_permissions'] = sorted(self.get_object_permissions())
return response
def perform_create(self, serializer):
serializer.save(object=self.get_permissioned_object())
def perform_update(self, serializer):
serializer.save(object=self.get_permissioned_object())
def perform_destroy(self, instance):
obj = self.get_permissioned_object()
app_label = obj._meta.app_label
model_name = obj._meta.model_name
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name),
instance[self.get_user_or_group()],
obj)
# pylint: disable=abstract-method
class StackdioObjectUserPermissionsViewSet(StackdioObjectPermissionsViewSet):
user_or_group = 'user'
lookup_field = 'username'
lookup_url_kwarg = 'username'
class StackdioObjectGroupPermissionsViewSet(StackdioObjectPermissionsViewSet):
user_or_group = 'group'
lookup_field = 'name'
lookup_url_kwarg = 'groupname'
| [
"[email protected]"
] | |
56441abdcb0c3e4c4bc9e6e51c066f53b4474751 | d09fd96bbc931fbb8522e5c991973f064a4ded50 | /baxter/devel/lib/python2.7/dist-packages/baxter_core_msgs/msg/_AssemblyStates.py | e02cc99b32b08bd3ff9a2b09c7d226c451abe8d2 | [] | no_license | rymonyu/EE4-Robotics | b3827ba0dff5bdfdd1e47fe07a40e955c5226f38 | 6cf9272abd7fe8a074dc74a032f6e0b35edb8548 | refs/heads/master | 2020-08-22T15:09:39.706809 | 2019-12-15T23:35:45 | 2019-12-15T23:35:45 | 216,420,098 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | /home/rob/baxter/devel/.private/baxter_core_msgs/lib/python2.7/dist-packages/baxter_core_msgs/msg/_AssemblyStates.py | [
"[email protected]"
] | |
1be87f33c8660ad3c54efa5eb9f2ada26d9a1e6b | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_PYTHON/DATA_STRUC_PYTHON_NOTES/python-prac/mini-scripts/python_NumPy_Products_1.txt.py | d720b0dffc122f94e2aecfe055e2ab67998d23f1 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 77 | py | import numpy as np
arr = np.array([1, 2, 3, 4])
x = np.prod(arr)
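# np.prod multiplies all elements of the array, so x == 1 * 2 * 3 * 4 == 24 here.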
print(x)
| [
"[email protected]"
] | |
a0bcf1146515c5da0c64441490de32599b91f02e | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/12/01/14.py | fd0cb48a6c9d059526138c98e8ba82d309f6802b | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | #!/usr/bin/python2
### Google Code Jam template
# Futures
from __future__ import division
from __future__ import with_statement
from __future__ import print_function
## Library
# @memoized
def memoized(func):
mem = {}
def wrapped(*args):
if args not in mem:
mem[args] = func(*args)
return mem[args]
return wrapped
## Setup
from os.path import basename, splitext
# Task letter
TASK=splitext(basename(__file__))[0]
print("Task {}".format(TASK))
## Input templates
# Line as int
#int(infile.readline())
# Line as many ints
#(int(s) for s in infile.readline().split())
## Precalculation
print("Precalculation...")
from string import maketrans
src = """aozq
ejp mysljylc kd kxveddknmc re jsicpdrysi
rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd
de kr kd eoya kw aej tysr re ujdr lkgc jv"""
dst = """yeqz
our language is impossible to understand
there are twenty six factorial possibilities
so it is okay if you want to just give up"""
table = maketrans(src, dst)
print("Precalculation done.")
## Calculation
print("Calculation...")
with open(TASK+".in") as infile:
with open(TASK+".out",mode="wt") as outfile:
cases = int(infile.readline())
for ncase in range(cases):
print("Case #{nc}".format(nc=ncase+1))
# Perform all nessesary calculation
text = infile.readline().strip()
data = text.translate(table)
outfile.write("Case #{nc}: {data}\n".format(nc=ncase+1,data=data))
print("Calculation done.")
| [
"[email protected]"
] | |
5efd766bb70d94a197cb80cb858d7211c005cb27 | 4de2b914e4607dd0ca7eec60b21026af6b6c4797 | /Old_work/valdambrini_cheli_papallo_tarmati/catkin_ws/build/navigation/clear_costmap_recovery/catkin_generated/pkg.develspace.context.pc.py | cb8deb76dfb119ed5c90cb0df8ac2a426a6fc434 | [] | no_license | ABiondi12/project_sgn | 5203d21f2753dcdf7c53b153324dc75bc1221549 | 570b7be0b01e7c83cb927945e532d6a2213ebf65 | refs/heads/main | 2023-06-18T12:59:18.337096 | 2021-07-21T10:27:08 | 2021-07-21T10:27:08 | 307,121,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/robot/catkin_ws/src/navigation/map_server/include".split(';') if "/home/robot/catkin_ws/src/navigation/map_server/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nav_msgs;tf2".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmap_server_image_loader".split(';') if "-lmap_server_image_loader" != "" else []
PROJECT_NAME = "map_server"
PROJECT_SPACE_DIR = "/home/robot/catkin_ws/devel"
PROJECT_VERSION = "1.16.2"
| [
"[email protected]"
] | |
c34f28b064723496b9f76b20880853d1b861b23c | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/aio/operations/_tables_operations.py | 1cfeb4326088fb395f5d11a07bd548a359826a3a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 32,472 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union, cast
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._tables_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_workspace_request, build_migrate_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TablesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.loganalytics.aio.LogAnalyticsManagementClient`'s
:attr:`tables` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_workspace(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> AsyncIterable[_models.TablesListResult]:
"""Gets all the tables for the specified Log Analytics workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TablesListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.loganalytics.models.TablesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.TablesListResult]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_workspace_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
template_url=self.list_by_workspace.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_by_workspace_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TablesListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_workspace.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables"} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
parameters: _models.Table,
**kwargs: Any
) -> Optional[_models.Table]:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[_models.Table]]
_json = self._serialize.body(parameters, 'Table')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
parameters: _models.Table,
**kwargs: Any
) -> AsyncLROPoller[_models.Table]:
"""Update or Create a Log Analytics workspace table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:param parameters: The parameters required to update table properties.
:type parameters: ~azure.mgmt.loganalytics.models.Table
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Table or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.Table]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.Table]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(
lro_delay,
lro_options={'final-state-via': 'azure-async-operation'},
**kwargs
)) # type: AsyncPollingMethod
elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
parameters: _models.Table,
**kwargs: Any
) -> Optional[_models.Table]:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[_models.Table]]
_json = self._serialize.body(parameters, 'Table')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
parameters: _models.Table,
**kwargs: Any
) -> AsyncLROPoller[_models.Table]:
"""Update a Log Analytics workspace table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:param parameters: The parameters required to update table properties.
:type parameters: ~azure.mgmt.loganalytics.models.Table
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Table or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.loganalytics.models.Table]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.Table]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(
lro_delay,
lro_options={'final-state-via': 'azure-async-operation'},
**kwargs
)) # type: AsyncPollingMethod
elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
**kwargs: Any
) -> _models.Table:
"""Gets a Log Analytics workspace table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Table, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.Table
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.Table]
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
template_url=self.get.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a Log Analytics workspace table.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
cls=lambda x,y,z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(
lro_delay,
lro_options={'final-state-via': 'azure-async-operation'},
**kwargs
)) # type: AsyncPollingMethod
elif polling is False: polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}"} # type: ignore
@distributed_trace_async
async def migrate( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
workspace_name: str,
table_name: str,
**kwargs: Any
) -> None:
"""Migrate a Log Analytics table from support of the Data Collector API and Custom Fields features
to support of Data Collection Rule-based Custom Logs.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param table_name: The name of the table.
:type table_name: str
:keyword api_version: Api Version. Default value is "2021-12-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-12-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_migrate_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
table_name=table_name,
api_version=api_version,
template_url=self.migrate.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
migrate.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/tables/{tableName}/migrate"} # type: ignore
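# Usage sketch (assumption, not generated code): these operations are reached through
# the async client's `tables` attribute, e.g.
#
#   client = LogAnalyticsManagementClient(credential, subscription_id)
#   table = await client.tables.get(resource_group_name, workspace_name, table_name)
#
# `credential` and the three name arguments are placeholders for the example.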
| [
"[email protected]"
] | |
0a948854e027da6e1d313f2c60f11f0684e5b0f2 | e7e497b20442a4220296dea1550091a457df5a38 | /main_project/AdHot/monitorsystem/monitorsystem/controllers/zoom_graph.py | 4e70d23eb63430c0430abba719d0b4142562c92e | [] | no_license | gunner14/old_rr_code | cf17a2dedf8dfcdcf441d49139adaadc770c0eea | bb047dc88fa7243ded61d840af0f8bad22d68dee | refs/heads/master | 2021-01-17T18:23:28.154228 | 2013-12-02T23:45:33 | 2013-12-02T23:45:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,518 | py | import logging
import rrdtool
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from monitorsystem.lib.base import BaseController, render
from monitorsystem.lib.app_globals import Globals as g
from monitorsystem.lib.get_chart_info import GetAllCatagory
log = logging.getLogger(__name__)
class ZoomGraphController(BaseController):
def index(self):
# Return a rendered template
#return render('/zoom_graph.mako')
# or, return a string
return 'Hello World'
def zoomin(self, dbname, datasource, resolution, title, points, limit, description):
limit = int(limit)
img_path = "/data/xce/pylons/monitorsystem/monitorsystem/public/img/" + str(dbname) + "_big.png"
        rrd_path = "/data/xce/monitor/data/" + str(dbname) + ".rrd"
        title = str(title)
        font = "TITLE:10:/data/xce/monitor/fonts/simhei.ttf"
        passed_time = 60 * int(points)
        start = "now-" + str(passed_time)
        datasource = str(datasource)
        resolution = str(resolution)
        rra1_points = 1200
ds_def_1 = "DEF:value1=" + rrd_path + ":" + datasource + ":AVERAGE"
ds_def_2 = "DEF:value2=" + rrd_path + ":" + datasource + ":MAX"
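        # The CDEFs below use rrdtool's RPN syntax: samples on the alarm side of
        # the limit (above it for a positive limit, below |limit| for a negative
        # one) keep their value while the rest become UNKN, so the offending
        # range can be drawn in red on top of the normal series.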
if(limit > 0):
c_def_1 = "CDEF:value3=value1," + str(limit) + ",GT,value1,UNKN,IF"
c_def_2 = "CDEF:value4=value2," + str(limit) + ",GT,value2,UNKN,IF"
elif(limit < 0):
c_def_1 = "CDEF:value3=value1," + str(-limit) + ",LT,value1,UNKN,IF"
c_def_2 = "CDEF:value4=value2," + str(-limit) + ",LT,value2,UNKN,IF"
graph_def_1 = "AREA:value1#00FF00:average"
graph_def_2 = "LINE1:value2#0000FF:max"
graph_def_3 = "AREA:value3#FF0000:warning "
graph_def_4 = "LINE1:value4#FF0000"
width = "500"
height = "400"
comments = "COMMENT:Average--------------MAX--------------MIN-------------- "
g_print_1 = "GPRINT:value1:AVERAGE:%1.2lf"
g_print_2 = "GPRINT:value1:MAX:%18.2lf"
g_print_3 = "GPRINT:value1:MIN:%15.2lf"
if(limit == 0):
if(int(points) <= rra1_points):
ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, ds_def_1, graph_def_1, comments, g_print_1, g_print_2, g_print_3)
else:
ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, "--vertical-label=", ds_def_1, ds_def_2, graph_def_1, graph_def_2, comments, g_print_1, g_print_2, g_print_3)
else:
if(int(points) <= rra1_points):
ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, ds_def_1, c_def_1, graph_def_1, graph_def_3, comments, g_print_1, g_print_2, g_print_3)
else:
ret = rrdtool.graph(img_path, "-w", width, "-h", height, "-n", font, "-t", title, "-S", resolution, "--start", start, "--vertical-label=", ds_def_1, ds_def_2, c_def_1, c_def_2, graph_def_1, graph_def_2, graph_def_3, graph_def_4)
c.img_path = "img/" + str(dbname) + "_big.png"
c.description = description
        c.catalist = GetAllCatagory()
return render('/zoom.mako')
# return "Viewing " + str(dbname) + " " + str(resolution) + " " + str(points) + " " + str(limit)
| [
"[email protected]"
] | |
d1a37f55af2498bbddef30e64ab5cf173cdc0d1e | 7f2612e5132e1583e5ba9758f299a8f301f0dc70 | /FB/257-binary-tree-paths.py | eada330e2dcde3095e5ceb1523f68ee52d0cba47 | [] | no_license | taeheechoi/coding-practice | 380e263a26ed4de9e542c51e3baa54315127ae4f | 9528b5e85b0ea2960c994ffea62b5be86481dc38 | refs/heads/main | 2022-07-09T11:22:18.619712 | 2022-06-28T14:55:51 | 2022-06-28T14:55:51 | 447,082,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # https://leetcode.com/problems/binary-tree-paths/
# Input: root = [1,2,3,null,5]
# Output: ["1->2->5","1->3"]
# Example 2:
# Input: root = [1]
# Output: ["1"]
class Solution:
    def binaryTreePaths(self, root):
        elements = []
        def dfs(node, s):
            # Record the full root-to-leaf path once a leaf is reached.
            if not node: return
            if node.left is None and node.right is None:
                elements.append(s + str(node.val))
                return
            s += str(node.val) + '->'
            dfs(node.left, s)
            dfs(node.right, s)
dfs(root, '')
return elements
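# Minimal local check (illustrative only; LeetCode normally supplies TreeNode):
#   class TreeNode:
#       def __init__(self, val=0, left=None, right=None):
#           self.val, self.left, self.right = val, left, right
#   root = TreeNode(1, TreeNode(2, None, TreeNode(5)), TreeNode(3))
#   print(Solution().binaryTreePaths(root))  # ['1->2->5', '1->3']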
| [
"[email protected]"
] | |
ebe6ba66f1743f17d66488c547d62eb1dd646dc3 | a972c5de4144940d1c5903bb5636df4bcaf4b283 | /ejerciciokenwin/__init__.py | bcbe09dd494756b2f4afdb3392ceb03bc3b19d99 | [] | no_license | apocalipsys/ejerciciopyramid-2020 | 5dafe2926bb78338eb1eca17d2be8f6ef2eba8fa | 2411601f4e2e0dd9aa49951251f9acfe73d43777 | refs/heads/master | 2020-12-21T00:51:38.700245 | 2020-02-07T07:24:51 | 2020-02-07T07:24:51 | 236,258,661 | 3 | 1 | null | 2020-02-05T06:29:14 | 2020-01-26T02:42:08 | Python | UTF-8 | Python | false | false | 802 | py | #This is a config file, necesary to include the views, moludes, models and so on
# This configuration file is used to include the views, database models, routes, security and other modules of the application.
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
import os
static_dir = os.path.abspath(os.path.dirname(__file__))
def main(global_config, **settings):
my_session_factory = SignedCookieSessionFactory(
'itsaseekreet')
with Configurator(settings=settings,session_factory=my_session_factory) as config:
config.include('.models')
config.include('pyramid_jinja2')
#config.add_jinja2_renderer('.html')
config.include('.security')
config.include('.routes')
config.scan('.views')
return config.make_wsgi_app()
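# Illustrative wiring (assumption, not shown in this file): a PasteDeploy ini such
# as development.ini points its [app:main] section at this package, so running
# `pserve development.ini` invokes main() above to build the WSGI application.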
| [
"[email protected]"
] | |
05316d88a35289d491a107f6328cede2a1c6eb9f | 4eaa1b9b08914e0a2cc9276363e489ccef19d3a2 | /ch9/electric_car.py | a3903742781c7a56beb7c524f66ba35a4bb8f545 | [] | no_license | melihcanyardi/Python-Crash-Course-2e-Part-I | 69b3b5b3f63cdbd7be6fabd6d4f2ddfd9a3434a3 | 0c9b250f512985c04b2c0397f3afaa8bf3a57f17 | refs/heads/main | 2023-03-12T21:43:14.012537 | 2021-03-03T19:23:41 | 2021-03-03T19:23:41 | 344,236,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | from car import Car
class Battery():
"""A simple attempt to model a battery for an electric car."""
def __init__(self, battery_size=75):
"""Initialize the battery's attributes."""
self.battery_size = battery_size
def describe_battery(self):
"""Print a statement describing the battery size."""
print(f"This car has a {self.battery_size}-kWh battery.")
def get_range(self):
"""Print a statement about the range this battery provides."""
if self.battery_size == 75:
range = 260
elif self.battery_size == 100:
range = 315
print(f"This car can go about {range} miles on a full charge.")
class ElectricCar(Car):
"""Represent aspects of a car, specific to electric vehicles."""
def __init__(self, make, model, year):
"""
Initialize attributes of the parent class.
Then initialize attributes specific to an electric car.
"""
super().__init__(make, model, year)
self.battery = Battery()
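# Illustrative usage (not part of the module):
#   my_tesla = ElectricCar('tesla', 'model s', 2019)
#   my_tesla.battery.describe_battery()  # -> This car has a 75-kWh battery.
#   my_tesla.battery.get_range()         # -> This car can go about 260 miles on a full charge.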
| [
"[email protected]"
] | |
9878aa70c6c4979e347708410e1b4bfdc95469e0 | f4fbeb3b1f91043b82d1aacf7b40608644b4130e | /tensorflow/python/keras/_impl/keras/layers/normalization_test.py | 84f0b2776c9980e0bdc00c173b275604ce16697a | [
"Apache-2.0"
] | permissive | angelmorinigo/tensorflow | 0609a99122315ef466bfb1f8e5334b45361b9d29 | 3176ba990070cdde62b7cdf81747d70107d2e032 | refs/heads/master | 2020-03-17T15:33:47.145977 | 2018-05-16T16:58:05 | 2018-05-16T16:58:05 | 133,715,400 | 1 | 0 | Apache-2.0 | 2018-05-16T19:45:02 | 2018-05-16T19:45:02 | null | UTF-8 | Python | false | false | 8,571 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
class NormalizationLayersTest(test.TestCase):
def test_basic_batchnorm(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
def test_batchnorm_weights(self):
with self.test_session():
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
def test_batchnorm_regularization(self):
with self.test_session():
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
def test_batchnorm_correctness(self):
with self.test_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
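      # Undo the learned shift/scale; what remains should be the normalized
      # activations, i.e. approximately zero mean and unit variance.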
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
def test_batchnorm_convnet_channel_last(self):
with self.test_session():
# keras.backend.set_learning_phase(True)
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))
np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
def test_shared_batchnorm(self):
"""Test that a BN layer can be shared across different data streams.
"""
with self.test_session():
# Test single layer reuse
bn = keras.layers.BatchNormalization()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1)
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = keras.models.Model(x2, y2)
model.compile('sgd', 'mse')
model.train_on_batch(x, x)
self.assertEqual(len(bn.updates), 4)
self.assertEqual(len(model.updates), 2)
self.assertEqual(len(model.get_updates_for(x1)), 0)
self.assertEqual(len(model.get_updates_for(x2)), 2)
# Test model-level reuse
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
new_model = keras.models.Model(x3, y3, name='new_model')
self.assertEqual(len(new_model.updates), 2)
self.assertEqual(len(model.updates), 4)
self.assertEqual(len(new_model.get_updates_for(x3)), 2)
new_model.compile('sgd', 'mse')
new_model.train_on_batch(x, x)
def test_that_trainable_disables_updates(self):
with self.test_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
a = keras.layers.Input(shape=(4,))
layer = keras.layers.BatchNormalization(input_shape=(4,))
b = layer(a)
model = keras.models.Model(a, b)
model.trainable = False
assert not model.updates
model.compile('sgd', 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile('sgd', 'mse')
assert model.updates
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile('sgd', 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_batchnorm_trainable(self):
"""Tests that batchnorm layer is trainable when learning phase is enabled.
Computes mean and std for current inputs then
applies batch normalization using them.
"""
with self.test_session():
bn_mean = 0.5
bn_std = 10.
val_a = np.expand_dims(np.arange(10.), axis=1)
def get_model(bn_mean, bn_std):
inp = keras.layers.Input(shape=(1,))
x = keras.layers.BatchNormalization()(inp)
model1 = keras.models.Model(inp, x)
model1.set_weights([
np.array([1.]),
np.array([0.]),
np.array([bn_mean]),
np.array([bn_std**2])
])
return model1
# Simulates training-mode with trainable layer.
# Should use mini-batch statistics.
keras.backend.set_learning_phase(1)
model = get_model(bn_mean, bn_std)
model.compile(loss='mse', optimizer='rmsprop')
out = model.predict(val_a)
self.assertAllClose(
(val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
4331f303b88abc1007c44aedec54876888a6b860 | 1a74a9ec3e51e7776e5c15e98c66b4cb5a9f8144 | /source/webapp/views/base_views.py | 3bf9e6278427c473397fb2e32f09ab53e41e9079 | [] | no_license | Aitmatow/issue_tracker | d66e47a7f633a455e28a1921c5220c60a4c5907f | 96f482be1251d9c557446bc0bfa0e949cc3129d9 | refs/heads/master | 2022-11-26T19:59:12.929073 | 2019-12-09T12:52:13 | 2019-12-09T12:52:13 | 211,033,057 | 0 | 0 | null | 2022-11-22T04:47:23 | 2019-09-26T07:57:27 | Python | UTF-8 | Python | false | false | 4,098 | py | from django.db.models import Q
from django.shortcuts import get_object_or_404, render, redirect
from django.utils.http import urlencode
from django.views import View
from django.views.generic import TemplateView, ListView
class DetailView(TemplateView):
context_key = 'objects'
model = None
def get_context_data(self, **kwargs):
pk = kwargs.get('pk')
context = super().get_context_data(**kwargs)
context[self.context_key] = get_object_or_404(self.model, pk=pk)
return context
def get_objects(self):
return self.model.objects.all()
class UpdateView(View):
form_class = None
template_name = None
redirect_url = ''
model = None
key_kwarg = 'pk'
context_key = 'object'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.form_class(initial=self.get_form_initial())
context = self.make_context(form)
return render(request, self.template_name, context=context)
def get_form_initial(self):
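        # Build the form's initial data by reflecting over the model's concrete
        # fields and copying each current value from the object being edited.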
model_fields = [field.name for field in self.model._meta.fields]
initial = {}
for field in model_fields:
initial[field] = getattr(self.object, field)
print(initial)
return initial
    def post(self, request, *args, **kwargs):
        # Fetch the object up front so that both form_valid and form_invalid
        # (which builds context via make_context) can rely on self.object.
        self.object = self.get_object()
        form = self.form_class(data=request.POST)
        if form.is_valid():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
def form_valid(self, form):
self.object = self.get_object()
for field, value in form.cleaned_data.items():
setattr(self.object, field, value)
self.object.save()
return redirect(self.get_redirect_url())
def form_invalid(self, form):
context = self.make_context(form)
return render(self.request, self.template_name, context=context)
def get_object(self):
pk = self.kwargs.get(self.key_kwarg)
return get_object_or_404(self.model, pk=pk)
def make_context(self, form):
return {
'form': form,
self.context_key: self.object
}
def get_redirect_url(self):
return self.redirect_url
class DeleteView(View):
template_name = None
model = None
redirect_url = None
confirmation_for_delete = None
def get(self, request, *args, **kwargs):
object = get_object_or_404(self.model, pk=kwargs.get('pk'))
if self.confirmation_for_delete == True:
context = {'object': object}
return render(self.request, self.template_name, context)
else:
object.delete()
return redirect(self.get_redirect_url())
def post(self, request, *args, **kwargs):
object = get_object_or_404(self.model, pk = kwargs.get('pk'))
object.delete()
return redirect(self.get_redirect_url())
def get_redirect_url(self):
return self.redirect_url
class SearchView(ListView):
template_name = None
model = None
paginate_by = 10
paginate_orphans = 1
page_kwarg = 'page'
form = None
def get(self, request, *args, **kwargs):
self.form = self.get_search_form()
self.search_value = self.get_search_value()
return super().get(request, *args, **kwargs)
def get_search_form(self):
return self.form(data=self.request.GET)
def get_search_value(self):
if self.form.is_valid():
return self.form.cleaned_data['search']
return None
def get_queryset(self):
queryset = super().get_queryset()
if self.search_value:
queryset = queryset.filter(
self.get_query()
)
return queryset
def get_query(self):
pass
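    # Subclasses are expected to override get_query() and return a Q object,
    # e.g. (illustrative only, field names are placeholders):
    #   def get_query(self):
    #       return Q(summary__icontains=self.search_value) | \
    #              Q(description__icontains=self.search_value)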
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(object_list=object_list, **kwargs)
context['form'] = self.form
if self.search_value:
context['query'] = urlencode({'search' : self.search_value})
return context | [
"[email protected]"
] | |
466c50bd91fc4be61abb950479c4d47fb1041ed9 | 8ed80561e1b3c0bcdb6201cae8af845d5da23edc | /guppe/exercicios_secao_8/ex_10.py | 62a551fc304de9116280ee458a9d1eaa9680822e | [] | no_license | Fulvio7/curso-python-guppe | 42d5a1ecd80c1f3b27dc3f5dad074a51c9b774eb | 98966963f698eb33e65ed58a84f96e28f675848a | refs/heads/main | 2023-08-28T13:31:12.916407 | 2021-10-09T19:03:17 | 2021-10-09T19:03:17 | 415,393,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
10- Write a function that receives two numbers and returns which of them
is larger.
"""
def retorna_maior(n1, n2):
    if n1 > n2:
        return f'{n1} is larger.'
    elif n2 > n1:
        return f'{n2} is larger.'
    return 'The two numbers are equal! XP '
print('Find the larger number')
num1 = int(input('Num1 = '))
num2 = int(input('Num2 = '))
print(retorna_maior(num1, num2))
| [
"[email protected]"
] | |
3518fd2cc14d2ce5ab3297741d351dc4635fe976 | 5a04919d14d3f34815987a202d520609c17cc605 | /problems/forritun/hefja_i_veldi/tests/gen.py | 1397acd8903770e81c30e87255b3e79a5210b121 | [] | no_license | SuprDewd/forritunarverkefni | 49e9864c6efaa192747b3f004f79a53a4519c10a | 702819f9fa2d106fede4ff2284a00b5141662493 | refs/heads/master | 2021-01-15T11:14:44.316308 | 2014-08-09T15:09:09 | 2014-08-09T15:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | import math
tests = [ (10, 4), (10, -4), (15, -1), (1, 4), (0, 3), (1, -2), (-1, 2), (-1, 3), (-1, 0), (13, 2), (-13, 2), (13, 3), (-13, 3), (-5, -6) ]
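# Each tuple is (base, exponent); the loop below writes a T<i>.in / T<i>.out
# pair per case, with math.pow(base, exponent) as the expected answer.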
for i, t in enumerate(tests):
with open('T%d.in' % i, 'w') as f: f.write('%d %d\n' % tuple(t))
with open('T%d.out' % i, 'w') as f: f.write('%f\n' % math.pow(t[0], t[1]))
| [
"[email protected]"
] | |
1dfaa8cf11a2d14dd19b5bf31b58f44bf15e34a0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03379/s477030145.py | 93534519416891d22f6c4c276609f50101689a1d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | def main():
n = int(input())
x_lst = list(map(int, input().split()))
x_sorted_lst = sorted(x_lst)
median1 = x_sorted_lst[n // 2 - 1]
median2 = x_sorted_lst[n // 2]
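    # The code assumes n is even (as in the original problem): median1/median2
    # are the two middle values of the sorted input.  Removing an element from
    # the lower half leaves median2 as the median of the remaining n-1 values;
    # removing one from the upper half leaves median1.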
if median1 == median2:
lst = [median1] * n
else:
lst = []
for i in range(n):
x = x_lst[i]
if x <= median1:
lst.append(median2)
elif median2 <= x:
lst.append(median1)
for i in range(n):
print(lst[i])
if __name__ == '__main__':
main() | [
"[email protected]"
] |