Column schema of the data rows that follow (one row per source file; ranges are min to max over the dataset, "nullable" marks columns that may be empty):

| Column | Type | Length / range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 distinct values |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 distinct values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 distinct values |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 distinct values |
| src_encoding | string | 26 distinct values |
| language | string | 1 distinct value |
| is_vendor | bool | 2 distinct values |
| is_generated | bool | 2 distinct values |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 distinct values |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
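
As a quick orientation, the sketch below shows one way rows with this schema could be streamed and inspected with the Hugging Face `datasets` library. It is only an assumption-laden example: the dataset identifier `org/dataset-name` is a placeholder (the real identifier is not given here), and streaming is used so the full dataset is not downloaded up front.

```python
# Minimal sketch, not the canonical loader for this dataset.
# Assumptions: the rows are published as a Hugging Face dataset with the columns
# listed above; "org/dataset-name" is a placeholder identifier.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)

for row in ds:
    # Each row pairs repository/commit metadata with the full source file text.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # preview the first 200 characters of the file
    break
```

If the `is_vendor` and `is_generated` flags are populated as their names suggest, a filter such as `ds.filter(lambda r: not r["is_generated"] and not r["is_vendor"])` would be the natural way to drop vendored or generated files before further processing.
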
9b4cee01a7a4aad6bd4aa41ff11599feddafe8b0 | 14b5679d88afa782dc5d6b35878ab043089a060a | /students/LvTao/20200515/测试鼠标是否移动.py | 317ea2ada7e5968f4d2f40aad82bbbc2832c59da | [] | no_license | mutiangua/EIS2020 | c541ef32623f67f9277945cd39cff3c02f06e4dd | 92aa2711b763a2c93be238825c445bf2db8da391 | refs/heads/master | 2022-11-18T05:21:47.567342 | 2020-07-11T10:11:21 | 2020-07-11T10:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py |
import time
import pyautogui


def pos():
    # Sample the mouse position, then wait one second before returning it.
    pos_mouse = pyautogui.position()
    time.sleep(1)
    return pos_mouse


while True:
    if pos() == pyautogui.position():
        continue
    else:
        x, y = pyautogui.position()
        # Prints "current position X..., Y..." (the string literal is Chinese).
        print('当前位置X{},Y{}'.format(x, y))
| [
"[email protected]"
] | |
74c590dee70d866754a3bfddb67a69646b5796c8 | 7837961d07a64aa1f73d88ed1012ec5e322ab370 | /src/generative_playground/molecules/lean_settings.py | 5af24245d63c36b4de49a57e16e9310343c74414 | [
"MIT"
] | permissive | markharley/generative_playground | 1281f13cc28c43ede9695e3ffa98713e613023d4 | 56e826e5ca453ee19b0d4298ed27b4db5efd6fd9 | refs/heads/master | 2020-05-18T09:50:27.820273 | 2019-05-05T12:03:26 | 2019-05-05T12:03:26 | 184,337,386 | 0 | 0 | null | 2019-04-30T22:01:43 | 2019-04-30T22:01:42 | null | UTF-8 | Python | false | false | 386 | py |
import inspect
import os

# Absolute path of the directory containing this module, used as the root for data file paths.
molecules_root_location = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + '/'


def get_data_location(molecules=True):
    if molecules:
        return {'source_data': molecules_root_location + 'data/250k_rndm_zinc_drugs_clean.smi'}
    else:
        return {'source_data': molecules_root_location + 'data/equation2_15_dataset.txt'}
| [
"[email protected]"
] | |
6af841fb17dd20c39b1e70b06965fed9ff8a9455 | b287c232165bb0d248d619e978f57c864cc36a1c | /leo/plugins/importers/javascript.py | 38fa8e7d788aec325dd87180f3c2c238a1199e35 | [
"BSD-3-Clause",
"MIT"
] | permissive | tbpassin/leo-editor | 39ec8cfc6c35c867b89a21f495b8049b1254bcc6 | 76b60e5c58781f84c86a603b9c50b709250682b8 | refs/heads/master | 2023-08-09T05:05:54.064246 | 2023-03-01T14:12:14 | 2023-03-01T14:12:14 | 247,962,821 | 0 | 0 | NOASSERTION | 2020-03-17T12:22:03 | 2020-03-17T12:22:02 | null | UTF-8 | Python | false | false | 11,266 | py | #@+leo-ver=5-thin
#@+node:ekr.20140723122936.18144: * @file ../plugins/importers/javascript.py
"""The @auto importer for JavaScript."""
import re
from typing import Any, Dict, Generator
from leo.core import leoGlobals as g # Required
from leo.core.leoCommands import Commands as Cmdr
from leo.core.leoNodes import Position
from leo.plugins.importers.linescanner import Importer
#@+others
#@+node:ekr.20140723122936.18049: ** class JS_Importer
class JS_Importer(Importer):
def __init__(self, c: Cmdr) -> None:
"""The ctor for the JS_ImportController class."""
# Init the base class.
super().__init__(c, language='javascript')
#@+others
#@+node:ekr.20161101183354.1: *3* js_i.compute_headline
clean_regex_list1 = [
# (function name (
re.compile(r'\s*\(?(function\b\s*[\w]*)\s*\('),
# name: (function (
re.compile(r'\s*(\w+\s*\:\s*\(*\s*function\s*\()'),
# const|let|var name = .* =>
re.compile(r'\s*(?:const|let|var)\s*(\w+\s*(?:=\s*.*)=>)'),
]
clean_regex_list2 = [
re.compile(r'(.*\=)(\s*function)'), # .* = function
]
clean_regex_list3 = [
re.compile(r'(.*\=\s*new\s*\w+)\s*\(.*(=>)'), # .* = new name .* =>
re.compile(r'(.*)\=\s*\(.*(=>)'), # .* = ( .* =>
re.compile(r'(.*)\((\s*function)'), # .* ( function
re.compile(r'(.*)\(.*(=>)'), # .* ( .* =>
re.compile(r'(.*)(\(.*\,\s*function)'), # .* \( .*, function
]
clean_regex_list4 = [
re.compile(r'(.*)\(\s*(=>)'), # .* ( =>
]
def compute_headline(self, s: str) -> str:
"""Return a cleaned up headline s."""
s = s.strip()
# Don't clean a headline twice.
if s.endswith('>>') and s.startswith('<<'): # pragma: no cover (missing test)
return s
for ch in '{(=':
if s.endswith(ch):
s = s[:-1].strip()
# First regex cleanup. Use \1.
for pattern in self.clean_regex_list1:
m = pattern.match(s)
if m:
s = m.group(1)
break
# Second regex cleanup. Use \1 + \2
for pattern in self.clean_regex_list2:
m = pattern.match(s)
if m:
s = m.group(1) + m.group(2)
break
# Third regex cleanup. Use \1 + ' ' + \2
for pattern in self.clean_regex_list3:
m = pattern.match(s)
if m:
s = m.group(1) + ' ' + m.group(2)
break
# Fourth cleanup. Use \1 + ' ' + \2 again
for pattern in self.clean_regex_list4: # pragma: no cover (mysterious)
m = pattern.match(s)
if m:
s = m.group(1) + ' ' + m.group(2)
break
# Final whitespace cleanups.
s = s.replace(' ', ' ')
s = s.replace(' (', '(')
return g.truncate(s, 100)
#@-others
#@+node:ekr.20200131110322.2: ** JsLexer...
# JsLex: a lexer for Javascript
# Written by Ned Batchelder. Used by permission.
#
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/jslex/src/default/NOTICE.txt
#@+node:ekr.20200131110322.4: *3* class Tok
class Tok:
"""A specification for a token class."""
num = 0
def __init__(self, name: str, regex: str, next: str = None) -> None:
self.id = Tok.num
Tok.num += 1
self.name = name
self.regex = regex
self.next = next
#@+node:ekr.20200131110322.7: *3* class Lexer
class Lexer:
"""A generic multi-state regex-based lexer."""
#@+others
#@+node:ekr.20200131110322.8: *4* Lexer.__init__
def __init__(self, states: Dict, first: Any) -> None:
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE) # |re.UNICODE)
self.state = first
#@+node:ekr.20200131110322.9: *4* Lexer.lex
def lex(self, text: str) -> Generator:
"""Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
"""
end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while start < end:
for match in regexes[state].finditer(text, start):
# g.trace(state, start, text, match)
# g.printObj(regexes[state])
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
start += len(toktext)
yield(tok.name, toktext)
if tok.next:
state = tok.next
break
self.state = state
#@-others
#@+node:ekr.20200131110322.6: *3* function: literals
def literals(choices: str, prefix: str = "", suffix: str = "") -> str:
"""
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
#@+node:ekr.20200131110322.10: *3* class JsLexer(Lexer)
class JsLexer(Lexer):
"""A Javascript lexer
>>> lexer = JsLexer()
>>> list(lexer.lex("a = 1"))
[('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
This doesn't properly handle non-Ascii characters in the Javascript source.
"""
# EKR: Happily, the JS importer doesn't need to handle id's carefully.
#@+<< constants >>
#@+node:ekr.20200131190707.1: *4* << constants >> (JsLexer)
# Because these tokens are matched as alternatives in a regex, longer possibilities
# must appear in the list before shorter ones, for example, '>>' before '>'.
#
# Note that we don't have to detect malformed Javascript, only properly lex
# correct Javascript, so much of this is simplified.
# Details of Javascript lexical structure are taken from
# http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
# A useful explanation of automatic semicolon insertion is at
# http://inimino.org/~inimino/blog/javascript_semicolons
# See https://stackoverflow.com/questions/6314614/match-any-unicode-letter
both_before = [
Tok("comment", r"/\*(.|\n)*?\*/"),
Tok("linecomment", r"//.*?$"),
Tok("ws", r"\s+"),
Tok("keyword", literals("""
async await
break case catch class const continue debugger
default delete do else enum export extends
finally for function if import in instanceof new
return super switch this throw try typeof var
void while with
""", suffix=r"\b"), next='reg'),
Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
#
# EKR: This would work if patterns were compiled with the re.UNICODE flag.
# However, \w is not the same as valid JS characters.
# In any case, the JS importer doesn't need to handle id's carefully.
#
# Tok("id", r"""([\w$])([\w\d]*)""", next='div'),
#
Tok("id", r"""
([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4}) # first char
([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
""", next='div'),
Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
Tok("onum", r"0[0-7]+"),
Tok("dnum", r"""
( (0|[1-9][0-9]*) # DecimalIntegerLiteral
\. # dot
[0-9]* # DecimalDigits-opt
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
\. # dot
[0-9]+ # DecimalDigits
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
(0|[1-9][0-9]*) # DecimalIntegerLiteral
([eE][-+]?[0-9]+)? # ExponentPart-opt
)
""", next='div'),
Tok("punct", literals("""
>>>= === !== >>> <<= >>= <= >= == != << >> &&
|| += -= *= %= &= |= ^=
"""), next="reg"),
Tok("punct", literals("++ -- ) ]"), next='div'),
Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
]
both_after = [
Tok("other", r"."),
]
states = {
'div': # slash will mean division
both_before + [
Tok("punct", literals("/= /"), next='reg'),
] + both_after,
'reg': # slash will mean regex
both_before + [
Tok("regex",
r"""
/ # opening slash
# First character is..
( [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
( [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
""", next='div'),
] + both_after,
}
#@-<< constants >>
def __init__(self) -> None:
super().__init__(self.states, 'reg')
#@-others
def do_import(c: Cmdr, parent: Position, s: str) -> None:
"""The importer callback for javascript."""
JS_Importer(c).import_from_string(parent, s)
importer_dict = {
'extensions': ['.js',],
'func': do_import,
}
#@@language python
#@@tabwidth -4
#@-leo
| [
"[email protected]"
] | |
98cc0764581e92078db33632b9a8330ad97806de | 51d7e8c09793b50d45731bd5ab9b531b525cf6db | /tests/garage/torch/algos/test_maml_ppo.py | ea4ac63fd8c01d020ad7379470f45d65de0217bd | [
"MIT"
] | permissive | fangqyi/garage | 454247849a6a3f547557b3fac3787ba9eeb0391f | ddafba385ef005f46f913ab352f9638760e5b412 | refs/heads/master | 2023-02-25T00:43:18.903328 | 2021-01-26T01:52:15 | 2021-01-26T01:52:15 | 267,667,220 | 0 | 0 | MIT | 2020-05-28T18:35:08 | 2020-05-28T18:35:07 | null | UTF-8 | Python | false | false | 2,582 | py |
"""This script is a test that fails when MAML-TRPO performance is too low."""
import pytest

try:
    # pylint: disable=unused-import
    import mujoco_py  # noqa: F401
except ImportError:
    pytest.skip('To use mujoco-based features, please install garage[mujoco].',
                allow_module_level=True)
except Exception:  # pylint: disable=broad-except
    pytest.skip(
        'Skipping tests, failed to import mujoco. Do you have a '
        'valid mujoco key installed?',
        allow_module_level=True)

import torch

from garage.envs import GarageEnv
from garage.envs import normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import deterministic, LocalRunner
from garage.torch.algos import MAMLPPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction

from tests.fixtures import snapshot_config


@pytest.mark.mujoco
class TestMAMLPPO:
    """Test class for MAML-PPO."""

    def setup_method(self):
        """Setup method which is called before every test."""
        self.env = GarageEnv(
            normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
        self.policy = GaussianMLPPolicy(
            env_spec=self.env.spec,
            hidden_sizes=(64, 64),
            hidden_nonlinearity=torch.tanh,
            output_nonlinearity=None,
        )
        self.value_function = GaussianMLPValueFunction(env_spec=self.env.spec,
                                                       hidden_sizes=(32, 32))

    def teardown_method(self):
        """Teardown method which is called after every test."""
        self.env.close()

    def test_ppo_pendulum(self):
        """Test PPO with Pendulum environment."""
        deterministic.set_seed(0)
        rollouts_per_task = 5
        max_path_length = 100

        runner = LocalRunner(snapshot_config)
        algo = MAMLPPO(env=self.env,
                       policy=self.policy,
                       value_function=self.value_function,
                       max_path_length=max_path_length,
                       meta_batch_size=5,
                       discount=0.99,
                       gae_lambda=1.,
                       inner_lr=0.1,
                       num_grad_updates=1)

        runner.setup(algo, self.env)
        last_avg_ret = runner.train(n_epochs=10,
                                    batch_size=rollouts_per_task *
                                    max_path_length)

        assert last_avg_ret > -5
| [
"[email protected]"
] | |
fac3f04df019414ae685c3823333bcb2f171d65d | 52381a4fc02e90ce1fcfffd8d9876d9e8f44c248 | /core/jobs/batch_jobs/email_deletion_jobs.py | 895c4067d3191f9ccbef1490e639ea0c12d09bab | [
"Apache-2.0"
] | permissive | ankita240796/oppia | 18aa1609a0f237ce76142b2a0d3169e830e5bcdd | ba4f072e494fd59df53fecc37e67cea7f9727234 | refs/heads/develop | 2022-07-11T01:11:53.136252 | 2022-06-30T08:55:49 | 2022-06-30T08:55:49 | 160,626,761 | 0 | 0 | Apache-2.0 | 2020-04-28T16:12:26 | 2018-12-06T06:02:18 | Python | UTF-8 | Python | false | false | 5,406 | py |
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Validation Jobs for blog models"""

from __future__ import annotations

from core.jobs import base_jobs
from core.jobs.io import ndb_io
from core.jobs.transforms import job_result_transforms
from core.jobs.types import job_run_result
from core.platform import models

import apache_beam as beam

MYPY = False
if MYPY:  # pragma: no cover
    from mypy_imports import email_models
    from mypy_imports import feedback_models
    from mypy_imports import user_models

(email_models, feedback_models, user_models) = models.Registry.import_models([
    models.NAMES.email, models.NAMES.feedback, models.NAMES.user
])


class DeleteUnneededEmailRelatedModelsJob(base_jobs.JobBase):
    """Job that deletes emails models that belonged to users that were deleted
    as part of the wipeout process.
    """

    def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
        deleted_user_ids_collection = (
            self.pipeline
            | 'Get all deleted user models' >> ndb_io.GetModels(
                user_models.DeletedUserModel.get_all())
            | 'Extract user IDs' >> beam.Map(
                lambda deleted_user_model: deleted_user_model.id)
        )
        deleted_user_ids = beam.pvalue.AsIter(deleted_user_ids_collection)

        sent_email_models_to_delete = (
            self.pipeline
            | 'Get all sent email models' >> ndb_io.GetModels(
                email_models.SentEmailModel.get_all())
            | 'Filter sent email models that belong to deleted users' >> (
                beam.Filter(
                    lambda model, ids: (
                        model.sender_id in ids or model.recipient_id in ids),
                    ids=deleted_user_ids
                ))
        )
        sent_email_models_to_delete_result = (
            sent_email_models_to_delete
            | 'Count sent email models to be deleted' >> (
                job_result_transforms.CountObjectsToJobRunResult('SENT EMAILS'))
        )

        bulk_email_models_to_delete = (
            self.pipeline
            | 'Get all bulk email models' >> ndb_io.GetModels(
                email_models.BulkEmailModel.get_all())
            | 'Filter bulk email models that belong to deleted users' >> (
                beam.Filter(
                    lambda model, ids: model.sender_id in ids,
                    ids=deleted_user_ids
                ))
        )
        bulk_email_models_to_delete_result = (
            bulk_email_models_to_delete
            | 'Count bulk email models to be deleted' >> (
                job_result_transforms.CountObjectsToJobRunResult('BULK EMAILS'))
        )

        unsent_feedback_email_models_to_delete = (
            self.pipeline
            | 'Get all unsent feedback models' >> ndb_io.GetModels(
                feedback_models.UnsentFeedbackEmailModel.get_all())
            | 'Filter unsent feedback models that belong to deleted users' >> (
                beam.Filter(
                    lambda model, ids: model.id in ids, ids=deleted_user_ids))
        )
        unsent_feedback_email_models_to_delete_result = (
            unsent_feedback_email_models_to_delete
            | 'Count unsent feedback email models to be deleted' >> (
                job_result_transforms.CountObjectsToJobRunResult(
                    'FEEDBACK EMAILS'))
        )

        user_bulk_emails_models_to_delete = (
            self.pipeline
            | 'Get all user bulk email models' >> ndb_io.GetModels(
                user_models.UserBulkEmailsModel.get_all())
            | 'Filter user bulk email models that belong to deleted users' >> (
                beam.Filter(
                    lambda model, ids: model.id in ids, ids=deleted_user_ids))
        )
        user_bulk_emails_models_to_delete_result = (
            user_bulk_emails_models_to_delete
            | 'Count user bulk email models to be deleted' >> (
                job_result_transforms.CountObjectsToJobRunResult(
                    'USER BULK EMAILS'))
        )

        unused_models_deletion = (
            (
                sent_email_models_to_delete,
                bulk_email_models_to_delete,
                unsent_feedback_email_models_to_delete,
                user_bulk_emails_models_to_delete
            )
            | 'Merge models' >> beam.Flatten()
            | 'Extract keys' >> beam.Map(lambda model: model.key)
            | 'Delete models' >> ndb_io.DeleteModels()
        )

        return (
            (
                sent_email_models_to_delete_result,
                bulk_email_models_to_delete_result,
                unsent_feedback_email_models_to_delete_result,
                user_bulk_emails_models_to_delete_result,
            )
            | 'Merge results' >> beam.Flatten()
        )
| [
"[email protected]"
] | |
58e763898710361ea138991802ef384274628d64 | f0681b8c129e8afce21e340697502230f45ce930 | /venv/Lib/site-packages/com/vmware/vcenter/vm_client.py | a08c07d9358ac1cb0cb378966e1199db49d71547 | [] | no_license | dungla2011/python_pyvmomi_working_sample_vmware_easy | 8852b6fdcd0f7d0f648f6f7b6c6e4f70c7213746 | a3b6d86a802f28c7ee249fc03523d5e5f0a2e3bd | refs/heads/main | 2023-07-05T14:56:46.551091 | 2021-08-20T12:19:39 | 2021-08-20T12:19:39 | 395,496,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109,741 | py | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2021 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.vcenter.vm.
#---------------------------------------------------------------------------
"""
The ``com.vmware.vcenter.vm_client`` module provides classes for managing
virtual machines.
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class GuestOS(Enum):
"""
The ``GuestOS`` class defines the valid guest operating system types used
for configuring a virtual machine.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
DOS = None
"""
MS-DOS.
"""
WIN_31 = None
"""
Windows 3.1
"""
WIN_95 = None
"""
Windows 95
"""
WIN_98 = None
"""
Windows 98
"""
WIN_ME = None
"""
Windows Millennium Edition
"""
WIN_NT = None
"""
Windows NT 4
"""
WIN_2000_PRO = None
"""
Windows 2000 Professional
"""
WIN_2000_SERV = None
"""
Windows 2000 Server
"""
WIN_2000_ADV_SERV = None
"""
Windows 2000 Advanced Server
"""
WIN_XP_HOME = None
"""
Windows XP Home Edition
"""
WIN_XP_PRO = None
"""
Windows XP Professional
"""
WIN_XP_PRO_64 = None
"""
Windows XP Professional Edition (64 bit)
"""
WIN_NET_WEB = None
"""
Windows Server 2003, Web Edition
"""
WIN_NET_STANDARD = None
"""
Windows Server 2003, Standard Edition
"""
WIN_NET_ENTERPRISE = None
"""
Windows Server 2003, Enterprise Edition
"""
WIN_NET_DATACENTER = None
"""
Windows Server 2003, Datacenter Edition
"""
WIN_NET_BUSINESS = None
"""
Windows Small Business Server 2003
"""
WIN_NET_STANDARD_64 = None
"""
Windows Server 2003, Standard Edition (64 bit)
"""
WIN_NET_ENTERPRISE_64 = None
"""
Windows Server 2003, Enterprise Edition (64 bit)
"""
WIN_LONGHORN = None
"""
Windows Longhorn (experimental)
"""
WIN_LONGHORN_64 = None
"""
Windows Longhorn (64 bit) (experimental)
"""
WIN_NET_DATACENTER_64 = None
"""
Windows Server 2003, Datacenter Edition (64 bit) (experimental)
"""
WIN_VISTA = None
"""
Windows Vista
"""
WIN_VISTA_64 = None
"""
Windows Vista (64 bit)
"""
WINDOWS_7 = None
"""
Windows 7
"""
WINDOWS_7_64 = None
"""
Windows 7 (64 bit)
"""
WINDOWS_7_SERVER_64 = None
"""
Windows Server 2008 R2 (64 bit)
"""
WINDOWS_8 = None
"""
Windows 8
"""
WINDOWS_8_64 = None
"""
Windows 8 (64 bit)
"""
WINDOWS_8_SERVER_64 = None
"""
Windows 8 Server (64 bit)
"""
WINDOWS_9 = None
"""
Windows 10
"""
WINDOWS_9_64 = None
"""
Windows 10 (64 bit)
"""
WINDOWS_9_SERVER_64 = None
"""
Windows 10 Server (64 bit)
"""
WINDOWS_HYPERV = None
"""
Windows Hyper-V
"""
WINDOWS_SERVER_2019 = None
"""
Windows Server 2019. This class attribute was added in vSphere API 7.0.0.0.
"""
WINDOWS_SERVER_2021 = None
"""
Windows Server 2022. This class attribute was added in vSphere API 7.0.1.0.
"""
FREEBSD = None
"""
FreeBSD 10 or earlier
"""
FREEBSD_64 = None
"""
FreeBSD 10 x64 or earlier
"""
FREEBSD_11 = None
"""
FreeBSD 11. This class attribute was added in vSphere API 6.7.
"""
FREEBSD_12 = None
"""
FreeBSD 12. This class attribute was added in vSphere API 6.7.
"""
FREEBSD_13 = None
"""
FreeBSD 13 or later. This class attribute was added in vSphere API 7.0.1.0.
"""
FREEBSD_11_64 = None
"""
FreeBSD 11 x64. This class attribute was added in vSphere API 6.7.
"""
FREEBSD_12_64 = None
"""
FreeBSD 12 x64. This class attribute was added in vSphere API 6.7.
"""
FREEBSD_13_64 = None
"""
FreeBSD 13 x64 or later. This class attribute was added in vSphere API
7.0.1.0.
"""
REDHAT = None
"""
Red Hat Linux 2.1
"""
RHEL_2 = None
"""
Red Hat Enterprise Linux 2
"""
RHEL_3 = None
"""
Red Hat Enterprise Linux 3
"""
RHEL_3_64 = None
"""
Red Hat Enterprise Linux 3 (64 bit)
"""
RHEL_4 = None
"""
Red Hat Enterprise Linux 4
"""
RHEL_4_64 = None
"""
Red Hat Enterprise Linux 4 (64 bit)
"""
RHEL_5 = None
"""
Red Hat Enterprise Linux 5
"""
RHEL_5_64 = None
"""
Red Hat Enterprise Linux 5 (64 bit) (experimental)
"""
RHEL_6 = None
"""
Red Hat Enterprise Linux 6
"""
RHEL_6_64 = None
"""
Red Hat Enterprise Linux 6 (64 bit)
"""
RHEL_7 = None
"""
Red Hat Enterprise Linux 7
"""
RHEL_7_64 = None
"""
Red Hat Enterprise Linux 7 (64 bit)
"""
RHEL_8_64 = None
"""
Red Hat Enterprise Linux 8 (64 bit). This class attribute was added in
vSphere API 6.7.
"""
RHEL_9_64 = None
"""
Red Hat Enterprise Linux 9 (64 bit). This class attribute was added in
vSphere API 7.0.1.0.
"""
CENTOS = None
"""
CentOS 4/5
"""
CENTOS_64 = None
"""
CentOS 4/5 (64-bit)
"""
CENTOS_6 = None
"""
CentOS 6
"""
CENTOS_6_64 = None
"""
CentOS 6 (64-bit)
"""
CENTOS_7 = None
"""
CentOS 7
"""
CENTOS_7_64 = None
"""
CentOS 7 (64-bit)
"""
CENTOS_8_64 = None
"""
CentOS 8 (64-bit). This class attribute was added in vSphere API 6.7.
"""
CENTOS_9_64 = None
"""
CentOS 9 (64-bit). This class attribute was added in vSphere API 7.0.1.0.
"""
ORACLE_LINUX = None
"""
Oracle Linux 4/5
"""
ORACLE_LINUX_64 = None
"""
Oracle Linux 4/5 (64-bit)
"""
ORACLE_LINUX_6 = None
"""
Oracle Linux 6
"""
ORACLE_LINUX_6_64 = None
"""
Oracle Linux 6 (64-bit)
"""
ORACLE_LINUX_7 = None
"""
Oracle Linux 7
"""
ORACLE_LINUX_7_64 = None
"""
Oracle Linux 7 (64-bit)
"""
ORACLE_LINUX_8_64 = None
"""
Oracle Linux 8 (64-bit). This class attribute was added in vSphere API 6.7.
"""
ORACLE_LINUX_9_64 = None
"""
Oracle Linux 9 (64-bit). This class attribute was added in vSphere API
7.0.1.0.
"""
SUSE = None
"""
Suse Linux
"""
SUSE_64 = None
"""
Suse Linux (64 bit)
"""
SLES = None
"""
Suse Linux Enterprise Server 9
"""
SLES_64 = None
"""
Suse Linux Enterprise Server 9 (64 bit)
"""
SLES_10 = None
"""
Suse linux Enterprise Server 10
"""
SLES_10_64 = None
"""
Suse Linux Enterprise Server 10 (64 bit) (experimental)
"""
SLES_11 = None
"""
Suse linux Enterprise Server 11
"""
SLES_11_64 = None
"""
Suse Linux Enterprise Server 11 (64 bit)
"""
SLES_12 = None
"""
Suse linux Enterprise Server 12
"""
SLES_12_64 = None
"""
Suse Linux Enterprise Server 12 (64 bit)
"""
SLES_15_64 = None
"""
Suse Linux Enterprise Server 15 (64 bit). This class attribute was added in
vSphere API 6.7.
"""
SLES_16_64 = None
"""
Suse Linux Enterprise Server 16 (64 bit). This class attribute was added in
vSphere API 7.0.1.0.
"""
NLD_9 = None
"""
Novell Linux Desktop 9
"""
OES = None
"""
Open Enterprise Server
"""
SJDS = None
"""
Sun Java Desktop System
"""
MANDRAKE = None
"""
Mandrake Linux
"""
MANDRIVA = None
"""
Mandriva Linux
"""
MANDRIVA_64 = None
"""
Mandriva Linux (64 bit)
"""
TURBO_LINUX = None
"""
Turbolinux
"""
TURBO_LINUX_64 = None
"""
Turbolinux (64 bit)
"""
UBUNTU = None
"""
Ubuntu Linux
"""
UBUNTU_64 = None
"""
Ubuntu Linux (64 bit)
"""
DEBIAN_4 = None
"""
Debian GNU/Linux 4
"""
DEBIAN_4_64 = None
"""
Debian GNU/Linux 4 (64 bit)
"""
DEBIAN_5 = None
"""
Debian GNU/Linux 5
"""
DEBIAN_5_64 = None
"""
Debian GNU/Linux 5 (64 bit)
"""
DEBIAN_6 = None
"""
Debian GNU/Linux 6
"""
DEBIAN_6_64 = None
"""
Debian GNU/Linux 6 (64 bit)
"""
DEBIAN_7 = None
"""
Debian GNU/Linux 7
"""
DEBIAN_7_64 = None
"""
Debian GNU/Linux 7 (64 bit)
"""
DEBIAN_8 = None
"""
Debian GNU/Linux 8
"""
DEBIAN_8_64 = None
"""
Debian GNU/Linux 8 (64 bit)
"""
DEBIAN_9 = None
"""
Debian GNU/Linux 9
"""
DEBIAN_9_64 = None
"""
Debian GNU/Linux 9 (64 bit)
"""
DEBIAN_10 = None
"""
Debian GNU/Linux 10
"""
DEBIAN_10_64 = None
"""
Debian GNU/Linux 10 (64 bit)
"""
DEBIAN_11 = None
"""
Debian GNU/Linux 11. This class attribute was added in vSphere API 7.0.0.0.
"""
DEBIAN_11_64 = None
"""
Debian GNU/Linux 11 (64 bit). This class attribute was added in vSphere API
7.0.0.0.
"""
ASIANUX_3 = None
"""
Asianux Server 3
"""
ASIANUX_3_64 = None
"""
Asianux Server 3 (64 bit)
"""
ASIANUX_4 = None
"""
Asianux Server 4
"""
ASIANUX_4_64 = None
"""
Asianux Server 4 (64 bit)
"""
ASIANUX_5_64 = None
"""
Asianux Server 5 (64 bit)
"""
ASIANUX_7_64 = None
"""
Asianux Server 7 (64 bit)
"""
ASIANUX_8_64 = None
"""
Asianux Server 8 (64 bit). This class attribute was added in vSphere API
6.7.
"""
ASIANUX_9_64 = None
"""
Asianux Server 9 (64 bit). This class attribute was added in vSphere API
7.0.1.0.
"""
OPENSUSE = None
"""
OpenSUSE Linux
"""
OPENSUSE_64 = None
"""
OpenSUSE Linux (64 bit)
"""
FEDORA = None
"""
Fedora Linux
"""
FEDORA_64 = None
"""
Fedora Linux (64 bit)
"""
COREOS_64 = None
"""
CoreOS Linux (64 bit)
"""
VMWARE_PHOTON_64 = None
"""
VMware Photon (64 bit)
"""
OTHER_24X_LINUX = None
"""
Linux 2.4x Kernel
"""
OTHER_24X_LINUX_64 = None
"""
Linux 2.4x Kernel (64 bit) (experimental)
"""
OTHER_26X_LINUX = None
"""
Linux 2.6x Kernel
"""
OTHER_26X_LINUX_64 = None
"""
Linux 2.6x Kernel (64 bit) (experimental)
"""
OTHER_3X_LINUX = None
"""
Linux 3.x Kernel
"""
OTHER_3X_LINUX_64 = None
"""
Linux 3.x Kernel (64 bit)
"""
OTHER_4X_LINUX = None
"""
Linux 4.x Kernel. This class attribute was added in vSphere API 6.7.
"""
OTHER_4X_LINUX_64 = None
"""
Linux 4.x Kernel (64 bit). This class attribute was added in vSphere API
6.7.
"""
OTHER_5X_LINUX = None
"""
Linux 5.x Kernel. This class attribute was added in vSphere API 7.0.1.0.
"""
OTHER_5X_LINUX_64 = None
"""
Linux 5.x Kernel (64 bit). This class attribute was added in vSphere API
7.0.1.0.
"""
OTHER_LINUX = None
"""
Linux 2.2x Kernel
"""
GENERIC_LINUX = None
"""
Other Linux
"""
OTHER_LINUX_64 = None
"""
Linux (64 bit) (experimental)
"""
SOLARIS_6 = None
"""
Solaris 6
"""
SOLARIS_7 = None
"""
Solaris 7
"""
SOLARIS_8 = None
"""
Solaris 8
"""
SOLARIS_9 = None
"""
Solaris 9
"""
SOLARIS_10 = None
"""
Solaris 10 (32 bit) (experimental)
"""
SOLARIS_10_64 = None
"""
Solaris 10 (64 bit) (experimental)
"""
SOLARIS_11_64 = None
"""
Solaris 11 (64 bit)
"""
OS2 = None
"""
OS/2
"""
ECOMSTATION = None
"""
eComStation 1.x
"""
ECOMSTATION_2 = None
"""
eComStation 2.0
"""
NETWARE_4 = None
"""
Novell NetWare 4
"""
NETWARE_5 = None
"""
Novell NetWare 5.1
"""
NETWARE_6 = None
"""
Novell NetWare 6.x
"""
OPENSERVER_5 = None
"""
SCO OpenServer 5
"""
OPENSERVER_6 = None
"""
SCO OpenServer 6
"""
UNIXWARE_7 = None
"""
SCO UnixWare 7
"""
DARWIN = None
"""
Mac OS 10.5
"""
DARWIN_64 = None
"""
Mac OS 10.5 (64 bit)
"""
DARWIN_10 = None
"""
Mac OS 10.6
"""
DARWIN_10_64 = None
"""
Mac OS 10.6 (64 bit)
"""
DARWIN_11 = None
"""
Mac OS 10.7
"""
DARWIN_11_64 = None
"""
Mac OS 10.7 (64 bit)
"""
DARWIN_12_64 = None
"""
Mac OS 10.8 (64 bit)
"""
DARWIN_13_64 = None
"""
Mac OS 10.9 (64 bit)
"""
DARWIN_14_64 = None
"""
Mac OS 10.10 (64 bit)
"""
DARWIN_15_64 = None
"""
Mac OS 10.11 (64 bit)
"""
DARWIN_16_64 = None
"""
Mac OS 10.12 (64 bit)
"""
DARWIN_17_64 = None
"""
Mac OS 10.13 (64 bit). This class attribute was added in vSphere API 6.7.
"""
DARWIN_18_64 = None
"""
Mac OS 10.14 (64 bit). This class attribute was added in vSphere API 6.7.
"""
DARWIN_19_64 = None
"""
Mac OS 10.15 (64 bit). This class attribute was added in vSphere API
7.0.0.0.
"""
DARWIN_20_64 = None
"""
Mac OS 11 (64 bit). This class attribute was added in vSphere API 7.0.1.0.
"""
DARWIN_21_64 = None
"""
Mac OS 12 (64 bit). This class attribute was added in vSphere API 7.0.1.0.
"""
VMKERNEL = None
"""
VMware ESX 4
"""
VMKERNEL_5 = None
"""
VMware ESX 5
"""
VMKERNEL_6 = None
"""
VMware ESX 6
"""
VMKERNEL_65 = None
"""
VMware ESX 6.5
"""
VMKERNEL_7 = None
"""
VMware ESX 7. This class attribute was added in vSphere API 7.0.0.0.
"""
AMAZONLINUX2_64 = None
"""
Amazon Linux 2 (64 bit). This class attribute was added in vSphere API
6.7.1.
"""
AMAZONLINUX3_64 = None
"""
Amazon Linux 3 (64 bit). This class attribute was added in vSphere API
7.0.1.0.
"""
CRXPOD_1 = None
"""
CRX Pod 1. This class attribute was added in vSphere API 7.0.0.0.
"""
OTHER = None
"""
Other Operating System
"""
OTHER_64 = None
"""
Other Operating System (64 bit) (experimental)
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`GuestOS` instance.
"""
Enum.__init__(string)
GuestOS._set_values([
GuestOS('DOS'),
GuestOS('WIN_31'),
GuestOS('WIN_95'),
GuestOS('WIN_98'),
GuestOS('WIN_ME'),
GuestOS('WIN_NT'),
GuestOS('WIN_2000_PRO'),
GuestOS('WIN_2000_SERV'),
GuestOS('WIN_2000_ADV_SERV'),
GuestOS('WIN_XP_HOME'),
GuestOS('WIN_XP_PRO'),
GuestOS('WIN_XP_PRO_64'),
GuestOS('WIN_NET_WEB'),
GuestOS('WIN_NET_STANDARD'),
GuestOS('WIN_NET_ENTERPRISE'),
GuestOS('WIN_NET_DATACENTER'),
GuestOS('WIN_NET_BUSINESS'),
GuestOS('WIN_NET_STANDARD_64'),
GuestOS('WIN_NET_ENTERPRISE_64'),
GuestOS('WIN_LONGHORN'),
GuestOS('WIN_LONGHORN_64'),
GuestOS('WIN_NET_DATACENTER_64'),
GuestOS('WIN_VISTA'),
GuestOS('WIN_VISTA_64'),
GuestOS('WINDOWS_7'),
GuestOS('WINDOWS_7_64'),
GuestOS('WINDOWS_7_SERVER_64'),
GuestOS('WINDOWS_8'),
GuestOS('WINDOWS_8_64'),
GuestOS('WINDOWS_8_SERVER_64'),
GuestOS('WINDOWS_9'),
GuestOS('WINDOWS_9_64'),
GuestOS('WINDOWS_9_SERVER_64'),
GuestOS('WINDOWS_HYPERV'),
GuestOS('WINDOWS_SERVER_2019'),
GuestOS('WINDOWS_SERVER_2021'),
GuestOS('FREEBSD'),
GuestOS('FREEBSD_64'),
GuestOS('FREEBSD_11'),
GuestOS('FREEBSD_12'),
GuestOS('FREEBSD_13'),
GuestOS('FREEBSD_11_64'),
GuestOS('FREEBSD_12_64'),
GuestOS('FREEBSD_13_64'),
GuestOS('REDHAT'),
GuestOS('RHEL_2'),
GuestOS('RHEL_3'),
GuestOS('RHEL_3_64'),
GuestOS('RHEL_4'),
GuestOS('RHEL_4_64'),
GuestOS('RHEL_5'),
GuestOS('RHEL_5_64'),
GuestOS('RHEL_6'),
GuestOS('RHEL_6_64'),
GuestOS('RHEL_7'),
GuestOS('RHEL_7_64'),
GuestOS('RHEL_8_64'),
GuestOS('RHEL_9_64'),
GuestOS('CENTOS'),
GuestOS('CENTOS_64'),
GuestOS('CENTOS_6'),
GuestOS('CENTOS_6_64'),
GuestOS('CENTOS_7'),
GuestOS('CENTOS_7_64'),
GuestOS('CENTOS_8_64'),
GuestOS('CENTOS_9_64'),
GuestOS('ORACLE_LINUX'),
GuestOS('ORACLE_LINUX_64'),
GuestOS('ORACLE_LINUX_6'),
GuestOS('ORACLE_LINUX_6_64'),
GuestOS('ORACLE_LINUX_7'),
GuestOS('ORACLE_LINUX_7_64'),
GuestOS('ORACLE_LINUX_8_64'),
GuestOS('ORACLE_LINUX_9_64'),
GuestOS('SUSE'),
GuestOS('SUSE_64'),
GuestOS('SLES'),
GuestOS('SLES_64'),
GuestOS('SLES_10'),
GuestOS('SLES_10_64'),
GuestOS('SLES_11'),
GuestOS('SLES_11_64'),
GuestOS('SLES_12'),
GuestOS('SLES_12_64'),
GuestOS('SLES_15_64'),
GuestOS('SLES_16_64'),
GuestOS('NLD_9'),
GuestOS('OES'),
GuestOS('SJDS'),
GuestOS('MANDRAKE'),
GuestOS('MANDRIVA'),
GuestOS('MANDRIVA_64'),
GuestOS('TURBO_LINUX'),
GuestOS('TURBO_LINUX_64'),
GuestOS('UBUNTU'),
GuestOS('UBUNTU_64'),
GuestOS('DEBIAN_4'),
GuestOS('DEBIAN_4_64'),
GuestOS('DEBIAN_5'),
GuestOS('DEBIAN_5_64'),
GuestOS('DEBIAN_6'),
GuestOS('DEBIAN_6_64'),
GuestOS('DEBIAN_7'),
GuestOS('DEBIAN_7_64'),
GuestOS('DEBIAN_8'),
GuestOS('DEBIAN_8_64'),
GuestOS('DEBIAN_9'),
GuestOS('DEBIAN_9_64'),
GuestOS('DEBIAN_10'),
GuestOS('DEBIAN_10_64'),
GuestOS('DEBIAN_11'),
GuestOS('DEBIAN_11_64'),
GuestOS('ASIANUX_3'),
GuestOS('ASIANUX_3_64'),
GuestOS('ASIANUX_4'),
GuestOS('ASIANUX_4_64'),
GuestOS('ASIANUX_5_64'),
GuestOS('ASIANUX_7_64'),
GuestOS('ASIANUX_8_64'),
GuestOS('ASIANUX_9_64'),
GuestOS('OPENSUSE'),
GuestOS('OPENSUSE_64'),
GuestOS('FEDORA'),
GuestOS('FEDORA_64'),
GuestOS('COREOS_64'),
GuestOS('VMWARE_PHOTON_64'),
GuestOS('OTHER_24X_LINUX'),
GuestOS('OTHER_24X_LINUX_64'),
GuestOS('OTHER_26X_LINUX'),
GuestOS('OTHER_26X_LINUX_64'),
GuestOS('OTHER_3X_LINUX'),
GuestOS('OTHER_3X_LINUX_64'),
GuestOS('OTHER_4X_LINUX'),
GuestOS('OTHER_4X_LINUX_64'),
GuestOS('OTHER_5X_LINUX'),
GuestOS('OTHER_5X_LINUX_64'),
GuestOS('OTHER_LINUX'),
GuestOS('GENERIC_LINUX'),
GuestOS('OTHER_LINUX_64'),
GuestOS('SOLARIS_6'),
GuestOS('SOLARIS_7'),
GuestOS('SOLARIS_8'),
GuestOS('SOLARIS_9'),
GuestOS('SOLARIS_10'),
GuestOS('SOLARIS_10_64'),
GuestOS('SOLARIS_11_64'),
GuestOS('OS2'),
GuestOS('ECOMSTATION'),
GuestOS('ECOMSTATION_2'),
GuestOS('NETWARE_4'),
GuestOS('NETWARE_5'),
GuestOS('NETWARE_6'),
GuestOS('OPENSERVER_5'),
GuestOS('OPENSERVER_6'),
GuestOS('UNIXWARE_7'),
GuestOS('DARWIN'),
GuestOS('DARWIN_64'),
GuestOS('DARWIN_10'),
GuestOS('DARWIN_10_64'),
GuestOS('DARWIN_11'),
GuestOS('DARWIN_11_64'),
GuestOS('DARWIN_12_64'),
GuestOS('DARWIN_13_64'),
GuestOS('DARWIN_14_64'),
GuestOS('DARWIN_15_64'),
GuestOS('DARWIN_16_64'),
GuestOS('DARWIN_17_64'),
GuestOS('DARWIN_18_64'),
GuestOS('DARWIN_19_64'),
GuestOS('DARWIN_20_64'),
GuestOS('DARWIN_21_64'),
GuestOS('VMKERNEL'),
GuestOS('VMKERNEL_5'),
GuestOS('VMKERNEL_6'),
GuestOS('VMKERNEL_65'),
GuestOS('VMKERNEL_7'),
GuestOS('AMAZONLINUX2_64'),
GuestOS('AMAZONLINUX3_64'),
GuestOS('CRXPOD_1'),
GuestOS('OTHER'),
GuestOS('OTHER_64'),
])
GuestOS._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.guest_OS',
GuestOS))
class GuestOSFamily(Enum):
"""
The ``GuestOSFamily`` class defines the valid guest operating system family
types reported by a virtual machine. This enumeration was added in vSphere
API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
WINDOWS = None
"""
Windows operating system. This class attribute was added in vSphere API
6.7.
"""
LINUX = None
"""
Linux operating system. This class attribute was added in vSphere API 6.7.
"""
NETWARE = None
"""
Novell Netware. This class attribute was added in vSphere API 6.7.
"""
SOLARIS = None
"""
Solaris operating system. This class attribute was added in vSphere API
6.7.
"""
DARWIN = None
"""
Mac OS operating system. This class attribute was added in vSphere API 6.7.
"""
OTHER = None
"""
Other operating systems. This class attribute was added in vSphere API 6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`GuestOSFamily` instance.
"""
Enum.__init__(string)
GuestOSFamily._set_values([
GuestOSFamily('WINDOWS'),
GuestOSFamily('LINUX'),
GuestOSFamily('NETWARE'),
GuestOSFamily('SOLARIS'),
GuestOSFamily('DARWIN'),
GuestOSFamily('OTHER'),
])
GuestOSFamily._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.guest_OS_family',
GuestOSFamily))
class Hardware(VapiInterface):
"""
The ``Hardware`` class provides methods for configuring the virtual
hardware of a virtual machine.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.hardware'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _HardwareStub)
self._VAPI_OPERATION_IDS = {}
class Version(Enum):
"""
The ``Hardware.Version`` class defines the valid virtual hardware versions
for a virtual machine. See https://kb.vmware.com/s/article/1003746 (Virtual
machine hardware versions (1003746)).
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
VMX_03 = None
"""
Hardware version 3, first supported in ESXi 2.5.
"""
VMX_04 = None
"""
Hardware version 4, first supported in ESXi 3.0.
"""
VMX_06 = None
"""
Hardware version 6, first supported in WS 6.0.
"""
VMX_07 = None
"""
Hardware version 7, first supported in ESXi 4.0.
"""
VMX_08 = None
"""
Hardware version 8, first supported in ESXi 5.0.
"""
VMX_09 = None
"""
Hardware version 9, first supported in ESXi 5.1.
"""
VMX_10 = None
"""
Hardware version 10, first supported in ESXi 5.5.
"""
VMX_11 = None
"""
Hardware version 11, first supported in ESXi 6.0.
"""
VMX_12 = None
"""
Hardware version 12, first supported in Workstation 12.0.
"""
VMX_13 = None
"""
Hardware version 13, first supported in ESXi 6.5.
"""
VMX_14 = None
"""
Hardware version 14, first supported in ESXi 6.7. This class attribute was
added in vSphere API 6.7.
"""
VMX_15 = None
"""
Hardware version 15, first supported in ESXi 6.7 Update 2. This class
attribute was added in vSphere API 6.7.2.
"""
VMX_16 = None
"""
Hardware version 16, first supported in Workstation 15.0. This class
attribute was added in vSphere API 7.0.0.0.
"""
VMX_17 = None
"""
Hardware version 17, first supported in ESXi 7.0.0-0. This class attribute
was added in vSphere API 7.0.0.0.
"""
VMX_18 = None
"""
Hardware version 18, first supported in ESXi 7.0 U1. This class attribute
was added in vSphere API 7.0.1.0.
"""
VMX_19 = None
"""
Hardware version 19, first supported in ESXi 7.0 U2. This class attribute
was added in vSphere API 7.0.2.0.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`Version` instance.
"""
Enum.__init__(string)
Version._set_values([
Version('VMX_03'),
Version('VMX_04'),
Version('VMX_06'),
Version('VMX_07'),
Version('VMX_08'),
Version('VMX_09'),
Version('VMX_10'),
Version('VMX_11'),
Version('VMX_12'),
Version('VMX_13'),
Version('VMX_14'),
Version('VMX_15'),
Version('VMX_16'),
Version('VMX_17'),
Version('VMX_18'),
Version('VMX_19'),
])
Version._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.hardware.version',
Version))
class UpgradePolicy(Enum):
"""
The ``Hardware.UpgradePolicy`` class defines the valid virtual hardware
upgrade policies for a virtual machine.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
NEVER = None
"""
Do not upgrade the virtual machine when it is powered on.
"""
AFTER_CLEAN_SHUTDOWN = None
"""
Run scheduled upgrade when the virtual machine is powered on after a clean
shutdown of the guest operating system.
"""
ALWAYS = None
"""
Run scheduled upgrade when the virtual machine is powered on.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`UpgradePolicy` instance.
"""
Enum.__init__(string)
UpgradePolicy._set_values([
UpgradePolicy('NEVER'),
UpgradePolicy('AFTER_CLEAN_SHUTDOWN'),
UpgradePolicy('ALWAYS'),
])
UpgradePolicy._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.hardware.upgrade_policy',
UpgradePolicy))
class UpgradeStatus(Enum):
"""
The ``Hardware.UpgradeStatus`` class defines the valid virtual hardware
upgrade statuses for a virtual machine.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
NONE = None
"""
No scheduled upgrade has been attempted.
"""
PENDING = None
"""
Upgrade is scheduled but has not yet been run.
"""
SUCCESS = None
"""
The most recent scheduled upgrade was successful.
"""
FAILED = None
"""
The most recent scheduled upgrade was not successful.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`UpgradeStatus` instance.
"""
Enum.__init__(string)
UpgradeStatus._set_values([
UpgradeStatus('NONE'),
UpgradeStatus('PENDING'),
UpgradeStatus('SUCCESS'),
UpgradeStatus('FAILED'),
])
UpgradeStatus._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.hardware.upgrade_status',
UpgradeStatus))
class Info(VapiStruct):
"""
The ``Hardware.Info`` class contains information related to the virtual
hardware of a virtual machine.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'upgrade_policy',
{
'AFTER_CLEAN_SHUTDOWN' : [('upgrade_version', True)],
'ALWAYS' : [('upgrade_version', True)],
'NEVER' : [],
}
),
UnionValidator(
'upgrade_status',
{
'FAILED' : [('upgrade_error', True)],
'NONE' : [],
'PENDING' : [],
'SUCCESS' : [],
}
),
]
def __init__(self,
version=None,
upgrade_policy=None,
upgrade_version=None,
upgrade_status=None,
upgrade_error=None,
):
"""
:type version: :class:`Hardware.Version`
:param version: Virtual hardware version.
:type upgrade_policy: :class:`Hardware.UpgradePolicy`
:param upgrade_policy: Scheduled upgrade policy.
:type upgrade_version: :class:`Hardware.Version`
:param upgrade_version: Target hardware version to be used on the next scheduled virtual
hardware upgrade.
This attribute is optional and it is only relevant when the value
of ``upgradePolicy`` is one of
:attr:`Hardware.UpgradePolicy.AFTER_CLEAN_SHUTDOWN` or
:attr:`Hardware.UpgradePolicy.ALWAYS`.
:type upgrade_status: :class:`Hardware.UpgradeStatus`
:param upgrade_status: Scheduled upgrade status.
:type upgrade_error: :class:`Exception`
:param upgrade_error: Reason for the scheduled upgrade failure.
This attribute is optional and it is only relevant when the value
of ``upgradeStatus`` is :attr:`Hardware.UpgradeStatus.FAILED`.
"""
self.version = version
self.upgrade_policy = upgrade_policy
self.upgrade_version = upgrade_version
self.upgrade_status = upgrade_status
self.upgrade_error = upgrade_error
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.hardware.info', {
'version': type.ReferenceType(__name__, 'Hardware.Version'),
'upgrade_policy': type.ReferenceType(__name__, 'Hardware.UpgradePolicy'),
'upgrade_version': type.OptionalType(type.ReferenceType(__name__, 'Hardware.Version')),
'upgrade_status': type.ReferenceType(__name__, 'Hardware.UpgradeStatus'),
'upgrade_error': type.OptionalType(type.AnyErrorType()),
},
Info,
False,
None))
class UpdateSpec(VapiStruct):
"""
The ``Hardware.UpdateSpec`` class describes the updates to virtual hardware
settings of a virtual machine.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'upgrade_policy',
{
'AFTER_CLEAN_SHUTDOWN' : [('upgrade_version', False)],
'ALWAYS' : [('upgrade_version', False)],
'NEVER' : [],
}
),
]
def __init__(self,
upgrade_policy=None,
upgrade_version=None,
):
"""
:type upgrade_policy: :class:`Hardware.UpgradePolicy` or ``None``
:param upgrade_policy: Scheduled upgrade policy.
If set to :attr:`Hardware.UpgradePolicy.NEVER`, the
:attr:`Hardware.Info.upgrade_version` attribute will be reset to
None.
If None, the value is unchanged.
:type upgrade_version: :class:`Hardware.Version` or ``None``
:param upgrade_version: Target hardware version to be used on the next scheduled virtual
hardware upgrade.
If specified, this attribute must represent a newer virtual
hardware version than the current virtual hardware version reported
in :attr:`Hardware.Info.version`.
If :attr:`Hardware.UpdateSpec.upgrade_policy` is set to
:attr:`Hardware.UpgradePolicy.NEVER`, this attribute must be None.
Otherwise, if this attribute is None, default to the most recent
virtual hardware version supported by the server.
"""
self.upgrade_policy = upgrade_policy
self.upgrade_version = upgrade_version
VapiStruct.__init__(self)
UpdateSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.hardware.update_spec', {
'upgrade_policy': type.OptionalType(type.ReferenceType(__name__, 'Hardware.UpgradePolicy')),
'upgrade_version': type.OptionalType(type.ReferenceType(__name__, 'Hardware.Version')),
},
UpdateSpec,
False,
None))
def get(self,
vm,
):
"""
Returns the virtual hardware settings of a virtual machine.
:type vm: :class:`str`
:param vm: Virtual machine identifier.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:rtype: :class:`Hardware.Info`
:return: Virtual hardware settings of the virtual machine.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
if the virtual machine's configuration state cannot be accessed.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the system is unable to communicate with a service to complete
the request.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
"""
return self._invoke('get',
{
'vm': vm,
})
def update(self,
vm,
spec,
):
"""
Updates the virtual hardware settings of a virtual machine.
:type vm: :class:`str`
:param vm: Virtual machine identifier.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:type spec: :class:`Hardware.UpdateSpec`
:param spec: Specification for updating the virtual hardware settings of the
virtual machine.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the virtual machine is already configured for the desired
hardware version.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
if the requested virtual hardware version is not newer than the
current version.
:raise: :class:`com.vmware.vapi.std.errors_client.Unsupported`
if the requested virtual hardware version is not supported by the
server.
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
if the virtual machine is busy performing another operation.
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
if the virtual machine's configuration state cannot be accessed.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the system is unable to communicate with a service to complete
the request.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
"""
return self._invoke('update',
{
'vm': vm,
'spec': spec,
})
def upgrade(self,
vm,
version=None,
):
"""
Upgrades the virtual machine to a newer virtual hardware version.
:type vm: :class:`str`
:param vm: Virtual machine identifier.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:type version: :class:`Hardware.Version` or ``None``
:param version: New virtual machine version.
If None, defaults to the most recent virtual hardware version
supported by the server.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if the virtual machine is not powered off.
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the virtual machine is already configured for the desired
hardware version.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
if ``version`` is older than the current virtual hardware version.
:raise: :class:`com.vmware.vapi.std.errors_client.Unsupported`
if ``version`` is not supported by the server.
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
if the virtual machine is busy performing another operation.
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
if the virtual machine's configuration state cannot be accessed.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the system is unable to communicate with a service to complete
the request.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
"""
return self._invoke('upgrade',
{
'vm': vm,
'version': version,
})
class Identity(VapiInterface):
"""
The ``Identity`` class provides methods for managing the identity of a
virtual machine. This class was added in vSphere API 6.7.1.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.identity'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _IdentityStub)
self._VAPI_OPERATION_IDS = {}
class Info(VapiStruct):
"""
The ``Identity.Info`` class contains information about the identity of a
virtual machine. This class was added in vSphere API 6.7.1.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
name=None,
bios_uuid=None,
instance_uuid=None,
):
"""
:type name: :class:`str`
:param name: Virtual machine name. This attribute was added in vSphere API
6.7.1.
:type bios_uuid: :class:`str`
:param bios_uuid: 128-bit SMBIOS UUID of a virtual machine represented as a
hexadecimal string in "12345678-abcd-1234-cdef-123456789abc"
format. This attribute was added in vSphere API 6.7.1.
:type instance_uuid: :class:`str`
:param instance_uuid: VirtualCenter-specific 128-bit UUID of a virtual machine,
represented as a hexademical string. This identifier is used by
VirtualCenter to uniquely identify all virtual machine instances,
including those that may share the same SMBIOS UUID. This attribute
was added in vSphere API 6.7.1.
"""
self.name = name
self.bios_uuid = bios_uuid
self.instance_uuid = instance_uuid
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.identity.info', {
'name': type.StringType(),
'bios_uuid': type.StringType(),
'instance_uuid': type.StringType(),
},
Info,
False,
None))
class LibraryItem(VapiInterface):
"""
The ``LibraryItem`` class provides methods to identify virtual machines
managed by Content Library. This class was added in vSphere API 6.9.1.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.library_item'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _LibraryItemStub)
self._VAPI_OPERATION_IDS = {}
class Info(VapiStruct):
"""
The ``LibraryItem.Info`` class contains information about the library item
associated with a virtual machine. This class was added in vSphere API
6.9.1.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
check_out=None,
):
"""
:type check_out: :class:`LibraryItem.CheckOutInfo` or ``None``
:param check_out: Information about the checked out virtual machine. This attribute
was added in vSphere API 6.9.1.
If None, the virtual machine is not checked out from a library
item.
"""
self.check_out = check_out
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.library_item.info', {
'check_out': type.OptionalType(type.ReferenceType(__name__, 'LibraryItem.CheckOutInfo')),
},
Info,
False,
None))
class CheckOutInfo(VapiStruct):
"""
The ``LibraryItem.CheckOutInfo`` class contains information about a virtual
machine checked out of a content library item. This class was added in
vSphere API 6.9.1.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
library_item=None,
):
"""
:type library_item: :class:`str`
:param library_item: Identifier of the library item that the virtual machine is checked
out from. This attribute was added in vSphere API 6.9.1.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.content.library.Item``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``com.vmware.content.library.Item``.
"""
self.library_item = library_item
VapiStruct.__init__(self)
CheckOutInfo._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.library_item.check_out_info', {
'library_item': type.IdType(resource_types='com.vmware.content.library.Item'),
},
CheckOutInfo,
False,
None))
def get(self,
vm,
):
"""
Returns the information about the library item associated with the
virtual machine. This method was added in vSphere API 6.9.1.
:type vm: :class:`str`
:param vm: Identifier of the virtual machine.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:rtype: :class:`LibraryItem.Info`
:return: Information about the library item associated with the virtual
machine.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user that requested the method cannot be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user that requested the method is not authorized to perform
the method.
"""
return self._invoke('get',
{
'vm': vm,
})
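# Illustrative usage sketch (not part of the generated bindings): check whether
# a VM was checked out from a Content Library item. ``stub_config`` stands for a
# :class:`vmware.vapi.bindings.stub.StubConfiguration` built from an
# authenticated session, and 'vm-42' is a placeholder VM identifier.
#
#     library_item_svc = LibraryItem(stub_config)
#     info = library_item_svc.get(vm='vm-42')
#     if info.check_out is not None:
#         print('Checked out from library item:', info.check_out.library_item)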
class Power(VapiInterface):
"""
The ``Power`` class provides methods for managing the power state of a
virtual machine.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.power'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _PowerStub)
self._VAPI_OPERATION_IDS = {}
class State(Enum):
"""
The ``Power.State`` class defines the valid power states for a virtual
machine.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
POWERED_OFF = None
"""
The virtual machine is powered off.
"""
POWERED_ON = None
"""
The virtual machine is powered on.
"""
SUSPENDED = None
"""
The virtual machine is suspended.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`State` instance.
"""
Enum.__init__(string)
State._set_values([
State('POWERED_OFF'),
State('POWERED_ON'),
State('SUSPENDED'),
])
State._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.power.state',
State))
class Info(VapiStruct):
"""
The ``Power.Info`` class contains information about the power state of a
virtual machine.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'state',
{
'POWERED_OFF' : [('clean_power_off', True)],
'POWERED_ON' : [],
'SUSPENDED' : [],
}
),
]
def __init__(self,
state=None,
clean_power_off=None,
):
"""
:type state: :class:`Power.State`
:param state: Power state of the virtual machine.
:type clean_power_off: :class:`bool`
:param clean_power_off: Flag indicating whether the virtual machine was powered off
cleanly. This attribute may be used to detect that the virtual
machine crashed unexpectedly and should be restarted.
This attribute is optional and it is only relevant when the value
of ``state`` is :attr:`Power.State.POWERED_OFF`.
"""
self.state = state
self.clean_power_off = clean_power_off
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.power.info', {
'state': type.ReferenceType(__name__, 'Power.State'),
'clean_power_off': type.OptionalType(type.BooleanType()),
},
Info,
False,
None))
def get(self,
vm,
):
"""
Returns the power state information of a virtual machine.
:type vm: :class:`str`
:param vm: Virtual machine identifier.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:rtype: :class:`Power.Info`
:return: Power state information for the specified virtual machine.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
if the virtual machine's configuration or execution state cannot be
accessed.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the system is unable to communicate with a service to complete
the request.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have all of the privileges described as follows:
* The resource ``VirtualMachine`` referenced by the parameter
``vm`` requires ``System.Read``.
"""
return self._invoke('get',
{
'vm': vm,
})
def start(self,
vm,
):
"""
Powers on a powered-off or suspended virtual machine.
:type vm: :class:`str`
:param vm: Virtual machine identifier.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the virtual machine is already powered on.
:raise: :class:`com.vmware.vapi.std.errors_client.Unsupported`
if the virtual machine does not support being powered on (e.g.
marked as a template, serving as a fault-tolerance secondary
virtual machine).
:raise: :class:`com.vmware.vapi.std.errors_client.UnableToAllocateResource`
if resources cannot be allocated for the virtual machine (e.g.
physical resource allocation policy cannot be satisfied,
insufficient licenses are available to run the virtual machine).
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
if resources required by the virtual machine are not accessible
(e.g. virtual machine configuration files or virtual disks are on
inaccessible storage, no hosts are available to run the virtual
machine).
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceInUse`
if resources required by the virtual machine are in use (e.g.
virtual machine configuration files or virtual disks are locked,
host containing the virtual machine is an HA failover host).
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
if the virtual machine is performing another operation.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the system is unable to communicate with a service to complete
the request.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have all of the privileges described as follows:
* The resource ``VirtualMachine`` referenced by the parameter
``vm`` requires ``VirtualMachine.Interact.PowerOn``.
"""
return self._invoke('start',
{
'vm': vm,
})
def stop(self,
vm,
):
"""
Powers off a powered-on or suspended virtual machine.
:type vm: :class:`str`
:param vm: Virtual machine identifier.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the virtual machine is already powered off.
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
if the virtual machine is performing another operation.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the system is unable to communicate with a service to complete
the request.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have all of the privileges described as follows:
* The resource ``VirtualMachine`` referenced by the parameter
``vm`` requires ``VirtualMachine.Interact.PowerOff``.
"""
return self._invoke('stop',
{
'vm': vm,
})
def suspend(self,
vm,
):
"""
Suspends a powered-on virtual machine.
:type vm: :class:`str`
:param vm: Virtual machine identifier.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if the virtual machine is already suspended.
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if the virtual machine is powered off.
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
if the virtual machine is performing another operation.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the system is unable to communicate with a service to complete
the request.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have all of the privileges described as follows:
* The resource ``VirtualMachine`` referenced by the parameter
``vm`` requires ``VirtualMachine.Interact.Suspend``.
"""
return self._invoke('suspend',
{
'vm': vm,
})
def reset(self,
vm,
):
"""
Resets a powered-on virtual machine.
:type vm: :class:`str`
:param vm: Virtual machine identifier.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if the virtual machine is powered off or suspended.
:raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
if the virtual machine is performing another operation
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the system is unable to communicate with a service to complete
the request.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have all of the privileges described as follows:
* The resource ``VirtualMachine`` referenced by the parameter
``vm`` requires ``VirtualMachine.Interact.Reset``.
"""
return self._invoke('reset',
{
'vm': vm,
})
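# Illustrative usage sketch (not part of the generated bindings): query a VM's
# power state and power it on if it is off. ``stub_config`` and 'vm-42' are the
# same placeholders as above.
#
#     power_svc = Power(stub_config)
#     if power_svc.get(vm='vm-42').state == Power.State.POWERED_OFF:
#         power_svc.start(vm='vm-42')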
class Tools(VapiInterface):
"""
The ``Tools`` class provides methods for managing VMware Tools in the guest
operating system. This class was added in vSphere API 7.0.0.0.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.tools'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _ToolsStub)
self._VAPI_OPERATION_IDS = {}
class RunState(Enum):
"""
Current run state of VMware Tools in the guest operating system. This
enumeration was added in vSphere API 7.0.0.0.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
NOT_RUNNING = None
"""
VMware Tools is not running. This class attribute was added in vSphere API
7.0.0.0.
"""
RUNNING = None
"""
VMware Tools is running. This class attribute was added in vSphere API
7.0.0.0.
"""
EXECUTING_SCRIPTS = None
"""
VMware Tools is running scripts as part of a state transition. This class
attribute was added in vSphere API 7.0.0.0.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`RunState` instance.
"""
Enum.__init__(string)
RunState._set_values([
RunState('NOT_RUNNING'),
RunState('RUNNING'),
RunState('EXECUTING_SCRIPTS'),
])
RunState._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.tools.run_state',
RunState))
class UpgradePolicy(Enum):
"""
The ``Tools.UpgradePolicy`` class defines when Tools are auto-upgraded for
a virtual machine. This enumeration was added in vSphere API 7.0.0.0.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
MANUAL = None
"""
No auto-upgrades for Tools will be performed for this virtual machine.
Users must manually invoke the :func:`Tools.upgrade` method to update
Tools. This class attribute was added in vSphere API 7.0.0.0.
"""
UPGRADE_AT_POWER_CYCLE = None
"""
When the virtual machine is power-cycled, the system checks for a newer
version of Tools when the virtual machine is powered on. If it is
available, a Tools upgrade is automatically performed on the virtual
machine and it is rebooted if necessary. This class attribute was added in
vSphere API 7.0.0.0.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`UpgradePolicy` instance.
"""
Enum.__init__(string)
UpgradePolicy._set_values([
UpgradePolicy('MANUAL'),
UpgradePolicy('UPGRADE_AT_POWER_CYCLE'),
])
UpgradePolicy._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.tools.upgrade_policy',
UpgradePolicy))
class VersionStatus(Enum):
"""
The ``Tools.VersionStatus`` class defines the version status types of
VMware Tools installed in the guest operating system. This enumeration was
added in vSphere API 7.0.0.0.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
NOT_INSTALLED = None
"""
VMware Tools has never been installed. This class attribute was added in
vSphere API 7.0.0.0.
"""
CURRENT = None
"""
VMware Tools is installed, and the version is current. This class attribute
was added in vSphere API 7.0.0.0.
"""
UNMANAGED = None
"""
VMware Tools is installed, but it is not managed by VMware. This includes
open-vm-tools or OSPs which should be managed by the guest operating
system. This class attribute was added in vSphere API 7.0.0.0.
"""
TOO_OLD_UNSUPPORTED = None
"""
VMware Tools is installed, but the version is too old. This class attribute
was added in vSphere API 7.0.0.0.
"""
SUPPORTED_OLD = None
"""
VMware Tools is installed, supported, but a newer version is available.
This class attribute was added in vSphere API 7.0.0.0.
"""
SUPPORTED_NEW = None
"""
VMware Tools is installed, supported, and newer than the version available
on the host. This class attribute was added in vSphere API 7.0.0.0.
"""
TOO_NEW = None
"""
VMware Tools is installed, and the version is known to be too new to work
correctly with this virtual machine. This class attribute was added in
vSphere API 7.0.0.0.
"""
BLACKLISTED = None
"""
VMware Tools is installed, but the installed version is known to have a
grave bug and should be immediately upgraded. This class attribute was
added in vSphere API 7.0.0.0.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`VersionStatus` instance.
"""
Enum.__init__(string)
VersionStatus._set_values([
VersionStatus('NOT_INSTALLED'),
VersionStatus('CURRENT'),
VersionStatus('UNMANAGED'),
VersionStatus('TOO_OLD_UNSUPPORTED'),
VersionStatus('SUPPORTED_OLD'),
VersionStatus('SUPPORTED_NEW'),
VersionStatus('TOO_NEW'),
VersionStatus('BLACKLISTED'),
])
VersionStatus._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.tools.version_status',
VersionStatus))
class ToolsInstallType(Enum):
"""
The ``Tools.ToolsInstallType`` class defines the installation type of the
Tools in the guest operating system. This enumeration was added in vSphere
API 7.0.0.0.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
UNKNOWN = None
"""
Installation type is not known. Most likely Tools have been installed by
OSPs or open-vm-tools, but the version either does not report its install
type or reports an install type that we do not recognize. This class
attribute was added in vSphere API 7.0.0.0.
"""
MSI = None
"""
MSI is the installation type used for VMware Tools on Windows. This class
attribute was added in vSphere API 7.0.0.0.
"""
TAR = None
"""
Tools have been installed by the tar installer. This class attribute was
added in vSphere API 7.0.0.0.
"""
OSP = None
"""
OSPs are RPM or Debian packages tailored for the OS in the VM. See
http://packages.vmware.com. This class attribute was added in vSphere API
7.0.0.0.
"""
OPEN_VM_TOOLS = None
"""
open-vm-tools are the open-source version of VMware Tools, may have been
packaged by the OS vendor. This class attribute was added in vSphere API
7.0.0.0.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`ToolsInstallType` instance.
"""
Enum.__init__(string)
ToolsInstallType._set_values([
ToolsInstallType('UNKNOWN'),
ToolsInstallType('MSI'),
ToolsInstallType('TAR'),
ToolsInstallType('OSP'),
ToolsInstallType('OPEN_VM_TOOLS'),
])
ToolsInstallType._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.tools.tools_install_type',
ToolsInstallType))
class Info(VapiStruct):
"""
The ``Tools.Info`` class describes the VMware Tools properties of a virtual
machine. This class was added in vSphere API 7.0.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
auto_update_supported=None,
install_attempt_count=None,
error=None,
version_number=None,
version=None,
upgrade_policy=None,
version_status=None,
install_type=None,
run_state=None,
):
"""
:type auto_update_supported: :class:`bool`
:param auto_update_supported: Set if the virtual machine supports auto-upgrading Tools via
:class:`Tools.UpgradePolicy`. This attribute was added in vSphere
API 7.0.0.0.
:type install_attempt_count: :class:`long` or ``None``
:param install_attempt_count: Number of attempts that have been made to install or upgrade the
version of Tools installed on this virtual machine. This attribute
was added in vSphere API 7.0.0.0.
This attribute will be None if there have been no Tools install or
upgrade attempts.
:type error: :class:`Exception` or ``None``
:param error: Error that happened, if any, during last attempt to upgrade or
install Tools. This attribute was added in vSphere API 7.0.0.0.
This attribute will be None if the last Tools install or upgrade
attempt succeeded.
:type version_number: :class:`long` or ``None``
:param version_number: Version of VMware Tools installed on the guest operating system.
This attribute was added in vSphere API 7.0.0.0.
This attribute will be None if VMware Tools is not installed. This
is an integer constructed as follows: (((MJR) << 10) + ((MNR) << 5)
+ (REV)), where MJR is the major version, MNR is the minor version
and REV is the revision. Given Tools version = T: Tools Version
Major = MJR = (T / 1024), Tools Version Minor = MNR = ((T % 1024) /
32), Tools Version Revision = REV = ((T % 1024) % 32), so the actual
Tools version is MJR.MNR.REV.
:type version: :class:`str` or ``None``
:param version: Version of VMware Tools installed on the guest operating system.
This is a human-readable value that should not be parsed. This
attribute was added in vSphere API 7.0.0.0.
This attribute will be None if VMware Tools is not installed.
:type upgrade_policy: :class:`Tools.UpgradePolicy`
:param upgrade_policy: Tools upgrade policy setting for the virtual machine.
:class:`Tools.UpgradePolicy`. This attribute was added in vSphere
API 7.0.0.0.
:type version_status: :class:`Tools.VersionStatus` or ``None``
:param version_status: Current version status of VMware Tools in the guest operating
system, if known. This attribute was added in vSphere API 7.0.0.0.
This attribute will be None if the version status is not known, for
example if VMware Tools is too old to report the information.
:type install_type: :class:`Tools.ToolsInstallType` or ``None``
:param install_type: Current installation type of VMware Tools in the guest operating
system. This attribute was added in vSphere API 7.0.0.0.
This attribute will be None if the installation type is not known,
for example if VMware Tools is too old to report the information.
:type run_state: :class:`Tools.RunState`
:param run_state: Current run state of VMware Tools in the guest operating system.
This attribute was added in vSphere API 7.0.0.0.
"""
self.auto_update_supported = auto_update_supported
self.install_attempt_count = install_attempt_count
self.error = error
self.version_number = version_number
self.version = version
self.upgrade_policy = upgrade_policy
self.version_status = version_status
self.install_type = install_type
self.run_state = run_state
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.tools.info', {
'auto_update_supported': type.BooleanType(),
'install_attempt_count': type.OptionalType(type.IntegerType()),
'error': type.OptionalType(type.AnyErrorType()),
'version_number': type.OptionalType(type.IntegerType()),
'version': type.OptionalType(type.StringType()),
'upgrade_policy': type.ReferenceType(__name__, 'Tools.UpgradePolicy'),
'version_status': type.OptionalType(type.ReferenceType(__name__, 'Tools.VersionStatus')),
'install_type': type.OptionalType(type.ReferenceType(__name__, 'Tools.ToolsInstallType')),
'run_state': type.ReferenceType(__name__, 'Tools.RunState'),
},
Info,
False,
None))
class UpdateSpec(VapiStruct):
"""
The ``Tools.UpdateSpec`` class describes the VMware Tools properties of a
virtual machine that can be updated. This class was added in vSphere API
7.0.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
upgrade_policy=None,
):
"""
:type upgrade_policy: :class:`Tools.UpgradePolicy` or ``None``
:param upgrade_policy: Tools upgrade policy setting for the virtual machine.
:class:`Tools.UpgradePolicy`. This attribute was added in vSphere
API 7.0.0.0.
If None the upgrade policy will not be modified.
"""
self.upgrade_policy = upgrade_policy
VapiStruct.__init__(self)
UpdateSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.tools.update_spec', {
'upgrade_policy': type.OptionalType(type.ReferenceType(__name__, 'Tools.UpgradePolicy')),
},
UpdateSpec,
False,
None))
def get(self,
vm,
):
"""
Get the properties of VMware Tools. This method was added in vSphere
API 7.0.0.0.
:type vm: :class:`str`
:param vm: Identifier of the virtual machine.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:rtype: :class:`Tools.Info`
:return: VMware Tools properties.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
"""
return self._invoke('get',
{
'vm': vm,
})
def update(self,
vm,
spec,
):
"""
Update the properties of VMware Tools. This method was added in vSphere
API 7.0.0.0.
:type vm: :class:`str`
:param vm: Identifier of the virtual machine.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:type spec: :class:`Tools.UpdateSpec`
:param spec: The new values.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
if the :attr:`Tools.UpdateSpec.upgrade_policy` attribute contains a
value that is not supported by the server.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
"""
return self._invoke('update',
{
'vm': vm,
'spec': spec,
})
def upgrade(self,
vm,
command_line_options=None,
):
"""
Begins the Tools upgrade process. To monitor the status of the Tools
upgrade, clients should check the Tools status by calling
:func:`Tools.get` and examining ``versionStatus`` and ``runState``.
This method was added in vSphere API 7.0.0.0.
:type vm: :class:`str`
:param vm: Identifier of the virtual machine.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:type command_line_options: :class:`str` or ``None``
:param command_line_options: Command line options passed to the installer to modify the
installation procedure for Tools.
Set if any additional options are desired.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the VMware Tools are not running.
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if the virtual machine is not powered on.
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
if an upgrade is already in progress.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the upgrade process fails inside the guest operating system.
"""
return self._invoke('upgrade',
{
'vm': vm,
'command_line_options': command_line_options,
})
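# Illustrative usage sketch (not part of the generated bindings): inspect VMware
# Tools on a VM and trigger an upgrade when a newer supported version exists.
# ``stub_config`` and 'vm-42' are placeholders as above.
#
#     tools_svc = Tools(stub_config)
#     info = tools_svc.get(vm='vm-42')
#     if info.version_status == Tools.VersionStatus.SUPPORTED_OLD:
#         tools_svc.upgrade(vm='vm-42')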
class _HardwareStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/vm/{vm}/hardware',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
'spec': type.ReferenceType(__name__, 'Hardware.UpdateSpec'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
'com.vmware.vapi.std.errors.unsupported':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unsupported'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = OperationRestMetadata(
http_method='PATCH',
url_template='/vcenter/vm/{vm}/hardware',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for upgrade operation
upgrade_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
'version': type.OptionalType(type.ReferenceType(__name__, 'Hardware.Version')),
})
upgrade_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
'com.vmware.vapi.std.errors.unsupported':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unsupported'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
upgrade_input_value_validator_list = [
]
upgrade_output_validator_list = [
]
upgrade_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/hardware/action/upgrade',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Hardware.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.VoidType(),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
'upgrade': {
'input_type': upgrade_input_type,
'output_type': type.VoidType(),
'errors': upgrade_error_dict,
'input_value_validator_list': upgrade_input_value_validator_list,
'output_validator_list': upgrade_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
'update': update_rest_metadata,
'upgrade': upgrade_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.hardware',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _IdentityStub(ApiInterfaceStub):
def __init__(self, config):
operations = {
}
rest_metadata = {
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.identity',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _LibraryItemStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/vm/{vm}/library-item',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'LibraryItem.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.library_item',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _PowerStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/vm/{vm}/power',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for start operation
start_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
start_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.unsupported':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unsupported'),
'com.vmware.vapi.std.errors.unable_to_allocate_resource':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'UnableToAllocateResource'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.resource_in_use':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInUse'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
start_input_value_validator_list = [
]
start_output_validator_list = [
]
start_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/power/start',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for stop operation
stop_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
stop_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
stop_input_value_validator_list = [
]
stop_output_validator_list = [
]
stop_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/power/stop',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for suspend operation
suspend_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
suspend_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
suspend_input_value_validator_list = [
]
suspend_output_validator_list = [
]
suspend_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/power/suspend',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for reset operation
reset_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
reset_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
reset_input_value_validator_list = [
]
reset_output_validator_list = [
]
reset_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/power/reset',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Power.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'start': {
'input_type': start_input_type,
'output_type': type.VoidType(),
'errors': start_error_dict,
'input_value_validator_list': start_input_value_validator_list,
'output_validator_list': start_output_validator_list,
'task_type': TaskType.NONE,
},
'stop': {
'input_type': stop_input_type,
'output_type': type.VoidType(),
'errors': stop_error_dict,
'input_value_validator_list': stop_input_value_validator_list,
'output_validator_list': stop_output_validator_list,
'task_type': TaskType.NONE,
},
'suspend': {
'input_type': suspend_input_type,
'output_type': type.VoidType(),
'errors': suspend_error_dict,
'input_value_validator_list': suspend_input_value_validator_list,
'output_validator_list': suspend_output_validator_list,
'task_type': TaskType.NONE,
},
'reset': {
'input_type': reset_input_type,
'output_type': type.VoidType(),
'errors': reset_error_dict,
'input_value_validator_list': reset_input_value_validator_list,
'output_validator_list': reset_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
'start': start_rest_metadata,
'stop': stop_rest_metadata,
'suspend': suspend_rest_metadata,
'reset': reset_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.power',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _ToolsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/vm/{vm}/tools',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
'spec': type.ReferenceType(__name__, 'Tools.UpdateSpec'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = OperationRestMetadata(
http_method='PATCH',
url_template='/vcenter/vm/{vm}/tools',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for upgrade operation
upgrade_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
'command_line_options': type.OptionalType(type.StringType()),
})
upgrade_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
}
upgrade_input_value_validator_list = [
]
upgrade_output_validator_list = [
]
upgrade_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/tools',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Tools.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.VoidType(),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
'upgrade': {
'input_type': upgrade_input_type,
'output_type': type.VoidType(),
'errors': upgrade_error_dict,
'input_value_validator_list': upgrade_input_value_validator_list,
'output_validator_list': upgrade_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
'update': update_rest_metadata,
'upgrade': upgrade_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.tools',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class StubFactory(StubFactoryBase):
_attrs = {
'Hardware': Hardware,
'Identity': Identity,
'LibraryItem': LibraryItem,
'Power': Power,
'Tools': Tools,
'console': 'com.vmware.vcenter.vm.console_client.StubFactory',
'guest': 'com.vmware.vcenter.vm.guest_client.StubFactory',
'hardware': 'com.vmware.vcenter.vm.hardware_client.StubFactory',
'storage': 'com.vmware.vcenter.vm.storage_client.StubFactory',
'tools': 'com.vmware.vcenter.vm.tools_client.StubFactory',
}
# ===== /old/.history/a_20201125194051.py (repo: pscly/bisai1) =====
# for n in range(400,500):
# i = n // 100
# j = n // 10 % 10
# k = n % 10
# if n == i ** 3 + j ** 3 + k ** 3:
# print(n)
# Problem 1 (16)
# input("Enter input (first time):")
# s1 = input("Enter input (second time):")
# l1 = s1.split(' ')
# l2 = []
# for i in l1:
#     if i.isdigit():
#         l2.append(int(i))
# for i in l2:
#     if not (i % 6):
#         print(i, end=" ")
# Problem 2 (17)
out_l1 = []


def bian_int_list(l1):
    """Convert a list of strings to a list of ints."""
    re_l1 = []  # list that gets returned
    for i in l1:
        re_l1.append(int(i))
    return re_l1


def jisuan(int_num):
    """Return True when the sum of the squared digits of int_num exceeds int_num."""
    he1 = 0
    for i in str(int_num):
        he1 += int(i) ** 2
    if he1 > int_num:
        return True
    return None


while 1:
    in_1 = input("Enter two numbers (start end): ")
    nums_l1 = bian_int_list(in_1.split(' '))
    for i in range(nums_l1[0], nums_l1[1] + 1):
        if jisuan(i):
            out_l1.append(i)
            print(i)
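# Example run of the fixed code above: entering "10 20" tests every n in
# [10, 20] and prints the numbers whose digit-square sum exceeds n, i.e.
# 14, 15, 16, 17, 18 and 19 (one per line).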
# ===== /src/cfehome/old_settings.py (repo: carter3689/django-intro) =====
"""
Django settings for cfehome project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p!@78+nocob7yj%nean8wwes$s_vmp2$!sahv8#gopd0mi20zn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cfehome.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cfehome.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
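# A minimal sketch of production-oriented overrides (an assumption, not part of
# the generated project): ``collectstatic`` needs STATIC_ROOT, and SECRET_KEY is
# usually read from the environment instead of being hard-coded. ``os`` is
# already imported at the top of this module.
#
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#     STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')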
# ===== /config.py (repo: 2429581027/spe2018) =====
'''
file: config.py
date: 2018_09_19
author: Junjie Cao
'''
import argparse
###################################
## shared parameters
parser = argparse.ArgumentParser(description = 'spe 2019, reconstruction from incomplete points')
#parser.add_argument('--data_root', type = str, default = '/data/spe_database_old', help = 'it is a shared parameter')
parser.add_argument('--data_root', type = str,default = '../../data', help = 'it is a shared parameter') # for my macbook
parser.add_argument('--outf', type=str, default='../../data/spe_out', help='output folder')# /Users/jjcao/data/spe_data_train_11348
parser.add_argument('--model', type=str, default = './model/0.pkl', help='saved/pre_trained model')
# parser.add_argument('--modelBeta', type=str, default = './model/SPENetSiam.pkl', help='saved/pre_trained model')
# parser.add_argument('--modelPose', type=str, default = './model/SPENetSiam.pkl', help='saved/pre_trained model')
# parser.add_argument('--modelGen', type=str, default = './model/SPENetSiam_pointnetmini_PointGenCon_84_0.109_s0.106_p0.001_3d0.0004_decoded0.0002_j0.0001-centerBinput-stnOutput.pkl', help='saved/pre_trained model')
parser.add_argument('--center_input', default = True, type = bool, help = 'center input in dataset')
parser.add_argument('--trans_smpl_generated', default = 'stn', type = str, help = 'None, stn, center')
# should >= number of GPU*2. e.g. 72 batch in 3 GPU leads to 24 batch in each GPU. # If the batches number on each GPU == 1, nn.BatchNorm1d fails.
# large batch size => better convergence. # 16 for 6-9G gpu with decoder, 24 for ? without decoder
#parser.add_argument('--batch_size', type=int, default=128, help='input batch size') #72=24*3=18*4, 96=24*4
parser.add_argument('--batch_size', type=int, default=2, help='input batch size') # for debug on mac
parser.add_argument('--start_epoch', type=int, default = 0, help='')
parser.add_argument('--no_epoch', type=int, default = 121, help='number of epochs to train for')#121
parser.add_argument('--lr',type = float,default = 0.001,help = 'learning rate')#0.001
parser.add_argument('--step_lr', type = float, default = 10, help = 'encoder learning rate.')
parser.add_argument('--step_save', type = float, default = 2, help = 'step for saving model.')
parser.add_argument('--shape_ratio',type = float, default = 40.0 ,help = 'weight of shape loss') #40 for GMOF loss function
parser.add_argument('--pose_ratio',type = float, default = 400.0, help = 'weight of pose')# 400 for GMOF loss function
#default: 400. 20 is enough for making sure that the predicted pose parameter does not contain global rotation
parser.add_argument('--threeD_ratio',type = float, default = 400.0, help = 'weight of vertices decoded by smpl')
#default: 200. 20 is enough for making sure that the predicted pose parameter does not contain global rotation
parser.add_argument('--j3d_ratio',type = float, default = 0.0, help = 'weight of 3d key points decoded by smpl') #200
parser.add_argument('--decoded_ratio',type = float, default = 400.0, help = 'weight of vertices decoded by decoder')#400,
#parser.add_argument('--with_chamfer',default = False, type = bool,help = 'use chamfer loss')
#parser.add_argument('--chamfer_ratio',type = float, default = 0.0, help = 'weight of 3d chamfer distance')#50
###################################
## parameters for training
parser.add_argument('--network', type = str,default = 'SPENet',help = 'SPENet, SPENetSiam, SPENetBeta, SPENetPose')
parser.add_argument('--encoder', type = str,default = 'pointnetmini',help = 'pointnetmini, pointnet or pointnet2')
parser.add_argument('--decoder', type = str,default = 'None',help = 'None, PointGenCon or pointnet2 or dispNet?')
parser.add_argument('--with_stn', default = 'STN3dTR', type = str, help = 'use STN3dR, STN3dRQuad, STN3dTR, or None in encoder')
parser.add_argument('--with_stn_feat', default = False, type = bool, help = 'use stn feature transform in encoder or not')
parser.add_argument('--pervertex_weight', type = str, default = 'None', help = 'None or ')#./data/pervertex_weight_sdf.npz
parser.add_argument('--point_count', type=int, default=2500, help='the count of vertices in the input pointcloud for training')
parser.add_argument('--workers', type=int, default=0, help='number of data loading workers - 0 means same thread as main execution')
parser.add_argument('--momentum',type = float,default = 0.9,help = 'momentum')
# weight decay = 0.0001, it is very important for training the network using adam
parser.add_argument('--wd', type = float, default = 0.0001, help = 'encoder weight decay rate.')
parser.add_argument('--ls', type = str, default = 'L2', help = 'loss function: L2, L1, or GMOF (from less robust to more robust).')
parser.add_argument('--vis', type=str, default= 'spe', help='visdom environment, use visualization in training')
parser.add_argument('--smpl_mean_theta_path', type = str, default = './data/neutral_smpl_mean_params.h5', help = 'the path for mean smpl theta value')
parser.add_argument('--smpl_model',type = str,
default = './data/neutral_smpl_with_cocoplus_reg.txt',
help = 'smpl model path')
########
# for reconstruction, correspondence
parser.add_argument('--HR', type=int, default=0, help='use the high-resolution template for better precision in the nearest-neighbor step?')
parser.add_argument('--nepoch', type=int, default=3000, help='number of epochs to train for during the regression step')
# parser.add_argument('--inputA', type=str, default = "/data/MPI-FAUST/test/scans/test_scan_021.ply", help='your path to mesh 0')
# parser.add_argument('--inputB', type=str, default = "/data/MPI-FAUST/test/scans/test_scan_011.ply", help='your path to mesh 1')
parser.add_argument('--inputA', type=str, default = "data/example_0.ply", help='your path to mesh 0')
parser.add_argument('--inputB', type=str, default = "data/example_1.ply", help='your path to mesh 1')
#parser.add_argument('--num_points', type=int, default = 6890, help='number of points fed to poitnet') # point_count
#parser.add_argument('--num_angles', type=int, default = 300, help='number of angles in the search for the optimal reconstruction. Set to 1 if your mesh is already facing the canonical direction, as in data/example_1.ply')
parser.add_argument('--clean', type=int, default=1, help='if 1, remove points that dont belong to any edges')
parser.add_argument('--scale', type=int, default=1, help='if 1, scale input mesh to have same volume as the template')
parser.add_argument('--project_on_target', type=int, default=0, help='if 1, projects predicted correspondences point on target mesh')
########
# for data generation
parser.add_argument('--human_count', type = int, default = 30000, help = 'the count of male/female subjects in the generated database')
parser.add_argument('--sample_count', type = int, default = 0, help = 'the count of samples of a SMPL template mesh') # 2500
parser.add_argument('--op', type = str, default = 'generate', help = 'generate, distill, unify')
parser.add_argument('--gender', type = str, default = 'm', help = 'm for male, f for female, b for both')
parser.add_argument('--data_type', type = str, default = 'w', help = 'w for whole, f for front view, fb for front & back view')
# spe_dataset_train_specifiedPose
parser.add_argument('--database_train', type = str, default = 'spe_dataset_train', help = 'name')
parser.add_argument('--database_val', type = str, default = 'spe_dataset_val', help = 'name')
args = parser.parse_args() | [
"[email protected]"
] | |
5836ad6384982599fa5386c942f276b1fcbd7022 | 05fc3134da52ab0f1d95d9c4304bde68fc2a56cc | /tasks.py | a5661e372b313f07d146231967b867407d64dc2f | [
"AGPL-3.0-only"
] | permissive | lino-framework/extjs6 | b046d43bac3676afd2bbad825a8c478c2007471f | 6c8cf927e265bf23ad15d07da0b01c087c7bff07 | refs/heads/master | 2023-07-21T15:39:04.616082 | 2023-07-10T20:35:39 | 2023-07-10T20:35:39 | 46,885,420 | 6 | 1 | BSD-2-Clause | 2018-02-13T05:52:43 | 2015-11-25T20:40:26 | CSS | UTF-8 | Python | false | false | 448 | py | from atelier.invlib import setup_from_tasks
ns = setup_from_tasks(
globals(), "lino_extjs6",
languages="en de fr et".split(),
# tolerate_sphinx_warnings=True,
blogref_url = 'https://luc.lino-framework.org',
revision_control_system='git',
# locale_dir='lino_extjs/extjs/locale',
cleanable_files=['docs/api/lino_extjs6.*'],
demo_projects=[
'lino_extjs6.projects.team6',
'lino_extjs6.projects.lydia6'])
| [
"[email protected]"
] | |
677352f08e920cb21713ec2f072334eb23f02ebb | a56e5570ab57e4d3c44c9c6ba44bdacac9fa1ad8 | /insertion_sort.py | 027c54743f008f5ce2dac82c48a2eeee27837080 | [] | no_license | teknofage/CS-2.1-Sorting_Algorithms | a7db54c29af5c939022d4dd6453a0529256a3bc1 | e42b64c4d606d76102b5930ae8e74822a75999ae | refs/heads/main | 2023-01-20T16:52:00.816333 | 2020-12-05T07:50:55 | 2020-12-05T07:50:55 | 308,201,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | def insertionSort(alist):
for i in range(1,len(alist)):
#element to be compared
current = alist[i]
#comparing the current element with the sorted portion and swapping
while i>0 and alist[i-1]>current:
alist[i] = alist[i-1]
i = i-1
alist[i] = current
#print(alist)
return alist
print([5,2,1,9,0,4,6])
print(insertionSort([5,2,1,9,0,4,6])) | [
"[email protected]"
] | |
61d67338da326c0b82ae9ef359f504ccba54da59 | ed298f7b16e0a1fcc4d5ddc9da324247d200bc8a | /cleanup.py | 03ca72d1bca9728c96256d120fb9e0c22c7a7d14 | [] | no_license | stella-gao/deepfunc | ed1a67f0a0e682a2e0d1fde05a13fe190ec6f07e | a587512519c234c7ab70eb3fd504a98cd935b4ab | refs/heads/master | 2021-01-21T00:11:48.502524 | 2016-04-28T17:18:44 | 2016-04-28T17:18:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,207 | py | #!/usr/bin/env python
'''
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python gen_next_level_data.py
'''
import numpy
from keras.models import Sequential
from keras.layers.core import (
Dense, Dropout, Activation, Flatten)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.embeddings import Embedding
from keras.optimizers import SGD
from sklearn.metrics import classification_report
from keras.utils import np_utils
from utils import (
shuffle, train_val_test_split,
get_gene_ontology,
get_model_max_features,
encode_seq_one_hot)
import sys
import os
from collections import deque
LAMBDA = 24
DATA_ROOT = 'data/cnn/'
CUR_LEVEL = 'level_2/'
NEXT_LEVEL = 'level_3/'
MAXLEN = 1000
def get_model(
go_id,
parent_id,
nb_filter=64,
nb_row=3,
nb_col=3,
pool_length=2):
filepath = DATA_ROOT + CUR_LEVEL + parent_id + '/' + go_id + '.hdf5'
model = Sequential()
model.add(Convolution2D(nb_filter, nb_row, nb_col,
border_mode='valid',
input_shape=(1, MAXLEN, 20)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_length, pool_length)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(
loss='binary_crossentropy', optimizer='adam', class_mode='binary')
# Loading saved weights
print 'Loading weights for ' + go_id
model.load_weights(filepath)
return model
def main(*args, **kwargs):
if len(args) < 3:
raise Exception('Please provide function id')
parent_id = args[1]
go_id = args[2]
if len(args) == 4:
level = int(args[3])
global CUR_LEVEL
global NEXT_LEVEL
CUR_LEVEL = 'level_' + str(level) + '/'
NEXT_LEVEL = 'level_' + str(level + 1) + '/'
try:
model = get_model(go_id, parent_id)
except Exception, e:
print e
filepath = DATA_ROOT + CUR_LEVEL + parent_id + '/' + go_id + '.hdf5'
print "Removing " + filepath
os.remove(filepath)
if __name__ == '__main__':
main(*sys.argv)
| [
"[email protected]"
] | |
5f87f8a1dd51bbbd5ab67cacd2e7b1bf4819ff49 | 6cd2afb703f0037c38ebaaa7e3fc132d7acbfa31 | /viewer_19580/urls.py | 76bf364518c599d0bd749c685c1e2f4d16291727 | [] | no_license | crowdbotics-apps/viewer-19580 | 46aeb079e7cf8214232d2491e1f15f8ffdaab538 | 8cdc0dd08a7f66dc4cd58299c42b83bd59f6b692 | refs/heads/master | 2022-11-29T05:23:02.074502 | 2020-08-15T15:54:35 | 2020-08-15T15:54:35 | 287,777,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | """viewer_19580 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Viewer"
admin.site.site_title = "Viewer Admin Portal"
admin.site.index_title = "Viewer Admin"
# swagger
api_info = openapi.Info(
title="Viewer API",
default_version="v1",
description="API documentation for Viewer App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
f4277637101ca2452185a124b44a2047eef1c208 | b1931901a2599e170f4c0dbbecc1678ecd976904 | /Tools/Scripts/webkitpy/port/simulator_process.py | c1147b2bbf734a964fff63af7b4931702f8a1399 | [] | no_license | walmis/WPEWebKit-upstream | b75872f73073a2d58da0a9a51fc9aab891fb897d | 4b3a7b8cdd8afc12162fc2e0dcf474685e3fcf58 | refs/heads/master | 2023-03-10T11:19:26.173072 | 2017-03-22T09:28:59 | 2017-03-22T09:28:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,495 | py | # Copyright (C) 2017 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import os
import signal
import time
from webkitpy.port.server_process import ServerProcess
from webkitpy.xcode.simulator import Simulator
class SimulatorProcess(ServerProcess):
class Popen(object):
def __init__(self, pid, stdin, stdout, stderr):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pid = pid
self.returncode = None
def poll(self):
if self.returncode:
return self.returncode
try:
os.kill(self.pid, 0)
except OSError, err:
assert err.errno == errno.ESRCH
self.returncode = 1
return self.returncode
def wait(self):
while not self.poll():
time.sleep(0.01) # In seconds
return self.returncode
def __init__(self, port_obj, name, cmd, env=None, universal_newlines=False, treat_no_data_as_crash=False, worker_number=None):
self._bundle_id = port_obj.app_identifier_from_bundle(cmd[0])
self._device = port_obj.device_for_worker_number(worker_number)
env['IPC_IDENTIFIER'] = self._bundle_id + '-' + self._device.udid
# This location matches the location used by WebKitTestRunner and DumpRenderTree
# for the other side of these fifos.
file_location = '/tmp/' + env['IPC_IDENTIFIER']
self._in_path = file_location + '_IN'
self._out_path = file_location + '_OUT'
self._error_path = file_location + '_ERROR'
super(SimulatorProcess, self).__init__(port_obj, name, cmd, env, universal_newlines, treat_no_data_as_crash)
def _reset(self):
super(SimulatorProcess, self)._reset()
# Unlinks are needed on reset in the event that the Python code unexpectedly
# fails between _start() and kill(). This can be caused by a SIGKILL or a crash.
# This ensures that os.mkfifo() will not be obstructed by previous fifos.
# Other files will still cause os.mkfifo() to fail.
try:
os.unlink(self._in_path)
except:
pass
try:
os.unlink(self._out_path)
except:
pass
try:
os.unlink(self._error_path)
except:
pass
def _start(self):
if self._proc:
raise ValueError('{} already running'.format(self._name))
self._reset()
FIFO_PERMISSION_FLAGS = 0600 # Only owner can read and write
os.mkfifo(self._in_path, FIFO_PERMISSION_FLAGS)
os.mkfifo(self._out_path, FIFO_PERMISSION_FLAGS)
os.mkfifo(self._error_path, FIFO_PERMISSION_FLAGS)
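# Explanatory note: these three named pipes are the transport between this host-side
# process and the app launched in the simulator; the app is expected to open the other
# ends (reading *_IN, writing *_OUT and *_ERROR), mirroring the stdin/stdout/stderr of a
# normal child process.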
stdout = os.fdopen(os.open(self._out_path, os.O_RDONLY | os.O_NONBLOCK), 'rb')
stderr = os.fdopen(os.open(self._error_path, os.O_RDONLY | os.O_NONBLOCK), 'rb')
self._pid = self._device.launch_app(self._bundle_id, self._cmd[1:], env=self._env)
def handler(signum, frame):
assert signum == signal.SIGALRM
raise Exception('Timed out waiting for process to open {}'.format(self._in_path))
signal.signal(signal.SIGALRM, handler)
signal.alarm(3) # In seconds
stdin = None
try:
stdin = open(self._in_path, 'w', 0) # Opening with no buffering, like popen
except:
# We set self._proc as _reset() and _kill() depend on it.
self._proc = SimulatorProcess.Popen(self._pid, stdin, stdout, stderr)
if self._proc.poll() is not None:
self._reset()
raise Exception('App {} crashed before stdin could be attached'.format(os.path.basename(self._cmd[0])))
self._kill()
self._reset()
raise
signal.alarm(0) # Cancel alarm
self._proc = SimulatorProcess.Popen(self._pid, stdin, stdout, stderr)
def stop(self, timeout_secs=3.0):
try:
os.kill(self._pid, signal.SIGTERM)
except OSError as err:
assert err.errno == errno.ESRCH
pass
return super(SimulatorProcess, self).stop(timeout_secs)
| [
"[email protected]@268f45cc-cd09-0410-ab3c-d52691b4dbfc"
] | [email protected]@268f45cc-cd09-0410-ab3c-d52691b4dbfc |
ab3168a7ed6a211db35ec3e6069861560ba39898 | 1986f044d6476fab476a9b5eb9a95cc30d6a8eac | /Chapter07/pygal_1.py | c30537be5d2fb85031674c73d8f2dbb96a6b3e07 | [
"MIT"
] | permissive | PacktPublishing/Mastering-Python-Networking | 711f47ecff9ca2fec51f948badff22cd8c73ada4 | 52a2827919db1773f66700f3946390f200bd6dab | refs/heads/master | 2023-02-08T01:39:44.670413 | 2023-01-30T09:03:30 | 2023-01-30T09:03:30 | 82,666,812 | 138 | 127 | MIT | 2020-11-05T11:34:15 | 2017-02-21T10:25:34 | Python | UTF-8 | Python | false | false | 865 | py | #!/usr/bin/env python3
import pygal
x_time = []
out_octets = []
out_packets = []
in_octets = []
in_packets = []
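# Assumed input format (inferred from the keys used below, not verified): each line of
# results.txt is a Python dict literal such as
# {'Time': '10:00:00', 'Gig0-0_Out_uPackets': '120', 'Gig0-0_Out_Octet': '98000',
#  'Gig0-0_In_uPackets': '110', 'Gig0-0_In_Octet': '81000'}
# which is why eval(line) is used below instead of json.loads.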
with open('results.txt', 'r') as f:
for line in f.readlines():
# eval(line) reads in each line as dictionary instead of string
line = eval(line)
x_time.append(line['Time'])
out_packets.append(float(line['Gig0-0_Out_uPackets']))
out_octets.append(float(line['Gig0-0_Out_Octet']))
in_packets.append(float(line['Gig0-0_In_uPackets']))
in_octets.append(float(line['Gig0-0_In_Octet']))
line_chart = pygal.Line()
line_chart.title = "Router 1 Gig0/0"
line_chart.x_labels = x_time
line_chart.add('out_octets', out_octets)
line_chart.add('out_packets', out_packets)
line_chart.add('in_octets', in_octets)
line_chart.add('in_packets', in_packets)
line_chart.render_to_file('pygal_example_2.svg')
| [
"[email protected]"
] | |
d616c9ac31f6b34ba0c1d64c0a527e44a5450332 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/layout/scene/zaxis/tickfont/_color.py | c9c85093d3ba20c31f8f2d30cc4ebd575af30377 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 449 | py | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='layout.scene.zaxis.tickfont',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
role='style',
**kwargs
)
| [
"[email protected]"
] | |
287a191c2572037ada7b9ea37bd0ecbd1f5e4bc0 | 856e9a8afcb81ae66dd998b0d2cc3556c9f315ea | /tests/plugins/test_git_filters.py | 2a4efe5e20573ce2e090ef58b7c78ce98a865449 | [
"MIT"
] | permissive | dexy/dexy | 1d5c999830de4663c05a09f4cd00b1628dfc8d46 | 323c1806e51f75435e11d2265703e68f46c8aef3 | refs/heads/develop | 2023-06-10T08:02:45.076551 | 2021-02-28T22:40:41 | 2021-02-28T22:40:41 | 1,506,989 | 141 | 34 | MIT | 2020-06-15T17:44:50 | 2011-03-21T14:48:28 | Python | UTF-8 | Python | false | false | 3,046 | py | from dexy.exceptions import UserFeedback
from dexy.filters.git import repo_from_path
from dexy.filters.git import repo_from_url
from dexy.filters.git import generate_commit_info
from tests.utils import assert_in_output
from tests.utils import runfilter
from tests.utils import tempdir
from nose.exc import SkipTest
import os
import json
REMOTE_REPO_HTTPS = "https://github.com/ananelson/dexy-templates"
PATH_TO_LOCAL_REPO = os.path.expanduser("~/dev/testrepo")
# TODO use subprocess to check out a repo to a temp dir, or have a repo in data
# dir, or use [gasp] submodules.
try:
import pygit2
import urllib
no_local_repo = not os.path.exists(PATH_TO_LOCAL_REPO)
try:
urllib.urlopen("http://google.com")
no_internet = False
except IOError:
no_internet = True
if no_local_repo:
SKIP = (True, "No local repo at %s." % PATH_TO_LOCAL_REPO)
elif no_internet:
SKIP = (True, "Internet not available.")
else:
SKIP = (False, None)
except ImportError:
SKIP = (True, "pygit2 not installed")
def skip():
if SKIP[0]:
raise SkipTest(SKIP[1])
skip()
def test_run_gitrepo():
with runfilter("repo", REMOTE_REPO_HTTPS) as doc:
assert len(doc.wrapper.nodes) > 20
def test_generate_commit_info():
repo, remote = repo_from_url(REMOTE_REPO_HTTPS)
refs = repo.listall_references()
ref = repo.lookup_reference(refs[0])
commit = repo[ref.target]
commit_info = generate_commit_info(commit)
assert commit_info['author-name'] == "Ana Nelson"
assert commit_info['author-email'] == "[email protected]"
def test_git_commit():
with runfilter("gitcommit", REMOTE_REPO_HTTPS) as doc:
output = doc.output_data()
patches = json.loads(output['patches'])
assert output['author-name'] == "Ana Nelson"
assert output['author-email'] == "[email protected]"
#assert output['message'] == "Add README file."
#assert output['hex'] == "2f15837e64a70e4d34b924f6f8c371a266d16845"
def test_git_log():
assert_in_output("gitlog", PATH_TO_LOCAL_REPO,
"Add README file.")
def test_git_log_remote():
assert_in_output("gitlog", REMOTE_REPO_HTTPS,
"Rename")
def test_repo_from_url():
repo, remote = repo_from_url(REMOTE_REPO_HTTPS)
assert remote.name == 'origin'
assert remote.url == REMOTE_REPO_HTTPS
def test_repo_from_path():
repo, remote = repo_from_path(PATH_TO_LOCAL_REPO)
assert ".git" in repo.path
#assert isinstance(repo.head, pygit2.Object)
# assert "README" in repo.head.message
def test_repo_from_invalid_path():
with tempdir():
try:
repo, remote = repo_from_path(".")
assert False
except UserFeedback as e:
assert "no git repository was found at '.'" in str(e)
def test_run_git():
with runfilter("git", PATH_TO_LOCAL_REPO) as doc:
doc.output_data()
def test_run_git_remote():
with runfilter("git", REMOTE_REPO_HTTPS) as doc:
doc.output_data()
| [
"[email protected]"
] | |
6cc1dc4c8e6b81d2106b35562acc5a9448a76b64 | fd7a9faee9e2a6dbf89e54e1a7f228fcaf6911e1 | /tests/test_cnocr.py | 68b2776100394422842303886c7a0172e6ee7cb5 | [
"NCSA",
"Zlib",
"Intel",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-2-Clause-Views",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | wting861006/cnocr | f685c607e7ba84a8ead5a6a72301768c832a6320 | 9cb1cd57c2795007850bd25616880b15e4a3029d | refs/heads/master | 2023-09-04T18:36:30.822721 | 2021-11-05T12:03:23 | 2021-11-05T12:03:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,410 | py | # coding: utf-8
# Copyright (C) 2021, [Breezedeus](https://github.com/breezedeus).
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import pytest
import numpy as np
from PIL import Image
import Levenshtein
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(1, os.path.dirname(os.path.abspath(__file__)))
from cnocr import CnOcr
from cnocr.utils import read_img
from cnocr.consts import NUMBERS, AVAILABLE_MODELS
from cnocr.line_split import line_split
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
example_dir = os.path.join(root_dir, 'docs/examples')
CNOCR = CnOcr(model_name='densenet-s-fc', model_epoch=None)
SINGLE_LINE_CASES = [
('20457890_2399557098.jpg', ['就会哈哈大笑。3.0']),
('rand_cn1.png', ['笠淡嘿骅谧鼎皋姚歼蠢驼耳胬挝涯狗蒽孓犷']),
('rand_cn2.png', ['凉芦']),
('helloworld.jpg', ['Hello world!你好世界']),
]
MULTIPLE_LINE_CASES = [
('hybrid.png', ['o12345678']),
(
'multi-line_en_black.png',
[
'transforms the image many times. First, the image goes through many convolutional layers. In those',
'convolutional layers, the network learns new and increasingly complex features in its layers. Then the ',
'transformed image information goes through the fully connected layers and turns into a classification ',
'or prediction.',
],
),
(
'multi-line_en_white.png',
[
'This chapter is currently only available in this web version. ebook and print will follow.',
'Convolutional neural networks learn abstract features and concepts from raw image pixels. Feature',
'Visualization visualizes the learned features by activation maximization. Network Dissection labels',
'neural network units (e.g. channels) with human concepts.',
],
),
(
'multi-line_cn1.png',
[
'网络支付并无本质的区别,因为',
'每一个手机号码和邮件地址背后',
'都会对应着一个账户--这个账',
'户可以是信用卡账户、借记卡账',
'户,也包括邮局汇款、手机代',
'收、电话代收、预付费卡和点卡',
'等多种形式。',
],
),
(
'multi-line_cn2.png',
[
'当然,在媒介越来越多的情形下,',
'意味着传播方式的变化。过去主流',
'的是大众传播,现在互动性和定制',
'性带来了新的挑战——如何让品牌',
'与消费者更加互动。',
],
),
]
CASES = SINGLE_LINE_CASES + MULTIPLE_LINE_CASES
def print_preds(pred):
pred = [''.join(line_p) for line_p, _ in pred]
print("Predicted Chars:", pred)
def cal_score(preds, expected):
if len(preds) != len(expected):
return 0
total_cnt = 0
total_dist = 0
for real, (pred, _) in zip(expected, preds):
pred = ''.join(pred)
distance = Levenshtein.distance(real, pred)
total_dist += distance
total_cnt += len(real)
return 1.0 - float(total_dist) / total_cnt
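# cal_score returns 0 if the number of predicted lines differs from the expected lines,
# otherwise 1 - (total Levenshtein distance / total reference length): an accuracy-style
# score where 1.0 means every line matches exactly; the tests below accept >= 0.8.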
@pytest.mark.parametrize('img_fp, expected', CASES)
def test_ocr(img_fp, expected):
ocr = CNOCR
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
img_fp = os.path.join(root_dir, 'examples', img_fp)
pred = ocr.ocr(img_fp)
print('\n')
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
img = read_img(img_fp)
pred = ocr.ocr(img)
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
img = read_img(img_fp, gray=False)
pred = ocr.ocr(img)
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
@pytest.mark.parametrize('img_fp, expected', SINGLE_LINE_CASES)
def test_ocr_for_single_line(img_fp, expected):
ocr = CNOCR
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
img_fp = os.path.join(root_dir, 'examples', img_fp)
pred = ocr.ocr_for_single_line(img_fp)
print('\n')
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
img = read_img(img_fp)
pred = ocr.ocr_for_single_line(img)
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
img = read_img(img_fp, gray=False)
pred = ocr.ocr_for_single_line(img)
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
img = np.array(Image.fromarray(img).convert('L'))
assert len(img.shape) == 2
pred = ocr.ocr_for_single_line(img)
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
img = np.expand_dims(img, axis=2)
assert len(img.shape) == 3 and img.shape[2] == 1
pred = ocr.ocr_for_single_line(img)
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
@pytest.mark.parametrize('img_fp, expected', MULTIPLE_LINE_CASES)
def test_ocr_for_single_lines(img_fp, expected):
ocr = CNOCR
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
img_fp = os.path.join(root_dir, 'examples', img_fp)
img = read_img(img_fp)
if img.mean() < 145:  # invert white-text-on-black images to black text on white
img = 255 - img
line_imgs = line_split(np.squeeze(img, -1), blank=True)
line_img_list = [line_img for line_img, _ in line_imgs]
pred = ocr.ocr_for_single_lines(line_img_list)
print('\n')
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
line_img_list = [np.array(line_img) for line_img in line_img_list]
pred = ocr.ocr_for_single_lines(line_img_list)
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
def test_cand_alphabet():
img_fp = os.path.join(example_dir, 'hybrid.png')
ocr = CnOcr(cand_alphabet=NUMBERS)
pred = ocr.ocr(img_fp)
pred = [''.join(line_p) for line_p, _ in pred]
print("Predicted Chars:", pred)
assert len(pred) == 1 and pred[0] == '012345678'
INSTANCE_ID = 0
@pytest.mark.parametrize('model_name', AVAILABLE_MODELS.keys())
def test_multiple_instances(model_name):
global INSTANCE_ID
print('test multiple instances for model_name: %s' % model_name)
img_fp = os.path.join(example_dir, 'hybrid.png')
INSTANCE_ID += 1
print('instance id: %d' % INSTANCE_ID)
cnocr1 = CnOcr(model_name, name='instance-%d' % INSTANCE_ID)
print_preds(cnocr1.ocr(img_fp))
INSTANCE_ID += 1
print('instance id: %d' % INSTANCE_ID)
cnocr2 = CnOcr(model_name, name='instance-%d' % INSTANCE_ID, cand_alphabet=NUMBERS)
print_preds(cnocr2.ocr(img_fp))
| [
"[email protected]"
] | |
88f8b1496a92b386d3f5362f2a3adf58da85382b | 8a699595e7f156b1ade42f6042900b3331831fbf | /src/transformers/models/swin/modeling_swin.py | 81e91a19dccac92e0612504cac8f5c8dba33da2b | [
"Apache-2.0"
] | permissive | stas00/transformers | ab654371a387c5883fc882dd0286177875d6d3b4 | 7c5d79912a21880ce13d77881940458e90d98917 | refs/heads/master | 2023-02-16T00:22:41.298155 | 2022-04-08T20:55:42 | 2022-04-08T20:55:42 | 278,214,696 | 6 | 0 | Apache-2.0 | 2022-01-28T18:39:00 | 2020-07-08T23:24:49 | Python | UTF-8 | Python | false | false | 51,021 | py | # coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Swin Transformer model."""
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_swin import SwinConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "SwinConfig"
_FEAT_EXTRACTOR_FOR_DOC = "AutoFeatureExtractor"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/swin-tiny-patch4-window7-224"
_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/swin-tiny-patch4-window7-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/swin-tiny-patch4-window7-224",
# See all Swin models at https://huggingface.co/models?filter=swin
]
# to_2tuple, drop_path, SwinPatchEmbeddings, SwinPatchMerging and SwinDropPath are from the timm library.
@dataclass
class SwinEncoderOutput(ModelOutput):
"""
Swin encoder's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SwinModelOutput(ModelOutput):
"""
Swin model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Average pooling of the last layer hidden-state.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SwinMaskedImageModelingOutput(ModelOutput):
"""
Swin masked image model outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
Masked image modeling (MLM) loss.
logits (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed pixel values.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SwinImageClassifierOutput(ModelOutput):
"""
Swin outputs for image classification.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
# Copied from transformers.models.vit.modeling_vit.to_2tuple
def to_2tuple(x):
if isinstance(x, collections.abc.Iterable):
return x
return (x, x)
def window_partition(input_feature, window_size):
"""
Partitions the given input into windows.
"""
batch_size, height, width, num_channels = input_feature.shape
input_feature = input_feature.view(
batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
)
windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows
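# Illustrative shape walk-through (added note): with window_size=7, an input of shape
# (batch_size, 56, 56, 96) is viewed as (batch_size, 8, 7, 8, 7, 96) and returned as
# (batch_size * 64, 7, 7, 96), i.e. one row per 7x7 window.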
def window_reverse(windows, window_size, height, width):
"""
Merges windows to produce higher resolution features.
"""
batch_size = int(windows.shape[0] / (height * width / window_size / window_size))
windows = windows.view(batch_size, height // window_size, width // window_size, window_size, window_size, -1)
windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(batch_size, height, width, -1)
return windows
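# window_reverse is the inverse of window_partition:
# window_reverse(window_partition(x, w), w, H, W) recovers x whenever H and W are multiples of w.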
def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True):
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = input.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return input * random_tensor
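# How stochastic depth works here: during training each sample's residual branch is zeroed
# with probability drop_prob, and surviving samples are divided by keep_prob so the expected
# value of the output is unchanged; at inference time the function is a no-op.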
class SwinEmbeddings(nn.Module):
"""
Construct the patch and position embeddings. Optionally, also the mask token.
"""
def __init__(self, config, use_mask_token=False):
super().__init__()
self.patch_embeddings = SwinPatchEmbeddings(
image_size=config.image_size,
patch_size=config.patch_size,
num_channels=config.num_channels,
embed_dim=config.embed_dim,
)
num_patches = self.patch_embeddings.num_patches
self.patch_grid = self.patch_embeddings.grid_size
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
if config.use_absolute_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
else:
self.position_embeddings = None
self.norm = nn.LayerNorm(config.embed_dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, pixel_values, bool_masked_pos=None):
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
if self.position_embeddings is not None:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings, output_dimensions
class SwinPatchEmbeddings(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(self, image_size=224, patch_size=16, num_channels=3, embed_dim=768):
super().__init__()
image_size = to_2tuple(image_size)
patch_size = to_2tuple(patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
def maybe_pad(self, pixel_values, height, width):
if width % self.patch_size[1] != 0:
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
pixel_values = nn.functional.pad(pixel_values, pad_values)
if height % self.patch_size[0] != 0:
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
pixel_values = nn.functional.pad(pixel_values, pad_values)
return pixel_values
def forward(self, pixel_values):
_, _, height, width = pixel_values.shape
# pad the input to be divisible by self.patch_size, if needed
pixel_values = self.maybe_pad(pixel_values, height, width)
embeddings = self.projection(pixel_values)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
return embeddings, output_dimensions
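# Example (common Swin-T configuration, not enforced by this class): image_size=224 with
# patch_size=4 yields a 56x56 patch grid, i.e. embeddings of shape (batch_size, 3136, 96).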
class SwinPatchMerging(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`Tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = (height % 2 == 1) or (width % 2 == 1)
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature, input_dimensions):
height, width = input_dimensions
# `dim` is height * width
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
# pad input so that height and width are divisible by 2, if needed
input_feature = self.maybe_pad(input_feature, height, width)
# [batch_size, height/2, width/2, num_channels]
input_feature_0 = input_feature[:, 0::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
# batch_size height/2 width/2 4*num_channels
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
input_feature = self.norm(input_feature)
input_feature = self.reduction(input_feature)
return input_feature
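# Net effect of patch merging: spatial resolution is halved in each direction while channels
# go C -> 4C (concatenation of each 2x2 neighborhood) -> 2C (the linear reduction above).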
class SwinDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None, scale_by_keep=True):
super(SwinDropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, input):
return drop_path(input, self.drop_prob, self.training, self.scale_by_keep)
class SwinSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads):
super().__init__()
if dim % num_heads != 0:
raise ValueError(
f"The hidden size ({dim}) is not a multiple of the number of attention " f"heads ({num_heads})"
)
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = to_2tuple(config.window_size)
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w]))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer("relative_position_index", relative_position_index)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
batch_size, dim, num_channels = hidden_states.shape
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
relative_position_bias = relative_position_bias.view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in SwinModel forward() function)
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(
batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class SwinSelfOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class SwinAttention(nn.Module):
def __init__(self, config, dim, num_heads):
super().__init__()
self.self = SwinSelfAttention(config, dim, num_heads)
self.output = SwinSelfOutput(config, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class SwinIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class SwinOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class SwinLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.shift_size = shift_size
self.window_size = config.window_size
self.input_resolution = input_resolution
self.set_shift_and_window_size(input_resolution)
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.attention = SwinAttention(config, dim, num_heads)
self.drop_path = SwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.intermediate = SwinIntermediate(config, dim)
self.output = SwinOutput(config, dim)
def set_shift_and_window_size(self, input_resolution):
if min(input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(input_resolution)
def get_attn_mask(self, height, width):
if self.shift_size > 0:
# calculate attention mask for SW-MSA
img_mask = torch.zeros((1, height, width, 1))
height_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
width_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
return attn_mask
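# Why the mask is needed: after the cyclic shift in SwinLayer.forward, a single window can
# contain patches that were not adjacent in the original feature map; the -100.0 entries
# added to the attention scores suppress attention between those sub-regions before softmax.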
def maybe_pad(self, hidden_states, height, width):
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return hidden_states, pad_values
def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False):
self.set_shift_and_window_size(input_dimensions)
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = self.layernorm_before(hidden_states)
hidden_states = hidden_states.view(batch_size, height, width, channels)
# pad hidden_states to multiples of window size
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
# cyclic shift
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
# partition windows
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(height_pad, width_pad)
if attn_mask is not None:
attn_mask = attn_mask.to(hidden_states_windows.device)
attention_outputs = self.attention(
hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
# reverse cyclic shift
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = shortcut + self.drop_path(attention_windows)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = hidden_states + self.output(layer_output)
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
class SwinStage(nn.Module):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
super().__init__()
self.config = config
self.dim = dim
self.blocks = nn.ModuleList(
[
SwinLayer(
config=config,
dim=dim,
input_resolution=input_resolution,
num_heads=num_heads,
shift_size=0 if (i % 2 == 0) else config.window_size // 2,
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False):
height, width = input_dimensions
for i, layer_module in enumerate(self.blocks):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
if self.downsample is not None:
height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
output_dimensions = (height, width, height_downsampled, width_downsampled)
hidden_states = self.downsample(layer_outputs[0], input_dimensions)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
class SwinEncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_layers = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
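# dpr holds one drop-path rate per transformer block, increasing linearly from 0 to
# config.drop_path_rate (e.g. 0.1 over 12 blocks gives 0.0, ~0.009, ..., 0.1); it is
# sliced per stage via the drop_path argument below.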
self.layers = nn.ModuleList(
[
SwinStage(
config=config,
dim=int(config.embed_dim * 2**i_layer),
input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
depth=config.depths[i_layer],
num_heads=config.num_heads[i_layer],
drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
downsample=SwinPatchMerging if (i_layer < self.num_layers - 1) else None,
)
for i_layer in range(self.num_layers)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
input_dimensions,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_input_dimensions = ()
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, layer_module in enumerate(self.layers):
layer_head_mask = head_mask[i] if head_mask is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
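                # torch.utils.checkpoint re-invokes the wrapped callable with only the
                # tensors handed to it, so the output_attentions flag is captured in this
                # closure instead of being passed as a checkpointed argument.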
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module), hidden_states, input_dimensions, layer_head_mask
)
else:
layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
output_dimensions = layer_outputs[1]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
all_input_dimensions += (input_dimensions,)
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if output_attentions:
all_self_attentions += layer_outputs[2:]
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return SwinEncoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
reshaped_hidden_states=all_reshaped_hidden_states,
)
class SwinPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = SwinConfig
base_model_prefix = "swin"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, SwinEncoder):
module.gradient_checkpointing = value
SWIN_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`SwinConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SWIN_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See
[`AutoFeatureExtractor.__call__`] for details.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Swin Model transformer outputting raw hidden-states without any specific head on top.",
SWIN_START_DOCSTRING,
)
class SwinModel(SwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
super().__init__(config)
self.config = config
self.num_layers = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
self.embeddings = SwinEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = SwinEncoder(config, self.embeddings.patch_grid)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SwinModelOutput,
config_class=_CONFIG_FOR_DOC,
modality="vision",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
pixel_values=None,
bool_masked_pos=None,
head_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, len(self.config.depths))
embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
encoder_outputs = self.encoder(
embedding_output,
input_dimensions,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return SwinModelOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
)
@add_start_docstrings(
"Swin Model with a decoder on top for masked image modeling, as proposed in `SimMIM <https://arxiv.org/abs/2111.09886>`__.",
SWIN_START_DOCSTRING,
)
class SwinForMaskedImageModeling(SwinPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.swin = SwinModel(config, add_pooling_layer=False, use_mask_token=True)
num_features = int(config.embed_dim * 2 ** (config.num_layers - 1))
self.decoder = nn.Sequential(
nn.Conv2d(in_channels=num_features, out_channels=config.encoder_stride**2 * 3, kernel_size=1),
nn.PixelShuffle(config.encoder_stride),
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=SwinMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
pixel_values=None,
bool_masked_pos=None,
head_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Returns:
Examples:
```python
>>> from transformers import AutoFeatureExtractor, SwinForMaskedImageModeling
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
>>> model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
>>> pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values
>>> # create random boolean mask of shape (batch_size, num_patches)
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
>>> loss, reconstructed_pixel_values = outputs.loss, outputs.logits
>>> list(reconstructed_pixel_values.shape)
[1, 3, 224, 224]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.swin(
pixel_values,
bool_masked_pos=bool_masked_pos,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
# Reshape to (batch_size, num_channels, height, width)
sequence_output = sequence_output.transpose(1, 2)
batch_size, num_channels, sequence_length = sequence_output.shape
height = width = int(sequence_length**0.5)
sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
# Reconstruct pixel values
reconstructed_pixel_values = self.decoder(sequence_output)
masked_im_loss = None
if bool_masked_pos is not None:
size = self.config.image_size // self.config.patch_size
bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
mask = (
bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
.repeat_interleave(self.config.patch_size, 2)
.unsqueeze(1)
.contiguous()
)
reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
if not return_dict:
output = (reconstructed_pixel_values,) + outputs[2:]
return ((masked_im_loss,) + output) if masked_im_loss is not None else output
return SwinMaskedImageModelingOutput(
loss=masked_im_loss,
logits=reconstructed_pixel_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
reshaped_hidden_states=outputs.reshaped_hidden_states,
)
@add_start_docstrings(
"""
Swin Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
the [CLS] token) e.g. for ImageNet.
""",
SWIN_START_DOCSTRING,
)
class SwinForImageClassification(SwinPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.swin = SwinModel(config)
# Classifier head
self.classifier = (
nn.Linear(self.swin.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_IMAGE_CLASS_CHECKPOINT,
output_type=SwinImageClassifierOutput,
config_class=_CONFIG_FOR_DOC,
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
)
def forward(
self,
pixel_values=None,
head_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.swin(
pixel_values,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SwinImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
reshaped_hidden_states=outputs.reshaped_hidden_states,
)
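# A minimal inference sketch for the classification head defined above (an
# illustration, not part of the library; it mirrors the checkpoint and API used in
# the masked-image-modeling docstring example earlier in this file):
#
#   from PIL import Image
#   import requests
#   from transformers import AutoFeatureExtractor, SwinForImageClassification
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
#   model = SwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
#   inputs = feature_extractor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   predicted_class_id = int(logits.argmax(-1))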
| [
"[email protected]"
] | |
f8631d259c1277c1890704d217d2a61336e0cbbc | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/client/gui/scaleform/daapi/view/lobby/customizationfilter_popover.py | 05360ff3fd0c540a1aff0057dc445aea0b6e0707 | [] | no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,724 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/customization/filter_popover.py
from constants import IGR_TYPE
from debug_utils import LOG_DEBUG
from gui import GUI_SETTINGS
from gui.game_control import getIGRCtrl
from gui.shared.formatters import text_styles, icons
from gui.shared.utils.functions import makeTooltip
from gui.Scaleform.locale.VEHICLE_CUSTOMIZATION import VEHICLE_CUSTOMIZATION
from gui.Scaleform.daapi.view.meta.CustomizationFiltersPopoverMeta import CustomizationFiltersPopoverMeta
from helpers.i18n import makeString as _ms
from gui.customization import g_customizationController
from gui.customization.shared import CUSTOMIZATION_TYPE, getBonusIcon16x16, FILTER_TYPE, QUALIFIER_TYPE_INDEX, PURCHASE_TYPE, DEFAULT_GROUP_VALUE, EMBLEM_IGR_GROUP_NAME
_BONUS_TOOLTIPS = (VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_ENTIRECREW,
VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_COMMANDER,
VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_AIMER,
VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_DRIVER,
VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_RADIOMAN,
VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_LOADER)
_PURCHASE_TYPE_LABELS = (VEHICLE_CUSTOMIZATION.FILTER_POPOVER_WAYSTOBUY_BUY, VEHICLE_CUSTOMIZATION.FILTER_POPOVER_WAYSTOBUY_MISSIONS, icons.premiumIgrSmall())
def _getPurchaseTypeVO():
result = []
for purchaseType, label in zip(PURCHASE_TYPE.ALL, _PURCHASE_TYPE_LABELS):
purchaseVO = {'label': label,
'enabled': True}
if purchaseType == PURCHASE_TYPE.IGR:
if not GUI_SETTINGS.igrEnabled:
continue
purchaseVO['enabled'] = getIGRCtrl().getRoomType() == IGR_TYPE.PREMIUM
purchaseVO['tooltipDisabled'] = makeTooltip(_ms(VEHICLE_CUSTOMIZATION.FILTER_TOOLTIP_IGR_DISABLED_HEADER), _ms(VEHICLE_CUSTOMIZATION.FILTER_TOOLTIP_IGR_DISABLED_BODY, icon=_ms(icons.premiumIgrSmall())))
result.append(purchaseVO)
return result
def _getBonusTypeVO(selectedBonuses):
result = []
for bonusType, tooltipText in zip(QUALIFIER_TYPE_INDEX, _BONUS_TOOLTIPS):
tooltip = makeTooltip(_ms(VEHICLE_CUSTOMIZATION.CUSTOMIZATION_FILTERPOPOVER_BONUSDESCRIPTION_HEADER, bonus=_ms(tooltipText)), _ms(VEHICLE_CUSTOMIZATION.CUSTOMIZATION_FILTERPOPOVER_BONUSDESCRIPTION_BODY, bonus=_ms(tooltipText)))
result.append({'selected': selectedBonuses[bonusType],
'value': getBonusIcon16x16(bonusType),
'tooltip': tooltip})
return result
class FilterPopover(CustomizationFiltersPopoverMeta):
def __init__(self, ctx = None):
super(FilterPopover, self).__init__()
self.__filter = None
self.__groupsMap = []
return
def changeFilter(self, filterGroup, filterGroupValue):
applyFilter = True
if filterGroup == FILTER_TYPE.GROUP:
filterGroupValue = self.__groupsMap[self.__filter.currentType][filterGroupValue][0]
if self.__filter.currentGroup == filterGroupValue:
applyFilter = False
elif filterGroup == FILTER_TYPE.PURCHASE_TYPE:
filterGroupValue = PURCHASE_TYPE.ALL[filterGroupValue]
if self.__filter.purchaseType == filterGroupValue:
applyFilter = False
elif self.__filter.currentType != CUSTOMIZATION_TYPE.CAMOUFLAGE:
self.__switchIGRFilter(filterGroupValue == PURCHASE_TYPE.IGR)
if applyFilter:
self.__filter.set(filterGroup, filterGroupValue)
self.as_enableDefBtnS(not self.__filter.isDefaultFilterSet())
def setDefaultFilter(self):
self.__filter.setDefault()
updateVO = self.__createUpdateVO()
self.as_setStateS({'bonusTypeSelected': updateVO['bonusTypeSelected'],
'customizationTypeSelectedIndex': updateVO['groupsSelectIndex'],
'purchaseTypeSelectedIndex': updateVO['purchaseTypeSelectedIndex'],
'enableGroupFilter': updateVO['enableGroupFilter']})
self.as_enableDefBtnS(False)
def _populate(self):
super(FilterPopover, self)._populate()
self.__filter = g_customizationController.filter
self.__groupsMap = [[('all_groups', VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GROUPS_ALL)], [('all_groups', VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GROUPS_ALL)], [('all_groups', VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GROUPS_ALL)]]
for cType in CUSTOMIZATION_TYPE.ALL:
for groupName, userName in self.__filter.availableGroupNames[cType]:
if groupName != EMBLEM_IGR_GROUP_NAME and groupName != 'IGR':
self.__groupsMap[cType].append((groupName, userName))
self.as_setInitDataS(self.__createInitialVO())
self.as_enableDefBtnS(not self.__filter.isDefaultFilterSet())
def _dispose(self):
self.__filter = None
self.__groupsMap = []
super(FilterPopover, self)._dispose()
return
def __createInitialVO(self):
isTypeNotCamouflage = self.__filter.currentType != CUSTOMIZATION_TYPE.CAMOUFLAGE
groupsUserNames = []
for _, groupName in self.__groupsMap[self.__filter.currentType]:
groupsUserNames.append(groupName)
updateVO = self.__createUpdateVO()
return {'lblTitle': text_styles.highTitle(VEHICLE_CUSTOMIZATION.FILTER_POPOVER_TITLE),
'lblBonusType': text_styles.standard(VEHICLE_CUSTOMIZATION.FILTER_POPOVER_BONUSTYPE_TITLE),
'lblCustomizationType': text_styles.standard(VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GROUPS_TITLE),
'lblPurchaseType': text_styles.standard(VEHICLE_CUSTOMIZATION.FILTER_POPOVER_WAYSTOBUY_TITLE),
'btnDefault': VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GETDEFAULTSETTINGS,
'bonusTypeId': FILTER_TYPE.QUALIFIER,
'bonusType': _getBonusTypeVO(self.__filter.selectedBonuses),
'customizationBonusTypeVisible': isTypeNotCamouflage,
'enableGroupFilter': updateVO['enableGroupFilter'],
'customizationTypeId': FILTER_TYPE.GROUP,
'customizationType': groupsUserNames,
'customizationTypeSelectedIndex': updateVO['groupsSelectIndex'],
'customizationTypeVisible': isTypeNotCamouflage,
'bonusTypeDisableTooltip': makeTooltip(VEHICLE_CUSTOMIZATION.TOOLTIP_FILTER_GROUPS_DISABLED_HEADER, VEHICLE_CUSTOMIZATION.TOOLTIP_FILTER_GROUPS_DISABLED_BODY),
'refreshTooltip': makeTooltip(VEHICLE_CUSTOMIZATION.CUSTOMIZATION_FILTERPOPOVER_REFRESH_HEADER, VEHICLE_CUSTOMIZATION.CUSTOMIZATION_FILTERPOPOVER_REFRESH_BODY),
'purchaseTypeId': FILTER_TYPE.PURCHASE_TYPE,
'purchaseType': _getPurchaseTypeVO(),
'purchaseTypeSelectedIndex': PURCHASE_TYPE.ALL.index(self.__filter.purchaseType)}
def __createUpdateVO(self):
groupsList = []
bonusTypeSelected = []
for bonusType in QUALIFIER_TYPE_INDEX:
bonusTypeSelected.append(self.__filter.selectedBonuses[bonusType])
for group, _ in self.__groupsMap[self.__filter.currentType]:
groupsList.append(group)
if self.__filter.currentType != CUSTOMIZATION_TYPE.CAMOUFLAGE:
groupsSelectIndex = groupsList.index(self.__filter.currentGroup)
enableGroupFilter = self.__filter.isGroupFilterEnabled()
else:
groupsSelectIndex = 0
enableGroupFilter = True
return {'bonusTypeSelected': bonusTypeSelected,
'groupsSelectIndex': groupsSelectIndex,
'purchaseTypeSelectedIndex': PURCHASE_TYPE.ALL.index(self.__filter.purchaseType),
'enableGroupFilter': enableGroupFilter}
def __switchIGRFilter(self, disableGroupFilter):
""" Turn on/off group filter.
        When IGR (purchase type) is selected, the group filter has to become disabled and
        its value has to change to 'All groups'; when the user selects another purchase
        type, the previous group value should be restored.
:param disableGroupFilter: enable or disable group filter.
"""
if self.__filter.isGroupFilterEnabled() == disableGroupFilter:
self.__filter.toggleGroupFilterEnabled()
if disableGroupFilter:
groupToSet = DEFAULT_GROUP_VALUE
else:
groupToSet = self.__filter.currentGroup
self.__filter.set(FILTER_TYPE.GROUP, groupToSet)
updateVO = self.__createUpdateVO()
self.as_setStateS({'bonusTypeSelected': updateVO['bonusTypeSelected'],
'customizationTypeSelectedIndex': updateVO['groupsSelectIndex'],
'purchaseTypeSelectedIndex': updateVO['purchaseTypeSelectedIndex'],
'enableGroupFilter': updateVO['enableGroupFilter']}) | [
"[email protected]"
] | |
24caadb1da40e28f0a1b19027c888aef7f29a004 | 8983b23a25fcc3739fc977850d242ebcc64434ce | /jqurity/urls.py | a1b034bb4034894993d2bac31814d1ce65d4a60f | [] | no_license | jakiiii/django-blog | 595d834c44c4b45817091da812b90b6fa7a34aab | 260aa75b89cd9875a2e0ab1e0f9588dffd8f5281 | refs/heads/master | 2020-03-29T19:53:57.752279 | 2018-09-25T15:39:21 | 2018-09-25T15:42:39 | 150,286,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | """jqurity URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('accounts/', include('accounts.urls'))
]
if settings.DEBUG:
urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
384339a14d72cafb57e028d6b4112d06e5c27362 | 5774101105b47d78adb7a57eefdfa21502bbd70c | /project-follow/MadKing-master/assets/serializers.py | d39be9254ac56c2e85c54ce840290990ba81359f | [] | no_license | zhlthunder/python-study | 34d928f0ebbdcd5543ae0f41baaea955c92f5c56 | 0f25dd5105ba46791842d66babbe4c3a64819ee5 | refs/heads/master | 2023-01-12T18:39:47.184978 | 2018-10-07T23:48:04 | 2018-10-07T23:48:04 | 90,516,611 | 0 | 1 | null | 2022-12-26T19:46:22 | 2017-05-07T07:39:48 | HTML | UTF-8 | Python | false | false | 736 | py | #_*_coding:utf-8_*_
__author__ = 'jieli'
from assets.myauth import UserProfile
from assets import models
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = UserProfile
# fields = ('url', 'name', 'email')
fields = ('url', 'name', 'email','is_admin')
class AssetSerializer(serializers.ModelSerializer):
class Meta:
model = models.Asset
        # with this added, the detailed information of the related server is shown inline as well
depth=2
fields = ('name', 'sn','server','networkdevice')
class ServerSerializer(serializers.ModelSerializer):
class Meta:
model = models.Server
#fields = ('name', 'sn','server') | [
"[email protected]"
] | |
9b2cdeb86d06087f1f5fa0e0cfb88b8fab1f3579 | 11cd362cdd78c2fc48042ed203614b201ac94aa6 | /desktop/core/ext-py3/boto-2.49.0/bin/cwutil | 280d53f33edf02cafec34709b3684b22dfcc950c | [
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] | permissive | cloudera/hue | b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908 | dccb9467675c67b9c3399fc76c5de6d31bfb8255 | refs/heads/master | 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 | Apache-2.0 | 2023-09-14T03:05:41 | 2010-06-21T19:46:51 | JavaScript | UTF-8 | Python | false | false | 5,046 | #!/usr/bin/env python
# Author: Chris Moyer <[email protected]>
# Description: CloudWatch Utility
# For listing stats, creating alarms, and managing
# other CloudWatch aspects
import boto
cw = boto.connect_cloudwatch()
from datetime import datetime, timedelta
def _parse_time(time_string):
"""Internal function to parse a time string"""
def _parse_dict(d_string):
result = {}
if d_string:
for d in d_string.split(","):
d = d.split(":")
result[d[0]] = d[1]
return result
def ls(namespace=None):
"""
List metrics, optionally filtering by a specific namespace
namespace: Optional Namespace to filter on
"""
print "%-10s %-50s %s" % ("Namespace", "Metric Name", "Dimensions")
print "-"*80
for m in cw.list_metrics():
if namespace is None or namespace.upper() in m.namespace:
print "%-10s %-50s %s" % (m.namespace, m.name, m.dimensions)
def stats(namespace, metric_name, dimensions=None, statistics="Average", start_time=None, end_time=None, period=60, unit=None):
"""
Lists the statistics for a specific metric
namespace: The namespace to use, usually "AWS/EC2", "AWS/SQS", etc.
metric_name: The name of the metric to track, pulled from `ls`
dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
statistics: The statistics to measure, defaults to "Average"
'Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'
start_time: Start time, default to now - 1 day
end_time: End time, default to now
    period: Period/interval for counts, in seconds; defaults to 60 (one minute)
unit: Unit to track, default depends on what metric is being tracked
"""
# Parse the dimensions
dimensions = _parse_dict(dimensions)
# Parse the times
if end_time:
end_time = _parse_time(end_time)
else:
end_time = datetime.utcnow()
if start_time:
start_time = _parse_time(start_time)
else:
start_time = datetime.utcnow() - timedelta(days=1)
print "%-30s %s" % ('Timestamp', statistics)
print "-"*50
data = {}
for m in cw.get_metric_statistics(int(period), start_time, end_time, metric_name, namespace, statistics, dimensions, unit):
data[m['Timestamp']] = m[statistics]
keys = data.keys()
keys.sort()
for k in keys:
print "%-30s %s" % (k, data[k])
def put(namespace, metric_name, dimensions=None, value=None, unit=None, statistics=None, timestamp=None):
"""
Publish custom metrics
namespace: The namespace to use; values starting with "AWS/" are reserved
metric_name: The name of the metric to update
dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
value: The value to store, mutually exclusive with `statistics`
statistics: The statistics to store, mutually exclusive with `value`
(must specify all of "Minimum", "Maximum", "Sum", "SampleCount")
timestamp: The timestamp of this measurement, default is current server time
unit: Unit to track, default depends on what metric is being tracked
"""
def simplify(lst):
return lst[0] if len(lst) == 1 else lst
print cw.put_metric_data(namespace, simplify(metric_name.split(';')),
dimensions = simplify(map(_parse_dict, dimensions.split(';'))) if dimensions else None,
value = simplify(value.split(';')) if value else None,
statistics = simplify(map(_parse_dict, statistics.split(';'))) if statistics else None,
timestamp = simplify(timestamp.split(';')) if timestamp else None,
unit = simplify(unit.split(';')) if unit else None)
def help(fnc=None):
"""
Print help message, optionally about a specific function
"""
import inspect
self = sys.modules['__main__']
if fnc:
try:
cmd = getattr(self, fnc)
except:
cmd = None
if not inspect.isfunction(cmd):
print "No function named: %s found" % fnc
sys.exit(2)
(args, varargs, varkw, defaults) = inspect.getargspec(cmd)
print cmd.__doc__
print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args]))
else:
print "Usage: cwutil [command]"
for cname in dir(self):
if not cname.startswith("_") and not cname == "cmd":
cmd = getattr(self, cname)
if inspect.isfunction(cmd):
doc = cmd.__doc__
print "\t%s - %s" % (cname, doc)
sys.exit(1)
if __name__ == "__main__":
import sys
self = sys.modules['__main__']
if len(sys.argv) >= 2:
try:
cmd = getattr(self, sys.argv[1])
except:
cmd = None
args = sys.argv[2:]
else:
cmd = help
args = []
if not cmd:
cmd = help
try:
cmd(*args)
except TypeError as e:
print e
help(cmd.__name__)
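# Example invocations (illustrative only; namespace, metric, queue and host names are
# placeholders, and boto must be configured with valid AWS credentials):
#   cwutil ls AWS/EC2
#   cwutil stats AWS/SQS ApproximateNumberOfMessagesVisible QueueName:myQueue Average
#   cwutil put MyApp RequestLatency Host:web1 42 Milliseconds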
| [
"[email protected]"
] | ||
82cdd53d1dcf9e33c62000824cbb3912abc74ad3 | 5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4 | /interview_bits/level_2/02_binary_search/02_search_step_simulation/01_implement-power-function.py | 7f76f3870d716a1ce3475e367399e4163af05c04 | [] | no_license | salvador-dali/algorithms_general | 04950bd823fc354adc58a4f23b7d2f3d39664798 | aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d | refs/heads/master | 2020-12-14T06:24:10.466601 | 2016-07-17T06:00:17 | 2016-07-17T06:00:17 | 47,397,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # https://www.interviewbit.com/problems/implement-power-function/
def power(a, b, m):
if a == 0:
return 0
res, mul = 1, a % m
while b:
if b % 2:
res = (res * mul) % m
mul = (mul * mul) % m
        b //= 2  # integer halving keeps the exponent an int under Python 3
return res | [
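# Worked example (illustrative): power(2, 10, 1000) == 24, since 2**10 = 1024 and
# 1024 % 1000 = 24. Each iteration squares `mul` and folds it into `res` whenever
# the current low bit of the exponent is set (binary exponentiation).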
"[email protected]"
] | |
a48345655e91b63f5ae905da3ad7b8a15ef14273 | edcc0afdff7a7d01fa05664006d495627e9568e0 | /tests/snapshot/test_destroy.py | 420d09cd7da71f55fe79d6edcc08b8eaaf999984 | [] | no_license | b-a-t/zettarepl | 871538cc83e9e0ec3cf0c7f4a66bba21559127e4 | 6596fb85f31919edf8eadeee47552d14f3d62db3 | refs/heads/master | 2020-04-01T23:22:27.097027 | 2018-10-16T18:45:10 | 2018-10-16T18:45:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # -*- coding=utf-8 -*-
from unittest.mock import call, Mock
from zettarepl.snapshot.destroy import destroy_snapshots
from zettarepl.snapshot.snapshot import Snapshot
def test__destroy_snapshots__works():
shell = Mock()
destroy_snapshots(shell, [Snapshot("data", "snap-1"), Snapshot("data/work", "snap-1"), Snapshot("data", "snap-2")])
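    # destroy_snapshots is expected to batch snapshots of the same dataset into one
    # `zfs destroy` call using the snap-1%snap-2 range syntax, while the other dataset
    # gets its own call - hence exactly two shell invocations are asserted below.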
assert shell.exec.call_count == 2
shell.exec.assert_has_calls([
call(["zfs", "destroy", "data@snap-1%snap-2"]),
call(["zfs", "destroy", "data/work@snap-1"])
], True)
| [
"[email protected]"
] | |
5e44569573fdd9f885721843380c33d0416aa9f1 | f21bf9e70a32de28a159473c6752fab7159f4e88 | /ms_deisotope/feature_map/feature_fit.py | 1aa3bf8e53cc8f46a0826f2ff6477ecbd0a90d55 | [
"Apache-2.0"
] | permissive | mstim/ms_deisotope | c1d9cd8b5a3ab36c28c53d8988803cd268b240c5 | 29f4f466e92e66b65a2d21eca714aa627caa21db | refs/heads/master | 2023-03-20T05:02:09.088420 | 2021-03-04T21:44:35 | 2021-03-04T21:44:35 | 261,802,498 | 0 | 0 | Apache-2.0 | 2020-05-06T15:32:03 | 2020-05-06T15:32:02 | null | UTF-8 | Python | false | false | 12,644 | py | from collections import namedtuple
import numpy as np
from brainpy import neutral_mass as calc_neutral_mass
from ms_peak_picker import FittedPeak
from ms_deisotope.averagine import glycan
from ms_deisotope.scoring import g_test_scaled
from .shape_fitter import AdaptiveMultimodalChromatogramShapeFitter
from .lcms_feature import (
EmptyFeature,
LCMSFeature,
LCMSFeatureTreeNode,
RunningWeightedAverage,
NodeFeatureSetIterator)
class map_coord(namedtuple("map_coord", ("mz", 'time'))):
def __repr__(self):
return "(%0.3f, %0.3f)" % self
class LCMSFeatureSetFit(object):
def __init__(self, features, theoretical, score, charge,
missing_features=0, supporters=None, data=None,
neutral_mass=None, scores=None, times=None):
if supporters is None:
supporters = []
if scores is None:
scores = np.array([])
if times is None:
times = np.array([])
self.features = features
self.theoretical = theoretical
self.score = score
self.charge = charge
self.data = data
self.missing_features = missing_features
self.monoisotopic_feature = features[0]
self.supporters = supporters
self.mz = theoretical.monoisotopic_mz
if neutral_mass is None:
            neutral_mass = calc_neutral_mass(self.mz, self.charge)
self.neutral_mass = neutral_mass
self.scores = scores
self.times = times
def count_null_features(self):
n_null = 0
for feature in self.features:
if feature is None or isinstance(feature, EmptyFeature):
n_null += 1
return n_null
def has_multiple_real_features(self):
return len(self) - self.count_null_features() > 1
def clone(self):
return self.__class__(
self.features, self.theoretical, self.score, self.charge,
self.missing_features, self.supporters, self.data,
self.neutral_mass, self.scores, self.times)
def __reduce__(self):
return self.__class__, (
self.features, self.theoretical, self.score, self.charge,
self.missing_features, self.supporters, self.data, self.neutral_mass,
self.scores, self.times)
def __eq__(self, other):
val = (self.score == other.score and
self.charge == other.charge and
self.features == other.features and
self.theoretical == other.theoretical)
if self.data is not None or other.data is not None:
val = val and (self.data == other.data)
return val
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return self.score < other.score
def __gt__(self, other):
return self.score > other.score
def __hash__(self):
return hash((self.monoisotopic_feature.mz, self.charge))
def __iter__(self):
return iter(self.features)
def __len__(self):
return len(self.features)
@property
def npeaks(self):
return len(self)
def __repr__(self):
return "LCMSFeatureSetFit(score=%0.5f, charge=%d, size=%d, monoisotopic_mz=%0.5f, %0.2f-%0.2f)" % (
self.score, self.charge, len(self), self.monoisotopic_feature.mz,
self.start.time, self.end.time)
@property
def start(self):
first = self.features[0]
if first is None:
raise Exception()
return map_coord(first.mz, first.start_time)
@property
def end(self):
for last in reversed(self.features):
if last is None:
continue
return map_coord(last.mz, last.end_time)
class DeconvolutedLCMSFeatureTreeNode(LCMSFeatureTreeNode):
__slots__ = ["_neutral_mass", "charge", "precursor_information"]
def __init__(self, time=None, members=None, precursor_information=None):
if precursor_information is None:
precursor_information = []
self._neutral_mass = 0
self.charge = 0
super(DeconvolutedLCMSFeatureTreeNode, self).__init__(time, members)
self.precursor_information = precursor_information
def _recalculate(self):
self._calculate_most_abundant_member()
self._mz = self._most_abundant_member.mz
self._neutral_mass = self._most_abundant_member.neutral_mass
self.charge = self._most_abundant_member.charge
@property
def neutral_mass(self):
if self._neutral_mass == 0:
if self._most_abundant_member is not None:
self._neutral_mass = self._most_abundant_member.neutral_mass
return self._neutral_mass
class DeconvolutedLCMSFeature(LCMSFeature):
def __init__(self, nodes=None, charge=None, adducts=None, used_as_adduct=None, score=0.0,
n_features=0, feature_id=None, supporters=None):
if supporters is None:
supporters = []
self.charge = charge
self.score = score
self._neutral_mass = None
self._last_neutral_mass = None
self._precursor_information = None
self.n_features = n_features
self.supporters = supporters
super(DeconvolutedLCMSFeature, self).__init__(nodes, adducts, used_as_adduct, feature_id=feature_id)
@property
def precursor_information(self):
if self._precursor_information is None:
pinfo = []
for node in self:
pinfo.extend(node.precursor_information)
self._precursor_information = tuple(pinfo)
return self._precursor_information
def clone(self, deep=False, cls=None):
if cls is None:
cls = self.__class__
return cls(
self.nodes.clone(deep=deep), self.charge, self.adducts, self.used_as_adduct, self.score,
self.n_features, self.feature_id, list(self.supporters))
def _invalidate(self, reaverage=False):
self._last_neutral_mass = self._neutral_mass if self._neutral_mass is not None else 0.
self._neutral_mass = None
self._precursor_information = None
super(DeconvolutedLCMSFeature, self)._invalidate(reaverage)
@property
def neutral_mass(self):
if self._neutral_mass is None:
avger = DeconvolutedRunningWeightedAverage()
for node in self.nodes:
avger.update(node.members)
self._neutral_mass = self._last_neutral_mass = avger.current_mean
return self._neutral_mass
def _copy_chunk(self, nodes, *args, **kwargs):
x = self.__class__(
nodes, self.charge, list(self.adducts), list(self.used_as_adduct),
self.score, self.n_features, None, list(self.supporters))
return x
def sum(self, other):
missing = []
feat_iter = NodeFeatureSetIterator([self, other])
for nodes in feat_iter:
base = nodes[0]
new = nodes[1]
if base is None:
missing.append(new)
elif new is not None:
base.members[0].intensity += new.members[0].intensity
base.precursor_information.extend(new.precursor_information)
if missing:
for node in missing:
self.insert_node(DeconvolutedLCMSFeatureTreeNode(
node.time, list(node.members), list(node.precursor_information)))
self.supporters.extend(other.supporters)
return self
def __repr__(self):
return "%s(%0.4f, %d, %0.2f, %0.2f, %0.2f)" % (
self.__class__.__name__, self.neutral_mass,
self.charge, self.score,
self.start_time, self.end_time)
class DeconvolutedRunningWeightedAverage(RunningWeightedAverage):
def add(self, peak):
if peak.intensity == 0:
if self.current_mean == 0 and self.total_weight == 0:
self.current_mean = peak.neutral_mass
self.total_weight = 1
else:
return
self.accumulator.append(peak)
agg = (self.total_weight * self.current_mean) + \
(peak.neutral_mass * peak.intensity)
self.total_weight += peak.intensity
self.current_mean = agg / self.total_weight
self.current_count += 1
return self
def recompute(self):
weight = 0
total = 0
for peak in self.accumulator:
weight += peak.intensity
total += peak.intensity * peak.neutral_mass
return total / weight
class DriftTimeRunningWeightedAverage(RunningWeightedAverage):
def add(self, peak):
if peak.intensity == 0:
if self.current_mean == 0 and self.total_weight == 0:
self.current_mean = peak.drift_time
self.total_weight = 1
else:
return
self.accumulator.append(peak)
agg = (self.total_weight * self.current_mean) + \
(peak.drift_time * peak.intensity)
self.total_weight += peak.intensity
self.current_mean = agg / self.total_weight
self.current_count += 1
return self
def recompute(self):
weight = 0
total = 0
for peak in self.accumulator:
weight += peak.intensity
total += peak.intensity * peak.drift_time
return total / weight
class IonMobilityDeconvolutedLCMSFeature(DeconvolutedLCMSFeature):
def __init__(self, nodes=None, charge=None, adducts=None, used_as_adduct=None, score=0.0,
n_features=0, feature_id=None, supporters=None):
self._drift_time = None
self._last_drift_time = None
super(IonMobilityDeconvolutedLCMSFeature, self).__init__(
nodes=nodes, charge=charge, adducts=adducts, used_as_adduct=used_as_adduct, score=score,
n_features=n_features, feature_id=feature_id, supporters=supporters)
def _invalidate(self, reaverage=False):
self._last_drift_time = self._drift_time if self._drift_time is not None else 0.
self._drift_time = None
return super(IonMobilityDeconvolutedLCMSFeature, self)._invalidate(reaverage=reaverage)
@property
def drift_time(self):
if self._drift_time is None:
avger = DriftTimeRunningWeightedAverage()
for node in self.nodes:
avger.update(node.members)
self._drift_time = self._last_drift_time = avger.current_mean
return self._drift_time
def __repr__(self):
return "%s(%0.4f, %0.4f, %d, %0.2f, %0.2f, %0.2f)" % (
self.__class__.__name__, self.neutral_mass, self.drift_time,
self.charge, self.score,
self.start_time, self.end_time)
def envelope_to_peak_list(envelope):
return [FittedPeak(e[0], e[1], 0, 0, 0, 0, 0, 0, 0) for e in envelope]
def scale_theoretical_isotopic_pattern(eid, tid):
total = sum(p.intensity for p in eid)
for p in tid:
p.intensity *= total
def isotopic_consistency(eic, averagine=glycan, truncate_after=0.95):
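    # For every peak in the feature, compare its observed isotopic envelope against the
    # averagine theoretical pattern (scaled to the observed intensities) using a scaled
    # G-test, then combine the per-peak scores as an intensity-weighted average.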
peak_scores = []
peak_abundances = []
for node in eic:
for peak in node.members:
eid = envelope_to_peak_list(peak.envelope)
tid = averagine.isotopic_cluster(peak.mz, peak.charge, truncate_after=truncate_after)
tid.scale(eid)
peak_scores.append(abs(g_test_scaled(None, eid, tid.truncated_tid)))
peak_abundances.append(peak.intensity)
return max(1 - np.average(peak_scores, weights=peak_abundances), 1e-4)
def spacing_fit(eic):
times, intensities = eic.as_arrays()
last_rt = times[0]
last_int = intensities[0]
rt_deltas = []
intensity_deltas = []
for rt, inten in zip(times[1:], intensities[1:]):
d_rt = rt - last_rt
rt_deltas.append(d_rt)
intensity_deltas.append(abs(last_int - inten))
last_rt = rt
last_int = inten
return max(1 - np.average(rt_deltas, weights=intensity_deltas) * 2, 1e-4)
def shape_fit(eic, smooth=0.15):
return max(1 - AdaptiveMultimodalChromatogramShapeFitter(eic, smooth=smooth).line_test, 1e-4)
def profile_qc(eic, smooth=0.15, averagine=glycan, truncate_after=0.95):
v = 1.0
v *= isotopic_consistency(eic, averagine, truncate_after)
v *= spacing_fit(eic)
v *= shape_fit(eic, smooth)
return v
try:
has_c = True
_map_coord = map_coord
_LCMSFeatureSetFit = LCMSFeatureSetFit
from ms_deisotope._c.feature_map.feature_fit import (LCMSFeatureSetFit, map_coord)
except ImportError as e:
print(e)
has_c = False
| [
"[email protected]"
] | |
c922049e1d08e7a7dd1929f419415ed617b2dccc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/171/41957/submittedfiles/testes.py | 16dd14c2d0278fb3b37bde0222232be8c114fd08 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # -*- coding: utf-8 -*-
import math
#COMECE AQUI ABAIXO
a=float(input('digite a base:'))
b=float(input('digite o expoente:'))
c=a**b
print('%d'%c) | [
"[email protected]"
] | |
8203f8ceb30d5186a154e4b31d9a972deba8201b | 8b4d37632e0435fe5f78bf1631dd74766e8db411 | /xrandroll/xrandr.py | 96ceed2ae8f3e366d30c4851a91de8b1c339fe25 | [
"MIT"
] | permissive | RakhithJK/xrandroll | ca876c35fda3235b81362bce9ff6779759d810a5 | 7d294ea15a639d9b15a55c0bfc13161307425554 | refs/heads/master | 2022-04-07T03:13:53.816999 | 2020-02-07T12:55:02 | 2020-02-07T12:55:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,071 | py | """Read/Write system display state using xrandr."""
import subprocess
from .monitor import Monitor, _split_by_lines_matching
def is_replica_of(a, b):
"""Return True if monitor a is a replica of b.
Replica means same resolution and position.
"""
return (
a.pos_x == b.pos_x
and a.pos_y == b.pos_y
and a.res_x == b.res_x
and a.res_y == b.res_y
and b.enabled
)
class Screen:
"""A Screen is a collection of monitors."""
def __init__(self, data):
self.monitors = {}
for monitor_data in _split_by_lines_matching(r"^[^ \t].*", data[1:]):
m = Monitor(monitor_data)
self.monitors[m.output] = m
self.update_replica_of()
def generate(self):
"""Create a list of xrandr invocations to match this state."""
results = []
for output, mon in self.monitors.items():
cli = ["xrandr"]
cli.append(f"--output {output}")
if not mon.enabled:
cli.append("--off")
else:
mode = mon.get_current_mode()
cli.append(f"--pos {int(mon.pos_x)}x{int(mon.pos_y)}")
cli.append(f"--mode {mode.res_x}x{mode.res_y}")
mod_x, mod_y = mode.res_x, mode.res_y
if mon.orientation in ("left", "right"):
mod_x, mod_y = mod_y, mod_x
cli.append(f"--scale {mon.res_x/mod_x}x{mon.res_y/mod_y}")
cli.append(f"--rotate {mon.orientation}")
if mon.primary:
cli.append("--primary")
results.append(" ".join(cli))
return results
def update_replica_of(self):
"""Decide which monitors are replicas of each other and
mark them as such."""
for a in self.monitors:
self.monitors[a].replica_of = []
for b in self.monitors:
if a != b and is_replica_of(self.monitors[a], self.monitors[b]):
self.monitors[a].replica_of.append(b)
def choose_a_monitor(self):
"""Choose what monitor to select by default.
* Not disabled
* Primary, if possible
"""
candidate = None
for name, mon in self.monitors.items():
if not mon.enabled:
continue
if mon.primary:
return name
candidate = name
return candidate
def get_primary(self):
"""Return the primary monitor, if any."""
for mon in self.monitors.values():
if mon.primary:
return mon
return None
def set_primary(self, name):
for mon in self.monitors.values():
mon.primary = name == mon.output
def read_data():
data = subprocess.check_output(
["xrandr", "--verbose"], encoding="utf-8"
).splitlines()
return data
def parse_data(data):
# Going to pretend there can only be one screen because life is short.
return Screen(_split_by_lines_matching("^Screen ", data)[0])
| [
"[email protected]"
] | |
3d1cde7505953c42c17da27c37c33aaa338acc32 | 8441f156e53afcc6c2b5190de2439c68eb40f218 | /python/nistoar/testing/__init__.py | d92c9b8c26da11199ab8542e66d9baff95a31408 | [] | no_license | usnistgov/oar-metadata | 99436a84d32d623d77310e75eee834c683ea1d5b | 2190bfc79d97f81d52dd24df0d4e9dc844065b67 | refs/heads/integration | 2023-07-08T16:06:23.258608 | 2023-04-22T21:00:09 | 2023-04-22T21:00:09 | 82,972,531 | 4 | 7 | null | 2023-06-30T18:27:38 | 2017-02-23T21:20:34 | Python | UTF-8 | Python | false | false | 5,200 | py | """
test infrastructure and utilities usable throughout the nistoar library
"""
# this code was copied from the testing infrastructure for ejsonschema
import os, shutil
__all__ = [
'ensure_tmpdir', 'tmpdir', 'rmtmpdir', 'Tempfiles', 'artifactdir'
]
tmpname = "_test"
def ensure_tmpdir(basedir=None, dirname=None):
"""
    ensure the existence of a directory where temporary inputs and outputs
can be placed. This directory is not cleaned up after use.
:argument str basedir: the desired path to tmp directory's parent directory.
if not provided, the directory will be placed in the
current working directory.
:return str: the path to the temporary directory
"""
tdir = tmpdir(basedir, dirname)
if not os.path.isdir(tdir):
os.mkdir(tdir)
return tdir
def tmpdir(basedir=None, dirname=None):
"""
return the name of a temporary directory where temporary inputs and outputs
can be placed.
:argument str basedir: the desired path to tmp directory's parent directory.
if not provided, the directory will be placed in the
current working directory.
:argument str dirname: the desired name for the directory
:return str: the path to the temporary directory
"""
if not dirname:
dirname = tmpname + str(os.getpid())
if not basedir:
basedir = os.getcwd()
return os.path.join(basedir, dirname)
def rmdir(dirpath):
"""
remove the given path and all its contents
"""
shutil.rmtree(dirpath)
def rmtmpdir(basedir=None, dirname=None):
"""
    remove the default temporary directory and its contents, if it exists
:argument str basedir: the path to tmp directory's parent directory.
if not provided, the current working directory will
be assumed.
:argument str dirname: the name for the directory
:return str: the path to the removed temporary directory
"""
tdir = tmpdir(basedir, dirname)
if os.path.exists(tdir):
rmdir(tdir)
class Tempfiles(object):
"""
A class for creating temporary testing space that hides the configured
absolute location.
It is instantiated with a base directory where temporary directories and
files can be created. Full paths to a temporary file or directory can
be gotten, then, by calling the instance as a function:
.. code-block:: python
ts = Tempfiles(basedir)
tmpfile = ts("testoutput.txt")
If you want the file to be automatically cleaned up, use the track()
function:
tmpfile = ts.track("testoutput.txt")
Temporary directories that should be cleaned up can be created with mkdir():
.. code-block:: python
tmpdir = ts.mkdir("mytempdir")
All directories and files created below the configured base can be removed
by calling clean() explicitly or by using autoclean=True as a constructor
parameter; the latter will remove the files and directories when the
instance is destroyed.
"""
def __init__(self, tempdir=None, autoclean=False):
if not tempdir:
tempdir = ensure_tmpdir()
assert os.path.exists(tempdir)
self._root = tempdir
self._files = set()
self._autoclean = autoclean
@property
def root(self):
"""
the base directory below which is where temporary files and directories
can be created and tracked
"""
return self._root
def __call__(self, child):
return os.path.join(self.root, child)
def mkdir(self, dirname):
"""
create and track a directory given as a relative path
"""
d = os.path.join(self.root, dirname)
if not os.path.isdir(d):
os.mkdir(d)
self.track(dirname)
return d
def track(self, filename):
"""
keep track of a file or directory that has a relative path given by
filename. It will be removed upon a call to clean()
"""
self._files.add(filename)
return self.__call__(filename)
def clean(self):
"""
remove all files and directories being tracked by this instance.
"""
for i in range(len(self._files)):
filen = self._files.pop()
path = os.path.join(self._root, filen)
if os.path.exists(path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
finally:
if os.path.exists(path):
self._files.add(filen)
def __del__(self):
if self._autoclean:
self.clean()
def artifactdir(mod=None):
out = os.environ.get('OAR_TEST_ARTIFACT_DIR')
if not out or not os.path.isdir(out):
return tmpdir()
if not isinstance(mod, str) and hasattr(mod, '__name__'):
mod = mod.__name__
if not isinstance(mod, str):
return out
out = os.path.join(out, mod)
if not os.path.exists(out):
os.mkdir(out)
return out
| [
"[email protected]"
] | |
00588c59ef606ca06a81ac2cc3da8e2270175594 | 52e8dce655b89a260d049d34e74bc0cd3caf6f07 | /torchreid/__init__.py | 3403b86662515fb3072ca4ac7f8f659b96c4a42f | [
"MIT"
] | permissive | digitalbrain79/deep-person-reid | b527d0e8bd9a4a72209728c105fe5cd1773041dc | 0e7026be11dab7cb6991c43ea0b36765445507f9 | refs/heads/master | 2020-05-20T02:50:24.406708 | 2019-05-06T21:28:34 | 2019-05-06T21:28:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from __future__ import absolute_import
from __future__ import print_function
__version__ = '0.7.4'
__author__ = 'Kaiyang Zhou'
__description__ = 'Deep learning person re-identification in PyTorch'
from torchreid import (
engine,
models,
losses,
metrics,
data,
optim,
utils
)
| [
"[email protected]"
] | |
dd72fcfd037b92916bb36a734e3754cf57ff6822 | dfaa71f8064d3d0773941cf14ab86ff57ff67284 | /part45/blog/models.py | d5edd654805cf32352512470306c70d8c055de71 | [
"Apache-2.0"
] | permissive | yllew36/WellyGI | e94c5000ff3a7f2fd7316d22ad166fbf7916ea23 | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | refs/heads/master | 2020-09-05T15:49:37.386078 | 2019-11-15T08:16:59 | 2019-11-15T08:16:59 | 220,148,061 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | from django.db import models
from django.utils.text import slugify
# Create your models here.
class ArtikelModel(models.Model):
judul = models.CharField(max_length=255)
isi = models.TextField()
penulis = models.CharField(max_length=255)
publish = models.DateTimeField(auto_now_add = True)
update = models.DateTimeField(auto_now=True)
slug = models.SlugField(blank=True,editable=False)
    def save(self, *args, **kwargs):
        self.slug = slugify(self.judul)
        super(ArtikelModel, self).save(*args, **kwargs)
def __str__(self):
return "{}. {}".format(self.id,self.judul) | [
"[email protected]"
] | |
d5710045f064d84d667dfa28c760ba605ec4e832 | f1ee4b96f37419504576dc8b0d5b708bd5b9ba29 | /builder/main.py | 7a06353e59a01b076b8af1324a542b80ce572c60 | [] | no_license | OS-Q/P254 | 6d850efdd9da8a76d3cc2a4340c62cd8039dacdc | e3b542ec8020d280ab41ea5f2496b260e710f6d1 | refs/heads/master | 2023-04-19T11:03:23.733720 | 2021-05-04T03:48:12 | 2021-05-04T03:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,291 | py | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from platform import system
from os import makedirs
from os.path import isdir, join
from SCons.Script import (ARGUMENTS, COMMAND_LINE_TARGETS, AlwaysBuild,
Builder, Default, DefaultEnvironment)
env = DefaultEnvironment()
env.SConscript("compat.py", exports="env")
platform = env.PioPlatform()
board = env.BoardConfig()
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
GDB="arm-none-eabi-gdb",
OBJCOPY="arm-none-eabi-objcopy",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rc"],
SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',
PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".hex"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
if "zephyr" in env.get("PIOFRAMEWORK", []):
env.SConscript(
join(platform.get_package_dir(
"framework-zephyr"), "scripts", "platformio", "platformio-build-pre.py"),
exports={"env": env}
)
target_elf = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
target_firm = join("$BUILD_DIR", "${PROGNAME}.bin")
else:
target_elf = env.BuildProgram()
target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_elf)
env.Depends(target_firm, "checkprogsize")
AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)
#
# Target: Print binary size
#
target_size = env.Alias(
"size", target_elf,
env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Target: Upload by default .bin file
#
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
upload_actions = []
if upload_protocol == "mbed":
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for upload disk..."),
env.VerboseAction(env.UploadToDisk, "Uploading $SOURCE")
]
elif upload_protocol.startswith("jlink"):
def _jlink_cmd_script(env, source):
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [
"h",
"loadbin %s, %s" % (source, board.get(
"upload.offset_address", "0x0")),
"r",
"q"
]
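        # J-Link Commander script: halt the target ("h"), flash the image at the
        # configured offset ("loadbin"), reset ("r") and quit ("q").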
with open(script_path, "w") as fp:
fp.write("\n".join(commands))
return script_path
env.Replace(
__jlink_cmd_script=_jlink_cmd_script,
UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
UPLOADERFLAGS=[
"-device", board.get("debug", {}).get("jlink_device"),
"-speed", env.GetProjectOption("debug_speed", "4000"),
"-if", ("jtag" if upload_protocol == "jlink-jtag" else "swd"),
"-autoconnect", "1",
"-NoGui", "1"
],
UPLOADCMD='$UPLOADER $UPLOADERFLAGS -CommanderScript "${__jlink_cmd_script(__env__, SOURCE)}"'
)
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
elif upload_protocol.startswith("blackmagic"):
env.Replace(
UPLOADER="$GDB",
UPLOADERFLAGS=[
"-nx",
"--batch",
"-ex", "target extended-remote $UPLOAD_PORT",
"-ex", "monitor %s_scan" %
("jtag" if upload_protocol == "blackmagic-jtag" else "swdp"),
"-ex", "attach 1",
"-ex", "load",
"-ex", "compare-sections",
"-ex", "kill"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $BUILD_DIR/${PROGNAME}.elf"
)
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for BlackMagic port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol == "cmsis-dap":
debug_server = board.get("debug.tools", {}).get(
upload_protocol, {}).get("server")
assert debug_server
if debug_server.get("package") == "tool-pyocd":
env.Replace(
UPLOADER=join(platform.get_package_dir("tool-pyocd") or "",
"pyocd-flashtool.py"),
UPLOADERFLAGS=debug_server.get("arguments", [])[1:],
UPLOADCMD='"$PYTHONEXE" "$UPLOADER" $UPLOADERFLAGS $SOURCE'
)
elif debug_server.get("package") == "tool-openocd":
openocd_args = [
"-d%d" % (2 if int(ARGUMENTS.get("PIOVERBOSE", 0)) else 1)
]
openocd_args.extend(debug_server.get("arguments", []))
if env.GetProjectOption("debug_speed"):
openocd_args.extend(
["-c", "adapter speed %s" % env.GetProjectOption("debug_speed")]
)
openocd_args.extend([
"-c", "program {$SOURCE} %s verify reset; shutdown;" %
board.get("upload.offset_address", "")
])
openocd_args = [
f.replace("$PACKAGE_DIR",
platform.get_package_dir("tool-openocd") or "")
for f in openocd_args
]
env.Replace(
UPLOADER="openocd",
UPLOADERFLAGS=openocd_args,
UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
upload_actions = [
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
# custom upload tool
elif upload_protocol == "custom":
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
if not upload_actions:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("upload", target_firm, upload_actions))
#
# Default targets
#
Default([target_buildprog, target_size])
| [
"[email protected]"
] | |
b1877b2bf819138238459ec197dd6bdf01e9b712 | 3d2a74a859b0ea2a2f12315fd781154eae8449c5 | /LeetCode/min_size_suba_sum.py | 0b8ec9e1f641060914e8bb23000cbca0b64a88c5 | [] | no_license | jacobfelknor/practice_interview_questions | 1e929b0fdb4f816202f000de96b9f66fb119802b | 942f0ec730d7f0af650ddcee1abc5d17827c953c | refs/heads/master | 2021-11-22T07:27:25.986891 | 2021-11-09T02:12:13 | 2021-11-09T02:12:13 | 227,508,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | """
Given an array of n positive integers and a positive integer s,
find the minimal length of a contiguous subarray of which the sum ≥ s.
If there isn't one, return 0 instead.
>>> min_sub_array_length([2,3,1,2,4,3], 7)
2
"""
from typing import List
def min_sub_array_length(nums: List[int], nsum: int) -> int:
start = 0
# end = 0
min_len = float("inf")
cur_sum = 0
for end in range(len(nums)):
cur_sum += nums[end]
while cur_sum >= nsum:
min_len = min(min_len, end - start + 1)
cur_sum -= nums[start]
start += 1
if min_len == float("inf"):
return 0
return min_len
print(min_sub_array_length([2, 3, 1, 2, 4, 2], 7))
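# Two quick checks of the sliding-window behaviour documented above (illustrative):
# the shortest window with sum >= 7 in [2, 3, 1, 2, 4, 3] is [4, 3], so the answer is 2,
# and a target that no window can reach returns 0.
assert min_sub_array_length([2, 3, 1, 2, 4, 3], 7) == 2
assert min_sub_array_length([1, 1, 1], 5) == 0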
| [
"[email protected]"
] | |
7a5222fd8eda27337c2d12c3e550a83aa9fa6281 | 231f8a898b20e475a5cabff439600de211d825c0 | /deploy_tools/fabfile.py | 33f3f66d5a1f450f1ea86a8eed1c19c182d68253 | [
"MIT"
] | permissive | thewchan/superlists | f7370b341ce7c37b8cae506eb5bafdd2fb31b07a | af41636b2cdafb45c638e36076b9cdefc5586aad | refs/heads/master | 2023-05-26T11:01:24.310480 | 2021-06-11T21:12:20 | 2021-06-11T21:12:20 | 361,209,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 1,841 | py | """Fabric deployment configuration and script."""
import random
from fabric.contrib.files import append, exists
from fabric.api import cd, env, local, run
REPO_URL = "https://github.com/thewchan/superlists.git"
def deploy() -> None:
"""Deploy site to server."""
site_folder = f"/home/{env.user}/sites/{env.host}"
run(f"mkdir -p {site_folder}")
with cd(site_folder):
_get_latest_source()
_update_virtualenv()
_create_or_update_dotenv()
_update_static_files()
_update_database()
def _get_latest_source() -> None:
"""Fetch the latest source code."""
if exists(".git"):
run("git fetch")
else:
run(f"git clone {REPO_URL} .")
current_commit = local("git log -n 1 --format=%H", capture=True)
run(f"git reset --hard {current_commit}")
def _update_virtualenv() -> None:
"""Updates the virtual environment at the server."""
if not exists("virtualenv/bin/pip"):
run("python3.7 -m venv virtualenv")
run("./virtualenv/bin/pip install -r requirements.txt")
def _create_or_update_dotenv() -> None:
"""Create or update environment file as needed."""
append(".env", "DJANGO_DEBUG_FALSE=y")
append(".env", f"SITENAME={env.host}")
current_contents = run("cat .env")
if "DJANGO_SECRET_KEY" not in current_contents:
new_secret = "".join(
random.SystemRandom().choices(
"abcdefghijklmnopqrstuvwxyz0123456789", k=50
)
)
append(".env", f"DJANGO_SECRET_KEY={new_secret}")
def _update_static_files() -> None:
"""Update static files as needed."""
run("./virtualenv/bin/python manage.py collectstatic --noinput")
def _update_database() -> None:
"""Migrate database as necessary."""
run("./virtualenv/bin/python manage.py migrate --noinput")
| [
"[email protected]"
] | |
ef36d5f6cc4c8c0f5464bce23e67c44306bfe522 | 1086ef8bcd54d4417175a4a77e5d63b53a47c8cf | /Forks/Online-Judges-Problems-SourceCode-master/Hackerrank/AI/Statistics-MachineLearning/correlation_and_regression_lines_5.py | 6bbdb8ea12b6aa32461da6be7494d8e242886a3f | [] | no_license | wisdomtohe/CompetitiveProgramming | b883da6380f56af0c2625318deed3529cb0838f6 | a20bfea8a2fd539382a100d843fb91126ab5ad34 | refs/heads/master | 2022-12-18T17:33:48.399350 | 2020-09-25T02:24:41 | 2020-09-25T02:24:41 | 298,446,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | ___author__ = 'Ahmed Hani Ibrahim'
print(16)
| [
"[email protected]"
] | |
1f4566fe4bab4acc5b0a1372b183c37d6628e045 | a262151ecb151b4c8335354c972fb166b81f4635 | /sdk/cdn/azure-mgmt-cdn/azure/mgmt/cdn/aio/operations/_rule_sets_operations.py | f09eaf880fb6c66476285ef0d97beaf70a93e6c0 | [
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | fenwickt/azure-sdk-for-python | 5fc8f3383caa4e5e7a61f5b497a73635c4377935 | 0d1f644925d2472c72b195588508bd0efc4baf0c | refs/heads/master | 2023-03-31T08:02:37.322485 | 2021-03-29T07:48:41 | 2021-03-29T07:48:41 | 319,299,226 | 0 | 0 | MIT | 2020-12-07T11:31:48 | 2020-12-07T11:31:48 | null | UTF-8 | Python | false | false | 26,253 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RuleSetsOperations:
"""RuleSetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cdn.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_profile(
self,
resource_group_name: str,
profile_name: str,
**kwargs
) -> AsyncIterable["_models.RuleSetListResult"]:
"""Lists existing AzureFrontDoor rule sets within a profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RuleSetListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cdn.models.RuleSetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleSetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_profile.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RuleSetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.AfdErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets'} # type: ignore
async def get(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
**kwargs
) -> "_models.RuleSet":
"""Gets an existing AzureFrontDoor rule set with the specified rule set name under the specified
subscription, resource group and profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param rule_set_name: Name of the rule set under the profile which is unique globally.
:type rule_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RuleSet, or the result of cls(response)
:rtype: ~azure.mgmt.cdn.models.RuleSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.AfdErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RuleSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
rule_set: "_models.RuleSet",
**kwargs
) -> "_models.RuleSet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(rule_set, 'RuleSet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.AfdErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RuleSet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RuleSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
rule_set: "_models.RuleSet",
**kwargs
) -> AsyncLROPoller["_models.RuleSet"]:
"""Creates a new rule set within the specified profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param rule_set_name: Name of the rule set under the profile which is unique globally.
:type rule_set_name: str
:param rule_set: RuleSet properties.
:type rule_set: ~azure.mgmt.cdn.models.RuleSet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RuleSet or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cdn.models.RuleSet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuleSet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
profile_name=profile_name,
rule_set_name=rule_set_name,
rule_set=rule_set,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RuleSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.AfdErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes an existing AzureFrontDoor rule set with the specified rule set name under the
specified subscription, resource group and profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param rule_set_name: Name of the rule set under the profile which is unique globally.
:type rule_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
profile_name=profile_name,
rule_set_name=rule_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}'} # type: ignore
def list_resource_usage(
self,
resource_group_name: str,
profile_name: str,
rule_set_name: str,
**kwargs
) -> AsyncIterable["_models.UsagesListResult"]:
"""Checks the quota and actual usage of endpoints under the given CDN profile.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param rule_set_name: Name of the rule set under the profile which is unique globally.
:type rule_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsagesListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cdn.models.UsagesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsagesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_resource_usage.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'ruleSetName': self._serialize.url("rule_set_name", rule_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsagesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.AfdErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_resource_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/ruleSets/{ruleSetName}/usages'} # type: ignore
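    # Minimal usage sketch (assumes the async CdnManagementClient from this package exposes
    # this operations group as `rule_sets`; credential handling shown here is illustrative):
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.cdn.aio import CdnManagementClient
    #
    #   async with CdnManagementClient(DefaultAzureCredential(), subscription_id) as client:
    #       async for rule_set in client.rule_sets.list_by_profile(resource_group, profile_name):
    #           print(rule_set.name)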
| [
"[email protected]"
] | |
20437c1a84eb98ac587f50388c9768487f5ca702 | b26448cd43ac991c6277b588a1dcb6da53afe10a | /users/forms.py | 54880d817fdc01332a72a06f7e769d744f2d5c8f | [] | no_license | Xednom/e-wallet | 76da2658c34391c5d38e9d73ebce8f4ea80be87e | 97e83849296fa9678b6fdcb0737dfe09ee268a3f | refs/heads/master | 2023-01-29T04:27:51.833449 | 2019-10-16T07:34:25 | 2019-10-16T07:34:25 | 239,905,317 | 1 | 0 | null | 2023-01-04T14:20:08 | 2020-02-12T01:55:27 | Python | UTF-8 | Python | false | false | 763 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django_registration.forms import RegistrationForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = (
'first_name', 'last_name', 'email', 'address',
'country', 'state', 'zip_code'
)
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = (
'first_name', 'last_name', 'email', 'address',
'country', 'state', 'zip_code'
)
class CustomUserForm(RegistrationForm):
class Meta(RegistrationForm.Meta):
model = CustomUser | [
"[email protected]"
] | |
a722ff76b03c3ec84e50f9fb3054123fce8d77e9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startQiskit_Class74.py | aad134186459476f44dc4c2f3adf503ded4612fb | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,187 | py | # qubit number=3
# total number=13
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
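# Worked example for the oracle inputs above (illustrative): bernstein_test_3 has a = "111",
# b = "1", so for rep = "101" bitwise_dot gives (1*1 + 1*0 + 1*1) % 2 = 0 and XOR-ing with b
# yields f("101") == "1"; the Bernstein-Vazirani circuit recovers the hidden string a from a
# single query to such an oracle.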
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_Class74.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"[email protected]"
] | |
485a56d5d4b5bbc4ce35a0d79cf74af9937dee85 | 64f365bf14a3c700ac3dab4a43a2bccd7ad0f222 | /setup.py | ea34a7eb8bee6edea5c9c57b41d1aaf016932e65 | [
"MIT"
] | permissive | russmain/leafmap | a4e8d081a5a3c973d2eb87616340dc44fd277fbd | 277edabfba56bfe133f507173e6005b5a7504234 | refs/heads/master | 2023-07-15T23:11:16.445456 | 2021-09-02T03:04:59 | 2021-09-02T03:04:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | #!/usr/bin/env python
"""The setup script."""
import io
from os import path as op
from setuptools import setup, find_packages
with open("README.md") as readme_file:
readme = readme_file.read()
here = op.abspath(op.dirname(__file__))
# get the dependencies and installs
with io.open(op.join(here, "requirements.txt"), encoding="utf-8") as f:
all_reqs = f.read().split("\n")
install_requires = [x.strip() for x in all_reqs if "git+" not in x]
dependency_links = [x.strip().replace("git+", "") for x in all_reqs if "git+" not in x]
requirements = []
setup_requirements = []
test_requirements = []
setup(
author="Qiusheng Wu",
author_email="[email protected]",
python_requires=">=3.7",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
description="A Python package for geospatial analysis and interactive mapping in a Jupyter environment.",
install_requires=install_requires,
dependency_links=dependency_links,
license="MIT license",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords="leafmap",
name="leafmap",
packages=find_packages(include=["leafmap", "leafmap.*"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/giswqs/leafmap",
version="0.4.1",
zip_safe=False,
)
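# With this setup script in place the package can be installed for local development with, e.g.:
#   python -m pip install -e .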
| [
"[email protected]"
] | |
1a480f0e4af30873cf5daa67189f7085fb570119 | ee561aa019a80f621007f82bdb21fe6ed8b6278f | /devel/ros_control-melodic-devel/hardware_interface/catkin_generated/pkg.develspace.context.pc.py | 0b881c3ecc6378010075a3d5b58fcdccc75ddd34 | [] | no_license | allanwhledu/agv_edu_prj | 4fb5fbf14cf0a14edd57ee9bd87903dc25d4d4f2 | 643a8a96ca7027529332f25208350de78c07e33d | refs/heads/master | 2020-09-23T23:32:54.430035 | 2019-12-04T07:47:55 | 2019-12-04T07:47:55 | 225,613,426 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/sjtuwhl/ROBOTLAB_WS/src/ros_control-melodic-devel/hardware_interface/include".split(';') if "/home/sjtuwhl/ROBOTLAB_WS/src/ros_control-melodic-devel/hardware_interface/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hardware_interface"
PROJECT_SPACE_DIR = "/home/sjtuwhl/ROBOTLAB_WS/devel"
PROJECT_VERSION = "0.15.1"
| [
"[email protected]"
] | |
2843225ad98b83b0dfefd872c82ee2088e5571c4 | 0b16b44e4fc8c98c9ea3f9d4b8b470f4f62f918d | /Core/migrations/0005_auto_20201105_0936.py | bedc07c9a234fd96f3fc7bd257cbcec57776181d | [] | no_license | AthifSaheer/DipakNiroula-Django-Ecom | 342eece90211fe80c41ba72bf69a50e63c5ea901 | 94ead608919c5bb076387e26f396e6c38319433e | refs/heads/main | 2023-02-05T06:52:24.204206 | 2020-12-24T13:19:13 | 2020-12-24T13:19:13 | 324,160,212 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | # Generated by Django 2.2.14 on 2020-11-05 04:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Core', '0004_auto_20201104_1147'),
]
operations = [
migrations.AlterField(
model_name='order',
name='payment_method',
field=models.CharField(choices=[('Cash On Delivery ', 'Cash On Delivery '), ('Khalti ', 'Khalti '), ('Esewa ', 'Esewa ')], default='Khalti', max_length=20),
),
]
| [
"[email protected]"
] | |
a56c8b2eeff6a702bb2f1dca4925a23f4f0d3ad8 | 4c61c2ca62ab84c240664cb8fad6535b282b95f7 | /python/lsst/sims/skybrightness_pre/SkyModelPre.py | e6bddf7216336bd41d5aea1de858fc44dcbf38b4 | [] | no_license | andrewbheyer/sims_skybrightness_pre | 40b864ab35df28ef4a5ebaf7100a3c7460109401 | 558b32b4fdca57f79f7f5452813f3336f2c9afe9 | refs/heads/master | 2021-05-16T12:41:26.870222 | 2017-08-30T20:55:11 | 2017-08-30T20:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,123 | py | from builtins import object
import numpy as np
import glob
import os
import healpy as hp
from lsst.utils import getPackageDir
import warnings
from lsst.sims.utils import haversine
__all__ = ['SkyModelPre']
class SkyModelPre(object):
"""
    Load pre-computed sky brightness maps for the LSST site and use them to interpolate to
arbitrary dates.
"""
def __init__(self, data_path=None, opsimFields=False, preload=True, speedLoad=False, verbose=False):
self.info = None
self.sb = None
self.opsimFields = opsimFields
self.verbose = verbose
# Look in default location for .npz files to load
if 'SIMS_SKYBRIGHTNESS_DATA' in os.environ:
data_dir = os.environ['SIMS_SKYBRIGHTNESS_DATA']
else:
data_dir = os.path.join(getPackageDir('sims_skybrightness_pre'), 'data')
if data_path is None:
if opsimFields:
data_path = os.path.join(data_dir, 'opsimFields')
else:
data_path = os.path.join(data_dir, 'healpix')
self.files = glob.glob(os.path.join(data_path, '*.npz*'))
if len(self.files) == 0:
errmssg = 'Failed to find pre-computed .npz files. '
errmssg += 'Copy data from NCSA with sims_skybrightness_pre/data/data_down.sh \n'
errmssg += 'or build by running sims_skybrightness_pre/data/generate_sky.py'
raise ValueError(errmssg)
mjd_left = []
mjd_right = []
# Expect filenames of the form mjd1_mjd2.npz, e.g., 59632.155_59633.2.npz
big_files = glob.glob(os.path.join(data_path, '*.npz'))
if len(big_files) != 0:
self.files = big_files
for filename in big_files:
temp = os.path.split(filename)[-1].replace('.npz', '').split('_')
mjd_left.append(float(temp[0]))
mjd_right.append(float(temp[1]))
self.mjd_left = np.array(mjd_left)
self.mjd_right = np.array(mjd_right)
# Go ahead and load the first one by default
if speedLoad:
self._load_data(59580., filename=os.path.join(data_dir, 'healpix/small_example.npz_small'))
else:
if preload:
self._load_data(self.mjd_left[0])
else:
self.loaded_range = np.array([-1])
def _load_data(self, mjd, filename=None):
"""
Load up the .npz file to interpolate things
"""
if filename is None:
# Figure out which file to load.
file_indx = np.where((mjd >= self.mjd_left) & (mjd <= self.mjd_right))[0]
if np.size(file_indx) == 0:
raise ValueError('MJD = %f is out of range for the files found (%f-%f)' % (mjd,
self.mjd_left.min(),
self.mjd_right.max()))
filename = self.files[file_indx.min()]
self.loaded_range = np.array([self.mjd_left[file_indx], self.mjd_right[file_indx]])
else:
self.loaded_range = None
if self.verbose:
print('Loading file %s' % os.path.split(filename)[1])
# Add encoding kwarg to restore Python 2.7 generated files
data = np.load(filename, encoding='bytes')
self.info = data['dict_of_lists'][()]
self.sb = data['sky_brightness'][()]
self.header = data['header'][()]
data.close()
# Step to make sure keys are strings not bytes
all_dicts = [self.info, self.sb, self.header]
for selfDict in all_dicts:
for key in list(selfDict.keys()):
if type(key) != str:
selfDict[key.decode("utf-8")] = selfDict.pop(key)
self.filter_names = list(self.sb.keys())
if self.verbose:
print('%s loaded' % os.path.split(filename)[1])
if not self.opsimFields:
self.nside = hp.npix2nside(self.sb[self.filter_names[0]][0, :].size)
if self.loaded_range is None:
self.loaded_range = np.array([self.info['mjds'].min(), self.info['mjds'].max()])
def returnSunMoon(self, mjd):
"""
        Return a dictionary with the interpolated positions of the sun and moon.
Parameters
----------
mjd : float
Modified Julian Date to interpolate to
Returns
-------
sunMoon : dict
Dict with keys for the sun and moon RA and Dec and the
            moon-sun separation.
"""
keys = ['sunAlts', 'moonAlts', 'moonRAs', 'moonDecs', 'sunRAs',
'sunDecs', 'moonSunSep']
if (mjd < self.loaded_range.min() or (mjd > self.loaded_range.max())):
self._load_data(mjd)
left = np.searchsorted(self.info['mjds'], mjd)-1
right = left+1
# If we are out of bounds
if right >= self.info['mjds'].size:
right -= 1
baseline = 1.
elif left < 0:
left += 1
baseline = 1.
else:
baseline = self.info['mjds'][right] - self.info['mjds'][left]
wterm = (mjd - self.info['mjds'][left])/baseline
w1 = (1. - wterm)
w2 = wterm
result = {}
for key in keys:
if key[-1] == 's':
newkey = key[:-1]
else:
newkey = key
result[newkey] = self.info[key][left] * w1 + self.info[key][right] * w2
return result
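    # Note that the trailing 's' is stripped from each key above, so the returned dict has
    # keys 'sunAlt', 'moonAlt', 'moonRA', 'moonDec', 'sunRA', 'sunDec' and 'moonSunSep',
    # e.g. returnSunMoon(59853.2)['moonSunSep'] (illustrative MJD).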
def returnAirmass(self, mjd, maxAM=10., indx=None, badval=hp.UNSEEN):
"""
Parameters
----------
mjd : float
Modified Julian Date to interpolate to
indx : List of int(s) (None)
indices to interpolate the sky values at. Returns full sky if None. If the class was
            instantiated with opsimFields, indx is the field ID, otherwise it is the healpix ID.
maxAM : float (10)
The maximum airmass to return, everything above this airmass will be set to badval
Returns
-------
airmass : np.array
Array of airmass values. If the MJD is between sunrise and sunset, all values are masked.
"""
if (mjd < self.loaded_range.min() or (mjd > self.loaded_range.max())):
self._load_data(mjd)
left = np.searchsorted(self.info['mjds'], mjd)-1
right = left+1
# If we are out of bounds
if right >= self.info['mjds'].size:
right -= 1
baseline = 1.
elif left < 0:
left += 1
baseline = 1.
else:
baseline = self.info['mjds'][right] - self.info['mjds'][left]
if indx is None:
result_size = self.sb[list(self.sb.keys())[0]][left, :].size
indx = np.arange(result_size)
else:
result_size = len(indx)
# Check if we are between sunrise/set
if baseline > self.header['timestep_max']:
warnings.warn('Requested MJD between sunrise and sunset, returning closest maps')
diff = np.abs(self.info['mjds'][left.max():right.max()+1]-mjd)
closest_indx = np.array([left, right])[np.where(diff == np.min(diff))]
airmass = self.info['airmass'][closest_indx, indx]
mask = np.where((self.info['airmass'][closest_indx, indx].ravel() < 1.) |
(self.info['airmass'][closest_indx, indx].ravel() > maxAM))
airmass = airmass.ravel()
else:
wterm = (mjd - self.info['mjds'][left])/baseline
w1 = (1. - wterm)
w2 = wterm
airmass = self.info['airmass'][left, indx] * w1 + self.info['airmass'][right, indx] * w2
mask = np.where((self.info['airmass'][left, indx] < 1.) |
(self.info['airmass'][left, indx] > maxAM) |
(self.info['airmass'][right, indx] < 1.) |
(self.info['airmass'][right, indx] > maxAM))
airmass[mask] = badval
return airmass
def returnMags(self, mjd, indx=None, airmass_mask=True, planet_mask=True,
moon_mask=True, zenith_mask=True, badval=hp.UNSEEN,
filters=['u', 'g', 'r', 'i', 'z', 'y'], extrapolate=False):
"""
Return a full sky map or individual pixels for the input mjd
Parameters
----------
mjd : float
Modified Julian Date to interpolate to
indx : List of int(s) (None)
indices to interpolate the sky values at. Returns full sky if None. If the class was
            instantiated with opsimFields, indx is the field ID, otherwise it is the healpix ID.
airmass_mask : bool (True)
Set high (>2.5) airmass pixels to badval.
planet_mask : bool (True)
Set sky maps to badval near (2 degrees) bright planets.
moon_mask : bool (True)
Set sky maps near (10 degrees) the moon to badval.
zenith_mask : bool (True)
Set sky maps at high altitude (>86.5) to badval.
badval : float (-1.6375e30)
Mask value. Defaults to the healpy mask value.
filters : list
List of strings for the filters that should be returned.
extrapolate : bool (False)
            If indx is set, extrapolate any masked pixels to be the same as the nearest non-masked
value from the full sky map.
Returns
-------
sbs : dict
A dictionary with filter names as keys and np.arrays as values which
hold the sky brightness maps in mag/sq arcsec.
"""
if (mjd < self.loaded_range.min() or (mjd > self.loaded_range.max())):
self._load_data(mjd)
mask_rules = {'airmass': airmass_mask, 'planet': planet_mask,
'moon': moon_mask, 'zenith': zenith_mask}
left = np.searchsorted(self.info['mjds'], mjd)-1
right = left+1
# Do full sky by default
if indx is None:
indx = np.arange(self.sb['r'].shape[1])
full_sky = True
else:
full_sky = False
# If we are out of bounds
if right >= self.info['mjds'].size:
right -= 1
baseline = 1.
elif left < 0:
left += 1
baseline = 1.
else:
baseline = self.info['mjds'][right] - self.info['mjds'][left]
# Check if we are between sunrise/set
if baseline > self.header['timestep_max']:
warnings.warn('Requested MJD between sunrise and sunset, returning closest maps')
diff = np.abs(self.info['mjds'][left.max():right.max()+1]-mjd)
closest_indx = np.array([left, right])[np.where(diff == np.min(diff))].min()
sbs = {}
for filter_name in filters:
sbs[filter_name] = self.sb[filter_name][closest_indx, indx]
for mask_name in mask_rules:
if mask_rules[mask_name]:
toMask = np.where(self.info[mask_name+'_masks'][closest_indx, indx])
sbs[filter_name][toMask] = badval
sbs[filter_name][np.isinf(sbs[filter_name])] = badval
sbs[filter_name][np.where(sbs[filter_name] == hp.UNSEEN)] = badval
else:
wterm = (mjd - self.info['mjds'][left])/baseline
w1 = (1. - wterm)
w2 = wterm
sbs = {}
for filter_name in filters:
sbs[filter_name] = self.sb[filter_name][left, indx] * w1 + \
self.sb[filter_name][right, indx] * w2
for mask_name in mask_rules:
if mask_rules[mask_name]:
toMask = np.where(self.info[mask_name+'_masks'][left, indx] |
self.info[mask_name+'_masks'][right, indx] |
np.isinf(sbs[filter_name]))
sbs[filter_name][toMask] = badval
sbs[filter_name][np.where(sbs[filter_name] == hp.UNSEEN)] = badval
sbs[filter_name][np.where(sbs[filter_name] == hp.UNSEEN)] = badval
# If requested a certain pixel(s), and want to extrapolate.
if (not full_sky) & extrapolate:
masked_pix = False
for filter_name in filters:
if (badval in sbs[filter_name]) | (True in np.isnan(sbs[filter_name])):
masked_pix = True
if masked_pix:
# We have pixels that are masked that we want reasonable values for
full_sky_sb = self.returnMags(mjd, airmass_mask=False, planet_mask=False, moon_mask=False,
zenith_mask=False, filters=filters)
good = np.where((full_sky_sb[filters[0]] != badval) & ~np.isnan(full_sky_sb[filters[0]]))[0]
ra_full = np.radians(self.header['ra'][good])
dec_full = np.radians(self.header['dec'][good])
for filtername in filters:
full_sky_sb[filtername] = full_sky_sb[filtername][good]
# Going to assume the masked pixels are the same in all filters
masked_indx = np.where((sbs[filters[0]].ravel() == badval) |
np.isnan(sbs[filters[0]].ravel()))[0]
for i, mi in enumerate(masked_indx):
# Note, this is going to be really slow for many pixels, should use a kdtree
dist = haversine(np.radians(self.header['ra'][indx][i]),
np.radians(self.header['dec'][indx][i]),
ra_full, dec_full)
closest = np.where(dist == dist.min())[0]
for filtername in filters:
sbs[filtername].ravel()[mi] = np.min(full_sky_sb[filtername][closest])
return sbs
| [
"[email protected]"
] | |
0cb6d6ce63e06611b90c62a58cf84c65f89759e2 | 3c2b5fd20c7372fccb97fa76deb0980a173b5991 | /PythonFullStack/000Basic/day06/02-文件的读写.py | 0c155ffb3e9c6c9d76f6d864f0c9700496908cc1 | [] | no_license | softwarefaith/PythonFullStack | 560cdc2c0c38831e8304751b8b2bf680cb2f23e5 | 292cc0a5eee3ed8eb8a8d5e14673226533d2651e | refs/heads/master | 2021-05-15T09:57:37.812869 | 2019-02-21T10:37:41 | 2019-02-21T10:37:41 | 108,229,662 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,715 | py | #文件的访问模式:
#1. r: read-only; crashes if the file does not exist
#2. w: write-only
#3. a: append
#4. rb: read file data in binary mode (commonly used)
#5. wb: write file data in binary mode (commonly used)
#6. ab: append file data in binary mode (commonly used)
# Used when scraping video, image, text and audio data
# r+ w+ a+ support both reading and writing
#rb+ wb+ ab+ support read/write operations in binary mode
# Files are opened with the open() function
#------------ r mode (read-only) -----------
# Crashes if this file does not exist
# file = open("1.txt","r",encoding="utf-8")
# # Read all the data in the file
# content = file.read()
# print(content)
# # Must be closed
# file.close()
#-------- w mode ----------------------
# Note: if the file does not exist, it is created and then opened.
#encoding="utf-8" sets the encoding (mac/linux)
#GBK cp936
# Note: in w mode, if the file already exists its existing data is cleared before the new data is written
# file = open("1.txt","w",encoding="utf-8")
# Write data to 1.txt
# Writing several times while the file stays open does not overwrite the earlier writes
# file.write("A")
# file.write("哈哈")
# file.write("说大事大所大所多")
# # Check the current encoding (cp936)
# result = file.encoding
# print(result)
# # Remember: for every file operation the last step is close
# file.close()
#a------------ appending data
#
# file = open("1.txt","a",encoding="utf-8")
# file.write("BBB")
# file.close()
# Python 2 does not support Chinese by default:
# Python 3 supports Chinese out of the box
#_*_ coding:utf-8
# print("啊哈哈")
#rb----------- reading data in binary mode
file = open("1.txt","rb")
#binary mode doesn't take an encoding argument
# In binary mode there is no need to specify an encoding
# Read the data
# Chinese characters print as escapes such as \xe5; one character takes three bytes
# file_data = file.read()
# # Decode the bytes
# content = file_data.decode("utf-8")
# # This prints the decoded data
# print(content)
# # Writing data is not supported
# file.write("aaaa")
#
# file.close()
#wb-------------- writing data in binary mode
# The leading w means the original data is overwritten
# file = open("1.txt","wb")
# content = "hello 哈哈"
# # Wrap content as binary data, i.e. encode content to bytes
# file_data =content.encode("utf-8")
# file.write(file_data)
# file.close()
#ab------- appending data in binary mode
# # If both modes are present at the same time, the code below will not run
# file = open("1.txt","ab")
# content = "hello"
# # Appending also requires binary data
# file_data =content.encode("utf-8")
# file.write(file_data)
# # The data cannot be read in this mode
# file.close()
#r+------------------- read and write
# For portability across operating systems, the encoding argument can be used whenever no b mode is involved
# Basic operations, with plenty of pitfalls
# Regular expressions
file = open("1.txt","r+",encoding="utf-8")
file.write("abc")
result = file.read()
print(result)
file.close()
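# Note on the "r+" example above: the file position starts at 0, so write("abc") overwrites
# the first three characters of 1.txt, and the read() that follows returns the remaining
# text from the current position onwards.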
| [
"[email protected]"
] | |
123a0cd3e2885c33639ca783c268bbee0e3fa695 | bc63598033c6ca4ac7f257897aec0b23eaff60d1 | /test/mitmproxy/test_proxy_config.py | e2c39846c7e7b8d19edbed878fb14cf9b84d42ad | [
"MIT"
] | permissive | Scalr/mitmproxy | 4aee723aef2f34fa1209364b5b03cedff7d3f85e | a6c608e08595e95279713e51e2a346344bd290c0 | refs/heads/master | 2020-06-27T08:52:29.441895 | 2016-11-23T00:27:23 | 2016-11-23T00:27:23 | 74,527,489 | 0 | 2 | MIT | 2018-05-03T00:00:18 | 2016-11-23T01:10:39 | Python | UTF-8 | Python | false | false | 726 | py | from mitmproxy.test import tutils
from mitmproxy.proxy import config
def test_parse_server_spec():
tutils.raises(
"Invalid server specification", config.parse_server_spec, ""
)
assert config.parse_server_spec("http://foo.com:88") == (
"http", ("foo.com", 88)
)
assert config.parse_server_spec("http://foo.com") == (
"http", ("foo.com", 80)
)
assert config.parse_server_spec("https://foo.com") == (
"https", ("foo.com", 443)
)
tutils.raises(
"Invalid server specification",
config.parse_server_spec,
"foo.com"
)
tutils.raises(
"Invalid server specification",
config.parse_server_spec,
"http://"
)
| [
"[email protected]"
] | |
18c980d503bf6b4c69c1adfc9b18247782543587 | ac6e4102dfb49a4e49de0e2766feb6e80ab0b5c2 | /h1/models/storage_project_disk_update.py | db3461e12902a70bd45008c134567f0cb69ccd06 | [
"MIT"
] | permissive | hyperonecom/h1-client-python | df01f05ad295121e3dd391a3274c41e2f5b88e53 | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | refs/heads/master | 2023-04-05T01:51:31.637002 | 2021-03-29T00:05:41 | 2021-03-29T00:05:41 | 319,309,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,246 | py | # coding: utf-8
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from h1.configuration import Configuration
class StorageProjectDiskUpdate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
"""StorageProjectDiskUpdate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self.discriminator = None
if name is not None:
self.name = name
@property
def name(self):
"""Gets the name of this StorageProjectDiskUpdate. # noqa: E501
:return: The name of this StorageProjectDiskUpdate. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this StorageProjectDiskUpdate.
:param name: The name of this StorageProjectDiskUpdate. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StorageProjectDiskUpdate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, StorageProjectDiskUpdate):
return True
return self.to_dict() != other.to_dict()
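# Example based only on the model above: StorageProjectDiskUpdate(name="disk-1").to_dict()
# returns {'name': 'disk-1'}, and two instances compare equal exactly when those dicts match.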
| [
"[email protected]"
] | |
e3f94648f1b2d25797273b156ae51df153c72c27 | b90975e4d7acf7c9ad26ef5fc3e6247c95e2c540 | /installation_test.py | 73686a13ee12869e973416d273dd0707ec2ee9bb | [] | no_license | lfernandez55/tensorflow_pluralsight | 720de593a010d392d35b9da7263972148ec5076b | fc519c2154b90b40900df81fcdfd72f84d4eac22 | refs/heads/master | 2020-06-13T00:13:08.906189 | 2019-06-30T04:50:32 | 2019-06-30T04:50:32 | 194,470,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import tensorflow as tf
sess = tf.Session()
#Verify we can print a string
hello = tf.constant("hello world from tensorflow")
print(sess.run(hello))
#Perform some simple math
a = tf.constant(20)
b = tf.constant(22)
print('a + b = {0}'.format(sess.run(a+b)))
| [
"[email protected]"
] | |
4be2e8189f05febeb17633e6c20fdd4ab01b805f | 268a6b7a1138dce434c6b7a54eb36cb4ae799ddd | /topo/custom/tests/test_delegate_forward.py | e01c1c60f6a5ea1c9407d803a176f66799f06906 | [
"BSD-2-Clause"
] | permissive | rubiruchi/fdeval | 2b0592853a684a8c5b87aeb363e4ccff61f47c0c | f6463c1c7549b8ac7fc39854e87c88d3cac858a0 | refs/heads/master | 2022-11-08T17:56:34.188225 | 2020-06-23T16:46:13 | 2020-06-23T16:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | from topo.custom.topo import Topo
from . import testutil as testutil
import math
from core.engine import Engine
class TestEngine(Engine):
def on_EVSwitchStats(self, switch, ev):
#
es = self.ctx.topo.get_switch_by_label('ES')
if switch.label == 'DS':
if math.isclose(ev.tick, 3):
print("@%.0f add" % ev.tick)
for id, flow in self.active_flows.items():
self.add_delegation(ev.tick, flow, switch, es)
super().on_EVSwitchStats(switch, ev)
def on_EVSwitchNewFlow(self, switch, ev):
# forward flow on next switch in path
super().on_EVSwitchNewFlow(switch, ev)
class MyTopo( Topo ):
"delegate to a switch that is used again on the path afterwards, i.e., ..->ds->es->ds->es->s2->... "
def __init__( self, ctx ):
propagation_delay = float(ctx.config.get("topo.propagation_delay", 0.5))
processing_delay = float(ctx.config.get("topo.processing_delay", 0))
# Initialize
Topo.__init__( self )
ds = self.addSwitch( 'DS', x=2, y=1, engine=TestEngine(ctx, processing_delay=processing_delay))
ds2 = self.addSwitch( 'DS2', x=2, y=1, engine=TestEngine(ctx, processing_delay=processing_delay))
es = self.addSwitch( 'ES', x=1, y=1, engine=TestEngine(ctx, processing_delay=processing_delay))
h1 = self.addHost( 'h1', x=4, y=1)
h2 = self.addHost( 'h2',x=4, y=3)
self.addLink( ds, es, capacity=1000, propagation_delay=propagation_delay )
self.addLink( ds2, es, capacity=1000, propagation_delay=propagation_delay )
self.addLink( h1, ds, capacity=1000, propagation_delay=propagation_delay )
self.addLink( h2, ds2, capacity=1000, propagation_delay=propagation_delay )
# add traffic
self.addTraffic(
dict(fg_class='Single', fg_label="f0", fg_start=0, fg_demand=100, fg_duration=10,
fg_fixed_path=['h1', 'DS', 'ES', 'DS2', 'h2']))
# call on_done if simulation is finished
ctx.on_test_finished = self.on_done
def on_done(self, ctx):
testutil.print_summary(ctx)
print(testutil.get_flow_timings(ctx))
errors = []
errors += testutil.verify_flow_timings(ctx, FLOW_TIMINGS)
return errors
#return []
def get_topo(ctx):
return MyTopo(ctx)
topos = { 'MyTopo': ( lambda: MyTopo() ) }
FLOW_TIMINGS = """{"DS->ES": {"f0": [0.5, 12.0]}, "DS->h1": {}, "DS2->ES": {},
"DS2->h2": {"f0": [1.5, 13.0]}, "ES->DS": {"f0": [3, 11.5]},
"ES->DS2": {"f0": [1.0, 12.5]}, "h1->DS": {"f0": [0, 10.5]}, "h2->DS2": {}}""" | [
"[email protected]"
] | |
593d31b488df95765e3a64530d9157de067998a2 | c8a38e65e71de888fc5b22fbd027bbaa0f3f6ef1 | /Python/142.py | 48db84b49b40e5429e83236336ce49f31599f810 | [] | no_license | skywhat/leetcode | e451a10cdab0026d884b8ed2b03e305b92a3ff0f | 6aaf58b1e1170a994affd6330d90b89aaaf582d9 | refs/heads/master | 2023-03-30T15:54:27.062372 | 2023-03-30T06:51:20 | 2023-03-30T06:51:20 | 90,644,891 | 82 | 27 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
slow = fast = head
while fast and fast.next:
slow, fast = slow.next, fast.next.next
if slow == fast:
while slow != head:
slow, head = slow.next, head.next
return head
return None
| [
"[email protected]"
] | |
417a9c86d6cf0e60446d13fbaa43104cd89c1a44 | b0f4b12ec6b14659b252f19776eb297366c9f330 | /代码/day3-5/A.FileDemo.py | 1bfc45d54864ee1dccb3618fe339ea82646998b0 | [] | no_license | vothin/code | a77259db4a3c4630bed293f979a49b676a1bd7c4 | d2b7819fd3687e0a011988fefab3e6fd70bb014a | refs/heads/master | 2020-08-31T15:48:28.155535 | 2020-01-09T08:21:57 | 2020-01-09T08:21:57 | 218,725,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,389 | py | '''
open
r    Open for reading only. The file pointer is placed at the beginning of the file. This is the default mode.
rb   Open for reading only in binary format. The file pointer is placed at the beginning of the file.
r+   Open for both reading and writing. The file pointer is placed at the beginning of the file.
rb+  Open for both reading and writing in binary format. The file pointer is placed at the beginning of the file.
w    Open for writing only. If the file already exists it is opened and edited from the start, i.e. the existing content is deleted. If it does not exist, a new file is created.
wb   Open for writing only in binary format. If the file already exists the existing content is deleted. If it does not exist, a new file is created.
w+   Open for both reading and writing. If the file already exists the existing content is deleted. If it does not exist, a new file is created.
wb+  Open for both reading and writing in binary format. If the file already exists the existing content is deleted. If it does not exist, a new file is created.
a    Open for appending. If the file already exists, the pointer is placed at the end, so new content is written after the existing content. If it does not exist, a new file is created for writing.
ab   Open for appending in binary format. If the file already exists, the pointer is placed at the end, so new content is written after the existing content. If it does not exist, a new file is created for writing.
a+   Open for reading and appending. If the file already exists, the pointer is placed at the end and the file is opened in append mode. If it does not exist, a new file is created for reading and writing.
ab+  Open for appending in binary format. If the file already exists, the pointer is placed at the end. If it does not exist, a new file is created for reading and writing.
'''
'''
Function syntax
open(name[, mode[, buffering]])    file handle = open('file path', 'mode', encoding).
name : a string containing the name of the file you want to access.
mode : decides how the file is opened: read-only, write, append, etc. (see the full list of values above). This argument is optional; the default access mode is read-only (r).
buffering : if buffering is set to 0, no buffering takes place. If buffering is 1, line buffering is used when accessing the file.
If buffering is an integer greater than 1, it gives the size of the buffer. A negative value means the system default buffer size is used.
Example: f = open('test.txt',"r")
file object methods
file.read([size]) returns the whole file if size is omitted; reading a file larger than roughly twice the available memory is a problem. f.read() returns "" (an empty string) at end of file.
file.readline() returns one line
file.readlines([size]) returns a list of size lines; if size is omitted, all lines are returned
for line in f: print line # access the file through its iterator
f.write("hello\n") # to write anything other than a string, convert it to a string first.
f.tell() returns an integer giving the current position of the file pointer (bytes from the start of the file).
f.seek(offset[, whence]) moves the file pointer.
f.close() always close the file after opening it, otherwise the file content may be lost.
'''
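# A minimal usage sketch (added for illustration; 'test.txt' is just a placeholder file name):
with open('test.txt', 'w') as f:
    f.write("hello\n")          # 'w' creates/truncates the file and writes one line
with open('test.txt', 'r') as f:
    for line in f:              # iterate over the file line by line
        print(line.rstrip())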
| [
"[email protected]"
] | |
716cc2c81ec577e777a6a3cfc47ba680a6cadfc7 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_plectrums.py | 0d3dda010f944bbbef6409f78aeac191753a0607 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.nouns._plectrum import _PLECTRUM
#calss header
class _PLECTRUMS(_PLECTRUM, ):
def __init__(self,):
_PLECTRUM.__init__(self)
self.name = "PLECTRUMS"
self.specie = 'nouns'
self.basic = "plectrum"
self.jsondata = {}
| [
"[email protected]"
] | |
7815604a4051af01935361e7b7859ccd85e3e71b | ea393959886a5cd13da4539d634f2ca0bbcd06a2 | /283.py | b2b4f2cad4536764cd733094eaf98757b705c7b1 | [] | no_license | zhangchizju2012/LeetCode | f605f35b82f16282559af71e4e61ec2629a90ebc | 0c4c38849309124121b03cc0b4bf39071b5d1c8c | refs/heads/master | 2020-04-05T12:12:14.810639 | 2018-08-09T10:24:52 | 2018-08-09T10:24:52 | 81,021,830 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 14 00:24:22 2017
@author: zhangchi
"""
class Solution(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
indexList = []
for index, item in enumerate(nums):
if item == 0:
indexList.append(index)
        indexList.append(len(nums)) # pretend there is one more zero right past the end, to pair with the loop below
count = 0
for i in xrange(len(indexList)-1):
nums[indexList[i]-count:indexList[i+1]-count-1] = nums[indexList[i]+1:indexList[i+1]]
            count += 1 # each shift frees one zero slot, so later indices subtract count, and count grows by one per pass
for i in xrange(indexList[-1]-count,len(nums)):
nums[i] = 0
#return nums
s = Solution()
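# Extra in-place check (added for illustration): zeros move to the end, relative order of the rest is kept
nums = [0, 1, 0, 3, 12]
s.moveZeroes(nums)
print nums # expected [1, 3, 12, 0, 0]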
print s.moveZeroes([]) | [
"[email protected]"
] | |
900bbc907bb10a759b672147517f8448c7ef5e21 | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/fusion/tests/wpst_crm/feature_tests/C7000/Supershaw_TAA_FA_DA/validate.py | d1b1f709416576fdb725e7dd9fe4c24c42439338 | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py |
'''
This module contains the code to get the IP's of the
ethernet networks. Using the IP's it can login to the
server and execute the diskspd commands to start the traffic.
Diskspd results or ouput will be redirected to the log file
'''
import paramiko
import os
import time
import re
import threading
import Queue
def execute_diskspd(ip, username, passwd, diskspd_cmd):
'''
Execute the diskSPD tool Command
'''
try:
single_cmd = "psexec \\\\" + ip + " -u " + username + " -p " + passwd + " " +\
diskspd_cmd
output = os.system(single_cmd)
return (output)
except Exception as e:
return (e)
def validate_windows_lun_count(ip, username, passwd, diskspd_cmd):
output = execute_diskspd(ip,
username, passwd, diskspd_cmd)
with open("C:\\WINDOWSLUN.txt") as f:
lines = f.readlines()
print lines
count = 0
for i in lines:
if "3PARdata" in i:
count = count + 1
print count
return count
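if __name__ == "__main__":
    # Illustrative call only: the host, credentials and remote command are placeholders, and the
    # remote command is expected to leave C:\WINDOWSLUN.txt on the machine running this script.
    lun_count = validate_windows_lun_count("10.0.0.5", "Administrator", "password", "lun_scan.bat")
    print lun_count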
| [
"[email protected]"
] | |
7d0fa9b4b4f4b3082220c3ee9b07b146fdbbd204 | 9cbd088a0f7288acee3c1d736ef85e516b86d8fe | /twitter_tools.py | f3b7643e42816d3d937696a696eca0c0ddfeb875 | [] | no_license | fjccoin/twitbots | 91ba75a8123c9c21cf20d3e235075f5e7b0ebd5d | 513a6df705034aeb61b0d7ea2fccfe6c722160d9 | refs/heads/master | 2020-04-08T02:42:31.602419 | 2015-11-12T08:38:01 | 2015-11-12T08:38:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,719 | py | import nltk
from collections import OrderedDict, defaultdict
import re
import requests
from bs4 import BeautifulSoup
from urlparse import urlparse
SLEEP_COMMAND = ' go to sleep'
WAKE_COMMAND = ' wake up'
QUIET_COMMAND = ' no reply'
LOUD_COMMAND = ' reply on'
ADMIN_ID = 21455761
def filter_tweet(tweet, userid, botname, friends=None):
skip = False
sleep = False
wake = False
debug = False
end_debug = False
# filter RTs
if tweet.get('retweet_count') > 0:
skip = True
# only reply to target user
sender = None
""" tweets to reply to:
if sender is owner and not a reply
if sender if owner's friend and mentions my name
"""
try:
sender = tweet.get('user').get('id')
if sender not in [userid, ADMIN_ID] + friends:
skip = True
except:
sender = None
skip = True
t = tweet.get('text')
if not t:
skip = True
else:
t = t.lower()
if t[:3] == "rt ":
skip = True
if sender in [userid, ADMIN_ID]:
if SLEEP_COMMAND in t:
sleep = True
elif WAKE_COMMAND in t:
wake = True
if QUIET_COMMAND in t:
debug = True
elif LOUD_COMMAND in t:
end_debug = True
if tweet.get('in_reply_to_status_id') and botname not in t:
skip = True
if t[0] == "@" and botname not in t:
skip = True
elif botname not in t:
skip = True
elif tweet.get('in_reply_to_status_id'):
skip = True
return skip, sleep, wake, debug, end_debug
def word_count(sentence, words):
s = nltk.word_tokenize(sentence)
return len(set(s) & set(words))
def ok_tweet(c, minlen, maxlen):
if c.endswith(':') or c.endswith(','):
return False
if len(c) > maxlen or len(c) < minlen:
return False
else:
return True
GARBAGE = [",", "--", "\'s", ".", "``","n\'t","\'\'",")","(","%","!","\'","?","percent",":"]
# semantic tools
def remove_stopwords(documents, sents=False):
texts = []
for d in documents:
if sents:
doc = d #d[0]+d[1]
else:
doc = documents[d]
doc = clean_str(doc)
tokens = nltk.word_tokenize(doc.lower())
tokens = [t for t in tokens if t not in nltk.corpus.stopwords.words('english')]
tokens = [t for t in tokens if t not in GARBAGE]
texts.append(tokens)
return texts
def clean_str(text):
# remove words that start with @
# remove urls
y = " ".join(filter(lambda x:(x[0]!='@' and x[:4]!='http'), text.split()))
return re.sub('[#$*|]', '', y)
def remove_infreq(inputs, minfreq):
frequency = defaultdict(int)
for text in inputs:
for token in text:
frequency[token] += 1
texts = [[token for token in text if frequency[token] > minfreq]
for text in inputs]
return texts
NEWS_DOMAINS = "thenewyorktimes moneybeat"
""" deal with urls in tweets """
def pull_headlines(tweet):
ent = tweet.get('entities')
urls = ent.get('urls')
t = ""
if urls:
for u in urls:
try:
url = u.get('expanded_url')
r = requests.get(url)
headlines = BeautifulSoup(r.content).find('title')
if not headlines:
headlines = BeautifulSoup(r.content).find('h1')
# remove domain
domain = '{uri.netloc}'.format(uri=urlparse(url)) + NEWS_DOMAINS
hwords = [h for h in headlines.getText().split() if h.lower() not in domain]
t = "%s %s" % (t,' '.join(hwords))
except:
continue
# also pull quoted tweets
if tweet.get('is_quote_status'):
try:
quote = tweet.get('quoted_status').get('text')
except:
quote = ''
t+=quote
return t
""" break and chunk tweets """
def send_tweet(api, tweet, id_orig=None, username=None):
twit = api.request('statuses/update', {'status': username + tweet, 'in_reply_to_status_id': id_orig})
# if too long, break it up
r = twit.response.json()
if username:
maxlen = 139-len(username)
else:
maxlen = 139
if r.get('errors'):
tweets = break_tweet(tweet, maxlen)
id_str = id_orig
for rt in tweets:
t = api.request('statuses/update', {'status': username + rt, 'in_reply_to_status_id': id_str})
rt_resp = t.response.json()
if rt_resp.get('errors'):
continue
else:
id_str = rt_resp.get('id_str')
def chunks(l, n):
"""Yield successive n-sized chunks from l.
Chunks prioritize commas. after that, spaces
"""
q = []
total = 0
remainder = l
while len(remainder) > 0:
if len(remainder) <= n:
            q.append(remainder)  # the rest already fits in one chunk ('idx' is not defined on a first pass)
break
x = remainder[:n]
idx = x.rfind(',')
if idx > 0:
if idx > 50:
q.append(remainder[:idx+1])
remainder = remainder[idx+1:]
continue
idx = x.rfind(' ')
q.append(remainder[:idx])
remainder = remainder[idx+1:]
#for i in xrange(0, len(l), n):
# yield l[i:i+n]
return q
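# Worked example (added for illustration): chunks("one, two, three four five", 12) returns
# ["one, two,", "three four", "five"] - each piece stays within n characters, breaking at a
# comma only when one falls late enough in the window (idx > 50), otherwise at the last space.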
def break_tweet(tweet, n):
# first break into sentences
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
rtweets = sent_detector.tokenize(tweet.strip())
for idx, rt in enumerate(rtweets):
if len(rt) > n:
clauses = rt.split('\n')
for cdx, c in enumerate(clauses):
d = '?'
commas = [e+d for e in c.split(d) if e != '']
commas[-1] = commas[-1][:-1]
clauses[cdx:cdx+len(commas)] = commas
rtweets[idx:idx+len(clauses)] = clauses
for idx, rt in enumerate(rtweets):
if len(rt) > n:
chunkt = chunks(rt, n)
rtweets[idx:idx+len(chunkt)] = chunkt
return rtweets
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def create_tweet(text, username):
""" create a tweet from mult long sentences
This process will vary by user.
"""
# up to 2 tweets
#maxlen = 263-2*len(username)
maxlen = 139-len(username)
for t in text:
if ok_tweet(t, 40, maxlen):
return t
# go through again and break them up
else:
sents = sent_detector.tokenize(t)
for s in sents:
if ok_tweet(s, 40, maxlen):
return s
return None | [
"[email protected]"
] | |
16f5f3e683d884969d7b2a96646d43ae6d346d91 | 91b2fb1fb6df216f2e365c3366bab66a567fc70d | /Week06/每日一题/857. 雇佣 K 名工人的最低成本.py | a439d49b3ad77a70ab1c5a3a7846aa901ac77d1d | [] | no_license | hrz123/algorithm010 | d17aee642f03f607a7984beb099eec18f2de1c8e | 817911d4282d2e226518b3533dff28282a91b3d4 | refs/heads/master | 2022-12-20T14:09:26.365781 | 2020-10-11T04:15:57 | 2020-10-11T04:15:57 | 270,178,423 | 1 | 0 | null | 2020-06-07T03:21:09 | 2020-06-07T03:21:09 | null | UTF-8 | Python | false | false | 5,322 | py | # 857. 雇佣 K 名工人的最低成本.py
import heapq
from typing import List
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
v = list(zip(quality, wage))
v.sort(key=lambda t: t[1] / t[0])
priority_queue = []
ans = float('inf')
total = 0
for q, w in v:
total += q
heapq.heappush(priority_queue, -q)
if len(priority_queue) > K:
total += heapq.heappop(priority_queue)
if len(priority_queue) == K:
ans = min(ans, total * w / q)
return ans
# The total wage paid depends on two things: it is proportional to the largest wage/quality
# ratio in the chosen group, and to the sum of the group's quality values.
# We want to make both of these factors small at the same time.
# So we walk along the boundary of the curve formed by the wage/quality ratio and the group's
# total quality, looking for the minimum.
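# Worked through by hand for the first test case in main() below (quality=[10,20,5],
# wage=[70,50,30], K=2): the wage/quality ratios are 7.0, 2.5 and 6.0; scanning workers in
# ratio order and keeping the K smallest qualities, the best group is {q=10, q=5} paid at
# ratio 7.0, i.e. (10 + 5) * 7.0 = 105.0.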
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
v = list(zip(quality, wage))
v.sort(key=lambda e: e[1] / e[0])
heap = []
res = float('inf')
_sum_q = 0
for q, w in v:
_sum_q += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, _sum_q * w / q)
_sum_q += heapq.heappop(heap)
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = list(zip(quality, wage))
zv.sort(key=lambda x: x[1] / x[0])
heap = []
res = float('inf')
q_sum = 0
for q, w in zv:
q_sum += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, q_sum * w / q)
q_sum += heapq.heappop(heap)
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = list(zip(quality, wage))
zv.sort(key=lambda x: x[1] / x[0])
heap = []
q_sum = 0
res = float('inf')
for q, w in zv:
q_sum += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, q_sum * w / q)
q_sum += heapq.heappop(heap)
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = list(zip(quality, wage))
zv.sort(key=lambda x: x[1] / x[0])
heap = []
res = float('inf')
q_sum = 0
for q, w in zv:
q_sum += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, q_sum * w / q)
q_sum += heapq.heappop(heap)
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = list(zip(wage, quality))
zv.sort(key=lambda x: x[0] / x[1])
heap = []
res = float('inf')
qs = 0
for w, q in zv:
qs += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, w / q * qs)
qp = -heapq.heappop(heap)
qs -= qp
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = list(zip(wage, quality))
zv.sort(key=lambda x: x[0] / x[1])
heap = []
res = float('inf')
qs = 0
for w, q in zv:
qs += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, w / q * qs)
qp = -heapq.heappop(heap)
qs -= qp
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = [*zip(quality, wage)]
zv.sort(key=lambda x: x[1] / x[0])
heap = []
q_sum = 0
res = float('inf')
for q, w in zv:
heapq.heappush(heap, -q)
q_sum += q
if len(heap) == K:
res = min(res, q_sum * w / q)
q_sum += heapq.heappop(heap)
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = [*zip(quality, wage)]
zv.sort(key=lambda x: x[1] / x[0])
q_sum = 0
heap = []
res = float('inf')
for q, w in zv:
q_sum += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, q_sum * w / q)
q_sum += heapq.heappop(heap)
return res
def main():
sol = Solution()
quality = [10, 20, 5]
wage = [70, 50, 30]
K = 2
res = sol.mincostToHireWorkers(quality, wage, K)
print(res)
quality = [3, 1, 10, 10, 1]
wage = [4, 8, 2, 2, 7]
K = 3
res = sol.mincostToHireWorkers(quality, wage, K)
print(res)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1e722c8b3d71456db9c90dd4ee5c9bde1a02f8c7 | 7dfb5942ae4721b7084bde958d632dd90096328a | /function_generator/error_models.py | b75cd3ae00e6daae1112f1a68f604e5b2ace591f | [
"Apache-2.0"
] | permissive | blackwer/function_generator | f392ae0041f56d235a959ce3e54c1f865baf3cca | 91025e67a2c64009f0384ee35466bb04f0819fce | refs/heads/master | 2021-11-26T03:20:49.104389 | 2021-11-10T14:23:34 | 2021-11-10T14:23:34 | 219,051,758 | 9 | 2 | Apache-2.0 | 2021-11-09T13:34:12 | 2019-11-01T19:42:48 | C++ | UTF-8 | Python | false | false | 291 | py | import numpy as np
def standard_error_model(coefs, f):
return np.abs(coefs[-2:]).max()/max(1, np.abs(coefs[0]))
def relative_error_model(coefs, f):
return np.abs(coefs[-2:]).max()/np.abs(coefs[0])
def new_error_model(coefs, f):
return np.abs(coefs[-2:]).max()/np.abs(f).min()
| [
"[email protected]"
] | |
52a4d27d2d45abfa176ad4c8edd1e8e1b6e7298c | 1b126876948b3d05f89e058d4642405f192fb858 | /src/strava_api/Client.py | ff70413ffc370f22346a23c172543126be8f72e8 | [
"MIT"
] | permissive | yknot/strava_api | 6ecc972132156432cdc4e19ffe23fd5045fa765a | b31080b8718a6c26399cfc7c36b77f36a2bed1d3 | refs/heads/master | 2023-05-25T04:51:02.822053 | 2020-07-18T04:44:35 | 2020-07-18T04:44:35 | 279,205,963 | 0 | 0 | MIT | 2023-05-23T00:04:21 | 2020-07-13T04:01:33 | Python | UTF-8 | Python | false | false | 1,091 | py | """Main module."""
import requests
from .Athlete import Athlete
class Client:
"""Class to manage your Strava API Client"""
def __init__(
self, client_id: str, client_secret: str, auth_token: str, refresh_token: str
) -> None:
"""initialize client with application attributes"""
self.client_id = client_id
self.client_secret = client_secret
self.auth_token = auth_token
self.refresh_token = refresh_token
# create variables
self.athlete = None
def set_athlete(self, auth_code: str) -> None:
try:
response = requests.post(
url="https://www.strava.com/oauth/token",
params={
"client_id": self.client_id,
"client_secret": self.client_secret,
"code": auth_code,
"grant_type": "authorization_code",
},
)
self.athlete = Athlete(response.json())
except requests.exceptions.RequestException:
print("HTTP Request failed")
| [
"[email protected]"
] | |
b33759539b2bc335df52bacedf3a8424c3ec86c0 | c8da3539397dbd49388719fb6d8720db61e859a7 | /catkin_ws/build/hector_slam/hector_geotiff_plugins/catkin_generated/pkg.develspace.context.pc.py | b98873d668c4614483b338c03fe900e4a959193b | [] | no_license | pinkpinkheart/ROS | a465c9e967cd1c71da7648a62d1cc8af342b70df | bd91772e24b72d466a90d2dd65f54be4be49ce99 | refs/heads/master | 2023-03-12T17:55:40.650415 | 2021-03-03T09:20:00 | 2021-03-03T09:20:00 | 344,137,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_geotiff_plugins"
PROJECT_SPACE_DIR = "/home/cy/workspace/ROS/catkin_ws/devel"
PROJECT_VERSION = "0.3.5"
| [
"[email protected]"
] | |
9e01ee06ccb0d0c3f6fcbb90b6af174e4d295b4a | 96086ae5e7bfa1e40159f919269a90c83e472326 | /opengever/usermigration/plone_tasks.py | 121756f0302306a726785ba83d2b3607d1afb842 | [] | no_license | lukasgraf/opengever.core | 6fc313717fbec3692354e56c2c3293789076a389 | a15c4ff8e0d5494906d7de46a43e3427c8d2d49f | refs/heads/master | 2020-12-01T11:38:46.721555 | 2018-06-18T10:13:09 | 2018-06-18T10:13:09 | 57,871,187 | 0 | 0 | null | 2016-05-02T06:59:58 | 2016-05-02T06:59:58 | null | UTF-8 | Python | false | false | 6,253 | py | """
Migrate user IDs in Plone tasks (issuers, responsibles, responses)
"""
from opengever.ogds.base.utils import ogds_service
from opengever.task.adapters import IResponseContainer
from opengever.task.task import ITask
from opengever.usermigration.exceptions import UserMigrationException
from plone import api
import logging
logger = logging.getLogger('opengever.usermigration')
FIELDS_TO_CHECK = ('responsible', 'issuer')
class PloneTasksMigrator(object):
"""This migrator changes the `issuer` and `responsible` fields on
Plone tasks, as well as updating responses on tasks as needed.
It does not however fix local roles assigned to Plone tasks - these can
be fixed using the "local roles" migration in ftw.usermigration.
"""
def __init__(self, portal, principal_mapping, mode='move', strict=True):
self.portal = portal
self.principal_mapping = principal_mapping
if mode != 'move':
raise NotImplementedError(
"PloneTasksMigrator only supports 'move' mode")
self.mode = mode
self.strict = strict
# Keep track of tasks that need reindexing
self.to_reindex = set()
self.task_moves = {
'responsible': [],
'issuer': [],
}
self.response_moves = {
'creator': [],
'responsible_before': [],
'responsible_after': [],
}
def _verify_user(self, userid):
ogds_user = ogds_service().fetch_user(userid)
if ogds_user is None:
msg = "User '{}' not found in OGDS!".format(userid)
raise UserMigrationException(msg)
def _fix_responses(self, obj):
container = IResponseContainer(obj)
path = '/'.join(obj.getPhysicalPath())
for response_no, response in enumerate(container):
response_identifier = '%s - Response #%s' % (path, response_no)
# Fix response creator
creator = getattr(response, 'creator', '')
if creator in self.principal_mapping:
logger.info("Fixing 'creator' for %s" % response_identifier)
new_userid = self.principal_mapping[creator]
response.creator = new_userid
self.response_moves['creator'].append((
response_identifier, creator, new_userid))
for change in response.changes:
# Fix responsible [before|after]
if change.get('id') == 'responsible':
before = change.get('before', '')
if before in self.principal_mapping:
new_userid = self.principal_mapping[before]
change['before'] = unicode(new_userid)
# Need to flag changes to track mutations - see #3419
response.changes._p_changed = True
logger.info(
"Fixed 'responsible:before' for change in %s "
"(%s -> %s)" % (
response_identifier, before, new_userid))
self.response_moves['responsible_before'].append((
response_identifier, before, new_userid))
after = change.get('after', '')
if after in self.principal_mapping:
new_userid = self.principal_mapping[after]
change['after'] = unicode(new_userid)
# Need to flag changes to track mutations - see #3419
response.changes._p_changed = True
logger.info(
"Fixed 'responsible:after' for change in %s "
"(%s -> %s)" % (
response_identifier, after, new_userid))
self.response_moves['responsible_after'].append((
response_identifier, after, new_userid))
def _migrate_plone_task(self, obj):
task = ITask(obj)
for field_name in FIELDS_TO_CHECK:
# Check 'responsible' and 'issuer' fields
old_userid = getattr(task, field_name, None)
if old_userid in self.principal_mapping:
path = '/'.join(obj.getPhysicalPath())
logger.info('Fixing %r for %s' % (field_name, path))
new_userid = self.principal_mapping[old_userid]
setattr(task, field_name, new_userid)
self.to_reindex.add(obj)
self.task_moves[field_name].append(
(path, old_userid, new_userid))
def migrate(self):
catalog = api.portal.get_tool('portal_catalog')
# Verify all new users exist before doing anything
for old_userid, new_userid in self.principal_mapping.items():
self._verify_user(new_userid)
all_tasks = [b.getObject() for b in catalog.unrestrictedSearchResults(
object_provides=ITask.__identifier__)]
for obj in all_tasks:
self._migrate_plone_task(obj)
self._fix_responses(obj)
for obj in self.to_reindex:
# Reindex 'responsible' and 'issuer' for changed objects.
logger.info('Reindexing %s' % '/'.join(obj.getPhysicalPath()))
obj.reindexObject(idxs=FIELDS_TO_CHECK)
results = {
'task_issuers': {
'moved': self.task_moves['issuer'],
'copied': [],
'deleted': []},
'task_responsibles': {
'moved': self.task_moves['responsible'],
'copied': [],
'deleted': []},
'response_creators': {
'moved': self.response_moves['creator'],
'copied': [],
'deleted': []},
'response_responsible_before': {
'moved': self.response_moves['responsible_before'],
'copied': [],
'deleted': []},
'response_responsible_after': {
'moved': self.response_moves['responsible_after'],
'copied': [],
'deleted': []},
}
return results
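# Hypothetical invocation sketch (not part of this module); a real run happens inside an
# opengever/Plone instance where plone.api's portal.get() returns the site root:
#   migrator = PloneTasksMigrator(api.portal.get(), {'old.userid': 'new.userid'}, mode='move')
#   results = migrator.migrate()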
| [
"[email protected]"
] | |
d89d76b57a914617374ae2be28918b6019c91b82 | 2cb07ae51d1de3e8bdff12e5628e7d142a98d970 | /Aula3/Problem15_12_4.py | 3454f557c9f57d8e47ebee3ce6450c7593be0a3e | [] | no_license | juanfdg/JuanFreireCES22 | e7c40a11584a86e1f81520d9da0bbdd58ea48e02 | 4d80b32163ea6d3f4c5f35375969a748022be438 | refs/heads/master | 2021-04-27T00:50:48.754467 | 2018-07-03T03:29:36 | 2018-07-03T03:29:36 | 122,661,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | class Point():
def __init__(self, x, y):
self.x = x
self.y = y
    # Method won't work when other_point.x - self.x == 0 (a vertical line has undefined slope)
def get_line_to(self, other_point):
slope = (other_point.y-self.y)/(other_point.x-self.x)
linear_coef = self.y - slope*self.x
return (slope, linear_coef)
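    # A possible guard for the vertical-line case noted above (illustrative, not part of the
    # original exercise): return None when the x coordinates match, since the slope is undefined.
    # def get_line_to_safe(self, other_point):
    #     return None if other_point.x == self.x else self.get_line_to(other_point)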
print(Point(4, 11).get_line_to(Point(6, 15))) | [
"--global"
] | --global |
8d6cca91d5489b3dabcf10d8c98523f7f3c593f8 | 9924e0dc6e0e8c8665508a218636f391451a153f | /Extras/use_flacco.py | 2e8dfe4b9cb62fa2b2d599de9da641448cd1f9e8 | [] | no_license | ai-se/ExploratoryLandscapeAnalysis | b531d374221397ed91f43eeff00217aa85797881 | c338fe93bb11881d25b6000853ca7ac0be69e212 | refs/heads/master | 2020-07-13T12:52:04.601453 | 2016-09-23T21:21:08 | 2016-09-23T21:21:08 | 66,961,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | from __future__ import division
import pyRserve
from os import listdir
import pandas as pd
from random import shuffle
def df_to_list_str(df):
columns = df.columns.tolist()
list = []
for column in columns:
list.extend(df[column].tolist())
result_str = ""
for i, l in enumerate(list):
result_str += str(l)
if i<len(list)-1: result_str += ","
return result_str
def get_ela_features(independent, dependent):
# rcmd = pyRserve.connect(host='localhost', port=6311)
# print(rcmd.eval('rnorm(100)'))
features = {}
i_ncols = len(independent.columns)
str_indep = "matrix(c(" + df_to_list_str(independent) + "), ncol=" + str(i_ncols) + ")"
str_dep = "matrix(c(" + df_to_list_str(dependent) + "), ncol=" + str(1) + ")"
assert(len(independent) == len(dependent)), "sanity check failed"
conn = pyRserve.connect(host='localhost', port=6311)
conn.voidEval("library('flacco')")
conn.voidEval("X <- " + str_indep)
conn.voidEval("y<- " + str_dep)
conn.voidEval("feat.object = createFeatureObject(X = X, y = y, blocks = 3)")
fs1 = conn.r("calculateFeatureSet(feat.object, set = 'ela_distr')")
for name, value in zip(fs1.keys, fs1.values):
features[name] = value
# fs2 = conn.r("calculateFeatureSet(feat.object, set = 'ela_level')")
# for name, value in zip(fs2.keys, fs2.values):
# features[name] = value
# fs3 = conn.r("calculateFeatureSet(feat.object, set = 'ela_meta')")
# for name, value in zip(fs3.keys, fs3.values):
# features[name] = value
# fs4 = conn.r("calculateFeatureSet(feat.object, set = 'cm_grad')")
# for name, value in zip(fs4.keys, fs4.values):
# features[name] = value
return features
if __name__ == "__main__":
files = ["../FeatureModels/" + f for f in listdir("../FeatureModels") if ".csv" in f]
for filename in ["../FeatureModels/BerkeleyDB.csv"]:
contents = pd.read_csv(filename)
independent_columns = [c for c in contents.columns if "$<" not in c]
dependent_column = [c for c in contents.columns if "$<" in c]
independents = contents[independent_columns]
raw_dependents = contents[dependent_column]
dependents = (raw_dependents - raw_dependents.mean()) / (raw_dependents.max() - raw_dependents.min())
indexes = range(len(contents))
shuffle(indexes)
n = 100#min(n, int(len(contents) * 0.1))
samples = indexes[:n]
independent_values = independents[independents.index.isin(samples)]
dependent_values = dependents[dependents.index.isin(samples)]
print filename
print get_ela_features(independent_values, dependent_values)
exit() | [
"[email protected]"
] | |
26c2f5e55d19a42e4299bc3c03c1aa8d472539d8 | 38a42a205eaa5a0a46989c95f0b01f7e04b96a9e | /uoft/CSC148H1F Intro to Comp Sci/@week3_stacks/@@Exercise3/stack_ex.py | 25de6d1577c709a79973a271d6b1427ee3ffe857 | [
"MIT"
] | permissive | Reginald-Lee/biji-ben | d24cd1189ca3e9ed7b30e5b20a40137e8d6d4039 | 37009dfdbef9a15c2851bcca2a4e029267e6a02d | refs/heads/master | 2023-05-06T23:06:49.819088 | 2020-06-10T12:07:47 | 2020-06-10T12:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | # Exercise 3: More Stack Exercises
#
# CSC148 Fall 2014, University of Toronto
# Instructor: David Liu
# ---------------------------------------------
# STUDENT INFORMATION
#
# List your information below, in format
# <full name>, <utorid>
# <Rui Qiu>, <999292509>
# ---------------------------------------------
from stack import Stack, EmptyStackError
class SmallStackError(Exception):
print("The stack has fewer than two elements.")
def reverse_top_two(stack):
""" (Stack) -> NoneType
Reverse the top two elements on stack.
Raise a SmallStackError if stack has fewer than two elements.
>>> stack = Stack()
>>> stack.push(1)
>>> stack.push(2)
>>> reverse_top_two(stack)
>>> stack.pop()
1
>>> stack.pop()
2
"""
try:
stack.is_empty() == False
except:
raise EmptyStackError
else:
try:
t1 = stack.pop()
t2 = stack.pop()
stack.push(t1)
stack.push(t2)
except:
raise SmallStackError
return stack
def reverse(stack):
""" (Stack) -> NoneType
Reverse all the elements of stack.
>>> stack = Stack()
>>> stack.push(1)
>>> stack.push(2)
>>> reverse(stack)
>>> stack.pop()
1
>>> stack.pop()
2
"""
temp = Stack()
temp2 = Stack()
while not stack.is_empty():
stuff = stack.pop()
temp.push(stuff)
while not temp.is_empty():
stuff = temp.pop()
temp2.push(stuff)
while not temp2.is_empty():
stuff = temp2.pop()
stack.push(stuff)
return stack | [
"[email protected]"
] | |
a8d4ea1ab28833bfd43a58cd9b108e03ae0b7c42 | 9d90b664ebbd11a57ee6156c528081551b98055b | /wsgi/local_data/brython_programs/tuple1.py | fb6bc882e1ee285aa89bedf32f13c2ec02f31f08 | [] | no_license | 2014cdag21/c21 | d4f85f91ba446feb6669a39903dda38c21e8b868 | faf4b354f7d1d4abec79c683d7d02055c6bab489 | refs/heads/master | 2020-06-03T17:54:16.144118 | 2014-06-20T09:29:02 | 2014-06-20T09:29:02 | 19,724,479 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | d = (11,12,13,'asdf',14,15.0)
# Note - tuples are immutable types
# Common operations:
# length of a tuple
print(len(d))
# indexation (in Python it starts from zero)
print(d[0], d[1])
# slicing
print(d[0:2]) # equals to (11, 12)
print(d[2:-1]) # equals to (13, 'asdf', 14)
print(d[:2]) # same as d[0:2], equals to (11, 12)
print(d[3:]) # equals to ('asdf', 14, 15.0)
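# concatenation and indexing helpers (added for illustration) - these build new objects, since tuples are immutable
print(d + (16, 17)) # equals to (11, 12, 13, 'asdf', 14, 15.0, 16, 17)
print(d.count(11), d.index('asdf')) # prints 1 3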
# contains
print((15 in d, 100 in d)) # returns (True, False) | [
"[email protected]"
] | |
0e8f422dbaf4ff83f83fc49dc9410897d3314dcd | 7e9daf6a2a3ebfb969e793f92afc0dc5f1c2fc35 | /cat_mouse.py | 940150c58ad59356e7f9220c3b08a3bfc16612a7 | [] | no_license | NARESHSWAMI199/5-Star-On-Hacker-Rank-Python | e43ce5cb3429d2a683c37e6f4ba6440d073d47c2 | 51f245d1d0966de21ddf861b22fe3379e7c8a0a7 | refs/heads/main | 2023-02-25T03:05:25.330205 | 2021-01-19T13:49:27 | 2021-01-19T13:49:27 | 325,296,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | quries_size = int(input())
maxmum = 0
for i in range(quries_size):
query = list(map(int,input().split()))
if query[0] > query[2]:
dist_of_a = query[0] - query[2]
else :
dist_of_a = query[2]- query[0]
if query[1] > query[2]:
dist_of_b = query[1] - query[2]
else :
dist_of_b = query[2]- query[1]
if dist_of_a < dist_of_b:
print("Cat A")
elif dist_of_b < dist_of_a:
print("Cat B")
else :
print("Mouse C")
| [
"[email protected]"
] | |
d4952e4625b9ebd20f0d0deb21cdd0ca66b480cf | faa0ce2a95da958be3bfb171cdff29eeb43c3eb6 | /py-exercises/JulieTestModule/characters/shadow.py | f71a4d7d759a9855c1f3ccbf67630318ea88332d | [] | no_license | julianapeace/digitalcrafts-exercises | 98fe4e20420c47cf9d92d16c45ac60dc35a49a6a | 98e6680138d55c5d093164a47da53e1ddb6d064c | refs/heads/master | 2021-08-30T04:17:09.997205 | 2017-12-16T00:22:22 | 2017-12-16T00:22:22 | 103,176,043 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from characters.base import Character
class Shadow(Character):
def __init__(self, name = 'Shadow', health = 1, power = 5, armor = 0, evade = 0, coincount = 4):
super().__init__(name, health, power, armor, evade, coincount)
| [
"[email protected]"
] | |
f66c598f24bf258557c6b380eb6f1b14b1fa4d9a | 67a7c314fc99d9cd7a677fcb6bc2b6dfa20a9cff | /spambayes-1.0.4/utilities/dump_cdb.py | 49728d0958b67c26cdc52128cfdcf1d6f116874e | [
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | Xodarap/Eipi | 7ebbb9fd861fdb411c1e273ea5d2a088aa579930 | d30997a737912e38316c198531f7cb9c5693c313 | refs/heads/master | 2016-09-11T06:28:01.333832 | 2011-05-03T15:35:20 | 2011-05-03T15:35:20 | 1,367,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | #! /usr/bin/env python
RC_DIR = "~/.spambayes"
DB_FILE = RC_DIR + "/wordprobs.cdb"
import sys
import os
DB_FILE = os.path.expanduser(DB_FILE)
from spambayes.cdb import Cdb
def main():
if len(sys.argv) == 2:
db_file = sys.argv[1]
else:
db_file = os.path.expanduser(DB_FILE)
db = Cdb(open(db_file, 'rb'))
items = []
for k, v in db.iteritems():
items.append((float(v), k))
items.sort()
for v, k in items:
print k, v
if __name__ == "__main__":
main()
| [
"eipi@mybox.(none)"
] | eipi@mybox.(none) |
4cf0f265880518fe33637b3e56d940727ba2b525 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-ief/huaweicloudsdkief/v1/model/delete_app_version_response.py | 8ee4920d3d770b7d585ed8241983204f95e97477 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 2,447 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteAppVersionResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""DeleteAppVersionResponse
The model defined in huaweicloud sdk
"""
super(DeleteAppVersionResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteAppVersionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
0f5b526ae48d78b8406f0db682c36822b46482bf | 5cb8f3b4db461de2e84084a6f0992955c2ee10ce | /txtrader/rtx.py | 3fb95d5dffab7f0b5662f99bf368b1c63a40fdfa | [
"MIT"
] | permissive | dadaxiaoxiaobaba/txTrader | 5bf134210da839adf7b6c15f1de365e4cd80facd | 9ad2afd37c81c2408632b3b5f7dfa4749586e6a6 | refs/heads/master | 2020-12-30T10:12:24.467623 | 2017-07-11T19:44:07 | 2017-07-11T19:44:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,031 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
rtx.py
------
RealTick TWS API interface module
Copyright (c) 2015 Reliance Systems Inc. <[email protected]>
Licensed under the MIT license. See LICENSE for details.
"""
import sys
import mx.DateTime
import types
import datetime
from uuid import uuid1
import json
import time
from config import Config
DEFAULT_CALLBACK_TIMEOUT = 5
# allow disable of tick requests for testing
ENABLE_TICK_REQUESTS = True
DISCONNECT_SECONDS = 15
SHUTDOWN_ON_DISCONNECT = True
ADD_SYMBOL_TIMEOUT = 5
from twisted.python import log
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import reactor, defer
from twisted.internet.task import LoopingCall
from twisted.web import server
from socket import gethostname
class API_Symbol():
def __init__(self, api, symbol, client_id, init_callback):
self.api = api
self.id = str(uuid1())
self.output = api.output
self.clients = set([client_id])
self.callback = init_callback
self.symbol = symbol
self.fullname = ''
self.bid = 0.0
self.bid_size = 0
self.ask = 0.0
self.ask_size = 0
self.last = 0.0
self.size = 0
self.volume = 0
self.close = 0.0
self.rawdata = ''
self.api.symbols[symbol] = self
self.last_quote = ''
self.output('API_Symbol %s %s created for client %s' %
(self, symbol, client_id))
self.output('Adding %s to watchlist' % self.symbol)
self.cxn = api.cxn_get('TA_SRV', 'LIVEQUOTE')
cb = API_Callback(self.api, self.cxn.id, 'init_symbol', RTX_LocalCallback(
self.api, self.init_handler), ADD_SYMBOL_TIMEOUT)
self.cxn.request('LIVEQUOTE', '*', "DISP_NAME='%s'" % symbol, cb)
def __str__(self):
return 'API_Symbol(%s bid=%s bidsize=%d ask=%s asksize=%d last=%s size=%d volume=%d close=%s clients=%s' % (self.symbol, self.bid, self.bid_size, self.ask, self.ask_size, self.last, self.size, self.volume, self.close, self.clients)
def __repr__(self):
return str(self)
def export(self):
return {
'symbol': self.symbol,
'bid': self.bid,
'bidsize': self.bid_size,
'ask': self.ask,
'asksize': self.ask_size,
'last': self.last,
'size': self.size,
'volume': self.volume,
'close': self.close,
'fullname': self.fullname
}
def add_client(self, client):
self.output('API_Symbol %s %s adding client %s' %
(self, self.symbol, client))
self.clients.add(client)
def del_client(self, client):
self.output('API_Symbol %s %s deleting client %s' %
(self, self.symbol, client))
self.clients.discard(client)
if not self.clients:
self.output('Removing %s from watchlist' % self.symbol)
# TODO: stop live updates of market data from RTX
def update_quote(self):
quote = 'quote.%s:%s %d %s %d' % (
self.symbol, self.bid, self.bid_size, self.ask, self.ask_size)
if quote != self.last_quote:
self.last_quote = quote
self.api.WriteAllClients(quote)
def update_trade(self):
self.api.WriteAllClients('trade.%s:%s %d %d' % (
self.symbol, self.last, self.size, self.volume))
def init_handler(self, data):
self.output('API_Symbol init: %s' % data)
self.rawdata = data
self.parse_fields(None, data[0])
if self.api.symbol_init(self):
self.cxn = self.api.cxn_get('TA_SRV', 'LIVEQUOTE')
self.cxn.advise('LIVEQUOTE', 'TRDPRC_1,TRDVOL_1,BID,BIDSIZE,ASK,ASKSIZE,ACVOL_1',
"DISP_NAME='%s'" % self.symbol, self.parse_fields)
def parse_fields(self, cxn, data):
trade_flag = False
quote_flag = False
if 'TRDPRC_1' in data.keys():
self.last = float(data['TRDPRC_1'])
trade_flag = True
if 'TRDVOL_1' in data.keys():
self.size = int(data['TRDVOL_1'])
trade_flag = True
if 'ACVOL_1' in data.keys():
self.volume = int(data['ACVOL_1'])
trade_flag = True
if 'BID' in data.keys():
self.bid = float(data['BID'])
quote_flag = True
if 'BIDSIZE' in data.keys():
self.bidsize = int(data['BIDSIZE'])
quote_flag = True
if 'ASK' in data.keys():
self.ask = float(data['ASK'])
quote_flag = True
if 'ASKSIZE' in data.keys():
self.asksize = int(data['ASKSIZE'])
quote_flag = True
if 'COMPANY_NAME' in data.keys():
self.fullname = data['COMPANY_NAME']
if 'HST_CLOSE' in data.keys():
self.close = float(data['HST_CLOSE'])
if quote_flag:
self.update_quote()
if trade_flag:
self.update_trade()
def update_handler(self, data):
self.output('API_Symbol update: %s' % data)
self.rawdata = data
class API_Callback():
def __init__(self, api, id, label, callable, timeout=0):
"""callable is stored and used to return results later"""
api.output('API_Callback.__init__() %s' % self)
self.api = api
self.id = id
self.label = label
if not timeout:
timeout = api.callback_timeout
self.expire = int(mx.DateTime.now()) + timeout
self.callable = callable
self.done = False
self.data = None
def complete(self, results):
"""complete callback by calling callable function with value of results"""
self.api.output('API_Callback.complete() %s' % self)
if not self.done:
if self.callable.callback.__name__ == 'write':
results = '%s.%s: %s\n' % (
self.api.channel, self.label, json.dumps(results))
self.callable.callback(results)
self.done = True
else:
self.api.output('error: callback: %s was already done!' % self)
def check_expire(self):
self.api.output('API_Callback.check_expire() %s' % self)
if not self.done:
if int(mx.DateTime.now()) > self.expire:
self.api.WriteAllClients(
'error: callback expired: %s' % repr((self.id, self.label)))
if self.callable.callback.__name__ == 'write':
self.callable.callback(
                    '%s.error: %s callback expired\n' % (self.api.channel, self.label))
else:
self.callable.callback(None)
self.done = True
# set an update_handler to handle async updates
# set response pending,
class RTX_Connection():
def __init__(self, api, service, topic):
self.api = api
self.id = str(uuid1())
self.service = service
self.topic = topic
self.key = '%s;%s' % (service, topic)
self.api.cxn_register(self)
self.api.gateway_send('connect %s %s' % (self.id, self.key))
self.response_pending = 'CONNECTION PENDING'
self.response_callback = None
self.status_pending = 'OnInitAck'
self.status_callback = None
self.update_callback = None
self.update_handler = None
self.connected = False
self.on_connect_action = None
self.update_ready()
def update_ready(self):
self.ready = not(
self.response_pending or self.response_callback or self.status_pending or self.status_callback or self.update_callback or self.update_handler)
self.api.output('update_ready() %s %s' % (self, self.ready))
if self.ready:
self.api.cxn_activate(self)
def receive(self, type, data):
if type == 'response':
self.handle_response(data)
elif type == 'status':
self.handle_status(data)
elif type == 'update':
self.handle_update(data)
else:
self.api.error_handler(
self.id, 'Message Type Unexpected: %s' % data)
self.update_ready()
def handle_response(self, data):
self.api.output('Connection Response: %s %s' % (self, data))
if self.response_pending:
if data == self.response_pending:
self.response_pending = None
else:
                self.api.error_handler(self.id, 'Response Error: %s' % data)
if self.response_callback:
self.response_callback.complete(data)
self.response_callback = None
else:
            self.api.error_handler(self.id, 'Response Unexpected: %s' % data)
def handle_status(self, s):
self.api.output('Connection Status: %s %s' % (self, s))
if self.status_pending and s['msg'] == self.status_pending:
self.status_pending = None
if s['status'] == '1':
if s['msg'] == 'OnInitAck':
self.connected = True
if self.on_connect_action:
self.ready = True
cmd, arg, exr, cbr, exs, cbs, cbu, uhr = self.on_connect_action
self.api.output('Sending on_connect_action: %s' %
repr(self.on_connect_action))
self.send(cmd, arg, exr, cbr, exs, cbs, cbu, uhr)
self.on_connect_action = None
else:
                self.api.error_handler(self.id, 'Status Error: %s' % s)
else:
            self.api.error_handler(self.id, 'Status Unexpected: %s' % s)
def handle_update(self, d):
self.api.output('Connection Update: %s %s' % (self, repr(d)))
if self.update_callback:
self.update_callback.complete(d)
self.update_callback = None
else:
if self.update_handler:
self.update_handler(self, d)
else:
self.api.error_handler(
self.id, 'Update Unexpected: %s' % repr(d))
def query(self, cmd, table, what, where, ex_response, cb_response, ex_status, cb_status, cb_update, update_handler):
ret = self.send(cmd, '%s;%s;%s' % (table, what, where), ex_response,
cb_response, ex_status, cb_status, cb_update, update_handler)
def request(self, table, what, where, callback):
return self.query('request', table, what, where, 'REQUEST_OK', None, None, None, callback, None)
def advise(self, table, what, where, handler):
return self.query('advise', table, what, where, 'ADVISE_OK', None, 'OnOtherAck', None, None, handler)
def adviserequest(self, table, what, where, callback, handler):
return self.query('adviserequest', table, what, where, 'ADVISEREQUEST_OK', None, 'OnOtherAck', None, callback, handler)
def unadvise(self, table, what, where, callback):
return self.query('unadvise', table, what, where, 'UNADVISE_OK', None, 'OnOtherAck', callback, None, None)
def poke(self, table, what, where, data, callback):
return self.send('poke', '%s;%s;%s!%s' % (table, what, where, data), "POKE_OK", callback)
def execute(self, command, callback):
return self.send('execute', command, "EXECUTE_OK", callback)
def terminate(self, code, callback):
return self.send('terminate', str(code), "TERMINATE_OK", callback)
def send(self, cmd, args, ex_response=None, cb_response=None, ex_status=None, cb_status=None, cb_update=None, update_handler=None):
if self.ready:
ret = self.api.gateway_send('%s %s %s' % (cmd, self.id, args))
self.response_pending = ex_response
self.response_callback = cb_response
self.status_pending = ex_status
self.status_callback = cb_status
self.update_callback = cb_update
self.update_handler = update_handler
else:
if self.on_connect_action:
self.api.error_handler(
self.id, 'Failure: on_connect_action already exists: %s' % repr(self.on_connect_action))
ret = False
else:
self.api.output('storing on_connect_action...%s' % self)
self.on_connect_action = (
cmd, args, ex_response, cb_response, ex_status, cb_status, cb_update, update_handler)
ret = True
return ret
class RTX_LocalCallback:
def __init__(self, api, handler):
self.api = api
self.callback_handler = handler
def callback(self, data):
if self.callback_handler:
self.callback_handler(data)
else:
self.api.error_handler(
self.id, 'Failure: undefined callback_handler for Connection: %s' % repr(self))
class RTX():
def __init__(self):
self.label = 'RTX Gateway'
self.channel = 'rtx'
self.id = 'RTX'
self.output('RTX init')
self.config = Config(self.channel)
self.api_hostname = self.config.get('API_HOST')
self.api_port = int(self.config.get('API_PORT'))
self.username = self.config.get('USERNAME')
self.password = self.config.get('PASSWORD')
self.xmlrpc_port = int(self.config.get('XMLRPC_PORT'))
self.tcp_port = int(self.config.get('TCP_PORT'))
self.callback_timeout = int(self.config.get('CALLBACK_TIMEOUT'))
if not self.callback_timeout:
self.callback_timeout = DEFAULT_CALLBACK_TIMEOUT
self.output('callback_timeout=%d' % self.callback_timeout)
self.enable_ticker = bool(int(self.config.get('ENABLE_TICKER')))
self.current_account = ''
self.clients = set([])
self.orders = {}
self.pending_orders = {}
self.openorder_callbacks = []
self.accounts = None
self.account_data = {}
self.pending_account_data_requests = set([])
self.positions = {}
self.position_callbacks = []
self.executions = {}
self.execution_callbacks = []
self.bardata_callbacks = []
self.cancel_callbacks = []
self.order_callbacks = []
self.add_symbol_callbacks = []
self.accountdata_callbacks = []
self.set_account_callbacks = []
self.account_request_callbacks = []
self.account_request_pending = True
self.timer_callbacks = []
self.connected = False
self.last_connection_status = ''
self.connection_status = 'Initializing'
self.LastError = -1
self.next_order_id = -1
self.last_minute = -1
self.symbols = {}
self.primary_exchange_map = {}
self.gateway_sender = None
self.active_cxn = {}
self.idle_cxn = {}
self.cx_time = None
self.seconds_disconnected = 0
self.repeater = LoopingCall(self.EverySecond)
self.repeater.start(1)
def cxn_register(self, cxn):
self.output('cxn_register: %s' % repr(cxn))
self.active_cxn[cxn.id] = cxn
def cxn_activate(self, cxn):
self.output('cxn_activate: %s' % repr(cxn))
if not cxn.key in self.idle_cxn.keys():
self.idle_cxn[cxn.key] = []
self.idle_cxn[cxn.key].append(cxn)
def cxn_get(self, service, topic):
key = '%s;%s' % (service, topic)
if key in self.idle_cxn.keys() and len(self.idle_cxn[key]):
cxn = self.idle_cxn[key].pop()
else:
cxn = RTX_Connection(self, service, topic)
self.output('cxn_get() returning: %s' % repr(cxn))
return cxn
def gateway_connect(self, protocol):
if protocol:
self.gateway_sender = protocol.sendLine
self.gateway_transport = protocol.transport
else:
self.gateway_sender = None
self.connected = False
self.seconds_disconnected = 0
self.account_request_pending = False
self.accounts = None
self.update_connection_status('Disconnected')
self.WriteAllClients('error: API Disconnected')
return self.gateway_receive
def gateway_send(self, msg):
self.output('<-- %s' % repr(msg))
if self.gateway_sender:
self.gateway_sender('%s\n' % msg)
def gateway_receive(self, msg):
"""handle input from rtgw """
o = json.loads(msg)
msg_type = o['type']
msg_id = o['id']
msg_data = o['data']
self.output('--> %s %s %s' % (msg_type, msg_id, msg_data))
if msg_type == 'system':
self.handle_system_message(msg_id, msg_data)
else:
if msg_id in self.active_cxn.keys():
c = self.active_cxn[msg_id].receive(msg_type, msg_data)
else:
self.error_handler(
self.id, 'Message Received on Unknown connection: %s' % repr(msg))
return True
def handle_system_message(self, id, data):
if data['msg'] == 'startup':
self.connected = True
self.accounts = None
self.update_connection_status('Connected')
self.output('Connected to %s' % data['item'])
self.setup_local_queries()
else:
self.error_handler(
self.id, 'Unknown system message: %s' % repr(data))
def setup_local_queries(self):
"""Upon connection to rtgw, start automatic queries"""
self.rtx_request('ACCOUNT_GATEWAY', 'ORDER', 'ACCOUNT', '*', '',
'accounts', self.handle_accounts, self.accountdata_callbacks, 5)
def output(self, msg):
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
def open_client(self, client):
self.clients.add(client)
def close_client(self, client):
self.clients.discard(client)
symbols = self.symbols.values()
for ts in symbols:
if client in ts.clients:
ts.del_client(client)
if not ts.clients:
del(self.symbols[ts.symbol])
def set_primary_exchange(self, symbol, exchange):
if exchange:
self.primary_exchange_map[symbol] = exchange
else:
del(self.primary_exchange_map[symbol])
return self.primary_exchange_map
def CheckPendingResults(self):
# check each callback list for timeouts
for cblist in [self.timer_callbacks, self.position_callbacks, self.openorder_callbacks, self.execution_callbacks, self.bardata_callbacks, self.order_callbacks, self.cancel_callbacks, self.add_symbol_callbacks, self.accountdata_callbacks, self.set_account_callbacks, self.account_request_callbacks]:
dlist = []
for cb in cblist:
cb.check_expire()
if cb.done:
dlist.append(cb)
# delete any callbacks that are done
for cb in dlist:
cblist.remove(cb)
def handle_order_status(self, msg):
mid = str(msg.orderId)
pid = str(msg.permId)
if not pid in self.orders.keys():
self.orders[pid] = {}
m = self.orders[pid]
if 'status' in m.keys():
oldstatus = json.dumps(m)
else:
oldstatus = ''
m['permid'] = msg.permId
m['id'] = msg.orderId
m['status'] = msg.status
m['filled'] = msg.filled
m['remaining'] = msg.remaining
m['avgfillprice'] = msg.avgFillPrice
m['parentid'] = msg.parentId
m['lastfillprice'] = msg.lastFillPrice
m['clientid'] = msg.clientId
m['whyheld'] = msg.whyHeld
# callbacks are keyed by message-id, not permid
for cb in self.cancel_callbacks:
if cb.id == mid:
self.output('cancel_callback[%s] completed' % mid)
cb.complete(m)
for cb in self.order_callbacks:
if cb.id == mid:
self.output('order_callback[%s] completed' % mid)
cb.complete(m)
if json.dumps(m) != oldstatus:
self.send_order_status(m)
def send_order_status(self, order):
self.WriteAllClients('order.%s: %s' %
(order['permid'], json.dumps(order)))
def handle_open_order(self, msg):
mid = str(msg.orderId)
pid = str(msg.order.m_permId)
if not pid in self.orders.keys():
self.orders[pid] = {}
m = self.orders[pid]
if 'status' in m.keys():
oldstatus = json.dumps(m)
else:
oldstatus = ''
m['id'] = msg.orderId
m['symbol'] = msg.contract.m_symbol
m['action'] = msg.order.m_action
m['quantity'] = msg.order.m_totalQuantity
m['account'] = msg.order.m_account
m['clientid'] = msg.order.m_clientId
m['permid'] = msg.order.m_permId
m['price'] = msg.order.m_lmtPrice
m['aux_price'] = msg.order.m_auxPrice
m['type'] = msg.order.m_orderType
m['status'] = msg.orderState.m_status
m['warning'] = msg.orderState.m_warningText
if oldstatus != json.dumps(m):
self.WriteAllClients('open-order.%s: %s' %
(m['permid'], json.dumps(m)))
def handle_accounts(self, msg):
if msg:
self.accounts = []
for row in msg:
account = '%s.%s.%s.%s.%s' % (
row['BANK'], row['BRANCH'], row['CUSTOMER'], row['DEPOSIT'], row['ACCT_TYPE'])
self.accounts.append(account)
self.accounts.sort()
self.account_request_pending = False
self.WriteAllClients('accounts: %s' % json.dumps(self.accounts))
for cb in self.account_request_callbacks:
cb.complete(self.accounts)
for cb in self.set_account_callbacks:
                self.output('set_account: processing deferred response.')
                self.process_set_account(cb.id, cb)
else:
self.error_handler(
self.id, 'handle_accounts: unexpected null input')
def set_account(self, account_name, callback):
cb = API_Callback(self, account_name, 'set-account', callback)
if self.accounts:
self.process_set_account(account_name, cb)
elif self.account_request_pending:
            self.set_account_callbacks.append(cb)
else:
self.output(
'Error: set_account; no data, but no account_request_pending')
cb.complete(None)
def process_set_account(self, account_name, callback):
if account_name in self.accounts:
self.current_account = account_name
msg = 'current account set to %s' % account_name
self.output(msg)
ret = True
else:
msg = 'account %s not found' % account_name
self.output('Error: set_account(): %s' % msg)
ret = False
self.WriteAllClients('current-account: %s' % self.current_account)
if callback:
callback.complete(ret)
else:
return ret
def rtx_request(self, service, topic, table, what, where, label, handler, cb_list, timeout=0):
cxn = self.cxn_get(service, topic)
cb = API_Callback(self, cxn.id, label,
RTX_LocalCallback(self, handler), timeout)
cxn.request(table, what, where, cb)
cb_list.append(cb)
def EverySecond(self):
if self.connected:
if ENABLE_TICK_REQUESTS:
self.rtx_request('TA_SRV', 'LIVEQUOTE', 'LIVEQUOTE', 'DISP_NAME,TRDTIM_1,TRD_DATE',
"DISP_NAME='$TIME'", 'tick', self.handle_time, self.timer_callbacks, 5)
else:
self.seconds_disconnected += 1
if self.seconds_disconnected > DISCONNECT_SECONDS:
self.output(
'Realtick Gateway is disconnected; forcing shutdown')
if SHUTDOWN_ON_DISCONNECT:
reactor.stop()
self.CheckPendingResults()
def WriteAllClients(self, msg):
self.output('WriteAllClients: %s.%s' % (self.channel, msg))
msg = str('%s.%s\n' % (self.channel, msg))
for c in self.clients:
c.transport.write(msg)
def error_handler(self, id, msg):
"""report error messages"""
self.output('ERROR: %s %s' % (id, msg))
self.WriteAllClients('error: %s %s' % (id, msg))
def handle_time(self, rows):
print('handle_time: %s' % json.dumps(rows))
if rows:
hour, minute = [int(i)
for i in rows[0]['TRDTIM_1'].split(':')[0:2]]
if minute != self.last_minute:
self.last_minute = minute
self.WriteAllClients('time: %s %02d:%02d:00' %
(rows[0]['TRD_DATE'], hour, minute))
else:
            self.error_handler(self.id, 'handle_time: unexpected null input')
def create_contract(self, symbol, sec_type, exch, prim_exch, curr):
"""Create a Contract object defining what will
be purchased, at which exchange and in which currency.
symbol - The ticker symbol for the contract
sec_type - The security type for the contract ('STK' is 'stock')
exch - The exchange to carry out the contract on
prim_exch - The primary exchange to carry out the contract on
curr - The currency in which to purchase the contract
In cases where SMART exchange results in ambiguity SYMBOL:PRIMARY_EXCHANGE can be passed."""
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = sec_type
contract.m_exchange = exch
if symbol in self.primary_exchange_map.keys():
contract.m_primaryExch = self.primary_exchange_map[symbol]
else:
contract.m_primaryExch = prim_exch
contract.m_currency = curr
return contract
def create_order(self, order_type, quantity, action):
"""Create an Order object (Market/Limit) to go long/short.
order_type - 'MKT', 'LMT' for Market or Limit orders
quantity - Integral number of assets to order
action - 'BUY' or 'SELL'"""
order = Order()
order.m_orderType = order_type
order.m_totalQuantity = quantity
order.m_action = action
order.m_account = self.current_account
return order
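    # Illustrative sketch (not part of the original gateway code): shows how the
    # create_contract() and create_order() helpers above are typically combined
    # before an order is transmitted.  The 'STK'/'SMART'/'USD' routing values
    # mirror those used elsewhere in this class; the symbol and quantity are
    # placeholders.
    def _example_build_market_buy(self, symbol, quantity):
        contract = self.create_contract(symbol, 'STK', 'SMART', 'SMART', 'USD')
        order = self.create_order('MKT', quantity, 'BUY')
        return contract, order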
def connect(self):
self.update_connection_status('Connecting')
self.output('Awaiting startup response from RTX gateway at %s:%d...' % (
self.api_hostname, self.api_port))
def market_order(self, symbol, quantity, callback):
return self.submit_order('market', 0, 0, symbol, int(quantity), callback)
def limit_order(self, symbol, limit_price, quantity, callback):
return self.submit_order('limit', float(limit_price), 0, symbol, int(quantity), callback)
def stop_order(self, symbol, stop_price, quantity, callback):
return self.submit_order('stop', 0, float(stop_price), symbol, int(quantity), callback)
def stoplimit_order(self, symbol, stop_price, limit_price, quantity, callback):
return self.submit_order('stoplimit', float(limit_price), float(stop_price), symbol, int(quantity), callback)
def submit_order(self, order_type, price, stop_price, symbol, quantity, callback):
self.output('ERROR: submit_order unimplemented')
def cancel_order(self, id, callback):
self.output('ERROR: cancel_order unimplemented')
self.output('cancel_order%s' % repr((id)))
mid = str(id)
tcb = TWS_Callback(self, mid, 'cancel_order', callback)
order = self.find_order_with_id(mid)
if order:
if order['status'] == 'Cancelled':
tcb.complete(
{'status': 'Error', 'errorMsg': 'Already cancelled.', 'id': id})
else:
resp = self.tws_conn.cancelOrder(mid)
self.output('cancelOrder(%s) returned %s' %
(repr(mid), repr(resp)))
self.cancel_callbacks.append(tcb)
else:
tcb.complete(
{'status': 'Error', 'errorMsg': 'Order not found', 'id': mid})
def symbol_enable(self, symbol, client, callback):
self.output('symbol_enable(%s,%s,%s)' % (symbol, client, callback))
if not symbol in self.symbols.keys():
cb = API_Callback(self, symbol, 'add-symbol', callback)
symbol = API_Symbol(self, symbol, client, cb)
self.add_symbol_callbacks.append(cb)
else:
self.symbols[symbol].add_client(client)
API_Callback(self, 0, 'add-symbol', callback).complete(True)
self.output('symbol_enable: symbols=%s' % repr(self.symbols))
def symbol_init(self, symbol):
ret = not 'SYMBOL_ERROR' in symbol.rawdata[0].keys()
if not ret:
self.symbol_disable(symbol.symbol, list(symbol.clients)[0])
symbol.callback.complete(ret)
return ret
def symbol_disable(self, symbol, client):
self.output('symbol_disable(%s,%s)' % (symbol, client))
self.output('self.symbols=%s' % repr(self.symbols))
if symbol in self.symbols.keys():
ts = self.symbols[symbol]
ts.del_client(client)
if not ts.clients:
del(self.symbols[symbol])
self.output('ret True: self.symbols=%s' % repr(self.symbols))
return True
self.output('ret False: self.symbols=%s' % repr(self.symbols))
def update_connection_status(self, status):
self.connection_status = status
if status != self.last_connection_status:
self.last_connection_status = status
self.WriteAllClients('connection-status-changed: %s' % status)
def request_accounts(self, callback):
cb = API_Callback(self, 0, 'request-accounts', callback)
if self.accounts:
cb.complete(self.accounts)
elif self.account_request_pending:
self.account_request_callbacks.append(cb)
else:
self.output(
'Error: request_accounts; no data, but no account_request_pending')
cb.complete(None)
def request_positions(self, callback):
cxn = self.cxn_get('ACCOUNT_GATEWAY', 'ORDER')
cb = API_Callback(self, 0, 'positions', callback)
cxn.request('POSITION', '*', '', cb)
self.position_callbacks.append(cb)
return cxn.id
def request_orders(self, callback):
cxn = self.cxn_get('ACCOUNT_GATEWAY', 'ORDER')
cb = API_Callback(self, 0, 'orders', callback)
cxn.request('ORDERS', '*', '', cb)
self.openorder_callbacks.append(cb)
return cxn.id
def request_executions(self, callback):
cxn = self.cxn_get('ACCOUNT_GATEWAY', 'ORDER')
cb = API_Callback(self, 0, 'executions', callback)
cxn.request('ORDERS', '*',
"CURRENT_STATUS='COMPLETED',TYPE='ExchangeTradeOrder'", cb)
self.execution_callbacks.append(cb)
return cxn.id
def request_account_data(self, account, fields, callback):
cxn = self.cxn_get('ACCOUNT_GATEWAY', 'ORDER')
cb = API_Callback(self, 0, 'account_data', callback)
cxn.request('DEPOSIT', '*', '', cb)
self.accountdata_callbacks.append(cb)
return cxn.id
def request_global_cancel(self):
self.tws_conn.reqGlobalCancel()
def query_bars(self, symbol, period, bar_start, bar_end, callback):
id = self.next_id()
self.output('bardata request id=%s' % id)
# 30 second timeout for bar data
cb = TWS_Callback(self, id, 'bardata', callback, 30)
contract = self.create_contract(symbol, 'STK', 'SMART', 'SMART', 'USD')
        if not isinstance(bar_start, int):
            mxd = mx.DateTime.ISO.ParseDateTime(bar_start)
            bar_start = datetime.datetime(
                mxd.year, mxd.month, mxd.day, mxd.hour, mxd.minute, int(mxd.second))
        if not isinstance(bar_end, int):
            mxd = mx.DateTime.ISO.ParseDateTime(bar_end)
            bar_end = datetime.datetime(
                mxd.year, mxd.month, mxd.day, mxd.hour, mxd.minute, int(mxd.second))
# try:
if 1 == 1:
endDateTime = bar_end.strftime('%Y%m%d %H:%M:%S')
durationStr = '%s S' % (bar_end - bar_start).seconds
barSizeSetting = {'1': '1 min', '5': '5 mins'}[
str(period)] # legal period values are '1' and '5'
whatToShow = 'TRADES'
useRTH = 0
formatDate = 1
self.bardata_callbacks.append(cb)
self.output('edt:%s ds:%s bss:%s' %
(endDateTime, durationStr, barSizeSetting))
self.tws_conn.reqHistoricalData(
id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate)
# except:
if 1 == 2:
            cb.complete(['Error', 'query_bars(%s) failed!' % repr(
                (symbol, period, bar_start, bar_end)), 'Count: 0'])
def handle_historical_data(self, msg):
for cb in self.bardata_callbacks:
if cb.id == msg.reqId:
if not cb.data:
cb.data = []
if msg.date.startswith('finished'):
cb.complete(['OK', cb.data])
else:
cb.data.append(dict(msg.items()))
# self.output('historical_data: %s' % msg) #repr((id, start_date, bar_open, bar_high, bar_low, bar_close, bar_volume, count, WAP, hasGaps)))
def query_connection_status(self):
return self.connection_status
| [
"[email protected]"
] | |
14698f5e208340300976981461b72d99053e4499 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/django_django/django-master/django/views/static.py | 479c59cac6c4165e1254d9a1815a56860e62d1b5 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 5,108 | py | """
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import os
import posixpath
import re
import stat
from urllib.parse import unquote
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.translation import gettext as _, gettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = gettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
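# Minimal usage sketch (not part of Django itself): drives the If-Modified-Since
# check above with a made-up header and mtime.  Because the file's modification
# time predates the client's cached timestamp, this returns False, i.e. a 304
# response would be appropriate.
def _example_was_modified_since():
    import calendar
    import time
    header = 'Wed, 21 Oct 2015 07:28:00 GMT'
    mtime = calendar.timegm(time.strptime('2015-10-21 07:27:00', '%Y-%m-%d %H:%M:%S'))
    return was_modified_since(header, mtime=mtime, size=1234)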
| [
"[email protected]"
] | |
2e95ff4a78787e3e931b2ee198077cfb1abc5341 | 05d9291f8d02bb98a3d2a3c0b49858f1f2e6d834 | /quick_service/quick_service/doctype/production/production.py | 6a2cdf6f6856a35d314f88b03bcf957788dac9f7 | [
"MIT"
] | permissive | leaftechnology/quick_service | b1e6fa4012bcc75816cd4e895e4f9e6b9105c2a8 | 69ff87a33b3f135b7d12977c3e95727243b2f740 | refs/heads/master | 2023-05-04T05:30:22.610405 | 2021-05-22T05:12:41 | 2021-05-22T05:12:41 | 301,926,248 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,180 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, jan and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.model.document import Document
from erpnext.stock.stock_ledger import get_previous_sle
from frappe.utils import cint, flt
from datetime import datetime
class Production(Document):
@frappe.whitelist()
def change_status(self, status):
if status == "Closed" or status == "Completed":
frappe.db.sql(""" UPDATE `tabProduction` SET last_status=%s WHERE name=%s """,(self.status, self.name))
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s """,(status, self.name))
frappe.db.commit()
elif status == "Open":
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s """, (self.last_status, self.name))
frappe.db.commit()
if status == "Completed":
self.get_service_records()
@frappe.whitelist()
def get_service_records(self):
estimation_ = ""
estimation = frappe.db.sql(""" SELECT * FROM `tabProduction` WHERE name= %s""", self.name, as_dict=1)
if len(estimation) > 0:
estimation_ = estimation[0].estimation
frappe.db.sql(""" UPDATE `tabEstimation` SET status=%s WHERE name=%s""",
("Completed", estimation_))
inspections = frappe.db.sql(""" SELECT * FROM `tabInspection Table` WHERE parent=%s """, estimation_, as_dict=1)
for i in inspections:
frappe.db.sql(""" UPDATE `tabInspection` SET status=%s WHERE name=%s""",
("Completed", i.inspection))
srn = frappe.db.sql(""" SELECT * FROM `tabEstimation` WHERE name=%s """, estimation_, as_dict=1)
if len(srn) > 0:
srn_ = srn[0].service_receipt_note
frappe.db.sql(""" UPDATE `tabService Receipt Note` SET status=%s WHERE name=%s""",
("Completed", srn_))
frappe.db.commit()
def on_update_after_submit(self):
for i in self.raw_material:
if i.production:
get_qty = frappe.db.sql(""" SELECT * FROM `tabProduction` WHERE name=%s""", i.production, as_dict=1)
get_qty_total = frappe.db.sql(""" SELECT SUM(qty_raw_material) as qty_raw_material FROM `tabRaw Material` WHERE production=%s """, i.production, as_dict=1)
if get_qty[0].qty == get_qty_total[0].qty_raw_material:
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s, last_status=%s WHERE name=%s""", ("Completed",get_qty[0].status,i.production))
frappe.db.commit()
else:
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s, last_status=%s WHERE name=%s""", ("Linked",get_qty[0].status,i.production))
frappe.db.commit()
@frappe.whitelist()
def change_production_status(self, production):
raw_material = frappe.db.sql(""" SELECT * FROM `tabRaw Material` WHERE name=%s""",production, as_dict=1)
if len(raw_material) > 0 and raw_material[0].production:
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""", ("To Deliver and Bill", raw_material[0].production))
frappe.db.commit()
def on_cancel(self):
for i in self.raw_material:
if i.production:
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""", ("In Progress", i.production))
frappe.db.commit()
se = frappe.db.sql(""" SELECT * FROM `tabStock Entry` WHERE production=%s """, self.name, as_dict=1)
if len(se) > 0:
for i in se:
se_record = frappe.get_doc("Stock Entry", i.name)
se_record.cancel()
def on_submit(self):
for i in self.raw_material:
if i.production:
get_qty = frappe.db.sql(""" SELECT * FROM `tabProduction` WHERE name=%s""", i.production, as_dict=1)
get_qty_total = frappe.db.sql(""" SELECT SUM(qty_raw_material) as qty_raw_material FROM `tabRaw Material` WHERE production=%s """, i.production, as_dict=1)
if get_qty[0].qty == get_qty_total[0].qty_raw_material:
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s, last_status=%s WHERE name=%s""", ("Completed",get_qty[0].status,i.production))
frappe.db.commit()
else:
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s, last_status=%s WHERE name=%s""", ("Linked",get_qty[0].status,i.production))
frappe.db.commit()
@frappe.whitelist()
def set_available_qty(self):
time = frappe.utils.now_datetime().time()
date = frappe.utils.now_datetime().date()
for d in self.get('raw_material'):
previous_sle = get_previous_sle({
"item_code": d.item_code,
"warehouse": d.warehouse,
"posting_date": date,
"posting_time": time
})
# get actual stock at source warehouse
d.available_qty = previous_sle.get("qty_after_transaction") or 0
def validate(self):
if self.type == "Assemble":
self.series = "SK-"
elif self.type == "Disassemble":
self.series = "SK-D-"
elif self.type == "Service":
self.series = "CS-"
@frappe.whitelist()
def check_raw_materials(self):
for i in self.raw_material:
if i.available_qty == 0:
return False, i.item_code
return True, ""
@frappe.whitelist()
def generate_se(self):
check,item_code = self.check_raw_materials()
allow_negative_stock = cint(frappe.db.get_value("Stock Settings", None, "allow_negative_stock"))
if check or (not check and allow_negative_stock):
doc_se = {
"doctype": "Stock Entry",
"stock_entry_type": "Manufacture" if self.type == "Assemble" or self.type == "Service" else "Material Issue" if self.type == "Re-Service" else"Repack",
"items": self.get_manufacture_se_items() if self.type == "Assemble" or self.type == "Service" else self.get_material_issue_se_items() if self.type == "Re-Service" else self.get_repack_se_items(),
"production": self.name,
"additional_costs": self.get_additional_costs(),
"analytic_account": self.analytic_account if self.analytic_account else ""
}
frappe.get_doc(doc_se).insert(ignore_permissions=1).submit()
if self.type == "Re-Service":
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""",
("Completed", self.name))
frappe.db.commit()
else:
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""",
("To Deliver and Bill", self.name))
frappe.db.commit()
return ""
else:
frappe.throw("Item " + item_code + " Has no available stock")
@frappe.whitelist()
def generate_finish_good_se(self):
doc_se1 = {
"doctype": "Stock Entry",
"stock_entry_type": "Manufacture",
"production": self.name,
"additional_costs": self.get_additional_costs(),
"items": [{
'item_code': self.item_code_prod,
't_warehouse': self.warehouse,
'qty': self.qty,
'uom': self.umo,
'basic_rate': self.rate,
'cost_center': self.cost_center,
"analytic_account": self.analytic_account
}],
}
frappe.get_doc(doc_se1).insert(ignore_permissions=1).submit()
@frappe.whitelist()
def get_additional_costs(self):
costs = []
for i in self.additional_cost:
costs.append({
"expense_account": i.expense_ledger,
"description": i.description,
"amount": i.additional_cost_amount
})
return costs
@frappe.whitelist()
def generate_dn(self):
if self.input_qty > self.qty_for_sidn:
frappe.throw("Maximum qty that can be generated is " + str(self.qty))
doc_dn = {
"doctype": "Delivery Note",
"customer": self.customer,
"items": self.get_si_items("DN", self.input_qty),
"production": self.get_production_items(self.input_qty),
}
dn = frappe.get_doc(doc_dn)
dn.insert(ignore_permissions=1)
return dn.name
@frappe.whitelist()
def generate_si(self):
if self.input_qty > self.qty_for_sidn:
frappe.throw("Maximum qty that can be generated is " + str(self.qty))
doc_si = {
"doctype": "Sales Invoice",
"customer": self.customer,
"items": self.get_si_items("SI", self.input_qty),
"production": self.get_production_items(self.input_qty),
}
si = frappe.get_doc(doc_si)
si.insert(ignore_permissions=1)
return si.name
@frappe.whitelist()
def generate_jv(self):
doc_jv = {
"doctype": "Journal Entry",
"voucher_type": "Journal Entry",
"posting_date": self.posting_date,
"accounts": self.jv_accounts(),
"production": self.name
}
jv = frappe.get_doc(doc_jv)
jv.insert(ignore_permissions=1)
jv.submit()
@frappe.whitelist()
def jv_accounts(self):
accounts = []
amount = 0
for item in self.advance_payment:
amount += item.amount
accounts.append({
'account': item.expense_account,
'debit_in_account_currency': item.amount,
'credit_in_account_currency': 0,
})
debit_account = frappe.db.sql(""" SELECT * FROM `tabAccount` WHERE name like %s """, "%Debtors%",as_dict=1 )
if len(debit_account) > 0:
accounts.append({
'account': debit_account[0].name,
'debit_in_account_currency': 0,
'credit_in_account_currency': amount,
'party_type': "Customer",
'party': self.customer,
'is_advance': "Yes",
})
print(accounts)
return accounts
@frappe.whitelist()
def get_manufacture_se_items(self):
items = []
for item in self.raw_material:
items.append({
'item_code': item.item_code,
's_warehouse': item.warehouse,
'qty': item.qty_raw_material,
'uom': "Nos",
'basic_rate': item.rate_raw_material,
'cost_center': item.cost_center,
"analytic_account": self.analytic_account
})
items.append({
'item_code': self.item_code_prod,
't_warehouse': self.warehouse,
'qty': self.qty,
'uom': self.umo,
'basic_rate': self.rate,
'cost_center': self.cost_center,
'is_finished_item': 1,
"analytic_account": self.analytic_account
})
return items
@frappe.whitelist()
def get_material_issue_se_items(self):
items = []
for item in self.raw_material:
items.append({
'item_code': item.item_code,
's_warehouse': item.warehouse,
'qty': item.qty_raw_material,
'uom': "Nos",
'basic_rate': item.rate_raw_material,
'cost_center': item.cost_center,
"analytic_account": self.analytic_account
})
return items
@frappe.whitelist()
def get_repack_se_items(self):
items = []
for item in self.raw_material:
if item.available_qty > 0:
items.append({
'item_code': item.item_code,
't_warehouse': item.warehouse,
'qty': item.qty_raw_material,
'uom': "Nos",
'basic_rate': item.rate_raw_material,
'cost_center': item.cost_center,
"analytic_account": self.analytic_account
})
items.append({
'item_code': self.item_code_prod,
's_warehouse': self.warehouse,
'qty': self.qty,
'uom': self.umo,
'basic_rate': self.rate,
'cost_center': self.cost_center,
"analytic_account": self.analytic_account
})
return items
@frappe.whitelist()
def get_si_items(self, type, qty):
obj = {
'item_code': self.item_code_prod,
'item_name': self.get_item_value("item_name"),
'description': self.get_item_value("description"),
'qty': qty,
'uom': "Nos",
'rate': self.invoice_rate,
'cost_center': self.cost_center,
'income_account': self.income_account
}
if type == "DN":
obj["warehouse"] = self.warehouse
return [obj]
@frappe.whitelist()
def get_production_items(self, qty):
return [{
'reference': self.name,
'qty': qty,
'rate': self.invoice_rate,
'amount': self.invoice_rate * qty,
}]
@frappe.whitelist()
def get_sales_man(self):
return [{
'sales_man': self.sales_man,
'reference': self.name,
}]
@frappe.whitelist()
def get_item_value(self, field):
items = frappe.db.sql(""" SELECT * FROM `tabItem` WHERE name=%s """, self.item_code_prod, as_dict=1)
return items[0][field]
@frappe.whitelist()
def get_available_qty(production):
get_qty = frappe.db.sql(""" SELECT * FROM `tabProduction` WHERE name=%s""", production, as_dict=1)
get_qty_total = frappe.db.sql(
""" SELECT SUM(RM.qty_raw_material) as qty_raw_material FROM `tabProduction` AS P INNER JOIN `tabRaw Material` AS RM ON RM.parent = P.name and RM.production=%s WHERE P.docstatus=1 """,
production, as_dict=1)
print(get_qty_total)
return get_qty[0].qty - get_qty_total[0].qty_raw_material if get_qty_total[0].qty_raw_material else get_qty[0].qty
@frappe.whitelist()
def get_rate(item_code, warehouse, based_on,price_list):
time = frappe.utils.now_datetime().time()
date = frappe.utils.now_datetime().date()
balance = 0
if warehouse:
previous_sle = get_previous_sle({
"item_code": item_code,
"warehouse": warehouse,
"posting_date": date,
"posting_time": time
})
# get actual stock at source warehouse
balance = previous_sle.get("qty_after_transaction") or 0
condition = ""
if price_list == "Standard Buying":
condition += " and buying = 1 "
elif price_list == "Standard Selling":
condition += " and selling = 1 "
query = """ SELECT * FROM `tabItem Price` WHERE item_code=%s {0} ORDER BY valid_from DESC LIMIT 1""".format(condition)
item_price = frappe.db.sql(query,item_code, as_dict=1)
rate = item_price[0].price_list_rate if len(item_price) > 0 else 0
print(based_on)
if based_on == "Valuation Rate":
print("WALA DIR")
item_record = frappe.db.sql(
""" SELECT * FROM `tabItem` WHERE item_code=%s""",
item_code, as_dict=1)
rate = item_record[0].valuation_rate if len(item_record) > 0 else 0
if based_on == "Last Purchase Rate":
print("WALA DIR")
item_record = frappe.db.sql(
""" SELECT * FROM `tabItem` WHERE item_code=%s""",
item_code, as_dict=1)
rate = item_record[0].last_purchase_rate if len(item_record) > 0 else 0
return rate, balance
@frappe.whitelist()
def get_uom(item_code):
item = frappe.db.sql(
""" SELECT * FROM `tabItem` WHERE name=%s""",
item_code, as_dict=1)
return item[0].stock_uom, item[0].item_name
@frappe.whitelist()
def get_address(customer):
address = frappe.db.sql("""
SELECT
A.name,
A.address_line1,
A.city,
A.county ,
A.state,
A.country,
A.pincode
FROM `tabAddress` AS A
INNER JOIN `tabDynamic Link` AS DL
ON DL.link_doctype=%s and DL.link_name=%s and DL.parent = A.name
WHERE A.is_primary_address=1 """,("Customer", customer), as_dict=1)
return address[0] if len(address) > 0 else {}
@frappe.whitelist()
def get_jv(production):
jv = frappe.db.sql(""" SELECT * FROM `tabJournal Entry` WHERE production=%s """, production, as_dict=1)
return jv[0].name if len(jv) > 0 else ""
@frappe.whitelist()
def get_se(name):
se = frappe.db.sql(""" SELECT * FROM `tabStock Entry` WHERE production=%s """, name, as_dict=1)
return len(se) > 0
@frappe.whitelist()
def get_dn_or_si(name):
si = frappe.db.sql("""
SELECT * FROM `tabSales Invoice Production` WHERE reference=%s and parenttype=%s """,
(name,"Sales Invoice"), as_dict=1)
dn = frappe.db.sql("""
SELECT * FROM `tabSales Invoice Production` WHERE reference=%s and parenttype=%s """,
(name, "Delivery Note"), as_dict=1)
return len(si) > 0,len(dn) > 0
@frappe.whitelist()
def get_dn_si_qty(item_code, qty, name):
si_query = """
SELECT SIP.qty as qty, SI.status FROM `tabSales Invoice` AS SI
INNER JOIN `tabSales Invoice Production` AS SIP ON SI.name = SIP.parent
WHERE SIP.reference=%s and SIP.parenttype=%s and SI.docstatus = 1 and SI.status!='Cancelled'
"""
si = frappe.db.sql(si_query, (name, "Sales Invoice"), as_dict=1)
dn_query = """
SELECT SIP.qty as qty, DN.status FROM `tabDelivery Note` AS DN
INNER JOIN `tabSales Invoice Production` AS SIP ON DN.name = SIP.parent
WHERE SIP.reference=%s and SIP.parenttype=%s and DN.docstatus = 1 and DN.status!='Cancelled'
"""
dn = frappe.db.sql(dn_query, (name, "Delivery Note"), as_dict=1)
total_qty = 0
if len(si) > len(dn):
for i in si:
total_qty += i.qty
elif len(dn) > len(si):
for d in dn:
total_qty += d.qty
elif len(dn) == len(si):
for d in dn:
total_qty += d.qty
print("TOTALALLL")
print(total_qty)
return float(qty) - float(total_qty)
@frappe.whitelist()
def change_status(name):
frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""", ("Partially Delivered", name))
frappe.db.commit()
return 1
@frappe.whitelist()
def get_valuation_rate(item_code):
item = frappe.db.sql(""" SELECT * FROM `tabItem` WHERE item_code=%s""", (item_code),as_dict=1)
return item[0].valuation_rate if len(item) > 0 else 0
@frappe.whitelist()
def compute_selling_price(raw_materials):
import json
selling_price_total = 0
raw_material = json.loads(raw_materials)
for i in raw_material:
        warehouse = i['warehouse'] if 'warehouse' in i and i['warehouse'] else ""
if 'item_code' in i:
selling_price = get_rate(i['item_code'],warehouse,"Price List", "Standard Selling")
selling_price_total += (selling_price[0] * i['qty_raw_material'])
return selling_price_total
@frappe.whitelist()
def selling_price_list(raw_materials):
import json
array_selling = []
raw_material = json.loads(raw_materials)
for i in raw_material:
        warehouse = i['warehouse'] if 'warehouse' in i and i['warehouse'] else ""
if 'item_code' in i:
selling_price = get_rate(i['item_code'],warehouse,"Price List", "Standard Selling")
array_selling.append({
"item_name": i['item_name'],
"qty_raw_material": i['qty_raw_material'],
"rate_raw_material": selling_price[0] * i['qty_raw_material']
})
return array_selling | [
"[email protected]"
] | |
f05d2c71ce52bff9656cf0194c7cf3ab35c12a64 | 630bf979e99b1b0e14f7ffdc65c18ba470ce2fe0 | /neuclease/dvid/annotation.py | 7422c0b6646b86f1ad03bcd07db19fd449660caa | [
"BSD-3-Clause"
] | permissive | y2mk1ng/neuclease | 0384294259aa592b4e58de2df959f3a3d9ca1338 | 02e36d7d76859d391c080e2a8690d1f80247f308 | refs/heads/master | 2022-12-14T02:15:44.251677 | 2020-09-16T20:57:11 | 2020-09-16T20:57:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,848 | py | import os
import sys
import logging
import warnings
from itertools import chain
from functools import partial
from collections import namedtuple
import ujson
import numpy as np
import pandas as pd
from dvidutils import LabelMapper
from . import dvid_api_wrapper, fetch_generic_json
from .common import post_tags
from .node import fetch_instance_info
from .voxels import fetch_volume_box
from ..util import Timer, Grid, boxes_from_grid, round_box, tqdm_proxy, compute_parallel, gen_json_objects, encode_coords_to_uint64, decode_coords_from_uint64
logger = logging.getLogger(__name__)
@dvid_api_wrapper
def post_sync(server, uuid, instance, sync_instances, replace=False, *, session=None):
"""
Appends to list of data instances with which the annotations are synced.
Args:
server:
dvid server, e.g. 'emdata3:8900'
uuid:
dvid uuid, e.g. 'abc9'
instance:
dvid annotations instance name, e.g. 'synapses'
sync_instances:
list of dvid instances to which the annotations instance should be synchronized,
e.g. ['segmentation']
replace:
If True, replace existing sync instances with the given sync_instances.
Otherwise append the sync_instances.
"""
body = { "sync": ",".join(sync_instances) }
params = {}
if replace:
params['replace'] = str(bool(replace)).lower()
r = session.post(f'{server}/api/node/{uuid}/{instance}/sync', json=body, params=params)
r.raise_for_status()
# Synonym
post_annotation_sync = post_sync
# The common post_tags() function works for annotation instances.
#post_tags = post_tags
@dvid_api_wrapper
def post_reload(server, uuid, instance, *, check=False, inmemory=True, session=None): # Note: See wrapper_proxies.post_reload()
"""
Forces asynchronous recreation of its tag and label indexed denormalizations.
Can be used to initialize a newly added instance.
Notes:
- This call merely triggers the reload and returns immediately.
For sufficiently large volumes, the reloading process on DVID will take hours.
The only way to determine that the reloading process has completed is to
monitor the dvid log file for a message that includes the
words ``Finished denormalization``.
- The instance will return errors for any POST request
while denormalization is ongoing.
Args:
server:
dvid server, e.g. 'emdata4:8900'
uuid:
dvid uuid, e.g. 'abc9'
instance:
dvid annotations instance name, e.g. 'synapses'
check:
If True, check denormalizations, writing to log when issues
are detected, and only replacing denormalization when it is incorrect.
inmemory:
If True, use in-memory reload, which assumes the server
has enough memory to hold all annotations in memory.
"""
params = {}
if check:
params['check'] = "true"
if not inmemory:
params['inmemory'] = "false"
r = session.post(f'{server}/api/node/{uuid}/{instance}/reload', params=params)
r.raise_for_status()
# Synonym
post_annotation_reload = post_reload
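# Example sketch: initializing a freshly created annotation instance by syncing
# it to a segmentation instance and then triggering the denormalization reload.
# The server, uuid, and instance names below are placeholders.
def _example_sync_and_reload():
    server, uuid = 'emdata4:8900', 'abc9'
    post_sync(server, uuid, 'synapses', ['segmentation'])
    # The reload is asynchronous; watch the dvid log for "Finished denormalization"
    # to know when the instance is ready for use.
    post_reload(server, uuid, 'synapses')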
@dvid_api_wrapper
def fetch_label(server, uuid, instance, label, relationships=False, *, format='json', session=None):
"""
Returns all point annotations within the given label as an array of elements.
This endpoint is only available if the annotation data instance is synced with
voxel label data instances (labelblk, labelarray, labelmap).
Args:
server:
dvid server, e.g. 'emdata3:8900'
uuid:
dvid uuid, e.g. 'abc9'
instance:
dvid annotations instance name, e.g. 'synapses'
label:
Body ID
relationships:
Set to true to return all relationships for each annotation.
format:
Either 'json' or 'pandas'.
Returns:
JSON list or pandas DataFrame
"""
assert format in ('json', 'pandas')
params = { 'relationships': str(bool(relationships)).lower() }
r = session.get(f'{server}/api/node/{uuid}/{instance}/label/{label}', params=params)
r.raise_for_status()
if format == 'json':
return r.json()
else:
return load_elements_as_dataframe(r.json())
# Synonym. See wrapper_proxies.py
fetch_annotation_label = fetch_label
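# Example sketch: fetch every point annotation on a single body as a DataFrame
# and tally the annotation kinds.  The body ID and instance names are placeholders.
def _example_fetch_body_annotations():
    server, uuid = 'emdata4:8900', 'abc9'
    df = fetch_label(server, uuid, 'synapses', 1071121755, format='pandas')
    return df['kind'].value_counts()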
@dvid_api_wrapper
def fetch_tag(server, uuid, instance, tag, relationships=False, *, session=None):
"""
Returns all point annotations with the given tag as an array of elements.
Args:
server:
dvid server, e.g. 'emdata3:8900'
uuid:
dvid uuid, e.g. 'abc9'
instance:
dvid annotations instance name, e.g. 'synapses'
tag:
The tag to search for
relationships:
Set to true to return all relationships for each annotation.
Returns:
JSON list
"""
params = { 'relationships': str(bool(relationships)).lower() }
r = session.get(f'{server}/api/node/{uuid}/{instance}/tag/{tag}', params=params)
r.raise_for_status()
return r.json()
@dvid_api_wrapper
def fetch_roi(server, uuid, instance, roi, roi_uuid=None, *, session=None):
"""
Returns all point annotations within the ROI. Currently, this
request will only work for ROIs that have same block size as
the annotation data instance. Therefore, most ROIs (32px blocks) are not
not compatible with most labelmap instances (64px blocks).
Warning:
The name 'fetch_roi()' clashes with a function in dvid.roi, so you
may need to explicitly import dvid.annotations to access this function:
from dvid.annotations import fetch_roi
Args:
server:
dvid server, e.g. 'emdata3:8900'
uuid:
dvid uuid, e.g. 'abc9'
instance:
dvid annotations instance name, e.g. 'synapses'
roi:
The name of a roi instance, e.g. 'AL-lm'
roi_uuid:
If provided, the ROI will be fetched at this version.
Otherwise, the ROI will be fetched at the same version
as the requested annotation instance.
Returns:
JSON list
"""
if roi_uuid:
roi = roi + ',' + roi_uuid
r = session.get(f'{server}/api/node/{uuid}/{instance}/roi/{roi}')
r.raise_for_status()
return r.json()
# Synonym to avoid conflicts with roi.fetch_roi()
fetch_annotation_roi = fetch_roi
@dvid_api_wrapper
def fetch_elements(server, uuid, instance, box_zyx, *, format='json', session=None): #@ReservedAssignment
"""
Returns all point annotations within the given box.
Note:
Automatically includes relationships if format=True,
and automatically discards relationships if format=False.
Note:
This function is best for fetching relatively
sparse annotations, to-do annotations.
For synapse annotations, see ``fetch_synapses_in_batches()``.
Args:
server:
dvid server, e.g. 'emdata3:8900'
uuid:
dvid uuid, e.g. 'abc9'
instance:
dvid annotations instance name, e.g. 'synapses'
box_zyx:
The bounds of the subvolume from which to fetch annotation elements.
Given as a pair of coordinates (start, stop), e.g. [(0,0,0), (10,20,30)],
in Z,Y,X order. It need not be block-aligned.
format:
Either 'json' or 'pandas'
If 'pandas', convert the elements into a dataframe
with separate columns for X,Y,Z and each property.
In the pandas case, relationships are discarded.
Returns:
JSON list
"""
assert format in ('json', 'pandas')
box_zyx = np.asarray(box_zyx)
shape = box_zyx[1] - box_zyx[0]
shape_str = '_'.join(map(str, shape[::-1]))
offset_str = '_'.join(map(str, box_zyx[0, ::-1]))
url = f'{server}/api/node/{uuid}/{instance}/elements/{shape_str}/{offset_str}'
data = fetch_generic_json(url, session=session)
# The endooint returns 'null' instead of an empty list, on old servers at least.
# But we always return a list.
data = data or []
if format == 'pandas':
return load_elements_as_dataframe(data)
else:
return data
def load_elements_as_dataframe(elements):
"""
Convert the given elements from JSON to a pandas DataFrame.
Note:
For synapse annotations in particular,
see ``load_synapses_as_dataframes()``
"""
pos = np.zeros((len(elements), 3), dtype=np.int32)
kinds = []
tags = []
prop_arrays = {}
for i, e in enumerate(elements):
pos[i] = e['Pos']
kinds.append(e['Kind'])
tags.append(e['Tags'])
if 'Prop' not in e or not e['Prop']:
continue
for k, v in e['Prop'].items():
pa = prop_arrays.get(k)
if pa is None:
pa = prop_arrays[k] = np.empty(len(elements), dtype=object)
pa[i] = v
return pd.DataFrame({'z': pos[:, 2], 'y': pos[:,1], 'x': pos[:,0],
'kind': kinds, 'tags': tags, **prop_arrays})
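# Example sketch: fetch the elements in a small subvolume as raw JSON and convert
# them to a DataFrame explicitly (equivalent to passing format='pandas' directly).
# The box and instance name are placeholders.
def _example_elements_to_dataframe():
    server, uuid = 'emdata4:8900', 'abc9'
    box_zyx = [(0, 0, 0), (64, 64, 64)]
    elements = fetch_elements(server, uuid, 'bookmarks', box_zyx, format='json')
    return load_elements_as_dataframe(elements)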
@dvid_api_wrapper
def fetch_all_elements(server, uuid, instance, format='json', *, session=None):
"""
Returns all point annotations in the entire data instance, which could exceed data
response sizes (set by server) if too many elements are present. This should be
equivalent to the /blocks endpoint but without the need to determine extents.
The returned stream of data is the same as /blocks endpoint.
"""
url = f'{server}/api/node/{uuid}/{instance}/all-elements'
return fetch_generic_json(url, session=session)
@dvid_api_wrapper
def post_elements(server, uuid, instance, elements, kafkalog=True, *, session=None):
"""
Adds or modifies point annotations.
Args:
server:
dvid server, e.g. 'emdata3:8900'
uuid:
dvid uuid, e.g. 'abc9'
instance:
dvid annotations instance name, e.g. 'synapses'
elements:
Elements as JSON data (a python list-of-dicts).
This is the same format as returned by fetch_elements().
It is NOT the format returned by fetch_blocks().
If your data came from fetch_blocks(), you must extract and concatenate the values of that dict.
kafkalog:
If True, log kafka events for each posted element.
Example:
from itertools import chain
blocks = fetch_blocks(server, uuid, instance_1, box)
elements = list(chain(*blocks.values()))
post_elements(server, uuid, instance_2, elements)
"""
params = {}
if not kafkalog or kafkalog == 'off':
params['kafkalog'] = 'off'
r = session.post(f'{server}/api/node/{uuid}/{instance}/elements', json=elements, params=params)
r.raise_for_status()
@dvid_api_wrapper
def fetch_blocks(server, uuid, instance, box_zyx, *, session=None):
"""
Returns all point annotations within all blocks that intersect the given box.
This differs from fetch_elements() in the following ways:
- All annotations in the intersecting blocks are returned,
even annotations that lie outside of the specified box.
- The return value is a dict instead of a list.
Note: Automatically includes relationships.
Args:
server:
dvid server, e.g. 'emdata3:8900'
uuid:
dvid uuid, e.g. 'abc9'
instance:
dvid annotations instance name, e.g. 'synapses'
box_zyx:
The bounds of the subvolume from which to fetch annotation elements.
Given as a pair of coordinates (start, stop), e.g. [(0,0,0), (10,20,30)],
in Z,Y,X order. It need not be block-aligned.
Returns:
JSON dict { block_id : element-list }
"""
box_zyx = np.asarray(box_zyx)
shape = box_zyx[1] - box_zyx[0]
shape_str = '_'.join(map(str, shape[::-1]))
offset_str = '_'.join(map(str, box_zyx[0, ::-1]))
url = f'{server}/api/node/{uuid}/{instance}/blocks/{shape_str}/{offset_str}'
return fetch_generic_json(url, session=session)
@dvid_api_wrapper
def post_blocks(server, uuid, instance, blocks_json, kafkalog=False, *, session=None):
"""
Unlike the POST /elements endpoint, the /blocks endpoint is the fastest way to store
all point annotations and assumes the caller has (1) properly partitioned the elements
int the appropriate block for the block size (default 64) and (2) will do a POST /reload
to create the denormalized Label and Tag versions of the annotations after all
ingestion is completed.
This low-level ingestion also does not transmit subscriber events to associated
synced data (e.g., labelsz).
The POSTed JSON should be similar to the GET version with the block coordinate as
the key:
{
"10,381,28": [ array of point annotation elements ],
"11,381,28": [ array of point annotation elements ],
...
}
"""
params = {}
if not kafkalog:
params['kafkalog'] = 'off'
url = f'{server}/api/node/{uuid}/{instance}/blocks'
data = ujson.dumps(blocks_json).encode('utf-8')
r = session.post(url, data=data, params=params)
r.raise_for_status()
@dvid_api_wrapper
def delete_element(server, uuid, instance, coord_zyx, kafkalog=True, *, session=None):
"""
Deletes a point annotation given its location.
Args:
server:
dvid server, e.g. 'emdata3:8900'
uuid:
dvid uuid, e.g. 'abc9'
instance:
dvid annotations instance name, e.g. 'synapses'
coord_zyx:
coordinate (Z,Y,X)
kafkalog:
If True, log this deletion in kafka. Otherwise, don't.
"""
assert len(coord_zyx) == 3
coord_str = '_'.join(map(str, coord_zyx[::-1]))
params = {}
if not kafkalog:
params['kafkalog'] = 'off'
r = session.delete(f'{server}/api/node/{uuid}/{instance}/element/{coord_str}', params=params)
r.raise_for_status()
class SynapseWarning(UserWarning):
pass
def load_synapses_as_dataframes(elements, return_both_partner_tables=False):
"""
Load the given JSON elements as synapses a DataFrame.
Args:
elements:
JSON list of synapse annotation elements as returned by
fetch_elements(), etc.
return_both_partner_tables:
Debugging feature.
Helps detect DVID data inconsistencies, if used correctly.
If True, return two separate partner tables, computed
from the PreSyn and PostSyn relationship data, respectively.
That is, pre_partner_df contains the pre->post pairs found
in the 'PreSynTo' relationships, and post_partner_df contains
the pre->post found in the 'PostSynTo' relationships.
Note that the two tables will likely NOT be identical,
unless the given elements include every synapse in your volume.
By default, combine (and de-duplicate) the two tables.
Returns:
point_df:
One row for every t-bar and psd in the file, indicating its
location, confidence, and synapse type (PostSyn or PreSyn)
Columns: ['z', 'y', 'x', 'conf', 'kind', 'user']
Index: np.uint64, an encoded version of [z,y,x]
[post_]partner_df:
Indicates which T-bar each PSD is associated with.
One row for every psd in the file.
Columns: ['pre_id', 'post_id']
where the values correspond to the index of point_df.
Note:
It can generally be assumed that for the synapses we
load into dvid, every PSD (PostSyn) is
associated with exactly one T-bar (PreSyn).
[pre_partner_df]:
Only returned if return_both_partner_tables=True
"""
#with warnings.catch_warnings():
# warnings.simplefilter("once", category=SynapseWarning)
return _load_synapses_as_dataframes(elements, return_both_partner_tables)
def _load_synapses_as_dataframes(elements, return_both_partner_tables):
if not elements:
point_df = pd.DataFrame([], columns=['x', 'y', 'z', 'kind', 'conf', 'user'])
partner_df = pd.DataFrame([], columns=['post_id', 'pre_id'], dtype=np.uint64)
if return_both_partner_tables:
return point_df, partner_df, partner_df
else:
return point_df, partner_df
# Accumulating separate lists for each column ought to be
# faster than building a list-of-tuples, I think.
# Primary columns
xs = []
ys = []
zs = []
kinds = []
confs = []
users = []
# Relationship coordinates
# [(pre_z, pre_y, pre_x, post_z, post_y, post_x), ...]
pre_rel_points = []
post_rel_points = []
need_fake_point = False
for e in elements:
x,y,z = e['Pos']
xs.append(x)
ys.append(y)
zs.append(z)
kinds.append( e['Kind'] )
confs.append( float(e.get('Prop', {}).get('conf', 0.0)) )
users.append( e.get('Prop', {}).get('user', '') )
if 'Rels' not in e or len(e['Rels']) == 0:
# In general, there should never be
# a tbar or psd with no relationships at all.
# That indicates an inconsistency in the database.
# To keep track of such cases, we add a special connection to point (0,0,0).
#warnings.warn("At least one synapse had no relationships! "
# "Adding artificial partner(s) to (0,0,0).",
# SynapseWarning)
need_fake_point = True
if e['Kind'] == 'PreSyn':
pre_rel_points.append( (z,y,x, 0,0,0) )
else:
post_rel_points.append( (0,0,0, z,y,x) )
else:
for rel in e['Rels']:
rx, ry, rz = rel['To']
if rx == ry == rz == 0:
# We usually assume (0,0,0) is not a real synapse, so it can be used in the case of "orphan" synapses.
# But in this case, apparently a real synapse was found at (0,0,0), obfuscating the warning above.
warnings.warn("Huh? The fetched synapse data actually contains a relationship to point (0,0,0)!")
if e['Kind'] == 'PreSyn':
pre_rel_points.append( (z,y,x, rz,ry,rx) )
else:
post_rel_points.append( (rz,ry,rx, z,y,x) )
# See warning above.
if need_fake_point:
xs.append(0)
ys.append(0)
zs.append(0)
kinds.append('Fake')
confs.append(0.0)
users.append('neuclease.dvid.annotation.load_synapses_as_dataframes')
point_df = pd.DataFrame( {'z': zs, 'y': ys, 'x': xs}, dtype=np.int32 )
kind_dtype = pd.CategoricalDtype(categories=["PreSyn", "PostSyn", "Fake"], ordered=False)
point_df['kind'] = pd.Series(kinds, dtype=kind_dtype)
point_df['conf'] = pd.Series(confs, dtype=np.float32)
point_df['user'] = pd.Series(users, dtype='category')
point_df.index = encode_coords_to_uint64(point_df[['z', 'y', 'x']].values)
point_df.index.name = 'point_id'
def construct_partner_df(rel_points):
if rel_points:
rel_points = np.array(rel_points, np.int32)
pre_partner_ids = encode_coords_to_uint64(rel_points[:, :3])
post_partner_ids = encode_coords_to_uint64(rel_points[:, 3:])
else:
pre_partner_ids = np.zeros((0,), dtype=np.uint64)
post_partner_ids = np.zeros((0,), dtype=np.uint64)
partner_df = pd.DataFrame({'pre_id': pre_partner_ids, 'post_id': post_partner_ids})
return partner_df
pre_partner_df = construct_partner_df(pre_rel_points)
post_partner_df = construct_partner_df(post_rel_points)
if return_both_partner_tables:
return point_df, pre_partner_df, post_partner_df
# For synapses near block borders, maybe only the PreSyn or
# only the PostSyn happens to be in the given elements.
# But in most cases, both PreSyn and PostSyn are present,
# and therefore the relationship is probably listed twice.
# Drop duplicates.
partner_df = pd.concat((pre_partner_df, post_partner_df), ignore_index=True)
partner_df.drop_duplicates(inplace=True)
return point_df, partner_df
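# Example sketch: combine the point and partner tables produced above to count
# how many PSDs each t-bar has.  Assumes 'elements' is synapse JSON as returned
# by fetch_elements(); the merge relies on the encoded (z,y,x) point_id index.
def _example_count_psds_per_tbar(elements):
    point_df, partner_df = load_synapses_as_dataframes(elements)
    psd_counts = partner_df.groupby('pre_id').size().rename('num_psds')
    tbar_df = point_df.query('kind == "PreSyn"')
    return tbar_df.merge(psd_counts, left_index=True, right_index=True, how='left')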
def fetch_bodies_for_synapses(server, uuid, seg_instance, point_df=None, partner_df=None, batch_size=10_000, threads=0, processes=0):
from .labelmap import fetch_labels_batched
if point_df is not None:
bodies = fetch_labels_batched(server, uuid, seg_instance, point_df[['z', 'y', 'x']].values,
batch_size=batch_size, threads=threads, processes=processes)
point_df['body'] = bodies
if partner_df is not None:
pre_coords = decode_coords_from_uint64(partner_df['pre_id'].values)
post_coords = decode_coords_from_uint64(partner_df['post_id'].values)
partner_df['pre_body'] = fetch_labels_batched(server, uuid, seg_instance, pre_coords,
batch_size=batch_size, threads=threads, processes=processes)
partner_df['post_body'] = fetch_labels_batched(server, uuid, seg_instance, post_coords,
batch_size=batch_size, threads=threads, processes=processes)
def fetch_synapses_in_batches(server, uuid, synapses_instance, bounding_box_zyx=None, batch_shape_zyx=(256,256,64000),
format='pandas', endpoint='blocks', processes=8, #@ReservedAssignment
check_consistency=False, return_both_partner_tables=False):
"""
Fetch all synapse annotations for the given labelmap volume (or subvolume) and synapse instance.
Box-shaped regions are queried in batches according to the given batch shape.
Returns either the raw JSON or a pandas DataFrame.
Note:
Every synapse should have at least one partner (relationship).
If a synapse is found without a partner, that indicates a problem with the database.
In that case, a warning is emitted and the synapse is given an artificial partner to point (0,0,0).
Note:
On the hemibrain dataset (~70 million points),
this function takes ~4 minutes if you use 32 processes.
Warning:
For large volumes with many synapses, the 'json' format requires a lot of RAM,
and is not particularly convenient to save/load.
See also:
``save_synapses_npy()``, ``load_synapses_npy()``
Args:
server:
dvid server, e.g. 'emdata3:8900'
uuid:
dvid uuid, e.g. 'abc9'
synapses_instance:
dvid annotations instance name, e.g. 'synapses'
bounding_box_zyx:
The bounds of the subvolume from which to fetch synapse annotations.
Given as a pair of coordinates (start, stop), e.g. [(0,0,0), (256,1024,1024)],
in Z,Y,X order. It must be block-aligned.
If not provided, the entire bounding box of the sync'd
labelmap instance (e.g. 'segmentation') is used.
batch_shape_zyx:
What box shape to use for each /elements request.
Must be block-aligned (i.e. multiple of 64px in all dimensions).
format:
Either 'json' or 'pandas'. If 'pandas, return a DataFrame.
endpoint:
Either 'blocks' (faster) or 'elements' (supported on older servers).
check_consistency:
DVID debug feature. Checks for consistency in the response to the /blocks endpoint.
return_both_partner_tables:
Debugging feature.
Helps detect DVID data inconsistencies, if used correctly.
If True, return two separate partner tables, computed
from the PreSyn and PostSyn relationship data, respectively.
That is, pre_partner_df contains the pre->post pairs found
in the 'PreSynTo' relationships, and post_partner_df contains
the pre->post found in the 'PostSynTo' relationships.
Note that the two tables will likely NOT be identical,
unless the given elements include every synapse in your volume.
When return_both_partner_tables=False, then automatically combine
(and de-duplicate) the two tables.
Returns:
If format == 'json', a list of JSON elements.
If format == 'pandas', returns two or three dataframes,
depending on return_both_partner_tables:
point_df:
One row for every t-bar and psd in the file, indicating its
location, confidence, and synapse type (PostSyn or PreSyn)
Columns: ['z', 'y', 'x', 'conf', 'kind', 'user']
Index: np.uint64, an encoded version of [z,y,x]
[pre_]partner_df:
Indicates which T-bar each PSD is associated with.
One row for every psd in the file.
Columns: ['pre_id', 'post_id']
where the values correspond to the index of point_df.
Note:
It can generally be assumed that for the synapses we
load into dvid, every PSD (PostSyn) is
associated with exactly one T-bar (PreSyn).
[post_partner_df]:
Only returned if return_both_partner_tables=True
"""
assert format in ('pandas', 'json')
assert endpoint in ('blocks', 'elements')
assert not return_both_partner_tables or format == 'pandas', \
"return_both_partner_tables does not apply unless you're asking for pandas format"
if bounding_box_zyx is None or isinstance(bounding_box_zyx, str):
# Determine name of the segmentation instance that's
# associated with the given synapses instance.
syn_info = fetch_instance_info(server, uuid, synapses_instance)
seg_instance = syn_info["Base"]["Syncs"][0]
if isinstance(bounding_box_zyx, str):
assert bounding_box_zyx == seg_instance, \
("The segmentation instance name you provided doesn't match the name of the sync'd instance.\n"
"Please provide an explicit bounding-box.")
bounding_box_zyx = fetch_volume_box(server, uuid, seg_instance)
else:
bounding_box_zyx = np.asarray(bounding_box_zyx)
assert (bounding_box_zyx % 64 == 0).all(), "box must be block-aligned"
batch_shape_zyx = np.asarray(batch_shape_zyx)
assert (batch_shape_zyx % 64 == 0).all(), "batch shape must be block-aligned"
boxes = [*boxes_from_grid(bounding_box_zyx, Grid(batch_shape_zyx))]
fn = partial(_fetch_synapse_batch, server, uuid, synapses_instance,
format=format, endpoint=endpoint, check_consistency=check_consistency,
return_both_partner_tables=return_both_partner_tables)
initializer = None
#initializer = lambda: warnings.simplefilter("once", category=SynapseWarning)
results = compute_parallel(fn, boxes, processes=processes, ordered=False, leave_progress=True, initializer=initializer)
if format == 'json':
return list(chain(*results))
elif format == 'pandas':
if return_both_partner_tables:
point_dfs, pre_partner_dfs, post_partner_dfs = zip(*results)
pre_partner_dfs = [*filter(len, pre_partner_dfs)]
post_partner_dfs = [*filter(len, post_partner_dfs)]
else:
point_dfs, partner_dfs = zip(*results)
partner_dfs = [*filter(len, partner_dfs)]
# Any zero-length dataframes might have the wrong dtypes,
# which would screw up the concat step. Remove them.
point_dfs = [*filter(len, point_dfs)]
if len(point_dfs) == 0:
# Return empty dataframe
return load_synapses_as_dataframes([], return_both_partner_tables)
point_df = pd.concat(point_dfs)
# Make sure user and kind are Categorical
point_df['kind'] = point_df['kind'].astype("category")
point_df['user'] = point_df['user'].astype("category")
# If any 'fake' synapses were added due to inconsistent data,
# Drop duplicates among them.
if (point_df['kind'] == "Fake").any():
# All fake rows are the same. Drop all but the first.
fake_df = point_df.query('kind == "Fake"').iloc[0:1]
point_df = pd.concat((fake_df, point_df.query('kind != "Fake"')))
# Sort, mostly to ensure that the Fake point (if any) is at the top.
point_df.sort_values(['z', 'y', 'x'], inplace=True)
if return_both_partner_tables:
pre_partner_df = pd.concat(pre_partner_dfs, ignore_index=True)
post_partner_df = pd.concat(post_partner_dfs, ignore_index=True)
return point_df, pre_partner_df, post_partner_df
else:
partner_df = pd.concat(partner_dfs, ignore_index=True)
partner_df.drop_duplicates(inplace=True)
return point_df, partner_df
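# Example sketch: fetch every synapse in a volume in parallel batches and save
# the points table as .npy for later reuse.  Server, uuid, and instance names
# are placeholders; the bounding box defaults to that of the sync'd segmentation.
def _example_fetch_all_synapses():
    server, uuid = 'emdata4:8900', 'abc9'
    point_df, partner_df = fetch_synapses_in_batches(server, uuid, 'synapses', processes=8)
    save_synapses_npy(point_df, 'synapse-points.npy')
    return point_df, partner_df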
def _fetch_synapse_batch(server, uuid, synapses_instance, batch_box, format, endpoint, # @ReservedAssignment
check_consistency, return_both_partner_tables):
"""
Helper for fetch_synapses_in_batches(), above.
As a special check, if format 'pandas' is used, we also check for dvid inconsistencies.
"""
assert not check_consistency or endpoint == 'blocks', \
"check_consistency can only be used with the blocks endpoint."
if endpoint == 'blocks':
blocks = fetch_blocks(server, uuid, synapses_instance, batch_box)
elements = list(chain(*blocks.values()))
if check_consistency:
for key, els in blocks.items():
if len(els) == 0:
continue
block = [int(c) for c in key.split(',')]
block_box = 64*np.array((block, block))
block_box[1] += 64
pos = np.array([e['Pos'] for e in els])
if (pos < block_box[0]).any() or (pos >= block_box[1]).any():
msg = ("Detected a DVID inconsistency: Some elements fetched from block "
f"at {block_box[0, ::-1].tolist()} (XYZ) fall outside the block!")
raise RuntimeError(msg)
elif endpoint == 'elements':
elements = fetch_elements(server, uuid, synapses_instance, batch_box)
else:
raise AssertionError("Invalid endpoint choice")
if format == 'json':
return elements
if return_both_partner_tables:
point_df, pre_partner_df, post_partner_df = load_synapses_as_dataframes(elements, True)
return point_df, pre_partner_df, post_partner_df
else:
point_df, partner_df = load_synapses_as_dataframes(elements, False)
return point_df, partner_df
def save_synapses_npy(synapse_point_df, npy_path, save_index=None):
"""
Save the given synapse point DataFrame to a .npy file,
with careful handling of strings to avoid creating any
pickled objects (which are annoying to load).
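    Example (illustrative round trip; the file path is just a placeholder):
        save_synapses_npy(point_df, '/tmp/synapses.npy')
        point_df2 = load_synapses_npy('/tmp/synapses.npy')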
"""
assert save_index in (True, False, None)
if save_index is None:
save_index = (synapse_point_df.index.name is not None)
dtypes = {}
# Avoid 'pickle' objects (harder to load) by converting
# categories/strings to fixed-width strings
max_kind = synapse_point_df['kind'].map(len).astype(int).max()
dtypes['kind'] = f'U{max_kind}'
if 'user' in synapse_point_df:
max_user = synapse_point_df['user'].map(len).astype(int).max()
dtypes['user'] = f'U{max_user}'
np.save(npy_path, synapse_point_df.to_records(index=save_index, column_dtypes=dtypes))
def load_synapses_npy(npy_path):
"""
Load the given .npy file as a synapse point DataFrame,
with special handling of the string columns to use
categorical dtypes (saves RAM).
"""
records = np.load(npy_path, allow_pickle=True)
numeric_cols = ['z', 'y', 'x', 'conf', 'label', 'body', 'sv']
numeric_cols = [*filter(lambda c: c in records.dtype.names, numeric_cols)]
df = pd.DataFrame(records[numeric_cols])
if 'point_id' in records.dtype.names:
df.index = records['point_id']
df['kind'] = pd.Series(records['kind'], dtype='category')
if 'user' in records.dtype.names:
df['user'] = pd.Series(records['user'], dtype='category')
return df
def save_synapses_csv(synapse_point_df, csv_path, index=False):
"""
Save the given synapse points table to CSV.
Note:
Usually it's more efficient to read/write .npy files.
See ``save_synapses_npy()``.
"""
synapse_point_df.to_csv(csv_path, header=True, index=index)
def load_synapses_csv(csv_path):
"""
Convenience function for reading saved synapse
table from CSV with the proper dtypes.
Note:
Usually it's more efficient to read/write .npy files.
See ``load_synapses_npy()``.
"""
dtype = { 'x': np.int32,
'y': np.int32,
'z': np.int32,
'kind': 'category',
'conf': np.float32,
'user': 'category',
'label': np.uint64,
'body': np.uint64,
'sv': np.uint64 }
return pd.read_csv(csv_path, header=0, dtype=dtype)
def load_synapses(path):
"""
Load synapse points from the given file path.
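    Accepts .csv, .npy, or .json files, and passes a pre-loaded DataFrame through unchanged.
    Example (illustrative; the path is a placeholder):
        points_df = load_synapses('/path/to/synapses.npy')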
"""
if isinstance(path, pd.DataFrame):
return path
assert isinstance(path, str)
_, ext = os.path.splitext(path)
assert ext in ('.csv', '.npy', '.json')
if ext == '.csv':
points_df = load_synapses_csv(path)
elif ext == '.npy':
points_df = load_synapses_npy(path)
elif ext == '.json':
        points_df, _partner_df = load_synapses_from_json(path)
return points_df
def load_synapses_from_json(json_path, batch_size=1000):
"""
Load the synapses to a dataframe from a JSON file
(which must have the same structure as the elements response from DVID).
The JSON file is consumed in batches, avoiding the need
to load the entire JSON document in RAM at once.
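    Example (illustrative; the path is a placeholder):
        point_df, partner_df = load_synapses_from_json('/path/to/elements.json', batch_size=1000)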
"""
point_dfs = []
partner_dfs = []
try:
with open(json_path, 'r') as f:
for elements in tqdm_proxy( gen_json_objects(f, batch_size) ):
point_df, partner_df = load_synapses_as_dataframes(elements)
point_dfs.append(point_df)
partner_dfs.append(partner_df)
except KeyboardInterrupt:
msg = f"Stopping early due to KeyboardInterrupt. ({len(point_dfs)} batches completed)\n"
sys.stderr.write(msg)
point_df = pd.concat(point_dfs)
partner_df = pd.concat(partner_dfs)
return point_df, partner_df
def load_relationships(elements, kind=None):
"""
Given a list of JSON elements, load all relationships as a table.
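    Example (illustrative sketch; 'elements' is a JSON list such as the one
    returned by fetch_elements() or chained from fetch_blocks()):
        rel_df = load_relationships(elements, kind='PreSyn')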
"""
from_x = []
from_y = []
from_z = []
to_x = []
to_y = []
to_z = []
rels = []
for element in tqdm_proxy(elements):
if kind and (kind != element['Kind']):
continue
fx, fy, fz = element['Pos']
for obj in element['Rels']:
tx, ty, tz = obj['To']
from_x.append(fx)
from_y.append(fy)
from_z.append(fz)
to_x.append(tx)
to_y.append(ty)
to_z.append(tz)
rels.append(obj['Rel'])
df = pd.DataFrame( {'from_x': from_x,
'from_y': from_y,
'from_z': from_z,
'to_x': to_x,
'to_y': to_y,
'to_z': to_z,
}, dtype=np.int32 )
df['rel'] = pd.Series(rels, dtype='category')
return df
def compute_weighted_edge_table(relationships_df, synapses_df):
"""
Given a synapse 'relationship table' with columns [from_x, from_y, from_z, to_x, to_y, to_z],
and a synapse table with columns [x, y, z, body],
    perform the necessary merge operations to determine from_body and to_body for each relationship,
    and then aggregate those relationships to yield a table of weights for each unique body pair.
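    Example (minimal sketch; both inputs are DataFrames with the columns named above):
        edges = compute_weighted_edge_table(relationships_df, synapses_df)
        top10 = edges.head(10)  # strongest body-to-body connections (already sorted by weight)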
"""
from_bodies = relationships_df.merge(synapses_df[['z', 'y', 'x', 'body']], how='left',
left_on=['from_z', 'from_y', 'from_x'],
right_on=['z', 'y', 'x'])['body']
to_bodies = relationships_df.merge(synapses_df[['z', 'y', 'x', 'body']], how='left',
left_on=['to_z', 'to_y', 'to_x'],
right_on=['z', 'y', 'x'])['body']
edge_table = pd.DataFrame({'from_body': from_bodies,
'to_body': to_bodies})
weighted_edge_table = edge_table.groupby(['from_body', 'to_body']).size()
weighted_edge_table.sort_values(ascending=False, inplace=True)
weighted_edge_table.name = 'weight'
return weighted_edge_table.reset_index()
def load_gary_synapse_json(path, processes=8, batch_size=100_000):
"""
Load a synapse json file from Gary's format into two tables.
Args:
path:
A path to a .json file.
See ``neuclease/tests/test_annotation.py`` for an example.
processes:
How many processes to use in parallel to load the data.
batch_size:
            The size (number of t-bars) to process per batch during multiprocessing.
Returns:
point_df:
One row for every t-bar and psd in the file.
Columns: ['z', 'y', 'x', 'confidence', 'kind']
Index: np.uint64, an encoded version of [z,y,x]
partner_df:
Indicates which T-bar each PSD is associated with.
One row for every psd in the file.
Columns: ['post_id', 'pre_id']
where the values correspond to the index of point_df.
Note:
Gary guarantees that every PSD (PostSyn) is
associated with exactly 1 T-bar (PreSyn).
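    Example (illustrative; the path is a placeholder):
        point_df, partner_df = load_gary_synapse_json('/path/to/synapses.json', processes=8)
        num_tbars = (point_df['kind'] == 'PreSyn').sum()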
"""
logger.info(f"Loading JSON data from {path}")
with open(path, 'r') as f:
data = ujson.load(f)["data"]
if processes == 0:
logger.info("Generating tables in the main process (not parallel).")
return _load_gary_synapse_data(data)
batches = []
for batch_start in range(0, len(data), batch_size):
batches.append(data[batch_start:batch_start+batch_size])
logger.info(f"Converting via {len(batches)} batches (using {processes} processes).")
results = compute_parallel(_load_gary_synapse_data, batches, processes=processes)
point_dfs, partner_dfs = zip(*results)
logger.info("Combining results")
point_df = pd.concat(point_dfs)
partner_df = pd.concat(partner_dfs, ignore_index=True)
return point_df, partner_df
def _load_gary_synapse_data(data):
"""
Helper for load_gary_synapse_json()
"""
point_table = []
confidences = []
kinds = []
partner_table = []
for syn in data:
tx, ty, tz = syn["T-bar"]["location"]
confidence = float(syn["T-bar"]["confidence"])
point_table.append( (tz, ty, tx) )
confidences.append( confidence )
kinds.append('PreSyn')
for partner in syn["partners"]:
px, py, pz = partner["location"]
confidence = float(partner["confidence"])
point_table.append( (pz, py, px) )
confidences.append(confidence)
kinds.append('PostSyn')
partner_table.append( (tz, ty, tx, pz, py, px) )
points = np.array(point_table, np.int32)
point_df = pd.DataFrame(points, columns=['z', 'y', 'x'], dtype=np.int32)
point_df['conf'] = np.array(confidences, np.float32)
point_df['kind'] = pd.Series(kinds, dtype='category')
point_ids = encode_coords_to_uint64(points)
point_df.index = point_ids
point_df.index.name = 'point_id'
partner_points = np.array(partner_table, dtype=np.int32)
tbar_partner_ids = encode_coords_to_uint64(partner_points[:,:3])
psd_partner_ids = encode_coords_to_uint64(partner_points[:,3:])
partner_df = pd.DataFrame({'post_id': psd_partner_ids, 'pre_id': tbar_partner_ids})
return point_df, partner_df
def body_synapse_counts(synapse_samples):
"""
Given a DataFrame of sampled synapses (or a path to a CSV file),
Tally synapse totals (by kind) for each body.
Returns:
DataFrame with columns: ['PreSyn', 'PostSyn'], indexed by 'body'.
(The PreSyn/PostSyn columns are synapse counts.)
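    Example (minimal sketch; synapse_df must provide 'body' and 'kind' columns):
        counts = body_synapse_counts(synapse_df)
        candidates = counts.query('PreSyn >= 2 or PostSyn >= 10')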
"""
if isinstance(synapse_samples, str):
synapse_samples = pd.read_csv(synapse_samples)
assert 'body' in synapse_samples.columns, "Samples must have a 'body' col."
assert 'kind' in synapse_samples.columns, "Samples must have a 'kind' col"
synapse_samples = synapse_samples[['body', 'kind']]
synapse_counts = synapse_samples.pivot_table(index='body', columns='kind', aggfunc='size')
synapse_counts.fillna(0.0, inplace=True)
if 0 in synapse_counts.index:
msg = ("*** Synapse table includes body 0 and was therefore probably generated "
"from out-of-date data OR some synapses in your data fall on voxels with "
"no label (label 0). ***")
logger.warning(msg)
synapse_counts['PostSyn'] = synapse_counts['PostSyn'].astype(np.int32)
synapse_counts['PreSyn'] = synapse_counts['PreSyn'].astype(np.int32)
# Convert columns from categorical index to normal index,
# so the caller can easily append their own columns if they want.
synapse_counts.columns = synapse_counts.columns.tolist()
return synapse_counts[['PreSyn', 'PostSyn']]
def fetch_roi_synapses(server, uuid, synapses_instance, rois, fetch_labels=False, return_partners=False, processes=16):
"""
Fetch the coordinates and (optionally) body labels for
all synapses that fall within the given ROIs.
Args:
server:
DVID server, e.g. 'emdata4:8900'
uuid:
DVID uuid, e.g. 'abc9'
synapses_instance:
DVID synapses instance name, e.g. 'synapses'
rois:
            A single DVID ROI instance name or a list of them, e.g. 'EB' or ['EB', 'FB']
fetch_labels:
If True, also fetch the supervoxel and body label underneath each synapse,
returned in columns 'sv' and 'body'.
return_partners:
If True, also return the partners table.
processes:
How many parallel processes to use when fetching synapses and supervoxel labels.
Returns:
pandas DataFrame with columns:
            ``['z', 'y', 'x', 'kind', 'conf', 'roi_label', 'roi']`` and ``['sv', 'body']`` (if ``fetch_labels=True``)
If return_partners is True, also return the partners table.
Example:
        df = fetch_roi_synapses('emdata4:8900', '3c281', 'synapses', ['PB(L5)', 'PB(L7)'], fetch_labels=True, processes=8)
"""
# Late imports to avoid circular imports in dvid/__init__
from neuclease.dvid import fetch_combined_roi_volume, determine_point_rois, fetch_labels_batched, fetch_mapping, fetch_mappings
assert rois, "No rois provided, result would be empty. Is that what you meant?"
if isinstance(rois, str):
rois = [rois]
# Determine name of the segmentation instance that's
# associated with the given synapses instance.
syn_info = fetch_instance_info(server, uuid, synapses_instance)
seg_instance = syn_info["Base"]["Syncs"][0]
logger.info(f"Fetching mask for ROIs: {rois}")
# Fetch the ROI as a low-res array (scale 5, i.e. 32-px resolution)
roi_vol_s5, roi_box_s5, overlapping_pairs = fetch_combined_roi_volume(server, uuid, rois)
if len(overlapping_pairs) > 0:
logger.warning("Some ROIs overlapped and are thus not completely represented in the output:\n"
f"{overlapping_pairs}")
# Convert to full-res box
roi_box = (2**5) * roi_box_s5
# fetch_synapses_in_batches() requires a box that is 64-px-aligned
roi_box = round_box(roi_box, 64, 'out')
logger.info("Fetching synapse points")
# points_df is a DataFrame with columns for [z,y,x]
points_df, partners_df = fetch_synapses_in_batches(server, uuid, synapses_instance, roi_box, processes=processes)
# Append a 'roi_name' column to points_df
logger.info("Labeling ROI for each point")
determine_point_rois(server, uuid, rois, points_df, roi_vol_s5, roi_box_s5)
logger.info("Discarding points that don't overlap with the roi")
rois = {*rois}
points_df = points_df.query('roi in @rois').copy()
columns = ['z', 'y', 'x', 'kind', 'conf', 'roi_label', 'roi']
if fetch_labels:
logger.info("Fetching supervoxel under each point")
svs = fetch_labels_batched(server, uuid, seg_instance,
points_df[['z', 'y', 'x']].values,
supervoxels=True,
processes=processes)
with Timer("Mapping supervoxels to bodies", logger):
# Arbitrary heuristic for whether to do the
# body-lookups on DVID or on the client.
if len(svs) < 100_000:
bodies = fetch_mapping(server, uuid, seg_instance, svs)
else:
mapping = fetch_mappings(server, uuid, seg_instance)
mapper = LabelMapper(mapping.index.values, mapping.values)
bodies = mapper.apply(svs, True)
points_df['sv'] = svs
points_df['body'] = bodies
columns += ['body', 'sv']
if return_partners:
# Filter
#partners_df = partners_df.query('post_id in @points_df.index and pre_id in @points_df.index').copy()
# Faster filter (via merge)
partners_df = partners_df.merge(points_df[[]], 'inner', left_on='pre_id', right_index=True)
partners_df = partners_df.merge(points_df[[]], 'inner', left_on='post_id', right_index=True)
return points_df[columns], partners_df
else:
return points_df[columns]
def determine_bodies_of_interest(server, uuid, synapses_instance, rois=None, min_tbars=2, min_psds=10, processes=16, *, synapse_table=None, seg_instance=None):
"""
Determine which bodies fit the given criteria
for minimum synapse counts WITHIN the given ROIs.
Note that the min_tbars and min_psds criteria are OR'd together.
A body need only match at least one of the criteria to be considered "of interest".
This function is just a convenience wrapper around calling
fetch_roi_synapses(), fetch_labels_batched(), and body_synapse_counts().
Note:
If your synapse table is already loaded and already has a 'body' column,
and you aren't providing any rois to filter with, then this function is
merely equivalent to calling body_synapse_counts() and filtering it
for tbar/psd requirements.
Args:
server:
dvid server
uuid:
dvid uuid
synapses_instance:
synapses annotation instance name, e.g. 'synapses'
If you are providing a pre-loaded synapse_table and overriding seg_instance,
you can set synapses_instance=None.
rois:
A list of ROI instance names. If provided, ONLY synapses
within these ROIs will be counted when determining bodies of interest.
If not provided, all synapses in the volume will be counted.
min_tbars:
All bodies with at least this many t-bars (PreSyn annotations) will be "of interest".
min_psds:
All bodies with at least this many PSDs (PostSyn annotations) will be "of interest".
processes:
How many parallel processes to use when fetching synapses and body labels.
synapse_table:
If you have a pre-loaded synapse table (or a path to one stored as .npy or .csv),
you may provide it here, in which case the synapse points won't be fetched from DVID.
Furthermore, if the table already contains a 'body' column, then it is presumed to be
accurate and body labels will not be fetched from DVID.
seg_instance:
If you want to override the segmentation instance name to use
            (rather than inspecting the synapse instance syncs), provide it here.
Returns:
pandas DataFrame, as returned by body_synapse_counts().
That is, DataFrame with columns: ['PreSyn', 'PostSyn'], indexed by 'body',
where only bodies of interest are included in the table.
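    Example (illustrative; the server/uuid/instance/ROI names are placeholders):
        boi_df = determine_bodies_of_interest('emdata4:8900', 'abc9', 'synapses',
                                              rois=['EB'], min_tbars=2, min_psds=10)
        boi_ids = boi_df.index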
"""
from neuclease.dvid import fetch_labels_batched, fetch_combined_roi_volume, determine_point_rois
# Download synapses if necessary
if synapse_table is None:
with Timer("Fetching synapse points", logger):
if rois is None:
# Fetch all synapses in the volume
points_df, _partners_df = fetch_synapses_in_batches(server, uuid, synapses_instance, processes=processes)
else:
# Fetch only the synapses within the given ROIs
points_df = fetch_roi_synapses(server, uuid, synapses_instance, rois, False, processes=processes)
else:
# User provided a pre-loaded synapse table (or a path to one)
if isinstance(synapse_table, str):
with Timer(f"Loading synapse table {synapse_table}", logger):
_, ext = os.path.splitext(synapse_table)
assert ext in ('.csv', '.npy')
if ext == '.csv':
synapse_table = load_synapses_csv(synapse_table)
elif ext == '.npy':
synapse_table = load_synapses_npy(synapse_table)
assert isinstance(synapse_table, pd.DataFrame)
assert not ({'z', 'y', 'x', 'kind'} - {*synapse_table.columns}), \
"Synapse table does not contain all expected columns"
points_df = synapse_table
if rois:
roi_vol_s5, roi_box_s5, _ = fetch_combined_roi_volume(server, uuid, rois)
determine_point_rois(server, uuid, rois, points_df, roi_vol_s5, roi_box_s5)
points_df = points_df.query('roi_label != 0')
if 'body' in points_df:
logger.info("Using user-provided body labels")
else:
with Timer("Fetching synapse body labels", logger):
if seg_instance is None:
syn_info = fetch_instance_info(server, uuid, synapses_instance)
seg_instance = syn_info["Base"]["Syncs"][0]
points_df['body'] = fetch_labels_batched( server, uuid, seg_instance,
points_df[['z', 'y', 'x']].values,
processes=processes )
with Timer("Aggregating body-wise synapse counts"):
body_synapses_df = body_synapse_counts(points_df)
body_synapses_df = body_synapses_df.query('PreSyn >= @min_tbars or PostSyn >= @min_psds')
return body_synapses_df
ConsistencyResults = namedtuple("ConsistencyResults",
["orphan_tbars", "orphan_psds",
"pre_dupes", "post_dupes",
"only_in_tbar", "only_in_psd",
"bad_tbar_refs", "bad_psd_refs",
"oversubscribed_post", "oversubscribed_pre"])
def check_synapse_consistency(syn_point_df, pre_partner_df, post_partner_df):
"""
Given a synapse point table and TWO partners tables as returned when
calling ``fetch_synapses_in_batches(..., return_both_partner_tables=True)``,
Analyze the relationships to look for inconsistencies.
Note:
There are different types of results returned,
and they are not mutually exclusive.
For example, "orphan tbars" will also count toward
"non-reciprocal relationships", and also contribute to the "oversubscribed"
counts (since the orphans are artificially partnered to (0,0,0), which ends
up counting as oversubscribed).
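    Example (minimal sketch, assuming the inputs came from
    fetch_synapses_in_batches(..., return_both_partner_tables=True)):
        results = check_synapse_consistency(point_df, pre_partner_df, post_partner_df)
        print(f"{len(results.orphan_tbars)} orphan t-bars, {len(results.orphan_psds)} orphan PSDs")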
"""
# 'Orphan' points (a tbar or psd with no relationships at all)
orphan_tbars = pre_partner_df.query('post_id == 0')
orphan_psds = post_partner_df.query('pre_id == 0')
logger.info(f"Found {len(orphan_tbars)} orphan TBars")
logger.info(f"Found {len(orphan_psds)} orphan psds")
# Duplicate connections (one tbar references the same PSD twice or more)
pre_dupes = pre_partner_df.loc[pre_partner_df.duplicated()].drop_duplicates()
post_dupes = post_partner_df.loc[post_partner_df.duplicated()].drop_duplicates()
logger.info(f"Found {len(pre_dupes)} duplicated tbar->psd relationships.")
logger.info(f"Found {len(post_dupes)} duplicated psd<-tbar relationships.")
# Non-reciprocal (Tbar references PSD, but not the other way around, or vice-versa)
pre_nodupes_df = pre_partner_df.drop_duplicates()
merged = pre_nodupes_df.merge(post_partner_df.drop_duplicates(), 'outer', ['pre_id', 'post_id'], indicator='which')
only_in_tbar = merged.query('which == "left_only"')
only_in_psd = merged.query('which == "right_only"')
logger.info(f"Found {len(only_in_tbar)} non-reciprocal relationships from TBars")
logger.info(f"Found {len(only_in_psd)} non-reciprocal relationships from PSDs")
# Refs to nowhere (Tbar or PSD has a relationship to a point that doesn't exist)
point_ids = syn_point_df.index
bad_tbar_refs = pre_partner_df.query('post_id not in @point_ids')
bad_psd_refs = post_partner_df.query('pre_id not in @point_ids')
logger.info(f"Found {len(bad_tbar_refs)} references to non-existent PSDs")
logger.info(f"Found {len(bad_psd_refs)} references to non-existent TBars")
# Too many refs from a single PSD
oversubscribed_post = post_partner_df.loc[post_partner_df.duplicated('post_id')]
oversubscribed_pre = pre_nodupes_df.loc[pre_nodupes_df.duplicated('post_id')]
logger.info(f"Found {len(oversubscribed_post)} PSDs that contain more than one relationship")
logger.info(f"Found {len(oversubscribed_pre)} PSDs that are referenced by more than one TBar")
return ConsistencyResults( orphan_tbars, orphan_psds,
pre_dupes, post_dupes,
only_in_tbar, only_in_psd,
bad_tbar_refs, bad_psd_refs,
oversubscribed_post, oversubscribed_pre )
def post_tbar_jsons(server, uuid, instance, partner_df, merge_existing=True, processes=32, chunk_shape=(256, 256, 64000)):
"""
Post a large set of tbars (including their PSD relationships) to dvid,
using the POST /blocks annotation endpoint.
If you're posting T-bars only, with no associated PSDs,
you can omit the _post coordinate columns.
The points will be divided into block-aligned sets, serialized as JSON,
and sent to DVID via multiple processes.
Args:
server, uuid, instance:
annotation instance info
partner_df:
A DataFrame containing the following columns:
[
# tbar coordinates
'z_pre', 'y_pre', 'x_pre',
# confidence
'conf_pre',
# psd coordinates
'z_post', 'y_post', 'x_post',
# unique ID for each tbar. Appended for you if this is missing.
'pre_id',
]
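    Example (illustrative; the server/uuid/instance names are placeholders):
        post_tbar_jsons('emdata4:8900', 'abc9', 'synapses', partner_df,
                        merge_existing=True, processes=32)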
"""
logger.info("Computing chunk/block IDs")
if 'pre_id' not in partner_df.columns:
partner_df['pre_id'] = encode_coords_to_uint64(partner_df[['z_pre', 'y_pre', 'x_pre']].values)
partner_df['cz_pre'] = partner_df['z_pre'] // chunk_shape[0]
partner_df['cy_pre'] = partner_df['y_pre'] // chunk_shape[1]
partner_df['cx_pre'] = partner_df['x_pre'] // chunk_shape[2]
partner_df['cid_pre'] = encode_coords_to_uint64(partner_df[['cz_pre', 'cy_pre', 'cx_pre']].values)
partner_df['bz_pre'] = partner_df['z_pre'] // 64
partner_df['by_pre'] = partner_df['y_pre'] // 64
partner_df['bx_pre'] = partner_df['x_pre'] // 64
partner_df['bid_pre'] = encode_coords_to_uint64(partner_df[['bz_pre', 'by_pre', 'bx_pre']].values)
num_chunks = partner_df['cid_pre'].nunique()
_post = partial(_post_tbar_chunk, server, uuid, instance, chunk_shape, merge_existing)
compute_parallel(_post, partner_df.groupby(['cz_pre', 'cy_pre', 'cx_pre']),
total=num_chunks, processes=processes, ordered=False, starmap=True)
def _post_tbar_chunk(server, uuid, instance, chunk_shape, merge_existing, c_zyx, chunk_df):
block_jsons = {}
for (bz, by, bx), block_df in chunk_df.groupby(['bz_pre', 'by_pre', 'bx_pre']):
block_jsons[f"{bx},{by},{bz}"] = compute_tbar_jsons(block_df)
if merge_existing:
chunk_start = np.asarray(c_zyx) * chunk_shape
chunk_stop = chunk_start + chunk_shape
existing = fetch_blocks(server, uuid, instance, [chunk_start, chunk_stop])
for key in existing.keys():
if key in block_jsons:
block_jsons[key].extend(existing[key])
elif existing[key]:
block_jsons[key] = existing[key]
post_blocks(server, uuid, instance, block_jsons)
def post_psd_jsons(server, uuid, instance, partner_df, merge_existing=True, processes=32, chunk_shape=(256, 256, 64000)):
logger.info("Computing chunk/block IDs")
partner_df['cz_post'] = partner_df['z_post'] // chunk_shape[0]
partner_df['cy_post'] = partner_df['y_post'] // chunk_shape[1]
partner_df['cx_post'] = partner_df['x_post'] // chunk_shape[2]
partner_df['cid_post'] = encode_coords_to_uint64(partner_df[['cz_post', 'cy_post', 'cx_post']].values)
partner_df['bz_post'] = partner_df['z_post'] // 64
partner_df['by_post'] = partner_df['y_post'] // 64
partner_df['bx_post'] = partner_df['x_post'] // 64
partner_df['bid_post'] = encode_coords_to_uint64(partner_df[['bz_post', 'by_post', 'bx_post']].values)
num_chunks = partner_df['cid_post'].nunique()
_post = partial(_post_psd_chunk, server, uuid, instance, chunk_shape, merge_existing)
compute_parallel(_post, partner_df.groupby(['cz_post', 'cy_post', 'cx_post']),
total=num_chunks, processes=processes, ordered=False, starmap=True)
def _post_psd_chunk(server, uuid, instance, chunk_shape, merge_existing, c_zyx, chunk_df):
block_jsons = {}
for (bz, by, bx), block_df in chunk_df.groupby(['bz_post', 'by_post', 'bx_post']):
block_jsons[f"{bx},{by},{bz}"] = compute_psd_jsons(block_df)
if merge_existing:
chunk_start = np.asarray(c_zyx) * chunk_shape
chunk_stop = chunk_start + chunk_shape
existing = fetch_blocks(server, uuid, instance, [chunk_start, chunk_stop])
for key in existing.keys():
if key in block_jsons:
block_jsons[key].extend(existing[key])
elif existing[key]:
block_jsons[key] = existing[key]
post_blocks(server, uuid, instance, block_jsons)
def delete_all_synapses(server, uuid, instance, box=None, chunk_shape=(256,256,64000)):
if box is None or isinstance(box, str):
# Determine name of the segmentation instance that's
# associated with the given synapses instance.
syn_info = fetch_instance_info(server, uuid, instance)
seg_instance = syn_info["Base"]["Syncs"][0]
if isinstance(box, str):
assert box == seg_instance, \
("The segmentation instance name you provided doesn't match the name of the sync'd instance.\n"
"Please provide an explicit bounding-box.")
box = fetch_volume_box(server, uuid, seg_instance)
box = np.asarray(box)
assert (box % 64 == 0).all(), "box must be block-aligned"
chunk_boxes = boxes_from_grid(box, chunk_shape, clipped=True)
_erase = partial(_erase_chunk, server, uuid, instance)
compute_parallel(_erase, chunk_boxes, processes=32)
def _erase_chunk(server, uuid, instance, chunk_box):
"""
Helper for delete_all_synapses().
Fetch all blocks in the chunk (to see which blocks have data)
and erase the ones that aren't empty.
"""
EMPTY = []
chunk_data = fetch_blocks(server, uuid, instance, chunk_box)
empty_data = {k:EMPTY for k,v in chunk_data.items() if v}
post_blocks(server, uuid, instance, empty_data, kafkalog=False)
def compute_tbar_jsons(partner_df):
"""
Compute the element JSON data that corresponds
to the tbars in the given partner table.
If you are posting an initial set of tbar points without any PSDs,
simply omit the '_post' columns from the table.
"""
    block_ids = partner_df[['z_pre', 'y_pre', 'x_pre']].values // 64
assert not np.diff(block_ids, axis=0).any(), \
f"DataFrame contains multiple blocks!\n{partner_df}"
tbars_only = ('x_post' not in partner_df.columns)
tbar_jsons = []
for _pre_id, tbar_df in partner_df.groupby('pre_id'):
tbar_xyz = tbar_df[['x_pre', 'y_pre', 'z_pre']].values[0].tolist()
tbar_conf = tbar_df['conf_pre'].iloc[0]
tbar_json = {
"Pos": tbar_xyz,
"Kind": "PreSyn",
"Tags": [],
"Prop": {"conf": str(tbar_conf), "user": "$fpl"},
}
if tbars_only:
tbar_json["Rels"] = []
else:
tbar_json["Rels"] = [{"Rel": "PreSynTo", "To":c} for c in tbar_df[['x_post', 'y_post', 'z_post']].values.tolist()]
tbar_jsons.append(tbar_json)
return tbar_jsons
def compute_psd_jsons(partner_df):
"""
Compute the element JSON data that corresponds to the PSDs in the given partner table
"""
    block_ids = partner_df[['z_post', 'y_post', 'x_post']].values // 64
    assert not np.diff(block_ids, axis=0).any(), \
        f"DataFrame contains multiple blocks!\n{partner_df}"
psd_jsons = []
for row in partner_df.itertuples():
psd_jsons.append({
"Pos": [int(row.x_post), int(row.y_post), int(row.z_post)],
"Kind": "PostSyn",
"Tags": [],
"Prop": {"conf": str(row.conf_post), "user": "$fpl"},
"Rels": [{"Rel": "PostSynTo", "To": [int(row.x_pre), int(row.y_pre), int(row.z_pre)]}]
})
return psd_jsons
def load_gary_psds(pkl_path):
"""
Load a pickle file as given by Gary's code and return a 'partner table'.
"""
import pickle
data = pickle.load(open(pkl_path, 'rb'))
_table = []
for tbar_coord, tbar_conf, psd_coords, psd_confs in tqdm_proxy(zip(data['locs'], data['conf'], data['psds'], data['psds_conf']), total=len(data['locs'])):
for psd_coord, psd_conf in zip(psd_coords, psd_confs):
_table.append([*(tbar_coord[::-1]), tbar_conf, *(psd_coord[::-1]), psd_conf])
df = pd.DataFrame(_table, columns=['z_pre', 'y_pre', 'x_pre', 'conf_pre', 'z_post', 'y_post', 'x_post', 'conf_post'])
for col in ['z_pre', 'y_pre', 'x_pre', 'z_post', 'y_post', 'x_post']:
df[col] = df[col].astype(np.int32)
df['pre_id'] = encode_coords_to_uint64(df[['z_pre', 'y_pre', 'x_pre']].values)
df['post_id'] = encode_coords_to_uint64(df[['z_post', 'y_post', 'x_post']].values)
df['user_pre'] = df['user_post'] = '$fpl'
df['kind_pre'] = 'PreSyn'
df['kind_post'] = 'PostSyn'
df = df[['pre_id', 'z_pre', 'y_pre', 'x_pre', 'kind_pre', 'conf_pre', 'user_pre',
'post_id', 'z_post', 'y_post', 'x_post', 'kind_post', 'conf_post', 'user_post']]
return df
def add_synapses(point_df, partner_df, new_psd_partners_df):
"""
Add the PSDs from new_psd_partners_df, which may reference
existing tbars, or may reference new tbars, in which
case the tbars will be added, too.
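    Example (minimal sketch; all three arguments are point/partner tables
    with the column layout used throughout this module):
        point_df, partner_df = add_synapses(point_df, partner_df, new_psd_partners_df)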
"""
POINT_COLS = ['z', 'y', 'x', 'kind', 'conf', 'user']
PARTNER_COLS_PRE = ['pre_id', 'z_pre', 'y_pre', 'x_pre', 'kind_pre', 'conf_pre', 'user_pre']
PARTNER_COLS_POST = ['post_id', 'z_post', 'y_post', 'x_post', 'kind_post', 'conf_post', 'user_post']
PARTNER_COLS = [*PARTNER_COLS_PRE, *PARTNER_COLS_POST]
partner_df = partner_df[PARTNER_COLS]
new_psd_partners_df = new_psd_partners_df[PARTNER_COLS]
# Check for possible conflicts before we begin
conflicts = (pd.Index(new_psd_partners_df['pre_id'].values)
.intersection(new_psd_partners_df['post_id'].values))
if len(conflicts) > 0:
raise RuntimeError("tbars and psds in the new set overlap!")
conflicts = (pd.Index(new_psd_partners_df['pre_id'].values)
.intersection(partner_df['post_id'].values))
if len(conflicts) > 0:
raise RuntimeError("tbars in the new set overlap with psds in the old set!")
conflicts = (pd.Index(new_psd_partners_df['post_id'].values)
.intersection(partner_df['pre_id'].values))
if len(conflicts) > 0:
raise RuntimeError("psds in the new set overlap with tbars in the old set!")
partner_df = pd.concat((partner_df, new_psd_partners_df), ignore_index=True, sort=True)
partner_df.drop_duplicates(['pre_id', 'post_id'], keep='last', inplace=True)
# Update points
new_points_pre = (new_psd_partners_df
.rename(columns={'pre_id': 'point_id', **dict(zip(PARTNER_COLS_PRE[1:], POINT_COLS))})
.drop_duplicates('point_id', keep='last')
.set_index('point_id'))
new_points_post = (new_psd_partners_df
.rename(columns={'post_id': 'point_id', **dict(zip(PARTNER_COLS_POST[1:], POINT_COLS))})
.drop_duplicates('point_id', keep='last')
.set_index('point_id'))
point_df = pd.concat((point_df, new_points_pre, new_points_post), sort=True)
# Drop duplicate point_ids, keep new
point_df = point_df.loc[~point_df.index.duplicated(keep='last')]
return point_df, partner_df
def delete_psds(point_df, partner_df, obsolete_partner_df):
"""
Delete the PSDs listed in the given obsolete_partner_df.
If any tbars are left with no partners, delete those tbars, too.
"""
obsolete_partner_df = obsolete_partner_df[['pre_id', 'post_id']]
obsolete_pre_ids = obsolete_partner_df['pre_id'].values
obsolete_post_ids = obsolete_partner_df['post_id'].values
# Drop obsolete PSDs
point_df = point_df.query('kind == "PreSyn" or point_id not in @obsolete_post_ids')
partner_df = partner_df.query('post_id not in @obsolete_post_ids')
# Delete empty tbars
remaining_tbar_ids = partner_df['pre_id'].unique()
dropped_tbar_ids = obsolete_partner_df.query('pre_id not in @remaining_tbar_ids')['pre_id'].unique()
point_df = point_df.query('kind == "PostSyn" or point_id not in @dropped_tbar_ids')
return point_df.copy(), partner_df.copy(), dropped_tbar_ids
def delete_tbars(point_df, partner_df, obsolete_tbar_ids):
"""
Delete the given tbars and all of their associated PSDs.
"""
_obsolete_psd_ids = partner_df.query('pre_id in @obsolete_tbar_ids')['post_id'].values
partner_df = partner_df.query('pre_id not in @obsolete_tbar_ids')
q = (' (kind == "PreSyn" and point_id not in @obsolete_tbar_ids)'
' or (kind == "PostSyn" and point_id not in @_obsolete_psd_ids)')
point_df = point_df.query(q)
return point_df.copy(), partner_df.copy()
def select_autapses(partner_df):
"""
Select rows from the given 'partner table' that correspond to autapses.
Must have columns body_pre and body_post.
"""
return partner_df.query('body_pre == body_post')
def select_redundant_psds(partner_df):
"""
Select rows of the given 'partner table' that correspond to redundant PSDs.
If a tbar has more than one connection to the same body, then all but one
of them are considered redundant.
This function returns the less confident PSD entries as redundant.
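    Example (minimal sketch; partner_df needs 'pre_id', 'post_id', 'body_post',
    and ideally 'conf_post' columns):
        redundant = select_redundant_psds(partner_df)
        point_df, partner_df, dropped_tbars = delete_psds(point_df, partner_df, redundant)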
"""
if 'conf_post' in partner_df:
partner_df = partner_df.sort_values('conf_post')
else:
logger.warning("DataFrame has no 'conf_post' column. Discarding redundant PSDs in arbitrary order.")
dupe_psd_rows = partner_df.duplicated(['pre_id', 'body_post'], keep='last')
dupe_partner_df = partner_df.loc[dupe_psd_rows]
return dupe_partner_df.copy()
| [
"[email protected]"
] | |
40ccd51ea1d674209bf46cbea751869f208c6df8 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/coins_20200608093830.py | 2b449f4937abf589c1a075934356f3463068e9c8 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py |
def change(amount,coins):
count = 0
for i in range(len(coins)):
times = coins[i] / amount
print(times)
change(5,[1,2,5])
| [
"[email protected]"
] | |
7dd7acbd17cee8b4c05c6f118abbd654aca5e2d0 | 797f21680bf51656db629691cc667a4ddae7a513 | /final_exams/heroes_of_code_and_logic_VII.py | 758594704481bd5724bca88a701dcec11bcbc266 | [] | no_license | yordan-marinov/fundamentals_python | 48f5ab77814fddc6d3cb5a8d4b5e14f1eebf1298 | e1e9544d02be99640623317fadee810b503e7d9f | refs/heads/master | 2023-01-24T04:59:48.140176 | 2020-12-14T14:21:49 | 2020-12-14T14:21:49 | 309,784,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | def get_heroes_data() -> dict:
number_heroes = int(input())
heroes_data = {}
for _ in range(number_heroes):
data = input().split()
hero_name = data[0]
hit_points = int(data[1])
mana_points = int(data[2])
heroes_data[hero_name] = {
"hp": hit_points,
"mp": mana_points,
}
return heroes_data
def cast_spell(dd: dict, *args) -> dict:
hero_name = args[0]
mp_needed = int(args[1])
spell_name = args[2]
if dd[hero_name]["mp"] >= mp_needed:
dd[hero_name]["mp"] -= mp_needed
print(
f"{hero_name} has successfully cast {spell_name} "
f"and now has {dd[hero_name]['mp']} MP!"
)
else:
print(f"{hero_name} does not have enough MP to cast {spell_name}!")
return dd
def take_damage(dd: dict, *args) -> dict:
hero_name = args[0]
damage = int(args[1])
attacker = args[2]
dd[hero_name]["hp"] -= damage
if dd[hero_name]["hp"] > 0:
print(
f"{hero_name} was hit for {damage} HP by {attacker} and "
f"now has {dd[hero_name]['hp']} HP left!"
)
else:
print(f"{hero_name} has been killed by {attacker}!")
del dd[hero_name]
return dd
def recharge(dd: dict, *args) -> dict:
hero_name = args[0]
amount = int(args[1])
if dd[hero_name]["mp"] + amount > MAXIMUM_POINTS["mp"]:
amount = MAXIMUM_POINTS["mp"] - dd[hero_name]["mp"]
print(f"{hero_name} recharged for {amount} MP!")
dd[hero_name]["mp"] += amount
return dd
def heal(dd: dict, *args) -> dict:
hero_name = args[0]
amount = int(args[1])
if dd[hero_name]["hp"] + amount > MAXIMUM_POINTS["hp"]:
amount = MAXIMUM_POINTS["hp"] - dd[hero_name]["hp"]
print(f"{hero_name} healed for {amount} HP!")
dd[hero_name]["hp"] += amount
return dd
def main_manipulation_print_func(dd: dict, commands) -> print:
while True:
data = input()
if data == "End":
sorting_printing_func(dd)
break
data = data.split(" - ")
command = data.pop(0)
commands[command](dd, *data)
def sorting_printing_func(dd: dict) -> print:
for name, values in sorted(
dd.items(),
key=lambda pair: (-pair[1]["hp"], pair[0])
):
print(f"{name}")
print(f" HP: {values['hp']}")
print(f" MP: {values['mp']}")
MAXIMUM_POINTS = {"hp": 100, "mp": 200}
COMMANDS = dict(
CastSpell=cast_spell,
TakeDamage=take_damage,
Recharge=recharge,
Heal=heal
)
heroes = get_heroes_data()
main_manipulation_print_func(heroes, COMMANDS)
| [
"[email protected]"
] | |
ff370e3deef1c238a5a0f662f60617ff309c9fb4 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/movingMedian_20200630225631.py | 8bf0eecde0b2a7ea0cd4d0c81ed465699b4c24f6 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | def MovingMedian(arr):
    # Assumed intent (the draft stopped at "for i in range"): arr[0] gives the window size, the rest are data.
    size, data = arr[0], arr[1:]
    answer = []
    for i in range(len(data)):
        window = sorted(data[max(0, i - size + 1):i + 1])
        answer.append(window[len(window) // 2])
    return answer
print(MovingMedian([3,1,3,5,10,6,4,3,1])) | [
"[email protected]"
] | |
1e21179f0570aa693579e6491ffaf4b3e2c88bff | b844c72c394b13d9ed4f73222a934f962d6ff187 | /src/structures/program.py | f7b7a6f7418d7d3bc1faf0ca35d1146a66d15fcf | [] | no_license | curtisbright/sagesat | b9b4c9180c75ce8574217058ffa4e121163ccf36 | 8fe52609ab6479d9b98a1e6cf2199a4f12c27777 | refs/heads/master | 2021-01-01T17:52:01.288449 | 2015-08-19T18:14:26 | 2015-08-19T18:14:26 | 41,425,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | '''
Created on Oct 17, 2014
@author: ezulkosk
'''
class Program(object):
'''
classdocs
'''
def __init__(self, ast):
'''
        Constructor
'''
self.ast = ast
self.bools = { }
self.graphs = { }
def toStr(self, indent):
res = "Program\n"
for i in self.ast:
res += i.toStr(1) + "\n"
return res | [
"[email protected]"
] | |
5f1c39b7f1a0f726f7dcc29c7018a74e5f080035 | 609eb72e6f9fefe18ebe806c2aed24bb5b0562c1 | /apps/invoices/models.py | f0acc13e1d3d4a681f55e66650461091b02f2bd6 | [
"MIT"
] | permissive | PocketGM/django-htk | 68b0f780e9f748932e857bf66f3e0ffdf9fb2fa2 | 371ce2c68bc825df174e11d0f6f4c489a8184d9f | refs/heads/master | 2020-12-27T15:26:31.946007 | 2014-12-12T10:45:45 | 2014-12-12T10:45:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,266 | py | from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from htk.apps.invoices.constants import *
from htk.apps.invoices.utils import compute_invoice_code
from htk.fields import CurrencyField
from htk.utils.enums import enum_to_str
class BaseInvoice(models.Model):
customer = models.ForeignKey(settings.HTK_INVOICE_CUSTOMER_MODEL, related_name='invoices')
date = models.DateField()
notes = models.TextField(max_length=256, blank=True)
invoice_type = models.PositiveIntegerField(default=HTK_INVOICE_DEFAULT_TYPE.value)
paid = models.BooleanField(default=False)
payment_terms = models.PositiveIntegerField(default=HTK_INVOICE_DEFAULT_PAYMENT_TERM.value)
class Meta:
abstract = True
def __unicode__(self):
value = 'Invoice #%s' % self.id
return value
def get_encoded_id(self):
invoice_code = compute_invoice_code(self)
return invoice_code
def get_url(self):
url = reverse('invoices_invoice', args=(self.get_encoded_id(),))
return url
def get_total(self):
line_items = self.line_items.all()
subtotal = 0
for line_item in line_items:
subtotal += line_item.get_amount()
return subtotal
def get_invoice_type(self):
from htk.apps.invoices.enums import InvoiceType
invoice_type = InvoiceType(self.invoice_type)
return invoice_type
def get_payment_terms(self):
from htk.apps.invoices.enums import InvoicePaymentTerm
invoice_payment_term = InvoicePaymentTerm(self.payment_terms)
str_value = enum_to_str(invoice_payment_term)
return str_value
class BaseInvoiceLineItem(models.Model):
invoice = models.ForeignKey(settings.HTK_INVOICE_MODEL, related_name='line_items')
name = models.CharField(max_length=64)
description = models.TextField(max_length=256)
unit_cost = CurrencyField(default=0)
quantity = models.PositiveIntegerField(default=1)
class Meta:
abstract = True
def __unicode__(self):
value = 'Line Item for Invoice #%s' % self.invoice.id
return value
def get_amount(self):
amount = self.unit_cost * self.quantity
return amount
| [
"[email protected]"
] | |
4bd8dd1ae4d0a490aba2aeb6656b3246b7aa3b32 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/doctor/migrations/0225_merge_20190314_1713.py | b3a7b682746a743fbde3d1c7d89eac3c2c5f43b0 | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | # Generated by Django 2.0.5 on 2019-03-14 11:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('doctor', '0224_auto_20190314_1709'),
('doctor', '0223_providersignuplead_matrix_lead_id'),
]
operations = [
]
| [
"[email protected]"
] | |
c35aa7b06fb0de485363edc1da75caeecd3bf974 | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901050046/d11/mymodule/main.py | 3607d2c0e686c773d0b1336d3c3af49404d9e679 | [] | no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 867 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import yagmail
import getpass
import requests
import stats_word
from pyquery import PyQuery
# Extract the article body text
r = requests.get('https://mp.weixin.qq.com/s/pLmuGoc4bZrMNl7MSoWgiA')
document = PyQuery(r.text)
content = document('#js_content').text()
# Count word frequencies
result = stats_word.stats_text_cn(content,100,len_size = 2)
result_str = ""
for i in result:
    result_str += str(i)
print("Word frequency results:", result_str)
# Configure the email account
sender = input("Enter sender email address: ")
password = getpass.getpass('Enter sender email password: ')
recipients = input('Enter recipient email address: ')
# Connect to the mail server
yag = yagmail.SMTP( user= sender, password=password, host='smtp.sina.cn')
# Email body
contents = result_str
# Send the email
yag.send(recipients, '自学训练营学习4群 DAY11 sixthspace', contents)
| [
"[email protected]"
] | |
47e2512f693b8d7dae3919a19c1129913658adac | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/-57053121/scipy/stats/mvn.py | 4839dbc5f44d33ff30f8a051e8b11af29844004e | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,653 | py | # encoding: utf-8
# module scipy.stats.mvn
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\stats\mvn.cp37-win_amd64.pyd
# by generator 1.147
"""
This module 'mvn' is auto-generated with f2py (version:2).
Functions:
value,inform = mvnun(lower,upper,means,covar,maxpts=d*1000,abseps=1e-06,releps=1e-06)
value,inform = mvnun_weighted(lower,upper,means,weights,covar,maxpts=d*1000,abseps=1e-06,releps=1e-06)
error,value,inform = mvndst(lower,upper,infin,correl,maxpts=2000,abseps=1e-06,releps=1e-06)
COMMON blocks:
/dkblck/ ivls
.
"""
# no imports
# Variables with simple values
__version__ = b'$Revision: $'
# functions
def dkblck(*args, **kwargs): # real signature unknown
""" 'i'-scalar """
pass
def mvndst(lower, upper, infin, correl, maxpts=None, abseps=None, releps=None): # real signature unknown; restored from __doc__
"""
error,value,inform = mvndst(lower,upper,infin,correl,[maxpts,abseps,releps])
Wrapper for ``mvndst``.
Parameters
----------
lower : input rank-1 array('d') with bounds (n)
upper : input rank-1 array('d') with bounds (n)
infin : input rank-1 array('i') with bounds (n)
correl : input rank-1 array('d') with bounds (n*(n-1)/2)
Other Parameters
----------------
maxpts : input int, optional
Default: 2000
abseps : input float, optional
Default: 1e-06
releps : input float, optional
Default: 1e-06
Returns
-------
error : float
value : float
inform : int
"""
pass
def mvnun(lower, upper, means, covar, maxpts=None, abseps=None, releps=None): # real signature unknown; restored from __doc__
"""
value,inform = mvnun(lower,upper,means,covar,[maxpts,abseps,releps])
Wrapper for ``mvnun``.
Parameters
----------
lower : input rank-1 array('d') with bounds (d)
upper : input rank-1 array('d') with bounds (d)
means : input rank-2 array('d') with bounds (d,n)
covar : input rank-2 array('d') with bounds (d,d)
Other Parameters
----------------
maxpts : input int, optional
Default: d*1000
abseps : input float, optional
Default: 1e-06
releps : input float, optional
Default: 1e-06
Returns
-------
value : float
inform : int
"""
pass
def mvnun_weighted(lower, upper, means, weights, covar, maxpts=None, abseps=None, releps=None): # real signature unknown; restored from __doc__
"""
value,inform = mvnun_weighted(lower,upper,means,weights,covar,[maxpts,abseps,releps])
Wrapper for ``mvnun_weighted``.
Parameters
----------
lower : input rank-1 array('d') with bounds (d)
upper : input rank-1 array('d') with bounds (d)
means : input rank-2 array('d') with bounds (d,n)
weights : input rank-1 array('d') with bounds (n)
covar : input rank-2 array('d') with bounds (d,d)
Other Parameters
----------------
maxpts : input int, optional
Default: d*1000
abseps : input float, optional
Default: 1e-06
releps : input float, optional
Default: 1e-06
Returns
-------
value : float
inform : int
"""
pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x000001CB57F39940>'
__spec__ = None # (!) real value is "ModuleSpec(name='scipy.stats.mvn', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x000001CB57F39940>, origin='C:\\\\Users\\\\Doly\\\\Anaconda3\\\\lib\\\\site-packages\\\\scipy\\\\stats\\\\mvn.cp37-win_amd64.pyd')"
| [
"[email protected]"
] | |
3df813b7b10c86143253c4d824e3408615ad7b62 | 5141a9446464e03df639093bb75307c3a421084b | /sim/materials/pitch/segment_03/pitch_handlers.py | edc792cea7b2c4d034c339a6744fc4dab648d58e | [] | no_license | GregoryREvans/sim | b32faaa4ec0288dfc03d33e27a46971c30bf6c33 | d9be48232c41365759d551137786627cff140abc | refs/heads/master | 2022-02-26T15:54:15.807251 | 2022-02-10T13:51:19 | 2022-02-10T13:51:19 | 248,852,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | import evans
from sim.materials.pitch.segment_03.pitches import (
voice_1_chord_1,
voice_1_chord_2,
voice_1_chord_3,
voice_1_chord_4,
voice_1_chord_5,
voice_1_chord_6,
voice_1_chord_7,
voice_1_chord_8,
voice_2_chord_1,
voice_2_chord_2,
voice_2_chord_3,
voice_2_chord_4,
voice_2_chord_5,
voice_2_chord_6,
voice_2_chord_7,
voice_2_chord_8,
voice_3_chord_1,
voice_3_chord_2,
voice_3_chord_3,
voice_3_chord_4,
voice_3_chord_5,
voice_3_chord_6,
voice_3_chord_7,
voice_3_chord_8,
voice_4_chord_1,
voice_4_chord_2,
voice_4_chord_3,
voice_4_chord_4,
voice_4_chord_5,
voice_4_chord_6,
voice_4_chord_7,
voice_4_chord_8,
)
piano_pitch_handler_one_1 = evans.PitchHandler(
pitch_list=voice_1_chord_1, forget=False, name="voice_1_chord_1"
)
piano_pitch_handler_one_2 = evans.PitchHandler(
pitch_list=voice_1_chord_2, forget=False, name="voice_1_chord_2"
)
piano_pitch_handler_one_3 = evans.PitchHandler(
pitch_list=voice_1_chord_3, forget=False, name="voice_1_chord_3"
)
piano_pitch_handler_one_4 = evans.PitchHandler(
pitch_list=voice_1_chord_4, forget=False, name="voice_1_chord_4"
)
piano_pitch_handler_one_5 = evans.PitchHandler(
pitch_list=voice_1_chord_5, forget=False, name="voice_1_chord_5"
)
piano_pitch_handler_one_6 = evans.PitchHandler(
pitch_list=voice_1_chord_6, forget=False, name="voice_1_chord_6"
)
piano_pitch_handler_one_7 = evans.PitchHandler(
pitch_list=voice_1_chord_7, forget=False, name="voice_1_chord_7"
)
piano_pitch_handler_one_8 = evans.PitchHandler(
pitch_list=voice_1_chord_8, forget=False, name="voice_1_chord_8"
)
# ##
piano_pitch_handler_two_1 = evans.PitchHandler(
pitch_list=voice_2_chord_1, forget=False, name="voice_2_chord_1"
)
piano_pitch_handler_two_2 = evans.PitchHandler(
pitch_list=voice_2_chord_2, forget=False, name="voice_2_chord_2"
)
piano_pitch_handler_two_3 = evans.PitchHandler(
pitch_list=voice_2_chord_3, forget=False, name="voice_2_chord_3"
)
piano_pitch_handler_two_4 = evans.PitchHandler(
pitch_list=voice_2_chord_4, forget=False, name="voice_2_chord_4"
)
piano_pitch_handler_two_5 = evans.PitchHandler(
pitch_list=voice_2_chord_5, forget=False, name="voice_2_chord_5"
)
piano_pitch_handler_two_6 = evans.PitchHandler(
pitch_list=voice_2_chord_6, forget=False, name="voice_2_chord_6"
)
piano_pitch_handler_two_7 = evans.PitchHandler(
pitch_list=voice_2_chord_7, forget=False, name="voice_2_chord_7"
)
piano_pitch_handler_two_8 = evans.PitchHandler(
pitch_list=voice_2_chord_8, forget=False, name="voice_2_chord_8"
)
# ##
piano_pitch_handler_three_1 = evans.PitchHandler(
pitch_list=voice_3_chord_1, forget=False, name="voice_3_chord_1"
)
piano_pitch_handler_three_2 = evans.PitchHandler(
pitch_list=voice_3_chord_2, forget=False, name="voice_3_chord_2"
)
piano_pitch_handler_three_3 = evans.PitchHandler(
pitch_list=voice_3_chord_3, forget=False, name="voice_3_chord_3"
)
piano_pitch_handler_three_4 = evans.PitchHandler(
pitch_list=voice_3_chord_4, forget=False, name="voice_3_chord_4"
)
piano_pitch_handler_three_5 = evans.PitchHandler(
pitch_list=voice_3_chord_5, forget=False, name="voice_3_chord_5"
)
piano_pitch_handler_three_6 = evans.PitchHandler(
pitch_list=voice_3_chord_6, forget=False, name="voice_3_chord_6"
)
piano_pitch_handler_three_7 = evans.PitchHandler(
pitch_list=voice_3_chord_7, forget=False, name="voice_3_chord_7"
)
piano_pitch_handler_three_8 = evans.PitchHandler(
pitch_list=voice_3_chord_8, forget=False, name="voice_3_chord_8"
)
# ##
piano_pitch_handler_four_1 = evans.PitchHandler(
pitch_list=voice_4_chord_1, forget=False, name="voice_4_chord_1"
)
piano_pitch_handler_four_2 = evans.PitchHandler(
pitch_list=voice_4_chord_2, forget=False, name="voice_4_chord_2"
)
piano_pitch_handler_four_3 = evans.PitchHandler(
pitch_list=voice_4_chord_3, forget=False, name="voice_4_chord_3"
)
piano_pitch_handler_four_4 = evans.PitchHandler(
pitch_list=voice_4_chord_4, forget=False, name="voice_4_chord_4"
)
piano_pitch_handler_four_5 = evans.PitchHandler(
pitch_list=voice_4_chord_5, forget=False, name="voice_4_chord_5"
)
piano_pitch_handler_four_6 = evans.PitchHandler(
pitch_list=voice_4_chord_6, forget=False, name="voice_4_chord_6"
)
piano_pitch_handler_four_7 = evans.PitchHandler(
pitch_list=voice_4_chord_7, forget=False, name="voice_4_chord_7"
)
piano_pitch_handler_four_8 = evans.PitchHandler(
pitch_list=voice_4_chord_8, forget=False, name="voice_4_chord_8"
)
| [
"[email protected]"
] | |
03b959614e50f787f0f04938891a9d27ef1ec31b | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/storage-blob-preview/azext_storage_blob_preview/tests/latest/test_storage_sas_scenarios.py | 7fe8ebfd5f1fd7662ee08735a0b96f88a2c80fb6 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 7,018 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from datetime import datetime, timedelta
from azure.cli.testsdk import (LiveScenarioTest, ResourceGroupPreparer, StorageAccountPreparer,
JMESPathCheck, JMESPathCheckExists, NoneCheck)
from ..storage_test_util import StorageScenarioMixin
class StorageSASScenario(StorageScenarioMixin, LiveScenarioTest):
@ResourceGroupPreparer()
@StorageAccountPreparer(name_prefix='blobsas', kind='StorageV2', location='eastus2euap')
def test_storage_blob_sas_permission_scenario(self, resource_group, storage_account):
"""
Test service SAS operations.
A service SAS is secured with the storage account key. A service SAS delegates access to a resource in only
one of the Azure Storage services: Blob storage, Queue storage, Table storage, or Azure Files.
"""
expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
account_info = self.get_account_info(resource_group, storage_account)
container = self.create_container(account_info)
local_file = self.create_temp_file(128, full_random=False)
blob_name = self.create_random_name('blob', 16)
self.kwargs.update({
'expiry': expiry,
'account': storage_account,
'container': container,
'local_file': local_file,
'blob': blob_name
})
# ----account key----
# test sas-token for a container
sas = self.storage_cmd('storage container generate-sas -n {} --https-only --permissions dlrwt --expiry {} -otsv',
account_info, container, expiry).output.strip()
self.kwargs['container_sas'] = sas
self.cmd('storage blob upload -c {container} -f "{local_file}" -n {blob} '
'--account-name {account} --sas-token "{container_sas}"')
# test sas-token for a blob
sas = self.storage_cmd('storage blob generate-sas -c {} -n {} --https-only --permissions acdrwt --expiry {} '
'-otsv', account_info, container, blob_name, expiry).output.strip()
self.kwargs['blob_sas'] = sas
self.cmd('storage blob upload -c {container} -f "{local_file}" -n {blob} --overwrite '
'--account-name {account} --sas-token "{blob_sas}" --tags test=tag ')
self.cmd('storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}') \
.assert_with_checks(JMESPathCheck('name', blob_name),
JMESPathCheck('tagCount', 1))
self.cmd('storage blob tag list -n {} -c {} --account-name {} --sas-token "{}" '.format(blob_name,
container, storage_account, sas)).assert_with_checks(JMESPathCheck('test', 'tag'))
# ----connection string----
connection_str = self.cmd('storage account show-connection-string -n {account} --query connectionString '
'-otsv').output.strip()
self.kwargs['con_str'] = connection_str
# test sas-token for a container
sas = self.cmd('storage container generate-sas -n {container} --https-only --permissions dlrw '
'--connection-string {con_str} --expiry {expiry} -otsv').output.strip()
self.kwargs['container_sas'] = sas
self.cmd('storage blob upload -c {container} -f "{local_file}" -n {blob} '
'--account-name {account} --sas-token "{container_sas}"')
# test sas-token for a blob
sas = self.cmd('storage blob generate-sas -c {container} -n {blob} --account-name {account} --https-only '
'--permissions acdrwt --expiry {expiry} -otsv').output.strip()
self.kwargs['blob_sas'] = sas
self.cmd('storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}') \
.assert_with_checks(JMESPathCheck('name', blob_name))
@ResourceGroupPreparer()
@StorageAccountPreparer()
    def test_storage_blob_sas_policy_scenario(self, resource_group, storage_account):
"""
Test service SAS with stored access policy.
A stored access policy is defined on a resource container, which can be a blob container, table, queue,
or file share. The stored access policy can be used to manage constraints for one or more service shared
access signatures. When you associate a service SAS with a stored access policy, the SAS inherits the
constraints—the start time, expiry time, and permissions—defined for the stored access policy.
"""
expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
account_info = self.get_account_info(resource_group, storage_account)
container = self.create_container(account_info)
local_file = self.create_temp_file(128, full_random=False)
blob_name = self.create_random_name('blob', 16)
policy = self.create_random_name('policy', 16)
self.storage_cmd('storage container policy create -c {} -n {} --expiry {} --permissions acdlrw', account_info,
container, policy, expiry)
self.storage_cmd('storage container policy list -c {} ', account_info, container)\
.assert_with_checks(JMESPathCheckExists('{}.expiry'.format(policy)),
JMESPathCheck('{}.permission'.format(policy), 'racwdl'))
self.storage_cmd('storage container policy show -c {} -n {} ', account_info, container, policy, expiry)\
.assert_with_checks(JMESPathCheckExists('expiry'),
JMESPathCheck('permission', 'racwdl'))
sas = self.storage_cmd('storage blob generate-sas -n {} -c {} --policy-name {} -otsv ', account_info, blob_name,
container, policy).output.strip()
self.storage_cmd('storage blob upload -n {} -c {} -f "{}" --sas-token "{}" ', account_info, blob_name, container,
local_file, sas)
self.storage_cmd('storage container policy update -c {} -n {} --permissions acdlr', account_info, container,
policy)
self.storage_cmd('storage container policy show -c {} -n {} ', account_info, container, policy)\
.assert_with_checks(JMESPathCheckExists('expiry'),
JMESPathCheck('permission', 'racdl'))
self.storage_cmd('storage container policy delete -c {} -n {} ', account_info, container, policy)
self.storage_cmd('storage container policy list -c {} ', account_info, container) \
.assert_with_checks(NoneCheck())
| [
"[email protected]"
] | |
c0aeb537a3746a1fd86d34cb9b84507d15349fce | 527fd39d3a1555800c2c32025fdd15fd86ba6672 | /make_Flexible_Finction/args.py | 0c2e15b007ee07cc6e3d18abdd7e1d7b7d6597f0 | [] | no_license | rohanwarange/Python-Tutorials | cfd39551f7ff62bd032946976ba3820474e42405 | 53d8fb226f94d027ae7999f9678697206d37d83a | refs/heads/master | 2023-06-18T10:45:36.884324 | 2021-07-07T17:44:22 | 2021-07-07T17:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | def total(a,b):
return a+b
print(total(5,5))
# *args
def total(*args):
print(args)
total=0
for num in args:
total+=num
return total
print(total(1,2,3,4,5)) | [
"[email protected]"
] | |
4cd3d45d04bbde67e61a64cad5efbb27ea26f331 | 8a41a7f9340cfa784cb36d35dca1ecb1630e4097 | /Programming/Python/dict_practice/Dictionaries_Ordered.py | daef72a4b09a83b3aa461282c1f773d366a4206e | [] | no_license | anishst/Learn | 02e6b6cce43cf21621d328ef0fc25168267a9a3d | a1aed8b78b19acdb23e20be57b67fb242e0aefc5 | refs/heads/master | 2022-05-13T10:17:40.293640 | 2022-03-30T12:44:21 | 2022-03-30T12:44:21 | 173,595,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from collections import OrderedDict
d = OrderedDict()
d['foo'] = 1
d['bar'] = 2
d['spam'] = 3
d['groom'] = 4
for key in d:
print(key,d[key])
import json
print(json.dumps(d)) | [
"[email protected]"
] | |
52b7b2f5dcb464cd81400a0fb5ba7962dfdc5ca5 | a03a7935a191d63bee76fd3b85a61ee27f98904a | /src/visitpy/visit_utils/setup.py | 3e5361e0abaa404122a4e29ab1228fc6a4e762b9 | [] | no_license | cchriste/visit | 57091c4a512ab87efd17c64c7494aa4cf01b7e53 | c72c413f571e56b52fb7221955219f11f4ba19e3 | refs/heads/master | 2020-04-12T06:25:27.458132 | 2015-10-12T15:41:49 | 2015-10-12T15:41:49 | 10,111,791 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,130 | py | #*****************************************************************************
#
# Copyright (c) 2000 - 2015, Lawrence Livermore National Security, LLC
# Produced at the Lawrence Livermore National Laboratory
# LLNL-CODE-442911
# All rights reserved.
#
# This file is part of VisIt. For details, see https://visit.llnl.gov/. The
# full copyright notice is contained in the file COPYRIGHT located at the root
# of the VisIt distribution or at http://www.llnl.gov/visit/copyright.html.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the disclaimer below.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of the LLNS/LLNL nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
# LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#*****************************************************************************
"""
file: setup.py
author: Cyrus Harrison <[email protected]>
distutils setup script for the 'visit_utils' module.
"""
import sys
__system_bytecode_setting = sys.dont_write_bytecode
sys.dont_write_bytecode = True
from distutils.core import setup
import sys
import setup_tests
#
# Support running tests w/ visit's cli.
#
using_visit = False
try:
# the command line string passed to cli
# will confuse distutils, so modify
# sys.argv to only have args passed after
# '-s setup.py'
args = Argv()
sys.argv = [__file__]
sys.argv.extend(args)
using_visit = True
except:
pass
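#
# Example invocations (assumed, for illustration): a plain
#   python setup.py build install
# works as usual, while the test suite can be driven through VisIt's cli as
#   visit -cli -nowin -s setup.py test
# which is why sys.argv is rewritten above.
#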
setup(name='visit_utils',
version='0.1',
author = 'Cyrus Harrison',
author_email = '[email protected]',
      description='VisIt Utilities Module',
package_dir = {'visit_utils':'src'},
packages=['visit_utils','visit_utils.qannote'],
cmdclass = { 'test': setup_tests.ExecuteTests})
if using_visit:
sys.exit(0)
| [
"bonnell@18c085ea-50e0-402c-830e-de6fd14e8384"
] | bonnell@18c085ea-50e0-402c-830e-de6fd14e8384 |
fe14c9a96e8cc5ceb988566f3f44b607c74ee60f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03729/s464510871.py | 1871472a58b9eb44569fe01d1ae8219a23ea2c08 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | def main():
s1,s2,s3 = map(str,input().split())
ans = False
if s1[len(s1)-1] == s2[0]:
if s2[len(s2)-1]==s3[0]:
ans = True
print("YES" if ans else "NO")
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
cc0c913331abe5f50e3e501719d521e817e06232 | 915865db25d918a4b2c3296aaa702fedf784b042 | /experiments/amplitude/filters_and_envelope.py | 7fc2b2a7fdaa51ea790575b8d8a11459ab693256 | [] | no_license | nikolaims/pi_nfb | f456484683f31986d12f659ee2a9227a51d5edf4 | 789ad0e20fac7f8d0843b5c3af834e23dcc65e33 | refs/heads/master | 2020-04-05T08:09:23.416703 | 2017-07-25T08:55:32 | 2017-07-25T08:55:32 | 81,811,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | import pylab as plt
from scipy.signal import *
from utils.data.loaders import get_ideal_signal, load_feedback, get_signal
from utils.filters import magic_filter_taps, min_phase_magic_filter
from utils.sinbase import get_base
from itertools import combinations
import numpy as np
n = 10000
n_components = 50
fs = 250
band = (8, 12)
time = np.arange(n) / fs
# load signal
signal = get_signal()[:n, 15]
# reference ("ideal") band-pass: linear-phase FIR from firwin2, applied zero-phase with filtfilt
w = 0.1
gain = [0, 0, 1, 1, 0, 0]
taps = firwin2(1000, [0 , band[0]-w, band[0], band[1], band[1]+w, fs/2], gain, nyq=fs/2)
ideal = filtfilt(taps, 1, signal)
plt.plot(np.abs(ideal), 'b', alpha=0.6)
plt.plot(np.abs(hilbert(ideal)), 'b')
# fft
from scipy.fftpack import rfft, irfft, fftfreq
W = fftfreq(signal.size, d=1/fs*2)
f_signal = rfft(signal)
cut_f_signal = f_signal.copy()
cut_f_signal[(W<8) | (W>12)] = 0
cut_signal = irfft(cut_f_signal)
plt.plot(np.abs(cut_signal), 'k', alpha=0.6)
plt.plot(np.abs(hilbert(cut_signal)), 'k')
print(np.mean((np.abs(hilbert(cut_signal)) - np.abs(hilbert(ideal)))**2)/np.var(np.abs(hilbert(cut_signal))))
# fir minphase
fir_signal = lfilter(min_phase_magic_filter(), 1, signal)[28:]
plt.plot(np.abs(fir_signal), 'g', alpha=0.6)
plt.plot(np.abs(hilbert(fir_signal)), 'g')
# causal FIR (magic filter taps)
fir_signal = lfilter(magic_filter_taps(), 1, signal)[28:]
plt.plot(np.abs(fir_signal), 'r', alpha=0.6)
plt.plot(np.abs(hilbert(fir_signal)), 'r')
plt.show() | [
"[email protected]"
] | |
df9acff7102ba7093d1df918c8721d0bccd54c52 | 89ad82bfa5bb3aa3312c815f4199c88e12345974 | /test/test_ifc_checker_checkplan.py | 49cf0d81acc925c49a8f75cc27b0302c48ded0e7 | [] | no_license | lucaslmmanoel/python-api-client | 020f55251af5d86a895740d806618ba94f1863b0 | 49dbabfddb576d3b816c84d86f5c1f080f553704 | refs/heads/master | 2021-02-15T22:45:14.735020 | 2020-03-04T14:02:29 | 2020-03-04T14:02:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # coding: utf-8
"""
BIMData API
BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501
The version of the OpenAPI document: v1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import bimdata_api_client
from bimdata_api_client.models.ifc_checker_checkplan import IfcCheckerCheckplan # noqa: E501
from bimdata_api_client.rest import ApiException
class TestIfcCheckerCheckplan(unittest.TestCase):
"""IfcCheckerCheckplan unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIfcCheckerCheckplan(self):
"""Test IfcCheckerCheckplan"""
# FIXME: construct object with mandatory attributes with example values
# model = bimdata_api_client.models.ifc_checker_checkplan.IfcCheckerCheckplan() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
01c8ccbe9e6889846c28217f8e9266445fb7c747 | 4d0eae8dc3c7dabe2ff79fd8ad035be056423ac7 | /weixincl/weixincl/apps/weixin/urls.py | beb633d21134a141c05983baea470dfeeeaf34ee | [
"MIT"
] | permissive | lionsin/weixincl | 2cdb83e3e596c7fe168b31ed6f747715f7919024 | e02f342b00eaea2704a34ca889903747b0fbb167 | refs/heads/master | 2020-11-30T00:48:33.053979 | 2019-12-26T14:16:04 | 2019-12-26T14:16:04 | 230,254,861 | 1 | 2 | MIT | 2019-12-26T14:54:06 | 2019-12-26T11:45:49 | Python | UTF-8 | Python | false | false | 928 | py | from django.conf.urls import url
from weixin import views
urlpatterns = [
# Articles/search?
url(r"^pcnum/search$", views.PcSearchListView.as_view()),
url(r"^collect_list$", views.AddCollectList.as_view()),
url(r"^collect_list/clear$", views.ClearCollectList.as_view()),
url(r"^collect_list/delete$", views.ClearCollectList.as_view()),
url(r"^tasks/add$", views.TaskaddAPIView.as_view()),
url(r"^tasks/list$", views.TasklistAPIView.as_view()),
url(r"^tasks/detail$", views.TaskShowDetailAPIView.as_view()),
url(r"^tasks/delete$", views.TaskdeleteAPIView.as_view()),
    # view the list of articles for a task
url(r"^task_arts/list$", views.ArticleShowDetailAPIView.as_view()),
url(r"^history/add$", views.PcSearchHistoryView.as_view()),
url(r"^history$", views.PcSearchHistoryView.as_view()),
    # clear the collection list page (search history)
url(r"^history/clear$", views.HistoryClearAPIView.as_view()),
]
| [
"="
] | = |
f85c12fb2141e0a77279dc13b68fe54489ab674f | a26554d068f564f2811e7774f5df44326dd6978b | /04. FUNCTIONS/04-02-07. Perfect Number.py | e62a4aef3a2b7574dae7bd4f64c0788746f07a0e | [] | no_license | emma-metodieva/SoftUni_Python_Fundamentals_202009 | 300c8323308af8a7be4efe42836dd6a7866a34b0 | 5f58a2d565dc4c3bf28330888aa6a2d3b1f8125f | refs/heads/master | 2023-01-31T15:28:04.360805 | 2020-12-12T20:39:17 | 2020-12-12T20:39:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # 04-02. FUNCTIONS [Exercise]
# 07. Perfect Number
def perfect_number(number):
perfect = False
sum_divisors = 0
if number > 0:
for i in range(1, number):
if number % i == 0:
sum_divisors += i
if sum_divisors == number:
perfect = True
return perfect
if perfect_number(int(input())):
print('We have a perfect number!')
else:
print('It\'s not so perfect.')
| [
"[email protected]"
] | |
4d8545ec4c2396a029e4df3f841a6a2014442e80 | 4edbeb3e2d3263897810a358d8c95854a468c3ca | /python3/urllib/urllib1.py | 0a567f305976d5ebce0f80d94faea4c0b095cdf5 | [
"MIT"
] | permissive | jtraver/dev | f505d15d45b67a59d11306cc7252114c265f388b | 2197e3443c7619b856470558b737d85fe1f77a5a | refs/heads/master | 2023-08-06T02:17:58.601861 | 2023-08-01T16:58:44 | 2023-08-01T16:58:44 | 14,509,952 | 0 | 1 | MIT | 2020-10-14T18:32:48 | 2013-11-19T00:51:19 | Python | UTF-8 | Python | false | false | 479 | py | #!/usr/bin/env python3
#!/usr/bin/python
import urllib.request, urllib.parse, urllib.error
def main():
# url = 'https://screener.finance.yahoo.com/stocks.html'
url = 'https://screener.finance.yahoo.com/b?sc=&im=&prmin=0&prmax=&mcmin=&mcmax=&dvymin=0&dvymax=&betamin=&betamax=&remin=&remax=&pmmin=&pmmax=&pemin=&pemax=&pbmin=&pbmax=&psmin=&psmax=&pegmin=&pegmax=&gr=&grfy=&ar=&vw=1&db=stocks'
html = urllib.request.urlopen(url)
print("%s" % html.read())
main()
| [
"[email protected]"
] | |
e1450556c70c9b5607a7ff05bc84adab3fea1d72 | 732b0b3e2ae0e6c498cfd2ed893de60b9fc22a32 | /tests/integration/actions/collections/test_direct_interactive_ee.py | efb5c28f74c6641d5927e5003b6c20613e8003f6 | [
"Apache-2.0"
] | permissive | didib/ansible-navigator | eb7b77c1df30b2e90b663383f0f76b6224e92c02 | 62fdbd05f25fb2d79133b3ab207f53ac2f2d6d36 | refs/heads/main | 2023-08-30T06:43:42.876079 | 2021-10-14T18:42:17 | 2021-10-14T18:42:17 | 425,540,819 | 0 | 0 | Apache-2.0 | 2021-11-07T15:27:54 | 2021-11-07T15:27:53 | null | UTF-8 | Python | false | false | 1,161 | py | """ collections direct from cli interactive with ee
"""
import pytest
from .base import BaseClass
CLI = "ansible-navigator collections --execution-environment true"
testdata = [
(0, CLI, "ansible-navigator collections browse window"),
(1, ":0", "Browse testorg.coll_1 plugins window"),
(2, ":0", "lookup_1 plugin docs window"),
(3, ":back", "Back to browse testorg.coll_1 plugins window"),
(4, ":1", "mod_1 plugin docs window"),
(5, ":back", "Back to browse testorg.coll_1 plugins window"),
(6, ":back", "Back to ansible-navigator collections browse window"),
(7, ":1", "Browse testorg.coll_2 plugins window"),
(8, ":0", "lookup_2 plugin docs window"),
(9, ":back", "Back to browse testorg.coll_2 plugins window"),
(10, ":1", "mod_2 plugin docs window"),
(11, ":back", "Back to browse testorg.coll_2 plugins window"),
(12, ":back", "Back to ansible-navigator collections browse window"),
]
@pytest.mark.parametrize("index, user_input, comment", testdata)
class Test(BaseClass):
"""run the tests"""
TEST_FOR_MODE = "interactive"
EXECUTION_ENVIRONMENT_TEST = True
UPDATE_FIXTURES = False
| [
"[email protected]"
] | |
d2a9cb9e98f386701503627e098a2b8957381254 | f2befaae3840bafd181cc712108e3b64caf2696f | /app/portal/horizon/openstack_dashboard/contrib/developer/theme_preview/panel.py | ebe06fc1d16d07e88e901c59aee0806dedf6353f | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | F5Networks/f5-adcaas-openstack | 17d5c408d421dcfe542002e1f850b2d9f29f1663 | 02bd8a606215c0fa08b926bac1b092b5e8b278df | refs/heads/master | 2023-08-28T12:09:54.972191 | 2022-08-12T02:03:43 | 2022-08-12T02:03:43 | 164,592,273 | 4 | 23 | Apache-2.0 | 2022-08-12T02:03:44 | 2019-01-08T07:40:35 | Python | UTF-8 | Python | false | false | 779 | py | # Copyright 2015 Cisco Systems, Inc.
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class Preview(horizon.Panel):
name = _("Theme Preview")
slug = 'theme_preview'
| [
"[email protected]"
] | |
f98206a5f823d8106d69712bbbda48934f3bb4dd | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_policy_rule.py | f12064beb3988ec10b30bd46e8aa3e14b27fa81a | [
"MIT",
"GPL-3.0-only"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 45,525 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_policy_rule
short_description: Manage LTM policy rules on a BIG-IP
description:
- This module will manage LTM policy rules on a BIG-IP.
version_added: "1.0.0"
options:
description:
description:
- Description of the policy rule.
type: str
actions:
description:
- The actions you want the policy rule to perform.
- The available attributes vary by the action, however, each action requires
you specify a C(type).
- These conditions can be specified in any order. Despite the fact they are in a list,
the order in the list does not matter to the BIG-IP.
type: list
elements: dict
suboptions:
type:
description:
- The action type. This value controls which of the following options are required.
- When C(type) is C(forward), the system associates a given C(pool), or C(virtual),
or C(node) with this rule.
- When C(type) is C(enable), the system associates a given C(asm_policy) with
this rule.
- When C(type) is C(ignore), the system removes all existing actions from this
rule.
- When C(type) is C(redirect), the system redirects an HTTP request to a different URL.
- When C(type) is C(reset), the system resets the connection upon C(event).
- When C(type) is C(persist), the system associates C(cookie_insert) and C(cookie_expiry) with this rule.
          - When C(type) is C(set_variable), the system sets a variable from the evaluated Tcl C(expression) when the specified C(event) occurs.
type: str
required: true
choices:
- forward
- enable
- ignore
- redirect
- reset
- persist
- set_variable
pool:
description:
- Pool to which you want to forward traffic.
- This parameter is only valid with the C(forward) type.
type: str
virtual:
description:
- Virtual Server to which you want to forward traffic.
- This parameter is only valid with the C(forward) type.
type: str
node:
description:
- Node to which you want to forward traffic.
- This parameter is only valid with the C(forward) type.
type: str
version_added: "1.2.0"
asm_policy:
description:
- ASM policy to enable.
- This parameter is only valid with the C(enable) type.
type: str
location:
description:
- The new URL for which a redirect response will be sent.
- A Tcl command substitution can be used for this field.
type: str
event:
description:
- Events on which actions, such as reset, can be triggered.
- With the C(set_variable) action, it is used for specifying
an action event, such as request or response.
type: str
expression:
description:
- A tcl expression used with the C(set_variable) action.
type: str
variable_name:
description:
- Variable name used with the C(set_variable) action.
type: str
cookie_insert:
description:
- Cookie name on which you want to persist.
- This parameter is only valid with the C(persist) type.
type: str
version_added: "1.1.0"
cookie_expiry:
description:
- Optional argument, specifying the time for which the session will be persisted.
- This parameter is only valid with the C(persist) type.
type: int
version_added: "1.1.0"
policy:
description:
- The name of the policy you want to associate this rule with.
type: str
required: True
name:
description:
- The name of the rule.
type: str
required: True
conditions:
description:
- A list of attributes that describe the condition.
- See suboptions for details on how to construct each list entry.
- The ordering of this list is important, the module will ensure the order is
kept when modifying the task.
- The suboption options listed below are not required for all condition types,
read the description for more details.
- These conditions can be specified in any order. Despite the fact they are in a list,
the order in the list does not matter to the BIG-IP.
type: list
elements: dict
suboptions:
type:
description:
- The condition type. This value controls which of the following options are required.
- When C(type) is C(http_uri), the system associates a given C(path_begins_with_any)
list of strings with which the HTTP URI should begin. Any item in the
list will provide a match.
- When C(type) is C(all_traffic), the system removes all existing conditions from
this rule.
type: str
required: True
choices:
- http_uri
- all_traffic
- http_host
- ssl_extension
path_begins_with_any:
description:
- A list of strings of characters the HTTP URI should start with.
- This parameter is only valid with the C(http_uri) type.
type: list
elements: str
host_is_any:
description:
- A list of strings of characters the HTTP Host should match.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
host_is_not_any:
description:
- A list of strings of characters the HTTP Host should not match.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
host_begins_with_any:
description:
- A list of strings of characters the HTTP Host should start with.
- This parameter is only valid with the C(http_host) type.
type: list
elements: str
server_name_is_any:
description:
- A list of strings of characters the SSL Extension should match.
- This parameter is only valid with the C(ssl_extension) type.
type: list
elements: str
event:
description:
- Events on which conditions such as SSL Extension can be triggered.
type: str
state:
description:
      - When C(present), ensures the rule is created or updated on the device.
      - When C(absent), ensures the rule is removed from the device.
type: str
choices:
- present
- absent
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5networks.f5_modules.f5
requirements:
- BIG-IP >= v12.1.0
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
- Greg Crosby (@crosbygw)
- Nitin Khanna (@nitinthewiz)
- Andrey Kashcheev (@andreykashcheev)
'''
EXAMPLES = r'''
- name: Create policies
bigip_policy:
name: Policy-Foo
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add a rule to the new policy
bigip_policy_rule:
policy: Policy-Foo
name: rule3
conditions:
- type: http_uri
path_begins_with_any:
- /ABC
actions:
- type: forward
pool: pool-svrs
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add multiple rules to the new policy
bigip_policy_rule:
policy: Policy-Foo
name: "{{ item.name }}"
conditions: "{{ item.conditions }}"
actions: "{{ item.actions }}"
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
loop:
- name: rule1
actions:
- type: forward
pool: pool-svrs
conditions:
- type: http_uri
path_begins_with_any:
- /euro
- name: rule2
actions:
- type: forward
pool: pool-svrs
conditions:
- type: http_uri
path_begins_with_any:
- /HomePage/
- name: rule3
actions:
- type: set_variable
variable_name: user-agent
expression: tcl:[HTTP::header User-Agent]
event: request
conditions:
- type: http_uri
path_begins_with_any:
- /HomePage/
- name: Remove all actions and conditions from the rule
bigip_policy_rule:
policy: Policy-Foo
name: rule1
conditions:
- type: all_traffic
actions:
- type: ignore
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
actions:
description: The new list of actions applied to the rule.
returned: changed
type: complex
contains:
type:
description: The action type.
returned: changed
type: str
sample: forward
pool:
description: Pool for forwarding to.
returned: changed
type: str
sample: foo-pool
sample: hash/dictionary of values
conditions:
description: The new list of conditions applied to the rule.
returned: changed
type: complex
contains:
type:
description: The condition type.
returned: changed
type: str
sample: http_uri
path_begins_with_any:
description: List of strings that the URI begins with.
returned: changed
type: list
sample: [foo, bar]
sample: hash/dictionary of values
description:
description: The new description of the rule.
returned: changed
type: str
sample: My rule
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ansible.module_utils.six import iteritems
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, fq_name
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'actionsReference': 'actions',
'conditionsReference': 'conditions',
}
api_attributes = [
'description',
'actions',
'conditions',
]
updatables = [
'actions',
'conditions',
'description',
]
returnables = [
'description',
'action',
'conditions'
]
@property
def name(self):
return self._values.get('name', None)
@property
def description(self):
return self._values.get('description', None)
@property
def policy(self):
if self._values['policy'] is None:
return None
return self._values['policy']
class ApiParameters(Parameters):
def _remove_internal_keywords(self, resource):
items = [
'kind', 'generation', 'selfLink', 'poolReference', 'offset',
]
for item in items:
try:
del resource[item]
except KeyError:
pass
@property
def actions(self):
result = []
if self._values['actions'] is None or 'items' not in self._values['actions']:
return [dict(type='ignore')]
for item in self._values['actions']['items']:
action = dict()
self._remove_internal_keywords(item)
if 'forward' in item:
action.update(item)
action['type'] = 'forward'
del action['forward']
elif 'enable' in item:
action.update(item)
action['type'] = 'enable'
del action['enable']
elif 'redirect' in item:
action.update(item)
action['type'] = 'redirect'
del action['redirect']
elif 'setVariable' in item:
action.update(item)
action['type'] = 'set_variable'
del action['fullPath']
del action['code']
del action['expirySecs']
del action['length']
del action['port']
del action['status']
del action['vlanId']
del action['timeout']
elif 'shutdown' in item:
action.update(item)
action['type'] = 'reset'
del action['shutdown']
if 'persist' in item:
action.update(item)
action['type'] = 'persist'
del action['persist']
result.append(action)
result = sorted(result, key=lambda x: x['name'])
return result
@property
def conditions(self):
result = []
if self._values['conditions'] is None or 'items' not in self._values['conditions']:
return [dict(type='all_traffic')]
for item in self._values['conditions']['items']:
action = dict()
self._remove_internal_keywords(item)
if 'httpUri' in item:
action.update(item)
action['type'] = 'http_uri'
del action['httpUri']
# Converts to common stringiness
#
# The tuple set "issubset" check that happens in the Difference
# engine does not recognize that a u'foo' and 'foo' are equal "enough"
# to consider them a subset. Therefore, we cast everything here to
# whatever the common stringiness is.
if 'values' in action:
action['values'] = [str(x) for x in action['values']]
elif 'httpHost' in item:
action.update(item)
action['type'] = 'http_host'
if 'values' in action:
action['values'] = [str(x) for x in action['values']]
elif 'sslExtension' in item:
action.update(item)
action['type'] = 'ssl_extension'
if 'values' in action:
action['values'] = [str(x) for x in action['values']]
result.append(action)
# Names contains the index in which the rule is at.
result = sorted(result, key=lambda x: x['name'])
return result
class ModuleParameters(Parameters):
@property
def actions(self):
result = []
if self._values['actions'] is None:
return None
for idx, item in enumerate(self._values['actions']):
action = dict()
if 'name' in item:
action['name'] = str(item['name'])
else:
action['name'] = str(idx)
if item['type'] == 'forward':
self._handle_forward_action(action, item)
elif item['type'] == 'set_variable':
self._handle_set_variable_action(action, item)
elif item['type'] == 'enable':
self._handle_enable_action(action, item)
elif item['type'] == 'ignore':
return [dict(type='ignore')]
elif item['type'] == 'redirect':
self._handle_redirect_action(action, item)
elif item['type'] == 'reset':
self._handle_reset_action(action, item)
del action['shutdown']
elif item['type'] == 'persist':
self._handle_persist_action(action, item)
result.append(action)
result = sorted(result, key=lambda x: x['name'])
return result
@property
def conditions(self):
result = []
if self._values['conditions'] is None:
return None
for idx, item in enumerate(self._values['conditions']):
action = dict()
if 'name' in item:
action['name'] = str(item['name'])
else:
action['name'] = str(idx)
if item['type'] == 'http_uri':
self._handle_http_uri_condition(action, item)
elif item['type'] == 'http_host':
self._handle_http_host_condition(action, item)
elif item['type'] == 'ssl_extension':
self._handle_ssl_extension_condition(action, item)
elif item['type'] == 'all_traffic':
return [dict(type='all_traffic')]
result.append(action)
result = sorted(result, key=lambda x: x['name'])
return result
def _handle_http_host_condition(self, action, item):
action['type'] = 'http_host'
if 'host_begins_with_any' in item and item['host_begins_with_any'] is not None:
if isinstance(item['host_begins_with_any'], list):
values = item['host_begins_with_any']
else:
values = [item['host_begins_with_any']]
action.update(dict(
host=True,
startsWith=True,
values=values
))
elif 'host_is_any' in item and item['host_is_any'] is not None:
if isinstance(item['host_is_any'], list):
values = item['host_is_any']
else:
values = [item['host_is_any']]
action.update(dict(
equals=True,
host=True,
values=values
))
elif 'host_is_not_any' in item and item['host_is_not_any'] is not None:
if isinstance(item['host_is_not_any'], list):
values = item['host_is_not_any']
else:
values = [item['host_is_not_any']]
action.update({
'equals': True,
'host': True,
'not': True,
'values': values
})
def _handle_http_uri_condition(self, action, item):
"""Handle the nuances of the forwarding type
Right now there is only a single type of forwarding that can be done. As that
functionality expands, so-to will the behavior of this, and other, methods.
Therefore, do not be surprised that the logic here is so rigid. It's deliberate.
:param action:
:param item:
:return:
"""
action['type'] = 'http_uri'
if 'path_begins_with_any' not in item:
raise F5ModuleError(
"A 'path_begins_with_any' must be specified when the 'http_uri' type is used."
)
if isinstance(item['path_begins_with_any'], list):
values = item['path_begins_with_any']
else:
values = [item['path_begins_with_any']]
action.update(dict(
path=True,
startsWith=True,
values=values
))
def _handle_ssl_extension_condition(self, action, item):
action['type'] = 'ssl_extension'
if 'server_name_is_any' in item:
if isinstance(item['server_name_is_any'], list):
values = item['server_name_is_any']
else:
values = [item['server_name_is_any']]
action.update(dict(
equals=True,
serverName=True,
values=values
))
if 'event' not in item:
raise F5ModuleError(
"An 'event' must be specified when the 'ssl_extension' condition is used."
)
elif 'ssl_client_hello' in item['event']:
action.update(dict(
sslClientHello=True
))
elif 'ssl_server_hello' in item['event']:
action.update(dict(
sslServerHello=True
))
def _handle_forward_action(self, action, item):
"""Handle the nuances of the forwarding type
Right now there is only a single type of forwarding that can be done. As that
        functionality expands, so too will the behavior of this, and other, methods.
Therefore, do not be surprised that the logic here is so rigid. It's deliberate.
:param action:
:param item:
:return:
"""
action['type'] = 'forward'
if not any(x for x in ['pool', 'virtual', 'node'] if x in item):
raise F5ModuleError(
"A 'pool' or 'virtual' or 'node' must be specified when the 'forward' type is used."
)
if item.get('pool', None):
action['pool'] = fq_name(self.partition, item['pool'])
elif item.get('virtual', None):
action['virtual'] = fq_name(self.partition, item['virtual'])
elif item.get('node', None):
action['node'] = item['node']
def _handle_set_variable_action(self, action, item):
"""Handle the nuances of the set_variable type
:param action:
:param item:
:return:
"""
if 'expression' not in item and 'variable_name' not in item:
raise F5ModuleError(
"A 'variable_name' and 'expression' must be specified when the 'set_variable' type is used."
)
if 'event' in item and item['event'] is not None:
action[item['event']] = True
else:
action['request'] = True
action.update(dict(
type='set_variable',
expression=item['expression'],
tmName=item['variable_name'],
setVariable=True,
tcl=True
))
def _handle_enable_action(self, action, item):
"""Handle the nuances of the enable type
:param action:
:param item:
:return:
"""
action['type'] = 'enable'
if 'asm_policy' not in item:
raise F5ModuleError(
"An 'asm_policy' must be specified when the 'enable' type is used."
)
action.update(dict(
policy=fq_name(self.partition, item['asm_policy']),
asm=True
))
def _handle_redirect_action(self, action, item):
"""Handle the nuances of the redirect type
:param action:
:param item:
:return:
"""
action['type'] = 'redirect'
if 'location' not in item:
raise F5ModuleError(
"A 'location' must be specified when the 'redirect' type is used."
)
action.update(
location=item['location'],
httpReply=True,
)
def _handle_reset_action(self, action, item):
"""Handle the nuances of the reset type
:param action:
:param item:
:return:
"""
action['type'] = 'reset'
if 'event' not in item:
raise F5ModuleError(
"An 'event' must be specified when the 'reset' type is used."
)
elif 'ssl_client_hello' in item['event']:
action.update(dict(
sslClientHello=True,
connection=True,
shutdown=True
))
def _handle_persist_action(self, action, item):
"""Handle the nuances of the persist type
:param action:
:param item:
:return:
"""
action['type'] = 'persist'
if 'cookie_insert' not in item:
raise F5ModuleError(
"A 'cookie_insert' must be specified when the 'persist' type is used."
)
elif 'cookie_expiry' in item:
action.update(
cookieInsert=True,
tmName=item['cookie_insert'],
expiry=str(item['cookie_expiry'])
)
else:
action.update(
cookieInsert=True,
tmName=item['cookie_insert']
)
class Changes(Parameters):
def to_return(self):
result = {}
for returnable in self.returnables:
try:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class ReportableChanges(Changes):
returnables = [
'description', 'actions', 'conditions'
]
@property
def actions(self):
result = []
if self._values['actions'] is None:
return [dict(type='ignore')]
for item in self._values['actions']:
action = dict()
if 'forward' in item:
action.update(item)
action['type'] = 'forward'
del action['forward']
elif 'set_variable' in item:
action.update(item)
action['type'] = 'set_variable'
del action['set_variable']
elif 'enable' in item:
action.update(item)
action['type'] = 'enable'
del action['enable']
elif 'redirect' in item:
action.update(item)
action['type'] = 'redirect'
del action['redirect']
del action['httpReply']
elif 'reset' in item:
action.update(item)
action['type'] = 'reset'
del action['connection']
del action['shutdown']
elif 'persist' in item:
action.update(item)
action['type'] = 'persist'
action['cookie_insert'] = action['tmName']
if 'expiry' in item:
action['cookie_expiry'] = int(action['expiry'])
del action['expiry']
del action['tmName']
del action['persist']
del action['cookieInsert']
result.append(action)
result = sorted(result, key=lambda x: x['name'])
return result
@property
def conditions(self):
result = []
if self._values['conditions'] is None:
return [dict(type='all_traffic')]
for item in self._values['conditions']:
action = dict()
if 'httpUri' in item:
action.update(item)
action['type'] = 'http_uri'
del action['httpUri']
elif 'httpHost' in item:
action.update(item)
action['type'] = 'http_host'
del action['httpHost']
elif 'sslExtension' in item:
action.update(item)
action['type'] = 'ssl_extension'
del action['sslExtension']
result.append(action)
# Names contains the index in which the rule is at.
result = sorted(result, key=lambda x: x['name'])
return result
class UsableChanges(Changes):
@property
def actions(self):
if self._values['actions'] is None:
return None
result = []
for action in self._values['actions']:
if 'type' not in action:
continue
if action['type'] == 'forward':
action['forward'] = True
del action['type']
elif action['type'] == 'enable':
action['enable'] = True
del action['type']
elif action['type'] == 'set_variable':
action['setVariable'] = True
action['tcl'] = True
del action['type']
elif action['type'] == 'ignore':
result = []
break
elif action['type'] == 'redirect':
action['httpReply'] = True
action['redirect'] = True
del action['type']
elif action['type'] == 'reset':
action['shutdown'] = True
action['connection'] = True
del action['type']
elif action['type'] == 'persist':
action['persist'] = True
del action['type']
result.append(action)
return result
@property
def conditions(self):
if self._values['conditions'] is None:
return None
result = []
for condition in self._values['conditions']:
if 'type' not in condition:
continue
if condition['type'] == 'http_uri':
condition['httpUri'] = True
del condition['type']
elif condition['type'] == 'http_host':
condition['httpHost'] = True
del condition['type']
elif condition['type'] == 'ssl_extension':
condition['sslExtension'] = True
del condition['type']
elif condition['type'] == 'all_traffic':
result = []
break
result.append(condition)
return result
class Difference(object):
updatables = [
'actions', 'conditions', 'description'
]
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
def to_tuple(self, items):
result = []
for x in items:
tmp = [(str(k), str(v)) for k, v in iteritems(x)]
result += tmp
return result
def _diff_complex_items(self, want, have):
if want == [] and have is None:
return None
if want is None:
return None
w = self.to_tuple(want)
h = self.to_tuple(have)
if set(w).issubset(set(h)):
return None
else:
return want
@property
def actions(self):
result = self._diff_complex_items(self.want.actions, self.have.actions)
if self._conditions_missing_default_rule_for_asm(result):
raise F5ModuleError(
"Valid options when using an ASM policy in a rule's 'enable' action include all_traffic, http_uri, or http_host."
)
return result
@property
def conditions(self):
result = self._diff_complex_items(self.want.conditions, self.have.conditions)
return result
def _conditions_missing_default_rule_for_asm(self, want_actions):
if want_actions is None:
actions = self.have.actions
else:
actions = want_actions
if actions is None:
return False
if any(x for x in actions if x['type'] == 'enable'):
conditions = self._diff_complex_items(self.want.conditions, self.have.conditions)
if conditions is None:
return False
if any(y for y in conditions if y['type'] not in ['all_traffic', 'http_uri', 'http_host']):
return True
return False
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
if self.draft_exists():
redraft = True
else:
redraft = False
self._create_existing_policy_draft_on_device()
self.update_on_device()
if redraft is False:
self.publish_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
if self.draft_exists():
redraft = True
else:
redraft = False
self._create_existing_policy_draft_on_device()
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
if redraft is False:
self.publish_on_device()
return True
def create(self):
self.should_update()
if self.module.check_mode:
return True
if self.draft_exists():
redraft = True
else:
redraft = False
self._create_existing_policy_draft_on_device()
self.create_on_device()
if redraft is False:
self.publish_on_device()
return True
def exists(self):
if self.draft_exists():
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.policy),
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def draft_exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.policy, sub_path='Drafts')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def _create_existing_policy_draft_on_device(self):
params = dict(createDraft=True)
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.policy)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def publish_on_device(self):
params = dict(
name=fq_name(self.want.partition,
self.want.policy,
sub_path='Drafts'
),
command="publish"
)
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
self.want.name
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
self.want.name
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
if self.draft_exists():
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.policy, sub_path='Drafts'),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.policy),
self.want.name
)
query = "?expandSubcollections=true"
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
description=dict(),
actions=dict(
type='list',
elements='dict',
options=dict(
type=dict(
choices=[
'forward',
'enable',
'ignore',
'redirect',
'reset',
'persist',
'set_variable'
],
required=True
),
pool=dict(),
node=dict(),
asm_policy=dict(),
virtual=dict(),
location=dict(),
event=dict(),
cookie_insert=dict(),
cookie_expiry=dict(type='int'),
expression=dict(),
variable_name=dict()
),
mutually_exclusive=[
['pool', 'asm_policy', 'virtual', 'location', 'cookie_insert', 'node']
]
),
conditions=dict(
type='list',
elements='dict',
options=dict(
type=dict(
choices=[
'http_uri',
'http_host',
'ssl_extension',
'all_traffic'
],
required=True
),
path_begins_with_any=dict(
type='list',
elements='str',
),
host_begins_with_any=dict(
type='list',
elements='str',
),
host_is_any=dict(
type='list',
elements='str',
),
host_is_not_any=dict(
type='list',
elements='str',
),
server_name_is_any=dict(
type='list',
elements='str',
),
event=dict()
),
),
name=dict(required=True),
policy=dict(required=True),
state=dict(
default='present',
choices=['absent', 'present']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ff2b2b75ab60f2bcdf63d75afe4f428eebcd8f6a | 4488e3c26de4291da447d8251c491b43cb810f7c | /orgstruct_partner_zip/__openerp__.py | a3cafaa4ab0bc9f4d686889567eae43e20dccf4d | [] | no_license | smart-solution/odoo-crm-80 | b19592ce6e374c9c7b0a3198498930ffb1283018 | 85dfd0cc37f81bcba24d2a0091094708a262fe2c | refs/heads/master | 2016-09-06T06:04:35.191924 | 2015-07-14T12:48:28 | 2015-07-14T12:48:28 | 33,174,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Organisation Structure Partner Zip',
'version': '1.0',
'category': 'Base',
'description': """
Manage organisation structure
""",
    'author': 'Smart Solution',
'website': 'http://www.smartsolution.be',
'depends': ['orgstruct'],
'data': [
'orgstruct_partner_zip_view.xml',
],
'installable': True,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
3a53cb314b58082e61aced67aaaa888078c41c10 | 4349c9bea560b094c9c84540b539b612bef40953 | /subliminal/plugins/Subtitulos.py | bcb26d7660b42c8945d9ef49c23251f6c806f8e1 | [] | no_license | fgel/subliminal | 456c263603cbe5143e6b6343930222ece9c465dc | 3cf265f6c978506d02e74c87cadd0a8e6c6419fe | refs/heads/master | 2021-01-18T09:21:04.687551 | 2011-11-11T19:37:19 | 2011-11-11T19:37:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,288 | py | # -*- coding: utf-8 -*-
#
# Subliminal - Subtitles, faster than your thoughts
# Copyright (c) 2008-2011 Patrick Dessalle <[email protected]>
# Copyright (c) 2011 Antoine Bertin <[email protected]>
#
# This file is part of Subliminal.
#
# Subliminal is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from BeautifulSoup import BeautifulSoup
import guessit
import urllib2
import unicodedata
import re
import PluginBase
from subliminal.classes import Subtitle
class Subtitulos(PluginBase.PluginBase):
site_url = 'http://www.subtitulos.es'
site_name = 'Subtitulos'
server_url = 'http://www.subtitulos.es'
api_based = False
_plugin_languages = {u'English (US)': 'en', u'English (UK)': 'en', u'English': 'en', u'French': 'fr', u'Brazilian': 'pt-br',
u'Portuguese': 'pt', u'Español (Latinoamérica)': 'es', u'Español (España)': 'es', u'Español': 'es', u'Italian': 'it',
u'Català': 'ca'}
def __init__(self, config_dict=None):
super(Subtitulos, self).__init__(self._plugin_languages, config_dict, True)
self.release_pattern = re.compile('Versión (.+) ([0-9]+).([0-9])+ megabytes')
def list(self, filepath, languages):
possible_languages = self.possible_languages(languages)
if not possible_languages:
return []
guess = guessit.guess_file_info(filepath, 'autodetect')
if guess['type'] != 'episode':
self.logger.debug(u'Not an episode')
return []
# add multiple things to the release group set
release_group = set()
if 'releaseGroup' in guess:
release_group.add(guess['releaseGroup'].lower())
else:
if 'title' in guess:
release_group.add(guess['title'].lower())
if 'screenSize' in guess:
release_group.add(guess['screenSize'].lower())
if 'series' not in guess or len(release_group) == 0:
self.logger.debug(u'Not enough information to proceed')
return []
self.release_group = release_group # used to sort results
return self.query(guess['series'], guess['season'], guess['episodeNumber'], release_group, filepath, possible_languages)
def query(self, name, season, episode, release_group, filepath, languages):
sublinks = []
searchname = name.lower().replace(' ', '-')
if isinstance(searchname, unicode):
searchname = unicodedata.normalize('NFKD', searchname).encode('ascii','ignore')
searchurl = '%s/%s/%sx%.2d' % (self.server_url, urllib2.quote(searchname), season, episode)
self.logger.debug(u'Searching in %s' % searchurl)
try:
req = urllib2.Request(searchurl, headers={'User-Agent': self.user_agent})
page = urllib2.urlopen(req, timeout=self.timeout)
except urllib2.HTTPError as inst:
self.logger.info(u'Error: %s - %s' % (searchurl, inst))
return []
except urllib2.URLError as inst:
self.logger.info(u'TimeOut: %s' % inst)
return []
soup = BeautifulSoup(page.read())
for subs in soup('div', {'id': 'version'}):
version = subs.find('p', {'class': 'title-sub'})
sub_teams = self.listTeams([self.release_pattern.search('%s' % version.contents[1]).group(1).lower()], ['.', '_', ' ', '/', '-'])
self.logger.debug(u'Team from website: %s' % sub_teams)
self.logger.debug(u'Team from file: %s' % release_group)
if not release_group.intersection(sub_teams): # On wrong team
continue
for html_language in subs.findAllNext('ul', {'class': 'sslist'}):
sub_language = self.getRevertLanguage(html_language.findNext('li', {'class': 'li-idioma'}).find('strong').contents[0].string.strip())
if not sub_language in languages: # On wrong language
continue
html_status = html_language.findNext('li', {'class': 'li-estado green'})
sub_status = html_status.contents[0].string.strip()
if not sub_status == 'Completado': # On not completed subtitles
continue
sub_link = html_status.findNext('span', {'class': 'descargar green'}).find('a')['href']
result = Subtitle(filepath, self.getSubtitlePath(filepath, sub_language), self.__class__.__name__, sub_language, sub_link, teams=sub_teams)
sublinks.append(result)
sublinks.sort(self._cmpReleaseGroup)
return sublinks
def download(self, subtitle):
self.downloadFile(subtitle.link, subtitle.path)
return subtitle
| [
"[email protected]"
] | |
6ec582b45c915e09dd744f84899c6718fc1c86f7 | f6c6e0ebc18b7b1a28c23367f62c960e86194c88 | /fileIO/hdf5/backup/plot_h5.py | 0190bfb940e43963a0d6738e3a4fb4c64a2e2b2f | [] | no_license | TheGrim1/python_work | 9316d6fbb71a4be9bd901f104e939949dfd91174 | 5b34277aed4c06b62276644160e0aa97a4260233 | refs/heads/master | 2021-01-11T13:54:54.366575 | 2019-03-12T12:38:39 | 2019-03-12T12:38:39 | 94,876,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | from __future__ import print_function
# home: /data/id13/inhouse2/AJ/skript/fileIO/hdf5/open_h5.py
# global imports
import h5py
import sys, os
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import time
# local imports
from open_h5 import open_h5
def plot_h5(data,
index=0,
title = "Title"):
dimension = len(data.shape)
print("dimension of data to be plotted is %s" % dimension)
if dimension == 3:
plt.imshow(data[index,:,:], interpolation = 'none')
elif dimension == 2:
plt.imshow(data[:,:], interpolation = 'none')
elif dimension not in (2,3):
print("invalid data for plotting \ntitle : %s\n%s" % (title, dimension))
# plt.clim(0,0.001)
    plt.title(title)
    plt.show()
def plotmany_h5(data, index=0):
    'plots frame <index> of a 2D or 3D dataset on a logarithmic color scale'
    dimension = len(data.shape)
    ax1 = plt.subplot(1,1,1, facecolor=(0.9, 0.9, 0.95))
    ax1.figure.set_size_inches(10,10)
    if dimension == 3:
        toplot = data[index,:,:]
        nimages = data.shape[0]
    elif dimension == 2:
        toplot = data[:,:]
        nimages = 1
    else:
        print("invalid data for plotting, got %s dimensions" % dimension)
        return
    ax1.set_title("Plotting image no %s of %s" % (index, nimages))
    plt.ion()
    ax1.pcolor(toplot, norm=LogNorm(vmin=max(data.min(),0.0001), vmax=max(data.max(),0.01)), cmap='PuBu')
    plt.show()
def main(args):
'also does the plotting'
for fname in args:
data = open_h5(fname, framelist = [529,530,532], threshold = 5000000)
# print 'the data shape is:'
# print data.shape
if data.ndim != 2:
plotmany_h5(data)
else:
plot_h5(data, title = os.path.basename(fname))
if __name__ == '__main__':
usage =""" \n1) python <thisfile.py> <arg1> <arg2> etc.
\n2) python <thisfile.py> -f <file containing args as lines>
\n3) find <*yoursearch* -> arg1 etc.> | python <thisfile.py>
"""
args = []
if len(sys.argv) > 1:
if sys.argv[1].find("-f")!= -1:
f = open(sys.argv[2])
for line in f:
args.append(line.rstrip())
else:
args=sys.argv[1:]
else:
f = sys.stdin
for line in f:
args.append(line.rstrip())
# print args
main(args)
| [
"[email protected]"
] | |
bca28e38fcd7944d329ae326037eaa937382d563 | 032144d039ead151804d56910a4059d0fc48c374 | /civpy/structures/node_load.py | f1c958aa95696c84c41c95e1f4f0d620e2bcf427 | [
"BSD-3-Clause"
] | permissive | mpewsey/civpy | 038f5bd4a22971864cceb7c4f9568fdcca40a147 | bbf74b1c04ca9f7604831f5280cc80d796240e67 | refs/heads/master | 2022-02-26T06:45:40.087975 | 2019-05-05T04:48:47 | 2019-05-05T04:48:47 | 170,751,326 | 16 | 3 | BSD-3-Clause | 2022-02-11T02:30:16 | 2019-02-14T20:09:32 | Python | UTF-8 | Python | false | false | 5,178 | py | """
Copyright (c) 2019, Matt Pewsey
"""
import weakref
import numpy as np
__all__ = ['NodeLoad']
class NodeLoad(np.ndarray):
"""
A class representing a load applied to a node.
Parameters
----------
node : str
The name of the node to which the load will be applied.
fx, fy, fz : float
The applied global node forces.
mx, my, mz : float
The applied global moments.
dx, dy, dz : float
The applied node deflections.
rx, ry, rz : float
The applied node rotations.
"""
def __new__(cls, node, fx=0, fy=0, fz=0, mx=0, my=0, mz=0,
dx=0, dy=0, dz=0, rx=0, ry=0, rz=0):
obj = np.array([fx, fy, fz, mx, my, mz,
dx, dy, dz, rx, ry, rz], dtype='float').view(cls)
obj.node = node
return obj
def __array_finalize__(self, obj):
if obj is None: return
self.node = getattr(obj, 'node', '')
self.node_ref = None
def node():
def fget(self):
return self._node
def fset(self, value):
if not isinstance(value, str):
value = str(value)
self._node = value
def fdel(self):
del self._node
return locals()
node = property(**node())
def node_ref():
def fget(self):
value = self._node_ref
if value is None:
return value
return value()
def fset(self, value):
if value is not None:
value = weakref.ref(value)
self._node_ref = value
def fdel(self):
del self._node_ref
return locals()
node_ref = property(**node_ref())
def fx():
def fget(self):
return self[0]
def fset(self, value):
self[0] = value
return locals()
fx = property(**fx())
def fy():
def fget(self):
return self[1]
def fset(self, value):
self[1] = value
return locals()
fy = property(**fy())
def fz():
def fget(self):
return self[2]
def fset(self, value):
self[2] = value
return locals()
fz = property(**fz())
def mx():
def fget(self):
return self[3]
def fset(self, value):
self[3] = value
return locals()
mx = property(**mx())
def my():
def fget(self):
return self[4]
def fset(self, value):
self[4] = value
return locals()
my = property(**my())
def mz():
def fget(self):
return self[5]
def fset(self, value):
self[5] = value
return locals()
mz = property(**mz())
def dx():
def fget(self):
return self[6]
def fset(self, value):
self[6] = value
def fdel(self):
del self._dx
return locals()
dx = property(**dx())
def dy():
def fget(self):
return self[7]
def fset(self, value):
self[7] = value
def fdel(self):
del self._dy
return locals()
dy = property(**dy())
def dz():
def fget(self):
return self[8]
def fset(self, value):
self[8] = value
def fdel(self):
del self._dz
return locals()
dz = property(**dz())
def rx():
def fget(self):
return self[9]
def fset(self, value):
self[9] = value
def fdel(self):
del self._rx
return locals()
rx = property(**rx())
def ry():
def fget(self):
return self[10]
def fset(self, value):
self[10] = value
def fdel(self):
del self._ry
return locals()
ry = property(**ry())
def rz():
def fget(self):
return self[11]
def fset(self, value):
self[11] = value
def fdel(self):
del self._rz
return locals()
rz = property(**rz())
def __repr__(self):
s = [
'node={!r}'.format(self.node),
'forces={!r}'.format((self.fx, self.fy, self.fz)),
'moments={!r}'.format((self.mx, self.my, self.mz)),
'defl={!r}'.format((self.dx, self.dy, self.dz)),
'rot={!r}'.format((self.rx, self.ry, self.rz))
]
return '{}({})'.format(type(self).__name__, ', '.join(s))
def forces(self):
"""Returns the applied force and moment matrix."""
return self[:6]
def deflections(self):
"""Returns the applied deflection and rotation matrix."""
return self[6:]
def get_node(self):
"""Gets the referenced node."""
if self.node_ref is None:
raise ValueError('Node has not been set.')
return self.node_ref
def set_node(self, ndict):
"""
Sets the node reference.
Parameters
----------
ndict : dict
A dictionary mapping node names to node objects.
"""
self.node_ref = ndict[self.node]
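
# Minimal usage sketch (added for illustration; not part of the original module). It only
# exercises the API defined above; _DemoNode is a hypothetical stand-in for the package's
# real node objects - anything that supports weak references will do.
if __name__ == '__main__':
    load = NodeLoad('N1', fx=10, fy=-5, mz=2.5, dz=-0.01)
    print(load)                              # repr lists forces, moments, deflections and rotations
    print(np.asarray(load.forces()))         # first six components: fx, fy, fz, mx, my, mz
    print(np.asarray(load.deflections()))    # last six components: dx, dy, dz, rx, ry, rz

    class _DemoNode(object):
        pass

    node = _DemoNode()
    load.set_node({'N1': node})              # resolve the node name against a name -> object mapping
    print(load.get_node() is node)           # True while `node` is alive; only a weak reference is stored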
| [
"[email protected]"
] | |
7996b0260e16f6a5f2b53bf673c0e97d691983cd | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/39/usersdata/136/13246/submittedfiles/dec2bin.py | f1bd3e0804d034f78294918ab7ae45017384299e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | n = int(input("Digite um número decimal:"))
i = 0  # result accumulator; the prompt above ("Digite um número decimal") asks the user for a decimal number
j = 1  # current decimal place of the result
while n > 0:
    d = n % 2        # next binary digit (least significant first)
    n = n // 2       # integer division; a plain / would give a float in Python 3 and corrupt the digits
    i = i + (d * j)  # put the digit at the current decimal place
    j = j * 10
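# Worked trace (hypothetical run, assuming the user types 13):
#   13 % 2 = 1, 13 // 2 = 6  -> i = 1,    j = 10
#    6 % 2 = 0,  6 // 2 = 3  -> i = 1,    j = 100
#    3 % 2 = 1,  3 // 2 = 1  -> i = 101,  j = 1000
#    1 % 2 = 1,  1 // 2 = 0  -> i = 1101, j = 10000
# so the print below outputs 1101, the binary representation of 13.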
print("%d"%i) | [
"[email protected]"
] | |
e499d64a713706ac756077b6e2c5e825b687d3c7 | ff941fe046a593189050b6fdf77d44ade0925e8a | /lesson_python_basics/default_parameters2.py | 7a862b12f45c6e47912aa620b568a03c7691b53e | [] | no_license | hamk-webdev-intip19x6/petrikuittinen_assignments | c0dd02d3465ebf29f4387ab2805b12858c22110b | 68dc154fbc571d8bc85f8eec0130b49e143c1e51 | refs/heads/master | 2021-11-11T04:08:45.963836 | 2021-10-04T15:47:03 | 2021-10-04T15:47:03 | 233,399,492 | 0 | 9 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | def ask(question, choices, correct, retries=2):
i = 1
print(question)
for c in choices:
print(i, c)
i += 1
while retries>0:
try:
guess = int(input("?"))
except ValueError:
continue
if guess==correct:
print("right")
break
print("wrong guess")
retries -= 1
    else:
        # while-else: this branch runs only when the loop finishes without a break,
        # i.e. every retry was used up without a correct answer
        print("the correct reply was", correct, choices[correct-1])
ask("What is the capital of Australia?", \
("London", "Sydney", "Canberra", "Victoria"), 3)
ask("When Finland gained independence?", \
("1900", "1917", "1919", "1939"), 2, 1)
ask(question="What is the chemical symbol of Iron?", \
correct=1, choices=("Fe", "R", "Ir", "I"))
ask("How to delete a variable in Python?", \
("delete", "del", "remove", "destroy"), \
retries=3, correct=2)
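
# Hypothetical extra call (commented out; not in the original lesson): with retries=0 the
# loop body never runs, so the while-else branch fires immediately and reveals the answer.
# ask("What is 2 + 2?", ("3", "4", "5"), 2, retries=0)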
| [
"pkuittinen@ubuntupetrikuittinen.xbp5jv35e4rujjrlarqjj32eqf.fx.internal.cloudapp.net"
] | pkuittinen@ubuntupetrikuittinen.xbp5jv35e4rujjrlarqjj32eqf.fx.internal.cloudapp.net |
1b7ef58ff701c085ead9ee493e21e53abfca2806 | 250b997d715c168315a927e28124cf24c77048c0 | /python3基础/9.Python修炼第九层/day9预习/11 生产者消费者模型.py | c20f15d6206f6793cd0c499148df156b6e894097 | [] | no_license | cuitianfeng/Python | c78077e5dcad01ee5fe44c0aa8b61bbc2fa388cf | 9c9f10f13311116ce0bc60ec128f765ff2ca3078 | refs/heads/master | 2023-01-10T23:25:57.158141 | 2020-11-17T15:39:36 | 2020-11-17T15:39:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,299 | py | # from multiprocessing import Process,Queue
# import time,os
# def producer(q,name):
# for i in range(3):
# time.sleep(1)
# res='%s%s' %(name,i)
# q.put(res)
# print('\033[45m<%s> 生产了 [%s]\033[0m' %(os.getpid(),res))
#
#
# def consumer(q):
# while True:
# res=q.get()
# if res is None:break
# time.sleep(1.5)
# print('\033[34m<%s> 吃了 [%s]\033[0m' % (os.getpid(), res))
#
# if __name__ == '__main__':
# q=Queue()
# #生产者们:即厨师们
# p1=Process(target=producer,args=(q,'包子'))
# p2=Process(target=producer,args=(q,'饺子'))
# p3=Process(target=producer,args=(q,'馄饨'))
#
# #消费者们:即吃货们
# c1=Process(target=consumer,args=(q,))
# c2=Process(target=consumer,args=(q,))
#
# p1.start()
# p2.start()
# p3.start()
# c1.start()
# c2.start()
#
# p1.join()
# p2.join()
# p3.join()
# q.put(None)
# q.put(None)
#
# print('主')
# from multiprocessing import Process, JoinableQueue
# import time, os
#
#
# def producer(q, name):
# for i in range(3):
# time.sleep(1)
# res = '%s%s' % (name, i)
# q.put(res)
# print('\033[45m<%s> 生产了 [%s]\033[0m' % (os.getpid(), res))
# q.join()
#
# def consumer(q):
# while True:
# res = q.get()
# time.sleep(1.5)
# print('\033[34m<%s> 吃了 [%s]\033[0m' % (os.getpid(), res))
# q.task_done()
#
# if __name__ == '__main__':
# q = JoinableQueue()
#
# # 生产者们:即厨师们
# p1 = Process(target=producer, args=(q, '包子'))
# p2 = Process(target=producer, args=(q, '饺子'))
# p3 = Process(target=producer, args=(q, '馄饨'))
#
# # 消费者们:即吃货们
# c1 = Process(target=consumer, args=(q,))
# c2 = Process(target=consumer, args=(q,))
#
# c1.daemon=True
# c2.daemon=True
# p1.start()
# p2.start()
# p3.start()
# c1.start()
# c2.start()
#
#
# p1.join()
#
# print('主')
# -----------------
# from multiprocessing import Process,Queue
# import time
# import os
#
# def producer(q,name):
# for i in range(3):
# time.sleep(1)
# res='%s%s' %(name,i)
# q.put(res)
# print('\033[45m<%s> 生产了 [%s]\033[0m' %(os.getpid(),res))
#
#
# def consumer(q):
# while True:
# res=q.get()
# if res is None:break
# time.sleep(1.5)
# print('\033[34m<%s> 吃了 [%s]\033[0m' % (os.getpid(), res))
#
# if __name__ == '__main__':
# q=Queue()
# #生产者们:即厨师们
# p1=Process(target=producer,args=(q,'包子'))
# p2=Process(target=producer,args=(q,'饺子'))
# p3=Process(target=producer,args=(q,'馄饨'))
#
# #消费者们:即吃货们
# c1=Process(target=consumer,args=(q,))
# c2=Process(target=consumer,args=(q,))
#
# p1.start()
# p2.start()
# p3.start()
#
# c1.start()
# c2.start()
#
# p1.join()
# p2.join()
# p3.join()
# q.put(None)
# q.put(None)
#
# print('主')
#
from multiprocessing import Process,JoinableQueue
import time
import os
def producer(q,name):
for i in range(3):
time.sleep(1)
res='%s%s' %(name,i)
q.put(res)
        print('\033[45m<%s> 生产了 [%s]\033[0m' %(os.getpid(),res))  # "<pid> produced [item]"
q.join()
def consumer(q):
while True:
res=q.get()
time.sleep(1.5)
        print('\033[34m<%s> 吃了 [%s]\033[0m' % (os.getpid(), res))  # "<pid> ate [item]"
q.task_done()
if __name__ == '__main__':
    q=JoinableQueue()
    # producers: the cooks
    p1=Process(target=producer,args=(q,'包子'))  # baozi
    p2=Process(target=producer,args=(q,'饺子'))  # jiaozi
    p3=Process(target=producer,args=(q,'馄饨'))  # wontons
    # consumers: the eaters
    c1=Process(target=consumer,args=(q,))
    c2=Process(target=consumer,args=(q,))
    c1.daemon=True
    c2.daemon=True
    p1.start()
    p2.start()
    p3.start()
    c1.start()
    c2.start()
    p1.join()
    p2.join()
    p3.join()
    print('主')  # "main": reaching this line means every queued item has been consumed
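
    # How the shutdown works in the version above (added commentary, not original code):
    # - each producer calls q.join() after its last put(); that call blocks until the consumers
    #   have called q.task_done() once per item, so the p.join() calls above only return once
    #   everything that was produced has been consumed;
    # - the consumers loop forever, but they are daemon processes, so they are terminated
    #   automatically when the main process exits - no None "poison pill" is needed, unlike
    #   the plain-Queue version kept commented out earlier in this file.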
| [
"[email protected]"
] | |
56e5c49696fd2ab9d295abcdc27255368e6e7461 | ea83d172b211dad5a5d3680a537e4d2d538f42d9 | /week2_priority_queues_and_disjoint_sets/1_make_heap/test_build_heap_cases.py | a7a62dbb8dc4ded4e681d2d59664d2f5a4b798b0 | [] | no_license | FluffyFu/UCSD_Algorithms_Course_2 | 9e17e696be14b70da0d221802e4fb8527aeab0aa | f56aeee174f89cebffe5df6abb3930bda1fd4709 | refs/heads/master | 2022-12-07T00:38:28.499483 | 2020-08-27T15:39:36 | 2020-08-27T15:39:36 | 285,307,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from build_heap import build_heap
with open('tests/04') as f:
n = int(f.readline())
data = list(map(int, f.readline().split()))
with open('tests/04.a') as f:
n_swaps = int(f.readline())
results = []
for line in f.readlines():
results.append(tuple(map(int, line.split())))
my_results = build_heap(data)
# my_results = [(b, a) for a, b in my_results]
assert my_results == results, 'my results len: {}, truth len: {}'.format(
len(my_results), len(results))
print('my_results: ', my_results[:10])
print('truth: ', results[:10])
| [
"[email protected]"
] |