Dataset schema (column name, dtype, observed length/range; ⌀ marks nullable columns):

| column | dtype | length / range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0-112 |
| license_type | string | 2 classes |
| repo_name | string | length 5-115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] ⌀ | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] ⌀ | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1-132 |
dd57282a6f43709922c5f7cbe9ce63f81e77bcd0 | 414db33a43c50a500741784eea627ba98bb63e27 | /0x0A-python-inheritance/9-rectangle.py | 4092a9005ebb2873185b2c9b324c123b1c9c6344 | [] | no_license | rayraib/holbertonschool-higher_level_programming | 2308ea02bd7f97eae3643e3ce0a6489cc1ad9ff5 | 6b4196eb890ffcb91e541431da9f5f57c5b85d4e | refs/heads/master | 2021-09-14T09:12:26.664653 | 2018-05-11T03:23:12 | 2018-05-11T03:23:12 | 113,070,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | #!/usr/bin/python3
BaseGeometry = __import__('7-base_geometry').BaseGeometry
'''
subclass of BaseGeometry class
'''
class Rectangle(BaseGeometry):
''' representation of a rectangle'''
def __init__(self, width, height):
'''initialize the object attributes'''
BaseGeometry.integer_validator(self, "height", height)
self.__height = height
BaseGeometry.integer_validator(self, "width", width)
self.__width = width
def area(self):
''' calculate area of the rectangle'''
return (self.__height * self.__width)
def __str__(self):
'''return informal string representation of the object itself'''
return ("[Rectangle] {}/{}".format(self.__width, self.__height))
| [
"[email protected]"
] | |
902b09ed2ee809a19293ec13b3fccd3cf58d2dbf | 6ffd23679939f59f0a09c9507a126ba056b239d7 | /imperative/python/megengine/core/_trace_option.py | 638c142a12249cc9b7381b3c378d5b01f5b5ff9e | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | MegEngine/MegEngine | 74c1c9b6022c858962caf7f27e6f65220739999f | 66b79160d35b2710c00befede0c3fd729109e474 | refs/heads/master | 2023-08-23T20:01:32.476848 | 2023-08-01T07:12:01 | 2023-08-11T06:04:12 | 248,175,118 | 5,697 | 585 | Apache-2.0 | 2023-07-19T05:11:07 | 2020-03-18T08:21:58 | C++ | UTF-8 | Python | false | false | 862 | py | # -*- coding: utf-8 -*-
import os
from ._imperative_rt.core2 import set_cpp_use_symbolic_shape
_use_symbolic_shape = False
if os.environ.get("MEGENGINE_USE_SYMBOLIC_SHAPE"):
_use_symbolic_shape = True
_use_xla_backend = False
def use_symbolic_shape() -> bool:
r"""Returns whether tensor.shape returns a tensor instead of a tuple"""
return _use_symbolic_shape
def set_symbolic_shape(option: bool):
r"""Sets whether tensor.shape returns a tensor instead of a tuple"""
global _use_symbolic_shape
_org = _use_symbolic_shape
_use_symbolic_shape = option
return _org
def use_xla_backend() -> bool:
return _use_xla_backend
def set_use_xla_backend(option: bool) -> bool:
global _use_xla_backend
_org = _use_xla_backend
_use_xla_backend = option
return _org
set_cpp_use_symbolic_shape(use_symbolic_shape)
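# A minimal usage sketch (assumption: called from user code, not part of this
# module): each setter returns the previous value, which enables save/restore.
#
#   old = set_symbolic_shape(True)   # tensor.shape now returns a Tensor
#   try:
#       ...                          # code relying on symbolic shapes
#   finally:
#       set_symbolic_shape(old)      # restore the previous mode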
| [
"[email protected]"
] | |
6c54d81e4263105997a4b7dbcb57d4d4673fe0e2 | 5d0fe4a9e026234fe15e6c4380355061bb4dac64 | /tests/functional/pages/profile/individual_enter_your_email_and_password.py | 4ed6007a0f1fe073b148c538f8fdceb4a783b69b | [
"MIT"
] | permissive | uktrade/directory-tests | 37e243862da8ac594cf1ea06ade714db5e1aba03 | 39ec6c26203580238e65566a472cbd80916e6726 | refs/heads/master | 2022-08-09T16:58:56.248982 | 2022-08-01T12:25:10 | 2022-08-01T12:25:10 | 71,367,747 | 4 | 3 | MIT | 2022-08-01T12:26:09 | 2016-10-19T14:48:57 | Python | UTF-8 | Python | false | false | 1,702 | py | # -*- coding: utf-8 -*-
"""Profile - Individual - Enter your business email address and set a password"""
from requests import Response, Session
from directory_tests_shared import PageType, Service, URLs
from tests.functional.utils.context_utils import Actor
from tests.functional.utils.request import (
Method,
check_response,
check_url,
make_request,
)
SERVICE = Service.PROFILE
NAME = "Individual enter your email address and set a password"
TYPE = PageType.FORM
URL = URLs.PROFILE_ENROL_INDIVIDUAL_ENTER_YOUR_EMAIL_AND_PASSWORD.absolute
EXPECTED_STRINGS = [
"Enter your email address and set a password",
"Your email address",
"Set a password",
"Confirm password",
"Tick this box to accept the",
]
def go_to(session: Session) -> Response:
return make_request(Method.GET, URL, session=session)
def should_be_here(response: Response):
check_url(response, URL)
check_response(response, 200, body_contains=EXPECTED_STRINGS)
def submit(actor: Actor) -> Response:
session = actor.session
headers = {"Referer": URL}
data = {
"csrfmiddlewaretoken": actor.csrfmiddlewaretoken,
"individual_user_enrolment_view-current_step": "user-account",
"user-account-email": actor.email,
"user-account-password": actor.password,
"user-account-password_confirmed": actor.password,
"user-account-terms_agreed": "on",
"user-account-remote_password_error": None,
"g-recaptcha-response": "test mode",
}
return make_request(
Method.POST,
URL,
session=session,
headers=headers,
files=data,
no_filename_in_multipart_form_data=True,
)
| [
"[email protected]"
] | |
905cb8c5f6d0197487ae82ee1d0f00475fb00efe | 2153a7ecfa69772797e379ff5642d52072a69b7c | /library/test/test_compiler/sbs_code_tests/70_class.py | 64ce08233157b32ce3204a302018c8a61bc3d153 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"Python-2.0"
] | permissive | KCreate/skybison | a3789c84541f39dc6f72d4d3eb9783b9ed362934 | d1740e08d8de85a0a56b650675717da67de171a0 | refs/heads/trunk | 2023-07-26T04:50:55.898224 | 2021-08-31T08:20:46 | 2021-09-02T19:25:08 | 402,908,053 | 1 | 0 | NOASSERTION | 2021-09-03T22:05:57 | 2021-09-03T22:05:57 | null | UTF-8 | Python | false | false | 422 | py | # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
class C:
pass
# EXPECTED:
[
LOAD_BUILD_CLASS(0),
LOAD_CONST(Code((1, 0))),
LOAD_CONST('C'),
MAKE_FUNCTION(0),
LOAD_CONST('C'),
CALL_FUNCTION(2),
STORE_NAME('C'),
...,
CODE_START('C'),
LOAD_NAME('__name__'),
STORE_NAME('__module__'),
LOAD_CONST('C'),
STORE_NAME('__qualname__'),
...,
]
| [
"[email protected]"
] | |
1c22d4445c54dc6358a0ba0086ed39af5a259b49 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/osconfig/agentendpoint/v1/osconfig-agentendpoint-v1-py/google/cloud/osconfig/agentendpoint_v1/services/agent_endpoint_service/transports/base.py | 1529267fb51e6ac71e8e7bfbcf4c92072cb41021 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,967 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.osconfig.agentendpoint_v1.types import agentendpoint
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-osconfig-agentendpoint',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class AgentEndpointServiceTransport(abc.ABC):
"""Abstract transport class for AgentEndpointService."""
AUTH_SCOPES = (
)
DEFAULT_HOST: str = 'osconfig.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials is service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.receive_task_notification: gapic_v1.method.wrap_method(
self.receive_task_notification,
default_retry=retries.Retry(
initial=1.0,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.Aborted,
core_exceptions.Cancelled,
core_exceptions.DeadlineExceeded,
core_exceptions.InternalServerError,
core_exceptions.ServiceUnavailable,
),
deadline=3600.0,
),
default_timeout=3600.0,
client_info=client_info,
),
self.start_next_task: gapic_v1.method.wrap_method(
self.start_next_task,
default_timeout=None,
client_info=client_info,
),
self.report_task_progress: gapic_v1.method.wrap_method(
self.report_task_progress,
default_timeout=None,
client_info=client_info,
),
self.report_task_complete: gapic_v1.method.wrap_method(
self.report_task_complete,
default_timeout=None,
client_info=client_info,
),
self.register_agent: gapic_v1.method.wrap_method(
self.register_agent,
default_timeout=None,
client_info=client_info,
),
self.report_inventory: gapic_v1.method.wrap_method(
self.report_inventory,
default_timeout=None,
client_info=client_info,
),
}
@property
def receive_task_notification(self) -> Callable[
[agentendpoint.ReceiveTaskNotificationRequest],
Union[
agentendpoint.ReceiveTaskNotificationResponse,
Awaitable[agentendpoint.ReceiveTaskNotificationResponse]
]]:
raise NotImplementedError()
@property
def start_next_task(self) -> Callable[
[agentendpoint.StartNextTaskRequest],
Union[
agentendpoint.StartNextTaskResponse,
Awaitable[agentendpoint.StartNextTaskResponse]
]]:
raise NotImplementedError()
@property
def report_task_progress(self) -> Callable[
[agentendpoint.ReportTaskProgressRequest],
Union[
agentendpoint.ReportTaskProgressResponse,
Awaitable[agentendpoint.ReportTaskProgressResponse]
]]:
raise NotImplementedError()
@property
def report_task_complete(self) -> Callable[
[agentendpoint.ReportTaskCompleteRequest],
Union[
agentendpoint.ReportTaskCompleteResponse,
Awaitable[agentendpoint.ReportTaskCompleteResponse]
]]:
raise NotImplementedError()
@property
def register_agent(self) -> Callable[
[agentendpoint.RegisterAgentRequest],
Union[
agentendpoint.RegisterAgentResponse,
Awaitable[agentendpoint.RegisterAgentResponse]
]]:
raise NotImplementedError()
@property
def report_inventory(self) -> Callable[
[agentendpoint.ReportInventoryRequest],
Union[
agentendpoint.ReportInventoryResponse,
Awaitable[agentendpoint.ReportInventoryResponse]
]]:
raise NotImplementedError()
__all__ = (
'AgentEndpointServiceTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
ca882b27134e8b7e97382771cc03bef0fcd2a3fe | 242f1dafae18d3c597b51067e2a8622c600d6df2 | /src/1300-1399/1344.angle.clock.py | 8f16b6ea976d0a6986c2e132b2eb2b95f928c1e3 | [] | no_license | gyang274/leetcode | a873adaa083270eb05ddcdd3db225025533e0dfe | 6043134736452a6f4704b62857d0aed2e9571164 | refs/heads/master | 2021-08-07T15:15:01.885679 | 2020-12-22T20:57:19 | 2020-12-22T20:57:19 | 233,179,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | class Solution:
def angleClock(self, hour: int, minutes: int) -> float:
h, m = hour % 12, minutes % 60
hA, mA = h * 30 + m / 60 * 30, m * 6
dA = abs(hA - mA)
return min(dA, 360 - dA)
if __name__ == '__main__':
solver = Solution()
cases = [
(2, 58),
]
rslts = [solver.angleClock(hour, minutes) for hour, minutes in cases]
for cs, rs in zip(cases, rslts):
print(f"case: {cs} | solution: {rs}")
| [
"[email protected]"
] | |
66ebb027ebb9fcf1674157a1fd4328b8c803a1b6 | 60aa3bcf5ace0282210685e74ee8ed31debe1769 | /base/lib/encodings/cp1253.py | e32862ea0e2b0a2d349861903d7635099bf924b3 | [] | no_license | TheBreadGuy/sims4-ai-engine | 42afc79b8c02527353cc084117a4b8da900ebdb4 | 865212e841c716dc4364e0dba286f02af8d716e8 | refs/heads/master | 2023-03-16T00:57:45.672706 | 2016-05-01T17:26:01 | 2016-05-01T17:26:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | import codecs
class Codec(codecs.Codec):
__qualname__ = 'Codec'
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
__qualname__ = 'IncrementalEncoder'
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
__qualname__ = 'IncrementalDecoder'
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
__qualname__ = 'StreamWriter'
class StreamReader(Codec, codecs.StreamReader):
__qualname__ = 'StreamReader'
def getregentry():
return codecs.CodecInfo(name='cp1253', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f€\ufffe‚ƒ„…†‡\ufffe‰\ufffe‹\ufffe\ufffe\ufffe\ufffe\ufffe‘’“”•–—\ufffe™\ufffe›\ufffe\ufffe\ufffe\ufffe\xa0΅Ά£¤¥¦§¨©\ufffe«¬\xad®―°±²³΄µ¶·ΈΉΊ»Ό½ΎΏΐΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡ\ufffeΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋόύώ\ufffe'
encoding_table = codecs.charmap_build(decoding_table)
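# A round-trip sketch (illustration only; assumes the tables are used directly,
# exactly as the Codec class above does):
#
#   text = 'Αθήνα'
#   raw = codecs.charmap_encode(text, 'strict', encoding_table)[0]
#   assert codecs.charmap_decode(raw, 'strict', decoding_table)[0] == text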
| [
"[email protected]"
] | |
82a31547b7df987e69677a23ad29f56ad9a5ccbe | 41c5f7da28b87a3034754254d21791b322e819d8 | /test/test_json_analysis_result_sub_group_all_of.py | e181c4639ce155f9ebebe587db93934f73ee12ae | [] | no_license | MADANA-IO/madana-apiclient-python | 16cb3eb807897903df2a885a94a2c02fc405818a | 40dc21ab43d9565ac3dff86d7270093cce112753 | refs/heads/master | 2023-03-08T05:02:32.616469 | 2021-02-11T10:17:30 | 2021-02-11T10:17:30 | 287,797,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | # coding: utf-8
"""
madana-api
<h1>API Quickstart Guide</h1> <p>This documentation contains a Quickstart Guide, a few <a href=\"downloads.html\">sample clients</a> for download and information about the available <a href=\"resources.html\">endpoints</a> and <a href=\"data.html\">DataTypes</a> </p> <p>The <a target=\"_blank\" href=\"http://madana-explorer-staging.eu-central-1.elasticbeanstalk.com/login\"> MADANA Explorer</a> can be used to verify the interactions with the API</p> <p>Internal use only. For more information visit <a href=\"https://www.madana.io\">www.madana.io</a></p> <br> <br> # noqa: E501
The version of the OpenAPI document: 0.4.12
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import madana_sampleclient_python
from madana_sampleclient_python.models.json_analysis_result_sub_group_all_of import JsonAnalysisResultSubGroupAllOf # noqa: E501
from madana_sampleclient_python.rest import ApiException
class TestJsonAnalysisResultSubGroupAllOf(unittest.TestCase):
"""JsonAnalysisResultSubGroupAllOf unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test JsonAnalysisResultSubGroupAllOf
include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included """
# model = madana_sampleclient_python.models.json_analysis_result_sub_group_all_of.JsonAnalysisResultSubGroupAllOf() # noqa: E501
if include_optional :
return JsonAnalysisResultSubGroupAllOf(
filter = '0'
)
else :
return JsonAnalysisResultSubGroupAllOf(
)
def testJsonAnalysisResultSubGroupAllOf(self):
"""Test JsonAnalysisResultSubGroupAllOf"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a3107b0c1a2da9aed5839d1306f79a2aa6a91e03 | 0d2f636592dc12458254d793f342857298c26f12 | /vowel.py | d1da799f259f873b5637804df56c23b3325a671c | [] | no_license | chenpc1214/test | c6b545dbe13e672f11c58464405e024394fc755b | 8610320686c499be2f5fa36ba9f11935aa6d657b | refs/heads/master | 2022-12-13T22:44:41.256315 | 2020-09-08T16:25:49 | 2020-09-08T16:25:49 | 255,796,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | vowel = ['a', 'e', 'i', 'o', 'u']
word= "milliway"
for letter in word:
if letter in vowel:
print(letter)
| [
"[email protected]"
] | |
c2e4537265eacfee364c3be61266d0a16861c951 | dc39ccc50b7d34e5de84f3cc132c5cc096a32656 | /BASIC/class/attribute.py | 40377cc862a0cdd596c36046d3178d5438bfeccf | [] | no_license | Shukladas1115/Python | 0947aefd62a9ce4c3140360cb7259b031368709c | feb32bc2e2e7df377fc2d92330bfdacb83f31a55 | refs/heads/master | 2022-02-20T04:15:56.036495 | 2019-08-26T16:36:52 | 2019-08-26T16:36:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | class A(object):
x = 1
class B(A):
pass
class C(A):
pass
print(A.x, B.x, C.x) # 1 1 1
B.x = 2
print(A.x, B.x, C.x) # 1 2 1
A.x = 3
print(A.x, B.x, C.x) # 3 2 3 why is that?
'''
C doesn't have its own x property, independent of A.
Thus, references to C.x are in fact references to A.x
C inherits from A; it does not actually own an attribute x, it merely refers to A's attribute x
'''
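# A minimal lookup demo (illustration, not part of the original exercise):
# C has no 'x' entry of its own, so lookup falls through the MRO to A.
print('x' in C.__dict__)  # False: nothing shadows A.x yet
print(C.__mro__)          # (C, A, object) is the search order
C.x = 4                   # give C its own binding...
print(A.x, B.x, C.x)      # 3 2 4: C.x no longer tracks A.x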
"[email protected]"
] | |
fa36d96624f3655b5258367533c44b0c14db498b | d364123a0655bff7e9d725382934fe2c15b5bfc4 | /Crawler/lianxi/hsimg_test.py | bc62fc7c1c354c4ba3007bd3c78507f7a0a83c1e | [] | no_license | yuan1093040152/SeleniumTest | 88d75361c8419354f56856c326f843a0a89d7ca6 | d155b98702bc46c174499042b43257696b861b5e | refs/heads/master | 2023-08-31T15:00:25.415642 | 2023-08-30T09:26:42 | 2023-08-30T09:26:42 | 227,269,300 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,271 | py | #coding=utf-8
'''
Created on 2018年7月15日
@author: kai.yangf
'''
import requests,re,time
from multiprocessing import pool
from requests.exceptions import RequestException
from threading import Thread
def get_one_page(url):
try:
response = requests.get(url)
html = response.text
if response.status_code == 200:
print (True)
print (html[:5])
return html
else:
return None
except RequestException:
return None
def parse_one_page(url):
html = get_one_page(url)
pettern = re.compile('<img.*?alt.*?src="(.*?)" />',re.S)
items = re.findall(pettern,html)
print (len(items))
for item in items:
writeIO(item)
def writeIO(item):
filename = str(time.time()) + '.jpg'
response = requests.get(item)
Path = 'E:\\CrawlerImg\\' + filename
with open(Path,'wb') as f:
f.write(response.content)
f.close()
def each_page(url):
host = 'https://www.8484dd.com'
html = get_one_page(url)
pettern = re.compile('<li.*?<a.*?href="(.+?)".*?</a>',re.S)
items = re.findall(pettern,html)
print (len(items))
for item in items:
if re.match('/pic', item):
if re.search('.html', item):
url = host + item
parse_one_page(url)
def each_page_value(i):
url = 'https://www.8484dd.com/pic/5/index_'+ str(i) +'.html'
host = 'https://www.8484dd.com'
html = get_one_page(url)
pettern = re.compile('<li.*?<a.*?href="(.+?)".*?</a>',re.S)
items = re.findall(pettern,html)
print (len(items))
for item in items:
if re.match('/pic', item):
if re.search('.html', item):
url = host + item
parse_one_page(url)
def main(url):
parse_one_page(url) # parse_one_page fetches and parses the page itself
if __name__ == '__main__':
# for i in range(2,10):
# url = 'https://www.8484dd.com/pic/5/index_'+ str(i) +'.html'
# each_page(url)
Threads = []
for i in range(2,11):
t = Thread(target=each_page_value, args =(i,))
Threads.append(t)
for t in Threads: # Threads holds indices 0-8, so iterate the list directly
t.start()
for t in Threads:
t.join()
| [
"[email protected]"
] | |
f4c38240821bf96e65612f342986cf276694f90d | 34578a08451dc124f02fbba92a219da3347059cd | /.history/tools/views_20190502130213.py | 5ef8462e7964c7373832387076323b91f3acac43 | [] | no_license | gwjczwy/CTF-Exercises | b35d938b30adbc56c1b6f45dc36cea1421c702fb | c2d5c47f5047b1601564453e270ce50aad7f56fc | refs/heads/master | 2020-05-25T23:51:26.190350 | 2019-05-22T13:18:59 | 2019-05-22T13:18:59 | 188,042,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,399 | py | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from json import dumps
from .models import Url,Money
import time
#########################
# Configuration variables
sourcePath=r'C:\Users\arnoux\Desktop\训练平台\sql\log.txt'
#########################
# Home page
@login_required
def index(requests):
data={'toolname':'index','user':requests.user}
return render(requests,'tools/index.html',data)
#########################
# Short links
@login_required
def surl(requests):# short-link index page
data={}
data['toolName']="surl"
data['parameter']="index"
return render(requests, 'tools/index.html', data)
def surls(requests,parameter):# short-link redirect that resolves a slug
data={}
data['toolName']="surl"
data['parameter']="link"
print('short link slug:', parameter)
try:
req=Url.objects.get(sUrl=parameter)
print('object fetched successfully')
except:
return HttpResponse('You came to the wrong place, Wukong')
req=req.fullUrl
return HttpResponse('<script>window.location.href="'+req+'";</script>')
@csrf_exempt
@login_required
def createSUrl(requests):
if not (requests.method == 'POST' and requests.POST['fullUrl']):
req={'message':'fail'}
return HttpResponse(dumps(req),content_type="application/json")
fullUrl=requests.POST['fullUrl']
while True:
randUrl=randStr(5)# random slug of length 5
try:
Url.objects.get(sUrl=randUrl)# if the slug is already taken, randomize again
print('One more time!')
except:
break
Url(sUrl=randUrl,fullUrl=fullUrl).save()
req={'message':'success','url':randUrl}
return HttpResponse(dumps(req),content_type="application/json")
def randStr(l):
import random
import string
seed = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
sa = []
for i in range(l):
sa.append(random.choice(seed))
salt = ''.join(sa)
return salt
#########################
# Shop
@login_required
def shop(requests):
data={}
data['toolName']="shop"
money = Money.objects.get(user=requests.user)
data['money']=money
return render(requests, 'tools/index.html', data)
# Shop currency exchange
@csrf_exempt
@login_required
def shopExchange(requests):
if not (requests.method == 'POST' and 'rule' in requests.POST and 'num' in requests.POST):
print('illegal request')
req={'message':'fail','reason':'illegal request'}
return HttpResponse(dumps(req),content_type="application/json")
rule=requests.POST['rule']
num=requests.POST['num']
if rule not in ['m2b','b2m']:# validate the conversion rule
print('invalid rule parameter')
req={'message':'fail','reason':'invalid rule parameter'}
return HttpResponse(dumps(req),content_type="application/json")
if num.isdigit():# validate that the number is a non-negative integer
num=int(num)
if num<0:
req={'message':'fail','reason':'invalid parameter'}
return HttpResponse(dumps(req),content_type="application/json")
else:
req={'message':'fail','reason':'invalid parameter'}
return HttpResponse(dumps(req),content_type="application/json")
# fetch the currency object
money = Money.objects.get(user=requests.user)
if rule=='m2b':
if money.monero>=num:
money.bitcoin+=num
money.save()
time.sleep(5) # deliberate delay that opens a race-condition window
money.monero-=num
money.save()
else:
req={'message':'fail','reason':'insufficient monero'}
return HttpResponse(dumps(req),content_type="application/json")
elif rule=='b2m':
if money.bitcoin>=num:
money.monero+=num
money.save()
time.sleep(5)
money.bitcoin-=num
money.save()
else:
req={'message':'fail','reason':'insufficient bitcoin'}
return HttpResponse(dumps(req),content_type="application/json")
else:
req={'message':'fail','reason':'unknown error'}
return HttpResponse(dumps(req),content_type="application/json")
req={'message':'success','monero':money.monero,'bitcoin':money.bitcoin}
return HttpResponse(dumps(req),content_type="application/json")
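# Sketch of why the sleep above matters (hypothetical client; the URL, route,
# and session cookie below are placeholders, not defined in this project):
# two concurrent exchanges both pass the balance check before either debit is
# saved, so the account is credited twice but only debited once.
#
#   import threading, requests as rq
#   fire = lambda: rq.post('http://HOST/tools/shop/exchange',
#                          data={'rule': 'm2b', 'num': 100}, cookies=session)
#   workers = [threading.Thread(target=fire) for _ in range(2)]
#   [w.start() for w in workers]; [w.join() for w in workers]
#
# Serializing the read-modify-write (e.g. select_for_update) would close it.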
#########################
# Logs
@login_required
def logs(requests):
data={}
data['toolName']="logs"
return render(requests, 'tools/index.html', data)
# Add a log entry
@csrf_exempt
@login_required
def addLog(requests):
if not (requests.method == 'POST' and 'path' in requests.POST and 'content' in requests.POST):
req={'message':'fail','reason':'illegal request'}
return HttpResponse(dumps(req),content_type="application/json")
path=requests.POST['path']
content=requests.POST['content']
# fetch the currency object
money = Money.objects.get(user=requests.user)
if money.bitcoin >=100:
try:
with open(path,'at') as file:
file.write(content)
money.bitcoin-=100
money.save()
req={'message':'success','reason':'operation successful'}
return HttpResponse(dumps(req),content_type="application/json")
except:
req={'message':'fail','reason':'failed to write the file'}
return HttpResponse(dumps(req),content_type="application/json")
else:
req={'message':'fail','reason':'insufficient currency'}
return HttpResponse(dumps(req),content_type="application/json")
# Fetch logs
def getLog(requests):
req={'message':'fail','reason':'insufficient currency'}
return HttpResponse(dumps(req),content_type="application/json")
# Download the source code
def downSource(requests):
# fetch the currency object
money = Money.objects.get(user=requests.user)
if money.bitcoin >=1000:
money.bitcoin-=1000
money.save()
file = open(sourcePath, 'rb')
response = HttpResponse(file)
response['Content-Type'] = 'application/octet-stream' # set headers so the browser treats the response as a file download
response['Content-Disposition'] = 'attachment;filename="'+sourcePath.split('\\')[-1]+'";'
return response
else:
req={'message':'fail','reason':'insufficient currency'}
return HttpResponse(dumps(req),content_type="application/json")
| [
"[email protected]"
] | |
31f64762cb63b1fbd9b34933a297a9ed4438eddb | ffad0de28109d0156baba92b5793e6d8142ced7c | /server/channels_list_test.py | 84b54743e63a3c4deed2798a8d9a3f3a3ced6293 | [] | no_license | nomii15/COMP1531-server | 823753e11b78619b7f67c32d9f5f1f39d839b6f8 | af00ba90cdf2fa1ce5170a7a2bf506bfe550bbd7 | refs/heads/master | 2021-07-17T08:26:57.074709 | 2019-11-17T07:29:44 | 2019-11-17T07:29:44 | 228,518,923 | 1 | 0 | null | 2021-01-05T18:13:55 | 2019-12-17T02:47:02 | Python | UTF-8 | Python | false | false | 1,550 | py | import pytest
from channels_list import channels_list
from auth_register import auth_register
from channels_create import channels_create
'''
Provide a list of all channels (and their associated details) that the authorised user is part of
'''
def test_list_one():
#setup
register1 = auth_register("[email protected]", "validpassword1", "USER1", "validname1")
token1 = register1['token']
u_id1 = register1['u_id']
channel_id1 = channels_create(token1, 'channel1', True)
channel_list1 = channels_list(token1)
channel_list = {'channels': [{'channel_id': 1, 'name': 'channel1'}]}
#check only channel user is part of exists in the list
assert channel_list == channel_list1
def test_list_empty():
#setup
register2 = auth_register("[email protected]", "validpassword2", "USER2", "validname2")
token2 = register2['token']
u_id2 = register2['u_id']
register3 = auth_register("[email protected]", "validpassword3", "USER3", "validname3")
token3 = register3['token']
u_id3 = register3['u_id']
register4 = auth_register("[email protected]", "validpassword4", "USER4", "validname4")
token4 = register4['token']
u_id4 = register4['u_id']
channel_id2 = channels_create(token2, 'channel2', True)
channel_id3 = channels_create(token3, 'channel3', True)
channel_list4 = channels_list(token4)
empty_list = {'channels' : []}
#check channel list is empty as user does not belong to any channels
assert channel_list4 == empty_list | [
"[email protected]"
] | |
a4aa71959c2f1c3dce79168ddb51c85bfaa1899c | cdee5cc20a5085b40f8555e7199fe19403e005c3 | /experimental/graphicalClassification/MultiClassMajorityVote.py | e02402ed226105cb3faf7d5e5aab05424c9616b6 | [
"Apache-2.0"
] | permissive | visenger/aggregation | 1e908d11df701e900d94d6545f3cc35a6c7dc915 | 82dce87eaaf14b0b2bedd29fc82c026fda2a0138 | refs/heads/master | 2020-03-19T03:08:52.140663 | 2017-06-21T10:32:27 | 2017-06-21T10:32:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python
from __future__ import print_function
__author__ = 'greghines'
class MultiClassMajorityVote:
def __init__(self,subjectNodes,userNodes):
self.subjectNodes = subjectNodes
self.userNodes = userNodes
self.alpha = 0.6
def __classify__(self,attributeList):
for att in attributeList:
for user in self.userNodes:
user.__changeClassificationAttributes__(att)
for subject in self.subjectNodes:
subject.__changeClassificationAttributes__(att)
#what alpha value would this subject need to get correct positive?
| [
"[email protected]"
] | |
a2e495fdc47015c860dc2e716dfa6d8a401a6538 | 0b40232eb2395c27353c892ef4ccb5c604bb75be | /Array/third_max.py | 174029680ba012a49f9c34cb0d61196da859ba00 | [] | no_license | HareshNasit/LeetCode | 971ae9dd5e4f0feeafa5bb3bcf5b7fa0a514d54d | 674728af189aa8951a3fcb355b290f5666b1465c | refs/heads/master | 2021-06-18T07:37:40.121698 | 2021-02-12T12:30:18 | 2021-02-12T12:30:18 | 168,089,751 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | def thirdMax(self, nums):
"""
https://leetcode.com/problems/third-maximum-number/submissions/
:type nums: List[int]
:rtype: int
"""
nums_set = set(nums)
nums_list = list(nums_set)
nums_list.sort(reverse = True)
if len(nums_list) > 2:
return nums_list[2]
return nums_list[0]
| [
"[email protected]"
] | |
39ddeb9ad873ed4901adbf3640031f907f3503a3 | 2b5bc632859ca01b6b2feae6186b1314ed8c5187 | /everpad/provider/daemon.py | 5b6b49be3c92f2d0a2ee5e6669c92c7f6b8189b9 | [] | no_license | mcardillo55/everpad | c64e2d35bd4ccceff901d9720030dbb8adfcef56 | ab6271a5b73eedf81d0c31e351e567282dbd6685 | refs/heads/master | 2020-12-25T05:55:05.811394 | 2012-12-19T03:36:25 | 2012-12-19T03:36:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,175 | py | import sys
sys.path.insert(0, '../..')
from everpad.provider.service import ProviderService
from everpad.provider.sync import SyncThread
from everpad.provider.tools import set_auth_token, get_db_session
from everpad.tools import get_auth_token, print_version
from everpad.provider import models
from PySide.QtCore import Slot, QSettings
import dbus
import dbus.mainloop.glib
import signal
import fcntl
import os
import getpass
import argparse
if 'kde' in os.environ.get('DESKTOP_SESSION', ''): # kde init qwidget for wallet access; default '' avoids a TypeError when the variable is unset
from PySide.QtGui import QApplication
App = QApplication
else:
from PySide.QtCore import QCoreApplication
App = QCoreApplication
class ProviderApp(App):
def __init__(self, verbose, *args, **kwargs):
App.__init__(self, *args, **kwargs)
self.settings = QSettings('everpad', 'everpad-provider')
self.verbose = verbose
session_bus = dbus.SessionBus()
self.bus = dbus.service.BusName("com.everpad.Provider", session_bus)
self.service = ProviderService(self, session_bus, '/EverpadProvider')
self.sync_thread = SyncThread(self)
self.sync_thread.sync_state_changed.connect(
Slot(int)(self.service.sync_state_changed),
)
self.sync_thread.data_changed.connect(
Slot()(self.service.data_changed),
)
if get_auth_token():
self.sync_thread.start()
self.service.qobject.authenticate_signal.connect(
self.on_authenticated,
)
self.service.qobject.remove_authenticate_signal.connect(
self.on_remove_authenticated,
)
@Slot(str)
def on_authenticated(self, token):
set_auth_token(token)
self.sync_thread.start()
@Slot()
def on_remove_authenticated(self):
self.sync_thread.quit()
set_auth_token('')
session = get_db_session()
session.query(models.Note).delete(
synchronize_session='fetch',
)
session.query(models.Resource).delete(
synchronize_session='fetch',
)
session.query(models.Notebook).delete(
synchronize_session='fetch',
)
session.query(models.Tag).delete(
synchronize_session='fetch',
)
session.commit()
def log(self, data):
if self.verbose:
print data
def main():
signal.signal(signal.SIGINT, signal.SIG_DFL)
fp = open('/tmp/everpad-provider-%s.lock' % getpass.getuser(), 'w')
fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
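# LOCK_EX | LOCK_NB: a second provider instance fails here with IOError
# instead of blocking, so only one daemon runs per user.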
try:
os.mkdir(os.path.expanduser('~/.everpad/'))
os.mkdir(os.path.expanduser('~/.everpad/data/'))
except OSError:
pass
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true', help='verbose output')
parser.add_argument('--version', '-v', action='store_true', help='show version')
args = parser.parse_args(sys.argv[1:])
if args.version:
print_version()
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
app = ProviderApp(args.verbose, sys.argv)
app.exec_()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
13f9fc971c3c8582a7f8e5715f7b253fbbd05b76 | 17ca5bae91148b5e155e18e6d758f77ab402046d | /analysis_ACS/CID3570/first_analysis/cut_PSFs_in_analysis.py | 618268eb935438571ce91984e37bd80070f991f4 | [] | no_license | dartoon/QSO_decomposition | 5b645c298825091c072778addfaab5d3fb0b5916 | a514b9a0ad6ba45dc9c3f83abf569688b9cf3a15 | refs/heads/master | 2021-12-22T19:15:53.937019 | 2021-12-16T02:07:18 | 2021-12-16T02:07:18 | 123,425,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,011 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 13:54:02 2018
@author: Dartoon
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import sys
sys.path.insert(0,'../../py_tools')
from cut_image import cut_image, cut_center_bright, save_loc_png, grab_pos
import copy
import astropy.io.fits as pyfits
import os
path = os.getcwd()
ID = path.split('/')[-1]
fitsFile = pyfits.open('../../Cycle25data/ACS_data/{0}_acs_I_mosaic_180mas_sci.fits'.format(ID))
img = fitsFile[0].data # check the back grounp
#from astropy.visualization import SqrtStretch
#from astropy.stats import SigmaClip
#from photutils import Background2D, SExtractorBackground
#from astropy.visualization.mpl_normalize import ImageNormalize
#norm = ImageNormalize(stretch=SqrtStretch())
#sigma_clip = SigmaClip(sigma=3., iters=10)
#bkg_estimator = SExtractorBackground()
#from photutils import make_source_mask
#mask_0 = make_source_mask(img, snr=2, npixels=5, dilate_size=11)
#mask_1 = (np.isnan(img))
#mask = mask_0 + mask_1
#bkg = Background2D(img, (50, 50), filter_size=(3, 3),
# sigma_clip=sigma_clip, bkg_estimator=bkg_estimator,
# mask=mask)
#fig=plt.figure(figsize=(15,15))
#ax=fig.add_subplot(1,1,1)
#ax.imshow(img, norm=LogNorm(), origin='lower')
##bkg.plot_meshes(outlines=True, color='#1f77b4')
#ax.xaxis.set_visible(False)
#ax.yaxis.set_visible(False)
#plt.show()
#fig=plt.figure(figsize=(15,15))
#ax=fig.add_subplot(1,1,1)
#ax.imshow(mask, origin='lower')
##bkg.plot_meshes(outlines=True, color='#1f77b4')
#ax.xaxis.set_visible(False)
#ax.yaxis.set_visible(False)
#plt.show()
#
#back = bkg.background* ~mask_1
#fig=plt.figure(figsize=(15,15))
#ax=fig.add_subplot(1,1,1)
#ax.imshow(back, origin='lower', cmap='Greys_r')
#ax.xaxis.set_visible(False)
#ax.yaxis.set_visible(False)
#plt.show()
#
#img -= back
#pyfits.PrimaryHDU(img).writeto('sub_coadd.fits',overwrite=True)
#img = pyfits.getdata('sub_coadd.fits')
filename= '{0}.reg'.format(ID)
c_psf_list, QSO_loc = grab_pos(filename,reg_ty = 'acs', QSO_reg_return=True)
center_QSO = c_psf_list[QSO_loc]
QSO, cut_center = cut_center_bright(image=img, center=center_QSO, radius=60, return_center=True, plot=False)
QSO_outer = cut_image(image=img, center=cut_center, radius=200)
pyfits.PrimaryHDU(QSO).writeto('{0}_cutout.fits'.format(ID),overwrite=True)
pyfits.PrimaryHDU(QSO_outer).writeto('{0}_cutout_outer.fits'.format(ID),overwrite=True)
PSFs = []
PSF_gauss_centers = []
PSF_bright_centers = []
count=0
#psf_list = None
psf_list = np.delete(c_psf_list, (QSO_loc), axis=0)
dist = (psf_list-center_QSO)[:,0]**2+(psf_list-center_QSO)[:,1]**2
psf_list = psf_list[dist.argsort()]
for i in range(len(psf_list)):
print 'PSF',i
PSF, PSF_center = cut_center_bright(image=img, center=psf_list[i], radius=60, return_center=True, plot=False)
PSFs.append([PSF, 1, PSF_center])
PSF_gauss_centers.append(PSF_center)
_, PSF_br_center = cut_center_bright(image=img, center=psf_list[i], radius=60, kernel = 'center_bright', return_center=True, plot=False)
PSF_bright_centers.append(PSF_br_center)
count += 1
#extra_psfs = None
extra_psfs = np.array([[1479.9762,3554.7075], [5409.6929,4718.4676], [2870.2585,4735.0797], [1065.9795,1476.4033]])
dist_extra = (extra_psfs-center_QSO)[:,0]**2+(extra_psfs-center_QSO)[:,1]**2
extra_psfs = extra_psfs[dist_extra.argsort()]
for i in range(len(extra_psfs)):
print 'PSF',count
PSF, PSF_center = cut_center_bright(image=img, center=extra_psfs[i], radius=60, return_center=True, plot=False)
PSFs.append([PSF,0, PSF_center])
PSF_gauss_centers.append(PSF_center)
_, PSF_br_center = cut_center_bright(image=img, center=extra_psfs[i], radius=60, kernel = 'center_bright', return_center=True, plot=False)
PSF_bright_centers.append(PSF_br_center)
count += 1
from mask_objects import mask_obj
print "QSO:"
a, QSO_mask = mask_obj(img=QSO, exp_sz=1.4)
if len(QSO_mask) > 1:
QSO_mask = np.sum(np.asarray(QSO_mask),axis=0)
elif len(QSO_mask) == 1:
QSO_mask = QSO_mask[0]
#print "QSO image:"
#plt.imshow((QSO_mask), origin='lower')
#plt.show()
QSO_mask = (1 - (QSO_mask != 0)*1.)
PSF_msk_list = []
for i in range(len(PSFs)):
print "PSF{0}:".format(i)
_, PSF_mask = mask_obj(img=PSFs[i][0], snr=3., exp_sz=2.4)
if len(PSF_mask) > 1:
PSF_mask = np.sum(np.asarray(PSF_mask),axis=0)
elif len(PSF_mask) == 1:
PSF_mask = PSF_mask[0]
# print "PSF{0} image:".format(i)
# plt.imshow(PSF_mask, origin='lower')
# plt.show()
PSF_mask = (1 - (PSF_mask != 0)*1.)
if i in PSF_msk_list:
PSF_mask = PSF_mask*0 + 1
print "PSF", i, "not use this mask"
PSFs[i].append(PSF_mask)
center_match = (np.sum(abs(np.asarray(PSF_gauss_centers)-np.asarray(PSF_bright_centers)),axis = 1) == 0)
PSFs_all = copy.deepcopy(PSFs)
PSFs=[]
for i in range(len(PSFs_all)):
if center_match[i] == True:
print i
PSFs.append(PSFs_all[i])
#==============================================================================
# Compare the FWHM
#==============================================================================
from measure_FWHM import measure_FWHM
FWHM = []
for i in range(len(PSFs)):
FWHM_i = measure_FWHM(PSFs[i][0])[0]
print "The measued FWHM for PSF", i, ":", FWHM_i
FWHM.append(FWHM_i)
FWHM = np.asarray(FWHM)
#==============================================================================
# Compare the profile and derive the Average image
#==============================================================================
flux_list = []
for i in range(len(PSFs)):
flux = np.sum(PSFs[i][0]*PSFs[i][3])
print "tot_flux for PSF{0}".format(i), flux
flux_list.append(flux)
del_list = [0,3]
PSFs = [PSFs[i] for i in range(len(PSFs)) if i not in del_list]
#plot the first selection
if extra_psfs is None:
save_loc_png(img,center_QSO,psf_list, ID=ID, label='ini' ,reg_ty = 'acs')
else:
save_loc_png(img,center_QSO,psf_list,extra_psfs, ID=ID, label='ini', reg_ty = 'acs')
PSFs_familiy = [PSFs[i][1] for i in range(len(PSFs))]
if extra_psfs is None:
loc_PSFs = psf_list
elif psf_list is None:
loc_PSFs = extra_psfs
else:
loc_PSFs = np.append(psf_list, extra_psfs, axis=0)
loc_ind_star = [PSFs[i][2] for i in range(len(PSFs)) if PSFs[i][1]==1] #and flux_list[i]>100]
loc_like_star = [PSFs[i][2] for i in range(len(PSFs)) if PSFs[i][1]==0] # and flux_list[i]>100]
if PSFs_familiy[-1] ==1:
save_loc_png(img,center_QSO,loc_ind_star, ID=ID,reg_ty = 'acs')
else:
save_loc_png(img,center_QSO,loc_ind_star,loc_like_star, ID=ID,reg_ty = 'acs')
PSF_list = [PSFs[i][0] for i in range(len(PSFs))]
PSF_masks = [PSFs[i][3] for i in range(len(PSFs))]
from flux_profile import QSO_psfs_compare
gridsp_l = ['log', None]
if_annuli_l = [False, True]
for i in range(2):
for j in range(2):
plt_which_PSF = None
plt_QSO = False
# if i+j == 0:
# plt_which_PSF = range(len(PSFs))
# plt_QSO = True
fig_psf_com = QSO_psfs_compare(QSO=QSO, QSO_msk=QSO_mask, psfs= PSF_list,
plt_which_PSF=plt_which_PSF,
PSF_mask_img=PSF_masks, grids=30,
include_QSO=True,
plt_QSO = plt_QSO, norm_pix = 5.0,
gridspace= gridsp_l[i], if_annuli=if_annuli_l[j])
# fig_psf_com.savefig('PSFvsQSO{0}_{1}_{2}.pdf'.format(i,['xlog','xlin'][i],['circ','annu'][j]))
if j==1:
plt.show()
else:
plt.close()
import pickle
filename='{0}_PSFs_QSO'.format(ID)
datafile = open(filename, 'wb')
QSOs = [QSO,cut_center]
pickle.dump([PSFs, QSOs], open(filename, 'wb'))
datafile.close()
#import pickle
#datafile = open('{0}_PSFs_QSO'.format(ID),'rb')
#PSFs, QSO=pickle.load(open('XID2202_PSFs_QSO','rb'))
#datafile.close()
| [
"[email protected]"
] | |
08de3983cade375a46349f7de656f9ca3a921a9e | 89b45e528f3d495f1dd6f5bcdd1a38ff96870e25 | /PythonCrashCourse/chapter_06/exercise6_05.py | b03a04f3a086ec1337414ecd27d147eb1ba55d24 | [] | no_license | imatyukin/python | 2ec6e712d4d988335fc815c7f8da049968cc1161 | 58e72e43c835fa96fb2e8e800fe1a370c7328a39 | refs/heads/master | 2023-07-21T13:00:31.433336 | 2022-08-24T13:34:32 | 2022-08-24T13:34:32 | 98,356,174 | 2 | 0 | null | 2023-07-16T02:31:48 | 2017-07-25T22:45:29 | Python | UTF-8 | Python | false | false | 660 | py | #!/usr/bin/env python3
rivers = {
'amazon': 'brasil',
'nile': 'egypt',
'mississippi': 'usa',
}
for river, country in rivers.items():
if river == 'mississippi':
print("The " + river.title() + " runs through " + country.upper() + ".")
else:
print("The " + river.title() + " runs through " + country.title() + ".")
print("\nThe following rivers have been mentioned:")
for river in set(rivers.keys()):
print(river.title())
print("\nThe following countries have been mentioned:")
for country in set(rivers.values()):
if country == 'usa':
print(country.upper())
else:
print(country.title())
| [
"[email protected]"
] | |
68a5556339d6c4ba6f854be0cda3f296574eaf67 | 5981fc46a2e033b1c8b3f49449ee55c3dbcc17c6 | /allopathy/views.py | ec56988bb3024a45ff6d4c154ecd36f652af9285 | [] | no_license | shamitlal/Medical-Website | 619ad0aa18dc69fe13cb5850d4de6a177d41d6ca | 17d3f1387c65f5bda547894d002ef22143484158 | refs/heads/master | 2021-01-13T14:50:44.216726 | 2016-12-14T19:03:25 | 2016-12-14T19:03:25 | 76,488,492 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | from django.shortcuts import render
# Create your views here.
def allopathy(request):
return render(request, 'allopathy/allopathy.html', {})
| [
"[email protected]"
] | |
4726012f426c9e8943505c2ecbca998aa912a06a | 246e9200a834261eebcf1aaa54da5080981a24ea | /project-euler/26-50/distinct-powers.py | 548316d3dcc396ed31b53767aa4519b6d076d20d | [] | no_license | kalsotra2001/practice | db435514b7b57ce549b96a8baf64fad8f579da18 | bbc8a458718ad875ce5b7caa0e56afe94ae6fa68 | refs/heads/master | 2021-12-15T20:48:21.186658 | 2017-09-07T23:01:56 | 2017-09-07T23:01:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | powers = set()
for i in range(2, 101):
for j in range(2, 101):
powers.add(i ** j)
print len(powers) | [
"[email protected]"
] | |
806594d6287d004b7f59fd97bde8ccda5942dc4a | 17d531819123ea09fef201353efcbee4e8ff8097 | /reduce/owner/permissions.py | 7566e4734a5b33e2760e0341428f1d01cee25dce | [] | no_license | showmethepeach/Re.duce | 07a00463c02c572d6e96e177ea0ef5e6e615c2ad | d1ca88ef2256683e0ef51f12c0b6ec747fdda24c | refs/heads/master | 2021-08-24T01:10:51.920406 | 2017-10-26T15:53:22 | 2017-10-26T15:53:22 | 104,641,211 | 0 | 0 | null | 2017-11-16T06:15:53 | 2017-09-24T12:11:28 | Python | UTF-8 | Python | false | false | 290 | py | from rest_framework import permissions
class IsOwner(permissions.BasePermission):
"""
Allow read and write access only to the OWNER
"""
def has_permission(self, request, view):
if request.user.is_authenticated and request.user.owner is not None:
return True
| [
"[email protected]"
] | |
b99ab818fca8289648830abc2a851b6e7323a5e5 | 2e60017779c5c286629ab5a3a7aeb27a6b19a60b | /python/2017day19part2.py | 7f09c5b24ce6b8bf021a566185e157549778341b | [] | no_license | jamesjiang52/10000-Lines-of-Code | f8c7cb4b8d5e441693f3e0f6919731ce4680f60d | 3b6c20b288bad1de5390ad672c73272d98e93ae0 | refs/heads/master | 2020-03-15T03:50:38.104917 | 2018-05-07T04:41:52 | 2018-05-07T04:41:52 | 131,952,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 19 13:21:23 2017
@author: James Jiang
"""
all_lines = [line.rstrip('\n') for line in open('Data.txt')]
all_lines_chars = []
for i in range(len(all_lines)):
chars = [j for j in all_lines[i]]
all_lines_chars.append(chars)
index_list = 0
index_all = 0
for i in range(len(all_lines_chars[0])):
if all_lines_chars[0][i] == '|':
index_list = i
mode = 'down'
total = 0
while True:
if all_lines_chars[index_all][index_list] == ' ':
break
if all_lines_chars[index_all][index_list] == '+':
k = 0
if (mode == 'down') or (mode == 'up'):
if index_list != 0:
if all_lines_chars[index_all][index_list - 1] != ' ':
mode = 'left'
k += 1
if index_list != len(all_lines_chars[index_all]) - 1:
if all_lines_chars[index_all][index_list + 1] != ' ':
mode = 'right'
k += 1
elif (mode == 'left') or (mode == 'right'):
if index_all != 0:
if all_lines_chars[index_all - 1][index_list] != ' ':
mode = 'up'
k += 1
if index_all != len(all_lines_chars) - 1:
if all_lines_chars[index_all + 1][index_list] != ' ':
mode = 'down'
k += 1
if k == 0:
break
if mode == 'down':
index_all += 1
elif mode == 'up':
index_all -= 1
elif mode == 'left':
index_list -= 1
elif mode == 'right':
index_list += 1
total += 1
print(total)
| [
"[email protected]"
] | |
a1912ffe7b983cce6c3ec5119d89a01a0a747635 | fd02e8924ba325f2a62bbf97e460740a65559c74 | /PythonStart/0722Python/循环.py | 6e97b5c0cfd955e8823bf5ef1a968b1dc63d9ef4 | [] | no_license | ShiJingChao/Python- | 51ee62f7f39e0d570bdd853794c028020ca2dbc2 | 26bc75c1981a1ffe1b554068c3d78455392cc7b2 | refs/heads/master | 2020-07-08T00:05:16.532383 | 2019-10-14T15:19:49 | 2019-10-14T15:19:49 | 203,512,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | # for i in range(1, 1001):
# print("第%d" % i, "次hello word", end=',')
# i=1
# while i < 100:
# print(i, end=" ")
# i += 1
a = 1100
b = 2255
print(a & b)
c = 0b100011001111
print(a&c) | [
"[email protected]"
] | |
a2cfd5e483d4082c18fb3e4fd15d7a66f8e7946c | 292c8f912492e97ecb852437bba4e7294833f514 | /Figures/fig_scatterplot_task_score_egoallo.py | e9a37c438e6457eedfb5b61243ab8b3920f48715 | [] | no_license | CornuA1/lntmodel_final | e47971ce3d36bd6ef8a0b1f125c706663752c307 | 1dd732ae86c1f8680fbf0f6d8e1c0ec3a7fd22cb | refs/heads/main | 2023-07-11T13:23:09.607029 | 2021-08-13T16:15:00 | 2021-08-13T16:15:00 | 395,445,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,410 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 13:36:14 2021
@author: lukasfischer
"""
import csv, os, yaml, warnings
import numpy as np
import scipy as sp
from scipy.io import loadmat
from scipy import stats
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
plt.rcParams['xtick.bottom'] = True
plt.rcParams['ytick.left'] = True
import seaborn as sns
sns.set_style("white")
warnings.filterwarnings('ignore')
# load yaml file with local filepaths
with open('..' + os.sep + 'loc_settings.yaml', 'r') as f:
loc_info = yaml.load(f)
def make_folder(out_folder):
if not os.path.exists(out_folder):
os.makedirs(out_folder)
fname = "total_analysis"
TRIAL_THRESHOLD = 0
file_path = loc_info["raw_dir"] + "figure_sample_data" + os.sep + fname + ".mat"
data = sp.io.loadmat(file_path)
naive = [('LF191022_1','20191115'),('LF191022_3','20191113'),('LF191023_blue','20191119'),('LF191022_2','20191116'),('LF191023_blank','20191114'),('LF191024_1','20191114')]
# expert = [('LF191022_1','20191209'),('LF191022_3','20191207'),('LF191023_blue','20191208'),('LF191022_2','20191210'),('LF191023_blank','20191210'),('LF191024_1','20191210')]
# expert = [('LF191022_1','20191204'),('LF191022_2','20191210'),('LF191022_3','20191207'),('LF191023_blank','20191206'),('LF191023_blue','20191204'),('LF191024_1','20191204')]
expert = [('LF191022_1','20191209'),('LF191022_2','20191210'),('LF191022_3','20191210'),('LF191023_blank','20191210'),('LF191023_blue','20191210'),('LF191024_1','20191210')]
all_sessions = [('LF191022_1','20191114'),
('LF191022_1','20191115'),
('LF191022_1','20191121'),
('LF191022_1','20191125'),
('LF191022_1','20191204'),
('LF191022_1','20191207'),
('LF191022_1','20191209'),
# ('LF191022_1','20191211'),
# ('LF191022_1','20191213'),
# ('LF191022_1','20191215'),
# ('LF191022_1','20191217'),
('LF191022_2','20191114'),
('LF191022_2','20191116'),
('LF191022_2','20191121'),
('LF191022_2','20191204'),
('LF191022_2','20191206'),
('LF191022_2','20191208'),
('LF191022_2','20191210'),
# ('LF191022_2','20191212'),
# ('LF191022_2','20191216'),
('LF191022_3','20191113'),
('LF191022_3','20191114'),
('LF191022_3','20191119'),
('LF191022_3','20191121'),
('LF191022_3','20191204'),
('LF191022_3','20191207'),
('LF191022_3','20191210'),
# ('LF191022_3','20191211'),
# ('LF191022_3','20191215'),
# ('LF191022_3','20191217'),
('LF191023_blank','20191114'),
('LF191023_blank','20191116'),
('LF191023_blank','20191121'),
('LF191023_blank','20191206'),
('LF191023_blank','20191208'),
('LF191023_blank','20191210'),
# ('LF191023_blank','20191212'),
# ('LF191023_blank','20191213'),
# ('LF191023_blank','20191216'),
# ('LF191023_blank','20191217'),
('LF191023_blue','20191113'),
('LF191023_blue','20191114'),
('LF191023_blue','20191119'),
('LF191023_blue','20191121'),
('LF191023_blue','20191125'),
('LF191023_blue','20191204'),
('LF191023_blue','20191206'),
('LF191023_blue','20191208'),
('LF191023_blue','20191210'),
# ('LF191023_blue','20191212'),
# ('LF191023_blue','20191215'),
# ('LF191023_blue','20191217'),
('LF191024_1','20191114'),
('LF191024_1','20191115'),
('LF191024_1','20191121'),
('LF191024_1','20191204'),
('LF191024_1','20191207'),
('LF191024_1','20191210')
]
print("------ NAIVE --------")
tscore_naive = []
egoallo_naive = []
ntrials_naive = []
for animal,session in naive:
print(animal,session,data[animal + '_' + session])
if data[animal + '_' + session][0][1] > TRIAL_THRESHOLD:
tscore_naive.append(data[animal + '_' + session][0][0])
egoallo_naive.append(data[animal + '_' + session][0][2])
ntrials_naive.append(data[animal + '_' + session][0][1])
print("------ EXPERT --------")
tscore_expert = []
egoallo_expert = []
ntrials_expert = []
for animal,session in expert:
print(animal,session, data[animal + '_' + session])
if data[animal + '_' + session][0][1] > TRIAL_THRESHOLD:
tscore_expert.append(data[animal + '_' + session][0][0])
egoallo_expert.append(data[animal + '_' + session][0][2])
ntrials_expert.append(data[animal + '_' + session][0][1])
print("------ ALL --------")
tscore_all = []
egoallo_all = []
n_trials = []
for animal,session in all_sessions:
print(animal,session, data[animal + '_' + session])
if data[animal + '_' + session][0][1] > TRIAL_THRESHOLD:
tscore_all.append(data[animal + '_' + session][0][0])
egoallo_all.append(data[animal + '_' + session][0][2])
n_trials.append(data[animal + '_' + session][0][1])
fig = plt.figure(figsize=(2.5,20))
(ax,ax2,ax3,ax4) = fig.subplots(4,1)
n_animals_naive = len(tscore_naive)
n_animals_expert = len(tscore_expert)
# ax.scatter(np.zeros((n_animals_naive,1)), egoallo_naive, c='0.5')
# ax.scatter(np.ones((n_animals_expert,1)), egoallo_expert, c='0.5')
_,p_ttest = sp.stats.ttest_rel(egoallo_naive,egoallo_expert)
ax.set_xlim([-0.2,2])
ax.set_ylim([0.4,1])
if n_animals_naive == n_animals_expert:
for i in range(n_animals_naive):
ax.plot([0,1], [egoallo_naive[i], egoallo_expert[i]], c='0.7', marker='o', lw=2)
# ax.plot([0,1], [np.mean(egoallo_naive), np.mean(egoallo_expert)], c='k')
ax.plot([0,1], [np.mean(egoallo_naive), np.mean(egoallo_expert)], marker='s', markersize=10, c='k', lw=3)
ax.set_xlim([-0.2,1.2])
ax.set_ylim([0,1])
ax2.scatter(tscore_naive, egoallo_naive, color='0.7')
ax2.scatter(tscore_expert, egoallo_expert, color='0.7')
ax2.set_ylim([0.4,1])
ax2.set_xticks([-30,0,30,60,90])
ax2.set_xticklabels(['-30','0','30','60','90'])
corr_ne,p_ne = sp.stats.spearmanr(np.hstack((tscore_naive,tscore_expert)), np.hstack((egoallo_naive,egoallo_expert)))
ax2.set_ylim([0,1])
ax2.set_xlim([-45,75])
# fit a linear regression
res = stats.linregress(x=np.concatenate((tscore_naive, tscore_expert)), y=np.concatenate((egoallo_naive, egoallo_expert)))
ax2.plot(np.concatenate((tscore_naive, tscore_expert)), res.intercept + res.slope*np.concatenate((tscore_naive, tscore_expert)), 'r', label='fitted line', lw=2)
ax3.scatter(tscore_all, egoallo_all, c='0.7')
ax3.set_ylim([0.3,1])
corr,p = sp.stats.spearmanr(tscore_all, egoallo_all)
ax3.set_ylim([0.3,1])
ax3.set_xticks([-30,0,30,60,90])
ax3.set_xticklabels(['-30','0','30','60','90'])
ax3.set_ylim([0,1])
ax3.set_xlim([-45,105])
res = stats.linregress(x=tscore_all, y=egoallo_all)
ax3.plot(tscore_all, res.intercept + np.multiply(res.slope,tscore_all), 'r', label='fitted line', lw=2)
ax4.scatter(n_trials, egoallo_all)
corr_nt,p_nt = sp.stats.spearmanr(n_trials, egoallo_all)
sns.despine(top=True, right=True, left=False, bottom=False)
ax.tick_params(left='on',bottom='on',direction='out')
ax2.tick_params(left='on',bottom='on',direction='out')
ax3.tick_params(left='on',bottom='on',direction='out')
print('------ Tscores ------')
print(np.mean(tscore_naive), sp.stats.sem(tscore_naive))
print(np.mean(tscore_expert), sp.stats.sem(tscore_expert))
print("---------------------")
print("------ STATS --------")
print("naive vs. expert: " + str(sp.stats.ttest_ind(egoallo_naive, egoallo_expert)))
print("naive vs. expert Spearman: " + str(sp.stats.spearmanr(egoallo_naive, egoallo_expert)))
print("All Spearman: " + str(sp.stats.spearmanr(tscore_all, egoallo_all)))
print("---------------------")
fig.savefig("C:\\Users\\lfisc\\Work\\Projects\\Lntmodel\\manuscript\\Figure 1\\egoallo_scatterplot.svg", format='svg')
print("saved" + "C:\\Users\\lfisc\\Work\\Projects\\Lntmodel\\manuscript\\Figure 1\\egoallo_scatterplot.svg" + "_fig.svg")
all_r_naive = np.empty((0,))
mean_r_naive = np.empty((0,))
for na in naive:
r2_data = loadmat("C:\\Users\\lfisc\\Work\\Projects\\Lntmodel\\data_2p\\dataset" + os.sep + na[0] + '_' + na[1] + '_r2_data.mat')
all_r_naive = np.hstack((all_r_naive,r2_data['data'][0]))
mean_r_naive = np.hstack((mean_r_naive,np.mean(r2_data['data'])))
all_r_expert = np.empty((0,))
mean_r_expert = np.empty((0,))
for na in expert:
r2_data = loadmat("C:\\Users\\lfisc\\Work\\Projects\\Lntmodel\\data_2p\\dataset" + os.sep + na[0] + '_' + na[1] + '_r2_data.mat')
all_r_expert = np.hstack((all_r_expert,r2_data['data'][0]))
mean_r_expert = np.hstack((mean_r_expert,np.mean(r2_data['data'])))
all_r_allsess = np.empty((0,))
mean_r_allsess = np.empty((0,))
for na in all_sessions:
r2_data = loadmat("C:\\Users\\lfisc\\Work\\Projects\\Lntmodel\\data_2p\\dataset" + os.sep + na[0] + '_' + na[1] + '_r2_data.mat')
all_r_allsess = np.hstack((all_r_allsess,r2_data['data'][0]))
mean_r_allsess = np.hstack((mean_r_allsess,np.mean(r2_data['data'])))
fig = plt.figure(figsize=(15,10))
ax1 = fig.add_subplot(2,3,1)
ax2 = fig.add_subplot(2,3,4)
ax3 = fig.add_subplot(2,3,2)
ax4 = fig.add_subplot(2,3,5)
ax5 = fig.add_subplot(2,3,3)
ax6 = fig.add_subplot(2,3,6)
ax1.hist(all_r_naive)
ax2.hist(mean_r_naive)
ax1.set_xlabel('naive r2 distribution')
ax3.hist(all_r_expert)
ax4.hist(mean_r_expert)
ax3.set_xlabel('expert r2 distribution')
ax5.hist(all_r_allsess, bins=100)
ax6.hist(mean_r_allsess, color='r')
ax5.set_xlabel('allsess r2 distribution')
ax6.set_ylim([0,14])
sns.despine(ax=ax1, right=True, top=True)
ax1.tick_params(left='on',bottom='on',direction='out')
sns.despine(ax=ax2, right=True, top=True)
ax2.tick_params(left='on',bottom='on',direction='out')
sns.despine(ax=ax3, right=True, top=True)
ax3.tick_params(left='on',bottom='on',direction='out')
sns.despine(ax=ax4, right=True, top=True)
ax4.tick_params(left='on',bottom='on',direction='out')
sns.despine(ax=ax5, right=True, top=True)
ax5.tick_params(left='on',bottom='on',direction='out')
sns.despine(ax=ax6, right=True, top=True)
ax6.tick_params(left='on',bottom='on',direction='out')
plt.tight_layout()
ax6.set_xlabel('Deviance explained')
ax6.set_ylabel('Session count')
make_folder("C:\\Users\\lfisc\\Work\\Projects\\Lntmodel\\manuscript\\Figure 1\\")
fname = "C:\\Users\\lfisc\\Work\\Projects\\Lntmodel\\manuscript\\Figure 1\\r2_distributions.svg"
fig.savefig(fname, format='svg')
print("saved " + fname)
| [
"[email protected]"
] | |
c7e32b7956006589585393f647556ed9c81dfb10 | 7f25740b1ef47edc24db1a3618b399959b073fe1 | /1105_08_closer.py | 60b915e8ee1fc7296456e8dbffab48f45dbbce39 | [] | no_license | pjh9362/PyProject | b2d0aa5f8cfbf2abbd16232f2b55859be50446dc | 076d31e0055999c1f60767a9d60e122fb1fc913e | refs/heads/main | 2023-01-09T12:12:06.913295 | 2020-11-07T15:32:03 | 2020-11-07T15:32:03 | 306,814,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | '''
x = 10  # global variable
def foo():
    print(x)  # prints the global variable
foo()
print(x)  # prints the global variable
'''
def foo():
    x = 10  # foo's local variable
    print(x)  # prints foo's local variable
foo()
print(x)  # error: foo's local variable cannot be accessed here
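# Note: the final print(x) raises NameError, because x was a local variable
# that existed only while foo() was running.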
| [
"[email protected]"
] | |
56fe690f573e1bcd1c237c4fc714e06af528d8d6 | ceb3d82494813cd21e38231964e098bb3efe093b | /Transform/matrix_transform.py | b31c53519cc07e99f7fae2eaae98fa5108272797 | [
"Apache-2.0"
] | permissive | Joevaen/Scikit-image_On_CT | 0c0a306a9ca18668bd9bb4105e577766b1d5578b | e3bf0eeadc50691041b4b7c44a19d07546a85001 | refs/heads/main | 2023-03-16T01:28:04.871513 | 2021-03-16T07:53:57 | 2021-03-16T07:53:57 | 344,071,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | # Apply a 2D matrix transformation
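# A minimal sketch (added for illustration; not part of the original file):
# applying a 2D homogeneous matrix to coordinates with scikit-image. The
# rotation angle and translation values below are assumptions.
import numpy as np
from skimage import transform

theta = np.deg2rad(30)
matrix = np.array([[np.cos(theta), -np.sin(theta), 5.0],
                   [np.sin(theta),  np.cos(theta), 2.0],
                   [0.0,            0.0,           1.0]])

coords = np.array([[0.0, 0.0], [10.0, 0.0], [10.0, 10.0]])
# matrix_transform applies the homogeneous matrix to an (N, 2) array of (x, y) pairs.
print(transform.matrix_transform(coords, matrix))
 | [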
"[email protected]"
] | |
96d43d8fa24fe2bf0141da26ab1de903a5a6164a | 6d3c865ce6d9c416d8d11e91d6571a5154b036cf | /js_vacancies/apps.py | c28e61b96619b705fa4509492f9bf1a51fea5e6d | [] | no_license | compoundpartners/js-vacancies | 2cc94c842df980be177c6fa64b3879b5dcc50bbc | 175d9f3673c7b002db5c0ea550bb0f29638b7cbb | refs/heads/master | 2021-07-17T05:41:29.800636 | 2020-07-07T14:25:28 | 2020-07-07T14:25:28 | 178,962,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class Vacancies(AppConfig):
name = 'js_vacancies'
verbose_name = 'Vacancies'
| [
"[email protected]"
] | |
4f596c420101e3d0cb7db56aec280d763311ef13 | 6f04a6ef99c581ed2f0519c897f254a7b63fb61d | /rastervision/data/vector_source/default.py | 3946d67b4fa693f28e9a6590c44f1eadb29e48b8 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | dgketchum/raster-vision | 18030c9a8bfe99386aa95adbf8e3ec51d204947f | fe74bef30daa5821023946576b00c584ddc56de8 | refs/heads/master | 2020-08-30T13:56:08.598240 | 2019-11-03T17:38:33 | 2019-11-03T17:38:33 | 218,400,435 | 3 | 1 | NOASSERTION | 2019-10-29T23:09:57 | 2019-10-29T23:09:57 | null | UTF-8 | Python | false | false | 1,230 | py | from abc import (ABC, abstractmethod)
import os
import rastervision as rv
class VectorSourceDefaultProvider(ABC):
@staticmethod
@abstractmethod
def handles(s):
"""Returns True of this provider is a default for this string"""
pass
@abstractmethod
def construct(s):
"""Constructs a default VectorSource based on the
string.
"""
pass
class GeoJSONVectorSourceDefaultProvider(VectorSourceDefaultProvider):
@staticmethod
def handles(uri):
ext = os.path.splitext(uri)[1]
return ext.lower() in ['.json', '.geojson']
@staticmethod
def construct(uri):
return rv.VectorSourceConfig.builder(rv.GEOJSON_SOURCE) \
.with_uri(uri) \
.build()
class VectorTileVectorSourceDefaultProvider(VectorSourceDefaultProvider):
@staticmethod
def handles(uri):
ext = os.path.splitext(uri)[1]
return ext.lower() in ['.pbf', '.mvt']
@staticmethod
def construct(uri):
return rv.VectorSourceConfig.builder(rv.VECTOR_TILE_SOURCE) \
.with_uri(uri) \
.build()
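# A usage sketch (hypothetical wiring, added for illustration): pick the first
# default provider that handles a given URI and build a config from it.
def construct_default_vector_source(uri):
    for provider in (GeoJSONVectorSourceDefaultProvider,
                     VectorTileVectorSourceDefaultProvider):
        if provider.handles(uri):
            return provider.construct(uri)
    raise ValueError('No default VectorSource provider handles {}'.format(uri))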
| [
"[email protected]"
] | |
e33911f4ff39e954282be6c971e468995f91606c | 0d32e3819606c3fb6820d0cd5f5097db3b0d3dd4 | /HW3/sarsa_mountain_car.py | 0d4789ce9c45fd1092146fe290050525440869d0 | [] | no_license | IanCBrown/COMP5600 | e8e06b2a8e3bde0acc6897adb2396a57a2811f0a | ef454c009d6fd5eec50ceec5a8283a7c6d81d097 | refs/heads/master | 2020-08-02T13:20:41.024681 | 2019-12-09T03:53:37 | 2019-12-09T03:53:37 | 211,366,293 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,189 | py | import math
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import gym
from gym import spaces
from gym.utils import seeding
# Resources:
# https://en.wikipedia.org/wiki/Mountain_car_problem
# https://towardsdatascience.com/getting-started-with-reinforcement-learning-and-open-ai-gym-c289aca874f
# https://towardsdatascience.com/reinforcement-learning-temporal-difference-sarsa-q-learning-expected-sarsa-on-python-9fecfda7467e
def epsilon_greedy(Q, state, action_space, epsilon):
# if in epsilon range use it
if np.random.rand() < 1 - epsilon:
action = np.argmax(Q[state[0], state[1]])
# else take random action
else:
action = np.random.randint(0, action_space)
return action
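# For example, with epsilon = 0.2, epsilon_greedy picks the greedy (argmax)
# action with probability 0.8 and a uniformly random action otherwise.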
def sarsa(learning_rate, discount, epsilon, min_epsilon, episodes):
# initialize environment
env = gym.make("MountainCar-v0")
env.reset()
states = (env.observation_space.high - env.observation_space.low)*np.array([10,100])
states = np.round(states, 0).astype(int) + 1
# Q(s,a)
Q_table = np.random.uniform(low = -1, high = 1, size = (states[0], states[1], env.action_space.n))
reward_list = []
var_list = []
avg_reward_list = []
# reduce epsilon linearly as time increases
decay = (epsilon - min_epsilon)/episodes
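    # e.g. with the defaults below (epsilon=0.8, min_epsilon=0, 2500 episodes),
    # decay = 0.8 / 2500 = 0.00032 is subtracted per episode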
    # SARSA main loop
    for i in range(episodes):
        finished = False
        total_reward = 0
        reward = 0
        state = env.reset()
        state_adj = (state - env.observation_space.low)*np.array([10,100])
        state_adj = np.round(state_adj, 0).astype(int)
        # pick the first action epsilon-greedily
        action = epsilon_greedy(Q_table, state_adj, env.action_space.n, epsilon)
        while not finished:
            # render last N episodes
            # comment out to see plots
            # if i >= episodes - 1:
            #     env.render()
            next_state, reward, finished, info = env.step(action)
            # Discretize
            next_state_adj = (next_state - env.observation_space.low)*np.array([10,100])
            next_state_adj = np.round(next_state_adj, 0).astype(int)
            # pick the next action epsilon-greedily (SARSA is on-policy)
            next_action = epsilon_greedy(Q_table, next_state_adj, env.action_space.n, epsilon)
            if finished and next_state[0] >= 0.5:
                # reached the goal flag (position >= 0.5): terminal update
                Q_table[state_adj[0], state_adj[1], action] = reward
            else:
                # SARSA update: bootstrap from the action actually taken next,
                # not from the max over actions (which would be Q-learning)
                update = learning_rate * (reward + discount * Q_table[next_state_adj[0], next_state_adj[1], next_action]
                                          - Q_table[state_adj[0], state_adj[1], action])
                Q_table[state_adj[0], state_adj[1], action] += update
            total_reward += reward
            state_adj = next_state_adj
            action = next_action
# decay epsilon if still greater than min_epsilon
if epsilon > min_epsilon:
epsilon -= decay
reward_list.append(total_reward)
# choose how often to record data
# recording every data point will make the plots crowded
# 10 and 100 work well.
recording_interval = 100
if i % recording_interval == 0:
avg_reward = np.mean(reward_list)
var = np.var(reward_list)
var_list.append(var)
avg_reward_list.append(avg_reward)
reward_list = []
env.close()
return (avg_reward_list, var_list)
# Adjust these parameters as needed
number_of_episodes = 2500
learning_rate = 0.1
gamma = 0.9
epsilon = 0.8
min_epsilon = 0
def single_run():
"""
Run the algorithm once
"""
rewards_and_var = sarsa(learning_rate, gamma, epsilon, min_epsilon, number_of_episodes)
avg_reward = rewards_and_var[0]
var = rewards_and_var[1]
episodes1 = 100*(np.arange(len(avg_reward)) + 1)
episodes2 = 100*(np.arange(len(var)) + 1)
plt.figure("Average Reward vs. Episodes")
plt.title("Average Reward vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Average Reward")
plt.plot(episodes1, avg_reward, color='blue')
plt.figure("Variance vs. Episodes")
plt.title("Variance vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Variance")
plt.plot(episodes2, var, color='orange')
plt.figure("Average Reward w/ Variance vs. Episodes")
plt.title("Average Reward w/ Variance vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Average Reward w/ Variance")
plt.errorbar(episodes1, avg_reward, var, linestyle='None', marker='^', ecolor="orange")
plt.show()
def multi_run(N):
"""
Run the algorithm N times
@param N - number of times to test (e.g. 20)
"""
rewards = []
vars = []
for _ in range(N):
rewards_and_var = sarsa(learning_rate, gamma, epsilon, min_epsilon, number_of_episodes)
avg_reward = rewards_and_var[0]
var = rewards_and_var[1]
rewards.append(avg_reward)
vars.append(var)
rewards = list(zip(*rewards))
vars = list(zip(*vars))
reward_to_plot = []
for sublist in rewards:
reward_to_plot.append(np.mean(sublist))
var_to_plot = []
for sublist in vars:
var_to_plot.append(np.mean(sublist))
episodes1 = 100*(np.arange(len(avg_reward)) + 1)
episodes2 = 100*(np.arange(len(var)) + 1)
plt.figure("Average Reward vs. Episodes")
plt.title("Average Reward vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Average Reward")
plt.plot(episodes1, reward_to_plot, color='blue')
plt.savefig("sarsa_results/Average_Reward_vs_Episodes.png")
plt.figure("Variance vs. Episodes")
plt.title("Variance vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Variance")
plt.plot(episodes2, var_to_plot, color='orange')
plt.savefig("sarsa_results/Variance_vs_Episodes.png")
plt.figure("Average Reward w/ Variance vs. Episodes")
plt.title("Average Reward w/ Variance vs. Episodes")
plt.xlabel("Episodes")
plt.ylabel("Average Reward w/ Variance")
plt.errorbar(episodes1, reward_to_plot, var_to_plot, linestyle='None', marker='^', ecolor="orange")
plt.savefig("sarsa_results/Average_Reward_and_Variance_vs_Episodes.png")
# choose multi or single run
# single_run()
multi_run(20)
| [
"[email protected]"
] | |
e45a01330d9e90fa76dea147d9fc060e42d10c77 | 9044b440bed2b8407ed9e04f7fb9d3d2a7593136 | /vision/classification/slim/image_models/finetune/train.py | b15420b6bf71de14a447e1b40980949e6c95830b | [] | no_license | xuzhezhaozhao/ai | d4264f5d15cc5fa514e81adb06eb83731a0ca818 | 925cbd31ad79f8827e2c3c706f4b51910f9f85d1 | refs/heads/master | 2022-01-22T07:04:29.082590 | 2022-01-17T06:49:39 | 2022-01-17T06:49:39 | 136,691,051 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,638 | py | #! /usr/bin/env python
# -*- coding=utf8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import input_data
import hook
import build_model_fn
def build_estimator(opts):
"""Build estimator."""
num_samples_per_epoch = len(input_data.read_txt_file(
opts.train_data_path, False))
save_checkpoints_secs = None
if opts.save_checkpoints_secs > 0:
save_checkpoints_secs = opts.save_checkpoints_secs
save_checkpoints_steps = None
if opts.save_checkpoints_steps > 0 and opts.save_checkpoints_epoches > 0:
raise ValueError("save_checkpoints_steps and save_checkpoints_epoches "
"should not be both set.")
if opts.save_checkpoints_steps > 0:
save_checkpoints_steps = opts.save_checkpoints_steps
if opts.save_checkpoints_epoches > 0:
save_checkpoints_steps = int(opts.save_checkpoints_epoches *
num_samples_per_epoch / opts.batch_size)
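    # For example, with 10,000 training samples, batch_size 32 and
    # save_checkpoints_epoches 0.5, checkpoints are written every
    # int(0.5 * 10000 / 32) = 156 steps.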
config_keys = {}
config_keys['model_dir'] = opts.model_dir
config_keys['tf_random_seed'] = None
config_keys['save_summary_steps'] = opts.save_summary_steps
config_keys['save_checkpoints_secs'] = save_checkpoints_secs
config_keys['save_checkpoints_steps'] = save_checkpoints_steps
config_keys['session_config'] = None
config_keys['keep_checkpoint_max'] = opts.keep_checkpoint_max
config_keys['keep_checkpoint_every_n_hours'] = 10000
config_keys['log_step_count_steps'] = opts.log_step_count_steps
estimator_keys = {}
estimator_keys['model_fn'] = build_model_fn.model_fn
estimator_keys['params'] = {
'opts': opts,
'num_samples_per_epoch': num_samples_per_epoch
}
config = tf.estimator.RunConfig(**config_keys)
estimator_keys['config'] = config
estimator = tf.estimator.Estimator(**estimator_keys)
return estimator
def create_hooks(opts):
"""Create profile hooks."""
save_steps = opts.profile_steps
meta_hook = hook.MetadataHook(save_steps=save_steps,
output_dir=opts.model_dir)
profile_hook = tf.train.ProfilerHook(save_steps=save_steps,
output_dir=opts.model_dir,
show_dataflow=True,
show_memory=True)
hooks = [meta_hook, profile_hook] if opts.use_profile_hook else []
return hooks
def train_and_eval_in_local_mode(opts, estimator, hooks):
"""Train and eval model in lcoal mode."""
build_train_input_fn = input_data.build_train_input_fn(
opts, opts.train_data_path)
build_eval_input_fn = input_data.build_eval_input_fn(
opts, opts.eval_data_path)
num_samples_per_epoch = len(
input_data.read_txt_file(opts.train_data_path, False))
num_steps_per_epoch = num_samples_per_epoch / opts.batch_size
if opts.max_train_steps > 0:
max_steps = opts.max_train_steps
else:
        max_steps = int(opts.epoch * num_steps_per_epoch)
tf.logging.info('max_steps = {}'.format(max_steps))
max_steps_without_decrease = int(
opts.max_epoches_without_decrease*num_steps_per_epoch)
early_stopping_min_steps = int(
opts.early_stopping_min_epoches*num_steps_per_epoch)
run_every_steps = int(
opts.early_stopping_run_every_epoches*num_steps_per_epoch)
early_stopping_hook = tf.contrib.estimator.stop_if_no_decrease_hook(
estimator, "loss",
max_steps_without_decrease=max_steps_without_decrease,
run_every_secs=None,
min_steps=early_stopping_min_steps,
run_every_steps=run_every_steps)
hooks.append(early_stopping_hook)
train_spec = tf.estimator.TrainSpec(
input_fn=build_train_input_fn,
max_steps=max_steps,
hooks=hooks)
eval_spec = tf.estimator.EvalSpec(
input_fn=build_eval_input_fn,
steps=None,
name='eval',
start_delay_secs=3,
throttle_secs=opts.throttle_secs)
result = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
return result
def export_model_in_local_mode(opts, estimator):
"""Export model in local mode."""
# export model
tf.logging.info("Beginning export model ...")
estimator.export_savedmodel(
opts.export_model_dir,
serving_input_receiver_fn=input_data.build_serving_input_fn(opts))
tf.logging.info("Export model OK")
def train(opts, export=False):
"""Train model."""
estimator = build_estimator(opts)
hooks = create_hooks(opts)
result = train_and_eval_in_local_mode(opts, estimator, hooks)
if export:
export_model_in_local_mode(opts, estimator)
return result
def predict(opts):
tf.logging.info("Begin predict ...")
estimator = build_estimator(opts)
build_predict_input_fn = input_data.build_predict_input_fn(
opts, opts.predict_data_path)
checkpoint_path = opts.predict_checkpoint_path
if tf.gfile.IsDirectory(opts.predict_checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
results = estimator.predict(
input_fn=build_predict_input_fn,
checkpoint_path=checkpoint_path,
yield_single_examples=True)
with open(opts.predict_output, 'w') as fout, \
open(opts.predict_data_path, 'r') as fin:
for result in results:
src = fin.readline().strip()
fout.write(src + ' ')
fout.write(str(result['score'][1]) + '\n')
tf.logging.info("Predict done")
| [
"[email protected]"
] | |
9359e762b8b25c861b32337ae9f6b139862987da | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/PyBox/pybox2d-android/examples/.svn/text-base/pyglet_framework.py.svn-base | 94e88482c339765bee83db8a62e627517c221639 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 21,827 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version Copyright (c) 2010 kne / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
"""
Global Keys:
Space - shoot projectile
Z/X - zoom
Escape - quit
Other keys can be set by the individual test.
Mouse:
Left click - select/drag body (creates mouse joint)
Right click - pan
Shift+Left - drag to create a directional projectile
Scroll - zoom
You can easily add your own tests based on test_empty.
"""
import pyglet
import framework
from framework import *
from pyglet import gl
import string
import math
class grBlended (pyglet.graphics.Group):
"""
This pyglet rendering group enables blending.
"""
def set_state(self):
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
def unset_state(self):
gl.glDisable(gl.GL_BLEND)
class grPointSize (pyglet.graphics.Group):
"""
This pyglet rendering group sets a specific point size.
"""
def __init__(self, size=4.0):
super(grPointSize, self).__init__()
self.size = size
def set_state(self):
gl.glPointSize(self.size)
def unset_state(self):
gl.glPointSize(1.0)
class grText(pyglet.graphics.Group):
"""
This pyglet rendering group sets the proper projection for
displaying text when used.
"""
window = None
def __init__(self, window=None):
super(grText, self).__init__()
self.window = window
def set_state(self):
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glPushMatrix()
gl.glLoadIdentity()
gl.gluOrtho2D(0, self.window.width, 0, self.window.height)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glPushMatrix()
gl.glLoadIdentity()
def unset_state(self):
gl.glPopMatrix()
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glPopMatrix()
gl.glMatrixMode(gl.GL_MODELVIEW)
class PygletDraw(b2Draw):
"""
This debug draw class accepts callbacks from Box2D (which specifies what to draw)
and handles all of the rendering.
If you are writing your own game, you likely will not want to use debug drawing.
Debug drawing, as its name implies, is for debugging.
"""
blended = grBlended()
circle_segments = 16
surface = None
circle_cache_tf = {} # triangle fan (inside)
circle_cache_ll = {} # line loop (border)
def __init__(self, test):
super(PygletDraw, self).__init__()
self.test=test
def StartDraw(self): pass
def EndDraw(self): pass
def triangle_fan(self, vertices):
"""
in: vertices arranged for gl_triangle_fan ((x,y),(x,y)...)
out: vertices arranged for gl_triangles (x,y,x,y,x,y...)
"""
out = []
for i in range(1, len(vertices)-1):
# 0,1,2 0,2,3 0,3,4 ..
out.extend( vertices[0 ] )
out.extend( vertices[i ] )
out.extend( vertices[i+1] )
return len(out) / 2, out
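    # Example (illustrative): a fan over the square ((0,0),(1,0),(1,1),(0,1))
    # expands to triangles (0,0),(1,0),(1,1) and (0,0),(1,1),(0,1), flattened
    # to [0,0, 1,0, 1,1, 0,0, 1,1, 0,1] with a vertex count of 6.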
def line_loop(self, vertices):
"""
in: vertices arranged for gl_line_loop ((x,y),(x,y)...)
out: vertices arranged for gl_lines (x,y,x,y,x,y...)
"""
out = []
for i in range(0, len(vertices)-1):
# 0,1 1,2 2,3 ... len-1,len len,0
out.extend( vertices[i ] )
out.extend( vertices[i+1] )
out.extend( vertices[len(vertices)-1] )
out.extend( vertices[0] )
return len(out)/2, out
def _getLLCircleVertices(self, radius, points):
"""
Get the line loop-style vertices for a given circle.
Drawn as lines.
"Line Loop" is used as that's how the C++ code draws the
vertices, with lines going around the circumference of the
circle (GL_LINE_LOOP).
This returns 'points' amount of lines approximating the
border of a circle.
(x1, y1, x2, y2, x3, y3, ...)
"""
ret = []
step = 2*math.pi/points
n = 0
for i in range(0, points):
ret.append( (math.cos(n) * radius, math.sin(n) * radius ) )
n += step
ret.append( (math.cos(n) * radius, math.sin(n) * radius ) )
return ret
def _getTFCircleVertices(self, radius, points):
"""
Get the triangle fan-style vertices for a given circle.
Drawn as triangles.
"Triangle Fan" is used as that's how the C++ code draws the
vertices, with triangles originating at the center of the
circle, extending around to approximate a filled circle
(GL_TRIANGLE_FAN).
This returns 'points' amount of lines approximating the
circle.
(a1, b1, c1, a2, b2, c2, ...)
"""
ret = []
step = 2*math.pi/points
n = 0
for i in range(0, points):
ret.append( (0.0, 0.0) )
ret.append( (math.cos(n) * radius, math.sin(n) * radius ) )
n += step
ret.append( (math.cos(n) * radius, math.sin(n) * radius ) )
return ret
def getCircleVertices(self, center, radius, points):
"""
Returns the triangles that approximate the circle and
the lines that border the circles edges, given
(center, radius, points).
Caches the calculated LL/TF vertices, but recalculates
based on the center passed in.
TODO: Currently, there's only one point amount,
so the circle cache ignores it when storing. Could cause
some confusion if you're using multiple point counts as
only the first stored point-count for that radius will
show up.
TODO: What does the previous TODO mean?
Returns: (tf_vertices, ll_vertices)
"""
if radius not in self.circle_cache_tf:
self.circle_cache_tf[radius]=self._getTFCircleVertices(radius,points)
self.circle_cache_ll[radius]=self._getLLCircleVertices(radius,points)
ret_tf, ret_ll = [], []
for x, y in self.circle_cache_tf[radius]:
ret_tf.extend( (x+center[0], y+center[1]) )
for x, y in self.circle_cache_ll[radius]:
ret_ll.extend( (x+center[0], y+center[1]) )
return ret_tf, ret_ll
def DrawCircle(self, center, radius, color):
"""
Draw an unfilled circle given center, radius and color.
"""
unused, ll_vertices = self.getCircleVertices( center, radius, self.circle_segments)
ll_count = len(ll_vertices)/2
self.batch.add(ll_count, gl.GL_LINES, None,
('v2f', ll_vertices),
('c4f', [color.r, color.g, color.b, 1.0] * (ll_count)))
def DrawSolidCircle(self, center, radius, axis, color):
"""
Draw an filled circle given center, radius, axis (of orientation) and color.
"""
tf_vertices, ll_vertices = self.getCircleVertices( center, radius, self.circle_segments)
tf_count, ll_count = len(tf_vertices) / 2, len(ll_vertices) / 2
self.batch.add(tf_count, gl.GL_TRIANGLES, self.blended,
('v2f', tf_vertices),
('c4f', [0.5 * color.r, 0.5 * color.g, 0.5 * color.b, 0.5] * (tf_count)))
self.batch.add(ll_count, gl.GL_LINES, None,
('v2f', ll_vertices),
('c4f', [color.r, color.g, color.b, 1.0] * (ll_count)))
p = b2Vec2(center) + radius * b2Vec2(axis)
self.batch.add(2, gl.GL_LINES, None,
('v2f', (center[0], center[1], p[0], p[1])),
('c3f', [1.0, 0.0, 0.0] * 2))
def DrawPolygon(self, vertices, color):
"""
Draw a wireframe polygon given the world vertices (tuples) with the specified color.
"""
if len(vertices)==2:
p1, p2=vertices
self.batch.add(2, gl.GL_LINES, None,
('v2f', (p1[0], p1[1], p2[0], p2[1])),
('c3f', [color.r, color.g, color.b]*2))
else:
ll_count, ll_vertices = self.line_loop(vertices)
self.batch.add(ll_count, gl.GL_LINES, None,
('v2f', ll_vertices),
('c4f', [color.r, color.g, color.b, 1.0] * (ll_count)))
def DrawSolidPolygon(self, vertices, color):
"""
Draw a filled polygon given the world vertices (tuples) with the specified color.
"""
if len(vertices)==2:
p1, p2=vertices
self.batch.add(2, gl.GL_LINES, None,
('v2f', (p1[0], p1[1], p2[0], p2[1])),
('c3f', [color.r, color.g, color.b]*2))
else:
tf_count, tf_vertices = self.triangle_fan(vertices)
if tf_count==0:
return
self.batch.add(tf_count, gl.GL_TRIANGLES, self.blended,
('v2f', tf_vertices),
('c4f', [0.5 * color.r, 0.5 * color.g, 0.5 * color.b, 0.5] * (tf_count)))
ll_count, ll_vertices = self.line_loop(vertices)
self.batch.add(ll_count, gl.GL_LINES, None,
('v2f', ll_vertices),
('c4f', [color.r, color.g, color.b, 1.0] * (ll_count)))
def DrawSegment(self, p1, p2, color):
"""
Draw the line segment from p1-p2 with the specified color.
"""
self.batch.add(2, gl.GL_LINES, None,
('v2f', (p1[0], p1[1], p2[0], p2[1])),
('c3f', [color.r, color.g, color.b]*2))
def DrawXForm(self, xf):
"""
Draw the transform xf on the screen
"""
p1 = xf.position
k_axisScale = 0.4
p2 = p1 + k_axisScale * xf.R.col1
p3 = p1 + k_axisScale * xf.R.col2
self.batch.add(3, gl.GL_LINES, None,
('v2f', (p1[0], p1[1], p2[0], p2[1], p1[0], p1[1], p3[0], p3[1])),
('c3f', [1.0, 0.0, 0.0] * 2 + [0.0, 1.0, 0.0] * 2))
def DrawPoint(self, p, size, color):
"""
Draw a single point at point p given a point size and color.
"""
self.batch.add(1, gl.GL_POINTS, grPointSize(size),
('v2f', (p[0], p[1])),
('c3f', [color.r, color.g, color.b]))
def DrawAABB(self, aabb, color):
"""
Draw a wireframe around the AABB with the given color.
"""
        self.batch.add(8, gl.GL_LINES, None,
            ('v2f', (aabb.lowerBound.x, aabb.lowerBound.y, aabb.upperBound.x, aabb.lowerBound.y,
                aabb.upperBound.x, aabb.lowerBound.y, aabb.upperBound.x, aabb.upperBound.y,
                aabb.upperBound.x, aabb.upperBound.y, aabb.lowerBound.x, aabb.upperBound.y,
                aabb.lowerBound.x, aabb.upperBound.y, aabb.lowerBound.x, aabb.lowerBound.y)),
            ('c3f', [color.r, color.g, color.b] * 8))
def to_screen(self, point):
"""
In here for compatibility with other frameworks.
"""
return tuple(point)
class PygletWindow(pyglet.window.Window):
def __init__(self, test):
super(PygletWindow, self).__init__()
self.test=test
def on_close(self):
"""
Callback: user tried to close the window
"""
pyglet.clock.unschedule(self.test.SimulationLoop)
super(PygletWindow, self).on_close()
def on_show(self):
"""
Callback: the window was shown.
"""
self.test.updateProjection()
def on_key_press(self, key, modifiers):
self.test._Keyboard_Event(key, down=True)
def on_key_release(self, key, modifiers):
self.test._Keyboard_Event(key, down=False)
def on_mouse_press(self, x, y, button, modifiers):
p = self.test.ConvertScreenToWorld(x, y)
self.test.mouseWorld = p
if button == pyglet.window.mouse.LEFT:
if modifiers & pyglet.window.key.MOD_SHIFT:
self.test.ShiftMouseDown( p )
else:
self.test.MouseDown( p )
elif button == pyglet.window.mouse.MIDDLE:
pass
def on_mouse_release(self, x, y, button, modifiers):
"""
Mouse up
"""
p = self.test.ConvertScreenToWorld(x, y)
self.test.mouseWorld = p
if button == pyglet.window.mouse.LEFT:
self.test.MouseUp(p)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
"""
Mouse scrollwheel used
"""
if scroll_y < 0:
self.test.viewZoom *= 1.1
elif scroll_y > 0:
self.test.viewZoom /= 1.1
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
"""
Mouse moved while clicking
"""
p = self.test.ConvertScreenToWorld(x, y)
self.test.mouseWorld = p
self.test.MouseMove(p)
if buttons & pyglet.window.mouse.RIGHT:
self.test.viewCenter -= (float(dx)/5, float(dy)/5)
class PygletFramework(FrameworkBase):
def setup_keys(self):
key=pyglet.window.key
self.keys=key.KeyStateHandler()
# Only basic keys are mapped for now: K_[a-z0-9], K_F[1-12] and K_COMMA.
        for letter in string.ascii_uppercase:  # string.uppercase was Python 2 only
setattr(Keys, 'K_'+letter.lower(), getattr(key, letter))
for i in range(0,10):
setattr(Keys, 'K_%d'%i, getattr(key, '_%d' % i))
for i in range(1,13):
setattr(Keys, 'K_F%d'%i, getattr(key, 'F%d' % i))
Keys.K_LEFT=key.LEFT
Keys.K_RIGHT=key.RIGHT
Keys.K_UP=key.UP
Keys.K_DOWN=key.DOWN
Keys.K_HOME=key.HOME
Keys.K_PAGEUP=key.PAGEUP
Keys.K_PAGEDOWN=key.PAGEDOWN
Keys.K_COMMA=key.COMMA
def __reset(self):
        # Screen/rendering-related
        self._viewZoom = 1.0
        self._viewCenter = None
        self._viewOffset = None
        self.screenSize = None
        self.rMouseDown = False
        self.textLine = 30
        self.fps = 0
        # Window-related
        self.fontname = "Arial"
        self.fontsize = 10
        self.font = None
        self.textGroup = None
self.setup_keys()
def __init__(self):
super(PygletFramework, self).__init__()
if fwSettings.onlyInit: # testing mode doesn't initialize Pyglet
return
print('Initializing Pyglet framework...')
self.__reset()
self.window=PygletWindow(self)
# Initialize the text display group
self.textGroup = grText(self.window)
# Load the font and record the screen dimensions
self.font = pyglet.font.load(self.fontname, self.fontsize)
self.screenSize = b2Vec2(self.window.width, self.window.height)
self.renderer = PygletDraw(self)
self.renderer.surface = self.window.screen
self.world.renderer=self.renderer
self._viewCenter = b2Vec2(0,10.0)
self.groundbody = self.world.CreateBody()
def setCenter(self, value):
"""
Updates the view offset based on the center of the screen.
Tells the debug draw to update its values also.
"""
self._viewCenter = b2Vec2( *value )
self.updateProjection()
def setZoom(self, zoom):
self._viewZoom = zoom
self.updateProjection()
viewZoom = property(lambda self: self._viewZoom, setZoom,
doc='Zoom factor for the display')
viewCenter = property(lambda self: self._viewCenter, setCenter,
doc='Screen center in camera coordinates')
def updateProjection(self):
"""
Recalculates the necessary projection.
"""
gl.glViewport(0, 0, self.window.width, self.window.height)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
ratio = float(self.window.width) / self.window.height
extents = b2Vec2(ratio * 25.0, 25.0)
extents *= self._viewZoom
lower = self._viewCenter - extents
upper = self._viewCenter + extents
# L/R/B/T
gl.gluOrtho2D(lower.x, upper.x, lower.y, upper.y)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
def run(self):
"""
Main loop.
"""
if self.settings.hz > 0.0:
pyglet.clock.schedule_interval(self.SimulationLoop, 1.0 / self.settings.hz)
#self.window.push_handlers(pyglet.window.event.WindowEventLogger())
self.window._enable_event_queue=False # TODO: figure out why this is required
pyglet.app.run()
self.world.contactListener = None
self.world.destructionListener=None
self.world.renderer=None
def SimulationLoop(self, dt):
"""
The main simulation loop. Don't override this, override Step instead.
And be sure to call super(classname, self).Step(settings) at the end
of your Step function.
"""
# Check the input and clear the screen
self.CheckKeys()
self.window.clear()
# Update the keyboard status
self.window.push_handlers(self.keys)
# Create a new batch for drawing
self.renderer.batch = pyglet.graphics.Batch()
# Reset the text position
self.textLine=15
# Draw the title of the test at the top
self.Print(self.name)
# Step the physics
self.Step(self.settings)
self.renderer.batch.draw()
self.window.invalid = True
self.fps = pyglet.clock.get_fps()
def _Keyboard_Event(self, key, down=True):
"""
Internal keyboard event, don't override this.
Checks for the initial keydown of the basic testbed keys. Passes the unused
ones onto the test via the Keyboard() function.
"""
if down:
if key==pyglet.window.key.ESCAPE:
exit(0)
elif key==pyglet.window.key.SPACE:
# Launch a bomb
self.LaunchRandomBomb()
elif key==Keys.K_z:
# Zoom in
self.viewZoom = min(1.1 * self.viewZoom, 20.0)
elif key==Keys.K_x:
# Zoom out
self.viewZoom = max(0.9 * self.viewZoom, 0.02)
else:
# Inform the test of the key press
self.Keyboard(key)
else:
self.KeyboardUp(key)
def CheckKeys(self):
"""
Check the keys that are evaluated on every main loop iteration.
I.e., they aren't just evaluated when first pressed down
"""
keys=self.keys
if keys[Keys.K_LEFT]:
self.viewCenter -= (0.5, 0)
elif keys[Keys.K_RIGHT]:
self.viewCenter += (0.5, 0)
if keys[Keys.K_UP]:
self.viewCenter += (0, 0.5)
elif keys[Keys.K_DOWN]:
self.viewCenter -= (0, 0.5)
if keys[Keys.K_HOME]:
self.viewZoom = 1.0
self.viewCenter = (0.0, 20.0)
#def Step(self, settings):
# super(PygletFramework, self).Step(settings)
def ConvertScreenToWorld(self, x, y):
"""
Takes screen (x, y) and returns
world coordinate b2Vec2(x,y).
"""
u = float(x) / self.window.width
v = float(y) / self.window.height
ratio = float(self.window.width) / self.window.height
extents = b2Vec2(ratio * 25.0, 25.0)
extents *= self._viewZoom
lower = self._viewCenter - extents
upper = self._viewCenter + extents
p = b2Vec2(
(1.0 - u) * lower.x + u * upper.x,
(1.0 - v) * lower.y + v * upper.y )
return p
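    # Worked example (illustrative): with a 640x480 window, viewZoom 1.0 and
    # viewCenter (0, 20), the window midpoint (320, 240) gives u = v = 0.5, so
    # p = 0.5*lower + 0.5*upper = viewCenter = (0, 20).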
def DrawStringAt(self, x, y, str, color=(229,153,153,255)):
"""
Draw some text, str, at screen coordinates (x, y).
"""
text = pyglet.text.Label(str, font_name=self.fontname, font_size=self.fontsize,
x=x, y=self.window.height-y, color=color, batch=self.renderer.batch, group=self.textGroup)
def Print(self, str, color=(229,153,153,255)):
"""
Draw some text, str, at screen coordinates (x, y).
"""
text = pyglet.text.Label(str, font_name=self.fontname, font_size=self.fontsize,
x=5, y=self.window.height-self.textLine, color=color, batch=self.renderer.batch, group=self.textGroup)
self.textLine += 15
def Keyboard(self, key):
"""
Callback indicating 'key' has been pressed down.
"""
pass
def KeyboardUp(self, key):
"""
Callback indicating 'key' has been released.
See Keyboard() for key information
"""
pass
| [
"[email protected]"
] | ||
1a56ce32cf2752a7fe134d978447571ef9758c2e | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/dialogflow/v2/dialogflow-v2-py/google/cloud/dialogflow_v2/services/entity_types/async_client.py | 21d695ce98ae5062c7c952f41a7a0a8c5056c830 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,710 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.dialogflow_v2.services.entity_types import pagers
from google.cloud.dialogflow_v2.types import entity_type
from google.cloud.dialogflow_v2.types import entity_type as gcd_entity_type
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from .transports.base import EntityTypesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import EntityTypesGrpcAsyncIOTransport
from .client import EntityTypesClient
class EntityTypesAsyncClient:
"""Service for managing
[EntityTypes][google.cloud.dialogflow.v2.EntityType].
"""
_client: EntityTypesClient
DEFAULT_ENDPOINT = EntityTypesClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = EntityTypesClient.DEFAULT_MTLS_ENDPOINT
entity_type_path = staticmethod(EntityTypesClient.entity_type_path)
parse_entity_type_path = staticmethod(EntityTypesClient.parse_entity_type_path)
common_billing_account_path = staticmethod(EntityTypesClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(EntityTypesClient.parse_common_billing_account_path)
common_folder_path = staticmethod(EntityTypesClient.common_folder_path)
parse_common_folder_path = staticmethod(EntityTypesClient.parse_common_folder_path)
common_organization_path = staticmethod(EntityTypesClient.common_organization_path)
parse_common_organization_path = staticmethod(EntityTypesClient.parse_common_organization_path)
common_project_path = staticmethod(EntityTypesClient.common_project_path)
parse_common_project_path = staticmethod(EntityTypesClient.parse_common_project_path)
common_location_path = staticmethod(EntityTypesClient.common_location_path)
parse_common_location_path = staticmethod(EntityTypesClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EntityTypesAsyncClient: The constructed client.
"""
return EntityTypesClient.from_service_account_info.__func__(EntityTypesAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EntityTypesAsyncClient: The constructed client.
"""
return EntityTypesClient.from_service_account_file.__func__(EntityTypesAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> EntityTypesTransport:
"""Returns the transport used by the client instance.
Returns:
EntityTypesTransport: The transport used by the client instance.
"""
return self._client.transport
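    # Usage sketch (illustrative; the project ID below is made up):
    #
    #   client = EntityTypesAsyncClient()
    #   pager = await client.list_entity_types(parent="projects/my-project/agent")
    #   async for entity_type in pager:
    #       print(entity_type.display_name)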
get_transport_class = functools.partial(type(EntityTypesClient).get_transport_class, type(EntityTypesClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, EntityTypesTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the entity types client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.EntityTypesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = EntityTypesClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_entity_types(self,
request: entity_type.ListEntityTypesRequest = None,
*,
parent: str = None,
language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEntityTypesAsyncPager:
r"""Returns the list of all entity types in the specified
agent.
Args:
request (:class:`google.cloud.dialogflow_v2.types.ListEntityTypesRequest`):
The request object. The request message for
[EntityTypes.ListEntityTypes][google.cloud.dialogflow.v2.EntityTypes.ListEntityTypes].
parent (:class:`str`):
Required. The agent to list all entity types from.
Format: ``projects/<Project ID>/agent``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
language_code (:class:`str`):
Optional. The language used to access language-specific
data. If not specified, the agent's default language is
used. For more information, see `Multilingual intent and
entity
data <https://cloud.google.com/dialogflow/docs/agents-multilingual#intent-entity>`__.
This corresponds to the ``language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.services.entity_types.pagers.ListEntityTypesAsyncPager:
The response message for
[EntityTypes.ListEntityTypes][google.cloud.dialogflow.v2.EntityTypes.ListEntityTypes].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, language_code])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = entity_type.ListEntityTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if language_code is not None:
request.language_code = language_code
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_entity_types,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEntityTypesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def get_entity_type(self,
request: entity_type.GetEntityTypeRequest = None,
*,
name: str = None,
language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> entity_type.EntityType:
r"""Retrieves the specified entity type.
Args:
request (:class:`google.cloud.dialogflow_v2.types.GetEntityTypeRequest`):
The request object. The request message for
[EntityTypes.GetEntityType][google.cloud.dialogflow.v2.EntityTypes.GetEntityType].
name (:class:`str`):
Required. The name of the entity type. Format:
``projects/<Project ID>/agent/entityTypes/<EntityType ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
language_code (:class:`str`):
Optional. The language used to access language-specific
data. If not specified, the agent's default language is
used. For more information, see `Multilingual intent and
entity
data <https://cloud.google.com/dialogflow/docs/agents-multilingual#intent-entity>`__.
This corresponds to the ``language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.EntityType:
Each intent parameter has a type, called the entity type, which dictates
exactly how data from an end-user expression is
extracted.
Dialogflow provides predefined system entities that
can match many common types of data. For example,
there are system entities for matching dates, times,
colors, email addresses, and so on. You can also
create your own custom entities for matching custom
data. For example, you could define a vegetable
entity that can match the types of vegetables
available for purchase with a grocery store agent.
For more information, see the [Entity
guide](\ https://cloud.google.com/dialogflow/docs/entities-overview).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, language_code])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = entity_type.GetEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if language_code is not None:
request.language_code = language_code
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_entity_type,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def create_entity_type(self,
request: gcd_entity_type.CreateEntityTypeRequest = None,
*,
parent: str = None,
entity_type: gcd_entity_type.EntityType = None,
language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_entity_type.EntityType:
r"""Creates an entity type in the specified agent.
Note: You should always train an agent prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
Args:
request (:class:`google.cloud.dialogflow_v2.types.CreateEntityTypeRequest`):
The request object. The request message for
[EntityTypes.CreateEntityType][google.cloud.dialogflow.v2.EntityTypes.CreateEntityType].
parent (:class:`str`):
Required. The agent to create a entity type for. Format:
``projects/<Project ID>/agent``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entity_type (:class:`google.cloud.dialogflow_v2.types.EntityType`):
Required. The entity type to create.
This corresponds to the ``entity_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
language_code (:class:`str`):
Optional. The language used to access language-specific
data. If not specified, the agent's default language is
used. For more information, see `Multilingual intent and
entity
data <https://cloud.google.com/dialogflow/docs/agents-multilingual#intent-entity>`__.
This corresponds to the ``language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.EntityType:
Each intent parameter has a type, called the entity type, which dictates
exactly how data from an end-user expression is
extracted.
Dialogflow provides predefined system entities that
can match many common types of data. For example,
there are system entities for matching dates, times,
colors, email addresses, and so on. You can also
create your own custom entities for matching custom
data. For example, you could define a vegetable
entity that can match the types of vegetables
available for purchase with a grocery store agent.
For more information, see the [Entity
guide](\ https://cloud.google.com/dialogflow/docs/entities-overview).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entity_type, language_code])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = gcd_entity_type.CreateEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if entity_type is not None:
request.entity_type = entity_type
if language_code is not None:
request.language_code = language_code
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_entity_type,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_entity_type(self,
request: gcd_entity_type.UpdateEntityTypeRequest = None,
*,
entity_type: gcd_entity_type.EntityType = None,
language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_entity_type.EntityType:
r"""Updates the specified entity type.
Note: You should always train an agent prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
Args:
request (:class:`google.cloud.dialogflow_v2.types.UpdateEntityTypeRequest`):
The request object. The request message for
[EntityTypes.UpdateEntityType][google.cloud.dialogflow.v2.EntityTypes.UpdateEntityType].
entity_type (:class:`google.cloud.dialogflow_v2.types.EntityType`):
Required. The entity type to update.
This corresponds to the ``entity_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
language_code (:class:`str`):
Optional. The language used to access language-specific
data. If not specified, the agent's default language is
used. For more information, see `Multilingual intent and
entity
data <https://cloud.google.com/dialogflow/docs/agents-multilingual#intent-entity>`__.
This corresponds to the ``language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.EntityType:
Each intent parameter has a type, called the entity type, which dictates
exactly how data from an end-user expression is
extracted.
Dialogflow provides predefined system entities that
can match many common types of data. For example,
there are system entities for matching dates, times,
colors, email addresses, and so on. You can also
create your own custom entities for matching custom
data. For example, you could define a vegetable
entity that can match the types of vegetables
available for purchase with a grocery store agent.
For more information, see the [Entity
guide](\ https://cloud.google.com/dialogflow/docs/entities-overview).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([entity_type, language_code])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = gcd_entity_type.UpdateEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if entity_type is not None:
request.entity_type = entity_type
if language_code is not None:
request.language_code = language_code
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_entity_type,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("entity_type.name", request.entity_type.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_entity_type(self,
request: entity_type.DeleteEntityTypeRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified entity type.
Note: You should always train an agent prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
Args:
request (:class:`google.cloud.dialogflow_v2.types.DeleteEntityTypeRequest`):
The request object. The request message for
[EntityTypes.DeleteEntityType][google.cloud.dialogflow.v2.EntityTypes.DeleteEntityType].
name (:class:`str`):
Required. The name of the entity type to delete. Format:
``projects/<Project ID>/agent/entityTypes/<EntityType ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = entity_type.DeleteEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_entity_type,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def batch_update_entity_types(self,
request: entity_type.BatchUpdateEntityTypesRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates/Creates multiple entity types in the specified agent.
Note: You should always train an agent prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
Args:
request (:class:`google.cloud.dialogflow_v2.types.BatchUpdateEntityTypesRequest`):
The request object. The request message for
[EntityTypes.BatchUpdateEntityTypes][google.cloud.dialogflow.v2.EntityTypes.BatchUpdateEntityTypes].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflow_v2.types.BatchUpdateEntityTypesResponse`
The response message for
[EntityTypes.BatchUpdateEntityTypes][google.cloud.dialogflow.v2.EntityTypes.BatchUpdateEntityTypes].
"""
# Create or coerce a protobuf request object.
request = entity_type.BatchUpdateEntityTypesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_update_entity_types,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
entity_type.BatchUpdateEntityTypesResponse,
metadata_type=struct_pb2.Struct,
)
# Done; return the response.
return response
async def batch_delete_entity_types(self,
request: entity_type.BatchDeleteEntityTypesRequest = None,
*,
parent: str = None,
entity_type_names: Sequence[str] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes entity types in the specified agent.
Note: You should always train an agent prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
Args:
request (:class:`google.cloud.dialogflow_v2.types.BatchDeleteEntityTypesRequest`):
The request object. The request message for
[EntityTypes.BatchDeleteEntityTypes][google.cloud.dialogflow.v2.EntityTypes.BatchDeleteEntityTypes].
parent (:class:`str`):
Required. The name of the agent to delete all entities
types for. Format: ``projects/<Project ID>/agent``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entity_type_names (:class:`Sequence[str]`):
Required. The names entity types to delete. All names
must point to the same agent as ``parent``.
This corresponds to the ``entity_type_names`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entity_type_names])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = entity_type.BatchDeleteEntityTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if entity_type_names:
request.entity_type_names.extend(entity_type_names)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_delete_entity_types,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
# Done; return the response.
return response
async def batch_create_entities(self,
request: entity_type.BatchCreateEntitiesRequest = None,
*,
parent: str = None,
entities: Sequence[entity_type.EntityType.Entity] = None,
language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates multiple new entities in the specified entity type.
Note: You should always train an agent prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
Args:
request (:class:`google.cloud.dialogflow_v2.types.BatchCreateEntitiesRequest`):
The request object. The request message for
[EntityTypes.BatchCreateEntities][google.cloud.dialogflow.v2.EntityTypes.BatchCreateEntities].
parent (:class:`str`):
Required. The name of the entity type to create entities
in. Format:
``projects/<Project ID>/agent/entityTypes/<Entity Type ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entities (:class:`Sequence[google.cloud.dialogflow_v2.types.EntityType.Entity]`):
Required. The entities to create.
This corresponds to the ``entities`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
language_code (:class:`str`):
Optional. The language used to access language-specific
data. If not specified, the agent's default language is
used. For more information, see `Multilingual intent and
entity
data <https://cloud.google.com/dialogflow/docs/agents-multilingual#intent-entity>`__.
This corresponds to the ``language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entities, language_code])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = entity_type.BatchCreateEntitiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if language_code is not None:
request.language_code = language_code
if entities:
request.entities.extend(entities)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_create_entities,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
# Done; return the response.
return response
async def batch_update_entities(self,
request: entity_type.BatchUpdateEntitiesRequest = None,
*,
parent: str = None,
entities: Sequence[entity_type.EntityType.Entity] = None,
language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates or creates multiple entities in the specified entity
type. This method does not affect entities in the entity type
that aren't explicitly specified in the request.
Note: You should always train an agent prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
Args:
request (:class:`google.cloud.dialogflow_v2.types.BatchUpdateEntitiesRequest`):
The request object. The request message for
[EntityTypes.BatchUpdateEntities][google.cloud.dialogflow.v2.EntityTypes.BatchUpdateEntities].
parent (:class:`str`):
Required. The name of the entity type to update or
create entities in. Format:
``projects/<Project ID>/agent/entityTypes/<Entity Type ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entities (:class:`Sequence[google.cloud.dialogflow_v2.types.EntityType.Entity]`):
Required. The entities to update or
create.
This corresponds to the ``entities`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
language_code (:class:`str`):
Optional. The language used to access language-specific
data. If not specified, the agent's default language is
used. For more information, see `Multilingual intent and
entity
data <https://cloud.google.com/dialogflow/docs/agents-multilingual#intent-entity>`__.
This corresponds to the ``language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entities, language_code])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = entity_type.BatchUpdateEntitiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if language_code is not None:
request.language_code = language_code
if entities:
request.entities.extend(entities)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_update_entities,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
# Done; return the response.
return response
async def batch_delete_entities(self,
request: entity_type.BatchDeleteEntitiesRequest = None,
*,
parent: str = None,
entity_values: Sequence[str] = None,
language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes entities in the specified entity type.
Note: You should always train an agent prior to sending it
queries. See the `training
documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
Args:
request (:class:`google.cloud.dialogflow_v2.types.BatchDeleteEntitiesRequest`):
The request object. The request message for
[EntityTypes.BatchDeleteEntities][google.cloud.dialogflow.v2.EntityTypes.BatchDeleteEntities].
parent (:class:`str`):
Required. The name of the entity type to delete entries
for. Format:
``projects/<Project ID>/agent/entityTypes/<Entity Type ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entity_values (:class:`Sequence[str]`):
Required. The reference ``values`` of the entities to
delete. Note that these are not fully-qualified names,
i.e. they don't start with ``projects/<Project ID>``.
This corresponds to the ``entity_values`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
language_code (:class:`str`):
Optional. The language used to access language-specific
data. If not specified, the agent's default language is
used. For more information, see `Multilingual intent and
entity
data <https://cloud.google.com/dialogflow/docs/agents-multilingual#intent-entity>`__.
This corresponds to the ``language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entity_values, language_code])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = entity_type.BatchDeleteEntitiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if language_code is not None:
request.language_code = language_code
if entity_values:
request.entity_values.extend(entity_values)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_delete_entities,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"EntityTypesAsyncClient",
)
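# NOTE: hypothetical usage sketch, not part of the generated client.  It
# assumes application-default credentials and an existing agent; the resource
# name below is a placeholder, not a real one.
if __name__ == "__main__":
    import asyncio
    async def _demo():
        client = EntityTypesAsyncClient()
        # the flattened `entity_type` argument fills request.entity_type
        et = gcd_entity_type.EntityType(
            name="projects/<Project ID>/agent/entityTypes/<EntityType ID>",
            display_name="vegetable",
            kind=gcd_entity_type.EntityType.Kind.KIND_MAP,
        )
        updated = await client.update_entity_type(entity_type=et)
        print(updated.display_name)
    asyncio.run(_demo())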
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
6f636b5072e7d57d722bcf8845eabfe6746a93a9 | 81f2cd08a11f6be0d11a2664001491329957b200 | /pyscf/pbc/df/mdf.py | afdf3d5c06d62899305f92b0be06cb20c77f5436 | [
"Apache-2.0"
] | permissive | crisely09/pyscf | 18b564556b249bafab24e1c7d08fdf0a57dfcf0a | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | refs/heads/master | 2021-07-10T01:54:45.698418 | 2019-11-27T22:49:43 | 2019-11-27T22:49:43 | 224,692,664 | 0 | 0 | Apache-2.0 | 2019-11-28T16:32:10 | 2019-11-28T16:32:09 | null | UTF-8 | Python | false | false | 17,494 | py | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Gaussian and planewaves mixed density fitting
Ref:
J. Chem. Phys. 147, 164119 (2017)
'''
import os
import time
import tempfile
import numpy
import h5py
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.df.outcore import _guess_shell_ranges
from pyscf.pbc import tools
from pyscf.pbc import gto
from pyscf.pbc.df import outcore
from pyscf.pbc.df import ft_ao
from pyscf.pbc.df import df
from pyscf.pbc.df import aft
from pyscf.pbc.df.df import fuse_auxcell, _round_off_to_odd_mesh
from pyscf.pbc.df.df_jk import zdotNN, zdotCN, zdotNC
from pyscf.pbc.lib.kpts_helper import (is_zero, gamma_point, member, unique,
KPT_DIFF_TOL)
from pyscf.pbc.df import mdf_jk
from pyscf.pbc.df import mdf_ao2mo
from pyscf import __config__
# kpti == kptj: s2 symmetry
# kpti == kptj == 0 (gamma point): real
def _make_j3c(mydf, cell, auxcell, kptij_lst, cderi_file):
t1 = (time.clock(), time.time())
log = logger.Logger(mydf.stdout, mydf.verbose)
max_memory = max(2000, mydf.max_memory-lib.current_memory()[0])
fused_cell, fuse = fuse_auxcell(mydf, auxcell)
# Create swap file to avoid huge cderi_file. see also function
# pyscf.pbc.df.df._make_j3c
swapfile = tempfile.NamedTemporaryFile(dir=os.path.dirname(cderi_file))
fswap = lib.H5TmpFile(swapfile.name)
# Unlink swapfile to avoid trash
swapfile = None
outcore._aux_e2(cell, fused_cell, fswap, 'int3c2e', aosym='s2',
kptij_lst=kptij_lst, dataname='j3c-junk', max_memory=max_memory)
t1 = log.timer_debug1('3c2e', *t1)
nao = cell.nao_nr()
naux = auxcell.nao_nr()
mesh = mydf.mesh
Gv, Gvbase, kws = cell.get_Gv_weights(mesh)
b = cell.reciprocal_vectors()
gxyz = lib.cartesian_prod([numpy.arange(len(x)) for x in Gvbase])
ngrids = gxyz.shape[0]
kptis = kptij_lst[:,0]
kptjs = kptij_lst[:,1]
kpt_ji = kptjs - kptis
uniq_kpts, uniq_index, uniq_inverse = unique(kpt_ji)
log.debug('Num uniq kpts %d', len(uniq_kpts))
log.debug2('uniq_kpts %s', uniq_kpts)
# j2c ~ (-kpt_ji | kpt_ji)
j2c = fused_cell.pbc_intor('int2c2e', hermi=1, kpts=uniq_kpts)
for k, kpt in enumerate(uniq_kpts):
aoaux = ft_ao.ft_ao(fused_cell, Gv, None, b, gxyz, Gvbase, kpt).T
aoaux = fuse(aoaux)
coulG = mydf.weighted_coulG(kpt, False, mesh)
LkR = numpy.asarray(aoaux.real, order='C')
LkI = numpy.asarray(aoaux.imag, order='C')
j2c_k = fuse(fuse(j2c[k]).T).T.copy()
if is_zero(kpt): # kpti == kptj
j2c_k -= lib.dot(LkR*coulG, LkR.T)
j2c_k -= lib.dot(LkI*coulG, LkI.T)
else:
# aoaux ~ kpt_ij, aoaux.conj() ~ kpt_kl
j2cR, j2cI = zdotCN(LkR*coulG, LkI*coulG, LkR.T, LkI.T)
j2c_k -= j2cR + j2cI * 1j
fswap['j2c/%d'%k] = j2c_k
aoaux = LkR = LkI = j2cR = j2cI = coulG = None
j2c = None
def cholesky_decomposed_metric(uniq_kptji_id):
j2c = numpy.asarray(fswap['j2c/%d'%uniq_kptji_id])
j2c_negative = None
# Note large difference may be found in results between the CD/eig treatments.
# In some systems, small integral errors can lead to different treatments of
# linear dependency which can be observed in the total energy/orbital energy
# around 4th decimal place.
# try:
# j2c = scipy.linalg.cholesky(j2c, lower=True)
# j2ctag = 'CD'
# except scipy.linalg.LinAlgError as e:
#
        # Abandon CD treatment for better numerical stability
w, v = scipy.linalg.eigh(j2c)
log.debug('MDF metric for kpt %s cond = %.4g, drop %d bfns',
uniq_kptji_id, w[-1]/w[0], numpy.count_nonzero(w<mydf.linear_dep_threshold))
v1 = v[:,w>mydf.linear_dep_threshold].T.conj()
v1 /= numpy.sqrt(w[w>mydf.linear_dep_threshold]).reshape(-1,1)
j2c = v1
if cell.dimension == 2 and cell.low_dim_ft_type != 'inf_vacuum':
idx = numpy.where(w < -mydf.linear_dep_threshold)[0]
if len(idx) > 0:
j2c_negative = (v[:,idx]/numpy.sqrt(-w[idx])).conj().T
j2ctag = 'eig'
return j2c, j2c_negative, j2ctag
feri = h5py.File(cderi_file, 'a')
feri['j3c-kptij'] = kptij_lst
nsegs = len(fswap['j3c-junk/0'])
def make_kpt(uniq_kptji_id, cholesky_j2c): # kpt = kptj - kpti
kpt = uniq_kpts[uniq_kptji_id]
log.debug1('kpt = %s', kpt)
adapted_ji_idx = numpy.where(uniq_inverse == uniq_kptji_id)[0]
adapted_kptjs = kptjs[adapted_ji_idx]
nkptj = len(adapted_kptjs)
log.debug1('adapted_ji_idx = %s', adapted_ji_idx)
j2c, j2c_negative, j2ctag = cholesky_j2c
Gaux = ft_ao.ft_ao(fused_cell, Gv, None, b, gxyz, Gvbase, kpt).T
Gaux = fuse(Gaux)
Gaux *= mydf.weighted_coulG(kpt, False, mesh)
kLR = Gaux.T.real.copy('C')
kLI = Gaux.T.imag.copy('C')
if is_zero(kpt): # kpti == kptj
aosym = 's2'
nao_pair = nao*(nao+1)//2
if cell.dimension == 3:
vbar = fuse(mydf.auxbar(fused_cell))
ovlp = cell.pbc_intor('int1e_ovlp', hermi=1, kpts=adapted_kptjs)
ovlp = [lib.pack_tril(s) for s in ovlp]
else:
aosym = 's1'
nao_pair = nao**2
mem_now = lib.current_memory()[0]
log.debug2('memory = %s', mem_now)
max_memory = max(2000, mydf.max_memory-mem_now)
# nkptj for 3c-coulomb arrays plus 1 Lpq array
buflen = min(max(int(max_memory*.38e6/16/naux/(nkptj+1)), 1), nao_pair)
shranges = _guess_shell_ranges(cell, buflen, aosym)
buflen = max([x[2] for x in shranges])
# +1 for a pqkbuf
if aosym == 's2':
Gblksize = max(16, int(max_memory*.1e6/16/buflen/(nkptj+1)))
else:
Gblksize = max(16, int(max_memory*.2e6/16/buflen/(nkptj+1)))
Gblksize = min(Gblksize, ngrids, 16384)
pqkRbuf = numpy.empty(buflen*Gblksize)
pqkIbuf = numpy.empty(buflen*Gblksize)
# buf for ft_aopair
buf = numpy.empty((nkptj,buflen*Gblksize), dtype=numpy.complex128)
def pw_contract(istep, sh_range, j3cR, j3cI):
bstart, bend, ncol = sh_range
if aosym == 's2':
shls_slice = (bstart, bend, 0, bend)
else:
shls_slice = (bstart, bend, 0, cell.nbas)
for p0, p1 in lib.prange(0, ngrids, Gblksize):
dat = ft_ao._ft_aopair_kpts(cell, Gv[p0:p1], shls_slice, aosym,
b, gxyz[p0:p1], Gvbase, kpt,
adapted_kptjs, out=buf)
nG = p1 - p0
for k, ji in enumerate(adapted_ji_idx):
aoao = dat[k].reshape(nG,ncol)
pqkR = numpy.ndarray((ncol,nG), buffer=pqkRbuf)
pqkI = numpy.ndarray((ncol,nG), buffer=pqkIbuf)
pqkR[:] = aoao.real.T
pqkI[:] = aoao.imag.T
lib.dot(kLR[p0:p1].T, pqkR.T, -1, j3cR[k], 1)
lib.dot(kLI[p0:p1].T, pqkI.T, -1, j3cR[k], 1)
if not (is_zero(kpt) and gamma_point(adapted_kptjs[k])):
lib.dot(kLR[p0:p1].T, pqkI.T, -1, j3cI[k], 1)
lib.dot(kLI[p0:p1].T, pqkR.T, 1, j3cI[k], 1)
for k, ji in enumerate(adapted_ji_idx):
if is_zero(kpt) and gamma_point(adapted_kptjs[k]):
v = j3cR[k]
else:
v = j3cR[k] + j3cI[k] * 1j
if j2ctag == 'CD':
v = scipy.linalg.solve_triangular(j2c, v, lower=True, overwrite_b=True)
feri['j3c/%d/%d'%(ji,istep)] = v
else:
feri['j3c/%d/%d'%(ji,istep)] = lib.dot(j2c, v)
# low-dimension systems
if j2c_negative is not None:
feri['j3c-/%d/%d'%(ji,istep)] = lib.dot(j2c_negative, v)
with lib.call_in_background(pw_contract) as compute:
col1 = 0
for istep, sh_range in enumerate(shranges):
log.debug1('int3c2e [%d/%d], AO [%d:%d], ncol = %d', \
istep+1, len(shranges), *sh_range)
bstart, bend, ncol = sh_range
col0, col1 = col1, col1+ncol
j3cR = []
j3cI = []
for k, idx in enumerate(adapted_ji_idx):
v = [fswap['j3c-junk/%d/%d'%(idx,i)][0,col0:col1].T for i in range(nsegs)]
v = fuse(numpy.vstack(v))
if is_zero(kpt) and cell.dimension == 3:
for i in numpy.where(vbar != 0)[0]:
v[i] -= vbar[i] * ovlp[k][col0:col1]
j3cR.append(numpy.asarray(v.real, order='C'))
if is_zero(kpt) and gamma_point(adapted_kptjs[k]):
j3cI.append(None)
else:
j3cI.append(numpy.asarray(v.imag, order='C'))
v = None
compute(istep, sh_range, j3cR, j3cI)
for ji in adapted_ji_idx:
del(fswap['j3c-junk/%d'%ji])
# Wrapped around boundary and symmetry between k and -k can be used
# explicitly for the metric integrals. We consider this symmetry
# because it is used in the df_ao2mo module when contracting two 3-index
# integral tensors to the 4-index 2e integral tensor. If the symmetry
# related k-points are treated separately, the resultant 3-index tensors
    # may have inconsistent dimension due to the numerical noise when handling
# linear dependency of j2c.
def conj_j2c(cholesky_j2c):
j2c, j2c_negative, j2ctag = cholesky_j2c
if j2c_negative is None:
return j2c.conj(), None, j2ctag
else:
return j2c.conj(), j2c_negative.conj(), j2ctag
a = cell.lattice_vectors() / (2*numpy.pi)
def kconserve_indices(kpt):
'''search which (kpts+kpt) satisfies momentum conservation'''
kdif = numpy.einsum('wx,ix->wi', a, uniq_kpts + kpt)
kdif_int = numpy.rint(kdif)
mask = numpy.einsum('wi->i', abs(kdif - kdif_int)) < KPT_DIFF_TOL
uniq_kptji_ids = numpy.where(mask)[0]
return uniq_kptji_ids
done = numpy.zeros(len(uniq_kpts), dtype=bool)
for k, kpt in enumerate(uniq_kpts):
if done[k]:
continue
log.debug1('Cholesky decomposition for j2c at kpt %s', k)
cholesky_j2c = cholesky_decomposed_metric(k)
# The k-point k' which has (k - k') * a = 2n pi. Metric integrals have the
# symmetry S = S
uniq_kptji_ids = kconserve_indices(-kpt)
log.debug1("Symmetry pattern (k - %s)*a= 2n pi", kpt)
log.debug1(" make_kpt for uniq_kptji_ids %s", uniq_kptji_ids)
for uniq_kptji_id in uniq_kptji_ids:
if not done[uniq_kptji_id]:
make_kpt(uniq_kptji_id, cholesky_j2c)
done[uniq_kptji_ids] = True
# The k-point k' which has (k + k') * a = 2n pi. Metric integrals have the
# symmetry S = S*
uniq_kptji_ids = kconserve_indices(kpt)
log.debug1("Symmetry pattern (k + %s)*a= 2n pi", kpt)
log.debug1(" make_kpt for %s", uniq_kptji_ids)
cholesky_j2c = conj_j2c(cholesky_j2c)
for uniq_kptji_id in uniq_kptji_ids:
if not done[uniq_kptji_id]:
make_kpt(uniq_kptji_id, cholesky_j2c)
done[uniq_kptji_ids] = True
feri.close()
# Gaussians with exponents <= valence_exp = 1. are typically valence functions
VALENCE_EXP = getattr(__config__, 'pbc_df_mdf_valence_exp', 1.0)
def _mesh_for_valence(cell, valence_exp=VALENCE_EXP):
'''Energy cutoff estimation'''
precision = cell.precision * 10
Ecut_max = 0
for i in range(cell.nbas):
l = cell.bas_angular(i)
es = cell.bas_exp(i).copy()
es[es>valence_exp] = valence_exp
cs = abs(cell.bas_ctr_coeff(i)).max(axis=1)
ke_guess = gto.cell._estimate_ke_cutoff(es, l, cs, precision)
Ecut_max = max(Ecut_max, ke_guess.max())
mesh = tools.cutoff_to_mesh(cell.lattice_vectors(), Ecut_max)
mesh = numpy.min((mesh, cell.mesh), axis=0)
if cell.dimension < 2 or cell.low_dim_ft_type == 'inf_vacuum':
mesh[cell.dimension:] = cell.mesh[cell.dimension:]
return _round_off_to_odd_mesh(mesh)
del(VALENCE_EXP)
class MDF(df.DF):
'''Gaussian and planewaves mixed density fitting
'''
def __init__(self, cell, kpts=numpy.zeros((1,3))):
self.cell = cell
self.stdout = cell.stdout
self.verbose = cell.verbose
self.max_memory = cell.max_memory
self.kpts = kpts # default is gamma point
self.kpts_band = None
self._auxbasis = None
self.mesh = _mesh_for_valence(cell)
# In MDF, fitting PWs (self.mesh), and parameters eta and exp_to_discard
# are related to each other. The compensated function does not need to
# be very smooth. It just needs to be expanded by the specified PWs
# (self.mesh). self.eta is estimated on the fly based on the value of
# self.mesh.
self.eta = None
# Any functions which are more diffused than the compensated Gaussian
# are linearly dependent to the PWs. They can be removed from the
# auxiliary set without affecting the accuracy of MDF. exp_to_discard
# can be set to the value of self.eta
self.exp_to_discard = None
# The following attributes are not input options.
self.exxdiv = None # to mimic KRHF/KUHF object in function get_coulG
self.auxcell = None
self.blockdim = getattr(__config__, 'df_df_DF_blockdim', 240)
self.linear_dep_threshold = df.LINEAR_DEP_THR
self._j_only = False
# If _cderi_to_save is specified, the 3C-integral tensor will be saved in this file.
self._cderi_to_save = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
# If _cderi is specified, the 3C-integral tensor will be read from this file
self._cderi = None
self._keys = set(self.__dict__.keys())
@property
def eta(self):
if self._eta is not None:
return self._eta
else:
cell = self.cell
if cell.dimension == 0:
return 0.2
ke_cutoff = tools.mesh_to_cutoff(cell.lattice_vectors(), self.mesh)
ke_cutoff = ke_cutoff[:cell.dimension].min()
return aft.estimate_eta_for_ke_cutoff(cell, ke_cutoff, cell.precision)
@eta.setter
def eta(self, x):
self._eta = x
@property
def exp_to_discard(self):
if self._exp_to_discard is not None:
return self._exp_to_discard
else:
return self.eta
@exp_to_discard.setter
def exp_to_discard(self, x):
self._exp_to_discard = x
_make_j3c = _make_j3c
# Note: Special exxdiv by default should not be used for an arbitrary
# input density matrix. When the df object was used with the molecular
# post-HF code, get_jk was often called with an incomplete DM (e.g. the
# core DM in CASCI). An SCF level exxdiv treatment is inadequate for
# post-HF methods.
def get_jk(self, dm, hermi=1, kpts=None, kpts_band=None,
with_j=True, with_k=True, exxdiv=None):
if kpts is None:
if numpy.all(self.kpts == 0):
# Gamma-point calculation by default
kpts = numpy.zeros(3)
else:
kpts = self.kpts
kpts = numpy.asarray(kpts)
if kpts.shape == (3,):
return mdf_jk.get_jk(self, dm, hermi, kpts, kpts_band, with_j,
with_k, exxdiv)
vj = vk = None
if with_k:
vk = mdf_jk.get_k_kpts(self, dm, hermi, kpts, kpts_band, exxdiv)
if with_j:
vj = mdf_jk.get_j_kpts(self, dm, hermi, kpts, kpts_band)
return vj, vk
get_eri = get_ao_eri = mdf_ao2mo.get_eri
ao2mo = get_mo_eri = mdf_ao2mo.general
ao2mo_7d = mdf_ao2mo.ao2mo_7d
def update_mp(self):
pass
def update_cc(self):
pass
def update(self):
pass
################################################################################
# With this function to mimic the molecular DF.loop function, the pbc gamma
# point DF object can be used in the molecular code
def loop(self, blksize=None):
for dat in aft.AFTDF.loop(self, blksize):
yield dat
for dat in df.DF.loop(self, blksize):
yield dat
def get_naoaux(self):
return df.DF.get_naoaux(self) + aft.AFTDF.get_naoaux(self)
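# NOTE: hypothetical usage sketch; the helium cell and its parameters are
# made up for illustration, only to show how mesh and eta are derived.
if __name__ == '__main__':
    cell = gto.Cell()
    cell.atom = 'He 0 0 1; He 1 0 1'
    cell.basis = 'ccpvdz'
    cell.a = numpy.eye(3) * 2.5
    cell.build()
    mydf = MDF(cell)
    # eta is estimated on the fly from the valence mesh chosen in __init__
    print('mesh =', mydf.mesh, ' eta = %.3g' % mydf.eta)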
| [
"[email protected]"
] | |
b2e0391d750efe19f614deb8c2bd1631da82841d | 5916383e8d3df886edd20ac00ce9706a78078f56 | /飞机大战/v2/world.py | 9e05cd9b131661fae9882e44e040079213137409 | [] | no_license | sczhan/wode | 556154e8ccaa9192ea257bc88df3c5e4b268f88e | af4c721d0cedfdd2fe01dd681539724d1d64c378 | refs/heads/master | 2021-07-06T22:26:34.465708 | 2020-09-04T18:56:38 | 2020-09-04T18:56:38 | 181,295,279 | 1 | 0 | null | 2019-09-09T16:30:00 | 2019-04-14T10:53:57 | Python | UTF-8 | Python | false | false | 1,656 | py |
import tkinter
"""
The bee moves from the top of the window toward the bottom;
the arrow keys steer it left and right.
"""
step = 0  # counter: number of animation ticks so far
direction = (1, 1)
x = 0
y = 10
def set_right(e):
"""
:param e:
:return:
"""
global x
x += 20
def set_left(e):
"""
:param e:
:return:
"""
global x
x -= 20
root_window = tkinter.Tk()
root_window.title("world")
root_window.bind("<Key-Left>", set_left)
root_window.bind("<Key-Right>", set_right)
# fix the window size: width and height cannot be changed
root_window.resizable(width=False, height=False)
window_canvas = tkinter.Canvas(root_window, width=450, height=600)
window_canvas.pack()
def main():
    # build the start screen
bg_img_name = "../img/background.gif"
bg_img = tkinter.PhotoImage(file=bg_img_name)
    # the tags argument lets us refer to this image later by its tag
window_canvas.create_image(480/2, 600/2, anchor=tkinter.CENTER, image=bg_img, tags="bg")
    # draw the bee sprite
bee = "../img/bee.gif"
bee_img = tkinter.PhotoImage(file=bee)
window_canvas.create_image(150, 180/2, anchor=tkinter.CENTER, image=bee_img, tags="bee")
sp = "../img/smallplane.gif"
sp_img = tkinter.PhotoImage(file=sp)
window_canvas.create_image(50, 100/2, anchor=tkinter.CENTER, image=sp_img, tags="sp")
    # start the small plane's animation
ap_move()
tkinter.mainloop()
def ap_move():
"""
:return:
"""
global step
global x
global y
y += 20
print(x, y)
window_canvas.move("sp", x, y)
window_canvas.move("bee", x, y)
step += 1
window_canvas.after(1000, ap_move)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
944dd21d731631667b2b61b7df4bbb9c9272ea4d | f0d6efe035d4c2ed1ea6bb6d1d5a613b8630a025 | /lib/jsonrpc/flexjsonrpc/__init__.py | 53ece394443611d381a3d2a3a98aed5682669d8f | [
"BSD-2-Clause-Views",
"BSD-3-Clause"
] | permissive | bemoss/BEMOSS3.5 | d24c1c5587e5081092cc97250db45645363da4e4 | 75a09bc5d0a2ec0ae994ac900a93dc027b527860 | refs/heads/master | 2021-08-15T23:05:40.661118 | 2021-03-29T20:28:14 | 2021-03-29T20:28:14 | 91,000,462 | 81 | 38 | NOASSERTION | 2021-03-29T20:29:54 | 2017-05-11T16:25:43 | Python | UTF-8 | Python | false | false | 2,921 | py | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2013, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
from core import *
| [
"[email protected]"
] | |
de387f75b9153d81353f74324c32842675a55b8c | 888e79392cb660be5799cc5bd25d76bcfa9e2e2c | /doctorus/doctorus/doctype/actividad/test_actividad.py | 64e868e23ae78de54542b56cedaaeb515a1bd9a4 | [
"MIT"
] | permissive | Nirchains/doctorus | 269eadee5754612c521d1c6193d5fe7bbfdb3b8a | 38d39270742dfdae6597a06713952df01a2c3e9d | refs/heads/master | 2020-03-17T07:09:30.046005 | 2019-05-08T06:51:50 | 2019-05-08T06:51:50 | 133,386,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, HISPALIS DIGITAL and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestActividad(unittest.TestCase):
pass
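# NOTE: hypothetical test body, shown only as the usual frappe pattern; the
# Actividad doctype's required fields are not known from this file:
# def test_insert(self):
#     doc = frappe.get_doc({"doctype": "Actividad"}).insert()
#     self.assertEqual(doc.doctype, "Actividad")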
| [
"[email protected]"
] | |
b2595d9eccaf22427e7e16962a002d011843363f | c2df9e04adec78e789d1fbdb0711c45e5b9263a7 | /venv/Lib/site-packages/matplotlib/tests/test_texmanager.py | d24f7dc27a562a23298a3978078f1dbbcabf9e93 | [
"MIT",
"BSD-3-Clause"
] | permissive | AdarshSai/Final_Project | 433009a2f416e894ee3be85cd9317cb8e8df5516 | f966834ca72dd232102ed500ef47ef2b3bdbed5b | refs/heads/main | 2023-01-23T12:21:41.342074 | 2020-11-19T22:24:15 | 2020-11-19T22:24:15 | 308,898,012 | 0 | 1 | MIT | 2020-11-19T22:24:17 | 2020-10-31T14:19:58 | Python | UTF-8 | Python | false | false | 475 | py | import matplotlib.pyplot as plt
from matplotlib.texmanager import TexManager
def test_fontconfig_preamble():
"""
Test that the preamble is included in _fontconfig
"""
plt.rcParams['text.usetex'] = True
tm1 = TexManager()
font_config1 = tm1.get_font_config()
plt.rcParams['text.latex.preamble'] = '\\usepackage{txfonts}'
tm2 = TexManager()
font_config2 = tm2.get_font_config()
assert font_config1 != font_config2
| [
"[email protected]"
] | |
3b600461905bbc4961263bfe2745dd295cc11579 | d9296d3b420d8f5c1aeca094d00dd6bc38a3d57d | /read_statistics/migrations/0001_initial.py | ea8634dbdab68cbb44d0ce86241b1fce182ee74d | [] | no_license | Anthony88888/mysite | 57f5f40530886b12cf1364c10c6206983b022c6c | 7130715ef3acac054b96fa22dcf19fec1f31e019 | refs/heads/master | 2023-01-09T12:15:11.720225 | 2020-10-25T14:48:35 | 2020-10-25T14:48:35 | 305,168,092 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | # Generated by Django 2.0.13 on 2020-10-06 16:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='ReadNum',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('read_num', models.IntegerField(default=0)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
],
),
]
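# NOTE: for reference, the model created by this migration is equivalent to
# the sketch below (reconstructed from the operations above; the project's
# models.py may differ in details such as related_name or Meta options):
# class ReadNum(models.Model):
#     read_num = models.IntegerField(default=0)
#     object_id = models.PositiveIntegerField()
#     content_type = models.ForeignKey('contenttypes.ContentType',
#                                      on_delete=models.DO_NOTHING)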
| [
"[email protected]"
] | |
503a11282b2b012d89e3014060423162487ba9a6 | fec863b67ec1ae65da7111bd8c77d0ab2ef1f6ce | /movie recommendation system/.history/model3_20210430162616.py | ef78677ec57da3e3bcb5a7edf1bc1dcf42a79f03 | [] | no_license | kannan768/movie-recommendation-system | e6cf71620e25a0185fed3b37896137f1f39b0801 | 7460d440d44e77390e459ab10c535b6971c9c3ab | refs/heads/main | 2023-05-14T02:21:50.930672 | 2021-06-09T05:02:30 | 2021-06-09T05:02:30 | 375,225,316 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,918 | py | #item-item filtering
#colloborative filtering
from math import sqrt
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics import pairwise_distances
from scipy.spatial.distance import cosine, correlation
ratings = pd.read_csv('m1-1m/ratings.dat', sep='::', names=['userId', 'movieId', 'rating', 'timestamp'],engine = 'python', encoding = 'latin-1')
users = pd.read_csv('m1-1m/users.dat', sep='::', names=['userId', 'gender', 'age', 'occupation', 'zipcode'],engine = 'python', encoding = 'latin-1')
movies = pd.read_csv('m1-1m/movies.dat', sep='::', names=['movieId', 'title', 'genres'],engine = 'python', encoding = 'latin-1')
df_movies=movies
df_ratings=ratings
df_movies_ratings=pd.merge(df_movies, df_ratings)
# movie x user rating matrix; movies a user has not rated become 0
ratings_matrix_items = df_movies_ratings.pivot_table(index=['movieId'], columns=['userId'], values='rating').reset_index(drop=True)
ratings_matrix_items.fillna(0, inplace=True)
# pairwise cosine similarity between movies; zero the diagonal so a movie
# never recommends itself
movie_similarity = 1 - pairwise_distances(ratings_matrix_items.to_numpy(), metric="cosine")
np.fill_diagonal(movie_similarity, 0)
ratings_matrix_items = pd.DataFrame(movie_similarity)
def item_similarity(movieName):
try:
user_inp=movieName
inp=df_movies[df_movies['title']==user_inp].index.tolist()
inp=inp[0]
df_movies['similarity'] = ratings_matrix_items.iloc[inp]
df_movies.columns = ['movie_id', 'title', 'release_date','similarity']
except:
print("Sorry, the movie is not in the database!")
def recommendedMoviesAsperItemSimilarity(user_id):
user_movie= df_movies_ratings[(df_movies_ratings.userId==user_id) & df_movies_ratings.rating.isin([5,4.5])][['title']]
user_movie=user_movie.iloc[0,0]
item_similarity(user_movie)
sorted_movies_as_per_userChoice=df_movies.sort_values( ["similarity"], ascending = False )
sorted_movies_as_per_userChoice=sorted_movies_as_per_userChoice[sorted_movies_as_per_userChoice['similarity'] >=0.45]['movie_id']
recommended_movies=list()
df_recommended_item=pd.DataFrame()
user2Movies= df_ratings[df_ratings['userId']== user_id]['movieId']
for movieId in sorted_movies_as_per_userChoice:
if movieId not in user2Movies:
df_new= df_ratings[(df_ratings.movieId==movieId)]
df_recommended_item=pd.concat([df_recommended_item,df_new])
best10=df_recommended_item.sort_values(['rating'], ascending = False )[1:10]
return best10['movieId']
def movieIdToTitle(listMovieIDs):
movie_titles= list()
for id in listMovieIDs:
movie_titles.append(df_movies[df_movies['movie_id']==id]['title'])
return movie_titles
user_id=50
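# NOTE: item_similarity() can also be called on its own for a single title;
# hypothetical example (the title must exist in movies.dat):
# item_similarity("Toy Story (1995)")
# print(df_movies.sort_values("similarity", ascending=False).head())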
print("Recommended movies,:\n",movieIdToTitle(recommendedMoviesAsperItemSimilarity(user_id))) | [
"[email protected]"
] | |
a48974d41c1667c0b092f366d4efcc8a8d480fcd | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_248/ch119_2020_03_30_20_53_23_088219.py | f49b4da0a032fcc39a7720976214d6d9206f89d1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | lista[0]*n
lista[0]=1
t=0
while t<n:
lista[t+1]=lista[t]*x/n
t+=1
def calcula_euler(lista,n):
soma_das_notas = sum(lista)
print(soma_das_notas)
| [
"[email protected]"
] | |
57b176a71b273a1c9636c541ba74fd7a62612b4b | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /PORMain/panda/direct/extensions/NurbsCurveEvaluator-extensions.py | 86eb757f5c5e803e4e77288108ddd26264177ebb | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 799 | py |
"""
NurbsCurveEvaluator-extensions module: contains methods to extend
functionality of the NurbsCurveEvaluator class
"""
def getKnots(self):
"""Returns the knot vector as a Python list of floats"""
knots = []
for i in xrange(self.getNumKnots()):
knots.append(self.getKnot(i))
return knots
def getVertices(self, relTo = None):
"""Returns the vertices as a Python list of Vec4's, relative
to the indicated space if given."""
verts = []
if relTo:
for i in xrange(self.getNumVertices()):
verts.append(self.getVertex(i, relTo))
else:
for i in xrange(self.getNumVertices()):
verts.append(self.getVertex(i))
return verts
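# NOTE: hypothetical usage once these helpers are grafted onto
# NurbsCurveEvaluator (requires Panda3D; the reset(4) setup is illustrative):
# from pandac.PandaModules import NurbsCurveEvaluator
# ev = NurbsCurveEvaluator()
# ev.reset(4)              # four control vertices
# knots = ev.getKnots()    # -> list of floats
# verts = ev.getVertices() # -> list of Vec4's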
| [
"[email protected]"
] | |
e3968b5a6ee4acfc5472f3331048077d2290fe32 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_203/78.py | 29bc91b6b0a4fa993c5a99a015c4bc7188f4154e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,130 | py | #!/usr/bin/env python2
import itertools
def solve_row(prev_row, row):
prev_row = list(prev_row)
prev_chars = set(prev_row)
#print prev_row, row
for i, row_i in enumerate(row):
if row_i == '?':
continue
min_i = i
max_i = i
while max_i + 1 < len(prev_row) and prev_row[max_i] == prev_row[max_i + 1]:
max_i += 1
while min_i - 1 >= 0 and prev_row[min_i] == prev_row[min_i - 1] and prev_row[min_i] in prev_chars:
min_i -= 1
prev_row[min_i:max_i+1] = row_i * (max_i + 1 - min_i)
return prev_row
def solve(r, c, a):
ans = []
prev_row = ['?' for _ in a[0]]
for row in a:
if any(row_i != '?' for row_i in row):
prev_row = solve_row(prev_row, row)
break
for row in a:
prev_row = solve_row(prev_row, row)
ans.append(prev_row)
assert '?' not in prev_row
return ans
def _iter_tuples(a):
for i, row in enumerate(a):
for j, row_j in enumerate(row):
yield i, j, row_j
def _to_tuples(a):
return list(_iter_tuples(a))
def check(r, c, a, ans):
a = _to_tuples(a)
ans = _to_tuples(ans)
for (i, j, char) in a:
if char != '?':
assert (i, j, char) in ans
ptslen = 0
for char in {char for (i, j, char) in a}:
if char == '?':
continue
pts = {(i, j) for (i, j, char2) in ans if char2 == char}
ptslen += len(pts)
i_min = min(i for i, j in pts)
i_max = max(i for i, j in pts)
j_min = min(j for i, j in pts)
j_max = max(j for i, j in pts)
pts2 = {(i, j) for i in xrange(i_min, 1 + i_max) for j in xrange(j_min, 1 + j_max)}
assert pts == pts2, (char, pts2 - pts)
assert ptslen == r * c
def main():
for t in xrange(1, 1 + int(raw_input())):
print 'Case #%d:' % t
r, c = map(int, raw_input().split())
a = [list(raw_input().strip()) for _ in xrange(r)]
ans = solve(r, c, a)
check(r, c, a, ans)
for row in ans:
print ''.join(row)
if __name__ == '__main__':
main()
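# NOTE: toy run traced by hand against solve() and check() above; the input
# format matches main():
#   input:
#     1
#     2 2
#     A?
#     ?B
#   output:
#     Case #1:
#     AA
#     BB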
| [
"[email protected]"
] | |
fc17c5d9f4350ec9d4472375aea8d04b216e0ed2 | 4eee308593cb45abdfedecb3c80438584504cfed | /trainerbid/trainer/views.py | 5b3fc9ef17ac3b580db1810124e186237d388ea7 | [] | no_license | sikha-jayanth/Trainer-Bidding | 46ffb94f1af1a83f322e2b7cf1ff167e6c7150ee | fe43e6e9781d0da51a2805b7fbfb7b1dbb9b1af5 | refs/heads/main | 2023-01-21T01:13:38.866317 | 2020-11-30T22:16:30 | 2020-11-30T22:16:30 | 317,160,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,793 | py | from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from trainer.forms import RegistrationForm, PersonProfileForm, ApplicationForm, FilterApplicationForm
from django.contrib import messages
from institute.models import Requirements
from trainer.models import Application
from django.contrib.auth.decorators import login_required
from django.forms import forms
# Create your views here.
from trainer.models import PersonProfile
def trainerRegistration(request):
form = RegistrationForm()
context = {}
context["form"] = form
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
return redirect("login")
else:
context["form"] = form
return render(request, "trainer/registration.html", context)
return render(request, "trainer/registration.html", context)
def trainerLogin(request):
if request.method == 'POST':
uname = request.POST.get('uname')
pwd = request.POST.get('pwd')
user = authenticate(request, username=uname, password=pwd)
if user is not None:
login(request, user)
return redirect("trainerhome")
else:
messages.info(request, 'invalid credentials!')
return render(request, "trainer/login.html")
return render(request, "trainer/login.html")
@login_required(login_url='login')
def trainerHome(request):
return render(request, 'trainer/trainerhome.html')
@login_required(login_url='login')
def trainerLogout(request):
logout(request)
return redirect("login")
@login_required(login_url='login')
def trainerProfile(request):
context = {}
user = User.objects.get(username=request.user)
fname = user.first_name
lname = user.last_name
fullname = fname + " " + lname
email = user.email
form = PersonProfileForm(initial={'user': request.user, 'name': fullname, 'email': email})
context["form"] = form
if request.method == 'POST':
form = PersonProfileForm(request.POST)
if form.is_valid():
form.save()
return redirect("viewprofile")
else:
context["form"] = form
return render(request, "trainer/createprofile.html", context)
return render(request, "trainer/createprofile.html", context)
@login_required(login_url='login')
def viewProfile(request):
profile = PersonProfile.objects.get(user=request.user)
context = {}
context["profile"] = profile
return render(request, "trainer/viewprofile.html", context)
@login_required(login_url='login')
def updateProfile(request):
profile = PersonProfile.objects.get(user=request.user)
form = PersonProfileForm(instance=profile)
context = {}
context["form"] = form
if request.method == 'POST':
form = PersonProfileForm(instance=profile, data=request.POST)
if form.is_valid():
form.save()
return redirect("viewprofile")
else:
context["form"] = form
return render(request, "trainer/updateprofile.html", context)
return render(request, "trainer/updateprofile.html", context)
@login_required(login_url='login')
def matchingJobs(request):
context = {}
profile = PersonProfile.objects.get(user=request.user)
skill = profile.skill
requirements = Requirements.objects.filter(skill_needed=skill)
context["requirements"] = requirements
return render(request, "trainer/listjobs.html", context)
@login_required(login_url='login')
def applyJob(request, pk):
context = {}
profile = PersonProfile.objects.get(user=request.user)
job = Requirements.objects.get(id=pk)
form = ApplicationForm(
initial={'jobid': job.jobid, 'job_title': job.job_title, 'location': job.location, 'user': request.user,
'name': profile.name,
'skill': profile.skill, 'years_of_experience': profile.years_of_experience,
'qualification': profile.qualification, 'cgpa': profile.cgpa, 'email': profile.email,
'phone': profile.phone})
context["form"] = form
if request.method == 'POST':
form = ApplicationForm(request.POST)
if form.is_valid():
form.save()
return render(request, "trainer/msgapplied.html")
else:
context["form"] = form
return render(request, "trainer/applyjob.html", context)
return render(request, "trainer/applyjob.html", context)
@login_required(login_url='login')
def viewApplications(request):
context = {}
form = FilterApplicationForm()
context["form"] = form
queryset = Application.objects.filter(user=request.user)
count = queryset.count()
context["count"] = count
context["applications"] = queryset
return render(request, "trainer/viewapplications.html", context)
@login_required(login_url='login')
def filterApplications(request):
context = {}
form = FilterApplicationForm()
context["form"] = form
if request.method == 'POST':
form = FilterApplicationForm(request.POST)
if form.is_valid():
status = form.cleaned_data['status']
queryset = Application.objects.filter(status=status, user=request.user)
count = queryset.count()
context["applications"] = queryset
context["count"] = count
return render(request, "trainer/viewapplications.html", context)
else:
context["form"] = form
return render(request, "trainer/viewapplications.html", context)
return render(request, "trainer/viewapplications.html", context)
| [
"[email protected]"
] | |
532643ec7785b8e9f57df629a1c947434c7fcbcd | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-2705.py | da1acce1fdca02af865dc610333ef8e807a4d3b9 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,755 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
    def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
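# Removes every later element divisible by an earlier one, so a vector
# holding the ascending range [2, n) is reduced to the primes below n.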
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
478f8c7ada5ddf9f251c892adde96e027b636b33 | d5ed141e513dcb6fc8ab851835ec9a4630e3651b | /anaconda/anaconda/pkgs/anaconda-navigator-1.4.3-py27_0/lib/python2.7/site-packages/anaconda_navigator/widgets/dialogs/license.py | 89276b1e51fc11996f430c0cb1cfb33639b87002 | [] | no_license | starrysky1211/starrysky | 713998b366449a5ae4371e38723c56ea40532593 | abb642548fb9b431551133657f1a67858041a7e6 | refs/heads/master | 2022-11-09T21:51:22.558151 | 2017-02-25T14:42:37 | 2017-02-25T14:42:37 | 67,608,074 | 0 | 1 | null | 2022-10-16T05:17:25 | 2016-09-07T13:16:45 | Python | UTF-8 | Python | false | false | 13,033 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016 Continuum Analytics, Inc.
#
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
# -----------------------------------------------------------------------------
"""License Manager Dialog."""
# yapf: disable
# Third party imports
from qtpy.compat import getopenfilename, to_qvariant
from qtpy.QtCore import (QAbstractTableModel, QModelIndex,
QSortFilterProxyModel, Qt, Signal)
from qtpy.QtGui import QColor
from qtpy.QtWidgets import (QAbstractItemView, QHBoxLayout, QStyle,
QStyledItemDelegate, QTableView, QVBoxLayout)
# Local imports
from anaconda_navigator.api.anaconda_api import AnacondaAPI
from anaconda_navigator.config import LICENSE_PATH, get_home_dir
from anaconda_navigator.utils.qthelpers import qapplication
from anaconda_navigator.widgets import (ButtonLink, ButtonNormal,
ButtonPrimary, LabelBase,
SpacerHorizontal, SpacerVertical)
from anaconda_navigator.widgets.dialogs import (DialogBase,
MessageBoxInformation,
MessageBoxRemove)
# yapf: enable
# Extra data added to the license dicts to track the file it comes from
# Defined as a constant as it is used in several places, so this avoids hard-
# coding a string
COL_MAP = {
0: '__type__',
1: 'product',
2: 'end_date',
3: '__status__',
4: 'sig',
5: LICENSE_PATH,
}
HIDDEN_COLUMNS = [LICENSE_PATH, 'sig']
class LicenseModel(QAbstractTableModel):
"""Table model for the license view."""
def __init__(self, parent=None, licenses=None):
"""Table model for the license view."""
super(LicenseModel, self).__init__(parent=parent)
self._parent = parent
self._rows = licenses if licenses else []
@staticmethod
def flags(index):
"""Override Qt method."""
if index.isValid():
return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
def data(self, index, role=Qt.DisplayRole):
"""Override Qt method."""
if not index.isValid() or not 0 <= index.row() < len(self._rows):
return None
row = index.row()
column = index.column()
license_data = self._rows[row]
if role == Qt.DisplayRole:
data_key = COL_MAP.get(column)
if data_key:
return license_data.get(data_key)
elif role == Qt.TextAlignmentRole:
return Qt.AlignCenter
return to_qvariant()
@staticmethod
def headerData(section, orientation, role=Qt.DisplayRole):
"""Override Qt method."""
title = COL_MAP.get(section)
title = title.replace('__', '')
title = title.replace('_', ' ').capitalize()
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
return to_qvariant(int(Qt.AlignRight | Qt.AlignVCenter))
elif role == Qt.ToolTipRole:
return to_qvariant()
elif role == Qt.DisplayRole and orientation == Qt.Horizontal:
return to_qvariant(title)
def rowCount(self, index=QModelIndex()):
"""Override Qt method."""
return len(self._rows)
@staticmethod
def columnCount(index=QModelIndex()):
"""Override Qt method."""
return len(COL_MAP)
# --- Helpers
# -------------------------------------------------------------------------
def row(self, rownum):
"""Return the row data."""
return self._rows[rownum] if rownum < len(self._rows) else None
def load_licenses(self, licenses=None):
"""(Re)Load license data."""
self._rows = licenses if licenses else []
class BackgroundDelegate(QStyledItemDelegate):
"""
Delegate for handling background color in table.
QTableView CSS styling rules are too limited so in order to get an even
styling that matches the overall look, this delegate is needed.
"""
def __init__(self, parent=None):
"""Delegate for handling background color in table."""
super(BackgroundDelegate, self).__init__(parent=parent)
self._parent = parent
def paint(self, painter, option, index):
"""Override Qt method."""
# To draw a border on selected cells
if option.state & QStyle.State_Selected:
if self._parent.hasFocus():
color = QColor('#43B02A') # TODO: Get this from the scss
else:
color = QColor('#cecece') # TODO: Get this from the scss
painter.save()
painter.fillRect(option.rect, color)
painter.restore()
# Disable the state for the super() painter method
option.state ^= QStyle.State_Selected
super(BackgroundDelegate, self).paint(painter, option, index)
class LicenseTableView(QTableView):
"""License table manager view."""
sig_dropped = Signal(object)
sig_entered = Signal()
sig_left = Signal()
def __init__(self, parent=None):
"""License table manager view."""
super(LicenseTableView, self).__init__(parent=parent)
self.setMinimumWidth(500)
self.setMinimumHeight(200)
self.setAcceptDrops(True)
self.setShowGrid(False)
self.setSortingEnabled(True)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setAlternatingRowColors(True)
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.verticalHeader().hide()
self.horizontalHeader().setStretchLastSection(True)
def focusInEvent(self, event):
"""Override Qt Method."""
super(LicenseTableView, self).focusInEvent(event)
self.sig_entered.emit()
def focusOutEvent(self, event):
"""Override Qt Method."""
        super(LicenseTableView, self).focusOutEvent(event)
self.sig_left.emit()
def dragEnterEvent(self, event):
"""Override Qt Method."""
self.setProperty('dragin', True)
        if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dragLeaveEvent(self, event):
"""Override Qt Method."""
self.setProperty('dragin', False)
@staticmethod
def dragMoveEvent(event):
"""Override Qt Method."""
        if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
"""Override Qt Method."""
self.setProperty('dragin', False)
        if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
links = []
for url in event.mimeData().urls():
links.append(str(url.toLocalFile()))
self.sig_dropped.emit(tuple(links))
else:
event.ignore()
def setProperty(self, name, value):
"""Override Qt method."""
QTableView.setProperty(self, name, value)
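        # unpolish/polish forces the style sheet to be re-applied so that
        # selectors keyed on dynamic properties (e.g. "dragin") take effect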
self.style().unpolish(self)
self.style().polish(self)
self.update()
class LicenseManagerDialog(DialogBase):
"""License Manager main dialog."""
CONTACT_LINK = 'https://support.continuum.io/' # TODO: Centralize this?
# Url, Sender
sig_url_clicked = Signal(object, object)
def __init__(self, parent=None):
"""License Manager main dialog."""
super(LicenseManagerDialog, self).__init__(parent=parent)
self.api = AnacondaAPI()
# Widgets
self.message_box = None # For testing
self.button_add = ButtonPrimary('Add license')
self.button_close = ButtonNormal('Close')
self.button_remove = ButtonNormal('Remove license')
self.button_contact = ButtonLink('Please contact us.')
self.label_info = LabelBase(
'Manage your Continuum Analytics '
'license keys.'
)
self.label_contact = LabelBase('Got a problem with your license? ')
self.proxy_model = QSortFilterProxyModel(parent=self)
self.model = LicenseModel(parent=self)
self.table = LicenseTableView(parent=self)
self.delegate = BackgroundDelegate(self.table)
# Widget setup
self.proxy_model.setSourceModel(self.model)
self.table.setItemDelegate(self.delegate)
self.table.setModel(self.proxy_model)
self.setWindowTitle('License Manager')
# Layouts
layout_buttons = QHBoxLayout()
layout_buttons.addWidget(self.label_info)
layout_buttons.addWidget(SpacerHorizontal())
layout_buttons.addStretch()
layout_buttons.addWidget(self.button_add)
layout_buttons.addWidget(SpacerHorizontal())
layout_buttons.addWidget(self.button_remove)
layout_buttons_bottom = QHBoxLayout()
layout_buttons_bottom.addWidget(self.label_contact)
layout_buttons_bottom.addWidget(self.button_contact)
layout_buttons_bottom.addStretch()
layout_buttons_bottom.addWidget(self.button_close)
layout = QVBoxLayout()
layout.addLayout(layout_buttons)
layout.addWidget(SpacerVertical())
layout.addWidget(self.table)
layout.addWidget(SpacerVertical())
layout.addWidget(SpacerVertical())
layout.addLayout(layout_buttons_bottom)
self.setLayout(layout)
# Signals
self.button_add.clicked.connect(lambda: self.add_license())
self.button_remove.clicked.connect(self.remove_license)
self.button_close.clicked.connect(self.accept)
self.button_contact.clicked.connect(
lambda v=None: self.sig_url_clicked.
emit(self.CONTACT_LINK, 'License Manager')
)
self.table.sig_dropped.connect(self.api.add_license)
# Setup
self.button_add.setFocus()
self.load_licenses()
def _hide_columns(self):
"""Hide columns."""
for key, val in COL_MAP.items():
if val in HIDDEN_COLUMNS:
self.table.setColumnHidden(key, True)
def add_license(self, v=None, path=None):
"""Add license file."""
if path is None:
filename, selected_filter = getopenfilename(
self,
'Select license file',
filters='License files (*.txt)',
basedir=get_home_dir(),
)
if filename:
paths = [filename]
else:
paths = []
else:
paths = [path]
valid_licenses, invalid_licenses = self.api.add_license(paths)
        for invalid_license in invalid_licenses:
            text = ('File: <b>"{0}"</b>'
                    '<br>is not a valid license file.').format(invalid_license)
self.message_box = MessageBoxInformation(
text=text, title="Invalid license file"
)
self.message_box.exec_()
if valid_licenses:
self.load_licenses()
def remove_license(self, row=None):
"""Remove license from file."""
if row is None:
index = self.table.currentIndex()
else:
index = self.proxy_model.index(row, 0)
model_index = self.proxy_model.mapToSource(index)
row_data = self.model.row(model_index.row())
if row_data:
text = (
'Do you want to remove license for product:<br><br>'
'<b>{product}</b> ({issued} - {end_date})'
)
text = text.format(
product=row_data.get('product'),
end_date=row_data.get('end_date'),
issued=row_data.get('issued')
)
self.message_box = MessageBoxRemove(
title='Remove license', text=text
)
if self.message_box.exec_():
self.api.remove_license(row_data)
self.load_licenses()
def load_licenses(self):
"""Load license files."""
res = self.api.load_licenses()
self.model.load_licenses(res)
self.proxy_model.setSourceModel(self.model)
self.table.resizeColumnsToContents()
self._hide_columns()
self.update_status()
def count(self):
"""Return the number of items in the table."""
return self.table.model().rowCount()
def update_status(self):
"""Update visible and enabled status for widgets based on actions."""
self.button_remove.setEnabled(bool(self.count()))
def test(): # pragma: no cover
"""Run local test."""
app = qapplication()
w = LicenseManagerDialog()
w.update_style_sheet()
w.show()
app.exec_()
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
2f147c1641f843f833516ed9c68321409fb72dac | 84c4474a88a59da1e72d86b33b5326003f578271 | /saleor/graphql/checkout/mutations/checkout_language_code_update.py | 2da8f5d51c6bdb4c3a5fd72b4babc3f0f2d1e657 | [
"BSD-3-Clause"
] | permissive | vineetb/saleor | 052bd416d067699db774f06453d942cb36c5a4b7 | b0d5ec1a55f2ceeba6f62cf15f53faea0adf93f9 | refs/heads/main | 2023-07-20T02:01:28.338748 | 2023-07-17T06:05:36 | 2023-07-17T06:05:36 | 309,911,573 | 0 | 0 | NOASSERTION | 2020-11-04T06:32:55 | 2020-11-04T06:32:55 | null | UTF-8 | Python | false | false | 2,360 | py | import graphene
from saleor.webhook.event_types import WebhookEventAsyncType
from ...core import ResolveInfo
from ...core.descriptions import ADDED_IN_34, DEPRECATED_IN_3X_INPUT
from ...core.doc_category import DOC_CATEGORY_CHECKOUT
from ...core.enums import LanguageCodeEnum
from ...core.mutations import BaseMutation
from ...core.scalars import UUID
from ...core.types import CheckoutError
from ...core.utils import WebhookEventInfo
from ...plugins.dataloaders import get_plugin_manager_promise
from ..types import Checkout
from .utils import get_checkout
class CheckoutLanguageCodeUpdate(BaseMutation):
checkout = graphene.Field(Checkout, description="An updated checkout.")
class Arguments:
id = graphene.ID(
description="The checkout's ID." + ADDED_IN_34,
required=False,
)
token = UUID(
description=f"Checkout token.{DEPRECATED_IN_3X_INPUT} Use `id` instead.",
required=False,
)
checkout_id = graphene.ID(
required=False,
description=(
f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use `id` instead."
),
)
language_code = graphene.Argument(
LanguageCodeEnum, required=True, description="New language code."
)
class Meta:
description = "Update language code in the existing checkout."
doc_category = DOC_CATEGORY_CHECKOUT
error_type_class = CheckoutError
error_type_field = "checkout_errors"
webhook_events_info = [
WebhookEventInfo(
type=WebhookEventAsyncType.CHECKOUT_UPDATED,
description="A checkout was updated.",
)
]
@classmethod
def perform_mutation( # type: ignore[override]
cls,
_root,
info: ResolveInfo,
/,
*,
checkout_id=None,
id=None,
language_code,
token=None
):
checkout = get_checkout(cls, info, checkout_id=checkout_id, token=token, id=id)
checkout.language_code = language_code
checkout.save(update_fields=["language_code", "last_change"])
manager = get_plugin_manager_promise(info.context).get()
cls.call_event(manager.checkout_updated, checkout)
return CheckoutLanguageCodeUpdate(checkout=checkout)
| [
"[email protected]"
] | |
b460173067fee5643332d9bdb6cca562422f0628 | d007f8d6c318c3d66e76d99715edf324c9fe0294 | /recipe_modules/ninja/__init__.py | e0c77a7a641618bf0e7ebb4ca42b2cc775baf20b | [
"BSD-3-Clause"
] | permissive | nirvus/infra-recipes | c0f9e5facca7ad1907d639eb8819a59dc8f3584e | a5dc52f47405dcce56fb43a3e8ac80a2fbd56717 | refs/heads/master | 2020-04-07T23:15:01.809232 | 2018-11-06T02:30:12 | 2018-11-06T17:37:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | DEPS = [
'recipe_engine/step',
] | [
"[email protected]"
] | |
426c5e0d5a83f6df17a3d005e7214aa7f8ce9038 | 189d79c0e0fcdce192a6034306416fd492202501 | /LeetCode/Python/306 Additive Number.py | c7806ee6c106e36c199ee794f0ded80b76622235 | [] | no_license | digant0705/Algorithm | 294fbc84eaa4b6e0ea864924b71c4773c2e1c0c6 | 01f04bcc5e8f55014973d4eef069245f3f663eb9 | refs/heads/master | 2021-07-25T16:44:34.366974 | 2021-06-05T23:37:17 | 2021-06-05T23:37:17 | 251,144,249 | 0 | 0 | null | 2020-03-29T22:05:29 | 2020-03-29T22:05:28 | null | UTF-8 | Python | false | false | 1,981 | py | # -*- coding: utf-8 -*-
'''
Additive Number
===============
Additive number is a string whose digits can form additive sequence.
A valid additive sequence should contain at least three numbers. Except for the
first two numbers, each subsequent number in the sequence must be the sum of
the preceding two.
For example:
"112358" is an additive number because the digits can form an additive
sequence: 1, 1, 2, 3, 5, 8.
1 + 1 = 2, 1 + 2 = 3, 2 + 3 = 5, 3 + 5 = 8
"199100199" is also an additive number, the additive sequence is:
1, 99, 100, 199.
1 + 99 = 100, 99 + 100 = 199
Note: Numbers in the additive sequence cannot have leading zeros, so sequence
1, 2, 03 or 1, 02, 3 is invalid.
Given a string containing only digits '0'-'9', write a function to determine
if it's an additive number.
Follow up:
How would you handle overflow for very large input integers?
'''
import collections
class Solution(object):
    '''Approach:
    Once the first two numbers are fixed, the whole sequence is determined,
    so it suffices to enumerate every possible choice of the first two numbers.
    '''
def add(self, a, b):
i, j, carry, r = len(a) - 1, len(b) - 1, 0, collections.deque()
while i >= 0 or j >= 0:
carry, mod = divmod(
(int(a[i]) if i >= 0 else 0) +
(int(b[j]) if j >= 0 else 0) + carry, 10)
r.appendleft(mod)
i -= 1
j -= 1
if carry:
r.appendleft(carry)
return ''.join(map(str, r))
def check(self, a, b, num):
if not num:
return True
sum = self.add(a, b)
if num.startswith(sum):
return self.check(b, sum, num[len(sum):])
return False
    def isAdditiveNumber(self, num):
        # numbers in the sequence may not have leading zeros, so a candidate
        # starting with '0' is only valid as the single digit "0" itself
        return any(
            self.check(num[:i + 1], num[i + 1:j + 1], num[j + 1:])
            for i in xrange(len(num) - 2)
            for j in xrange(i + 1, len(num) - 1)
            if (num[0] != '0' or i == 0) and (num[i + 1] != '0' or j == i + 1)
        )
s = Solution()
print s.isAdditiveNumber("11")
| [
"[email protected]"
] | |
b80f3341e01c927cd8220c8b5e567848a7c8a259 | 229f4ec6272c5a730da44923a94f211fba04d38f | /cltk/prosody/latin/HendecasyllableScanner.py | 15a3a006967237e83df65eef182e34fe46ab2867 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Sedictious/cltk | d8fd364f66eb5fdbc85057b490ddd842b15e96a4 | 681170d58de61d50bec6cca9ca5753844506c3f6 | refs/heads/master | 2021-04-30T02:10:41.988706 | 2018-12-10T17:14:17 | 2018-12-10T17:14:17 | 121,495,814 | 1 | 0 | MIT | 2018-12-10T17:01:16 | 2018-02-14T10:05:17 | Python | UTF-8 | Python | false | false | 9,228 | py | """Utility class for producing a scansion pattern for a Latin hendecasyllables.
Given a line of hendecasyllables, the scan method performs a series of transformation and checks
are performed and for each one performed successfully, a note is added to the scansion_notes
list so that end users may view the provenance of a scansion.
"""
import re
from Levenshtein import distance
from cltk.prosody.latin.Verse import Verse
from cltk.prosody.latin.MetricalValidator import MetricalValidator
from cltk.prosody.latin.ScansionConstants import ScansionConstants
from cltk.prosody.latin.ScansionFormatter import ScansionFormatter
from cltk.prosody.latin.Syllabifier import Syllabifier
import cltk.prosody.latin.StringUtils as StringUtils
from cltk.prosody.latin.VerseScanner import VerseScanner
__author__ = ['Todd Cook <[email protected]>']
__license__ = 'MIT License'
class HendecasyllableScanner(VerseScanner):
"""The scansion symbols used can be configured by passing a suitable constants class to
the constructor."""
    def __init__(self, constants=ScansionConstants(), syllabifier=Syllabifier(),
                 optional_transform=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.constants = constants
self.remove_punct_map = StringUtils.remove_punctuation_dict()
self.punctuation_substitutions = StringUtils.punctuation_for_spaces_dict()
self.metrical_validator = MetricalValidator(constants)
self.formatter = ScansionFormatter(constants)
self.syllabifier = syllabifier
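        # matches a stressed-unstressed-stressed run (an inverted amphibrach)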
self.inverted_amphibrach_re = re.compile(
r"{}\s*{}\s*{}".format(self.constants.STRESSED,
self.constants.UNSTRESSED,
self.constants.STRESSED))
self.syllable_matcher = re.compile(r"[{}]".format(self.constants.VOWELS +
self.constants.ACCENTED_VOWELS +
self.constants.LIQUIDS +
self.constants.MUTES))
        self.optional_transform = optional_transform
def scan(self, original_line: str, optional_transform: bool = False) -> Verse:
"""Scan a line of Latin hendecasyllables and produce a scansion pattern, and other data.
:return: a Verse object
>>> scanner = HendecasyllableScanner()
>>> print(scanner.scan("Cui dono lepidum novum libellum"))
Verse(original='Cui dono lepidum novum libellum', scansion=' - U - U U - U - U - U ', meter='hendecasyllable', valid=True, syllable_count=11, accented='Cui donō lepidūm novūm libēllum', scansion_notes=['Corrected invalid start.'], syllables = ['Cui', 'do', 'no', 'le', 'pi', 'dūm', 'no', 'vūm', 'li', 'bēl', 'lum'])
>>> print(scanner.scan(
... "ārida modo pumice expolitum?").scansion) # doctest: +NORMALIZE_WHITESPACE
- U - U U - U - U - U
"""
verse = Verse(original_line, meter='hendecasyllable')
# replace punctuation with spaces
line = original_line.translate(self.punctuation_substitutions)
# conservative i to j
line = self.transform_i_to_j(line)
working_line = self.elide_all(line)
working_line = self.accent_by_position(working_line)
syllables = self.syllabifier.syllabify(working_line)
if optional_transform:
working_line = self.transform_i_to_j_optional(line)
working_line = self.elide_all(working_line)
working_line = self.accent_by_position(working_line)
syllables = self.syllabifier.syllabify(working_line)
verse.scansion_notes += [self.constants.NOTE_MAP["optional i to j"]]
verse.working_line = working_line
verse.syllable_count = self.syllabifier.get_syllable_count(syllables)
verse.syllables = syllables
# identify some obvious and probably choices based on number of syllables
if verse.syllable_count > 11:
verse.valid = False
verse.scansion_notes += [self.constants.NOTE_MAP["> 11"]]
return verse
if verse.syllable_count < 11:
verse.valid = False
verse.scansion_notes += [self.constants.NOTE_MAP["< 11"]]
return verse
stresses = self.flag_dipthongs(syllables)
syllables_wspaces = StringUtils.to_syllables_with_trailing_spaces(working_line, syllables)
offset_map = self.calc_offset(syllables_wspaces)
for idx, syl in enumerate(syllables):
for accented in self.constants.ACCENTED_VOWELS:
if accented in syl:
stresses.append(idx)
# second to last syllable is always long
stresses.append(verse.syllable_count - 2)
verse.scansion = self.produce_scansion(stresses,
syllables_wspaces, offset_map)
if len(StringUtils.stress_positions(self.constants.STRESSED, verse.scansion)) != \
len(set(stresses)):
verse.valid = False
verse.scansion_notes += [self.constants.NOTE_MAP["invalid syllables"]]
return verse
if self.metrical_validator.is_valid_hendecasyllables(verse.scansion):
verse.scansion_notes += [self.constants.NOTE_MAP["positionally"]]
return self.assign_candidate(verse, verse.scansion)
smoothed = self.correct_invalid_start(verse.scansion)
if distance(verse.scansion, smoothed) > 0:
verse.scansion_notes += [self.constants.NOTE_MAP["invalid start"]]
verse.scansion = smoothed
stresses += StringUtils.differences(verse.scansion, smoothed)
if self.metrical_validator.is_valid_hendecasyllables(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
smoothed = self.correct_antepenult_chain(verse.scansion)
if distance(verse.scansion, smoothed) > 0:
verse.scansion_notes += [self.constants.NOTE_MAP["antepenult chain"]]
verse.scansion = smoothed
stresses += StringUtils.differences(verse.scansion, smoothed)
if self.metrical_validator.is_valid_hendecasyllables(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
candidates = self.metrical_validator.closest_hendecasyllable_patterns(verse.scansion)
if candidates is not None:
if len(candidates) == 1 \
and len(verse.scansion.replace(" ", "")) == len(candidates[0]) \
and len(StringUtils.differences(verse.scansion, candidates[0])) == 1:
tmp_scansion = self.produce_scansion(
StringUtils.differences(verse.scansion, candidates[0]),
syllables_wspaces, offset_map)
if self.metrical_validator.is_valid_hendecasyllables(tmp_scansion):
verse.scansion_notes += [self.constants.NOTE_MAP["closest match"]]
return self.assign_candidate(verse, tmp_scansion)
        # if the line doesn't scan "as is", it may scan if the optional i to j transformations
# are made, so here we set them and try again.
if self.optional_transform and not verse.valid:
return self.scan(original_line, optional_transform=True)
verse.accented = self.formatter.merge_line_scansion(
verse.original, verse.scansion)
return verse
def correct_invalid_start(self, scansion: str) -> str:
"""The third syllable of a hendecasyllabic line is long, so we will convert it
:param scansion:
:return: scansion string with corrected start
>>> print(HendecasyllableScanner().correct_invalid_start(
... "- U U U U - U - U - U").strip())
- U - U U - U - U - U
"""
mark_list = StringUtils.mark_list(scansion)
vals = list(scansion.replace(" ", ""))
corrected = vals[:2] + [self.constants.STRESSED] + vals[3:]
new_line = list(" " * len(scansion))
for idx, car in enumerate(corrected):
new_line[mark_list[idx]] = car
return "".join(new_line)
def correct_antepenult_chain(self, scansion: str) -> str:
"""For hendecasyllables the last three feet of the verse are predictable
and do not regularly allow substitutions.
:param scansion: scansion line thus far
:return: corrected line of scansion
>>> print(HendecasyllableScanner().correct_antepenult_chain(
... "-U -UU UU UU UX").strip())
-U -UU -U -U -X
"""
mark_list = StringUtils.mark_list(scansion)
vals = list(scansion.replace(" ", ""))
new_vals = vals[:len(vals) - 6] + [self.constants.TROCHEE +
self.constants.TROCHEE +
self.constants.STRESSED] + vals[-1:]
corrected = "".join(new_vals)
new_line = list(" " * len(scansion))
for idx, car in enumerate(corrected):
new_line[mark_list[idx]] = car
return "".join(new_line)
| [
"[email protected]"
] | |
d06c40ecaf072a5bad0a3bfcdf2cff9f0960317d | ccb4cb8358fb896a88bbf0c6771462d898d7a492 | /examples/goce_reentry_chart.py | decf8f0416fb3a95317f8d7eb65579f41c578074 | [
"MIT"
] | permissive | skyfielders/python-skyfield | a30d34a680dcd285bc8cd39cedc2629f792d5821 | 61fb6324e312715e20aa75ec24dc87286442be1a | refs/heads/master | 2023-08-31T13:10:32.863587 | 2023-08-10T14:25:56 | 2023-08-10T14:25:56 | 7,924,113 | 1,040 | 204 | MIT | 2023-08-28T19:44:50 | 2013-01-30T21:19:21 | Python | UTF-8 | Python | false | false | 2,026 | py | import numpy as np
from matplotlib import pyplot as plt
from matplotlib.dates import HourLocator, DateFormatter
from skyfield.api import load, EarthSatellite
# Labels for both date and hour on the x axis, and km on y.
def label_dates_and_hours(axes):
axes.xaxis.set_major_locator(HourLocator([0]))
axes.xaxis.set_minor_locator(HourLocator([0, 6, 12, 18]))
axes.xaxis.set_major_formatter(DateFormatter('0h\n%Y %b %d\n%A'))
axes.xaxis.set_minor_formatter(DateFormatter('%Hh'))
    for label in axes.xaxis.get_ticklabels(which='both'):
label.set_horizontalalignment('left')
axes.yaxis.set_major_formatter('{x:.0f} km')
axes.tick_params(which='both', length=0)
# Load the satellite's final TLE entry.
sat = EarthSatellite(
'1 34602U 09013A 13314.96046236 .14220718 20669-5 50412-4 0 930',
'2 34602 096.5717 344.5256 0009826 296.2811 064.0942 16.58673376272979',
'GOCE',
)
# Build the time range `t` over which to plot, plus other values.
ts = load.timescale()
t = ts.tt_jd(np.arange(sat.epoch.tt - 2.0, sat.epoch.tt + 2.0, 0.005))
reentry = ts.utc(2013, 11, 11, 0, 16)
earth_radius_km = 6371.0
# Compute geocentric positions for the satellite.
g = sat.at(t)
valid = [m is None for m in g.message]
# Start a new figure.
fig, ax = plt.subplots()
# Draw the blue curve.
x = t.utc_datetime()
y = np.where(valid, g.distance().km - earth_radius_km, np.nan)
ax.plot(x, y)
# Label the TLE epoch.
x = sat.epoch.utc_datetime()
y = sat.at(sat.epoch).distance().km - earth_radius_km
ax.plot(x, y, 'k.')
ax.text(x, y - 9, 'Epoch of TLE data ', ha='right')
# Label the official moment of reentry.
x = reentry.utc_datetime()
y = sat.at(reentry).distance().km - earth_radius_km
ax.plot(x, y, 'r.')
ax.text(x, y + 6, ' Moment of re-entry', c='r')
# Grid lines and labels.
ax.grid(which='both')
ax.set(title='GOCE satellite: altitude above sea level', xlabel='UTC')
label_dates_and_hours(ax)
# Render the plot to a PNG file.
fig.savefig('goce-reentry.png', bbox_inches='tight')
| [
"[email protected]"
] | |
be938368f2fbe8f503a6259a20e3e9714ac29b5c | 5af4b89949a703bcc53bdc25a19a5ff079817cce | /papermerge/core/models/folder.py | 00f6881892ed5ee47048c385c945b3f38b07f4ff | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | 0xpointer42/papermerge | 4b176a865ffa3044605844406fecd3ac5f3c5657 | 9bea16e96d460d00229e813f7063e45bfd07b4e2 | refs/heads/master | 2022-09-09T09:18:56.596921 | 2020-06-02T15:45:11 | 2020-06-02T15:45:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | from django.utils.translation import ugettext_lazy as _
from papermerge.core import mixins
from papermerge.core.models.kvstore import KVNode
from papermerge.core.models.node import BaseTreeNode
from papermerge.search import index
class Folder(mixins.ExtractIds, BaseTreeNode):
search_fields = [
index.SearchField('title'),
index.SearchField('text', partial_match=True, boost=2),
index.SearchField('notes')
]
@property
def kv(self):
return KVNode(instance=self)
class Meta:
verbose_name = _("Folder")
verbose_name_plural = _("Folders")
def __str__(self):
return self.title
| [
"[email protected]"
] | |
29d64bfeff13d2d620664beeb544713fc033e990 | 614d5ec96dcd9c6bb7a4384ea5420a7757c43d34 | /examples/checkable.py | 3bb79a1ddb3669a679ec3b68eab1e3c9bd9625ce | [
"MIT"
] | permissive | githeshuai/dayu_widgets_tag | 52ae4816addd58505b6bbd0e4cd12f931df89e95 | f843e8f100b698af74353ec7595c26213574bc15 | refs/heads/master | 2023-04-05T10:04:03.726767 | 2021-04-01T16:02:42 | 2021-04-01T16:02:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.8
# Email : [email protected]
###################################################################
from dayu_widgets_tag import MCheckableTag
from dayu_widgets.qt import QWidget, QHBoxLayout, QApplication, Slot
from dayu_widgets import dayu_theme, MLabel
@dayu_theme.deco
class Checkable(QWidget):
def __init__(self, parent=None):
super(Checkable, self).__init__(parent)
label = MLabel('Categories:')
topic_lay = QHBoxLayout()
topic_lay.addWidget(label)
for i in ['Movies', 'Books', 'Music', 'Sports']:
topic_lay.addWidget(MCheckableTag(text=i))
topic_lay.addStretch()
main_lay = QHBoxLayout()
main_lay.addLayout(topic_lay)
self.setLayout(main_lay)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = Checkable()
test.show()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
07464cdad35ad7b4f680e3ab926989fbcf8d020a | f8aa467bbaa5dbdddf2085b6121f90cb19bc13c7 | /my_happy_pandas/plotting/_matplotlib/tools.py | eaf9e090f77dc0e2f0813c2bad7a2a826fda6779 | [
"Apache-2.0"
] | permissive | ggservice007/my-happy-pandas | ec5520383aa887b12f07a2dc5e2944d0ca9b260e | 63145d54e452177f7d5b2fc8fdbc1fdf37dd5b16 | refs/heads/default | 2023-02-22T00:24:01.164363 | 2021-01-27T11:22:24 | 2021-01-27T11:22:24 | 332,691,761 | 0 | 0 | Apache-2.0 | 2021-01-26T07:39:47 | 2021-01-25T09:18:59 | Python | UTF-8 | Python | false | false | 12,266 | py | # being a bit too dynamic
from math import ceil
import warnings
import matplotlib.table
import matplotlib.ticker as ticker
import numpy as np
from my_happy_pandas.core.dtypes.common import is_list_like
from my_happy_pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from my_happy_pandas.plotting._matplotlib import compat
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
for label in ax.get_xticklabels():
label.set_ha("right")
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
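    """Draw a matplotlib table on ``ax`` from a DataFrame or Series.

    Row and column labels default to the data's index and columns.
    """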
if isinstance(data, ABCSeries):
data = data.to_frame()
elif isinstance(data, ABCDataFrame):
pass
else:
raise ValueError("Input data must be DataFrame or Series")
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
table = matplotlib.table.table(
ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs
)
return table
def _get_layout(nplots, layout=None, layout_type="box"):
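    """Return a (rows, columns) grid able to hold ``nplots`` axes.

    An explicit ``layout`` may set one dimension to -1, in which case it is
    derived from ``nplots``; otherwise the grid is chosen from
    ``layout_type`` ('single', 'horizontal', 'vertical') or, for 'box',
    a near-square arrangement.
    """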
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError("Layout must be a tuple of (rows, columns)")
nrows, ncols = layout
# Python 2 compat
ceil_ = lambda x: int(ceil(x))
if nrows == -1 and ncols > 0:
layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
elif ncols == -1 and nrows > 0:
layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
if nrows * ncols < nplots:
raise ValueError(
f"Layout of {nrows}x{ncols} must be larger than required size {nplots}"
)
return layout
if layout_type == "single":
return (1, 1)
elif layout_type == "horizontal":
return (1, nplots)
elif layout_type == "vertical":
return (nplots, 1)
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
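# Worked examples of the layout heuristics above (values verified by hand):
#
#   _get_layout(2)                            # -> (1, 2)  from the lookup table
#   _get_layout(5)                            # -> (3, 2)  smallest k with room
#   _get_layout(7)                            # -> (3, 3)
#   _get_layout(6, layout=(-1, 3))            # -> (2, 3)  rows computed from cols
#   _get_layout(3, layout_type="horizontal")  # -> (1, 3)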
# copied from matplotlib/pyplot.py and modified for pandas.plotting
def _subplots(
naxes=None,
sharex=False,
sharey=False,
squeeze=True,
subplot_kw=None,
ax=None,
layout=None,
layout_type="box",
**fig_kw,
):
"""
Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Parameters
----------
naxes : int
Number of required axes. Exceeded axes are set invisible. Default is
nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects are returned as numpy 1-d arrays.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns
-------
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
Examples
--------
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if is_list_like(ax):
ax = _flatten(ax)
if layout is not None:
warnings.warn(
"When passing multiple axes, layout keyword is ignored", UserWarning
)
if sharex or sharey:
warnings.warn(
"When passing multiple axes, sharex and sharey "
"are ignored. These settings must be specified when creating axes",
UserWarning,
stacklevel=4,
)
if len(ax) == naxes:
fig = ax[0].get_figure()
return fig, ax
else:
raise ValueError(
f"The number of passed axes must be {naxes}, the "
"same as the output plot"
)
fig = ax.get_figure()
# if ax is passed and a number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, _flatten(ax)
else:
warnings.warn(
"To output multiple subplots, the figure containing "
"the passed axes is being cleared",
UserWarning,
stacklevel=4,
)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
    # Create empty object array to hold all axes. It's easiest to make it 1-d
    # so we can just append subplots upon creation, and then reshape right
    # before returning.
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw["sharex"] = ax0
if sharey:
subplot_kw["sharey"] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
# Set sharex and sharey to None for blank/dummy axes, these can
# interfere with proper axis limits on the visible axes if
# they share axes e.g. issue #7528
if i >= naxes:
kwds["sharex"] = None
kwds["sharey"] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
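# Comment sketch: ask for three plots and let the helper pick a 2x2 grid;
# the spare fourth axis is created but hidden:
#
#   fig, axes = _subplots(naxes=3, sharex=True)
#   axes.shape  # -> (2, 2)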
def _remove_labels_from_axis(axis):
for t in axis.get_majorticklabels():
t.set_visible(False)
# set_visible will not be effective if
# minor axis has NullLocator and NullFormatter (default)
if isinstance(axis.get_minor_locator(), ticker.NullLocator):
axis.set_minor_locator(ticker.AutoLocator())
if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
axis.set_minor_formatter(ticker.FormatStrFormatter(""))
for t in axis.get_minorticklabels():
t.set_visible(False)
axis.get_label().set_visible(False)
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
if compat._mpl_ge_3_2_0():
row_num = lambda x: x.get_subplotspec().rowspan.start
col_num = lambda x: x.get_subplotspec().colspan.start
else:
row_num = lambda x: x.rowNum
col_num = lambda x: x.colNum
if nrows > 1:
try:
                # first find out the ax layout,
                # so that we can correctly handle "gaps"
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_)
for ax in axarr:
layout[row_num(ax), col_num(ax)] = ax.get_visible()
for ax in axarr:
# only the last row of subplots should get x labels -> all
# other off layout handles the case that the subplot is
# the last in the column, because below is no subplot/gap.
if not layout[row_num(ax) + 1, col_num(ax)]:
continue
if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
except IndexError:
                # if gridspec is used, ax.rowNum and ax.colNum may differ
                # from layout shape. in this case, use last_row logic
for ax in axarr:
if ax.is_last_row():
continue
if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
if ncols > 1:
for ax in axarr:
# only the first column should get y labels -> set all other to
# off as we only have labels in the first column and we always
# have a subplot there, we can skip the layout test
if ax.is_first_col():
continue
if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.yaxis)
def _flatten(axes):
if not is_list_like(axes):
return np.array([axes])
elif isinstance(axes, (np.ndarray, ABCIndexClass)):
return axes.ravel()
return np.array(axes)
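# Behaviour summary of _flatten (comment sketch): a scalar Axes is wrapped,
# an ndarray/Index is raveled, other list-likes become a 1-d object array:
#
#   _flatten(ax)            # -> array([ax])
#   _flatten(axes_2d)       # -> axes_2d.ravel()
#   _flatten([ax1, ax2])    # -> array([ax1, ax2])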
def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None):
import matplotlib.pyplot as plt
for ax in _flatten(axes):
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
def _get_all_lines(ax):
lines = ax.get_lines()
if hasattr(ax, "right_ax"):
lines += ax.right_ax.get_lines()
if hasattr(ax, "left_ax"):
lines += ax.left_ax.get_lines()
return lines
def _get_xlim(lines):
left, right = np.inf, -np.inf
    for line in lines:
        x = line.get_xdata(orig=False)
left = min(np.nanmin(x), left)
right = max(np.nanmax(x), right)
return left, right
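# Typical combined use of the two helpers above (comment sketch): collect
# every line from a possibly twinned axes, then fit the x-limits to the data:
#
#   lines = _get_all_lines(ax)
#   left, right = _get_xlim(lines)
#   ax.set_xlim(left, right)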
| [
"[email protected]"
] | |
d89cfb8f0978fc0bca985f2f530f9406acc32058 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2465/60631/246241.py | f35f636406a4f9cd9aa0b1ec54ba471016376403 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | si = input()
li = si.split(',')
out = []
# Walk the values from the last element backwards: after i values have been
# examined, i+1 is a valid candidate whenever the current value exceeds i
# (an h-index-style count, judging from the logic).
for i in range(len(li)):
    p = len(li) - 1 - i
    h = li[p]
    if i < int(h):
        out.append(i + 1)
# max() on an empty list raises ValueError, so fall back to 0 when nothing
# qualifies.
print(max(out) if out else 0)
| [
"[email protected]"
] | |
9ec8dfb6896bd3defa4e777b809942f49b4b449d | 3f597d5c1363f1f6f77764bcdb864167c3e51795 | /qwapp/defaults.py | ac08eacd7647f3441469ca0c64e9eeeb3df07f45 | [] | no_license | mbr/qwapp | 558c58b47398abcaca41b1814c7b5e8363b8eaf0 | 44fa2ecefcb61d2fb5c2280d30af2b1140f3f03b | refs/heads/master | 2023-06-06T20:48:59.776375 | 2013-06-06T01:46:49 | 2013-06-06T01:46:49 | 1,467,990 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | WIKI_NAME = 'qwapp Wiki'
REPOSITORY_PATH = './wiki'
DEBUG = True
SECRET_KEY = 'development key'
# use password.py to generate
PASSWORD_HASH = '06ab2f79d3fb9d86c75f0bb981c95f5d68497b311bdb1ed32918717547b4a6c31017a7a04908c6d39a93c8f748e312d5bfd255cbfbf15530cf374c1861dc73a7' # "devpass"
CACHE_TYPE = 'simple' # set this to 'null' to disable or use memcached, or others
#CACHE_MEMCACHED_SERVERS = ['localhost:11211']
CACHE_THRESHOLD = 200
CACHE_DEFAULT_TIMEOUT = 50 # 50 seconds default cache timeout
CACHE_KEY_PREFIX = PASSWORD_HASH[:10]
# plugins loaded by default
PLUGINS = ['headershift', 'wikilinks']
PLUGIN_HEADERSHIFT_LEVEL = 1
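# Hedged sketch of how a Flask-based app such as qwapp typically consumes a
# defaults module like this one (the exact qwapp entry point is an
# assumption, not taken from this file):
#
#   app.config.from_object('qwapp.defaults')
#   app.config.from_envvar('QWAPP_SETTINGS', silent=True)  # optional override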
| [
"[email protected]"
] | |
3042812bdbd8a115621ce18b49ec5776b9227138 | 3b9d763180410bf0abf5b9c37391a64319efe839 | /toontown/town/TTTownLoader.py | 0780028cd0ae5e5b0b03c022cae3ac05115db2fc | [] | no_license | qphoton/Reverse_Engineering_Project_ToonTown | 442f15d484324be749f6f0e5e4e74fc6436e4e30 | 11468ab449060169191366bc14ff8113ee3beffb | refs/heads/master | 2021-05-08T00:07:09.720166 | 2017-10-21T02:37:22 | 2017-10-21T02:37:22 | 107,617,661 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | # File: T (Python 2.4)
import TownLoader
import TTStreet
from toontown.suit import Suit
class TTTownLoader(TownLoader.TownLoader):
def __init__(self, hood, parentFSM, doneEvent):
TownLoader.TownLoader.__init__(self, hood, parentFSM, doneEvent)
self.streetClass = TTStreet.TTStreet
self.musicFile = 'phase_3.5/audio/bgm/TC_SZ.mid'
self.activityMusicFile = 'phase_3.5/audio/bgm/TC_SZ_activity.mid'
self.townStorageDNAFile = 'phase_5/dna/storage_TT_town.dna'
def load(self, zoneId):
TownLoader.TownLoader.load(self, zoneId)
Suit.loadSuits(1)
dnaFile = 'phase_5/dna/toontown_central_' + str(self.canonicalBranchZone) + '.dna'
self.createHood(dnaFile)
def unload(self):
Suit.unloadSuits(1)
TownLoader.TownLoader.unload(self)
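# Hypothetical construction sketch; hood, parentFSM and doneEvent come from
# the surrounding Toontown loader framework, and 2100 is assumed here to be
# the Toontown Central street zone:
#
#   loader = TTTownLoader(hood, parentFSM, 'loaderDone')
#   loader.load(zoneId=2100)
#   ...
#   loader.unload()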
| [
"[email protected]"
] | |
3459de3607f81b8e3cd2943b8031dbd163d4b650 | 1268030197a27bf2ef5e3f5ab8df38993457fed5 | /run_bot.py | 552b71c22140a9c5e5e54878a65f05870a32fd77 | [] | no_license | parimalpate123/rasa_slack_chatbot | 439abd9a541d6314b46c6fb303c0275803fc9357 | 206aacab62f12be9df9f009f65736caed3e8edac | refs/heads/master | 2020-04-17T14:13:49.917604 | 2019-05-07T11:08:07 | 2019-05-07T11:08:07 | 166,649,129 | 0 | 1 | null | 2019-01-29T11:09:07 | 2019-01-20T10:32:59 | Python | UTF-8 | Python | false | false | 1,112 | py | #import json
from rasa_core.channels.slack import SlackInput
from rasa_core.agent import Agent
from rasa_core.interpreter import RegexInterpreter
from rasa_core.channels import HttpInputChannel
#from rasa_core.utils import EndpointConfig
# load your trained agent
#agent = Agent.load(models\current\dialogue, interpreter=RegexInterpreter())
agent = Agent.load('models/current/dialogue', interpreter='models/current/nlu')
#action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
# `slack_token` is the bot_user_o_auth_access_token; `slack_channel` is the
# name of the channel to which the bot posts (optional).
input_channel = SlackInput(
    slack_token='xoxb-525465834114-525382855891-SYt6HyWl7IfVyhtX19z6jJec',
    slack_channel='@devops',
)
# set serve_forever=True if you want to keep the server running
#agent.handle_channel(HttpInputChannel(5004, "/chat", input_channel))
agent.handle_channel(HttpInputChannel(5004, "", input_channel))
#s = agent.handle_channels([input_channel], 5004, serve_forever=False)
#agent.handle_channels([input_channel], 5004, serve_forever=True)
| [
"[email protected]"
] | |
8f9139a190d429e54bae0fddd3b33a486abcfe40 | 80301f1cffc5afce13256e2ecab6323c5df00194 | /cn.sc/py/T2210.py | a756c221d750cd1c54907be6878f0a7c1118b1b4 | [] | no_license | ZhenjianYang/SoraVoiceScripts | c1ddf7c1bbcb933243754f9669bd6b75777c87b9 | 94a948090aba0f63b10b2c69dc845dc99c822fc4 | refs/heads/master | 2023-04-18T04:54:44.306652 | 2023-04-06T11:15:17 | 2023-04-06T11:15:17 | 103,167,541 | 43 | 11 | null | 2021-03-06T08:52:54 | 2017-09-11T17:36:55 | Python | UTF-8 | Python | false | false | 75,412 | py | from ED6SCScenarioHelper import *
def main():
SetCodePage("gbk")
    # Ruan
CreateScenaFile(
FileName = 'T2210 ._SN',
MapName = 'Ruan',
Location = 'T2210.x',
MapIndex = 1,
MapDefaultBGM = "ed60012",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'弗洛拉', # 9
'多米尼克', # 10
'比古', # 11
'王国军宪兵', # 12
'达里奥', # 13
'索雷诺', # 14
'诺曼市长', # 15
'秘书德尔斯', # 16
'贝尔夫', # 17
'杯子', # 18
'杯子', # 19
'水壶', # 20
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT06/CH20020 ._CH', # 00
'ED6_DT07/CH02540 ._CH', # 01
'ED6_DT07/CH01350 ._CH', # 02
'ED6_DT07/CH01280 ._CH', # 03
'ED6_DT07/CH01300 ._CH', # 04
'ED6_DT07/CH01560 ._CH', # 05
'ED6_DT07/CH01040 ._CH', # 06
'ED6_DT07/CH01200 ._CH', # 07
'ED6_DT07/CH01140 ._CH', # 08
)
AddCharChipPat(
'ED6_DT06/CH20020P._CP', # 00
'ED6_DT07/CH02540P._CP', # 01
'ED6_DT07/CH01350P._CP', # 02
'ED6_DT07/CH01280P._CP', # 03
'ED6_DT07/CH01300P._CP', # 04
'ED6_DT07/CH01560P._CP', # 05
'ED6_DT07/CH01040P._CP', # 06
'ED6_DT07/CH01200P._CP', # 07
'ED6_DT07/CH01140P._CP', # 08
)
DeclNpc(
X = 34540,
Z = 0,
Y = 27220,
Direction = 90,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 7,
)
DeclNpc(
X = -63810,
Z = 0,
Y = 34870,
Direction = 0,
Unknown2 = 0,
Unknown3 = 2,
ChipIndex = 0x2,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 6,
)
DeclNpc(
X = 33500,
Z = 0,
Y = 24400,
Direction = 270,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 5,
)
DeclNpc(
X = 2620,
Z = 0,
Y = 3200,
Direction = 180,
Unknown2 = 0,
Unknown3 = 4,
ChipIndex = 0x4,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 4,
)
DeclNpc(
X = 67820,
Z = -30,
Y = -5200,
Direction = 90,
Unknown2 = 0,
Unknown3 = 5,
ChipIndex = 0x5,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 8,
)
DeclNpc(
X = 800,
Z = 0,
Y = 2100,
Direction = 0,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 9,
)
DeclNpc(
X = -64500,
Z = 0,
Y = 33170,
Direction = 270,
Unknown2 = 0,
Unknown3 = 7,
ChipIndex = 0x7,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 10,
)
DeclNpc(
X = -7500,
Z = 0,
Y = 33230,
Direction = 90,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = 36150,
Z = 0,
Y = 34260,
Direction = 193,
Unknown2 = 0,
Unknown3 = 8,
ChipIndex = 0x8,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 13,
)
DeclNpc(
X = 35510,
Z = 750,
Y = 27280,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1638400,
ChipIndex = 0x0,
NpcIndex = 0x1E6,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 35450,
Z = 750,
Y = 26890,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1638400,
ChipIndex = 0x0,
NpcIndex = 0x1E6,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 35490,
Z = 750,
Y = 26520,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1703936,
ChipIndex = 0x0,
NpcIndex = 0x1E6,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclActor(
TriggerX = -475,
TriggerZ = 0,
TriggerY = 3173,
TriggerRange = 800,
ActorX = -475,
ActorZ = 800,
ActorY = 3173,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 14,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -63800,
TriggerZ = 0,
TriggerY = 50790,
TriggerRange = 900,
ActorX = -63800,
ActorZ = -300,
ActorY = 50790,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 15,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -62370,
TriggerZ = 0,
TriggerY = -43110,
TriggerRange = 500,
ActorX = -62370,
ActorZ = 2000,
ActorY = -43110,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 16,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -59500,
TriggerZ = 250,
TriggerY = -36760,
TriggerRange = 800,
ActorX = -59500,
ActorZ = 1250,
ActorY = -36760,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 17,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_302", # 00, 0
"Function_1_402", # 01, 1
"Function_2_43E", # 02, 2
"Function_3_5BB", # 03, 3
"Function_4_698", # 04, 4
"Function_5_99B", # 05, 5
"Function_6_DEA", # 06, 6
"Function_7_11E1", # 07, 7
"Function_8_163D", # 08, 8
"Function_9_185C", # 09, 9
"Function_10_1A4A", # 0A, 10
"Function_11_20D0", # 0B, 11
"Function_12_24EC", # 0C, 12
"Function_13_296F", # 0D, 13
"Function_14_2FC0", # 0E, 14
"Function_15_306D", # 0F, 15
"Function_16_3077", # 10, 16
"Function_17_3081", # 11, 17
)
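    # The list above is this scenario's function dispatch table: entry N is
    # the bytecode function run when scena function N is invoked; the Init and
    # Talk indices in the NPC/actor declarations above resolve into this table.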
def Function_0_302(): pass
label("Function_0_302")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_3B8")
SetChrFlags(0xB, 0x80)
ClearChrFlags(0xC, 0x80)
ClearChrFlags(0xE, 0x80)
ClearChrFlags(0xF, 0x80)
ClearChrFlags(0x10, 0x80)
SetChrPos(0xA, 33760, 0, 25890, 270)
SetChrPos(0x8, -4550, 0, -4059, 95)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_365")
ClearChrFlags(0xD, 0x80)
SetChrPos(0x9, 67400, 0, 32619, 270)
Jump("loc_3B5")
label("loc_365")
ClearChrFlags(0xD, 0x80)
SetChrPos(0xD, 4070, 0, 35300, 270)
SetChrPos(0x9, -1900, 0, 4450, 90)
SetChrPos(0xF, -61820, 0, 30050, 355)
SetChrPos(0x8, -2750, 0, 42770, 342)
OP_43(0x8, 0x0, 0x0, 0x2)
label("loc_3B5")
Jump("loc_401")
label("loc_3B8")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_3D3")
SetChrPos(0x8, 35530, 0, 34250, 180)
Jump("loc_401")
label("loc_3D3")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_3DD")
Jump("loc_401")
label("loc_3DD")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_3FA")
ClearChrFlags(0x11, 0x80)
ClearChrFlags(0x12, 0x80)
ClearChrFlags(0x13, 0x80)
Jump("loc_401")
label("loc_3FA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_401")
label("loc_401")
Return()
# Function_0_302 end
def Function_1_402(): pass
label("Function_1_402")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_42C")
OP_71(0x0, 0x4)
OP_71(0x1, 0x4)
OP_71(0x2, 0x4)
OP_71(0x3, 0x4)
OP_71(0x4, 0x4)
OP_71(0x5, 0x4)
OP_71(0x6, 0x4)
label("loc_42C")
OP_72(0x10, 0x10)
OP_72(0x10, 0x8)
OP_6F(0x10, 360)
Return()
# Function_1_402 end
def Function_2_43E(): pass
label("Function_2_43E")
RunExpression(0x1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_463")
OP_99(0xFE, 0x0, 0x7, 0x672)
Jump("loc_5A5")
label("loc_463")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_47C")
OP_99(0xFE, 0x1, 0x7, 0x640)
Jump("loc_5A5")
label("loc_47C")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_495")
OP_99(0xFE, 0x2, 0x7, 0x60E)
Jump("loc_5A5")
label("loc_495")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4AE")
OP_99(0xFE, 0x3, 0x7, 0x5DC)
Jump("loc_5A5")
label("loc_4AE")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4C7")
OP_99(0xFE, 0x4, 0x7, 0x5AA)
Jump("loc_5A5")
label("loc_4C7")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4E0")
OP_99(0xFE, 0x5, 0x7, 0x578)
Jump("loc_5A5")
label("loc_4E0")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4F9")
OP_99(0xFE, 0x6, 0x7, 0x546)
Jump("loc_5A5")
label("loc_4F9")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_512")
OP_99(0xFE, 0x0, 0x7, 0x677)
Jump("loc_5A5")
label("loc_512")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_52B")
OP_99(0xFE, 0x1, 0x7, 0x645)
Jump("loc_5A5")
label("loc_52B")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_544")
OP_99(0xFE, 0x2, 0x7, 0x613)
Jump("loc_5A5")
label("loc_544")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_55D")
OP_99(0xFE, 0x3, 0x7, 0x5E1)
Jump("loc_5A5")
label("loc_55D")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_576")
OP_99(0xFE, 0x4, 0x7, 0x5AF)
Jump("loc_5A5")
label("loc_576")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_58F")
OP_99(0xFE, 0x5, 0x7, 0x57D)
Jump("loc_5A5")
label("loc_58F")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_5A5")
OP_99(0xFE, 0x6, 0x7, 0x54B)
label("loc_5A5")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_5BA")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("loc_5A5")
label("loc_5BA")
Return()
# Function_2_43E end
def Function_3_5BB(): pass
label("Function_3_5BB")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_635")
label("loc_5C2")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_632")
OP_8E(0xFE, 0xFFFFEE6C, 0x0, 0xFFFFEAA2, 0x3E8, 0x0)
OP_62(0xFE, 0x0, 2000, 0x8, 0x9, 0xFA, 0x2)
OP_8C(0xFE, 90, 400)
Sleep(3500)
OP_8E(0xFE, 0xFFFFEE6C, 0x0, 0xFFFFF204, 0x3E8, 0x0)
OP_8C(0xFE, 90, 400)
OP_62(0xFE, 0x0, 2000, 0x8, 0x9, 0xFA, 0x2)
Sleep(4500)
Jump("loc_5C2")
label("loc_632")
Jump("loc_697")
label("loc_635")
RunExpression(0x2, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
label("loc_63F")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_697")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
RunExpression(0x2, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x2), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_GE), scpexpr(EXPR_END)), "loc_694")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_68A")
OP_62(0xFE, 0x0, 2000, 0x8, 0x9, 0xFA, 0x2)
label("loc_68A")
RunExpression(0x2, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
label("loc_694")
Jump("loc_63F")
label("loc_697")
Return()
# Function_3_5BB end
def Function_4_698(): pass
label("Function_4_698")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_763")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_END)), "loc_6F6")
ChrTalk( #0
0xFE,
(
"伙食非常美味,\x01",
"不知不觉吃太多了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #1
0xFE,
(
"这样工作下去\x01",
"会不断长胖的啊。\x02",
)
)
CloseMessageWindow()
Jump("loc_760")
label("loc_6F6")
OP_A2(0x3)
ChrTalk( #2
0xFE,
(
"嗯~厨房\x01",
"飘来好香的味道啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #3
0xFE,
(
"这里的厨师做的饭菜\x01",
"非常非常好吃。\x02",
)
)
CloseMessageWindow()
ChrTalk( #4
0xFE,
(
"因此我的皮带\x01",
"都紧起来了。\x02",
)
)
CloseMessageWindow()
label("loc_760")
Jump("loc_997")
label("loc_763")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_81C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_END)), "loc_7B8")
ChrTalk( #5
0xFE,
(
"竟然在自己家里\x01",
"养魔兽……\x02",
)
)
CloseMessageWindow()
ChrTalk( #6
0xFE,
(
"居然还有人\x01",
"会去想这么可怕的事。\x02",
)
)
CloseMessageWindow()
Jump("loc_819")
label("loc_7B8")
OP_A2(0x3)
ChrTalk( #7
0xFE,
(
"房间二楼的\x01",
"秘密魔兽饲养房间……\x02",
)
)
CloseMessageWindow()
ChrTalk( #8
0xFE,
"……看过了吗?\x02",
)
CloseMessageWindow()
ChrTalk( #9
0xFE,
(
"居然还有人\x01",
"会去想这么可怕的事。\x02",
)
)
CloseMessageWindow()
label("loc_819")
Jump("loc_997")
label("loc_81C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_902")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_END)), "loc_88B")
ChrTalk( #10
0xFE,
(
"对这等美术品出手的\x01",
"只可能是绝顶的笨蛋或者天才了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #11
0xFE,
(
"普通的盗贼\x01",
"还是有自知之明的。\x02",
)
)
CloseMessageWindow()
Jump("loc_8FF")
label("loc_88B")
OP_A2(0x3)
ChrTalk( #12
0xFE,
(
"从这里的女佣\x01",
"那里听说……\x02",
)
)
CloseMessageWindow()
ChrTalk( #13
0xFE,
(
"前不久这个烛台\x01",
"被偷走过。\x02",
)
)
CloseMessageWindow()
ChrTalk( #14
0xFE,
(
"好厉害的家伙……不、不,\x01",
"竟然有这么坏的家伙。\x02",
)
)
CloseMessageWindow()
label("loc_8FF")
Jump("loc_997")
label("loc_902")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_997")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_END)), "loc_948")
ChrTalk( #15
0xFE,
(
"所、所以不要\x01",
"在这附近转悠。\x02",
)
)
CloseMessageWindow()
ChrTalk( #16
0xFE,
"我也很紧张呢。\x02",
)
CloseMessageWindow()
Jump("loc_997")
label("loc_948")
OP_A2(0x3)
ChrTalk( #17
0xFE,
(
"这个烛台现在\x01",
"由王国军代为保管。\x02",
)
)
CloseMessageWindow()
ChrTalk( #18
0xFE,
(
"别太靠近。\x01",
"这可是相当贵重的东西。\x02",
)
)
CloseMessageWindow()
label("loc_997")
TalkEnd(0xFE)
Return()
# Function_4_698 end
def Function_5_99B(): pass
label("Function_5_99B")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_A74")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_A13")
ChrTalk( #19
0xFE,
(
"这是使用柴火的暖炉。\x01",
"最近的炊事都靠这个了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #20
0xFE,
(
"本来是想要用火才做的,\x01",
"没想到会这么有用。\x02",
)
)
CloseMessageWindow()
OP_A2(0x2)
Jump("loc_A71")
label("loc_A13")
ChrTalk( #21
0xFE,
(
"这是使用柴火的暖炉。\x01",
"最近的炊事都靠这个了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #22
0xFE,
(
"不过,适用范围\x01",
"稍微窄了点,也没办法了。\x02",
)
)
CloseMessageWindow()
label("loc_A71")
Jump("loc_DE6")
label("loc_A74")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_B55")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_B02")
ChrTalk( #23
0xFE,
(
"管家达里奥\x01",
"回来了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #24
0xFE,
(
"多年服侍戴尔蒙家\x01",
"的同伴都在一起就心安多了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #25
0xFE,
(
"这么快就欠了雇佣我们的\x01",
"新市长的人情了啊。\x02",
)
)
CloseMessageWindow()
OP_A2(0x2)
Jump("loc_B52")
label("loc_B02")
ChrTalk( #26
0xFE,
(
"达里奥那家伙\x01",
"回来了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #27
0xFE,
(
"作为服侍戴尔蒙家的同伴,\x01",
"他回来可让人心安多了。\x02",
)
)
CloseMessageWindow()
label("loc_B52")
Jump("loc_DE6")
label("loc_B55")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_C0D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_END)), "loc_BAC")
ChrTalk( #28
0xFE,
(
"好,差不多\x01",
"该准备午饭了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #29
0xFE,
(
"我为了士兵们\x01",
"加倍卖力的自信作品。\x02",
)
)
CloseMessageWindow()
Jump("loc_C0A")
label("loc_BAC")
OP_A2(0x2)
ChrTalk( #30
0xFE,
(
"今天的伙食\x01",
"是加了橘子调味汁\x01",
"的照烧仔鸡。\x02",
)
)
CloseMessageWindow()
ChrTalk( #31
0xFE,
(
"是我将东方的烹调法\x01",
"加以调整的自信作品。\x02",
)
)
CloseMessageWindow()
label("loc_C0A")
Jump("loc_DE6")
label("loc_C0D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_C6B")
ChrTalk( #32
0xFE,
(
"我也一直在担心\x01",
"达里奥那家伙的事……\x02",
)
)
CloseMessageWindow()
ChrTalk( #33
0xFE,
(
"市长被逮捕之后\x01",
"他的情况确实很奇怪……\x02",
)
)
CloseMessageWindow()
Jump("loc_DE6")
label("loc_C6B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_D19")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_END)), "loc_CBD")
ChrTalk( #34
0xFE,
(
"士兵们也不挑食,\x01",
"吃得都很多。\x02",
)
)
CloseMessageWindow()
ChrTalk( #35
0xFE,
"嗯,也算做得值得了。\x02",
)
CloseMessageWindow()
Jump("loc_D16")
label("loc_CBD")
OP_A2(0x2)
ChrTalk( #36
0xFE,
(
"我现在负责佣人和士兵们\x01",
"的伙食。\x02",
)
)
CloseMessageWindow()
ChrTalk( #37
0xFE,
(
"在这房子也待了很久了。\x01",
"就让我效劳到最后吧。\x02",
)
)
CloseMessageWindow()
label("loc_D16")
Jump("loc_DE6")
label("loc_D19")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_DE6")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 2)), scpexpr(EXPR_END)), "loc_D74")
ChrTalk( #38
0xFE,
(
"不管怎么说,\x01",
"我一直在服侍戴尔蒙家。\x02",
)
)
CloseMessageWindow()
ChrTalk( #39
0xFE,
(
"家道没落了,\x01",
"还真是可惜啊。\x02",
)
)
CloseMessageWindow()
Jump("loc_DE6")
label("loc_D74")
OP_A2(0x2)
ChrTalk( #40
0xFE,
(
"戴尔蒙市长确实\x01",
"做了坏事……\x02",
)
)
CloseMessageWindow()
ChrTalk( #41
0xFE,
(
"但我和管家达里奥\x01",
"都服侍戴尔蒙家多年了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #42
0xFE,
(
"家道没落了,\x01",
"实在是难过啊。\x02",
)
)
CloseMessageWindow()
label("loc_DE6")
TalkEnd(0xFE)
Return()
# Function_5_99B end
def Function_6_DEA(): pass
label("Function_6_DEA")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_E4B")
ChrTalk( #43
0xFE,
(
"没有了导力器的光,\x01",
"这个烛台也真可怜。\x02",
)
)
CloseMessageWindow()
ChrTalk( #44
0xFE,
(
"就跟被导力文明所装点的\x01",
"我们一样……\x02",
)
)
CloseMessageWindow()
Jump("loc_11DD")
label("loc_E4B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_F2C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_EDD")
ChrTalk( #45
0xFE,
(
"达里奥也完全\x01",
"恢复状态了呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #46
0xFE,
(
"有段时间还形迹可疑,\x01",
"让人感觉诡异呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #47
0xFE,
(
"无论如何,有个熟悉这里\x01",
"的人在真是帮大忙了。\x02",
)
)
CloseMessageWindow()
OP_A2(0x1)
Jump("loc_F29")
label("loc_EDD")
ChrTalk( #48
0xFE,
(
"达里奥也完全\x01",
"恢复状态了呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #49
0xFE,
(
"有段时间还形迹可疑,\x01",
"让人感觉诡异呢。\x02",
)
)
CloseMessageWindow()
label("loc_F29")
Jump("loc_11DD")
label("loc_F2C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_FC6")
ChrTalk( #50
0xFE,
(
"根据市长选举的结果\x01",
"找工作的方针也要发生变化。\x02",
)
)
CloseMessageWindow()
ChrTalk( #51
0xFE,
(
"如果诺曼获胜就找旅游相关职业,\x01",
"要是波尔多斯就去港口酒馆。\x02",
)
)
CloseMessageWindow()
ChrTalk( #52
0xFE,
"哼哼,完美的再就业计划。\x02",
)
CloseMessageWindow()
Jump("loc_11DD")
label("loc_FC6")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_FF7")
ChrTalk( #53
0xFE,
(
"外面好像\x01",
"很吵闹……\x02",
)
)
CloseMessageWindow()
ChrTalk( #54
0xFE,
"怎么啦?\x02",
)
CloseMessageWindow()
Jump("loc_11DD")
label("loc_FF7")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_1131")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_END)), "loc_1062")
ChrTalk( #55
0xFE,
(
"事件之后,管家达里奥\x01",
"好像变得很奇怪呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #56
0xFE,
(
"戴尔蒙市长被逮捕\x01",
"似乎让他相当震惊。\x02",
)
)
CloseMessageWindow()
Jump("loc_112E")
label("loc_1062")
OP_A2(0x1)
ChrTalk( #57
0xFE,
(
"最近,这里的旧管家\x01",
"达里奥好像都不见人影呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #58
0xFE,
(
"说到底,那个人\x01",
"在市长被逮捕以后\x01",
"就变得有点奇怪了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #59
0xFE,
(
"『还有一个我!』什么的\x01",
"都说出来了,真是不太妙。\x02",
)
)
CloseMessageWindow()
ChrTalk( #60
0xFE,
(
"戴尔蒙市长被逮捕\x01",
"看来让他相当震惊吧。\x02",
)
)
CloseMessageWindow()
label("loc_112E")
Jump("loc_11DD")
label("loc_1131")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_11DD")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_END)), "loc_1194")
ChrTalk( #61
0xFE,
(
"军队管理期间还好,\x01",
"此后会变成怎样呢?\x02",
)
)
CloseMessageWindow()
ChrTalk( #62
0xFE,
(
"趁现在找到下一份工作\x01",
"会比较好吧。\x02",
)
)
CloseMessageWindow()
Jump("loc_11DD")
label("loc_1194")
OP_A2(0x1)
ChrTalk( #63
0xFE,
(
"军队管理期间还好,\x01",
"此后会变成怎样呢?\x02",
)
)
CloseMessageWindow()
ChrTalk( #64
0xFE,
(
"佣人还是\x01",
"会被解雇吧。\x02",
)
)
CloseMessageWindow()
label("loc_11DD")
TalkEnd(0xFE)
Return()
# Function_6_DEA end
def Function_7_11E1(): pass
label("Function_7_11E1")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_1236")
ChrTalk( #65
0xFE,
(
"除尘器也不能使用\x01",
"扫除可辛苦了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #66
0xFE,
(
"呼,这栋房子\x01",
"竟然这么宽广啊~\x02",
)
)
CloseMessageWindow()
Jump("loc_1639")
label("loc_1236")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_1340")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_12E5")
ChrTalk( #67
0xFE,
(
"啊,欢迎~\x01",
"欢迎光临市长官邸。\x02",
)
)
CloseMessageWindow()
ChrTalk( #68
0xFE,
(
"我们大家全部\x01",
"都被新市长雇佣了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #69
0xFE,
(
"导力器不能使用,\x01",
"做家务虽然辛苦点……\x02",
)
)
CloseMessageWindow()
ChrTalk( #70
0xFE,
(
"但大家一起努力\x01",
"一定能渡过难关的。\x02",
)
)
CloseMessageWindow()
OP_A2(0x0)
Jump("loc_133D")
label("loc_12E5")
ChrTalk( #71
0xFE,
(
"我们大家全部\x01",
"都被新市长雇佣了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #72
0xFE,
(
"达里奥先生也回来了,\x01",
"这下一切都恢复原样了吧⊙\x02",
)
)
CloseMessageWindow()
label("loc_133D")
Jump("loc_1639")
label("loc_1340")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x280, 0)), scpexpr(EXPR_END)), "loc_13D0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_1386")
ChrTalk( #73
0xFE,
(
"我虽然想在这房子里\x01",
"工作……\x02",
)
)
CloseMessageWindow()
ChrTalk( #74
0xFE,
"但还是很难吧。\x02",
)
CloseMessageWindow()
Jump("loc_13CD")
label("loc_1386")
OP_A2(0x0)
ChrTalk( #75
0xFE,
(
"多米尼克已经\x01",
"在找下一份工作了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #76
0xFE,
(
"这样一来\x01",
"我也着急起来了。\x02",
)
)
CloseMessageWindow()
label("loc_13CD")
Jump("loc_1639")
label("loc_13D0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 5)), scpexpr(EXPR_END)), "loc_14AA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_141A")
ChrTalk( #77
0xFE,
(
"最近一直没见着\x01",
"管家达里奥的身影。\x02",
)
)
CloseMessageWindow()
ChrTalk( #78
0xFE,
"怎么回事呢。\x02",
)
CloseMessageWindow()
Jump("loc_14A7")
label("loc_141A")
OP_A2(0x0)
ChrTalk( #79
0xFE,
(
"扫除的时候和多米尼克\x01",
"聊天来着……\x02",
)
)
CloseMessageWindow()
ChrTalk( #80
0xFE,
(
"最近一直没见着\x01",
"管家达里奥的身影。\x02",
)
)
CloseMessageWindow()
ChrTalk( #81
0xFE,
(
"事件之后\x01",
"情况就很奇怪……\x02",
)
)
CloseMessageWindow()
ChrTalk( #82
0xFE,
(
"达里奥到底\x01",
"怎么回事呢。\x02",
)
)
CloseMessageWindow()
label("loc_14A7")
Jump("loc_1639")
label("loc_14AA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x242, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x243, 2)), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_153D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_14F9")
ChrTalk( #83
0xFE,
(
"啦啦~啦⊙\x01",
"噜噜噜噜~⊙\x02",
)
)
CloseMessageWindow()
ChrTalk( #84
0xFE,
(
"士兵先生们\x01",
"人都很好呢~\x02",
)
)
CloseMessageWindow()
Jump("loc_153A")
label("loc_14F9")
OP_A2(0x0)
ChrTalk( #85
0xFE,
(
"啦啦~啦⊙\x01",
"噜噜噜噜~⊙\x02",
)
)
CloseMessageWindow()
ChrTalk( #86
0xFE,
(
"我正在准备\x01",
"给士兵们的茶呢。\x02",
)
)
CloseMessageWindow()
label("loc_153A")
Jump("loc_1639")
label("loc_153D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x240, 6)), scpexpr(EXPR_END)), "loc_1639")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_15A6")
ChrTalk( #87
0xFE,
(
"老爷被逮捕的时候\x01",
"还在想会变成怎样……\x02",
)
)
CloseMessageWindow()
ChrTalk( #88
0xFE,
(
"看来暂时还能和以前一样\x01",
"在这里生活下去。\x02",
)
)
CloseMessageWindow()
Jump("loc_1639")
label("loc_15A6")
OP_A2(0x0)
ChrTalk( #89
0xFE,
(
"现在,这栋房子\x01",
"由王国军管理哦。\x02",
)
)
CloseMessageWindow()
ChrTalk( #90
0xFE,
(
"为了维持宅邸的管理。\x01",
"我们佣人们\x01",
"也维持原样被雇佣了下来。\x02",
)
)
CloseMessageWindow()
ChrTalk( #91
0xFE,
(
"嘿嘿,幸好军队的士兵们\x01",
"都是和善的好人。\x02",
)
)
CloseMessageWindow()
label("loc_1639")
TalkEnd(0xFE)
Return()
# Function_7_11E1 end
def Function_8_163D(): pass
label("Function_8_163D")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_174C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1705")
ChrTalk( #92
0xFE,
(
"如此非常时期\x01",
"竟然又发生事件……\x02",
)
)
CloseMessageWindow()
ChrTalk( #93
0xFE,
(
"最近这世道\x01",
"是怎么回事呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #94
0xFE,
(
"想来前市长的事件\x01",
"也是难以理解……\x02",
)
)
CloseMessageWindow()
ChrTalk( #95
0xFE,
"……不,没什么好说的。\x02",
)
CloseMessageWindow()
ChrTalk( #96
0xFE,
(
"前市长的过失是事实。\x01",
"有罪就要认罪。\x02",
)
)
CloseMessageWindow()
OP_A2(0x4)
Jump("loc_1749")
label("loc_1705")
ChrTalk( #97
0xFE,
(
"如此非常时期\x01",
"竟然又发生事件……\x02",
)
)
CloseMessageWindow()
ChrTalk( #98
0xFE,
(
"到底这世道\x01",
"是怎么回事呢。\x02",
)
)
CloseMessageWindow()
label("loc_1749")
Jump("loc_1858")
label("loc_174C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_1858")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1804")
ChrTalk( #99
0xFE,
(
"我是在戴尔蒙家\x01",
"服侍多年的人……\x02",
)
)
CloseMessageWindow()
ChrTalk( #100
0xFE,
(
"受新市长的委托,\x01",
"我作为这个市长官邸的管家\x01",
"重新回到这里了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #101
0xFE,
(
"我当作重获新生\x01",
"诚心诚意来服侍。\x02",
)
)
CloseMessageWindow()
ChrTalk( #102
0xFE,
(
"任何事都\x01",
"敬请吩咐。\x02",
)
)
CloseMessageWindow()
OP_A2(0x4)
Jump("loc_1858")
label("loc_1804")
ChrTalk( #103
0xFE,
(
"作为市长官邸的管家\x01",
"又回到宅邸了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #104
0xFE,
(
"能和同伴们一起工作的幸福\x01",
"我要牢牢抓住。\x02",
)
)
CloseMessageWindow()
label("loc_1858")
TalkEnd(0xFE)
Return()
# Function_8_163D end
def Function_9_185C(): pass
label("Function_9_185C")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_194A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_18F3")
ChrTalk( #105
0xFE,
(
"我面见了市长,\x01",
"请求对玛诺利亚紧急支持……\x02",
)
)
CloseMessageWindow()
ChrTalk( #106
0xFE,
(
"但是卢安市\x01",
"好像情况也相当严峻。\x02",
)
)
CloseMessageWindow()
ChrTalk( #107
0xFE,
(
"诺曼市长的严肃表情\x01",
"完全说明了这一点。\x02",
)
)
CloseMessageWindow()
OP_A2(0x5)
Jump("loc_1947")
label("loc_18F3")
ChrTalk( #108
0xFE,
(
"已经请求支持村子,\x01",
"但是卢安的状况也很严峻啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #109
0xFE,
(
"诺曼市长\x01",
"看起来也相当疲劳。\x02",
)
)
CloseMessageWindow()
label("loc_1947")
Jump("loc_1A46")
label("loc_194A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_1A46")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_19E6")
ChrTalk( #110
0xFE,
(
"作为玛诺利亚村的村长代理,\x01",
"我是来向卢安市长请愿的。\x02",
)
)
CloseMessageWindow()
ChrTalk( #111
0xFE,
(
"需要尽早请求食品和燃料\x01",
"的支援啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #112
0xFE,
(
"好了,立刻去跟新市长\x01",
"打个招呼吧。\x02",
)
)
CloseMessageWindow()
OP_A2(0x5)
Jump("loc_1A46")
label("loc_19E6")
ChrTalk( #113
0xFE,
(
"作为玛诺利亚村的村长代理,\x01",
"我是来向卢安市长请愿的。\x02",
)
)
CloseMessageWindow()
ChrTalk( #114
0xFE,
(
"需要尽早请求食品和燃料\x01",
"的支援啊。\x02",
)
)
CloseMessageWindow()
label("loc_1A46")
TalkEnd(0xFE)
Return()
# Function_9_185C end
def Function_10_1A4A(): pass
label("Function_10_1A4A")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_1DC2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x417, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1C7D")
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x69, 0x0, 0x10)"), scpexpr(EXPR_END)), "loc_1AB7")
OP_62(0xE, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(1000)
ChrTalk( #115
0xFE,
"哦哦,是你们……\x02",
)
CloseMessageWindow()
ChrTalk( #116
0xFE,
(
"刚刚收到学院事件\x01",
"的报告呢。\x02",
)
)
CloseMessageWindow()
Jump("loc_1AFD")
label("loc_1AB7")
ChrTalk( #117
0xFE,
(
"哦哦……\x01",
"你们就是那些游击士吗?\x02",
)
)
CloseMessageWindow()
ChrTalk( #118
0xFE,
(
"正好收到学院事件\x01",
"的报告呢。\x02",
)
)
CloseMessageWindow()
label("loc_1AFD")
ChrTalk( #119
0x101,
"#1011F哦~消息真灵通啊。\x02",
)
CloseMessageWindow()
ChrTalk( #120
0x102,
(
"#1040F是嘉恩先生\x01",
"告知的吗?\x02",
)
)
CloseMessageWindow()
ChrTalk( #121
0xFE,
(
"啊啊,从协会\x01",
"来了使者……\x02",
)
)
CloseMessageWindow()
ChrTalk( #122
0xFE,
(
"哦,这么说来\x01",
"还没打招呼呢。\x02",
)
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x69, 0x0, 0x10)"), scpexpr(EXPR_END)), "loc_1C03")
ChrTalk( #123
0xFE,
(
"和以前见面时相比\x01",
"我的立场也发生了变化呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #124
0x101,
"#1000F啊,是哦。\x02",
)
CloseMessageWindow()
ChrTalk( #125
0xFE,
(
"我是这次就任新市长\x01",
"的诺曼。\x02",
)
)
CloseMessageWindow()
ChrTalk( #126
0xFE,
"今后请多关照。\x02",
)
CloseMessageWindow()
Jump("loc_1C3A")
label("loc_1C03")
ChrTalk( #127
0xFE,
(
"我是这次就任新市长\x01",
"的诺曼。\x02",
)
)
CloseMessageWindow()
ChrTalk( #128
0xFE,
"以后请多关照了。\x02",
)
CloseMessageWindow()
label("loc_1C3A")
ChrTalk( #129
0x101,
"#1000F哪里哪里,彼此彼此。\x02",
)
CloseMessageWindow()
ChrTalk( #130
0x102,
"#1040F恭喜您当选市长。\x02",
)
CloseMessageWindow()
Call(0, 11)
Jump("loc_1DBF")
label("loc_1C7D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_1CBF")
ChrTalk( #131
0xFE,
(
"我们也\x01",
"下定了决心。\x02",
)
)
CloseMessageWindow()
ChrTalk( #132
0xFE,
(
"希望能尽快\x01",
"解决这个情况。\x02",
)
)
CloseMessageWindow()
Jump("loc_1DBF")
label("loc_1CBF")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1D44")
ChrTalk( #133
0xFE,
(
"关于学院的事件\x01",
"刚刚才收到报告。\x02",
)
)
CloseMessageWindow()
ChrTalk( #134
0xFE,
(
"勤务员实在可怜,\x01",
"不过据说平安解决了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #135
0xFE,
(
"代表市民,也让我\x01",
"重新表示感谢吧。\x02",
)
)
CloseMessageWindow()
OP_A2(0x6)
Jump("loc_1DBF")
label("loc_1D44")
ChrTalk( #136
0xFE,
(
"关于学院的事件\x01",
"刚刚收到报告呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #137
0xFE,
(
"这种非常时期的占据事件\x01",
"实在是令人难以置信的暴行。\x02",
)
)
CloseMessageWindow()
ChrTalk( #138
0xFE,
(
"犯人们应该\x01",
"受到严惩才行。\x02",
)
)
CloseMessageWindow()
label("loc_1DBF")
Jump("loc_20CC")
label("loc_1DC2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_20CC")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x417, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2030")
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x69, 0x0, 0x10)"), scpexpr(EXPR_END)), "loc_1F72")
OP_62(0xE, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(1000)
ChrTalk( #139
0xFE,
"哦哦,是你们……\x02",
)
CloseMessageWindow()
ChrTalk( #140
0xFE,
(
"选举中的酒店事件时\x01",
"承蒙关照。\x02",
)
)
CloseMessageWindow()
ChrTalk( #141
0x101,
(
"#1016F啊~那个事件啊。\x02\x03",
"嗯,记得很~清楚哦。\x01",
"你的头还撞在门上。\x02",
)
)
CloseMessageWindow()
OP_62(0x102, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1)
OP_22(0x31, 0x0, 0x64)
Sleep(1000)
ChrTalk( #142
0x102,
"#1048F什么?那个事件……\x02",
)
CloseMessageWindow()
TurnDirection(0xFE, 0x101, 400)
ChrTalk( #143
0xFE,
"哎呀,真是丢脸……\x02",
)
CloseMessageWindow()
ChrTalk( #144
0xFE,
(
"不管怎样,趁此机会\x01",
"请让我重新自我介绍一下。\x02",
)
)
CloseMessageWindow()
ChrTalk( #145
0xFE,
(
"和以前见面时相比\x01",
"我的立场也发生了变化呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #146
0x101,
"#1011F啊,是哦。\x02",
)
CloseMessageWindow()
ChrTalk( #147
0xFE,
"我是就任新市长的诺曼。\x02",
)
CloseMessageWindow()
ChrTalk( #148
0xFE,
"今后请多关照。\x02",
)
CloseMessageWindow()
Jump("loc_1FED")
label("loc_1F72")
ChrTalk( #149
0xFE,
(
"唔……\x01",
"你们是游击士吧。\x02",
)
)
CloseMessageWindow()
ChrTalk( #150
0xFE,
(
"虽然不是初次见面,\x01",
"请让我重新自我介绍一下。\x02",
)
)
CloseMessageWindow()
ChrTalk( #151
0xFE,
(
"我是就任新市长的诺曼。\x01",
"以后也请多关照。\x02",
)
)
CloseMessageWindow()
label("loc_1FED")
ChrTalk( #152
0x101,
"#1000F哪里哪里,彼此彼此。\x02",
)
CloseMessageWindow()
ChrTalk( #153
0x102,
"#1040F恭喜您当选市长。\x02",
)
CloseMessageWindow()
Call(0, 11)
Jump("loc_20CC")
label("loc_2030")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 7)), scpexpr(EXPR_END)), "loc_207E")
ChrTalk( #154
0xFE,
(
"我们也\x01",
"下定了决心。\x02",
)
)
CloseMessageWindow()
ChrTalk( #155
0xFE,
(
"祈祷事件能尽快解决,\x01",
"期待诸位的表现。\x02",
)
)
CloseMessageWindow()
Jump("loc_20CC")
label("loc_207E")
ChrTalk( #156
0xFE,
(
"总之市民生活的稳定\x01",
"可以说是当前的课题。\x02",
)
)
CloseMessageWindow()
ChrTalk( #157
0xFE,
(
"为此现在正在\x01",
"寻求各方援助。\x02",
)
)
CloseMessageWindow()
label("loc_20CC")
TalkEnd(0xFE)
Return()
# Function_10_1A4A end
def Function_11_20D0(): pass
label("Function_11_20D0")
TurnDirection(0xFE, 0x102, 400)
ChrTalk( #158
0xFE,
"唔,谢谢。\x02",
)
CloseMessageWindow()
ChrTalk( #159
0xFE,
(
"不过,遗憾的是还不到\x01",
"沉浸在胜利中的时候……\x02",
)
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_215E")
ChrTalk( #160
0x106,
(
"#552F啊啊,正是。\x02\x03",
"您刚刚就任,也真是多灾多难。\x02",
)
)
CloseMessageWindow()
Jump("loc_21F0")
label("loc_215E")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x2)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_21A4")
ChrTalk( #161
0x103,
(
"#025F嗯嗯,我们明白。\x02\x03",
"您刚刚就任就碰到这些事。\x02",
)
)
CloseMessageWindow()
Jump("loc_21F0")
label("loc_21A4")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x7)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_21F0")
ChrTalk( #162
0x108,
(
"#074F唔,实在让您伤脑筋了啊。\x02\x03",
"刚刚就任\x01",
"就碰到这些难题。\x02",
)
)
CloseMessageWindow()
label("loc_21F0")
ChrTalk( #163
0xFE,
(
"说实话,\x01",
"真是无从下手啊……\x02",
)
)
CloseMessageWindow()
ChrTalk( #164
0xFE,
(
"当初的混乱虽然收拾了,\x01",
"但是导力器还是没恢复原状。\x02",
)
)
CloseMessageWindow()
ChrTalk( #165
0xFE,
(
"这种时候只能努力\x01",
"支援市民的生活了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #166
0x102,
(
"#1043F但是,就现在而言\x01",
"这是最好的对策。\x02\x03",
"遗憾的是事态的解决\x01",
"可能还要花费一些时间。\x02",
)
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x7)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2334")
ChrTalk( #167
0x108,
(
"#072F确实不是一朝一夕\x01",
"就能解决的事件啊。\x02\x03",
"为了防止长期延续\x01",
"需要更有效的对策。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x108, 400)
Jump("loc_2407")
label("loc_2334")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x2)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_23A1")
ChrTalk( #168
0x103,
(
"#022F是啊,这不是一朝一夕\x01",
"就能解决的事件。\x02\x03",
"为了防止长期延续\x01",
"需要更有效的对策。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x103, 400)
Jump("loc_2407")
label("loc_23A1")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2407")
ChrTalk( #169
0x106,
(
"#050F确实不是一天两天\x01",
"就能解决的事件。\x02\x03",
"考虑到事态的延续\x01",
"需要更有效的对策。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x106, 400)
label("loc_2407")
ChrTalk( #170
0xFE,
"唔,果然是这样吗。\x02",
)
CloseMessageWindow()
ChrTalk( #171
0xFE,
(
"作为新市长的首次工作来说\x01",
"略感负担沉重……\x02",
)
)
CloseMessageWindow()
ChrTalk( #172
0xFE,
(
"为了不负女神的期待,\x01",
"只有想办法努力克服了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #173
0xFE,
(
"祈祷事件能尽快解决,\x01",
"期待诸位的表现。\x02",
)
)
CloseMessageWindow()
ChrTalk( #174
0x101,
(
"#1006F嗯……\x01",
"市长也要加油。\x02",
)
)
CloseMessageWindow()
ChrTalk( #175
0x102,
"#1040F我们会尽力的!\x02",
)
CloseMessageWindow()
OP_A2(0x7)
OP_A2(0x20BC)
Return()
# Function_11_20D0 end
def Function_12_24EC(): pass
label("Function_12_24EC")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_296B")
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x69, 0x0, 0x10)"), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x417, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_26EB")
ChrTalk( #176
0xFE,
"啊,游击士……\x02",
)
CloseMessageWindow()
ChrTalk( #177
0xFE,
(
"那、那个……\x01",
"前几天承蒙关照了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #178
0x101,
(
"#1000F啊~还以为是谁呢,\x01",
"是宾馆事件的受害者吧。\x02\x03",
"……撞到的头已经不要紧了吗?\x02",
)
)
CloseMessageWindow()
ChrTalk( #179
0xFE,
"托你的福已经完全好了。\x02",
)
CloseMessageWindow()
ChrTalk( #180
0xFE,
(
"那,今天\x01",
"来市长官邸有什么事吗?\x02",
)
)
CloseMessageWindow()
ChrTalk( #181
0x101,
"#1000F嗯,其实也没什么……\x02",
)
CloseMessageWindow()
ChrTalk( #182
0x102,
(
"#1040F请不用在意。\x01",
"只是来看看情况的。\x02",
)
)
CloseMessageWindow()
ChrTalk( #183
0xFE,
(
"算是所谓的市内巡查吧?\x01",
"一直执行任务真是辛苦了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #184
0xFE,
(
"那么,有什么事的话\x01",
"请尽管开口。\x02",
)
)
CloseMessageWindow()
ChrTalk( #185
0xFE,
(
"我至少也算是\x01",
"市长秘书嘛。\x02",
)
)
CloseMessageWindow()
ChrTalk( #186
0x101,
(
"#1000F哦~这样啊。\x02\x03",
"那么,到时候就请多关照了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #187
0xFE,
"啊啊,请不必客气。\x02",
)
CloseMessageWindow()
OP_A2(0x9)
OP_A2(0x20BD)
Jump("loc_296B")
label("loc_26EB")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 1)), scpexpr(EXPR_END)), "loc_2739")
ChrTalk( #188
0xFE,
(
"别看我这样,\x01",
"至少也是市长秘书呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #189
0xFE,
(
"有什么事情\x01",
"请尽管吩咐。\x02",
)
)
CloseMessageWindow()
Jump("loc_296B")
label("loc_2739")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_284B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_27F4")
ChrTalk( #190
0xFE,
(
"关于学院的事件\x01",
"刚刚收到了报告。\x02",
)
)
CloseMessageWindow()
ChrTalk( #191
0xFE,
(
"听说平安解决了,\x01",
"我和市长总算都放心了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #192
0xFE,
(
"现在正忙着做\x01",
"发放宣传的准备呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #193
0xFE,
(
"不管怎样得发出消息\x01",
"让大家感到安心才行呢。\x02",
)
)
CloseMessageWindow()
OP_A2(0x8)
Jump("loc_2848")
label("loc_27F4")
ChrTalk( #194
0xFE,
(
"关于学院的事件\x01",
"刚刚收到了报告呢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #195
0xFE,
(
"听说平安解决了,\x01",
"我和市长总算都放心了。\x02",
)
)
CloseMessageWindow()
label("loc_2848")
Jump("loc_296B")
label("loc_284B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2915")
ChrTalk( #196
0xFE,
(
"呼,应付市民的意见\x01",
"总算告一段落了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #197
0xFE,
(
"众多的市民一时间全涌过来,\x01",
"那时候这里也够辛苦的。\x02",
)
)
CloseMessageWindow()
ChrTalk( #198
0xFE,
(
"但是,导力器的问题\x01",
"还没有解决的头绪。\x02",
)
)
CloseMessageWindow()
ChrTalk( #199
0xFE,
(
"总之现在光是支持市民生活\x01",
"就已经竭尽全力了。\x02",
)
)
CloseMessageWindow()
OP_A2(0x8)
Jump("loc_296B")
label("loc_2915")
ChrTalk( #200
0xFE,
(
"导力器的问题\x01",
"还没有解决的头绪。\x02",
)
)
CloseMessageWindow()
ChrTalk( #201
0xFE,
(
"总之现在光是支持市民生活\x01",
"就已经竭尽全力了。\x02",
)
)
CloseMessageWindow()
label("loc_296B")
TalkEnd(0xFE)
Return()
# Function_12_24EC end
def Function_13_296F(): pass
label("Function_13_296F")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x400, 0)), scpexpr(EXPR_END)), "loc_2FBC")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x417, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2D55")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2A60")
TurnDirection(0xFE, 0x106, 0)
OP_62(0xFE, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(400)
ChrTalk( #202
0xFE,
"咦,阿加特先生……\x02",
)
CloseMessageWindow()
ChrTalk( #203
0x106,
(
"#051F哦,好久不见了啊。\x02\x03",
"看来还是\x01",
"很有精神嘛。\x02",
)
)
CloseMessageWindow()
ChrTalk( #204
0xFE,
"哈哈,托你的福……\x02",
)
CloseMessageWindow()
ChrTalk( #205
0xFE,
(
"老爸当了市长,\x01",
"我就来帮他的忙了。\x02",
)
)
CloseMessageWindow()
ChrTalk( #206
0xFE,
(
"这种情况下,\x01",
"很多事都需要忙呢。\x02",
)
)
CloseMessageWindow()
Jump("loc_2B57")
label("loc_2A60")
ChrTalk( #207
0xFE,
"咦,你们是……\x02",
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x2)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2ABC")
ChrTalk( #208
0x101,
"#1000F啊,好久不见。\x02",
)
CloseMessageWindow()
ChrTalk( #209
0x103,
"#021F呵呵,很有精神嘛。\x02",
)
CloseMessageWindow()
Jump("loc_2AE6")
label("loc_2ABC")
ChrTalk( #210
0x101,
(
"#1000F啊,好久不见。\x02\x03",
"怎样?还好吧?\x02",
)
)
CloseMessageWindow()
label("loc_2AE6")
TurnDirection(0xFE, 0x101, 400)
ChrTalk( #211
0xFE,
"哈哈,托你的福还算好。\x02",
)
CloseMessageWindow()
ChrTalk( #212
0xFE,
(
"老爸当了市长,\x01",
"我现在正在帮他的忙。\x02",
)
)
CloseMessageWindow()
ChrTalk( #213
0xFE,
(
"这种情况下,\x01",
"很多事都需要忙呢。\x02",
)
)
CloseMessageWindow()
label("loc_2B57")
ChrTalk( #214
0x101,
(
"#1011F哦~这可是\x01",
"正经的工作呢。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x101, 400)
ChrTalk( #215
0xFE,
(
"嗯,现在就和\x01",
"打工差不多。\x02",
)
)
CloseMessageWindow()
ChrTalk( #216
0xFE,
(
"不过着急也不是办法,\x01",
"我打算脚踏实地地努力看看。\x02",
)
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2C51")
ChrTalk( #217
0x106,
(
"#051F有这觉悟就没问题了。\x02\x03",
"……好好干哦。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x106, 400)
ChrTalk( #218
0xFE,
(
"是,是。\x01",
"非常感谢。\x02",
)
)
CloseMessageWindow()
ChrTalk( #219
0xFE,
"阿加特先生也多保重。\x02",
)
CloseMessageWindow()
Jump("loc_2D4C")
label("loc_2C51")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x2)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2CDC")
ChrTalk( #220
0x103,
(
"#020F嗯嗯,有这觉悟就\x01",
"一定没问题了。\x02\x03",
"那么,好好干哦。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xFE, 0x103, 400)
ChrTalk( #221
0xFE,
"嗯、嗯,我会努力的。\x02",
)
CloseMessageWindow()
ChrTalk( #222
0xFE,
(
"那么,\x01",
"你们也多加小心。\x02",
)
)
CloseMessageWindow()
Jump("loc_2D4C")
label("loc_2CDC")
ChrTalk( #223
0x101,
(
"#1006F有这觉悟就\x01",
"一定没问题了。\x02\x03",
"那么,加油工作哦。\x02",
)
)
CloseMessageWindow()
ChrTalk( #224
0xFE,
"嗯、嗯,我会努力的。\x02",
)
CloseMessageWindow()
ChrTalk( #225
0xFE,
(
"那么,\x01",
"你们也多加小心。\x02",
)
)
CloseMessageWindow()
label("loc_2D4C")
OP_A2(0xB)
OP_A2(0x20BE)
Jump("loc_2FBC")
label("loc_2D55")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 3)), scpexpr(EXPR_END)), "loc_2E0A")
Jc((scpexpr(EXPR_EXEC_OP, "OP_42(0x5)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_2DC3")
ChrTalk( #226
0xFE,
(
"总之我打算\x01",
"脚踏实地的努力看看。\x02",
)
)
CloseMessageWindow()
ChrTalk( #227
0xFE,
(
"这种非常时期,\x01",
"阿加特先生你们也要多加小心。\x02",
)
)
CloseMessageWindow()
Jump("loc_2E07")
label("loc_2DC3")
ChrTalk( #228
0xFE,
(
"总之我打算\x01",
"脚踏实地的努力看看。\x02",
)
)
CloseMessageWindow()
ChrTalk( #229
0xFE,
(
"那么,\x01",
"你们也要多加小心。\x02",
)
)
CloseMessageWindow()
label("loc_2E07")
Jump("loc_2FBC")
label("loc_2E0A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x405, 7)), scpexpr(EXPR_END)), "loc_2EF9")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2E9A")
ChrTalk( #230
0xFE,
(
"学院的事件……\x01",
"从准游击士那里听说啦。\x02",
)
)
CloseMessageWindow()
ChrTalk( #231
0xFE,
(
"这种时候还发生人质事件,\x01",
"真是受不了啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #232
0xFE,
(
"犯人真是的,\x01",
"到底在想什么呢。\x02",
)
)
CloseMessageWindow()
OP_A2(0xA)
Jump("loc_2EF6")
label("loc_2E9A")
ChrTalk( #233
0xFE,
(
"这种时候还发生人质事件,\x01",
"真是受不了啊。\x02",
)
)
CloseMessageWindow()
ChrTalk( #234
0xFE,
(
"大家都在齐心协力的时候,\x01",
"真是不能原谅啊。\x02",
)
)
CloseMessageWindow()
label("loc_2EF6")
Jump("loc_2FBC")
label("loc_2EF9")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x1, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2F79")
ChrTalk( #235
0xFE,
"我老爸也因为各种事忙得不得了。\x02",
)
CloseMessageWindow()
ChrTalk( #236
0xFE,
(
"应付市民的意见、\x01",
"食品和医药品的确保……\x02",
)
)
CloseMessageWindow()
ChrTalk( #237
0xFE,
(
"真是,要做的事\x01",
"多得堆成山。\x02",
)
)
CloseMessageWindow()
OP_A2(0xA)
Jump("loc_2FBC")
label("loc_2F79")
ChrTalk( #238
0xFE,
"我老爸也因为各种事忙得不得了。\x02",
)
CloseMessageWindow()
ChrTalk( #239
0xFE,
(
"为什么这么喜欢\x01",
"当市长呢。\x02",
)
)
CloseMessageWindow()
label("loc_2FBC")
TalkEnd(0xFE)
Return()
# Function_13_296F end
def Function_14_2FC0(): pass
label("Function_14_2FC0")
FadeToDark(300, 0, 100)
SetChrName("")
SetMessageWindowPos(-1, -1, -1, -1)
AnonymousTalk( #240
(
"\x07\x05『苍耀之灯火』\x01",
" 被认为是初期导力艺术的\x01",
" 极致作品。\x01",
" 导力革命之后\x01",
" 由卢安市民\x01",
" 赠送给为城市发展\x01",
" 作出贡献的戴尔蒙家。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(72, 320, 56, 3)
TalkEnd(0xFF)
Return()
# Function_14_2FC0 end
def Function_15_306D(): pass
label("Function_15_306D")
NewScene("ED6_DT21/T2210 ._SN", 123, 1, 0)
IdleLoop()
Return()
# Function_15_306D end
def Function_16_3077(): pass
label("Function_16_3077")
NewScene("ED6_DT21/T2210 ._SN", 121, 1, 0)
IdleLoop()
Return()
# Function_16_3077 end
def Function_17_3081(): pass
label("Function_17_3081")
FadeToDark(300, 0, 100)
SetChrName("")
AnonymousTalk( #241
"\x07\x05有吊桥的控制装置。\x02",
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(72, 320, 56, 3)
TalkEnd(0xFF)
Return()
# Function_17_3081 end
SaveToFile()
Try(main)
| [
"[email protected]"
] | |
84d759bb04610b3c0237f3c151ca8917b2c27f4b | 456433ac78b70cb8ae076ae166a85e349f181d7f | /systems/KURSSKLAD/KURSTERM/SELECTDC/templates/taskMWares.py | f515a3b448eecf874d1c05dd1dd6fe8fc60e5da9 | [] | no_license | shybkoi/WMS-Demo | 854c1679b121c68323445b60f3992959f922be8d | 2525559c4f56654acfbc21b41b3f5e40387b89e0 | refs/heads/master | 2021-01-23T01:51:20.074825 | 2017-03-23T11:51:18 | 2017-03-23T11:51:18 | 85,937,726 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 19,848 | py | #!/usr/bin/env python
# -*- coding: cp1251 -*-
##################################################
## DEPENDENCIES
import sys
import os
import os.path
from os.path import getmtime, exists
import time
import types
import __builtin__
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import DummyTransaction
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from systems.KURSSKLAD.KURSTERM.templates.main import main
from systems.KURSSKLAD.cheetahutils import viewQuantity
##################################################
## MODULE CONSTANTS
try:
True, False
except NameError:
True, False = (1==1), (1==0)
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.0rc8'
__CHEETAH_versionTuple__ = (2, 0, 0, 'candidate', 8)
__CHEETAH_genTime__ = 1482336170.5510001
__CHEETAH_genTimestamp__ = 'Wed Dec 21 18:02:50 2016'
__CHEETAH_src__ = 'systems\\KURSSKLAD\\KURSTERM\\SELECTDC\\templates\\taskMWares.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Dec 21 09:10:13 2016'
__CHEETAH_docstring__ = 'Autogenerated by CHEETAH: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class taskMWares(main):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
main.__init__(self, *args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def mainData(self, **KWS):
## CHEETAH: generated from #def mainData at line 5, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(''' \xb9 <b>''')
_v = VFFSL(SL,"docnum",True) # '$docnum' on line 6, col 10
if _v is not None: write(_filter(_v, rawExpr='$docnum')) # from line 6, col 10.
write('''</b> \xee\xf2 <b>''')
_orig_filter_22397595 = _filter
filterName = 'DateFilter'
if self._CHEETAH__filters.has_key("DateFilter"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"docdate",True) # '$docdate' on line 6, col 46
if _v is not None: write(_filter(_v, rawExpr='$docdate')) # from line 6, col 46.
_filter = _orig_filter_22397595
write(''' (<u>''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 6, col 71
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 6, col 71.
write('''</u>)</b>
''')
if VFFSL(SL,"varExists",False)('$TONAME') and VFFSL(SL,"TONAME",True): # generated from line 7, col 5
write(''' <br>''')
_v = VFFSL(SL,"TONAME",True) # '$TONAME' on line 8, col 13
if _v is not None: write(_filter(_v, rawExpr='$TONAME')) # from line 8, col 13.
write('''<br>
''')
write(''' <b><u>''')
_v = VFFSL(SL,"INFONAME",True) # '$INFONAME' on line 10, col 11
if _v is not None: write(_filter(_v, rawExpr='$INFONAME')) # from line 10, col 11.
write('''</u></b>
<hr>
<form action="taskMWares">
<input type="hidden" name="taskid" value="''')
_v = VFFSL(SL,"TID",True) # '$TID' on line 13, col 51
if _v is not None: write(_filter(_v, rawExpr='$TID')) # from line 13, col 51.
write('''">
<input type="hidden" name="waresid" value="''')
_v = VFFSL(SL,"WID",True) # '$WID' on line 14, col 52
if _v is not None: write(_filter(_v, rawExpr='$WID')) # from line 14, col 52.
write('''">
''')
if False:
_('ШК')
_v = VFFSL(SL,"_",False)('ШК') # "$_('\xd8\xca')" on line 15, col 9
if _v is not None: write(_filter(_v, rawExpr="$_('\xd8\xca')")) # from line 15, col 9.
write(''': <input type="text" id=":scan:text" name="barcode" title="''')
if False:
_('Товар')
_v = VFFSL(SL,"_",False)('Товар') # "$_('\xd2\xee\xe2\xe0\xf0')" on line 15, col 76
if _v is not None: write(_filter(_v, rawExpr="$_('\xd2\xee\xe2\xe0\xf0')")) # from line 15, col 76.
write('''"><br>
</form>
<hr>
<b><u>(''')
_v = VFFSL(SL,"WCODE",True) # '$WCODE' on line 19, col 12
if _v is not None: write(_filter(_v, rawExpr='$WCODE')) # from line 19, col 12.
write(''')</u></b>''')
_v = VFFSL(SL,"WNAME",True) # '$WNAME' on line 19, col 27
if _v is not None: write(_filter(_v, rawExpr='$WNAME')) # from line 19, col 27.
write('''
''')
if VFFSL(SL,"varExists",False)('$ARTICUL') and VFFSL(SL,"ARTICUL",True): # generated from line 20, col 5
write(''' \t<b>(<u>''')
_v = VFFSL(SL,"ARTICUL",True) # '$ARTICUL' on line 21, col 13
if _v is not None: write(_filter(_v, rawExpr='$ARTICUL')) # from line 21, col 13.
write('''</u>)</b>
''')
write(''' <br>
''')
if VFFSL(SL,"VWUID",True): # generated from line 24, col 5
write(''' <b>''')
_v = VFFSL(SL,"VWUCODE",True) # '$VWUCODE' on line 25, col 12
if _v is not None: write(_filter(_v, rawExpr='$VWUCODE')) # from line 25, col 12.
write(''' = ''')
_orig_filter_68095643 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"VWUFACTOR",True) # '$VWUFACTOR' on line 25, col 39
if _v is not None: write(_filter(_v, rawExpr='$VWUFACTOR')) # from line 25, col 39.
_filter = _orig_filter_68095643
write(''' ''')
_v = VFFSL(SL,"MWUCODE",True) # '$MWUCODE' on line 25, col 62
if _v is not None: write(_filter(_v, rawExpr='$MWUCODE')) # from line 25, col 62.
write(''' </b><br>
''')
write(''' <br>
''')
if VFFSL(SL,"varExists",False)('$datalist') and VFFSL(SL,"datalist",True) and len(VFFSL(SL,"datalist",True))>0: # generated from line 29, col 5
wuamount = 0
amount = 0
write(''' <form action=taskMWaresSave method=post>
<input type=hidden name=waresid value=''')
_v = VFFSL(SL,"wid",True) # '$wid' on line 33, col 47
if _v is not None: write(_filter(_v, rawExpr='$wid')) # from line 33, col 47.
write('''>
<input type=hidden name=taskid value=''')
_v = VFFSL(SL,"tid",True) # '$tid' on line 34, col 46
if _v is not None: write(_filter(_v, rawExpr='$tid')) # from line 34, col 46.
write('''>
<input type=hidden name=dbeg value="''')
_v = VFFSL(SL,"dbeg",True) # '$dbeg' on line 35, col 45
if _v is not None: write(_filter(_v, rawExpr='$dbeg')) # from line 35, col 45.
write('''">
<table>
<thead>
<tr>
<th>''')
if False:
_('Дата')
_v = VFFSL(SL,"_",False)('Дата') # "$_('\xc4\xe0\xf2\xe0')" on line 39, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xc4\xe0\xf2\xe0')")) # from line 39, col 25.
write('''</th>
<th>
<select name=wuid id=":focus:">
<option value=''')
_v = VFFSL(SL,"MWUID",True) # '$MWUID' on line 42, col 43
if _v is not None: write(_filter(_v, rawExpr='$MWUID')) # from line 42, col 43.
write(''' selected>''')
_v = VFFSL(SL,"MWUCODE",True) # '$MWUCODE' on line 42, col 59
if _v is not None: write(_filter(_v, rawExpr='$MWUCODE')) # from line 42, col 59.
write('''</option>
''')
if VFFSL(SL,"VWUID",True): # generated from line 43, col 27
write(''' <option value=''')
_v = VFFSL(SL,"VWUID",True) # '$VWUID' on line 44, col 43
if _v is not None: write(_filter(_v, rawExpr='$VWUID')) # from line 44, col 43.
write('''>''')
_v = VFFSL(SL,"VWUCODE",True) # '$VWUCODE' on line 44, col 50
if _v is not None: write(_filter(_v, rawExpr='$VWUCODE')) # from line 44, col 50.
write('''</option>
''')
write(''' </select>
</th>
<th>''')
if False:
_('Кол-во')
_v = VFFSL(SL,"_",False)('Кол-во') # "$_('\xca\xee\xeb-\xe2\xee')" on line 48, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xca\xee\xeb-\xe2\xee')")) # from line 48, col 25.
write('''</th>
<tr>
</thead>
<tbody>
''')
for item in VFFSL(SL,"datalist",True): # generated from line 52, col 13
if VFFSL(SL,"item.canedit",True) == '0': # generated from line 53, col 17
trClass = 'class="inactive"'
else: # generated from line 55, col 17
trClass = ''
write(''' <tr ''')
_v = VFFSL(SL,"trClass",True) # '$trClass' on line 58, col 21
if _v is not None: write(_filter(_v, rawExpr='$trClass')) # from line 58, col 21.
write('''>
<td>''')
_orig_filter_88920082 = _filter
filterName = 'DateFilter2'
if self._CHEETAH__filters.has_key("DateFilter2"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.productdate",True) # '$item.productdate' on line 59, col 44
if _v is not None: write(_filter(_v, rawExpr='$item.productdate')) # from line 59, col 44.
_filter = _orig_filter_88920082
write('''</td>
''')
if VFFSL(SL,"item.canedit",True) == '1': # generated from line 60, col 19
write(''' <td><input type="text" name="WL_''')
_v = VFFSL(SL,"item.WLOTID",True) # '$item.WLOTID' on line 61, col 53
if _v is not None: write(_filter(_v, rawExpr='$item.WLOTID')) # from line 61, col 53.
write('''" id="::float" title="''')
_v = VFFSL(SL,"item.WLNUMBER",True) # '$item.WLNUMBER' on line 61, col 87
if _v is not None: write(_filter(_v, rawExpr='$item.WLNUMBER')) # from line 61, col 87.
write('''" value="''')
_orig_filter_30262987 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.AMOUNT",True) # '$item.AMOUNT' on line 61, col 126
if _v is not None: write(_filter(_v, rawExpr='$item.AMOUNT')) # from line 61, col 126.
_filter = _orig_filter_30262987
write('''" size="4"></td>
''')
else: # generated from line 62, col 19
write(''' <td><a href=\'#\' title="''')
_v = VFFSL(SL,"item.WLNUMBER",True) # '$item.WLNUMBER' on line 63, col 44
if _v is not None: write(_filter(_v, rawExpr='$item.WLNUMBER')) # from line 63, col 44.
write('''">''')
_orig_filter_14753798 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.AMOUNT",True) # '$item.AMOUNT' on line 63, col 76
if _v is not None: write(_filter(_v, rawExpr='$item.AMOUNT')) # from line 63, col 76.
_filter = _orig_filter_14753798
write('''</a></td>
''')
write(''' <td>''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"item.AMOUNT",True),VFFSL(SL,"VWUFACTOR",True),VFFSL(SL,"VWUCODE",True),VFFSL(SL,"MWUFACTOR",True),VFFSL(SL,"MWUCODE",True)) # '$viewQuantity($item.AMOUNT,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)' on line 65, col 25
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($item.AMOUNT,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)')) # from line 65, col 25.
write('''(<b><u>''')
_orig_filter_57748425 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"item.AMOUNT",True) # '$item.AMOUNT' on line 65, col 115
if _v is not None: write(_filter(_v, rawExpr='$item.AMOUNT')) # from line 65, col 115.
_filter = _orig_filter_57748425
write('''</u></b>)</td>
</tr>
''')
amount += float(VFFSL(SL,"item.AMOUNT",True))
write(''' </tbody>
<tfoot>
<tr>
<th>''')
if False:
_('Итого')
_v = VFFSL(SL,"_",False)('Итого') # "$_('\xc8\xf2\xee\xe3\xee')" on line 72, col 25
if _v is not None: write(_filter(_v, rawExpr="$_('\xc8\xf2\xee\xe3\xee')")) # from line 72, col 25.
write(''':</th>
<th colspan=2>''')
_v = VFFSL(SL,"viewQuantity",False)(VFFSL(SL,"amount",True),VFFSL(SL,"VWUFACTOR",True),VFFSL(SL,"VWUCODE",True),VFFSL(SL,"MWUFACTOR",True),VFFSL(SL,"MWUCODE",True)) # '$viewQuantity($amount,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)' on line 73, col 35
if _v is not None: write(_filter(_v, rawExpr='$viewQuantity($amount,$VWUFACTOR,$VWUCODE,$MWUFACTOR,$MWUCODE)')) # from line 73, col 35.
write('''(<b><u>''')
_orig_filter_11841561 = _filter
filterName = 'Quantity'
if self._CHEETAH__filters.has_key("Quantity"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
_v = VFFSL(SL,"amount",True) # '$amount' on line 73, col 120
if _v is not None: write(_filter(_v, rawExpr='$amount')) # from line 73, col 120.
_filter = _orig_filter_11841561
write('''</u></b>)</th>
</tr>
</tfoot>
</table>
<input type="submit" value="''')
if False:
_('Сохранить')
_v = VFFSL(SL,"_",False)('Сохранить') # "$_('\xd1\xee\xf5\xf0\xe0\xed\xe8\xf2\xfc')" on line 77, col 37
if _v is not None: write(_filter(_v, rawExpr="$_('\xd1\xee\xf5\xf0\xe0\xed\xe8\xf2\xfc')")) # from line 77, col 37.
write('''">
</form>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def writeBody(self, **KWS):
## CHEETAH: main method generated for this template
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('''
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_taskMWares= 'writeBody'
## END CLASS DEFINITION
if not hasattr(taskMWares, '_initCheetahAttributes'):
templateAPIClass = getattr(taskMWares, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(taskMWares)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=taskMWares()).run()
| [
"[email protected]"
] | |
14b577ec46ee9d7038f9abbef96019ef6af5fd26 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/RiskFinishLabel.py | 70510d2ed4724524faa93b6970839d177175fd54 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,630 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class RiskFinishLabel(object):
def __init__(self):
self._code = None
self._label = None
self._path = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def label(self):
return self._label
@label.setter
def label(self, value):
self._label = value
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.label:
if hasattr(self.label, 'to_alipay_dict'):
params['label'] = self.label.to_alipay_dict()
else:
params['label'] = self.label
if self.path:
if hasattr(self.path, 'to_alipay_dict'):
params['path'] = self.path.to_alipay_dict()
else:
params['path'] = self.path
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = RiskFinishLabel()
if 'code' in d:
o.code = d['code']
if 'label' in d:
o.label = d['label']
if 'path' in d:
o.path = d['path']
return o
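# Illustrative round trip (added sketch; the field values below are made up,
# not part of the SDK): from_alipay_dict and to_alipay_dict are inverses for
# plain string fields.
# label = RiskFinishLabel.from_alipay_dict({'code': '100', 'label': 'ok', 'path': '/risk'})
# assert label.to_alipay_dict() == {'code': '100', 'label': 'ok', 'path': '/risk'}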
| [
"[email protected]"
] | |
0c5e81e31f3423a12125f91838a1aa195b0987ba | ca47ebf432f787e0ae78a54afcd3c60d0af2d476 | /GitProgs/152002016_PythonLabCode1_R_Parnika_Murty/Q2.py | 1bd4495f446a8e5de2c579c00a17269c90c17d39 | [] | no_license | Parnika1102/My_Assignments | 0659c70f8f8473107b49a611ee9d16823331c535 | b0ecf3df0107c627944f5ef98f72996efdf42f37 | refs/heads/master | 2023-03-20T11:37:02.821148 | 2021-03-10T12:14:45 | 2021-03-10T12:14:45 | 344,998,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | #!/bin/python3
#Class Polygon with attributes numSides and area.
class Polygon:
#__init__() constructor.
def __init__(self,numSides,area):
#The class attributes "numSides" and "area".
self.numSides = numSides
self.area = area
#For the string representation of our object.
def __str__(self):
#To display error message if number of sides is less than 3.
if self.numSides<3 :
raise Exception("Number of sides should be atleast 3")
#To display error message if polygon has negative area.
elif self.area<0 :
raise Exception("Polygon should have postive area")
#To display details about the polygon.
else:
return "Polygon with % s sides and area % s" % (self.numSides, self.area)
try:
#Creating a polygon object with respective number of sides and area.
p1 = Polygon(1,23)
#Printing the object.
print(p1)
#Printing the exception type and respective message.
except Exception as e:
print(type(e))
    print(e)
| [
"email"
] | email |
f254f69848a95f326b53f8ce3d6c7f556a3e272f | 5130754859e274cd06f63260439e5203c2000a11 | /core/jobs/batch_jobs/blog_post_search_indexing_jobs.py | 9b9440e7125be3ee12d6e27e9720636aeb7227bd | [
"Apache-2.0"
] | permissive | oppia/oppia | 8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe | d16fdf23d790eafd63812bd7239532256e30a21d | refs/heads/develop | 2023-09-04T07:50:13.661276 | 2023-09-03T09:21:32 | 2023-09-03T09:21:32 | 40,687,563 | 6,172 | 4,666 | Apache-2.0 | 2023-09-14T18:25:11 | 2015-08-14T00:16:14 | Python | UTF-8 | Python | false | false | 3,766 | py | # coding: utf-8
#
# Copyright 2022 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs that are run by CRON scheduler."""
from __future__ import annotations
from core.domain import blog_domain
from core.domain import blog_services
from core.domain import search_services
from core.jobs import base_jobs
from core.jobs.io import ndb_io
from core.jobs.transforms import job_result_transforms
from core.jobs.types import job_run_result
from core.platform import models
import apache_beam as beam
import result
from typing import Final, Iterable, List
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import blog_models
from mypy_imports import search_services as platform_search_services
(blog_models,) = models.Registry.import_models([models.Names.BLOG])
platform_search_services = models.Registry.import_search_services()
class IndexBlogPostsInSearchJob(base_jobs.JobBase):
"""Job that indexes the blog posts in Elastic Search."""
MAX_BATCH_SIZE: Final = 1000
def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
"""Returns a PCollection of 'SUCCESS' or 'FAILURE' results from
the Elastic Search.
Returns:
PCollection. A PCollection of 'SUCCESS' or 'FAILURE' results from
the Elastic Search.
"""
return (
self.pipeline
| 'Get all non-deleted models' >> (
ndb_io.GetModels(
blog_models.BlogPostSummaryModel.get_all(
include_deleted=False
)
))
| 'Convert BlogPostSummaryModels to domain objects' >> beam.Map(
blog_services.get_blog_post_summary_from_model)
| 'Split models into batches' >> beam.transforms.util.BatchElements(
max_batch_size=self.MAX_BATCH_SIZE)
| 'Index batches of models' >> beam.ParDo(
IndexBlogPostSummaries())
| 'Count the output' >> (
job_result_transforms.ResultsToJobRunResults())
)
# TODO(#15613): Here we use MyPy ignore because the incomplete typing of
# apache_beam library and absences of stubs in Typeshed, forces MyPy to
# assume that PTransform class is of type Any. Thus to avoid MyPy's error
# (Class cannot subclass 'PTransform' (has type 'Any')), we added an
# ignore here.
class IndexBlogPostSummaries(beam.DoFn): # type: ignore[misc]
"""DoFn to index blog post summaries."""
def process(
self, blog_post_summaries: List[blog_domain.BlogPostSummary]
) -> Iterable[result.Result[None, Exception]]:
"""Index blog post summaries and catch any errors.
Args:
blog_post_summaries: list(BlogPostSummaries). List of Blog Post
Summary domain objects to be indexed.
Yields:
JobRunResult. List containing one element, which is either SUCCESS,
or FAILURE.
"""
try:
search_services.index_blog_post_summaries(
blog_post_summaries)
for _ in blog_post_summaries:
yield result.Ok()
except platform_search_services.SearchException as e:
yield result.Err(e)
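# Illustrative sketch (added; not part of the Oppia codebase): because the
# batches come from BatchElements, the DoFn can be exercised directly with a
# hand-built list. `summaries` below stands for a hypothetical list of
# blog_domain.BlogPostSummary objects.
# for res in IndexBlogPostSummaries().process(summaries):
#     assert res.is_ok()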
| [
"[email protected]"
] | |
02106294b4d4b980e76f0077bd730aa8cb529c27 | 9c14bb4d3029a9fff23cf0d3e9fdce9ca4e369ab | /prettyqt/widgets/composed/imageviewer.py | ac1daae24ae902a88755ea0c2d5992f940896d16 | [
"MIT"
] | permissive | fossabot/PrettyQt | 0e1ae074ca0776fa02ee0b8e6c04f9d545408855 | d435b8d8c68d16c704c39972457497c93741859f | refs/heads/master | 2020-05-14T16:50:48.896440 | 2019-04-17T11:48:25 | 2019-04-17T11:48:25 | 181,880,405 | 0 | 0 | null | 2019-04-17T11:48:19 | 2019-04-17T11:48:19 | null | UTF-8 | Python | false | false | 726 | py | # -*- coding: utf-8 -*-
"""
@author: Philipp Temminghoff
"""
import pathlib
import sys
from prettyqt import widgets
class ImageViewer(widgets.Widget):
def __init__(self, title="", parent=None):
super().__init__(parent)
self.title = title
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
path = pathlib.Path("decisiontree.png")
self.image = widgets.Label.image_from_path(path, parent=self)
self.show()
if __name__ == "__main__":
app = widgets.Application(sys.argv)
ex = ImageViewer()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
c7ce6a26eabd9e0321bd10daacd750f082343174 | b8d2f095a4b7ea567ccc61ee318ba879318eec3d | /树 Tree/538. 把二叉搜索树转换为累加树.py | 9a2100675571f2350424587e70a2d48bbd0aa325 | [] | no_license | f1amingo/leetcode-python | a3ef78727ae696fe2e94896258cfba1b7d58b1e3 | b365ba85036e51f7a9e018767914ef22314a6780 | refs/heads/master | 2021-11-10T16:19:27.603342 | 2021-09-17T03:12:59 | 2021-09-17T03:12:59 | 205,813,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from util.ZTree import TreeNode
class Solution:
def convertBST(self, root: TreeNode) -> TreeNode:
def dfs(r: TreeNode):
if r:
dfs(r.right)
nonlocal total
total += r.val
r.val = total
dfs(r.left)
total = 0
dfs(root)
return root
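# Quick check (added illustration; assumes util.ZTree's TreeNode matches the
# commented constructor above):
# root = TreeNode(4, TreeNode(1), TreeNode(6))
# Solution().convertBST(root)
# print(root.val)  # 10 = 4 + 6, the sum of all keys >= 4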
| [
"[email protected]"
] | |
87f27491103c863122d5b540b57be42f6faccd47 | 5b28005b6ee600e6eeca2fc7c57c346e23da285f | /nomadic_recording_lib/comm/dmx/OSCtoOLA.py | c5c93f2ac60ce93d0dcc09a1ffe7fb3941cf2212 | [] | no_license | nocarryr/wowza_logparse | c31d2db7ad854c6b0d13495a0ede5f406c2fce3f | d6daa5bf58bae1db48ac30031a845bf975c7d5cc | refs/heads/master | 2021-01-17T07:19:00.347206 | 2017-06-24T16:57:32 | 2017-06-24T16:57:32 | 25,835,704 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,980 | py | import socket
import threading
import array
#import jsonpickle
from Bases import OSCBaseObject, Serialization
from ola_IO import olaIO
from ..osc.osc_io import oscIO
from ..BaseIO import detect_usable_address
class OSCtoOLAHost(OSCBaseObject):
osc_address = 'OSCtoOLA'
ui_name = 'OLA (Open Lighting Architecture)'
_Properties = {'connected':dict(fget='_connected_getter', fset='_connected_setter')}
def __init__(self, **kwargs):
self.osc_io = kwargs.get('osc_io')
self.root_address = 'OSCtoOLA-' + socket.gethostname()
self.direct_mode = False
# if not self.osc_io:
# self.direct_mode = True
# s = 'OSCtoOLA'
# io_kwargs = dict(confsection=s + '_io', app_address=s, root_address=s)
# for key in ['hostaddr', 'hostport', 'mcastaddr', 'mcastport']:
# if key in kwargs:
# io_kwargs.update({key:kwargs[key]})
# self.osc_io = oscIO(**io_kwargs)
# self.osc_io.add_client_name(socket.gethostname())
self.osc_parent_node = self.osc_io.root_node
super(OSCtoOLAHost, self).__init__(**kwargs)
self.register_signal('state_changed')
self.universes = {}
self.olaIO = olaIO()
#self.osc_io.add_client_name(self.root_address, update_conf=False)
addr = detect_usable_address()
port = self.osc_io.hostdata['recvport']
self.osc_io.add_client(name=self.root_address, address=addr, port=port,
update_conf=False, isLocalhost=False)
self.osc_io.connect('new_master', self.on_osc_new_master)
self.olaIO.connect('new_universe', self.on_new_ola_universe)
self.olaIO.connect('state_changed', self.on_ola_state_changed)
#self.add_osc_handler(callbacks={'request-universes':self.on_universes_requested})
#self.do_connect()
# @property
# def connected(self):
# return self.olaIO.connected
# @connected.setter
# def connected(self, value):
# self.olaIO.connected = value
def _connected_getter(self):
return self.olaIO.connected
def _connected_setter(self, value):
self.olaIO.connected = value
def do_connect(self):
if self.direct_mode:
self.osc_io.do_connect()
self.olaIO.do_connect()
def do_disconnect(self):
def _do_disconnect():
if self.direct_mode:
self.osc_io.do_disconnect()
self.olaIO.do_disconnect()
for univ in self.universes.itervalues():
univ.set_all_zero(True)
t = threading.Timer(.5, _do_disconnect)
t.daemon = True
t.start()
def on_ola_state_changed(self, **kwargs):
self.emit('state_changed', **kwargs)
def on_new_ola_universe(self, **kwargs):
univ = kwargs.get('ola_universe')
if univ.id not in self.universes:
u_kwargs = self.add_osc_child(address=str(univ.id))
u_kwargs.update({'ola_universe':univ, 'root_address':self.root_address})
obj = OSCUniverse(**u_kwargs)
self.universes.update({obj.id:obj})
def on_universes_requested(self, **kwargs):
d = {}
for key, val in self.universes.iteritems():
d.update({key:{}})
for attr in ['id', 'name']:
d[key].update({attr:getattr(val, attr)})
s = Serialization.to_json(d)
self.osc_node.send_message(root_address=self.root_address, address='universes-info', value=s)
def on_osc_new_master(self, **kwargs):
for univ in self.universes.itervalues():
univ.set_all_zero(not self.osc_node.oscMaster)
def on_app_exit(self, *args, **kwargs):
self.LOG.info('oscola app exit')
self.olaIO.on_app_exit()
class OSCUniverse(OSCBaseObject):
def __init__(self, **kwargs):
self._values = None
self.all_zero = False
super(OSCUniverse, self).__init__(**kwargs)
self.register_signal('value_update')
self.values = array.array('B', [0]*513)
#print 'osc path: ', self.osc_node.get_full_path()
self.root_address = kwargs.get('root_address')
self.ola_universe = kwargs.get('ola_universe')
self.ola_universe.Universe = self
#self.id = self.ola_universe.id
self.add_osc_handler(callbacks={'set-channel':self.on_universe_set_channel,
'dump-response':self.on_universe_dump_response})
self.osc_node.send_message(root_address=self.root_address, client=self.root_address, address='request-dump')
#print 'OSCtoOLA new_universe: uid=%s, name=%s, pyid=%s' % (self.id, self.name, id(self))
@property
def id(self):
return self.ola_universe.id
@property
def name(self):
return self.ola_universe.name
@property
def values(self):
if self.all_zero:
return array.array('B', [0]*513)
return self._values
@values.setter
def values(self, values):
self._values = values
def on_universe_set_channel(self, **kwargs):
values = kwargs.get('values')
chan = values[0]
value = values[1]
self.values[chan-1] = value
#print 'oscola univ update: ', chan, value
#print 'update from osc: chan=%s, value=%s' % (chan, value)
if not self.all_zero:
self.emit('value_update', universe=self, values=self.values)
def on_universe_dump_response(self, **kwargs):
values = kwargs.get('values')
for i, value in enumerate(values):
self.values[i] = value
self.emit('value_update', universe=self, values=self.values)
def set_all_zero(self, state):
self.all_zero = state
self.emit('value_update', universe=self, values=self.values)
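# Illustrative sketch (added; `univ` stands for an OSCUniverse instance and the
# handler name is hypothetical). Listeners subscribe with the same connect()
# pattern used above for 'new_universe' and 'state_changed':
# def on_value_update(**kwargs):
#     print(kwargs['universe'].id, list(kwargs['values'][:8]))
# univ.connect('value_update', on_value_update)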
| [
"[email protected]"
] | |
e0a0bfe842755d832225f9678234d2d59ed708fb | 90047daeb462598a924d76ddf4288e832e86417c | /build/android/pylib/utils/emulator.py | a5aa544b4c70ec402b7a00d23e4684e671fb52db | [
"BSD-3-Clause"
] | permissive | massbrowser/android | 99b8c21fa4552a13c06bbedd0f9c88dd4a4ad080 | a9c4371682c9443d6e1d66005d4db61a24a9617c | refs/heads/master | 2022-11-04T21:15:50.656802 | 2017-06-08T12:31:39 | 2017-06-08T12:31:39 | 93,747,579 | 2 | 2 | BSD-3-Clause | 2022-10-31T10:34:25 | 2017-06-08T12:36:07 | null | UTF-8 | Python | false | false | 17,458 | py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to start and stop Android emulator.
Emulator: The class provides the methods to launch/shutdown the emulator with
the android virtual device named 'avd_armeabi' .
"""
import logging
import os
import signal
import subprocess
import time
from devil.android import device_errors
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.utils import cmd_helper
from pylib import constants
from pylib import pexpect
from pylib.utils import time_profile
# Default sdcard size in the format of [amount][unit]
DEFAULT_SDCARD_SIZE = '512M'
# Default internal storage (MB) of emulator image
DEFAULT_STORAGE_SIZE = '1024M'
# Each emulator has 60 secs of wait time for launching
_BOOT_WAIT_INTERVALS = 6
_BOOT_WAIT_INTERVAL_TIME = 10
# Path for avd files and avd dir
_BASE_AVD_DIR = os.path.expanduser(os.path.join('~', '.android', 'avd'))
_TOOLS_ANDROID_PATH = os.path.join(constants.ANDROID_SDK_ROOT,
'tools', 'android')
# Template used to generate config.ini files for the emulator
CONFIG_TEMPLATE = """avd.ini.encoding=ISO-8859-1
hw.dPad=no
hw.lcd.density=320
sdcard.size={sdcard.size}
hw.cpu.arch={hw.cpu.arch}
hw.device.hash=-708107041
hw.camera.back=none
disk.dataPartition.size=800M
hw.gpu.enabled={gpu}
skin.path=720x1280
skin.dynamic=yes
hw.keyboard=yes
hw.ramSize=1024
hw.device.manufacturer=Google
hw.sdCard=yes
hw.mainKeys=no
hw.accelerometer=yes
skin.name=720x1280
abi.type={abi.type}
hw.trackBall=no
hw.device.name=Galaxy Nexus
hw.battery=yes
hw.sensors.proximity=yes
image.sysdir.1=system-images/android-{api.level}/default/{abi.type}/
hw.sensors.orientation=yes
hw.audioInput=yes
hw.camera.front=none
hw.gps=yes
vm.heapSize=128
{extras}"""
CONFIG_REPLACEMENTS = {
'x86': {
'{hw.cpu.arch}': 'x86',
'{abi.type}': 'x86',
'{extras}': ''
},
'arm': {
'{hw.cpu.arch}': 'arm',
'{abi.type}': 'armeabi-v7a',
'{extras}': 'hw.cpu.model=cortex-a8\n'
},
'mips': {
'{hw.cpu.arch}': 'mips',
'{abi.type}': 'mips',
'{extras}': ''
}
}
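# Illustrative sketch (added, mirroring the substitution performed in
# Emulator.CreateAVD below; the api level and sizes are example values):
# def _render_config(abi='x86', api_level=19, sdcard='512M', gpu='yes'):
#   cfg = CONFIG_TEMPLATE
#   for key, value in CONFIG_REPLACEMENTS[abi].items():
#     cfg = cfg.replace(key, value)            # per-ABI placeholders
#   cfg = cfg.replace('{api.level}', str(api_level))
#   cfg = cfg.replace('{sdcard.size}', sdcard)
#   return cfg.replace('{gpu}', gpu)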
class EmulatorLaunchException(Exception):
"""Emulator failed to launch."""
pass
def WaitForEmulatorLaunch(num):
"""Wait for emulators to finish booting
Emulators on bots are launch with a separate background process, to avoid
running tests before the emulators are fully booted, this function waits for
a number of emulators to finish booting
Arg:
num: the amount of emulators to wait.
"""
for _ in range(num*_BOOT_WAIT_INTERVALS):
emulators = [device_utils.DeviceUtils(a)
for a in adb_wrapper.AdbWrapper.Devices()
if a.is_emulator]
if len(emulators) >= num:
logging.info('All %d emulators launched', num)
return
logging.info(
'Waiting for %d emulators, %d of them already launched', num,
len(emulators))
time.sleep(_BOOT_WAIT_INTERVAL_TIME)
raise Exception("Expected %d emulators, %d launched within time limit" %
(num, len(emulators)))
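# Example (added illustration): a launcher script that started two emulators in
# the background would block here until both are visible to adb:
# WaitForEmulatorLaunch(2)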
def KillAllEmulators():
"""Kill all running emulators that look like ones we started.
There are odd 'sticky' cases where there can be no emulator process
running but a device slot is taken. A little bot trouble and we're out of
room forever.
"""
logging.info('Killing all existing emulators and existing the program')
emulators = [device_utils.DeviceUtils(a)
for a in adb_wrapper.AdbWrapper.Devices()
if a.is_emulator]
if not emulators:
return
for e in emulators:
e.adb.Emu(['kill'])
logging.info('Emulator killing is async; give a few seconds for all to die.')
for _ in range(10):
if not any(a.is_emulator for a in adb_wrapper.AdbWrapper.Devices()):
return
time.sleep(1)
def DeleteAllTempAVDs():
"""Delete all temporary AVDs which are created for tests.
If the test exits abnormally and some temporary AVDs created when testing may
be left in the system. Clean these AVDs.
"""
logging.info('Deleting all the avd files')
avds = device_utils.GetAVDs()
if not avds:
return
for avd_name in avds:
if 'run_tests_avd' in avd_name:
cmd = [_TOOLS_ANDROID_PATH, '-s', 'delete', 'avd', '--name', avd_name]
cmd_helper.RunCmd(cmd)
logging.info('Delete AVD %s', avd_name)
class PortPool(object):
"""Pool for emulator port starting position that changes over time."""
_port_min = 5554
_port_max = 5585
_port_current_index = 0
@classmethod
def port_range(cls):
"""Return a range of valid ports for emulator use.
The port must be an even number between 5554 and 5584. Sometimes
a killed emulator "hangs on" to a port long enough to prevent
relaunch. This is especially true on slow machines (like a bot).
Cycling through a port start position helps make us resilient."""
ports = range(cls._port_min, cls._port_max, 2)
n = cls._port_current_index
cls._port_current_index = (n + 1) % len(ports)
return ports[n:] + ports[:n]
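# Illustrative behaviour (added): each call rotates the starting port, e.g.
# PortPool.port_range()[0] is 5554 on the first call, 5556 on the next, and so
# on, wrapping around after len(ports) calls.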
def _GetAvailablePort():
"""Returns an available TCP port for the console."""
used_ports = []
emulators = [device_utils.DeviceUtils(a)
for a in adb_wrapper.AdbWrapper.Devices()
if a.is_emulator]
for emulator in emulators:
used_ports.append(emulator.adb.GetDeviceSerial().split('-')[1])
for port in PortPool.port_range():
if str(port) not in used_ports:
return port
def LaunchTempEmulators(emulator_count, abi, api_level, enable_kvm=False,
kill_and_launch=True, sdcard_size=DEFAULT_SDCARD_SIZE,
storage_size=DEFAULT_STORAGE_SIZE, wait_for_boot=True,
headless=False):
"""Create and launch temporary emulators and wait for them to boot.
Args:
emulator_count: number of emulators to launch.
abi: the emulator target platform
api_level: the api level (e.g., 19 for Android v4.4 - KitKat release)
wait_for_boot: whether or not to wait for emulators to boot up
headless: running emulator with no ui
Returns:
List of emulators.
"""
emulators = []
for n in xrange(emulator_count):
t = time_profile.TimeProfile('Emulator launch %d' % n)
# Creates a temporary AVD.
avd_name = 'run_tests_avd_%d' % n
logging.info('Emulator launch %d with avd_name=%s and api=%d',
n, avd_name, api_level)
emulator = Emulator(avd_name, abi, enable_kvm=enable_kvm,
sdcard_size=sdcard_size, storage_size=storage_size,
headless=headless)
emulator.CreateAVD(api_level)
emulator.Launch(kill_all_emulators=(n == 0 and kill_and_launch))
t.Stop()
emulators.append(emulator)
# Wait for all emulators to boot completed.
if wait_for_boot:
for emulator in emulators:
emulator.ConfirmLaunch(True)
logging.info('All emulators are fully booted')
return emulators
def LaunchEmulator(avd_name, abi, kill_and_launch=True, enable_kvm=False,
sdcard_size=DEFAULT_SDCARD_SIZE,
storage_size=DEFAULT_STORAGE_SIZE, headless=False):
"""Launch an existing emulator with name avd_name.
Args:
avd_name: name of existing emulator
abi: the emulator target platform
headless: running emulator with no ui
Returns:
emulator object.
"""
logging.info('Specified emulator named avd_name=%s launched', avd_name)
emulator = Emulator(avd_name, abi, enable_kvm=enable_kvm,
sdcard_size=sdcard_size, storage_size=storage_size,
headless=headless)
emulator.Launch(kill_all_emulators=kill_and_launch)
emulator.ConfirmLaunch(True)
return emulator
class Emulator(object):
"""Provides the methods to launch/shutdown the emulator.
The emulator has the android virtual device named 'avd_armeabi'.
The emulator could use any even TCP port between 5554 and 5584 for the
console communication, and this port will be part of the device name like
'emulator-5554'. Assume it is always True, as the device name is the id of
emulator managed in this class.
Attributes:
emulator: Path of Android's emulator tool.
popen: Popen object of the running emulator process.
device: Device name of this emulator.
"""
# Signals we listen for to kill the emulator on
_SIGNALS = (signal.SIGINT, signal.SIGHUP)
# Time to wait for an emulator launch, in seconds. This includes
# the time to launch the emulator and a wait-for-device command.
_LAUNCH_TIMEOUT = 120
  # Timeout interval of wait-for-device command before bouncing to a
# process life check.
_WAITFORDEVICE_TIMEOUT = 5
# Time to wait for a 'wait for boot complete' (property set on device).
_WAITFORBOOT_TIMEOUT = 300
def __init__(self, avd_name, abi, enable_kvm=False,
sdcard_size=DEFAULT_SDCARD_SIZE,
storage_size=DEFAULT_STORAGE_SIZE, headless=False):
"""Init an Emulator.
Args:
avd_name: name of the AVD to create
abi: target platform for emulator being created, defaults to x86
"""
android_sdk_root = constants.ANDROID_SDK_ROOT
self.emulator = os.path.join(android_sdk_root, 'tools', 'emulator')
self.android = _TOOLS_ANDROID_PATH
self.popen = None
self.device_serial = None
self.abi = abi
self.avd_name = avd_name
self.sdcard_size = sdcard_size
self.storage_size = storage_size
self.enable_kvm = enable_kvm
self.headless = headless
@staticmethod
def _DeviceName():
"""Return our device name."""
port = _GetAvailablePort()
return ('emulator-%d' % port, port)
def CreateAVD(self, api_level):
"""Creates an AVD with the given name.
Args:
api_level: the api level of the image
Return avd_name.
"""
if self.abi == 'arm':
abi_option = 'armeabi-v7a'
elif self.abi == 'mips':
abi_option = 'mips'
else:
abi_option = 'x86'
api_target = 'android-%s' % api_level
avd_command = [
self.android,
'--silent',
'create', 'avd',
'--name', self.avd_name,
'--abi', abi_option,
'--target', api_target,
'--sdcard', self.sdcard_size,
'--force',
]
avd_cmd_str = ' '.join(avd_command)
logging.info('Create AVD command: %s', avd_cmd_str)
avd_process = pexpect.spawn(avd_cmd_str)
# Instead of creating a custom profile, we overwrite config files.
avd_process.expect('Do you wish to create a custom hardware profile')
avd_process.sendline('no\n')
avd_process.expect('Created AVD \'%s\'' % self.avd_name)
# Replace current configuration with default Galaxy Nexus config.
ini_file = os.path.join(_BASE_AVD_DIR, '%s.ini' % self.avd_name)
new_config_ini = os.path.join(_BASE_AVD_DIR, '%s.avd' % self.avd_name,
'config.ini')
# Remove config files with defaults to replace with Google's GN settings.
os.unlink(ini_file)
os.unlink(new_config_ini)
# Create new configuration files with Galaxy Nexus by Google settings.
with open(ini_file, 'w') as new_ini:
new_ini.write('avd.ini.encoding=ISO-8859-1\n')
new_ini.write('target=%s\n' % api_target)
new_ini.write('path=%s/%s.avd\n' % (_BASE_AVD_DIR, self.avd_name))
new_ini.write('path.rel=avd/%s.avd\n' % self.avd_name)
custom_config = CONFIG_TEMPLATE
replacements = CONFIG_REPLACEMENTS[self.abi]
for key in replacements:
custom_config = custom_config.replace(key, replacements[key])
custom_config = custom_config.replace('{api.level}', str(api_level))
custom_config = custom_config.replace('{sdcard.size}', self.sdcard_size)
    custom_config = custom_config.replace('{gpu}', 'no' if self.headless else 'yes')  # str.replace returns a new string, so the result must be reassigned
with open(new_config_ini, 'w') as new_config_ini:
new_config_ini.write(custom_config)
return self.avd_name
def _DeleteAVD(self):
"""Delete the AVD of this emulator."""
avd_command = [
self.android,
'--silent',
'delete',
'avd',
'--name', self.avd_name,
]
logging.info('Delete AVD command: %s', ' '.join(avd_command))
cmd_helper.RunCmd(avd_command)
def ResizeAndWipeAvd(self, storage_size):
"""Wipes old AVD and creates new AVD of size |storage_size|.
This serves as a work around for '-partition-size' and '-wipe-data'
"""
userdata_img = os.path.join(_BASE_AVD_DIR, '%s.avd' % self.avd_name,
'userdata.img')
userdata_qemu_img = os.path.join(_BASE_AVD_DIR, '%s.avd' % self.avd_name,
'userdata-qemu.img')
resize_cmd = ['resize2fs', userdata_img, '%s' % storage_size]
logging.info('Resizing userdata.img to ideal size')
cmd_helper.RunCmd(resize_cmd)
wipe_cmd = ['cp', userdata_img, userdata_qemu_img]
logging.info('Replacing userdata-qemu.img with the new userdata.img')
cmd_helper.RunCmd(wipe_cmd)
def Launch(self, kill_all_emulators):
"""Launches the emulator asynchronously. Call ConfirmLaunch() to ensure the
emulator is ready for use.
If fails, an exception will be raised.
"""
if kill_all_emulators:
KillAllEmulators() # just to be sure
self._AggressiveImageCleanup()
(self.device_serial, port) = self._DeviceName()
self.ResizeAndWipeAvd(storage_size=self.storage_size)
emulator_command = [
self.emulator,
# Speed up emulator launch by 40%. Really.
'-no-boot-anim',
]
if self.headless:
emulator_command.extend([
'-no-skin',
'-no-window'
])
else:
emulator_command.extend([
'-gpu', 'on'
])
emulator_command.extend([
# Use a familiar name and port.
'-avd', self.avd_name,
'-port', str(port),
# all the argument after qemu are sub arguments for qemu
'-qemu', '-m', '1024',
])
if self.abi == 'x86' and self.enable_kvm:
emulator_command.extend([
# For x86 emulator --enable-kvm will fail early, avoiding accidental
# runs in a slow mode (i.e. without hardware virtualization support).
'--enable-kvm',
])
logging.info('Emulator launch command: %s', ' '.join(emulator_command))
self.popen = subprocess.Popen(args=emulator_command,
stderr=subprocess.STDOUT)
self._InstallKillHandler()
@staticmethod
def _AggressiveImageCleanup():
"""Aggressive cleanup of emulator images.
Experimentally it looks like our current emulator use on the bot
leaves image files around in /tmp/android-$USER. If a "random"
name gets reused, we choke with a 'File exists' error.
TODO(jrg): is there a less hacky way to accomplish the same goal?
"""
logging.info('Aggressive Image Cleanup')
emulator_imagedir = '/tmp/android-%s' % os.environ['USER']
if not os.path.exists(emulator_imagedir):
return
for image in os.listdir(emulator_imagedir):
full_name = os.path.join(emulator_imagedir, image)
if 'emulator' in full_name:
logging.info('Deleting emulator image %s', full_name)
os.unlink(full_name)
def ConfirmLaunch(self, wait_for_boot=False):
"""Confirm the emulator launched properly.
Loop on a wait-for-device with a very small timeout. On each
timeout, check the emulator process is still alive.
After confirming a wait-for-device can be successful, make sure
it returns the right answer.
"""
seconds_waited = 0
number_of_waits = 2 # Make sure we can wfd twice
device = device_utils.DeviceUtils(self.device_serial)
while seconds_waited < self._LAUNCH_TIMEOUT:
try:
device.adb.WaitForDevice(
timeout=self._WAITFORDEVICE_TIMEOUT, retries=1)
number_of_waits -= 1
if not number_of_waits:
break
except device_errors.CommandTimeoutError:
seconds_waited += self._WAITFORDEVICE_TIMEOUT
device.adb.KillServer()
self.popen.poll()
if self.popen.returncode != None:
raise EmulatorLaunchException('EMULATOR DIED')
if seconds_waited >= self._LAUNCH_TIMEOUT:
raise EmulatorLaunchException('TIMEOUT with wait-for-device')
logging.info('Seconds waited on wait-for-device: %d', seconds_waited)
if wait_for_boot:
# Now that we checked for obvious problems, wait for a boot complete.
# Waiting for the package manager is sometimes problematic.
device.WaitUntilFullyBooted(timeout=self._WAITFORBOOT_TIMEOUT)
logging.info('%s is now fully booted', self.avd_name)
def Shutdown(self):
"""Shuts down the process started by launch."""
self._DeleteAVD()
if self.popen:
self.popen.poll()
if self.popen.returncode == None:
self.popen.kill()
self.popen = None
def _ShutdownOnSignal(self, _signum, _frame):
logging.critical('emulator _ShutdownOnSignal')
for sig in self._SIGNALS:
signal.signal(sig, signal.SIG_DFL)
self.Shutdown()
raise KeyboardInterrupt # print a stack
def _InstallKillHandler(self):
"""Install a handler to kill the emulator when we exit unexpectedly."""
for sig in self._SIGNALS:
signal.signal(sig, self._ShutdownOnSignal)
| [
"[email protected]"
] | |
7880bcad5a3a3c0cfe1efef41f3c6bcba6189d35 | 49a0010d8c6c3dc4c92a5795ddee418de976ada4 | /CH03/0311.py | e40cc572a518f4ea487a43c2a36bcac7623a0484 | [] | no_license | mytree/Test_PythonCV | 4c20ee4f073558488d2bf947fca500f677f36d13 | 9ba1e0bc8e7d84f1f7df3ca051a3d7e70e1745bb | refs/heads/master | 2020-09-13T06:20:04.743092 | 2019-11-19T11:37:40 | 2019-11-19T11:37:40 | 222,679,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | #0311.py
import cv2
import numpy as np
def onMouse(event, x, y, flags, param):
## global img
    if event == cv2.EVENT_LBUTTONDOWN: # left mouse button pressed
        if flags & cv2.EVENT_FLAG_SHIFTKEY: # together with the Shift key
cv2.rectangle(param[0], (x-5,y-5),(x+5,y+5),(255,0,0))
else:
cv2.circle(param[0], (x,y), 5, (255,0,0), 3)
    elif event == cv2.EVENT_RBUTTONDOWN: # right mouse button pressed
cv2.circle(param[0], (x,y), 5, (0,0,255), 3)
    elif event == cv2.EVENT_LBUTTONDBLCLK: # left mouse button double-clicked
param[0] = np.zeros(param[0].shape, np.uint8) + 255
cv2.imshow("img", param[0])
img = np.zeros((512,512,3),np.uint8)+255
cv2.imshow('img',img)
cv2.setMouseCallback('img', onMouse, [img])
cv2.waitKey()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
3ce2bc9fc56982061e585feab4245d388dd09ad7 | da489e1e388643174101981fbbdf12fd498a3ba0 | /ihome13/ihome/api_1_0/houses.py | 04060d610fb1249835258fd7910565bf95ce96a1 | [] | no_license | zb14755456464/home | f5344b90e91a538283524dbd21fecf51cdfdbe50 | 3ec478083c2f5792ddfbfdb92e8bd43f51d6242d | refs/heads/master | 2023-01-04T16:37:28.869627 | 2018-03-11T08:25:38 | 2018-03-11T08:25:38 | 124,736,942 | 0 | 0 | null | 2022-12-27T14:57:48 | 2018-03-11T08:23:36 | JavaScript | UTF-8 | Python | false | false | 16,644 | py | # coding=utf-8
import logging
import json
from . import api
from ihome import redis_store, constants, db
from ihome.models import Area
from flask import request, jsonify, g, session, current_app
from ihome.response_code import RET
from ihome.models import House, Facility, HouseImage, User, Order
from ihome.utils.commons import login_required
from ihome.utils.image_storage import storage
from datetime import datetime
@api.route('/areas/')
def get_area_info():
"""
    1. Try to read the cached data from redis
    2. On a cache miss, query MySQL
    3. Convert the query result to JSON
    4. Save the JSON into redis
    5. On a cache hit, return the cached data directly
    6. Return the result to the browser
"""
    # I. Business logic
    # 1. Try to read the cache from redis
try:
        # Fetch the JSON string directly; JSON is also what gets stored, which makes it easy to hand the data straight back to the front end
areas_json = redis_store.get('area_info')
except Exception as e:
logging.error(e)
        # Guard against failures: if the read raises, fall back to None
areas_json = None
    # 2. No cache: query MySQL
if areas_json is None:
        # Query all the rows from MySQL
areas_list = Area.query.all()
        # 3. Convert the data to JSON
areas = []
for area in areas_list:
            # Call the model's to-dict method and build up the areas list
areas.append(area.to_dict())
        # Serialize areas to JSON so it can be cached in redis and returned as-is
areas_json = json.dumps(areas)
        # 4. Save into redis
try:
redis_store.setex('area_info', constants.AREA_INFO_REDIS_EXPIRES, areas_json)
db.session.commit()
except Exception as e:
logging.error(e)
db.session.rollback()
            # No error response is needed here: if the redis write fails, the next request will simply hit MySQL again and re-cache
    # 5. Cache hit: return the cached data
else:
logging.info('当前数据从redis中读取的')
    # II. Return the data
# return jsonify() --> contentType --> 'application/json'
    # If jsonify were called, the payload passed in would be treated as a plain string; the area data is already JSON, so jsonify cannot be used here
    # Instead, return the raw body and declare that it is JSON
    # return jsonify(errno=RET.THIRDERR, errmsg='image upload error')
return '{"errno": 0, "errmsg": "查询城区信息成功", "data":{"areas": %s}}' % areas_json, 200, \
{"Content-Type": "application/json"}
@api.route("/houses/info", methods=["POST"])
@login_required
def save_house_info():
"""保存房屋的基本信息
前端发送过来的json数据
{
"title":"",
"price":"",
"area_id":"1",
"address":"",
"room_count":"",
"acreage":"",
"unit":"",
"capacity":"",
"beds":"",
"deposit":"",
"min_days":"",
"max_days":"",
"facility":["7","8"]
}
"""
    # I. Get the parameters
house_data = request.get_json()
if house_data is None:
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
title = house_data.get("title") # 房屋名称标题
price = house_data.get("price") # 房屋单价
area_id = house_data.get("area_id") # 房屋所属城区的编号
address = house_data.get("address") # 房屋地址
room_count = house_data.get("room_count") # 房屋包含的房间数目
acreage = house_data.get("acreage") # 房屋面积
unit = house_data.get("unit") # 房屋布局(几室几厅)
capacity = house_data.get("capacity") # 房屋容纳人数
beds = house_data.get("beds") # 房屋卧床数目
deposit = house_data.get("deposit") # 押金
min_days = house_data.get("min_days") # 最小入住天数
max_days = house_data.get("max_days") # 最大入住天数
    # II. Validate the parameters
if not all((title, price, area_id, address, room_count,acreage, unit, capacity, beds, deposit, min_days, max_days)):
return jsonify(errno=RET.PARAMERR, errmsg="参数不完整")
    # Check that the unit price and deposit are well-formed
    # The front end sends amounts in yuan as floats; the database stores fen as integers
try:
price = int(float(price) * 100)
deposit = int(float(deposit) * 100)
except Exception as e:
return jsonify(errno=RET.DATAERR, errmsg="参数有误")
    # III. Save the data
    # 1. Build the house object
user_id = g.user_id
house = House(
user_id=user_id,
area_id=area_id,
title=title,
price=price,
address=address,
room_count=room_count,
acreage=acreage,
unit=unit,
capacity=capacity,
beds=beds,
deposit=deposit,
min_days=min_days,
max_days=max_days
)
    # 2. Handle the facility information of the house
facility_id_list = house_data.get("facility")
if facility_id_list:
        # The user ticked some house facilities
        # Filter out invalid facility ids sent by the client
# select * from facility where id in (facility_id_list)
try:
facility_list = Facility.query.filter(Facility.id.in_(facility_id_list)).all()
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据库异常")
        # Attach the facility information to the house
if facility_list:
house.facilities = facility_list
    # 3. Save to the database
try:
db.session.add(house)
db.session.commit()
except Exception as e:
logging.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
    # IV. Return
return jsonify(errno=RET.OK, errmsg="保存成功", data={"house_id": house.id})
@api.route("/houses/image", methods=["POST"])
@login_required
def save_house_image():
"""保存房屋的图片"""
# 获取参数 房屋的图片、房屋编号
house_id = request.form.get("house_id")
image_file = request.files.get("house_image")
    # Validate the parameters
if not all([house_id, image_file]):
return jsonify(errno=RET.PARAMERR, errmsg="参数不完整")
    # 1. Check that the house exists
    # 2. Upload the house image to Qiniu
    # 3. Save the image info into the database
    # 4. Handle the main image in the house's basic info
    # 5. Commit everything in one transaction
    # 1. Check that the house exists
try:
house = House.query.get(house_id)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据库异常")
if house is None:
return jsonify(errno=RET.NODATA, errmsg="房屋不存在")
    # 2. Upload the house image to Qiniu
image_data = image_file.read()
try:
file_name = storage(image_data)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.THIRDERR, errmsg="保存房屋图片失败")
    # 3. Save the image info into the database
house_image = HouseImage(
house_id=house_id,
url=file_name
)
db.session.add(house_image)
    # 4. Handle the main image in the house's basic info
if not house.index_image_url:
house.index_image_url = file_name
db.session.add(house)
    # 5. Commit everything in one transaction
try:
db.session.commit()
except Exception as e:
logging.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存图片信息失败")
image_url = constants.QINIU_URL_DOMAIN + file_name
return jsonify(errno=RET.OK, errmsg="保存图片成功", data={"image_url": image_url})
@api.route("/users/houses", methods=["GET"])
@login_required
def get_user_houses():
"""获取房东发布的房源信息条目"""
user_id = g.user_id
try:
user = User.query.get(user_id)
houses = user.houses
# houses = House.query.filter_by(user_id=user_id)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="获取数据失败")
    # Convert the queried house objects to dicts and collect them in a list
houses_list = []
if houses:
for house in houses:
houses_list.append(house.to_basic_dict())
return jsonify(errno=RET.OK, errmsg="OK", data={"houses": houses_list})
@api.route("/houses/index", methods=["GET"])
def get_house_index():
"""获取主页幻灯片展示的房屋基本信息"""
# 从缓存中尝试获取数据
try:
ret = redis_store.get("home_page_data")
except Exception as e:
logging.error(e)
ret = None
if ret:
logging.info("hit house index info redis")
        # redis stores a JSON string, so splice it into the response body directly
return '{"errno":0, "errmsg":"OK", "data":%s}' % ret, 200, {"Content-Type": "application/json"}
else:
try:
            # Query the database for the five houses with the most orders
houses = House.query.order_by(House.order_count.desc()).limit(constants.HOME_PAGE_MAX_HOUSES)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="查询数据失败")
if not houses:
return jsonify(errno=RET.NODATA, errmsg="查询无数据")
houses_list = []
for house in houses:
            # Skip houses that have no main image set
if not house.index_image_url:
continue
houses_list.append(house.to_basic_dict())
        # Convert the data to JSON and store it in the redis cache
json_houses = json.dumps(houses_list)
try:
redis_store.setex("home_page_data", constants.HOME_PAGE_DATA_REDIS_EXPIRES, json_houses)
except Exception as e:
logging.error(e)
return '{"errno":0, "errmsg":"OK", "data":%s}' % json_houses, 200, {"Content-Type": "application/json"}
@api.route("/houses/<int:house_id>", methods=["GET"])
def get_house_detail(house_id):
"""获取房屋详情"""
# 前端在房屋详情页面展示时,如果浏览页面的用户不是该房屋的房东,则展示预定按钮,否则不展示,
# 所以需要后端返回登录用户的user_id
# 尝试获取用户登录的信息,若登录,则返回给前端登录用户的user_id,否则返回user_id=-1
user_id = session.get("user_id", "-1")
# 校验参数
if not house_id:
return jsonify(errno=RET.PARAMERR, errmsg="参数缺失")
# 先从redis缓存中获取信息
try:
ret = redis_store.get("house_info_%s" % house_id)
except Exception as e:
logging.error(e)
ret = None
if ret:
logging.info("hit house info redis")
return '{"errno":"0", "errmsg":"OK", "data":{"user_id":%s, "house":%s}}' % (user_id, ret), 200, {"Content-Type": "application/json"}
# 查询数据库
try:
house = House.query.get(house_id)
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DBERR, errmsg="查询数据失败")
if not house:
return jsonify(errno=RET.NODATA, errmsg="房屋不存在")
# 将房屋对象数据转换为字典
try:
house_data = house.to_full_dict()
except Exception as e:
logging.error(e)
return jsonify(errno=RET.DATAERR, errmsg="数据出错")
# 存入到redis中
json_house = json.dumps(house_data)
try:
redis_store.setex("house_info_%s" % house_id, constants.HOUSE_DETAIL_REDIS_EXPIRE_SECOND, json_house)
except Exception as e:
current_app.logger.error(e)
resp = '{"errno":"0", "errmsg":"OK", "data":{"user_id":%s, "house":%s}}' % (user_id, json_house), 200, {"Content-Type": "application/json"}
return resp
# /api/v1_0/houses?sd=xxxx-xx-xx&ed=xxxx-xx-xx&aid=xx&sk=new&p=1
@api.route("/houses", methods=["GET"])
def get_house_list():
"""获取房屋列表信息"""
# 一. 获取参数
start_date_str = request.args.get("sd", "") # 想要查询的起始时间
end_date_str = request.args.get("ed", "") # 想要查询的终止时间
area_id = request.args.get("aid", "") # 区域id
sort_key = request.args.get("sk", "new") # 排序关键字
page = request.args.get("p", 1) # 页数
# 二. 校验参数
# 2.1判断日期
try:
start_date = None
if start_date_str:
start_date = datetime.strptime(start_date_str, "%Y-%m-%d")
end_date = None
if end_date_str:
end_date = datetime.strptime(end_date_str, "%Y-%m-%d")
if start_date and end_date:
assert start_date <= end_date
except Exception as e:
return jsonify(errno=RET.PARAMERR, errmsg="日期参数有误")
# 2.2判断页数
try:
page = int(page)
except Exception:
page = 1
# 三. 业务逻辑处理
# 3.1 先从redis缓存中获取数据
try:
redis_key = "houses_%s_%s_%s_%s" % (start_date_str, end_date_str, area_id, sort_key)
resp_json = redis_store.hget(redis_key, page)
except Exception as e:
current_app.logger.error(e)
resp_json = None
if resp_json:
# 表示从缓存中拿到了数据
return resp_json, 200, {"Content-Type": "application/json"}
# 3.2 定义查询数据的参数空列表
filter_params = []
# 3.3 处理区域信息
if area_id:
filter_params.append(House.area_id == area_id)
# 3.4 处理时间, 获取不冲突的房屋信息
try:
conflict_orders_li = []
if start_date and end_date:
# 从订单表中查询冲突的订单,进而获取冲突的房屋id
conflict_orders_li = Order.query.filter(Order.begin_date <= end_date, Order.end_date >= start_date).all()
elif start_date:
# 从订单表中查询冲突的订单,进而获取冲突的房屋id
conflict_orders_li = Order.query.filter(Order.end_date >= start_date).all()
elif end_date:
# 从订单表中查询冲突的订单,进而获取冲突的房屋id
conflict_orders_li = Order.query.filter(Order.begin_date <= end_date).all()
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据库异常")
if conflict_orders_li:
conflict_house_id_li = [order.house_id for order in conflict_orders_li]
# 添加条件,查询不冲突的房屋
filter_params.append(House.id.notin_(conflict_house_id_li))
# 3.5 排序
if sort_key == "booking":
house_query = House.query.filter(*filter_params).order_by(House.order_count.desc())
elif sort_key == "price-inc":
house_query = House.query.filter(*filter_params).order_by(House.price.asc())
elif sort_key == "price-des":
house_query = House.query.filter(*filter_params).order_by(House.price.desc())
else:
house_query = House.query.filter(*filter_params).order_by(House.create_time.desc())
# 3.6 分页 sqlalchemy的分页
try:
# 页数 每页数量 错误输出
house_page = house_query.paginate(page, constants.HOUSE_LIST_PAGE_CAPACITY, False)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据库异常")
# 3.7 将数据转为JSON
house_li = house_page.items # 当前页中的数据结果
total_page = house_page.pages # 总页数
houses = []
for house in house_li:
houses.append(house.to_basic_dict())
# 将结果转换json字符串
resp = dict(errno=RET.OK, errmsg="查询成功", data={"houses": houses, "total_page": total_page, "current_page":page})
resp_json = json.dumps(resp)
# 3.8 将结果缓存到redis中
if page <= total_page:
# 用redis的哈希类型保存分页数据
redis_key = "houses_%s_%s_%s_%s" % (start_date_str, end_date_str, area_id, sort_key)
try:
# 使用redis中的事务
pipeline = redis_store.pipeline()
# 开启事务
pipeline.multi()
pipeline.hset(redis_key, page, resp_json)
pipeline.expire(redis_key, constants.HOUSE_LIST_PAGE_REDIS_EXPIRES)
# 执行事务
pipeline.execute()
except Exception as e:
current_app.logger.error(e)
# 四. 数据返回
return resp_json, 200, {"Content-Type": "application/json"}
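# --- Added illustrative sketch (not part of the original API) ---
# The three Order.query filters above are cases of the standard interval-overlap
# test. Shown here in pure Python so the predicate can be checked in isolation;
# the helper name is made up for illustration.
def _dates_overlap(begin1, end1, begin2, end2):
    """Two closed date ranges overlap iff each one starts before the other ends."""
    return begin1 <= end2 and end1 >= begin2

assert _dates_overlap(1, 3, 2, 4) is True   # partial overlap
assert _dates_overlap(1, 2, 3, 4) is False  # disjoint ranges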
| [
"[email protected]"
] | |
90d59540d8e2afccaf99b13f80cc0a735d81e0a3 | 85a7dde9c48945972a7f521f0fbb2eb56b323aa2 | /obsolete_files/old/listening_eyes.py | 69a61d1a1a20e04408df1df5513166b7f89f27b3 | [] | no_license | jwmcgettigan/renegade | 1e8f61a14d6a5a7aff5c410f0c26bb166f95bd03 | ef76bebc6867683e1fb3201be547f42aa6e65881 | refs/heads/master | 2021-04-06T13:53:12.945602 | 2018-07-17T22:09:13 | 2018-07-17T22:09:13 | 124,680,527 | 1 | 0 | null | 2018-07-17T22:09:14 | 2018-03-10T17:33:52 | Makefile | UTF-8 | Python | false | false | 752 | py | #!/usr/bin/env python
import rospy as rp
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
bridge = CvBridge()
def left_callback(data):
cv2.imshow("left_eye", bridge.imgmsg_to_cv2(data, desired_encoding="passthrough"))
if cv2.waitKey(20) & 0xFF == ord('q'):
pass
def right_callback(data):
cv2.imshow("right_eye", bridge.imgmsg_to_cv2(data, desired_encoding="passthrough"))
if cv2.waitKey(20) & 0xFF == ord('q'):
pass
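# Added refactor sketch (hypothetical helper, not in the original node): the two
# callbacks above differ only in the window name, so a small factory would remove
# the duplication.
def make_eye_callback(window_name):
    def callback(data):
        cv2.imshow(window_name, bridge.imgmsg_to_cv2(data, desired_encoding="passthrough"))
        cv2.waitKey(20)
    return callback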
def listener():
rp.init_node('listener', anonymous=True)
rp.Subscriber("left_eye", Image, left_callback)
rp.Subscriber("right_eye", Image, right_callback)
rp.spin()
if __name__ == '__main__':
listener()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
1816c72bb11d3ba9ad7302ebd635296b73376925 | 3235145c84c48535bbf27dabfb3faa7359ed6fef | /google-cloud-sdk/lib/surface/kms/keyrings/list.py | bf8cafeeb0701a5774aa513b68e90225b592a8f0 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | paceuniversity/CS3892017team1 | b69fb10f5194f09748cd5bca48901e9bd87a55dc | f8e82537c84cac148f577794d2299ea671b26bc2 | refs/heads/master | 2021-01-17T04:34:04.158071 | 2017-05-09T04:10:22 | 2017-05-09T04:10:22 | 82,976,622 | 2 | 8 | null | 2020-07-25T09:45:47 | 2017-02-23T22:13:04 | Python | UTF-8 | Python | false | false | 1,842 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List keyrings within a location."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.kms import flags
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
class List(base.ListCommand):
"""List keyrings within a location.
Lists all keyrings within the given location.
## EXAMPLES
The following command lists a maximum of five keyrings in the location
`global`:
$ {command} --location global --limit=5
"""
@staticmethod
def Args(parser):
parser.display_info.AddFormat('table(name)')
def Run(self, args):
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
location_ref = resources.REGISTRY.Create(
flags.LOCATION_COLLECTION,
projectsId=properties.VALUES.core.project.GetOrFail)
request = messages.CloudkmsProjectsLocationsKeyRingsListRequest(
parent=location_ref.RelativeName())
return list_pager.YieldFromList(
client.projects_locations_keyRings,
request,
field='keyRings',
limit=args.limit,
batch_size_attribute='pageSize')
| [
"[email protected]"
] | |
91a697244a8376cdea2aa5aa40233538c0976c78 | 66013dd1c4b051d1934a82f6c903f4088e9db3d0 | /2주차/2021.01.26/예제/differentiation.py | ed0d5c04867a8c4231bd263838638e8709580c8b | [] | no_license | dlrgy22/Boostcamp | 690656d5b0e35d88a9b1480b36b42ffba47b3bc5 | af6fb8ce02cc92d1d0227a972d187ccc294af0e9 | refs/heads/main | 2023-04-18T04:06:18.419625 | 2021-05-07T01:24:47 | 2021-05-07T01:24:47 | 330,589,750 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import sympy as sym
from sympy.abc import x, y
func = sym.diff(sym.poly(x**2 + 2*x + 3), x)
print(func)
print(func.subs(x, 2))
print(sym.diff(sym.poly(x**2 + 2*x*y + 3) + sym.cos(x + 2*y), x))
print(sym.diff(sym.poly(x**2 + 2*x*y + 3) + sym.cos(x + 2*y), y))
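# Added hedged extension (not in the original example): lambdify turns the symbolic
# derivative computed above into a plain numeric function, which is how such results
# are usually consumed from numeric code.
from sympy import lambdify

func_num = lambdify(x, func.as_expr())  # func is Poly(2*x + 2, x); as_expr() unwraps it
print(func_num(2))                      # 6, matching func.subs(x, 2) above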
| [
"[email protected]"
] | |
6b54a465ce0fb3d99b380b2741c436f2a04aba50 | d5ab31874dd279656d7f24780e102b352f7f1e08 | /reports/configs/only_logd_dmpnn8_2/only_logd_dmpnn8_2 | 2bdd9de08945ebddf565738ef8cab7e248ea5be7 | [
"MIT"
] | permissive | WangYitian123/graph_networks | 77f76ab9ffa74bb4d52df52b1a17867c0c86be25 | 542f2a59b1b9708abdc718d77db7111f3ba2df96 | refs/heads/main | 2023-07-08T22:43:11.775430 | 2021-07-29T13:01:35 | 2021-07-29T13:01:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,815 | from dataclasses import dataclass, field
from typing import List
import tensorflow as tf
from graph_networks.utilities import *
import logging
import os
ATOM_FEATURE_DIM = DGIN8_ATOM_FEATURE_DIM
EDGE_FEATURE_DIM = DGIN8_EDGE_FEATURE_DIM
@dataclass
class BasicModelConfig:
"""
Config for model1/2/3 run file.
General model parameters
"""
model_name: str = 'only_logd_dmpnn8_2' # without h_w in DGIN gin part - added h_v_0 instead
# whole train/eval split - no more double split within train data set
# random train/test split in get_data_sd - only change overall_seed
# CHANGES dgin3 10.02.2021:
# *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM
# *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'
# CHANGES dgin3 16.02.2021:
# *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM
# *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'
# CHANGES dgin4 16.02.2021:
# *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin
# encoding before logD prediction
# test_frags_dgin4 was added for species inclusion in model2 call()
batch_size: int =15
override_if_exists: bool = True
overall_seed: int = 2
# path to the project folder
project_path:str = "./"
retrain_model: bool = False
retrain_model_name: str = ''
retrain_model_epoch: str = ''
retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch
train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logd/'
test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logd/'
combined_dataset: bool = False
add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logs/'
add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logs/'
test_model: bool = False
test_model_epoch: str = '887'
# define the number or test runs for the CI.
# the mean and std of the RMSE and r^2 of the combined runs are taken as the output.
test_n_times: int = 1
# do you want to test the model with consensus mode?
# if yes, a defined ML model will be included in the consensus predictions during the testing.
consensus: bool = False
# include dropout during testing?
include_dropout: bool = False
test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch
# To save the prediction values for each property set to True
# When this flag is True - the whole test dataset is taken an test_n_times is set to zero!
save_predictions: bool = False
# define the folder where you want to save the predictions.
# For each property, a file is created under the property name ("./logd.txt","./logs.txt","./logp.txt","./others.txt")
test_prediction_output_folder: str = project_path+"reports/predictions/"+model_name+"/"
encode_hidden: bool = False
log_dir: str = project_path+'reports/logs/'+model_name+'.log'
verbosity_level = logging.INFO
model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'
plot_dir: str = project_path+'reports/figures/'+model_name+'/'
tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'
config_log_dir: str = project_path+'reports/configs/'+model_name+'/'
model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'
stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'
@dataclass
class DGINConfig:
"""
    Config for the directed-mpnn class.
"""
dropout_aggregate_dmpnn: bool = False
layernorm_aggregate_dmpnn: bool = True
dropout_passing_dmpnn: bool = False
layernorm_passing_dmpnn: bool = True
dropout_aggregate_gin: bool = False
layernorm_aggregate_gin: bool = True
dropout_passing_gin: bool = False
layernorm_passing_gin: bool = True
gin_aggregate_bias: bool = False
dmpnn_passing_bias: bool = False
init_bias: bool = False
massge_iteration_dmpnn: int = 4
message_iterations_gin: int = 4
dropout_rate: float = 0.15
input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)
passing_hidden_size: int = 56 # this can be changed
input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021
return_hv: bool = True # model3 parameter
@dataclass
class Model1Config:
"""
Config model1 class - no subclass configs are defined here.
"""
validation_split: float = 0.90
learning_rate: float = 0.004
clip_rate: float = 0.6
optimizer = tf.keras.optimizers.Adam(learning_rate)
lipo_loss_mse = tf.keras.losses.mse
lipo_loss_mae = tf.keras.losses.mae
logP_loss_mse = tf.keras.losses.mse
logS_loss_mse = tf.keras.losses.mse
other_loss_mse = tf.keras.losses.mse
mw_loss_mse = tf.keras.losses.mse
metric = tf.keras.losses.mae
epochs: int = 1600
# define the number of epochs for each test run.
save_after_epoch: int = 3
# dropout rate for the general model - mainly the MLP for the different log predictions
dropout_rate: float = 0.15 # the overall dropout rate of the readout functions
# the seed to shuffle the training/validation dataset; For the same dataset, even when
    # combined_dataset is True, it is the same training/validation instances
train_data_seed: int = 0
hidden_readout_1: int = 32
hidden_readout_2: int = 14
activation_func_readout = tf.nn.relu
include_logD: bool = True
include_logS: bool = False
include_logP: bool = False
include_other: bool = False
include_mw: bool = False
include_rot_bond: bool = False
include_HBA: bool = False
include_HBD: bool = False
    # define the starting threshold for the RMSE of the model. When the combined RMSE
    # is below this threshold, the model weights are saved and a new threshold
    # is set. It only serves as a starting threshold so that not too many models
    # are saved. It depends on how many log endpoints are taken into
    # consideration - three endpoints have a higher combined RMSE than only one
    # endpoint.
best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/
# define the individual thresholds. If one model is better, the corresponding
# model weights are being saved.
best_evaluation_threshold_logd: float = 1.85
best_evaluation_threshold_logp: float = 1.65
best_evaluation_threshold_logs: float = 2.15
best_evaluation_threshold_other: float = 2.15
# 2.45 for all_logs
# 0.70 logP
# 0.75 logD
# 1.00 logS
# 1.75 logSD
# 1.70 logSP
# 1.45 logDP
include_fragment_conv: bool = False # was introduced on the 4.12.2020
use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss
shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)
add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction
@dataclass
class FrACConfig:
"""
Config fragment aggregation class - no subclass configs are defined here.
"""
input_size_gin: int = 28
layernorm_aggregate: bool = True
reduce_mean: bool = True # when false -> reduce_sum
@dataclass
class MLConfig:
"""
Configs for the ML algorithm
"""
# which algorithm do you want to use for the consensus?
# possibilities are: "SVM", "RF", "KNN" or "LR" - all are regression models!
    # SVM: Support Vector Machine; RF: Random Forest; KNN: K-Nearest Neighbors; LR: Linear Regression;
algorithm: str = "SVM"
# which fingerprint to use - possibilities are: "ECFP" or "MACCS"
fp_types: str = "ECFP"
# If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048!
n_bits: int = 2048
# If "ECFP" fingerprint is used, define the radius
radius: int = 4
# define if descriptors should be included into the non-GNN molecular representation
include_descriptors: bool = True
    # define if the descriptors should be standardized by scaling and centering (Sklearn)
standardize: bool = True
@dataclass
class Config():
"""
Overall config class for model2 and run file.
Includes all submodels config
"""
basic_model_config: BasicModelConfig
model1_config: Model1Config
d_gin_config: DGINConfig
frag_acc_config: FrACConfig
ml_config: MLConfig
model: str = 'model11' | [
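# Added usage sketch (not part of the original config file; assumes the module's own
# imports - tensorflow and graph_networks.utilities - resolve). It only shows how the
# nested dataclasses above are assembled into one Config instance.
if __name__ == '__main__':
    config = Config(
        basic_model_config=BasicModelConfig(),
        model1_config=Model1Config(),
        d_gin_config=DGINConfig(),
        frag_acc_config=FrACConfig(),
        ml_config=MLConfig(),
    )
    print(config.basic_model_config.model_name)  # only_logd_dmpnn8_2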
"[email protected]"
] | ||
539846eac1b2f133d9cd8effb4190a5c233a6adb | 1a5d7882b9e89b821851be328256211c65f9c1a2 | /simple_settings/strategies/__init__.py | 7d95f88acb1141987187d45f54d012d3e2e30de8 | [
"MIT"
] | permissive | matthewh/simple-settings | 2644f3032e5fc7ffa50dc8fa164bf79f640e5641 | dbddf8d5be7096ee7c4c3cc6d82824befa9b714f | refs/heads/master | 2022-11-04T22:25:55.398073 | 2020-06-22T19:25:03 | 2020-06-22T19:25:03 | 274,223,776 | 0 | 0 | MIT | 2020-06-22T19:21:30 | 2020-06-22T19:21:29 | null | UTF-8 | Python | false | false | 805 | py | # -*- coding: utf-8 -*-
from .cfg import SettingsLoadStrategyCfg
from .environ import SettingsLoadStrategyEnviron
from .json_file import SettingsLoadStrategyJson
from .python import SettingsLoadStrategyPython
yaml_strategy = None
try:
from .yaml_file import SettingsLoadStrategyYaml
yaml_strategy = SettingsLoadStrategyYaml
except ImportError: # pragma: no cover
pass
toml_strategy = None
try:
from .toml_file import SettingsLoadStrategyToml
toml_strategy = SettingsLoadStrategyToml
except ImportError: # pragma: no cover
pass
strategies = (
SettingsLoadStrategyPython,
SettingsLoadStrategyCfg,
SettingsLoadStrategyJson,
SettingsLoadStrategyEnviron
)
if yaml_strategy:
strategies += (yaml_strategy,)
if toml_strategy:
strategies += (toml_strategy,)
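# Added sketch (not part of simple-settings): a project-specific strategy could be
# appended the same way the optional YAML/TOML ones are above. The name /
# is_valid_file / load_settings_file interface is assumed from the strategies
# imported in this module.
class SettingsLoadStrategyNull:
    name = 'null'

    @staticmethod
    def is_valid_file(file_name):
        return file_name.endswith('.null')

    @staticmethod
    def load_settings_file(settings_file):
        return {}  # yields no settings


strategies += (SettingsLoadStrategyNull,)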
| [
"[email protected]"
] | |
ed2954bdd2ec5424da580a3dbdf86056e9c9e612 | dd1e2ed53fec3dca0fa60042c04ad8cf6019ed89 | /python/functions/arguments_passed_as_dictionary/arguments_passed_as_dictionary.py | bd77e7ed569887e6547b03ab831fdd645d5f53b0 | [] | no_license | cloudavail/snippets | 9be4ee285789ff3cff1a3a71e1f505a1b1697500 | 340f5c2735d6ec88b793f1eea91f2b026c24586e | refs/heads/main | 2023-08-03T10:30:13.976947 | 2023-05-15T04:46:32 | 2023-05-15T04:46:32 | 12,838,293 | 22 | 24 | null | 2023-09-07T03:33:17 | 2013-09-15T00:40:49 | JavaScript | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
# objective: pass arguments as dictionary
# creates the function "argument_catcher" and accepts the following keywords
def argument_catcher(city, population, size, state):
print 'city: {!s}'.format(city)
print 'state: {!s}'.format(state)
print 'population: {!s}'.format(population)
print 'size: {!s} miles'.format(size)
# creates the dictionary to be passed to the "argument_catcher" function
arguments_dict = {'city': 'San Francisco', 'population': 800000, 'size': 49,
'state': 'California'}
# calls the function "argument_catcher" with the previously created dictionary
argument_catcher(**arguments_dict)
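# Added companion example: the mirror image of the call above - a function that
# *collects* arbitrary keywords back into a dict via **kwargs. Written to run under
# both Python 2 (which this script targets) and Python 3.
def argument_collector(**kwargs):
    return sorted(kwargs)

assert argument_collector(**arguments_dict) == ['city', 'population', 'size', 'state']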
| [
"[email protected]"
] | |
39c3141c70b4a3fe7f93408a9993d754ec1d4bd5 | e2c6f262bb4ea12e3adb4534b3d7e3451c416dc4 | /slarson/pywikipedia/maintcont.py | b55f806b04bc8e108737425fb4b8a8401c72cf48 | [
"MIT",
"Python-2.0",
"LicenseRef-scancode-mit-old-style"
] | permissive | slarson/ncmir-semantic-sandbox | c48e8c9dd5a6f5769d4422c80ca58c370786bfab | d6a02a5cf4415796f25d191d541ebaccaab53e7f | refs/heads/master | 2016-09-06T04:10:21.136714 | 2009-03-31T09:49:59 | 2009-03-31T09:49:59 | 32,129,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,925 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
The controller bot for maintainer.py
Exactly one instance should be running of it. To check, use /whois maintcont on irc.freenode.net
This script requires the Python IRC library http://python-irclib.sourceforge.net/
Warning: experimental software, use at your own risk
"""
__version__ = '$Id$'
# Author: Balasyum
# http://hu.wikipedia.org/wiki/User:Balasyum
from ircbot import SingleServerIRCBot
from irclib import nm_to_n
import threading
import time
import math
tasks = 'rciw|censure'
projtasks = {}
mainters = []
activity = {}
class MaintcontBot(SingleServerIRCBot):
def __init__(self, nickname, server, port=6667):
SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_welcome(self, c, e):
t = threading.Thread(target=self.lister)
t.setDaemon(True)
t.start()
def on_privmsg(self, c, e):
nick = nm_to_n(e.source())
c = self.connection
cmd = e.arguments()[0]
do = cmd.split()
if do[0] == "workerjoin":
c.privmsg(nick, "accepted")
mainters.append([nick, do[1]])
activity[nick] = time.time()
print "worker got, name:", nick, "job:", do[1]
self.retasker(do[1])
elif do[0] == "active":
activity[nick] = time.time()
def on_dccmsg(self, c, e):
pass
def on_dccchat(self, c, e):
pass
def lister(self):
while True:
print
print "worker list:"
for mainter in mainters:
if time.time() - activity[mainter[0]] > 30:
print "*", mainter[0], "has been removed"
mainters.remove(mainter)
del activity[mainter[0]]
self.retasker(mainter[1])
continue
print "mainter name:", mainter[0], "job:", mainter[1]
print "--------------------"
print
time.sleep(1*60)
def retasker(self, group, optask = ''):
ingroup = 0
for mainter in mainters:
if mainter[1] == group:
ingroup += 1
if ingroup == 0:
return
if projtasks.has_key(group):
grt = projtasks[group]
else:
grt = tasks
tpc = grt.split('|')
tpcn = round(len(tpc) / ingroup)
i = 0
for mainter in mainters:
if mainter[1] != group:
continue
tts = '|'.join(tpc[int(round(i * tpcn)):int(round((i + 1) * tpcn))])
            if tts:  # only message workers whose task slice is non-empty; `tts != False` was always true for strings
self.connection.privmsg(mainter[0], "tasklist " + tts)
i += 1
def main():
bot = MaintcontBot("maintcont", "irc.freenode.net")
bot.start()
if __name__ == "__main__":
main()
| [
"stephen.larson@933566eb-c141-0410-b91b-f3a7fcfc7766"
] | stephen.larson@933566eb-c141-0410-b91b-f3a7fcfc7766 |
f5bb27e1af65281e82c2d2612b64ea120e971722 | 30109f5f173f4e51a20cfcaf6ec41628b177f553 | /fhir/resources/STU3/documentmanifest.py | 94a0436357d396ba1018fdc3d460cc56e2befff9 | [
"BSD-3-Clause"
] | permissive | arkhn/fhir.resources | 82c8f705c8f19e15621f2bb59fd17600c0ef3697 | 122e89c8599c4034bb3075b31d1a1188e377db91 | refs/heads/master | 2022-12-16T07:58:19.448071 | 2020-08-13T03:59:37 | 2020-08-13T03:59:37 | 288,683,730 | 1 | 0 | NOASSERTION | 2020-08-19T09:01:02 | 2020-08-19T09:01:01 | null | UTF-8 | Python | false | false | 12,206 | py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/DocumentManifest
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import backboneelement, domainresource, fhirtypes
class DocumentManifest(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A list that defines a set of documents.
A collection of documents compiled for a purpose together with metadata
that applies to the collection.
"""
resource_type = Field("DocumentManifest", const=True)
author: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="author",
title="Who and/or what authored the manifest",
description=(
"Identifies who is responsible for creating the manifest, and adding "
"documents to it."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Practitioner",
"Organization",
"Device",
"Patient",
"RelatedPerson",
],
)
content: ListType[fhirtypes.DocumentManifestContentType] = Field(
...,
alias="content",
title="The items included",
description="The list of Documents included in the manifest.",
# if property is element of this resource.
element_property=True,
)
created: fhirtypes.DateTime = Field(
None,
alias="created",
title="When this document manifest created",
description=(
"When the document manifest was created for submission to the server "
"(not necessarily the same thing as the actual resource last modified "
"time, since it may be modified, replicated, etc.)."
),
# if property is element of this resource.
element_property=True,
)
created__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_created", title="Extension field for ``created``."
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Human-readable description (title)",
description=(
"Human-readable description of the source document. This is sometimes "
'known as the "title".'
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Other identifiers for the manifest",
description=(
"Other identifiers associated with the document manifest, including "
"version independent identifiers."
),
# if property is element of this resource.
element_property=True,
)
masterIdentifier: fhirtypes.IdentifierType = Field(
None,
alias="masterIdentifier",
title="Unique Identifier for the set of documents",
description=(
"A single identifier that uniquely identifies this manifest. "
"Principally used to refer to the manifest in non-FHIR contexts."
),
# if property is element of this resource.
element_property=True,
)
recipient: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="recipient",
title="Intended to get notified about this set of documents",
description=(
"A patient, practitioner, or organization for which this set of "
"documents is intended."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Patient",
"Practitioner",
"RelatedPerson",
"Organization",
],
)
related: ListType[fhirtypes.DocumentManifestRelatedType] = Field(
None,
alias="related",
title="Related things",
description="Related identifiers or resources associated with the DocumentManifest.",
# if property is element of this resource.
element_property=True,
)
source: fhirtypes.Uri = Field(
None,
alias="source",
title="The source system/application/software",
description=(
"Identifies the source system, application, or software that produced "
"the document manifest."
),
# if property is element of this resource.
element_property=True,
)
source__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_source", title="Extension field for ``source``."
)
status: fhirtypes.Code = Field(
...,
alias="status",
title="current | superseded | entered-in-error",
description="The status of this document manifest.",
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["current", "superseded", "entered-in-error"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
subject: fhirtypes.ReferenceType = Field(
None,
alias="subject",
title="The subject of the set of documents",
description=(
"Who or what the set of documents is about. The documents can be about "
"a person, (patient or healthcare practitioner), a device (i.e. "
"machine) or even a group of subjects (such as a document about a herd "
"of farm animals, or a set of patients that share a common exposure). "
"If the documents cross more than one subject, then more than one "
"subject is allowed here (unusual use case)."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Patient", "Practitioner", "Group", "Device"],
)
type: fhirtypes.CodeableConceptType = Field(
None,
alias="type",
title="Kind of document set",
description=(
"Specifies the kind of this set of documents (e.g. Patient Summary, "
"Discharge Summary, Prescription, etc.). The type of a set of documents"
" may be the same as one of the documents in it - especially if there "
"is only one - but it may be wider."
),
# if property is element of this resource.
element_property=True,
)
class DocumentManifestContent(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
The items included.
The list of Documents included in the manifest.
"""
resource_type = Field("DocumentManifestContent", const=True)
pAttachment: fhirtypes.AttachmentType = Field(
None,
alias="pAttachment",
title="Contents of this set of documents",
description=(
"The list of references to document content, or Attachment that consist"
" of the parts of this document manifest. Usually, these would be "
"document references, but direct references to Media or Attachments are"
" also allowed."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e p[x]
one_of_many="p",
one_of_many_required=True,
)
pReference: fhirtypes.ReferenceType = Field(
None,
alias="pReference",
title="Contents of this set of documents",
description=(
"The list of references to document content, or Attachment that consist"
" of the parts of this document manifest. Usually, these would be "
"document references, but direct references to Media or Attachments are"
" also allowed."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e p[x]
one_of_many="p",
one_of_many_required=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {"p": ["pAttachment", "pReference"]}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
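# Added illustrative note: for DocumentManifestContent the validator above enforces
# "exactly one of pAttachment / pReference" - constructing the element with both
# fields set, or with neither, raises a ValueError.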
class DocumentManifestRelated(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Related things.
Related identifiers or resources associated with the DocumentManifest.
"""
resource_type = Field("DocumentManifestRelated", const=True)
identifier: fhirtypes.IdentifierType = Field(
None,
alias="identifier",
title="Identifiers of things that are related",
description=(
"Related identifier to this DocumentManifest. For example, Order "
"numbers, accession numbers, XDW workflow numbers."
),
# if property is element of this resource.
element_property=True,
)
ref: fhirtypes.ReferenceType = Field(
None,
alias="ref",
title="Related Resource",
description=(
"Related Resource to this DocumentManifest. For example, Order, "
"ProcedureRequest, Procedure, EligibilityRequest, etc."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
| [
"[email protected]"
] | |
ec282d154faabb3d27915f38c3c13d823ae008c8 | 39de3097fb024c67a00c8d0e57c937d91f8b2cc9 | /Graphs/first_depth_first_search.py | d08ac89c8316ae345b61554a0dbaf65cbb800397 | [] | no_license | srajsonu/InterviewBit-Solution-Python | 4f41da54c18b47db19c3c0ad0e5efa165bfd0cd0 | 6099a7b02ad0d71e08f936b7ac35fe035738c26f | refs/heads/master | 2023-03-07T05:49:15.597928 | 2021-02-24T18:20:07 | 2021-02-24T18:20:07 | 249,359,666 | 0 | 2 | null | 2020-10-06T10:54:07 | 2020-03-23T07:09:53 | Python | UTF-8 | Python | false | false | 558 | py | from collections import defaultdict
class Solution:
def __init__(self):
self.graph = defaultdict(list)
    def Solve(self, A, B, C):
        n = len(A)
        for i in range(n):
            self.graph[A[i]].append(i + 1)
        vis = [0] * (n + 1)
        q = []
        q.append(C)
        vis[C] = 1
        while q:
            a = q.pop(0)
            for i in self.graph[a]:
                if not vis[i]:
                    q.append(i)
                    vis[i] = 1
        return vis[B]
A = [1, 1, 1, 3, 3, 2, 2, 7, 6]
B = 9
C = 1
D = Solution()
print(D.Solve(A, B, C))
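# Added companion (hedged): despite the file name, Solve() above performs a
# breadth-first search (FIFO queue with pop(0)). A depth-first variant gives the
# same reachability answer by swapping the queue for a stack.
def solve_dfs(A, B, C):
    g = defaultdict(list)
    for i, parent in enumerate(A):
        g[parent].append(i + 1)
    vis = [0] * (len(A) + 1)
    stack = [C]
    vis[C] = 1
    while stack:
        a = stack.pop()  # LIFO -> depth-first
        for i in g[a]:
            if not vis[i]:
                vis[i] = 1
                stack.append(i)
    return vis[B]

print(solve_dfs(A, B, C))  # same result as the BFS version above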
| [
"[email protected]"
] | |
75cc35602ae659ea024b658db136fe838acb3ec8 | dae4ab4882080344e5f505def7e2e59e0ed888b4 | /polyaxon/libs/unique_urls.py | 9a1268f47539af0e3fffc4d92358250465c22ab1 | [
"MPL-2.0"
] | permissive | vfdev-5/polyaxon | 8c3945604e8eaa25ba8b3a39ed0838d0b9f39a28 | 3e1511a993dc1a03e0a0827de0357f4adcc0015f | refs/heads/master | 2021-07-09T22:27:23.272591 | 2018-11-01T23:44:44 | 2018-11-01T23:44:44 | 154,320,634 | 0 | 0 | MIT | 2018-10-23T12:01:34 | 2018-10-23T12:01:33 | null | UTF-8 | Python | false | false | 1,467 | py | def get_user_url(username):
return '/{}'.format(username)
def get_project_url(unique_name):
values = unique_name.split('.')
return '{}/{}'.format(get_user_url(values[0]), values[1])
def get_user_project_url(username, project_name):
return '{}/{}'.format(get_user_url(username), project_name)
def get_experiment_url(unique_name):
values = unique_name.split('.')
project_url = get_user_project_url(username=values[0], project_name=values[1])
return '{}/experiments/{}'.format(project_url, values[-1])
def get_experiment_health_url(unique_name):
experiment_url = get_experiment_url(unique_name=unique_name)
return '{}/_heartbeat'.format(experiment_url)
def get_experiment_group_url(unique_name):
values = unique_name.split('.')
project_url = get_user_project_url(username=values[0], project_name=values[1])
return '{}/groups/{}'.format(project_url, values[-1])
def get_job_url(unique_name):
values = unique_name.split('.')
project_url = get_user_project_url(username=values[0], project_name=values[1])
return '{}/jobs/{}'.format(project_url, values[-1])
def get_job_health_url(unique_name):
job_url = get_job_url(unique_name=unique_name)
return '{}/_heartbeat'.format(job_url)
def get_build_url(unique_name):
values = unique_name.split('.')
project_url = get_user_project_url(username=values[0], project_name=values[1])
return '{}/builds/{}'.format(project_url, values[-1])
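# Added self-contained checks (not in the original module). The unique names below
# are made up, following the "<user>.<project>.<...>.<id>" convention that the
# split('.') calls above imply.
if __name__ == '__main__':
    assert get_user_project_url('alice', 'mnist') == '/alice/mnist'
    assert get_experiment_url('alice.mnist.7') == '/alice/mnist/experiments/7'
    assert get_job_health_url('alice.mnist.jobs.3') == '/alice/mnist/jobs/3/_heartbeat'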
| [
"[email protected]"
] | |
94c209d3d25c989f349ccd38025fa4dd3e3dbd18 | 7f35d7d1b8f203217f47a615ca8efdb5e17976db | /algo/second/p693_binary_number_with_alternating_bits.py | 1c70b23a02fcb9375c33a53430168b55fc331bdc | [] | no_license | thinkreed/lc.py | 767dd61f4c9454f09e66e48b2974b8d049d6e448 | ba6b2500b86489cc34852ff73ba0915e57aa0275 | refs/heads/master | 2020-05-16T14:49:18.261246 | 2019-07-16T23:42:12 | 2019-07-16T23:42:12 | 183,113,318 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | class Solution(object):
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
        a = n ^ (n >> 1)  # right shift, not n / 2, so the xor stays integral under Python 3
b = a + 1
return not (a & b)
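# Added check: n ^ (n >> 1) is all ones exactly when the bits of n alternate, and
# x & (x + 1) == 0 tests for "all ones".
if __name__ == '__main__':
    s = Solution()
    assert s.hasAlternatingBits(5) is True   # 0b101
    assert s.hasAlternatingBits(7) is False  # 0b111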
| [
"[email protected]"
] | |
4525aa767d23f6bb83dd9dc9727e3368900f2e47 | 01f321b011953de639030b010249ec721446e71b | /virtual/bin/easy_install-3.6 | 5395a95d1f956a344d068bdc788dada7c9a1edfe | [] | no_license | gabrielcoder247/myportfolio | a5b37fd809eeb46926f72d9409d31f29f842d179 | e7e08045d6cea0f8393379bc2feb878cef25ff63 | refs/heads/master | 2020-03-30T03:37:15.111963 | 2018-09-28T06:55:31 | 2018-09-28T06:55:31 | 150,698,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | 6 | #!/home/gabrielcoder/Documents/portfolio/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
d3ef66b13c17f8fe1ee580b188cfbdc448362ae2 | 8a2736b2f6ff848d0296aaf64f615ffab10d657d | /b_NaiveBayes/Original/Basic.py | c43274031e68abacbf14c82fc4271fc557f866f9 | [] | no_license | amorfortune/MachineLearning | 4d73edee44941da517f19ff0947dfcc2aab80bb1 | 1923557870002e1331306f651ad7fc7a1c1c1344 | refs/heads/master | 2021-01-09T06:02:56.852816 | 2017-02-03T07:22:22 | 2017-02-03T07:22:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | import numpy as np
from math import pi, exp
sqrt_pi = (2 * pi) ** 0.5
class NBFunctions:
@staticmethod
def gaussian(x, mu, sigma):
return exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (sqrt_pi * sigma)
@staticmethod
def gaussian_maximum_likelihood(labelled_x, n_category, dim):
mu = [np.sum(
labelled_x[c][dim]) / len(labelled_x[c][dim]) for c in range(n_category)]
        # gaussian() expects a standard deviation, so take the square root of the
        # maximum-likelihood variance (the original code passed the raw variance)
        sigma = [(np.sum(
            (labelled_x[c][dim] - mu[c]) ** 2) / len(labelled_x[c][dim])) ** 0.5 for c in range(n_category)]
def func(_c):
def sub(_x):
return NBFunctions.gaussian(_x, mu[_c], sigma[_c])
return sub
return [func(_c=c) for c in range(n_category)]
class NaiveBayes:
def __init__(self):
self._x = self._y = None
self._data = self._func = None
self._n_possibilities = None
self._labelled_x = self._label_zip = None
self._cat_counter = self._con_counter = None
self.label_dic = self._feat_dics = None
def __getitem__(self, item):
if isinstance(item, str):
return getattr(self, "_" + item)
def feed_data(self, x, y, sample_weights=None):
pass
def feed_sample_weights(self, sample_weights=None):
pass
def get_prior_probability(self, lb=1):
return [(_c_num + lb) / (len(self._y) + lb * len(self._cat_counter))
for _c_num in self._cat_counter]
def fit(self, x=None, y=None, sample_weights=None, lb=1):
if x is not None and y is not None:
self.feed_data(x, y, sample_weights)
self._func = self._fit(lb)
def _fit(self, lb):
pass
def predict_one(self, x, get_raw_result=False):
if isinstance(x, np.ndarray):
x = x.tolist()
else:
x = x[:]
x = self._transfer_x(x)
m_arg, m_probability = 0, 0
for i in range(len(self._cat_counter)):
p = self._func(x, i)
if p > m_probability:
m_arg, m_probability = i, p
if not get_raw_result:
return self.label_dic[m_arg]
return m_probability
def predict(self, x, get_raw_result=False):
return np.array([self.predict_one(xx, get_raw_result) for xx in x])
def estimate(self, x, y):
y_pred = self.predict(x)
print("Acc: {:12.6} %".format(100 * np.sum(y_pred == y) / len(y)))
def _transfer_x(self, x):
return x
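# Added self-contained sanity check (not in the original file): the Gaussian density
# above peaks at the mean with height 1 / (sqrt(2 * pi) * sigma).
if __name__ == '__main__':
    peak = NBFunctions.gaussian(0.0, 0.0, 1.0)          # N(0, 1) density at its mean
    assert abs(peak - 1 / sqrt_pi) < 1e-12              # 1 / sqrt(2 * pi) ~= 0.3989
    assert NBFunctions.gaussian(1.0, 0.0, 1.0) < peak   # density decays away from the mean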
| [
"[email protected]"
] | |
77f5e2718963f38e6f8d3b4f94db63d867327aac | fa074f02d654df1a60e5f5d6cc0e53279f352ba3 | /Pilot3/P3B7/metrics.py | 2e3b8e8867ce592d35fdca05cce30c73ebec6bb8 | [
"MIT"
] | permissive | samcom12/Benchmarks-3 | 2ff5b943df7a0b4f20f8cfa6a9373383a74687e5 | a48c85a4d4d76905c3392b18e42bea4bd28c518c | refs/heads/master | 2023-08-29T19:44:27.455414 | 2021-08-02T14:34:52 | 2021-08-02T14:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | from pytorch_lightning.metrics.classification import F1
class F1Meter:
def __init__(self, tasks, average='micro'):
self.metrics = self._create_metrics(tasks, average)
def _create_metrics(self, tasks, avg):
"""Create F1 metrics for each of the tasks
Args:
tasks: dictionary of tasks and their respective number
of classes
avg: either 'micro' or 'macro'
"""
return {t: F1(c, average=avg) for t, c in tasks.items()}
def f1(self, y_hat, y):
"""Get the batch F1 score"""
scores = {}
for task, pred in y_hat.items():
scores[task] = self.metrics[task](pred, y[task])
return scores
def compute(self):
"""Compute the F1 score over all batches"""
return {t: f1.compute().item() for t, f1 in self.metrics.items()}
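# Added usage sketch (hedged): assumes the task heads emit class-index tensors and
# that the old pytorch_lightning.metrics F1 object is callable as metric(preds,
# target), as in the 1.x metrics API this file imports. Task names are made up.
if __name__ == '__main__':
    import torch
    meter = F1Meter(tasks={'site': 2, 'histology': 4})
    y_hat = {'site': torch.tensor([0, 1, 1]), 'histology': torch.tensor([0, 2, 3])}
    y = {'site': torch.tensor([0, 1, 0]), 'histology': torch.tensor([0, 2, 1])}
    print(meter.f1(y_hat, y))  # per-batch F1 per task
    print(meter.compute())     # F1 accumulated over all batches seen so far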
| [
"[email protected]"
] | |
c026325912bbc226f2020f4804cb3964da43e858 | 4252102a1946b2ba06d3fa914891ec7f73570287 | /pylearn2/linear/linear_transform.py | 657282a1c1dbc8111ae74b874623568fcce31f81 | [] | no_license | lpigou/chalearn2014 | 21d487f314c4836dd1631943e20f7ab908226771 | 73b99cdbdb609fecff3cf85e500c1f1bfd589930 | refs/heads/master | 2020-05-17T00:08:11.764642 | 2014-09-24T14:42:00 | 2014-09-24T14:42:00 | 24,418,815 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | """
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "Ian Goodfellow"
__email__ = "goodfeli@iro"
class LinearTransform(object):
"""
A generic class describing a LinearTransform. Derived classes may implement linear
transformation as a dense matrix multiply, a convolution, etc.
    Classes inheriting from this should also inherit from TheanoLinear's LinearTransform.
    This class does not directly inherit from TheanoLinear's LinearTransform because
    most LinearTransform classes in pylearn2 will inherit from a TheanoLinear-derived
    class and we don't want them to end up inheriting from TheanoLinear by two paths.
    This class is basically just here as a placeholder to show you what extra methods you
    need to add to make a TheanoLinear LinearTransform work with pylearn2.
"""
def get_params(self):
"""
Return a list of parameters that govern the linear transformation
"""
raise NotImplementedError()
def get_weights_topo(self):
"""
Return a batch of filters, formatted topologically.
This only really makes sense if you are working with a topological space,
such as for a convolution operator.
If your transformation is defined on a VectorSpace then some other class
like a ViewConverter will need to transform your vector into a topological
space; you are not responsible for doing so here.
"""
raise NotImplementedError()
def set_batch_size(self, batch_size):
"""
Some transformers such as Conv2D have a fixed batch size.
Use this method to change the batch size.
"""
pass
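# Added illustration (not part of pylearn2): a minimal concrete subclass sketch
# showing which methods a dense weight-matrix transform would fill in. It keeps a
# plain numpy array to stay self-contained; a real pylearn2 transform would wrap
# theano shared variables instead.
import numpy

class DenseLinearTransformSketch(LinearTransform):
    def __init__(self, num_in, num_out, rng=None):
        rng = rng or numpy.random.RandomState(0)
        self._W = rng.uniform(-0.05, 0.05, (num_in, num_out))  # the learnable weights

    def get_params(self):
        # the weight matrix is the only parameter governing the transform
        return [self._W]

    def get_weights_topo(self):
        # only meaningful for topological (e.g. image) spaces; unsupported here
        raise NotImplementedError("dense sketch has no topological view")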
| [
"[email protected]"
] | |
f6a5c9b594417257ba6c45214cb08941d6ed3a86 | 66c3ff83c3e3e63bf8642742356f6c1817a30eca | /.vim/tmp/neocomplete/tags_output/=+home=+abel=+.virtualenvs=+django=+lib=+python2.7=+site-packages=+django=+views=+generic=+detail.py | a3f8b315eb315bda85117f9e7f2d3232d007aa1d | [] | no_license | pacifi/vim | 0a708e8bc741b4510a8da37da0d0e1eabb05ec83 | 22e706704357b961acb584e74689c7080e86a800 | refs/heads/master | 2021-05-20T17:18:10.481921 | 2020-08-06T12:38:58 | 2020-08-06T12:38:58 | 30,074,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,602 | py | !_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;" to lines/
!_TAG_FILE_SORTED 1 /0=unsorted, 1=sorted, 2=foldcase/
!_TAG_PROGRAM_AUTHOR Darren Hiebert /[email protected]/
!_TAG_PROGRAM_NAME Exuberant Ctags //
!_TAG_PROGRAM_URL http://ctags.sourceforge.net /official site/
!_TAG_PROGRAM_VERSION 5.9~svn20110310 //
BaseDetailView /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^class BaseDetailView(SingleObjectMixin, View):$/;" c
DetailView /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):$/;" c
SingleObjectMixin /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^class SingleObjectMixin(ContextMixin):$/;" c
SingleObjectTemplateResponseMixin /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^class SingleObjectTemplateResponseMixin(TemplateResponseMixin):$/;" c
context_object_name /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ context_object_name = None$/;" v class:SingleObjectMixin
get /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ def get(self, request, *args, **kwargs):$/;" m class:BaseDetailView
get_context_data /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ def get_context_data(self, **kwargs):$/;" m class:SingleObjectMixin
get_context_object_name /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ def get_context_object_name(self, obj):$/;" m class:SingleObjectMixin
get_object /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ def get_object(self, queryset=None):$/;" m class:SingleObjectMixin
get_queryset /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ def get_queryset(self):$/;" m class:SingleObjectMixin
get_slug_field /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ def get_slug_field(self):$/;" m class:SingleObjectMixin
get_template_names /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ def get_template_names(self):$/;" m class:SingleObjectTemplateResponseMixin
model /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ model = None$/;" v class:SingleObjectMixin
pk_url_kwarg /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ pk_url_kwarg = 'pk'$/;" v class:SingleObjectMixin
queryset /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ queryset = None$/;" v class:SingleObjectMixin
slug_field /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ slug_field = 'slug'$/;" v class:SingleObjectMixin
slug_url_kwarg /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ slug_url_kwarg = 'slug'$/;" v class:SingleObjectMixin
template_name_field /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ template_name_field = None$/;" v class:SingleObjectTemplateResponseMixin
template_name_suffix /home/abel/.virtualenvs/django/lib/python2.7/site-packages/django/views/generic/detail.py /^ template_name_suffix = '_detail'$/;" v class:SingleObjectTemplateResponseMixin
| [
"[email protected]"
] | |
70a8dd326fb2ca09e7b9dafc697a919fc5f4956e | e56214188faae8ebfb36a463e34fc8324935b3c2 | /test/test_hcl_firmware.py | 773860479af00219eb4688debc01670faea8c88d | [
"Apache-2.0"
] | permissive | CiscoUcs/intersight-python | 866d6c63e0cb8c33440771efd93541d679bb1ecc | a92fccb1c8df4332ba1f05a0e784efbb4f2efdc4 | refs/heads/master | 2021-11-07T12:54:41.888973 | 2021-10-25T16:15:50 | 2021-10-25T16:15:50 | 115,440,875 | 25 | 18 | Apache-2.0 | 2020-03-02T16:19:49 | 2017-12-26T17:14:03 | Python | UTF-8 | Python | false | false | 1,857 | py | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.hcl_firmware import HclFirmware # noqa: E501
from intersight.rest import ApiException
class TestHclFirmware(unittest.TestCase):
"""HclFirmware unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHclFirmware(self):
"""Test HclFirmware"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.hcl_firmware.HclFirmware() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
d9ebb9da6703c60aa1b6bae5d27a4646a86c8585 | 9405aa570ede31a9b11ce07c0da69a2c73ab0570 | /aliyun-python-sdk-petadata/aliyunsdkpetadata/request/v20160101/DescribeInstanceInfoRequest.py | 9e54e048432bd24eba7c3baff1ba1d512898802c | [
"Apache-2.0"
] | permissive | liumihust/aliyun-openapi-python-sdk | 7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b | c7b5dd4befae4b9c59181654289f9272531207ef | refs/heads/master | 2020-09-25T12:10:14.245354 | 2019-12-04T14:43:27 | 2019-12-04T14:43:27 | 226,002,339 | 1 | 0 | NOASSERTION | 2019-12-05T02:50:35 | 2019-12-05T02:50:34 | null | UTF-8 | Python | false | false | 2,128 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeInstanceInfoRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'PetaData', '2016-01-01', 'DescribeInstanceInfo','petadata')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | [
"[email protected]"
] | |
79d7698a4437041440511147e14d336945d9fffe | e942cafaf64f6354e1f9ebd4a84bcf236ad93004 | /yawast/commands/ssl.py | bbfe7b60ff039aab4923e020844fa135c88a4fb5 | [
"MIT"
] | permissive | Prodject/yawast | 9a441a0576012dc5f0664cd23cfa0a803fd7a477 | 044309709cf3782de75a35f77297f2d2850d8e1c | refs/heads/master | 2020-03-23T02:32:12.357082 | 2020-01-21T18:13:19 | 2020-01-21T18:13:19 | 140,978,938 | 0 | 0 | BSD-3-Clause | 2020-01-21T18:13:20 | 2018-07-14T21:23:05 | Ruby | UTF-8 | Python | false | false | 1,974 | py | # Copyright (c) 2013 - 2019 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import socket
from yawast.commands import utils as cutils
from yawast.scanner.cli import ssl_internal, ssl_sweet32, ssl_labs
from yawast.scanner.session import Session
from yawast.shared import utils, output
def start(session: Session):
print(f"Scanning: {session.url}")
# make sure it resolves
try:
socket.gethostbyname(session.domain)
except socket.gaierror as error:
output.debug_exception()
output.error(f"Fatal Error: Unable to resolve {session.domain} ({str(error)})")
return
try:
cutils.check_redirect(session)
except Exception as error:
output.debug_exception()
output.error(f"Unable to continue: {str(error)}")
return
# check to see if we are looking at an HTTPS server
if session.url_parsed.scheme == "https":
if (
session.args.internalssl
or utils.is_ip(session.domain)
or utils.get_port(session.url) != 443
):
# use SSLyze
try:
ssl_internal.scan(session)
except Exception as error:
output.error(f"Error running scan with SSLyze: {str(error)}")
else:
try:
ssl_labs.scan(session)
except Exception as error:
output.debug_exception()
output.error(f"Error running scan with SSL Labs: {str(error)}")
output.norm("Switching to internal SSL scanner...")
try:
ssl_internal.scan(session)
except Exception as error:
output.error(f"Error running scan with SSLyze: {str(error)}")
if session.args.tdessessioncount:
ssl_sweet32.scan(session)
| [
"[email protected]"
] | |
042f26bfe56643c6652b56921c76c835ae78b86e | fcf99db2d9f58da7065369c70f81e3e7cb53356b | /extra/dynamic1.py | 53d37a6922ed684b88e5d2cd97b18c2a630e82aa | [] | no_license | manankshastri/self-d | b0f438e19d1eb6378093205c49eacd7ad3c53275 | 4266c27118354391cc9677e56c0f494506d390cd | refs/heads/master | 2020-04-24T00:38:53.226656 | 2019-10-14T03:44:40 | 2019-10-14T03:44:40 | 171,572,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import time
def fib(x):
if x ==0:
return 0
elif x == 1:
return 1
else:
return fib(x-1) + fib(x-2)
startTime = time.time()
print("%-14s:%d" % ("Result:" , fib(32)))
print("%-14s:%.4f seconds" % ("Elapsed time: ", time.time() - startTime))
| [
"[email protected]"
] | |
4efa1687dadd46892464c946083720005116424d | 888f65551bb3fe1b8e84c205796b24678669a649 | /venv/bin/mako-render | e6e8f3b2ebd988dca4cd46c0956c7a2d59f20d2a | [] | no_license | chunharrison/NBA-Predictor | e6514c70f2cf26d6db4c14aee225cfbd9d5984a7 | 967951ba34debee012385af63f2bf8031dee51ca | refs/heads/master | 2022-05-04T22:02:03.374496 | 2019-05-15T05:55:34 | 2019-05-15T05:55:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/Users/harrison/Documents/NBA-Predictor/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from mako.cmd import cmdline
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(cmdline())
| [
"[email protected]"
] | ||
f29a1716fb77131024301e47e4439bc769de638a | ef32b87973a8dc08ba46bf03c5601548675de649 | /pytglib/api/types/search_messages_filter_animation.py | e52ba5032981ef7f5289e9d53f9ec2a0230f7cab | [
"MIT"
] | permissive | iTeam-co/pytglib | 1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721 | d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5 | refs/heads/master | 2022-07-26T09:17:08.622398 | 2022-07-14T11:24:22 | 2022-07-14T11:24:22 | 178,060,880 | 10 | 9 | null | null | null | null | UTF-8 | Python | false | false | 559 | py |
from ..utils import Object
class SearchMessagesFilterAnimation(Object):
"""
Returns only animation messages
Attributes:
ID (:obj:`str`): ``SearchMessagesFilterAnimation``
No parameters required.
Returns:
SearchMessagesFilter
Raises:
:class:`telegram.Error`
"""
ID = "searchMessagesFilterAnimation"
def __init__(self, **kwargs):
pass
@staticmethod
def read(q: dict, *args) -> "SearchMessagesFilterAnimation":
return SearchMessagesFilterAnimation()
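

# Minimal usage sketch (assumes the enclosing pytglib package is importable;
# the dict passed to read() mirrors a raw TDLib object):
#
#   f = SearchMessagesFilterAnimation.read({"@type": "searchMessagesFilterAnimation"})
#   assert f.ID == "searchMessagesFilterAnimation"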
| [
"[email protected]"
] | |
40ebde4a66db2044009fa4345a931b67003e657b | c91d029b59f4e6090a523bf571b3094e09852258 | /src/comercial/apps.py | 8f54832caa2d497204eca6944b8e7c6dc3c3e09e | [
"MIT"
] | permissive | anselmobd/fo2 | d51b63ebae2541b00af79448ede76b02638c41f0 | 8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab | refs/heads/master | 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 | MIT | 2023-04-21T21:50:46 | 2017-05-30T17:04:27 | Python | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class ComercialConfig(AppConfig):
name = 'comercial'
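
# Activation sketch: a project would typically reference this app in its
# settings.py (the settings module itself is not part of this repo):
#
#   INSTALLED_APPS = [
#       ...
#       'comercial',  # or explicitly 'comercial.apps.ComercialConfig'
#   ]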
| [
"[email protected]"
] | |
5a45bf84ab969517a17806532492d907662c8f93 | 844e0cd4ffbe1ead05b844508276f66cc20953d5 | /test/testseqdiagbuilder.py | 111bfcc7200d6457de50b6bc106cb2bee15747bd | [] | no_license | Archanciel/cryptopricer | a256fa793bb1f2d65b5c032dd81a266ee5be79cc | 00c0911fe1c25c1da635dbc9b26d45be608f0cc5 | refs/heads/master | 2022-06-29T13:13:22.435670 | 2022-05-11T20:37:43 | 2022-05-11T20:37:43 | 100,196,449 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 66,316 | py | import unittest
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
sys.path.insert(0, currentdir) # required so that the utilityfortest module can be imported when
# the test is executed standalone
from testclasses.isolatedclass import IsolatedClass
from testclasses.subtestpackage.isolatedclasssub import IsolatedClassSub
from testclasses.isolatedclasswithinstancevariables import IsolatedClassWithInstanceVariables
from testclasses.foobarclasses import *
from testclasses.subtestpackage.dsub import DSub
from testclasses.subtestpackage.caller import Caller
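
# The tests below all follow the same SeqDiagBuilder cycle: activate() with an
# entry class/method, execute the code under test, render the PlantUML text
# with createSeqDiaqCommands(), then deactivate().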
class Client:
def do(self):
c1 = ChildOne()
c1.getCoordinate()
def make(self):
c1 = ChildOne()
c1.compute()
def perform(self):
c1 = ChildOne()
c1.computeTwo()
def doCall(self):
c1 = ChildOne()
c1.computeThree()
def doProcess(self):
c1 = ChildOfChildTwo()
c1.computeFour()
class Parent:
def getCoordinate(self, location=''):
'''
:param location:
:seqdiag_return Coord
:return:
'''
SeqDiagBuilder.recordFlow()
def getCoordinateNoneSelected(self, location=''):
'''
:param location:
:seqdiag_return Coord
:return:
'''
SeqDiagBuilder.recordFlow()
def compute(self, size = 0):
'''
        This is a dummy method.
:seqdiag_return Analysis
:return:
'''
pass
def computeTwo(self, size = 0):
'''
        This is a dummy method.
:seqdiag_select_method
:seqdiag_return Analysis
:return:
'''
pass
def computeThree(self, size = 0):
'''
        This is a dummy method.
:seqdiag_select_method
:seqdiag_return Analysis
:return:
'''
iso = IsolatedClass()
iso.analyse()
def computeFour(self, size = 0):
'''
        This is a dummy method.
:seqdiag_return Analysis
:return:
'''
pass
def inheritedMethod(self, inhArg):
'''
        This is a dummy method.
:seqdiag_return inhMethResult
:return:
'''
SeqDiagBuilder.recordFlow()
class ChildOne(Parent):
def getCoordinate(self, location=''):
iso = IsolatedClass()
iso.analyse()
def getCoordinateNoneSelected(self, location=''):
iso = IsolatedClass()
iso.analyse()
def m(self):
pass
def compute(self, size = 0):
'''
        This is a dummy method.
:seqdiag_select_method
:seqdiag_return Analysis
:return:
'''
super().compute(size)
iso = IsolatedClass()
iso.analyse()
def computeTwo(self, size = 0):
'''
        This is a dummy method.
:seqdiag_select_method
:seqdiag_return Analysis
:return:
'''
super().compute(size)
iso = IsolatedClass()
iso.analyse()
class ChildTwo(Parent):
def l(self):
pass
def computeFour(self, size = 0):
'''
        This is a dummy method.
:seqdiag_select_method
:seqdiag_return Analysis
:return:
'''
iso = IsolatedClass()
iso.analyse()
def getCoordinateNoneSelected(self, location=''):
SeqDiagBuilder.recordFlow()
class ChildThree(Parent):
def getCoordinate(self, location=''):
'''
:param location:
:seqdiag_return CoordSel
:seqdiag_select_method
:return:
'''
SeqDiagBuilder.recordFlow()
class ChildOfChildTwo(Parent):
def l(self):
pass
def computeFour(self, size = 0):
'''
        This is a dummy method.
:seqdiag_return Analysis
:return:
'''
iso = IsolatedClass()
iso.analyse()
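
# The Parent/Child* fixture classes above deliberately share method names so
# the tests can check how SeqDiagBuilder resolves the target class in an
# ambiguous hierarchy (via the :seqdiag_select_method tag or the documented
# default fallback).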
class ClassA:
def doWork(self):
'''
:seqdiag_return ClassAdoWorkRes
:return:
'''
self.internalCall()
def internalCall(self):
'''
:seqdiag_return ResultPrice
:return:
'''
pr = self.internalInnerCall()
b = ClassB()
res = b.createRequest(1, 2)
def internalInnerCall(self):
'''
:seqdiag_return ResultPrice
:return:
'''
b = ClassB()
res = b.createInnerRequest(1)
def aMethod(self, aMarg):
'''
:seqdiag_return ResultAmeth
:return:
'''
child = ChildTwo()
child.inheritedMethod(aMarg)
class ClassB:
def createInnerRequest(self, parm1):
'''
:seqdiag_return Bool
:param parm1:
:return:
'''
SeqDiagBuilder.recordFlow()
def createRequest(self, parm1, parm2):
'''
:seqdiag_return Bool
:param parm1:
:return:
'''
SeqDiagBuilder.recordFlow()
class C:
def c1(self, c1_p1):
'''
:param c1_p1:
:seqdiag_return Cc1Return
:return:
'''
SeqDiagBuilder.recordFlow()
def c2(self, c2_p1):
'''
:param c2_p1:
:seqdiag_return Cc2Return
:return:
'''
d = DSub()
d.d1(1)
def c3(self, c3_p1):
'''
:param c3_p1:
:seqdiag_return Cc3Return
:return:
'''
d = DSub()
d.d2(1)
SeqDiagBuilder.recordFlow()
self.c4(1)
def c4(self, c4_p1):
'''
:param c4_p1:
:seqdiag_return Cc4Return
:return:
'''
d = DSub()
d.d2(1)
SeqDiagBuilder.recordFlow()
def c5(self, c5_p1):
'''
:param c5_p1:
:seqdiag_return Cc5Return
:return:
'''
d = DSub()
d.d3(1)
def fibonaci(self, number):
'''
:param number:
:seqdiag_return CfibonaciReturn
:return:
'''
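        # Despite its name, this returns number + (number - 1) + ... + 1, a
        # cumulative sum; it exists only to exercise recursive flow recording.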
if number == 1:
SeqDiagBuilder.recordFlow()
return 1
else:
return number + self.fibonaci(number - 1)
class B:
'''
:seqdiag_note Test class note for class B
'''
def b0(self, b1_p1):
'''
:param b1_p1:
        :seqdiag_return Bb0Return
:return:
'''
pass
def b1(self, b1_p1):
'''
:param b1_p1:
:seqdiag_return Bb1Return
:return:
'''
SeqDiagBuilder.recordFlow()
def b2(self, b2_p1):
'''
:param b2_p1:
:seqdiag_return Bb2Return
:return:
'''
c = C()
c.c1(1)
def b3(self, b3_p1):
'''
:param b3_p1:
:seqdiag_return Bb3Return
:return:
'''
c = C()
c.c1(1)
c.c1(1)
def b4(self, b4_p1):
'''
:param b4_p1:
:seqdiag_return Bb4Return
:return:
'''
SeqDiagBuilder.recordFlow()
def b5(self, b5_p1):
'''
:param b5_p1:
:seqdiag_return Bb5Return
:return:
'''
SeqDiagBuilder.recordFlow()
def b6(self, b6_p1):
'''
:param b6_p1:
:seqdiag_return Bb6Return
:return:
'''
c = C()
c.c2(1)
def b7(self, b7_p1):
'''
:param b7_p1:
:seqdiag_return Bb7Return
:return:
'''
c = C()
c.c3(1)
SeqDiagBuilder.recordFlow()
d = DSub()
d.d2(1)
def b8(self, b8_p1):
'''
:param b8_p1:
:seqdiag_return Bb8Return
:return:
'''
c = C()
c.c5(1)
d = DSub()
d.d2(1)
class A:
'''
:seqdiag_note Test class note for class A
'''
def a0(self, a1_p1, a1_p2):
'''
:param a1_p1:
:param a1_p2:
        :seqdiag_return Aa0Return
:return:
'''
pass
def a1(self, a1_p1, a1_p2):
'''
:param a1_p1:
:param a1_p2:
:seqdiag_return Aa1Return
:return:
'''
SeqDiagBuilder.recordFlow()
def a2(self, a2_p1):
'''
:param a2_p1:
:seqdiag_return Aa2Return
:return:
'''
b = B()
b.b1(1)
def a3(self, a3_p1):
'''
:param a3_p1:
:seqdiag_return Aa3Return
:return:
'''
b = B()
b.b2(1)
def a4(self, a4_p1):
'''
:param a4_p1:
:seqdiag_return Aa4Return
:return:
'''
b = B()
b.b1(1)
b.b1(1)
def a5(self, a5_p1):
'''
:param a5_p1:
:seqdiag_return Aa5Return
:return:
'''
b = B()
b.b1(1)
b.b1(1)
b.b1(1)
def a6(self, a6_p1):
'''
:param a6_p1:
:seqdiag_return Aa6Return
:return:
'''
b = B()
b.b2(1)
b.b2(1)
def a7(self, a7_p1):
'''
:param a7_p1:
:seqdiag_return Aa6Return
:return:
'''
b = B()
b.b3(1)
def a8(self, a8_p1, a8_p2):
'''
:param a8_p1:
:param a8_p2:
:seqdiag_return Aa8Return
:return:
'''
SeqDiagBuilder.recordFlow()
def a9(self, a9_p1):
'''
:param a9_p1:
:seqdiag_return Aa9Return
:return:
'''
SeqDiagBuilder.recordFlow()
def a10(self, a10_p1):
'''
:param a10_p1:
:seqdiag_return Aa10Return
:return:
'''
b = B()
b.b4(1)
b.b5(1)
def a11(self, a11_p1):
'''
:param a11_p1:
:seqdiag_return Aa11Return
:return:
'''
b = B()
b.b6(1)
b.b6(1)
def a12(self, a12_p1):
'''
:param a12_p1:
:seqdiag_return Aa12Return
:return:
'''
b = B()
b.b7(1)
b.b7(1)
SeqDiagBuilder.recordFlow()
def a13(self, a13_p1):
'''
:param a13_p1:
:seqdiag_return Aa13Return
:return:
'''
b = B()
b.b8(1)
b.b8(1)
class TestSeqDiagBuilder(unittest.TestCase):
def setUp(self):
SeqDiagBuilder.deactivate()
def testCreateSeqDiagCommandsOnSimplestCallWithoutRecordFlowCallInLeafMethod(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a0') # activate sequence diagram building
entryPoint.a0(1, 2)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 1)
self.assertEqual(
'''@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=14> No control flow recorded.</font></b>
<b><font color=red size=14> Method activate() called with arguments projectPath=<{}>, entryClass=<A>, entryMethod=<a0>, classArgDic=<None>: True.</font></b>
<b><font color=red size=14> Method recordFlow() called: False.</font></b>
<b><font color=red size=14> Specified entry point: A.a0 reached: False.</font></b>
endheader
actor USER
@enduml'''.format(parentdir), commands) # using format() instead of replace fails!
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnSimplestCall(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a1') # activate sequence diagram building
entryPoint.a1(1, 2)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
USER -> A: a1(a1_p1, a1_p2)
activate A
USER <-- A: return Aa1Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnSimplestCallNotPassingProjectDir(self):
entryPoint = A()
SeqDiagBuilder.activate(None, 'A', 'a1') # activate sequence diagram building
entryPoint.a1(1, 2)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 1)
self.assertEqual(
'''@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=14> No control flow recorded.</font></b>
<b><font color=red size=14> Method activate() called with arguments projectPath=<None>, entryClass=<A>, entryMethod=<a1>, classArgDic=<None>: True.</font></b>
<b><font color=red size=14> Method recordFlow() called: True.</font></b>
<b><font color=red size=14> Specified entry point: A.a1 reached: False.</font></b>
endheader
actor USER
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnSimplestCallPassingEmptyProjectDir(self):
entryPoint = A()
SeqDiagBuilder.activate('', 'A', 'a1') # activate sequence diagram building
entryPoint.a1(1, 2)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 1)
self.assertEqual(
'''@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=14> No control flow recorded.</font></b>
<b><font color=red size=14> Method activate() called with arguments projectPath=<>, entryClass=<A>, entryMethod=<a1>, classArgDic=<None>: True.</font></b>
<b><font color=red size=14> Method recordFlow() called: True.</font></b>
<b><font color=red size=14> Specified entry point: A.a1 reached: False.</font></b>
endheader
actor USER
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsTwoLevelCallTwoDiffMethods(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a10') # activate sequence diagram building
entryPoint.a10(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
USER -> A: a10(a10_p1)
activate A
A -> B: b4(b4_p1)
activate B
A <-- B: return Bb4Return
deactivate B
A -> B: b5(b5_p1)
activate B
A <-- B: return Bb5Return
deactivate B
USER <-- A: return Aa10Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnTwoLevelCall(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a2') # activate sequence diagram building
entryPoint.a2(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
USER -> A: a2(a2_p1)
activate A
A -> B: b1(b1_p1)
activate B
A <-- B: return Bb1Return
deactivate B
USER <-- A: return Aa2Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnThreeLevelCallingMidLevelMethodTwice(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a6') # activate sequence diagram building
entryPoint.a6(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
participant C
USER -> A: a6(a6_p1)
activate A
A -> B: b2(b2_p1)
activate B
B -> C: c1(c1_p1)
activate C
B <-- C: return Cc1Return
deactivate C
A <-- B: return Bb2Return
deactivate B
A -> B: b2(b2_p1)
activate B
B -> C: c1(c1_p1)
activate C
B <-- C: return Cc1Return
deactivate C
A <-- B: return Bb2Return
deactivate B
USER <-- A: return Aa6Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnFiveLevelCallingSecondLevelMethodTwice(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a11') # activate sequence diagram building
entryPoint.a11(1)
commands = SeqDiagBuilder.createSeqDiaqCommands(actorName='USER', title='Sequence diagram title')
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
title Sequence diagram title
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
participant C
participant DSub
/note over of DSub
Short note DSub
end note
USER -> A: a11(a11_p1)
activate A
A -> B: b6(b6_p1)
activate B
B -> C: c2(c2_p1)
activate C
C -> DSub: d1(d1_p1)
activate DSub
C <-- DSub: return Dd1Return
deactivate DSub
B <-- C: return Cc2Return
deactivate C
A <-- B: return Bb6Return
deactivate B
A -> B: b6(b6_p1)
activate B
B -> C: c2(c2_p1)
activate C
C -> DSub: d1(d1_p1)
activate DSub
C <-- DSub: return Dd1Return
deactivate DSub
B <-- C: return Cc2Return
deactivate C
A <-- B: return Bb6Return
deactivate B
USER <-- A: return Aa11Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnFiveLevelCallingSecondLevelMethodTwiceProjectPathUnixLike(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir.replace('\\','/'), 'A', 'a11') # activate sequence diagram building
entryPoint.a11(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
participant C
participant DSub
/note over of DSub
Short note DSub
end note
USER -> A: a11(a11_p1)
activate A
A -> B: b6(b6_p1)
activate B
B -> C: c2(c2_p1)
activate C
C -> DSub: d1(d1_p1)
activate DSub
C <-- DSub: return Dd1Return
deactivate DSub
B <-- C: return Cc2Return
deactivate C
A <-- B: return Bb6Return
deactivate B
A -> B: b6(b6_p1)
activate B
B -> C: c2(c2_p1)
activate C
C -> DSub: d1(d1_p1)
activate DSub
C <-- DSub: return Dd1Return
deactivate DSub
B <-- C: return Cc2Return
deactivate C
A <-- B: return Bb6Return
deactivate B
USER <-- A: return Aa11Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnFiveLevelCallingSecondLevelMethodTwiceWithRecordFlowInEveryMethod(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a12') # activate sequence diagram building
entryPoint.a12(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
participant C
participant DSub
/note over of DSub
Short note DSub
end note
USER -> A: a12(a12_p1)
activate A
A -> B: b7(b7_p1)
activate B
B -> C: c3(c3_p1)
activate C
C -> DSub: d2(d2_p1)
activate DSub
C <-- DSub: return Dd2Return
deactivate DSub
C -> C: c4(c4_p1)
activate C
C -> DSub: d2(d2_p1)
activate DSub
C <-- DSub: return Dd2Return
deactivate DSub
C <-- C: return Cc4Return
deactivate C
B <-- C: return Cc3Return
deactivate C
B -> DSub: d2(d2_p1)
activate DSub
B <-- DSub: return Dd2Return
deactivate DSub
A <-- B: return Bb7Return
deactivate B
A -> B: b7(b7_p1)
activate B
B -> C: c3(c3_p1)
activate C
C -> DSub: d2(d2_p1)
activate DSub
C <-- DSub: return Dd2Return
deactivate DSub
C -> C: c4(c4_p1)
activate C
C -> DSub: d2(d2_p1)
activate DSub
C <-- DSub: return Dd2Return
deactivate DSub
C <-- C: return Cc4Return
deactivate C
B <-- C: return Cc3Return
deactivate C
B -> DSub: d2(d2_p1)
activate DSub
B <-- DSub: return Dd2Return
deactivate DSub
A <-- B: return Bb7Return
deactivate B
USER <-- A: return Aa12Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnFiveLevelCallingSecondLevelMethodTwiceWithRecordFlowInOnePlaceOnlySpecifyingNoteLengthLimit(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a13') # activate sequence diagram building
entryPoint.a13(1)
commands = SeqDiagBuilder.createSeqDiaqCommands(actorName='USER', title=None, maxSigArgNum=None, maxSigCharLen=200, maxNoteCharLen=15)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for
class A
end note
participant B
/note over of B
Test class note for
class B
end note
participant DSub
/note over of DSub
Short note DSub
end note
USER -> A: a13(a13_p1)
activate A
A -> B: b8(b8_p1)
activate B
B -> DSub: d2(d2_p1)
activate DSub
B <-- DSub: return Dd2Return
deactivate DSub
A <-- B: return Bb8Return
deactivate B
A -> B: b8(b8_p1)
activate B
B -> DSub: d2(d2_p1)
activate DSub
B <-- DSub: return Dd2Return
deactivate DSub
A <-- B: return Bb8Return
deactivate B
USER <-- A: return Aa13Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnThreeLevelCallingLastLevelMethodTwice(self):
'''
        Calls a two-level-deep method which calls the last-level method twice.
:return:
'''
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a7') # activate sequence diagram building
entryPoint.a7(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
participant C
USER -> A: a7(a7_p1)
activate A
A -> B: b3(b3_p1)
activate B
B -> C: c1(c1_p1)
activate C
B <-- C: return Cc1Return
deactivate C
B -> C: c1(c1_p1)
activate C
B <-- C: return Cc1Return
deactivate C
A <-- B: return Bb3Return
deactivate B
USER <-- A: return Aa6Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnTwoLevelCallCallingMethodTwice(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a4') # activate sequence diagram building
entryPoint.a4(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
USER -> A: a4(a4_p1)
activate A
A -> B: b1(b1_p1)
activate B
A <-- B: return Bb1Return
deactivate B
A -> B: b1(b1_p1)
activate B
A <-- B: return Bb1Return
deactivate B
USER <-- A: return Aa4Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnTwoLevelCallCallingMethodThreeTimes(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a5') # activate sequence diagram building
entryPoint.a5(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
USER -> A: a5(a5_p1)
activate A
A -> B: b1(b1_p1)
activate B
A <-- B: return Bb1Return
deactivate B
A -> B: b1(b1_p1)
activate B
A <-- B: return Bb1Return
deactivate B
A -> B: b1(b1_p1)
activate B
A <-- B: return Bb1Return
deactivate B
USER <-- A: return Aa5Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnThreeLevelCall(self):
entryPoint = A()
SeqDiagBuilder.activate(parentdir, 'A', 'a3') # activate sequence diagram building
entryPoint.a3(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant A
/note over of A
Test class note for class A
end note
participant B
/note over of B
Test class note for class B
end note
participant C
USER -> A: a3(a3_p1)
activate A
A -> B: b2(b2_p1)
activate B
B -> C: c1(c1_p1)
activate C
B <-- C: return Cc1Return
deactivate C
A <-- B: return Bb2Return
deactivate B
USER <-- A: return Aa3Return
deactivate A
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def test_instanciateClassInitTwoArgs(self):
className = 'IsolatedClassWithInstanceVariables'
packageSpec = 'testclasses.'
moduleName = 'isolatedclasswithinstancevariables'
instance = SeqDiagBuilder._instanciateClass(className, packageSpec, moduleName)
self.assertIsInstance(instance, IsolatedClassWithInstanceVariables)
def test_instanciateClassInitNoArgs(self):
className = 'IsolatedClass'
packageSpec = 'testclasses.'
moduleName = 'isolatedclass'
instance = SeqDiagBuilder._instanciateClass(className, packageSpec, moduleName)
self.assertIsInstance(instance, IsolatedClass)
def test_instanciateClassInitNoArgsSubPackageSpec(self):
className = 'IsolatedClassSub'
packageSpec = 'testclasses.subtestpackage.'
moduleName = 'isolatedclasssub'
instance = SeqDiagBuilder._instanciateClass(className, packageSpec, moduleName)
self.assertIsInstance(instance, IsolatedClassSub)
def test_instanciateClassInitNoArgsEmptyPackageSpec(self):
className = 'Client'
packageSpec = ''
moduleName = 'testseqdiagbuilder'
instance = SeqDiagBuilder._instanciateClass(className, packageSpec, moduleName)
self.assertIsInstance(instance, Client)
def test_instanciateClassInitNoArgsEmptyPackageSpecClassInProjectRoot(self):
className = 'SeqDiagBuilder'
packageSpec = ''
moduleName = 'seqdiagbuilder'
instance = SeqDiagBuilder._instanciateClass(className, packageSpec, moduleName)
self.assertIsInstance(instance, SeqDiagBuilder)
    def testRecordFlowWhereMultipleClassesSupportSameMethodAndOneIsSelected(self):
entryPoint = ChildThree()
SeqDiagBuilder.activate(parentdir, 'ChildThree', 'getCoordinate') # activate sequence diagram building
entryPoint.getCoordinate()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant ChildThree
USER -> ChildThree: getCoordinate(location='')
activate ChildThree
USER <-- ChildThree: return CoordSel
deactivate ChildThree
@enduml''', commands)
SeqDiagBuilder.deactivate()
    def testRecordFlowWhereMultipleClassesSupportSameMethodAndOneIsSelectedInOtherClass(self):
entryPoint = ChildTwo()
SeqDiagBuilder.activate(parentdir, 'ChildTwo', 'getCoordinate') # activate sequence diagram building
entryPoint.getCoordinate()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=14> No control flow recorded.</font></b>
<b><font color=red size=14> Method activate() called with arguments projectPath=<{}>, entryClass=<ChildTwo>, entryMethod=<getCoordinate>, classArgDic=<None>: True.</font></b>
<b><font color=red size=14> Method recordFlow() called: True.</font></b>
<b><font color=red size=14> Specified entry point: ChildTwo.getCoordinate reached: False.</font></b>
endheader
actor USER
@enduml'''.format(parentdir), commands) # using format() instead of replace fails!
SeqDiagBuilder.deactivate()
    def testRecordFlowWhereMultipleClassesSupportSameMethodAndNoneIsSelected(self):
entryPoint = ChildTwo()
SeqDiagBuilder.activate(parentdir, 'ChildTwo', 'getCoordinateNoneSelected') # activate sequence diagram building
entryPoint.getCoordinateNoneSelected()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=20> 1</font></b>
<b><font color=red size=14> More than one class ['Parent', 'ChildOne', 'ChildTwo', 'ChildThree', 'ChildOfChildTwo'] found in module testseqdiagbuilder do support method getCoordinateNoneSelected(location='').</font></b>
<b><font color=red size=14> Since Python provides no way to determine the exact target class, class Parent was chosen by default for building the sequence diagram.</font></b>
<b><font color=red size=14> To override this selection, put tag :seqdiag_select_method somewhere in the target method documentation or define every class of the hierarchy in its own file.</font></b>
<b><font color=red size=14> See help for more information.</font></b>
<b><font color=red size=20> 2</font></b>
<b><font color=red size=14> No control flow recorded.</font></b>
<b><font color=red size=14> Method activate() called with arguments projectPath=<{}>, entryClass=<ChildTwo>, entryMethod=<getCoordinateNoneSelected>, classArgDic=<None>: True.</font></b>
<b><font color=red size=14> Method recordFlow() called: True.</font></b>
<b><font color=red size=14> Specified entry point: ChildTwo.getCoordinateNoneSelected reached: False.</font></b>
endheader
actor USER
@enduml'''.format(parentdir), commands) # using format() instead of replace fails!
SeqDiagBuilder.deactivate()
    def testRecordFlowWhereMultipleClassesSupportInheritedMethodAndNoneIsSelected(self):
entryPoint = ClassA()
SeqDiagBuilder.activate(parentdir, 'ClassA', 'aMethod') # activate sequence diagram building
entryPoint.aMethod(1)
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=14> More than one class ['Parent', 'ChildOne', 'ChildTwo', 'ChildThree', 'ChildOfChildTwo'] found in module testseqdiagbuilder do support method inheritedMethod(inhArg).</font></b>
<b><font color=red size=14> Since Python provides no way to determine the exact target class, class Parent was chosen by default for building the sequence diagram.</font></b>
<b><font color=red size=14> To override this selection, put tag :seqdiag_select_method somewhere in the target method documentation or define every class of the hierarchy in its own file.</font></b>
<b><font color=red size=14> See help for more information.</font></b>
endheader
actor USER
participant TestSeqDiagBuilder
participant ClassA
participant Parent
USER -> ClassA: aMethod(aMarg)
activate ClassA
ClassA -> Parent: inheritedMethod(inhArg)
activate Parent
ClassA <-- Parent: return inhMethResult
deactivate Parent
USER <-- ClassA: return ResultAmeth
deactivate ClassA
@enduml''', commands)
SeqDiagBuilder.deactivate()
def testCreateSeqDiagCommandsOnFullRequestHistoDayPrice(self):
'''
Generates a sequence diagram on a typical CryptoPricer request.
:return:
'''
if not 'CryptoPricer' in parentdir:
return
from datetimeutil import DateTimeUtil
from utilityfortest import UtilityForTest
from pricerequester import PriceRequester
from configurationmanager import ConfigurationManager
from gui.guioutputformatter import GuiOutputFormatter
from controller import Controller
SeqDiagBuilder.activate(parentdir, 'Controller', 'getPrintableResultForInput') # activate sequence diagram building
if os.name == 'posix':
FILE_PATH = '/sdcard/cryptopricer.ini'
else:
FILE_PATH = 'c:\\temp\\cryptopricer.ini'
configMgr = ConfigurationManager(FILE_PATH)
self.controller = Controller(GuiOutputFormatter(configMgr), configMgr, PriceRequester())
timezoneStr = 'Europe/Zurich'
now = DateTimeUtil.localNow(timezoneStr)
eightDaysBeforeArrowDate = now.shift(days=-8)
if eightDaysBeforeArrowDate.year < now.year:
print('{} skipped due to current date {}'.format('testControllerRTThenHistoMinuteThenRThenNewUnit()', now))
SeqDiagBuilder.deactivate()
return
eightDaysBeforeYearStr, eightDaysBeforeMonthStr, eightDaysBeforeDayStr, eightDaysBeforeHourStr, eightDaysBeforeMinuteStr = UtilityForTest.getFormattedDateTimeComponentsForArrowDateTimeObj(eightDaysBeforeArrowDate)
requestYearStr = eightDaysBeforeYearStr
requestDayStr = eightDaysBeforeDayStr
requestMonthStr = eightDaysBeforeMonthStr
inputStr = 'eth btc {}/{} all'.format(requestDayStr, requestMonthStr)
printResult, fullCommandStr, fullCommandStrWithOptions, fullCommandStrWithSaveModeOptions, fullCommandStrForStatusBar = self.controller.getPrintableResultForInput(
inputStr)
if DateTimeUtil.isDateOlderThan(eightDaysBeforeArrowDate, 7):
hourStr = '00'
minuteStr = '00'
priceType = 'C'
else:
hourStr = eightDaysBeforeHourStr
minuteStr = eightDaysBeforeMinuteStr
priceType = 'M'
self.assertEqual(
'ETH/BTC on AVG: ' + '{}/{}/{} {}:{}{}'.format(requestDayStr, requestMonthStr, requestYearStr, hourStr, minuteStr, priceType),
UtilityForTest.removeOneEndPriceFromResult(printResult))
self.assertEqual('eth btc {}/{}/{} {}:{} all'.format(requestDayStr, requestMonthStr, requestYearStr, hourStr, minuteStr), fullCommandStr)
self.assertEqual(None, fullCommandStrWithSaveModeOptions)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
commands = SeqDiagBuilder.createSeqDiaqCommands('GUI')
with open("c:\\temp\\sqCryptoPricerFullSig.txt","w") as f:
f.write(commands)
SeqDiagBuilder.deactivate()
self.assertEqual(
'''@startuml
actor GUI
participant Controller
/note over of Controller
Client in the GOF Command pattern. Entry point of the business layer. Instanciates the business layer classes.
end note
participant Requester
/note over of Requester
Parses the user requests, storing the request parms into the the appropriate Command.
end note
participant CommandPrice
/note over of CommandPrice
Command in the GOF Command pattern. Stores all the request parms parsed by the Requester. Stores aswell the currently active request parms which will
be used in case of partial request. Validates part of the request elements and computes the int request date/time components. Calls the receiver, i.e.
the Processor, passing to it the required request parms.
end note
participant Processor
/note over of Processor
Receiver in the GOF Command pattern. Validates and obtains real exchange name for crypto/unit and unit/fiat pairs. Determines if RT or historical
price must be asked to the PriceRequester. After getting the price, computes the fiat (-f) and value (-v) option values and add them to the returned
ResultData. In case a crypto/unit or a fiat/unit pair is not supported by the pair exchange, try to obtain a unit/crypto, respectively a unit/fiat
pair price.
end note
participant PriceRequester
/note over of PriceRequester
Obtains the RT or historical rates from the cryptocompare.com web site. For historical rates, determines if a minute or close rate is to be obtained.
end note
participant GuiOutputFormatter
/note over of GuiOutputFormatter
Formats the result data printed to the output zone of the application and to the status bar.
end note
GUI -> Controller: getPrintableResultForInput(inputStr)
activate Controller
Controller -> Requester: getCommand(inputStr)
activate Requester
Requester -> Requester: _parseAndFillCommandPrice(inputStr)
activate Requester
Requester -> Requester: _buildFullCommandPriceOrderFreeParmsDic(orderFreeParmList)
activate Requester
Requester <-- Requester: return optionalParsedParmDataDic
deactivate Requester
Requester <-- Requester: return CommandPrice or CommandError
deactivate Requester
Controller <-- Requester: return AbstractCommand
deactivate Requester
note right
May return a CommandError in case of parsing problem.
end note
Controller -> CommandPrice: execute()
activate CommandPrice
CommandPrice -> Processor: getCryptoPrice(crypto, unit, exchange, day, month, year, hour, minute, optionValueSymbol=None, ...)
activate Processor
Processor -> Processor: _getPrice(currency, targetCurrency, exchange, year, month, day, hour, minute, dateTimeFormat, localTz, ...)
activate Processor
Processor -> PriceRequester: getHistoricalPriceAtUTCTimeStamp(crypto, unit, timeStampLocalForHistoMinute, localTz, timeStampUTCNoHHMMForHistoDay, exchange)
activate PriceRequester
note right
Obtainins a minute price if request date < 7 days from now, else a day close price.
end note
PriceRequester -> PriceRequester: _getHistoDayPriceAtUTCTimeStamp(crypto, unit, timeStampUTC, exchange, resultData)
activate PriceRequester
PriceRequester <-- PriceRequester: return ResultData
deactivate PriceRequester
Processor <-- PriceRequester: return ResultData
deactivate PriceRequester
Processor <-- Processor: return ResultData
deactivate Processor
CommandPrice <-- Processor: return ResultData
deactivate Processor
Controller <-- CommandPrice: return ResultData or False
deactivate CommandPrice
Controller -> GuiOutputFormatter: getFullCommandString(resultData)
activate GuiOutputFormatter
GuiOutputFormatter -> GuiOutputFormatter: _buildFullDateAndTimeStrings(commandDic, timezoneStr)
activate GuiOutputFormatter
GuiOutputFormatter <-- GuiOutputFormatter: return requestDateDMY, requestDateHM
deactivate GuiOutputFormatter
Controller <-- GuiOutputFormatter: return printResult, fullCommandStrNoOptions, fullCommandStrWithNoSaveOptions, ...
deactivate GuiOutputFormatter
GUI <-- Controller: return printResult, fullCommandStrNoOptions, fullCommandStrWithNoSaveOptions, ...
deactivate Controller
@enduml''', commands)
def testCreateSeqDiagCommandsOnFullRequestHistoDayPriceWithSignatureLimitation(self):
'''
Generates a sequence diagram on a typical CryptoPricer request
        while specifying a maximum size for the method signatures.
:return:
'''
if not 'CryptoPricer' in parentdir:
return
from datetimeutil import DateTimeUtil
from utilityfortest import UtilityForTest
from pricerequester import PriceRequester
from configurationmanager import ConfigurationManager
from gui.guioutputformatter import GuiOutputFormatter
from controller import Controller
SeqDiagBuilder.activate(parentdir, 'Controller', 'getPrintableResultForInput') # activate sequence diagram building
if os.name == 'posix':
FILE_PATH = '/sdcard/cryptopricer.ini'
else:
FILE_PATH = 'c:\\temp\\cryptopricer.ini'
configMgr = ConfigurationManager(FILE_PATH)
self.controller = Controller(GuiOutputFormatter(configMgr), configMgr, PriceRequester())
timezoneStr = 'Europe/Zurich'
now = DateTimeUtil.localNow(timezoneStr)
eightDaysBeforeArrowDate = now.shift(days=-8)
eightDaysBeforeYearStr, eightDaysBeforeMonthStr, eightDaysBeforeDayStr, eightDaysBeforeHourStr, eightDaysBeforeMinuteStr = UtilityForTest.getFormattedDateTimeComponentsForArrowDateTimeObj(
eightDaysBeforeArrowDate)
requestYearStr = eightDaysBeforeYearStr
requestDayStr = eightDaysBeforeDayStr
requestMonthStr = eightDaysBeforeMonthStr
inputStr = 'btc usd {}/{} all'.format(requestDayStr, requestMonthStr)
printResult, fullCommandStr, fullCommandStrWithOptions, fullCommandStrWithSaveModeOptions, fullCommandStrForStatusBar = self.controller.getPrintableResultForInput(
inputStr)
commands = SeqDiagBuilder.createSeqDiaqCommands(actorName='GUI', title='CryptoPricer sequence diagram', maxSigArgNum=None, maxSigCharLen=20, maxNoteCharLen=20)
plantUmlOutputDir = "c:\\temp\\"
plantUmlOutputFileName = 'sqCryptoPricerShortSig.txt'
plantUmlOutputFilePathName = plantUmlOutputDir + plantUmlOutputFileName
with open(plantUmlOutputFilePathName, "w") as f:
f.write(commands)
try:
self.assertEqual(
'''@startuml
title CryptoPricer sequence diagram
actor GUI
participant Controller
/note over of Controller
Client in the GOF Command
pattern. Entry point of the
business layer. Instanciates
the business layer classes.
end note
participant Requester
/note over of Requester
Parses the user requests,
storing the request parms into
the the appropriate Command.
end note
participant CommandPrice
/note over of CommandPrice
Command in the GOF Command
pattern. Stores all the
request parms parsed by the
Requester. Stores aswell the
currently active request parms
which will be used in case of
partial request. Validates
part of the request elements
and computes the int request
date/time components. Calls
the receiver, i.e. the
Processor, passing to it the
required request parms.
end note
participant Processor
/note over of Processor
Receiver in the GOF Command
pattern. Validates and obtains
real exchange name for
crypto/unit and unit/fiat
pairs. Determines if RT or
historical price must be asked
to the PriceRequester. After
getting the price, computes
the fiat (-f) and value (-v)
option values and add them to
the returned ResultData. In
case a crypto/unit or a
fiat/unit pair is not
supported by the pair
exchange, try to obtain a
unit/crypto, respectively a
unit/fiat pair price.
end note
participant PriceRequester
/note over of PriceRequester
Obtains the RT or historical
rates from the
cryptocompare.com web site.
For historical rates,
determines if a minute or
close rate is to be obtained.
end note
participant GuiOutputFormatter
/note over of GuiOutputFormatter
Formats the result data
printed to the output zone of
the application and to the
status bar.
end note
GUI -> Controller: getPrintableResultForInput(inputStr)
activate Controller
Controller -> Requester: getCommand(inputStr)
activate Requester
Requester -> Requester: _parseAndFillCommandPrice(inputStr)
activate Requester
Requester -> Requester: _buildFullCommandPriceOrderFreeParmsDic(orderFreeParmList)
activate Requester
Requester <-- Requester: return ...
deactivate Requester
Requester <-- Requester: return ...
deactivate Requester
Controller <-- Requester: return AbstractCommand
deactivate Requester
note right
May return a CommandError in
case of parsing problem.
end note
Controller -> CommandPrice: execute()
activate CommandPrice
CommandPrice -> Processor: getCryptoPrice(crypto, unit, ...)
activate Processor
Processor -> Processor: _getPrice(currency, ...)
activate Processor
Processor -> PriceRequester: getHistoricalPriceAtUTCTimeStamp(crypto, unit, ...)
activate PriceRequester
note right
Obtainins a minute price if
request date < 7 days from
now, else a day close price.
end note
PriceRequester -> PriceRequester: _getHistoDayPriceAtUTCTimeStamp(crypto, unit, ...)
activate PriceRequester
PriceRequester <-- PriceRequester: return ResultData
deactivate PriceRequester
Processor <-- PriceRequester: return ResultData
deactivate PriceRequester
Processor <-- Processor: return ResultData
deactivate Processor
CommandPrice <-- Processor: return ResultData
deactivate Processor
Controller <-- CommandPrice: return ResultData or False
deactivate CommandPrice
Controller -> GuiOutputFormatter: getFullCommandString(resultData)
activate GuiOutputFormatter
GuiOutputFormatter -> GuiOutputFormatter: _buildFullDateAndTimeStrings(commandDic, ...)
activate GuiOutputFormatter
GuiOutputFormatter <-- GuiOutputFormatter: return requestDateDMY, ...
deactivate GuiOutputFormatter
Controller <-- GuiOutputFormatter: return printResult, ...
deactivate GuiOutputFormatter
GUI <-- Controller: return printResult, ...
deactivate Controller
@enduml''' \
, commands)
except TypeError as e:
print(e)
pass
SeqDiagBuilder.deactivate()
print('In order to generate the sequence diagram image file, open a command window on {}\nand execute the command java -jar plantuml.jar -tsvg {}\n'.format(plantUmlOutputDir, plantUmlOutputFileName))
def testCreateSeqDiagCommandsOnClassesWithEmbededSelfCalls(self):
entryPoint = ClassA()
SeqDiagBuilder.activate(parentdir,'ClassA', 'doWork') # activate sequence diagram building
entryPoint.doWork()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt","w") as f:
f.write(commands)
self.assertEqual(
'''@startuml
actor USER
participant TestSeqDiagBuilder
participant ClassA
participant ClassB
USER -> ClassA: doWork()
activate ClassA
ClassA -> ClassA: internalCall()
activate ClassA
ClassA -> ClassA: internalInnerCall()
activate ClassA
ClassA -> ClassB: createInnerRequest(parm1)
activate ClassB
ClassA <-- ClassB: return Bool
deactivate ClassB
ClassA <-- ClassA: return ResultPrice
deactivate ClassA
ClassA -> ClassB: createRequest(parm1, parm2)
activate ClassB
ClassA <-- ClassB: return Bool
deactivate ClassB
ClassA <-- ClassA: return ResultPrice
deactivate ClassA
USER <-- ClassA: return ClassAdoWorkRes
deactivate ClassA
@enduml''', commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsWithoutActivatingSeqDiagBuilder(self):
entryPoint = ClassA()
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
entryPoint.doWork()
SeqDiagBuilder.createSeqDiaqCommands('USER')
warningList = SeqDiagBuilder.getWarningList()
self.assertEqual(len(warningList), 1)
self.assertEqual('No control flow recorded.\nMethod activate() called: False.\nMethod recordFlow() called: True.\nSpecified entry point: None.None reached: False.', warningList[0])
def testCreateSeqDiagCommandsOnClassLocatedInPackage(self):
entryPoint = IsolatedClass()
SeqDiagBuilder.activate(parentdir, 'IsolatedClass', 'analyse') # activate sequence diagram building
entryPoint.analyse()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant IsolatedClass
USER -> IsolatedClass: analyse()
activate IsolatedClass
USER <-- IsolatedClass: return Analysis
deactivate IsolatedClass
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCreateSeqDiagCommandsOnClassLocatedInSubPackage(self):
entryPoint = IsolatedClassSub()
SeqDiagBuilder.activate(parentdir, 'IsolatedClassSub', 'analyse') # activate sequence diagram building
entryPoint.analyse()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant IsolatedClassSub
USER -> IsolatedClassSub: analyse()
activate IsolatedClassSub
USER <-- IsolatedClassSub: return Analysis
deactivate IsolatedClassSub
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCallingMethodOnClassRequiringNonNoneConstructorParmWithoutPassingClassArgsDic(self):
entryPoint = Caller()
SeqDiagBuilder.activate(parentdir, 'Caller', 'call') # activate sequence diagram building
entryPoint.call()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 1)
self.assertEqual(
'''@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=14> ERROR - constructor for class FileReader in module testclasses.subtestpackage.filereader failed due to invalid argument(s).</font></b>
<b><font color=red size=14> To solve the problem, pass a class argument dictionary with an entry for FileReader to the SeqDiagBuilder.activate() method.</font></b>
endheader
actor USER
participant Caller
USER -> Caller: call()
activate Caller
USER <-- Caller:
deactivate Caller
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
    def testCallingMethodOnClassRequiringNonNoneConstructorParmWithPassingClassArgsDic(self):
entryPoint = Caller()
classArgDic = {'FileReader': ['testfile.txt']}
SeqDiagBuilder.activate(parentdir, 'Caller', 'call', classArgDic) # activate sequence diagram building
entryPoint.call()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant Caller
participant FileReader
USER -> Caller: call()
activate Caller
Caller -> FileReader: getContentAsList()
activate FileReader
Caller <-- FileReader:
deactivate FileReader
USER <-- Caller:
deactivate Caller
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
    def testCallingMethodOnClassRequiringNonNoneConstructorParmWithPassingClassArgsDicWithTwoEntries(self):
'''
        Test case where the flow requires instantiating the same class (FileReader) twice with
        different values passed to the ctor at each instantiation.
'''
entryPoint = Caller()
classArgDic = {'FileReader_1': ['testfile.txt'], 'FileReader_2': ['testfile2.txt']}
SeqDiagBuilder.activate(parentdir, 'Caller', 'callUsingTwoFileReaders',
classArgDic) # activate sequence diagram building
entryPoint.callUsingTwoFileReaders()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant Caller
participant FileReader
USER -> Caller: callUsingTwoFileReaders()
activate Caller
Caller -> FileReader: getContentAsList()
activate FileReader
Caller <-- FileReader:
deactivate FileReader
Caller -> FileReader: getContentAsList()
activate FileReader
Caller <-- FileReader:
deactivate FileReader
USER <-- Caller:
deactivate Caller
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
    def testCallingMethodOnClassRequiringNonNoneConstructorParmWithPassingClassArgsDicWithOneEntry(self):
'''
        Test case where the flow requires instantiating the same class (FileReader) twice with
        the same value passed to the ctor at each instantiation.
'''
entryPoint = Caller()
classArgDic = {'FileReader': ['testfile.txt']}
SeqDiagBuilder.activate(parentdir, 'Caller', 'callUsingTwoFileReaders',
classArgDic) # activate sequence diagram building
entryPoint.callUsingTwoFileReaders()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant Caller
participant FileReader
USER -> Caller: callUsingTwoFileReaders()
activate Caller
Caller -> FileReader: getContentAsList()
activate FileReader
Caller <-- FileReader:
deactivate FileReader
Caller -> FileReader: getContentAsList()
activate FileReader
Caller <-- FileReader:
deactivate FileReader
USER <-- Caller:
deactivate Caller
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
    def testCallingMethodOnClassRequiringNonNoneConstructorParmWithPassingClassArgsDicWithOneEntryOneBooleanArg(self):
'''
        Test case where the flow requires instantiating a class (FileReaderSupportingVerboseMode) whose ctor
requires a string and a boolean value. To handle this situation, a class ctor arg dictionary
must be passed to the SeqDiagBuilder.activate() method.
'''
entryPoint = Caller()
classArgDic = {'FileReaderSupportingVerboseMode': ['testfile.txt', False]}
SeqDiagBuilder.activate(parentdir, 'Caller', 'callUsingVerboseFileReader',
classArgDic) # activate sequence diagram building
entryPoint.callUsingVerboseFileReader()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant Caller
participant FileReaderSupportingVerboseMode
USER -> Caller: callUsingVerboseFileReader()
activate Caller
Caller -> FileReaderSupportingVerboseMode: getContentAsList()
activate FileReaderSupportingVerboseMode
Caller <-- FileReaderSupportingVerboseMode:
deactivate FileReaderSupportingVerboseMode
USER <-- Caller:
deactivate Caller
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCallingMethodOnClassRequiringNonNoneConstructorParmWithPassingClassArgsDicWithOneEntryOneBooleanArgCallSuperClassMethod(self):
'''
        Test case where the flow requires instantiating a class (FileReaderSupportingVerboseMode) whose ctor
requires a string and a boolean value. To handle this situation, a class ctor arg dictionary
must be passed to the SeqDiagBuilder.activate() method. But here, since the method
FileReaderSupportingVerboseMode.getContentAsListFromSuper() calls a method of its parent
class, the class ctor arg dictionary must also contain an entry for the parent class (FileReader)
        since its ctor __init__ method also requires an argument (a file name)!
'''
entryPoint = Caller()
# this is the argument dictionary which should be defined for successful sequence
# diagram generation:
#classArgDic = {'FileReaderSupportingVerboseMode': ['testfile.txt', False],
# 'FileReader': ['testfile.txt']}
        # but we deliberately omit the entry for the FileReader base class ctor in order
        # to ensure a warning is added to the generated sequence diagram
classArgDic = {'FileReaderSupportingVerboseMode': ['testfile.txt', False]}
SeqDiagBuilder.activate(parentdir, 'Caller', 'callUsingVerboseFileReaderWithCallToSuper',
classArgDic) # activate sequence diagram building
entryPoint.callUsingVerboseFileReaderWithCallToSuper()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 1)
self.assertEqual(
'''@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=14> ERROR - constructor for class FileReader in module testclasses.subtestpackage.filereader failed due to invalid argument(s).</font></b>
<b><font color=red size=14> To solve the problem, pass a class argument dictionary with an entry for FileReader to the SeqDiagBuilder.activate() method.</font></b>
endheader
actor USER
participant Caller
participant FileReaderSupportingVerboseMode
USER -> Caller: callUsingVerboseFileReaderWithCallToSuper()
activate Caller
Caller -> FileReaderSupportingVerboseMode: getContentAsListFromSuper()
activate FileReaderSupportingVerboseMode
Caller <-- FileReaderSupportingVerboseMode:
deactivate FileReaderSupportingVerboseMode
USER <-- Caller:
deactivate Caller
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
    def testCallingMethodOnClassRequiringNonNoneConstructorParmWithPassingClassArgsDicWithOneEntryOneBooleanArgCallSuperClassMethodEntryAddedForParentClass(
self):
'''
        Test case where the flow requires instantiating a class (FileReaderSupportingVerboseMode) whose ctor
requires a string and a boolean value. To handle this situation, a class ctor arg dictionary
must be passed to the SeqDiagBuilder.activate() method. But here, since the method
FileReaderSupportingVerboseMode.getContentAsListFromSuper() calls a method of its parent
class, the class ctor arg dictionary must also contain an entry for the parent class since
        its ctor __init__ method also requires arguments. In this test case, this requirement has
        been satisfied!
'''
entryPoint = Caller()
classArgDic = {'FileReaderSupportingVerboseMode': ['testfile.txt', False], 'FileReader': ['testfile.txt']}
SeqDiagBuilder.activate(parentdir, 'Caller', 'callUsingVerboseFileReaderWithCallToSuper',
classArgDic) # activate sequence diagram building
entryPoint.callUsingVerboseFileReaderWithCallToSuper()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 0)
self.assertEqual(
'''@startuml
actor USER
participant Caller
participant FileReaderSupportingVerboseMode
participant FileReader
USER -> Caller: callUsingVerboseFileReaderWithCallToSuper()
activate Caller
Caller -> FileReaderSupportingVerboseMode: getContentAsListFromSuper()
activate FileReaderSupportingVerboseMode
FileReaderSupportingVerboseMode -> FileReader: getContentAsList()
activate FileReader
FileReaderSupportingVerboseMode <-- FileReader:
deactivate FileReader
Caller <-- FileReaderSupportingVerboseMode:
deactivate FileReaderSupportingVerboseMode
USER <-- Caller:
deactivate Caller
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
    def testCallingMethodOnClassRequiringNonNoneConstructorParmWithPassingClassArgsDicWithTwoEntriesSpecifyingWrongMethodName(self):
'''
        Test case where the flow requires instantiating the same class (FileReader) twice with
        different values passed to the ctor at each instantiation. But here, we do not specify
        the right method for the SeqDiagBuilder.activate() method: 'callUsingTwoFileReaders'
        should be specified, not 'call'!
:return:
'''
entryPoint = Caller()
classArgDic = {'FileReader_1': ['testfile.txt'], 'FileReader_2': ['testfile2.txt']}
SeqDiagBuilder.activate(parentdir, 'Caller', 'call',
classArgDic) # activate sequence diagram building
entryPoint.callUsingTwoFileReaders()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 1)
self.assertEqual(
"""@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=14> No control flow recorded.</font></b>
<b><font color=red size=14> Method activate() called with arguments projectPath=<{}>, entryClass=<Caller>, entryMethod=<call>, classArgDic=<{{'FileReader_1': ['testfile.txt'], 'FileReader_2': ['testfile2.txt']}}>: True.</font></b>
<b><font color=red size=14> Method recordFlow() called: True.</font></b>
<b><font color=red size=14> Specified entry point: Caller.call reached: False.</font></b>
endheader
actor USER
@enduml""".format(parentdir), commands) # using format() instead og replace fails !
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
def testCallingMethodOnClassRequiringNonNoneConstructotParmWithPassingInvalidClassArgsDic(self):
entryPoint = Caller()
classArgDic = {'FileReader': ['testfile.txt', 'inval arg']}
SeqDiagBuilder.activate(parentdir, 'Caller', 'call', classArgDic) # activate sequence diagram building
entryPoint.call()
commands = SeqDiagBuilder.createSeqDiaqCommands('USER')
with open("c:\\temp\\ess.txt", "w") as f:
f.write(commands)
self.assertEqual(len(SeqDiagBuilder.getWarningList()), 1)
self.assertEqual(
'''@startuml
center header
<b><font color=red size=20> Warnings</font></b>
<b><font color=red size=14> ERROR - constructor for class FileReader in module testclasses.subtestpackage.filereader failed due to invalid argument(s) (['testfile.txt', 'inval arg']) defined in the class argument dictionary passed to the SeqDiagBuilder.activate() method.</font></b>
endheader
actor USER
participant Caller
USER -> Caller: call()
activate Caller
USER <-- Caller:
deactivate Caller
@enduml''', commands)
SeqDiagBuilder.deactivate() # deactivate sequence diagram building
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ea29a9cc461cc772418606651a63a753c9adce36 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/securitycenter/v1p1beta1/securitycenter-v1p1beta1-py/google/cloud/securitycenter_v1p1beta1/types/organization_settings.py | faec729075707f892513d3f7e9e1c999722a8557 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,410 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.securitycenter.v1p1beta1',
manifest={
'OrganizationSettings',
},
)
class OrganizationSettings(proto.Message):
r"""User specified settings that are attached to the Security
Command Center organization.
Attributes:
name (str):
The relative resource name of the settings. See:
https://cloud.google.com/apis/design/resource_names#relative_resource_name
Example:
"organizations/{organization_id}/organizationSettings".
enable_asset_discovery (bool):
A flag that indicates if Asset Discovery should be enabled.
If the flag is set to ``true``, then discovery of assets
            will occur. If it is set to ``false``, all historical assets
will remain, but discovery of future assets will not occur.
asset_discovery_config (google.cloud.securitycenter_v1p1beta1.types.OrganizationSettings.AssetDiscoveryConfig):
The configuration used for Asset Discovery
runs.
"""
class AssetDiscoveryConfig(proto.Message):
r"""The configuration used for Asset Discovery runs.
Attributes:
project_ids (Sequence[str]):
The project ids to use for filtering asset
discovery.
inclusion_mode (google.cloud.securitycenter_v1p1beta1.types.OrganizationSettings.AssetDiscoveryConfig.InclusionMode):
The mode to use for filtering asset
discovery.
"""
class InclusionMode(proto.Enum):
r"""The mode of inclusion when running Asset Discovery. Asset discovery
can be limited by explicitly identifying projects to be included or
excluded. If INCLUDE_ONLY is set, then only those projects within
the organization and their children are discovered during asset
            discovery. If EXCLUDE is set, then projects that don't match the
            specified projects are discovered during asset discovery. If neither is set,
then all projects within the organization are discovered during
asset discovery.
"""
INCLUSION_MODE_UNSPECIFIED = 0
INCLUDE_ONLY = 1
EXCLUDE = 2
project_ids = proto.RepeatedField(proto.STRING, number=1)
inclusion_mode = proto.Field(proto.ENUM, number=2,
enum='OrganizationSettings.AssetDiscoveryConfig.InclusionMode',
)
name = proto.Field(proto.STRING, number=1)
enable_asset_discovery = proto.Field(proto.BOOL, number=2)
asset_discovery_config = proto.Field(proto.MESSAGE, number=3,
message=AssetDiscoveryConfig,
)
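
# Hypothetical usage sketch (not part of the generated file); proto-plus
# messages accept their fields as keyword arguments:
#
#     settings = OrganizationSettings(
#         name="organizations/123/organizationSettings",
#         enable_asset_discovery=True,
#         asset_discovery_config=OrganizationSettings.AssetDiscoveryConfig(
#             project_ids=["example-project"],
#             inclusion_mode=(OrganizationSettings.AssetDiscoveryConfig
#                             .InclusionMode.INCLUDE_ONLY),
#         ),
#     )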
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
f8ce478e09b28dbecf8e4a33bca3cd6def745e32 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | backup/user_241/ch5_2020_03_04_20_03_29_973202.py | 3d805dd5f639c6b24268fa29e64baf98bb63e27 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | # Conversao de libras para quilogramas (1 libra = 0.453592 kg)
libras_para_kg = float(input("Quantas libras: "))
print("Quilogramas:", libras_para_kg * 0.453592)
| [
"[email protected]"
] | |
2dc3ec4af49c857ff67a051334b7be5cbb9dd6ba | 927b50cdaf1c384c8bbf6f13816d0ba465852fd8 | /main/migrations/0002_auto_20201128_0813.py | f86867def1d360053603e5adf8c185ee104522d0 | [
"MIT"
] | permissive | jhabarsingh/DOCMED | f37d336483cffd874b0a7db43677c08a47bd639c | 8a831886d3dd415020699491687fb73893e674c5 | refs/heads/main | 2023-04-26T06:45:10.409633 | 2021-05-19T14:37:53 | 2021-05-19T14:37:53 | 316,683,855 | 3 | 5 | MIT | 2021-02-21T13:32:33 | 2020-11-28T07:51:22 | JavaScript | UTF-8 | Python | false | false | 1,061 | py | # Generated by Django 2.0 on 2020-11-28 08:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='doctor',
name='blood_group',
field=models.CharField(blank=True, choices=[('A+', 'A+ Type'), ('A-', 'A- Type'), ('B+', 'B+ Type'), ('B-', 'B- Type'), ('AB+', 'AB+ Type'), ('AB+', 'AB- Type'), ('O+', 'O+ Type'), ('O-', 'O- Type')], max_length=3, null=True),
),
migrations.AlterField(
model_name='doctor',
name='is_working',
field=models.NullBooleanField(),
),
migrations.AlterField(
model_name='patient',
name='blood_group',
field=models.CharField(blank=True, choices=[('A+', 'A+ Type'), ('A-', 'A- Type'), ('B+', 'B+ Type'), ('B-', 'B- Type'), ('AB+', 'AB+ Type'), ('AB+', 'AB- Type'), ('O+', 'O+ Type'), ('O-', 'O- Type')], max_length=4, null=True),
),
]
| [
"[email protected]"
] | |
f0058d3d6a1df1097582e384bb22a5d06725cbb7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/467/usersdata/282/111931/submittedfiles/Av2_Parte2.py | f4d68cd5e055d09012f4f459c82a6e8816d004ca | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | # -*- coding: utf-8 -*-
a=[]
qa=int(input('Digite o numero de elementos de a: '))
b=[]
qb=int(input('Digite o numero de elementos de b: '))
| [
"[email protected]"
] | |
1c4458211f04b61d65360a91f24938a79f071603 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.0_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=43/params.py | 0c4e8626f8dbb25ae1a1eaa2f0fe59307cd289fe | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
'duration': 30,
'final_util': '2.017881',
'max_util': '2.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 43,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
205d233ac74de0fef9008634ea2b663bc3c14464 | af55b6668d2f390049c57664af7b38832af6d7fa | /pycorrector/bert/predict_mask.py | 9a8613902744b317d7a2397c7405cdf028a5eddb | [
"Apache-2.0"
] | permissive | JohnParken/pycorrector | 5406f9802227dfaed8db6bb3a29e64baf98ddf2c | fed285600996510e073cdf71f3ba57e68116acf7 | refs/heads/master | 2021-02-04T10:04:53.982845 | 2020-03-03T02:37:16 | 2020-03-03T02:37:16 | 243,653,849 | 0 | 0 | Apache-2.0 | 2020-02-28T01:33:36 | 2020-02-28T01:33:36 | null | UTF-8 | Python | false | false | 17,375 | py | # -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description: Run BERT on Masked LM.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('../..')
import argparse
import os
import random
import re
import numpy as np
import torch
from pytorch_pretrained_bert import BertForMaskedLM
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pycorrector.utils.logger import logger
MASK_TOKEN = "[MASK]"
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids,
mask_ids=None, mask_positions=None, input_tokens=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.input_tokens = input_tokens
self.segment_ids = segment_ids
self.mask_ids = mask_ids
self.mask_positions = mask_positions
def read_lm_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with open(input_file, "r", encoding='utf-8') as reader:
while True:
line = reader.readline()
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(guid=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
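
# Assumed input format for read_lm_examples() / read_lm_sentence(): one example
# per line, optionally split into two segments by " ||| ", e.g. (hypothetical
# line): "吸烟的人容易得*症 ||| 他们*该戒烟". The '*' characters are later
# converted to [MASK] tokens in convert_examples_to_features().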
def read_lm_sentence(sentence):
"""Read a list of `InputExample`s from an input line."""
examples = []
unique_id = 0
line = sentence.strip()
if line:
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(guid=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def is_subtoken(x):
return x.startswith("##")
def create_masked_lm_prediction(input_ids, mask_position, mask_count=1, mask_id=103):
new_input_ids = list(input_ids)
masked_lm_labels = []
masked_lm_positions = list(range(mask_position, mask_position + mask_count))
for i in masked_lm_positions:
new_input_ids[i] = mask_id
masked_lm_labels.append(input_ids[i])
return new_input_ids, masked_lm_positions, masked_lm_labels
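
# Illustrative call (hypothetical token ids): with input_ids=[101, 2769, 4263, 102],
# mask_position=1, mask_count=2 and mask_id=103, this returns
# ([101, 103, 103, 102], [1, 2], [2769, 4263]), i.e. the masked sequence,
# the masked positions, and the original ids at those positions.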
def create_sequential_mask(input_tokens, input_ids, input_mask, segment_ids, mask_id=103, tokenizer=None):
"""Mask each token/word sequentially"""
features = []
i = 1
while i < len(input_tokens) - 1:
mask_count = 1
while is_subtoken(input_tokens[i + mask_count]):
mask_count += 1
input_ids_new, masked_lm_positions, masked_lm_labels = create_masked_lm_prediction(input_ids, i, mask_count,
mask_id)
feature = InputFeatures(
input_ids=input_ids_new,
input_mask=input_mask,
segment_ids=segment_ids,
mask_ids=masked_lm_labels,
mask_positions=masked_lm_positions,
input_tokens=tokenizer.convert_ids_to_tokens(input_ids_new))
features.append(feature)
i += mask_count
return features
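
# For example (hypothetical tokens), masking the word "play ##ing" yields one
# feature with two consecutive [MASK] positions, since is_subtoken() keeps
# WordPiece continuations together with their head token.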
def convert_examples_to_features(examples, tokenizer, max_seq_length,
mask_token='[MASK]', mask_id=103):
"""Loads a data file into a list of `InputBatch`s."""
features = []
all_features = []
all_tokens = []
for (example_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# The -3 accounts for [CLS], [SEP] and [SEP]
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# "-2" is [CLS] and [SEP]
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens_a = [i.replace('*', mask_token) for i in tokens_a]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens_b = [i.replace('*', '[MASK]') for i in tokens_b]
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
mask_positions = [i for i, v in enumerate(input_ids) if v == mask_id]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if example_index < 5:
logger.info("*** Example ***")
logger.info("example_index: %s" % (example_index))
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
mask_positions=mask_positions,
segment_ids=segment_ids,
input_tokens=tokens))
# Mask each word
# features = create_sequential_mask(tokens, input_ids, input_mask, segment_ids, mask_id, tokenizer)
# all_features.extend(features)
# all_tokens.extend(tokens)
# return all_features, all_tokens
return features
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--bert_model_dir", default='../data/bert_models/chinese_finetuned_lm/',
type=str,
help="Bert pre-trained model config dir")
parser.add_argument("--bert_model_vocab", default='../data/bert_models/chinese_finetuned_lm/vocab.txt',
type=str,
help="Bert pre-trained model vocab path")
parser.add_argument("--output_dir", default="./output", type=str,
help="The output directory where the model checkpoints and predictions will be written.")
# Other parameters
parser.add_argument("--predict_file", default='../data/cn/lm_test_zh.txt', type=str,
help="for predictions.")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--doc_stride", default=64, type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--verbose_logging", default=False, action='store_true',
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
args = parser.parse_args()
device = torch.device("cpu")
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer(args.bert_model_vocab)
MASK_ID = tokenizer.convert_tokens_to_ids([MASK_TOKEN])[0]
    print('MASK_ID:', MASK_ID)
# Prepare model
model = BertForMaskedLM.from_pretrained(args.bert_model_dir)
# Save a trained model
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
if not os.path.exists(output_model_file):
torch.save(model_to_save.state_dict(), output_model_file)
    # Load the fine-tuned weights back and apply them to the model
    model_state_dict = torch.load(output_model_file)
    model.load_state_dict(model_state_dict)
    model.to(device)
# Tokenized input
text = "吸烟的人容易得癌症"
tokenized_text = tokenizer.tokenize(text)
print(text, '=>', tokenized_text)
# Mask a token that we will try to predict back with `BertForMaskedLM`
masked_index = 8
tokenized_text[masked_index] = '[MASK]'
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 0]
# Convert inputs to PyTorch tensors
print('tokens, segments_ids:', indexed_tokens, segments_ids)
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
# Load pre-trained model (weights)
model.eval()
# Predict all tokens
predictions = model(tokens_tensor, segments_tensors)
predicted_index = torch.argmax(predictions[0, masked_index]).item()
print(predicted_index)
predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
print(predicted_token)
    # end of the single-sentence inference demo
# predict ppl and prob of each word
text = "吸烟的人容易得癌症"
tokenized_text = tokenizer.tokenize(text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 0]
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
sentence_loss = 0.0
sentence_count = 0
for idx, label in enumerate(text):
print(label)
label_id = tokenizer.convert_tokens_to_ids([label])[0]
        # Score one character at a time: every other position is ignored (-1)
        lm_labels = [-1] * len(indexed_tokens)
        lm_labels[idx] = label_id
print(lm_labels)
masked_lm_labels = torch.tensor([lm_labels])
        # The forward pass returns the masked-LM loss when labels are given
loss = model(tokens_tensor, segments_tensors, masked_lm_labels=masked_lm_labels)
print('loss:', loss)
prob = float(np.exp(-loss.item()))
print('prob:', prob)
        # Accumulate the loss (negative log-likelihood), not the probability:
        # perplexity is exp of the average loss over the sentence
        sentence_loss += loss.item()
sentence_count += 1
ppl = float(np.exp(sentence_loss / sentence_count))
print('ppl:', ppl)
    # Infer each word by masking one position at a time
text = "吸烟的人容易得癌症"
for masked_index, label in enumerate(text):
tokenized_text = tokenizer.tokenize(text)
print(text, '=>', tokenized_text)
tokenized_text[masked_index] = '[MASK]'
print(tokenized_text)
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
predictions = model(tokens_tensor, segments_tensors)
print('expected label:', label)
predicted_index = torch.argmax(predictions[0, masked_index]).item()
predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
print('predict label:', predicted_token)
scores = predictions[0, masked_index]
# predicted_index = torch.argmax(scores).item()
top_scores = torch.sort(scores, 0, True)
top_score_val = top_scores[0][:5]
top_score_idx = top_scores[1][:5]
for j in range(len(top_score_idx)):
print('Mask predict is:', tokenizer.convert_ids_to_tokens([top_score_idx[j].item()])[0],
' prob:', top_score_val[j].item())
print()
if args.predict_file:
eval_examples = read_lm_examples(input_file=args.predict_file)
eval_features = convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
mask_token=MASK_TOKEN,
mask_id=MASK_ID)
logger.info("***** Running predictions *****")
logger.info(" Num orig examples = %d", len(eval_examples))
logger.info(" Num split examples = %d", len(eval_features))
logger.info("Start predict ...")
for f in eval_features:
input_ids = torch.tensor([f.input_ids])
segment_ids = torch.tensor([f.segment_ids])
predictions = model(input_ids, segment_ids)
            # report the top-5 predictions for each masked position
mask_positions = f.mask_positions
if mask_positions:
for idx, i in enumerate(mask_positions):
if not i:
continue
scores = predictions[0, i]
# predicted_index = torch.argmax(scores).item()
top_scores = torch.sort(scores, 0, True)
top_score_val = top_scores[0][:5]
top_score_idx = top_scores[1][:5]
# predicted_prob = predictions[0, i][predicted_index].item()
# predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
print('original text is:', f.input_tokens)
# print('Mask predict is:', predicted_token, ' prob:', predicted_prob)
for j in range(len(top_score_idx)):
print('Mask predict is:', tokenizer.convert_ids_to_tokens([top_score_idx[j].item()])[0],
' prob:', top_score_val[j].item())
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
05fc046d63ad0da119f177a76e959f80d9d8f37b | d184d1fc998a300feee2d716d97209b9fbc78468 | /probability.py | dbeb07713ae4103f2e739fabfa5eb51dd35d80c9 | [] | no_license | MickeyKen/plot_node_master_thesis | df196d7a037b1960c1ee95268a1ae3b1e8f24148 | 5182ea79cb8cfbc6bead60d97eda9307f7e53c10 | refs/heads/master | 2023-02-16T21:17:49.284973 | 2021-01-19T09:19:40 | 2021-01-19T09:19:40 | 330,574,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | #!/usr/bin/python
import matplotlib.pyplot as plt
path = 'data/param_UD-v95_output.txt'
isServiceCount = True
ACTOR_NUM = 3
AVERAGE_NUM = 100
LIMIT = 5000
if __name__ == '__main__':
collision = [[] for j in range(ACTOR_NUM)]
average_collision = []
success = [[] for j in range(ACTOR_NUM)]
average_success = []
no_action = [[] for j in range(ACTOR_NUM)]
average_no_action = []
eps = []
average_eps = []
epsilons = [[] for j in range(ACTOR_NUM)]
flag = 0
count = 0
fig = plt.figure(figsize=(8.27,3.9), dpi=100)
plt.ion()
plt.xlabel('Episode')
# plt.ylabel('P')
plt.grid()
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
with open(path) as f:
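        # Assumed log-line layout (field names are hypothetical):
        #   episode,actor,<unused>,step,<unused>,reward,...
        # Only fields 0, 1, 3 and 5 are read below.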
for s_line in f:
eps_num = int(s_line.split(',')[0])
actor_num = int(s_line.split(',')[1])
step = int(s_line.split(',')[3])
reward = float(s_line.split(',')[5])
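            # Outcome heuristic inferred from the thresholds below (not
            # documented in the original): a strongly negative reward within
            # 150 steps counts as a collision, a positive reward as a success,
            # and anything else as an episode that ran past the step limit.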
if step < 150 and reward < -200:
collision[actor_num].append(1.0)
success[actor_num].append(0.0)
no_action[actor_num].append(0.0)
elif step < 150 and reward > 0:
collision[actor_num].append(0.0)
success[actor_num].append(1.0)
no_action[actor_num].append(0.0)
else:
collision[actor_num].append(0.0)
success[actor_num].append(0.0)
no_action[actor_num].append(1.0)
collision_sum = 0.0
success_sum = 0.0
no_action_sum = 0.0
average_collision_sum = 0.0
average_success_sum = 0.0
average_no_action_sum = 0.0
count = 1
for index in range(min(len(v) for v in collision)):
collision_sum = 0.0
success_sum = 0.0
no_action_sum = 0.0
if index <= LIMIT:
for n in range(ACTOR_NUM):
collision_sum += collision[n][index]
success_sum += success[n][index]
no_action_sum += no_action[n][index]
average_collision_sum += collision_sum / float(ACTOR_NUM)
average_success_sum += success_sum / float(ACTOR_NUM)
average_no_action_sum += no_action_sum / float(ACTOR_NUM)
if index % AVERAGE_NUM == 0 and index > 0:
average_eps.append(count*AVERAGE_NUM)
average_collision.append(average_collision_sum / float(AVERAGE_NUM))
average_success.append(average_success_sum / float(AVERAGE_NUM))
average_no_action.append(average_no_action_sum / float(AVERAGE_NUM))
average_collision_sum = 0.0
average_success_sum = 0.0
average_no_action_sum = 0.0
count += 1
eps.append(index + 1)
plt.plot(average_eps, average_success, color='#e41a1c', label="success")
plt.plot(average_eps, average_collision, color='#00529a', label="collision")
plt.plot(average_eps, average_no_action, color='#3FBF00', label="past 150 steps")
plt.legend( loc='upper left', borderaxespad=1)
plt.draw()
fig.savefig("result_multi_probability.png")
plt.pause(0)
| [
"[email protected]"
] | |
145e5904cf2bc4e6e47030788b2461978b486ece | 6318f1458f9c6cca91cb00aa415638a599d8ba26 | /arcade/python/arcade-theCore/11_SpringOfIntegration/091_Combs.py | ec81b4e9bfbc202b226d08d5d49310be3d66ef37 | [
"MIT"
] | permissive | netor27/codefights-solutions | 836016a048086cd2bc644b2c40b7686102b6f179 | 69701ab06d45902c79ec9221137f90b75969d8c8 | refs/heads/master | 2021-10-28T13:04:42.940059 | 2019-01-16T23:12:08 | 2019-01-16T23:12:08 | 110,753,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | '''
Miss X has only two combs in her possession, both of which are old and miss a tooth or two. She also has many purses of different length, in which she carries the combs. The only way they fit is horizontally and without overlapping. Given teeth' positions on both combs, find the minimum length of the purse she needs to take them with her.
It is guaranteed that there is at least one tooth at each end of the comb.
It is also guaranteed that the total length of two strings is smaller than 32.
Note that the combs cannot be rotated/reversed.
Example
For comb1 = "*..*" and comb2 = "*.*", the output should be
combs(comb1, comb2) = 5.
Although it is possible to place the combs like on the first picture, the best way to do this is either picture 2 or picture 3.
'''
def combs(comb1, comb2):
n1, n2 = len(comb1), len(comb2)
res = n1 + n2
m1, m2 = mask(comb1), mask(comb2)
for i in range(n1 + 1):
if (m2 << i) & m1 == 0:
temp = max(n2 + i, n1)
if temp < res:
res = temp
for i in range(n2 + 1):
if (m1 << i) & m2 == 0:
temp = max(n1 + i, n2)
if temp < res:
res = temp
return res
def mask(s):
r = 0
for c in s:
digit = 0
if c == '*':
digit = 1
r = (r << 1) + digit
return r
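
# Quick sanity check using the example from the problem statement above
# (hypothetical driver, not part of the original solution file):
if __name__ == '__main__':
    assert combs("*..*", "*.*") == 5
    print(combs("*..*", "*.*"))  # expected: 5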
| [
"[email protected]"
] |