Dataset schema (one row per source file):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | 3–616 chars |
| content_id | string | length 40 |
| detected_licenses | sequence | 0–112 items |
| license_type | string | 2 classes |
| repo_name | string | 5–115 chars |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class (Python) |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | 3 – 10.2M chars |
| authors | sequence | 1 item |
| author_id | string | 1–132 chars |

Row format (fields separated by ` | `, in this order): blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
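Before the raw rows, a minimal sketch of how a table with this schema could be loaded and filtered with the Hugging Face `datasets` library. The local file name `data.parquet` and the `train` split are assumptions for illustration only, not part of this dump:

```python
# Sketch only: load rows with the schema above and keep small, permissively licensed files.
# "data.parquet" is an assumed local export of this table, not a real published path.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data.parquet", split="train")

small_permissive = ds.filter(
    lambda row: row["license_type"] == "permissive" and row["length_bytes"] < 20_000
)

for row in small_permissive.select(range(3)):
    print(row["repo_name"], row["path"], row["length_bytes"])
```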
8ca9fad8cd78573c8d3ca2e9a76b0d607134371b | ce214c2cbecb3591665b2748c1c777dd83625f96 | /lesson_13/api/routers.py | ff242ade61e4fcebb7697a8a760da6bb173b9707 | [] | no_license | antonplkv/itea_advanced_june | e35af2f10d93d8ffb43664cd0cf7dfd46b969aef | c20e81167bfd87b7e16f340210b246a4cbc1751e | refs/heads/master | 2022-12-04T20:27:21.908624 | 2020-08-19T18:19:49 | 2020-08-19T18:19:49 | 272,512,423 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from flask import Flask
from flask_restful import Api
from .resources import AuthorResource
app = Flask(__name__)
api = Api(app)
api.add_resource(AuthorResource, '/authors', '/authors/<author_id>') | [
"[email protected]"
] | |
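The `routers.py` row above wires up an `AuthorResource` imported from a sibling `resources` module that is not included in this dump. A hypothetical minimal version of such a resource is sketched below; the in-memory storage and field names are illustrative assumptions, not code from the `antonplkv/itea_advanced_june` repository:

```python
# Hypothetical resources.py matching api.add_resource(AuthorResource, '/authors', '/authors/<author_id>')
from flask_restful import Resource

AUTHORS = {"1": {"name": "Ivan", "books": []}}  # stand-in storage, assumption only


class AuthorResource(Resource):
    def get(self, author_id=None):
        if author_id is None:
            return AUTHORS, 200                      # GET /authors
        if author_id in AUTHORS:
            return AUTHORS[author_id], 200           # GET /authors/<author_id>
        return {"error": "author not found"}, 404

    def post(self):
        return {"status": "created"}, 201
```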
91ff95988bce1d58997328ad6d6def469c878d07 | 452c33c0622ec36e93e6ff6637533a15a067a8a4 | /samples/client/petstore/python-experimental/petstore_api/models/outer_composite.py | f3887c8a3267c6a6532d498e3de2a32c135c4da3 | [
"Apache-2.0"
] | permissive | eric-erki/openapi-generator | 40c4294433bada9f693aca0c32326609e2234f9c | 0ea1ead59e41e4e8a959235dc8234d44447a9658 | refs/heads/master | 2023-01-07T03:33:36.315459 | 2019-09-20T18:13:33 | 2019-09-20T18:13:33 | 209,955,560 | 1 | 3 | Apache-2.0 | 2023-01-04T10:58:25 | 2019-09-21T09:09:49 | Java | UTF-8 | Python | false | false | 4,876 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class OuterComposite(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'my_number': 'float',
'my_string': 'str',
'my_boolean': 'bool',
}
attribute_map = {
'my_number': 'my_number', # noqa: E501
'my_string': 'my_string', # noqa: E501
'my_boolean': 'my_boolean', # noqa: E501
}
def __init__(self, my_number=None, my_string=None, my_boolean=None): # noqa: E501
"""OuterComposite - a model defined in OpenAPI
Keyword Args:
my_number (float): [optional] # noqa: E501
my_string (str): [optional] # noqa: E501
my_boolean (bool): [optional] # noqa: E501
"""
self._my_number = None
self._my_string = None
self._my_boolean = None
self.discriminator = None
if my_number is not None:
self.my_number = my_number # noqa: E501
if my_string is not None:
self.my_string = my_string # noqa: E501
if my_boolean is not None:
self.my_boolean = my_boolean # noqa: E501
@property
def my_number(self):
"""Gets the my_number of this OuterComposite. # noqa: E501
:return: The my_number of this OuterComposite. # noqa: E501
:rtype: float
"""
return self._my_number
@my_number.setter
def my_number(
self,
my_number):
"""Sets the my_number of this OuterComposite.
:param my_number: The my_number of this OuterComposite. # noqa: E501
:type: float
"""
self._my_number = (
my_number)
@property
def my_string(self):
"""Gets the my_string of this OuterComposite. # noqa: E501
:return: The my_string of this OuterComposite. # noqa: E501
:rtype: str
"""
return self._my_string
@my_string.setter
def my_string(
self,
my_string):
"""Sets the my_string of this OuterComposite.
:param my_string: The my_string of this OuterComposite. # noqa: E501
:type: str
"""
self._my_string = (
my_string)
@property
def my_boolean(self):
"""Gets the my_boolean of this OuterComposite. # noqa: E501
:return: The my_boolean of this OuterComposite. # noqa: E501
:rtype: bool
"""
return self._my_boolean
@my_boolean.setter
def my_boolean(
self,
my_boolean):
"""Sets the my_boolean of this OuterComposite.
:param my_boolean: The my_boolean of this OuterComposite. # noqa: E501
:type: bool
"""
self._my_boolean = (
my_boolean)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OuterComposite):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
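The generated `OuterComposite` model in the row above is driven entirely by its `openapi_types` and `attribute_map` tables. A short usage sketch, assuming the generated package is importable as `petstore_api` exactly as in its own imports:

```python
from petstore_api.models.outer_composite import OuterComposite

composite = OuterComposite(my_number=1.5, my_string="hello", my_boolean=True)

# to_dict() walks openapi_types, so nested models and lists would be serialised recursively.
assert composite.to_dict() == {"my_number": 1.5, "my_string": "hello", "my_boolean": True}
print(composite)  # __repr__ pretty-prints the same dict via pprint.pformat
```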
9aab50959e6376757d51b3fef3e88483eb1d7494 | 07c3124153a6909f19a21c3c664d8e3f8e0481d0 | /fractals/sierpinski_triangle/sierpinski_triangle.py | aae6e3da8f1aaeec51acdaeab10b98c9d1557216 | [] | no_license | gridl/art-of-turtle-programming | 94ed422a4e75f83e4c3abf7910ed9e5ed8a40aa9 | db6b2c1059bffc9df468691c6ecf1c110b38aafd | refs/heads/master | 2020-03-19T16:20:48.680667 | 2015-12-15T05:46:03 | 2015-12-15T05:46:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | from turtle import *
import math
tracer(1, 0)
setworldcoordinates(0, 0, 960, 810)
bgcolor(0.1, 0.1, 0.1)
BASE_SIZE = 13
BASE_HEIGHT = BASE_SIZE * math.sin(60 * (math.pi / 180))
START_X = 50
START_Y = 20
def draw_triangle(x, y, color):
penup()
pencolor(color)
goto(x, y) # go to bottom-left corner
pendown()
setheading(60)
forward(BASE_SIZE) # draw first side
right(120)
forward(BASE_SIZE) # draw second side
right(120)
forward(BASE_SIZE) # draw third side
def draw_sierpinski(x, y, level, color):
if level == 0:
draw_triangle(x, y, color)
draw_triangle(x + (BASE_SIZE * 0.5), y + BASE_HEIGHT, color)
draw_triangle(x + BASE_SIZE, y, color)
else:
draw_sierpinski(x, y, level - 1, color)
draw_sierpinski(x + (BASE_SIZE * 0.5 * (2 ** level)), y + (BASE_HEIGHT * (2 ** level)), level - 1, color)
draw_sierpinski(x + (BASE_SIZE * (2 ** level)), y, level - 1, color)
# loop from 5 down to 0, drawing 6 nested sets of Sierpinski triangles (levels 5 through 0), each with a different color
for i in range(5, -1, -1):
red = 1 - (0.2 * i)
green = 0.1 * i
blue = 0.1 * i
draw_sierpinski(START_X, START_Y, i, (red, green, blue))
hideturtle()
update()
exitonclick()
| [
"[email protected]"
] | |
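A quick way to sanity-check the recursion in the row above: each call at `level > 0` makes three calls at `level - 1`, and a `level == 0` call draws three small triangles, so one top-level call draws 3^(level+1) triangles and the color loop draws 1092 in total. A tiny arithmetic check, no turtle graphics needed:

```python
# Mirrors the recursion in draw_sierpinski: 3 triangles at level 0, tripling per level.
def triangles_at(level):
    return 3 if level == 0 else 3 * triangles_at(level - 1)

total = sum(triangles_at(level) for level in range(5, -1, -1))
print(total)  # 1092 == 3 + 9 + 27 + 81 + 243 + 729
```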
dd2581b2b922761111f73de6a66b37bef9ca71ad | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/servicebus/latest/list_disaster_recovery_config_keys.py | 25a135b1c7de1f742920f2d68de3190e3c721078 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 6,888 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListDisasterRecoveryConfigKeysResult',
'AwaitableListDisasterRecoveryConfigKeysResult',
'list_disaster_recovery_config_keys',
]
@pulumi.output_type
class ListDisasterRecoveryConfigKeysResult:
"""
Namespace/ServiceBus Connection String
"""
def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="aliasPrimaryConnectionString")
def alias_primary_connection_string(self) -> str:
"""
Primary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_primary_connection_string")
@property
@pulumi.getter(name="aliasSecondaryConnectionString")
def alias_secondary_connection_string(self) -> str:
"""
Secondary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_secondary_connection_string")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
A string that describes the authorization rule.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
"""
Primary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> str:
"""
Secondary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListDisasterRecoveryConfigKeysResult(ListDisasterRecoveryConfigKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDisasterRecoveryConfigKeysResult(
alias_primary_connection_string=self.alias_primary_connection_string,
alias_secondary_connection_string=self.alias_secondary_connection_string,
key_name=self.key_name,
primary_connection_string=self.primary_connection_string,
primary_key=self.primary_key,
secondary_connection_string=self.secondary_connection_string,
secondary_key=self.secondary_key)
def list_disaster_recovery_config_keys(alias: Optional[str] = None,
authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDisasterRecoveryConfigKeysResult:
"""
Use this data source to access information about an existing resource.
:param str alias: The Disaster Recovery configuration name
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['alias'] = alias
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus/latest:listDisasterRecoveryConfigKeys', __args__, opts=opts, typ=ListDisasterRecoveryConfigKeysResult).value
return AwaitableListDisasterRecoveryConfigKeysResult(
alias_primary_connection_string=__ret__.alias_primary_connection_string,
alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
key_name=__ret__.key_name,
primary_connection_string=__ret__.primary_connection_string,
primary_key=__ret__.primary_key,
secondary_connection_string=__ret__.secondary_connection_string,
secondary_key=__ret__.secondary_key)
| [
"[email protected]"
] | |
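A hypothetical invocation of the generated helper above, inside a Pulumi program; the resource group, namespace, alias and rule names below are placeholders, not values from any real subscription:

```python
# Sketch only: all resource names below are placeholders.
import pulumi
import pulumi_azure_nextgen.servicebus.latest as servicebus

keys = servicebus.list_disaster_recovery_config_keys(
    alias="my-geo-dr-alias",
    authorization_rule_name="RootManageSharedAccessKey",
    namespace_name="my-servicebus-namespace",
    resource_group_name="my-resource-group",
)

pulumi.export("aliasPrimaryConnectionString", keys.alias_primary_connection_string)
```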
d212b119feedd836b1965727e519777fd8b95557 | fea44d5ca4e6c9b2c7950234718a4531d453849e | /sktime/forecasting/tests/test_all_forecasters.py | c528a23d1d8d1d4b7fe5fc87dd17cbf747f4fa26 | [
"BSD-3-Clause"
] | permissive | mlgig/sktime | 288069ab8c9b0743113877032dfca8cf1c2db3fb | 19618df351a27b77e3979efc191e53987dbd99ae | refs/heads/master | 2023-03-07T20:22:48.553615 | 2023-02-19T18:09:12 | 2023-02-19T18:09:12 | 234,604,691 | 1 | 0 | BSD-3-Clause | 2020-01-17T17:50:12 | 2020-01-17T17:50:11 | null | UTF-8 | Python | false | false | 28,833 | py | # -*- coding: utf-8 -*-
"""Tests for BaseForecaster API points.
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
__author__ = ["mloning", "kejsitake", "fkiraly"]
import numpy as np
import pandas as pd
import pytest
from sktime.datatypes import check_is_mtype
from sktime.datatypes._utilities import get_cutoff
from sktime.exceptions import NotFittedError
from sktime.forecasting.base._delegate import _DelegatedForecaster
from sktime.forecasting.model_selection import (
ExpandingWindowSplitter,
SlidingWindowSplitter,
temporal_train_test_split,
)
from sktime.forecasting.tests._config import (
TEST_ALPHAS,
TEST_FHS,
TEST_OOS_FHS,
TEST_STEP_LENGTHS_INT,
TEST_WINDOW_LENGTHS_INT,
VALID_INDEX_FH_COMBINATIONS,
)
from sktime.performance_metrics.forecasting import mean_absolute_percentage_error
from sktime.tests.test_all_estimators import BaseFixtureGenerator, QuickTester
from sktime.utils._testing.forecasting import (
_assert_correct_columns,
_assert_correct_pred_time_index,
_get_expected_index_for_update_predict,
_get_n_columns,
_make_fh,
make_forecasting_problem,
)
from sktime.utils._testing.series import _make_series
from sktime.utils.validation.forecasting import check_fh
# get all forecasters
FH0 = 1
INVALID_X_INPUT_TYPES = [list("foo"), tuple()]
INVALID_y_INPUT_TYPES = [list("bar"), tuple()]
# testing data
y = make_forecasting_problem()
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
# names for index/fh combinations to display in tests
index_fh_comb_names = [f"{x[0]}-{x[1]}-{x[2]}" for x in VALID_INDEX_FH_COMBINATIONS]
pytest_skip_msg = (
"ForecastingHorizon with timedelta values "
"is currently experimental and not supported everywhere"
)
class ForecasterFixtureGenerator(BaseFixtureGenerator):
"""Fixture generator for forecasting tests.
Fixtures parameterized
----------------------
estimator_class: estimator inheriting from BaseObject
ranges over all estimator classes not excluded by EXCLUDED_TESTS
estimator_instance: instance of estimator inheriting from BaseObject
ranges over all estimator classes not excluded by EXCLUDED_TESTS
instances are generated by create_test_instance class method
scenario: instance of TestScenario
ranges over all scenarios returned by retrieve_scenarios
"""
# note: this should be separate from TestAllForecasters
# additional fixtures, parameters, etc should be added here
# TestAllForecasters should contain the tests only
estimator_type_filter = "forecaster"
fixture_sequence = [
"estimator_class",
"estimator_instance",
"n_columns",
"scenario",
# "fh",
"update_params",
"step_length",
]
def _generate_n_columns(self, test_name, **kwargs):
"""Return number of columns for series generation in positive test cases.
Fixtures parameterized
----------------------
n_columns: int
1 for univariate forecasters, 2 for multivariate forecasters
ranges over 1 and 2 for forecasters which are both uni/multivariate
"""
if "estimator_class" in kwargs.keys():
scitype_tag = kwargs["estimator_class"].get_class_tag("scitype:y")
elif "estimator_instance" in kwargs.keys():
scitype_tag = kwargs["estimator_instance"].get_tag("scitype:y")
else:
return []
n_columns_list = _get_n_columns(scitype_tag)
if len(n_columns_list) == 1:
n_columns_names = ["" for x in n_columns_list]
else:
n_columns_names = [f"y:{x}cols" for x in n_columns_list]
return n_columns_list, n_columns_names
def _generate_update_params(self, test_name, **kwargs):
"""Return update_params for update calls.
Fixtures parameterized
----------------------
update_params: bool
whether to update parameters in update; ranges over True, False
"""
return [True, False], ["update_params=True", "update_params=False"]
def _generate_step_length(self, test_name, **kwargs):
"""Return step length for window.
Fixtures parameterized
----------------------
step_length: int
1 if update_params=True; TEST_STEP_LENGTH_INT if update_params=False
"""
update_params = kwargs["update_params"]
if update_params:
return [1], [""]
else:
return TEST_STEP_LENGTHS_INT, [f"step={a}" for a in TEST_STEP_LENGTHS_INT]
class TestAllForecasters(ForecasterFixtureGenerator, QuickTester):
"""Module level tests for all sktime forecasters."""
def test_get_fitted_params(self, estimator_instance, scenario):
"""Test get_fitted_params."""
scenario.run(estimator_instance, method_sequence=["fit"])
try:
params = estimator_instance.get_fitted_params()
assert isinstance(params, dict)
except NotImplementedError:
pass
# todo: should these not be checked in test_all_estimators?
def test_raises_not_fitted_error(self, estimator_instance):
"""Test that calling post-fit methods before fit raises error."""
# We here check extra method of the forecaster API: update and update_predict.
with pytest.raises(NotFittedError):
estimator_instance.update(y_test, update_params=False)
with pytest.raises(NotFittedError):
cv = SlidingWindowSplitter(fh=1, window_length=1, start_with_window=False)
estimator_instance.update_predict(y_test, cv=cv)
try:
with pytest.raises(NotFittedError):
estimator_instance.get_fitted_params()
except NotImplementedError:
pass
def test_y_multivariate_raises_error(self, estimator_instance):
"""Test that wrong y scitype raises error (uni/multivariate not supported)."""
if estimator_instance.get_tag("scitype:y") == "multivariate":
y = _make_series(n_columns=1)
with pytest.raises(ValueError, match=r"two or more variables"):
estimator_instance.fit(y, fh=FH0)
if estimator_instance.get_tag("scitype:y") in ["univariate", "both"]:
# this should pass since "both" allows any number of variables
# and "univariate" automatically vectorizes, behaves multivariate
pass
# todo: should these not be "negative scenarios", tested in test_all_estimators?
@pytest.mark.parametrize("y", INVALID_y_INPUT_TYPES)
def test_y_invalid_type_raises_error(self, estimator_instance, y):
"""Test that invalid y input types raise error."""
with pytest.raises(TypeError, match=r"type"):
estimator_instance.fit(y, fh=FH0)
# todo: should these not be "negative scenarios", tested in test_all_estimators?
@pytest.mark.parametrize("X", INVALID_X_INPUT_TYPES)
def test_X_invalid_type_raises_error(self, estimator_instance, n_columns, X):
"""Test that invalid X input types raise error."""
y_train = _make_series(n_columns=n_columns)
try:
with pytest.raises(TypeError, match=r"type"):
estimator_instance.fit(y_train, X, fh=FH0)
except NotImplementedError as e:
msg = str(e).lower()
assert "exogenous" in msg
# todo: refactor with scenarios. Need to override fh and scenario args for this.
@pytest.mark.parametrize(
"index_fh_comb", VALID_INDEX_FH_COMBINATIONS, ids=index_fh_comb_names
)
@pytest.mark.parametrize("fh_int", TEST_FHS, ids=[f"fh={fh}" for fh in TEST_FHS])
def test_predict_time_index(
self, estimator_instance, n_columns, index_fh_comb, fh_int
):
"""Check that predicted time index matches forecasting horizon.
Tests predicted time index for predict and predict_residuals.
"""
index_type, fh_type, is_relative = index_fh_comb
if fh_type == "timedelta":
return None
# todo: ensure check_estimator works with pytest.skip like below
# pytest.skip(
# "ForecastingHorizon with timedelta values "
# "is currently experimental and not supported everywhere"
# )
y_train = _make_series(
n_columns=n_columns, index_type=index_type, n_timepoints=50
)
cutoff = get_cutoff(y_train, return_index=True)
fh = _make_fh(cutoff, fh_int, fh_type, is_relative)
try:
estimator_instance.fit(y_train, fh=fh)
y_pred = estimator_instance.predict()
_assert_correct_pred_time_index(y_pred.index, cutoff, fh=fh_int)
_assert_correct_columns(y_pred, y_train)
y_test = _make_series(
n_columns=n_columns, index_type=index_type, n_timepoints=len(y_pred)
)
y_test.index = y_pred.index
y_res = estimator_instance.predict_residuals(y_test)
_assert_correct_pred_time_index(y_res.index, cutoff, fh=fh)
except NotImplementedError:
pass
@pytest.mark.parametrize(
"index_fh_comb", VALID_INDEX_FH_COMBINATIONS, ids=index_fh_comb_names
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_predict_time_index_with_X(
self, estimator_instance, n_columns, index_fh_comb, fh_int_oos
):
"""Check that predicted time index matches forecasting horizon."""
index_type, fh_type, is_relative = index_fh_comb
if fh_type == "timedelta":
return None
# todo: ensure check_estimator works with pytest.skip like below
# pytest.skip(
# "ForecastingHorizon with timedelta values "
# "is currently experimental and not supported everywhere"
# )
z, X = make_forecasting_problem(index_type=index_type, make_X=True)
# Some estimators may not support all time index types and fh types, hence we
# need to catch NotImplementedErrors.
y = _make_series(n_columns=n_columns, index_type=index_type)
cutoff = get_cutoff(y.iloc[: len(y) // 2], return_index=True)
fh = _make_fh(cutoff, fh_int_oos, fh_type, is_relative)
y_train, _, X_train, X_test = temporal_train_test_split(y, X, fh=fh)
try:
estimator_instance.fit(y_train, X_train, fh=fh)
y_pred = estimator_instance.predict(X=X_test)
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(y_pred.index, cutoff, fh)
_assert_correct_columns(y_pred, y_train)
except NotImplementedError:
pass
@pytest.mark.parametrize(
"index_fh_comb", VALID_INDEX_FH_COMBINATIONS, ids=index_fh_comb_names
)
def test_predict_time_index_in_sample_full(
self, estimator_instance, n_columns, index_fh_comb
):
"""Check that predicted time index equals fh for full in-sample predictions."""
index_type, fh_type, is_relative = index_fh_comb
if fh_type == "timedelta":
return None
# todo: ensure check_estimator works with pytest.skip like below
# pytest.skip(
# "ForecastingHorizon with timedelta values "
# "is currently experimental and not supported everywhere"
# )
y_train = _make_series(n_columns=n_columns, index_type=index_type)
cutoff = get_cutoff(y_train, return_index=True)
steps = -np.arange(len(y_train))
fh = _make_fh(cutoff, steps, fh_type, is_relative)
try:
estimator_instance.fit(y_train, fh=fh)
y_pred = estimator_instance.predict()
_assert_correct_pred_time_index(y_pred.index, cutoff, fh)
except NotImplementedError:
pass
def test_predict_series_name_preserved(self, estimator_instance):
"""Test that fit/predict preserves name attribute and type of pd.Series."""
# skip this test if estimator needs multivariate data
# because then it does not take pd.Series at all
if estimator_instance.get_tag("scitype:y") == "multivariate":
return None
y_train = _make_series(n_timepoints=15)
y_train.name = "foo"
estimator_instance.fit(y_train, fh=[1, 2, 3])
y_pred = estimator_instance.predict()
_assert_correct_columns(y_pred, y_train)
def _check_pred_ints(
self, pred_ints: pd.DataFrame, y_train: pd.Series, y_pred: pd.Series, fh_int
):
# make iterable
if isinstance(pred_ints, pd.DataFrame):
pred_ints = [pred_ints]
for pred_int in pred_ints:
# check column naming convention
assert list(pred_int.columns) == ["lower", "upper"]
# check time index
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(pred_int.index, cutoff, fh_int)
# check values
assert np.all(pred_int["upper"] >= pred_int["lower"])
# check if errors are weakly monotonically increasing
# pred_errors = y_pred - pred_int["lower"]
# # assert pred_errors.is_mononotic_increasing
# assert np.all(
# pred_errors.values[1:].round(4) >= pred_errors.values[:-1].round(4)
# )
@pytest.mark.parametrize("index_type", [None, "range"])
@pytest.mark.parametrize(
"coverage", TEST_ALPHAS, ids=[f"alpha={a}" for a in TEST_ALPHAS]
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_predict_interval(
self, estimator_instance, n_columns, index_type, fh_int_oos, coverage
):
"""Check prediction intervals returned by predict.
Arguments
---------
estimator_instance : BaseEstimator class descendant instance, forecaster to test
n_columns : number of columns for the test data
index_type : index type of the test data
fh_int_oos : forecasting horizon to test the forecaster at, all out of sample
coverage: float, coverage at which to make prediction intervals
Raises
------
AssertionError - if Forecaster test instance has "capability:pred_int"
and pred. int are not returned correctly when asking predict for them
AssertionError - if Forecaster test instance does not have "capability:pred_int"
and no NotImplementedError is raised when asking predict for pred.int
"""
y_train = _make_series(n_columns=n_columns, index_type=index_type)
estimator_instance.fit(y_train, fh=fh_int_oos)
if estimator_instance.get_tag("capability:pred_int"):
pred_ints = estimator_instance.predict_interval(
fh_int_oos, coverage=coverage
)
valid, msg, _ = check_is_mtype(
pred_ints, mtype="pred_interval", scitype="Proba", return_metadata=True
) # type: ignore
assert valid, msg
else:
with pytest.raises(NotImplementedError, match="prediction intervals"):
estimator_instance.predict_interval(fh_int_oos, coverage=coverage)
def _check_predict_quantiles(
self, pred_quantiles: pd.DataFrame, y_train: pd.Series, fh, alpha
):
# check if the input is a dataframe
assert isinstance(pred_quantiles, pd.DataFrame)
# check time index (also checks forecasting horizon is more than one element)
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(pred_quantiles.index, cutoff, fh)
        # Forecasters where variable names do not exist:
        # in this case y_train is a Series, so the upper column level in the DataFrame == 'Quantiles'
if isinstance(y_train, pd.Series):
expected = pd.MultiIndex.from_product([["Quantiles"], [alpha]])
else:
# multiply variables with all alpha values
expected = pd.MultiIndex.from_product([y_train.columns, [alpha]])
found = pred_quantiles.columns.to_flat_index()
assert all(expected == found)
if isinstance(alpha, list):
# sorts the columns that correspond to alpha values
pred_quantiles = pred_quantiles.reindex(
columns=pred_quantiles.columns.reindex(sorted(alpha), level=1)[0]
)
# check if values are monotonically increasing
for var in pred_quantiles.columns.levels[0]:
for index in range(len(pred_quantiles.index)):
assert pred_quantiles[var].iloc[index].is_monotonic_increasing
@pytest.mark.parametrize(
"alpha", TEST_ALPHAS, ids=[f"alpha={a}" for a in TEST_ALPHAS]
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_predict_quantiles(self, estimator_instance, n_columns, fh_int_oos, alpha):
"""Check prediction quantiles returned by predict.
Arguments
---------
Forecaster: BaseEstimator class descendant, forecaster to test
fh: ForecastingHorizon, fh at which to test prediction
alpha: float, alpha at which to make prediction intervals
Raises
------
AssertionError - if Forecaster test instance has "capability:pred_int"
and pred. int are not returned correctly when asking predict for them
AssertionError - if Forecaster test instance does not have "capability:pred_int"
and no NotImplementedError is raised when asking predict for pred.int
"""
y_train = _make_series(n_columns=n_columns)
estimator_instance.fit(y_train, fh=fh_int_oos)
try:
quantiles = estimator_instance.predict_quantiles(fh=fh_int_oos, alpha=alpha)
self._check_predict_quantiles(quantiles, y_train, fh_int_oos, alpha)
except NotImplementedError:
pass
def test_pred_int_tag(self, estimator_instance):
"""Checks whether the capability:pred_int tag is correctly set.
Arguments
---------
estimator_instance : instance of BaseForecaster
Raises
------
ValueError - if capability:pred_int is True, but neither
predict_interval nor predict_quantiles have implemented content
this can be by direct implementation of _predict_interval/_predict_quantiles
or by defaulting to each other and/or _predict_proba
"""
f = estimator_instance
# we skip the _DelegatedForecaster, since it implements delegation methods
# which may look like the method is implemented, but in fact it is not
if isinstance(f, _DelegatedForecaster):
return None
# check which methods are implemented
implements_interval = f._has_implementation_of("_predict_interval")
implements_quantiles = f._has_implementation_of("_predict_quantiles")
implements_proba = f._has_implementation_of("_predict_proba")
pred_int_works = implements_interval or implements_quantiles or implements_proba
if not pred_int_works and f.get_class_tag("capability:pred_int", False):
raise ValueError(
f"{type(f).__name__} does not implement probabilistic forecasting, "
'but "capability:pred_int" flag has been set to True incorrectly. '
'The flag "capability:pred_int" should instead be set to False.'
)
if pred_int_works and not f.get_class_tag("capability:pred_int", False):
raise ValueError(
f"{type(f).__name__} does implement probabilistic forecasting, "
'but "capability:pred_int" flag has been set to False incorrectly. '
'The flag "capability:pred_int" should instead be set to True.'
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_score(self, estimator_instance, n_columns, fh_int_oos):
"""Check score method."""
y = _make_series(n_columns=n_columns)
y_train, y_test = temporal_train_test_split(y)
estimator_instance.fit(y_train, fh=fh_int_oos)
y_pred = estimator_instance.predict()
fh_idx = check_fh(fh_int_oos).to_indexer() # get zero based index
expected = mean_absolute_percentage_error(
y_test.iloc[fh_idx], y_pred, symmetric=False
)
# compare expected score with actual score
actual = estimator_instance.score(y_test.iloc[fh_idx], fh=fh_int_oos)
assert actual == expected
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_update_predict_single(
self, estimator_instance, n_columns, fh_int_oos, update_params
):
"""Check correct time index of update-predict."""
y = _make_series(n_columns=n_columns)
y_train, y_test = temporal_train_test_split(y)
estimator_instance.fit(y_train, fh=fh_int_oos)
y_pred = estimator_instance.update_predict_single(
y_test, update_params=update_params
)
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(y_pred.index, cutoff, fh_int_oos)
_assert_correct_columns(y_pred, y_train)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
@pytest.mark.parametrize("initial_window", TEST_WINDOW_LENGTHS_INT)
def test_update_predict_predicted_index(
self,
estimator_instance,
n_columns,
fh_int_oos,
step_length,
initial_window,
update_params,
):
"""Check predicted index in update_predict."""
y = _make_series(n_columns=n_columns, all_positive=True, index_type="datetime")
y_train, y_test = temporal_train_test_split(y)
cv = ExpandingWindowSplitter(
fh=fh_int_oos,
initial_window=initial_window,
step_length=step_length,
)
estimator_instance.fit(y_train, fh=fh_int_oos)
y_pred = estimator_instance.update_predict(
y_test, cv=cv, update_params=update_params
)
assert isinstance(y_pred, (pd.Series, pd.DataFrame))
expected = _get_expected_index_for_update_predict(
y_test, fh_int_oos, step_length, initial_window
)
actual = y_pred.index
np.testing.assert_array_equal(actual, expected)
def test__y_and_cutoff(self, estimator_instance, n_columns):
"""Check cutoff and _y."""
# check _y and cutoff is None after construction
f = estimator_instance
y = _make_series(n_columns=n_columns)
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
# check that _y and cutoff are empty when estimator is constructed
assert f._y is None
assert f.cutoff is None
# check that _y and cutoff is updated during fit
f.fit(y_train, fh=FH0)
# assert isinstance(f._y, pd.Series)
        # action: uncomment the line above
        # why: it currently fails for multivariate forecasters because _y is then a DataFrame
# solution: look for a general solution for Series and DataFrames
assert len(f._y) > 0
assert f.cutoff == y_train.index[-1]
# check data pointers
np.testing.assert_array_equal(f._y.index, y_train.index)
# check that _y and cutoff is updated during update
f.update(y_test, update_params=False)
np.testing.assert_array_equal(
f._y.index, np.append(y_train.index, y_test.index)
)
assert f.cutoff == y_test.index[-1]
def test__y_when_refitting(self, estimator_instance, n_columns):
"""Test that _y is updated when forecaster is refitted."""
y_train = _make_series(n_columns=n_columns)
estimator_instance.fit(y_train, fh=FH0)
estimator_instance.fit(y_train[3:], fh=FH0)
        # using np.squeeze to make the test flexible to shape differences like
# (50,) and (50, 1)
assert np.all(np.squeeze(estimator_instance._y) == np.squeeze(y_train[3:]))
def test_fh_attribute(self, estimator_instance, n_columns):
"""Check fh attribute and error handling if two different fh are passed."""
f = estimator_instance
y_train = _make_series(n_columns=n_columns)
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
f.predict()
np.testing.assert_array_equal(f.fh, FH0)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
# if fh is not required in fit, test this again with fh passed late
if not f.get_tag("requires-fh-in-fit"):
f.fit(y_train)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
def test_fh_not_passed_error_handling(self, estimator_instance, n_columns):
"""Check that not passing fh in fit/predict raises correct error."""
f = estimator_instance
y_train = _make_series(n_columns=n_columns)
if f.get_tag("requires-fh-in-fit"):
# if fh required in fit, should raise error if not passed in fit
with pytest.raises(ValueError):
f.fit(y_train)
else:
# if fh not required in fit, should raise error if not passed until predict
f.fit(y_train)
with pytest.raises(ValueError):
f.predict()
def test_different_fh_in_fit_and_predict_error_handling(
self, estimator_instance, n_columns
):
"""Check that fh different in fit and predict raises correct error."""
f = estimator_instance
# if fh is not required in fit, can be overwritten, should not raise error
if not f.get_tag("requires-fh-in-fit"):
return None
y_train = _make_series(n_columns=n_columns)
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
# changing fh during predict should raise error
with pytest.raises(ValueError):
f.predict(fh=FH0 + 1)
def test_hierarchical_with_exogeneous(self, estimator_instance, n_columns):
"""Check that hierarchical forecasting works, also see bug #3961.
Arguments
---------
estimator_instance : instance of BaseForecaster
n_columns : number of columns, of the endogeneous data y_train
Raises
------
Exception - if fit/predict does not complete without error
AssertionError - if forecast is not expected mtype pd_multiindex_hier,
and does not have expected row and column indices
"""
from sktime.datatypes import check_is_mtype
from sktime.datatypes._utilities import get_window
from sktime.utils._testing.hierarchical import _make_hierarchical
y_train = _make_hierarchical(
hierarchy_levels=(2, 4),
n_columns=n_columns,
min_timepoints=22,
max_timepoints=22,
index_type="period",
)
X = _make_hierarchical(
hierarchy_levels=(2, 4),
n_columns=2,
min_timepoints=24,
max_timepoints=24,
index_type="period",
)
X.columns = ["foo", "bar"]
X_train = get_window(X, lag=2)
X_test = get_window(X, window_length=2)
fh = [1, 2]
estimator_instance.fit(y=y_train, X=X_train, fh=fh)
y_pred = estimator_instance.predict(X=X_test)
assert isinstance(y_pred, pd.DataFrame)
assert check_is_mtype(y_pred, "pd_multiindex_hier")
msg = (
"returned columns after predict are not as expected. "
f"expected: {y_train.columns}. Found: {y_pred.columns}"
)
assert np.all(y_pred.columns == y_train.columns), msg
# check consistency of forecast hierarchy with training data
# some forecasters add __total levels, e.g., ReconcilerForecaster
# if = not such a forecaster; else = levels are added
if len(y_pred.index) == len(X_test.index):
# the indices should be equal iff no levels are added
assert np.all(y_pred.index == X_test.index)
else:
# if levels are added, all expected levels and times should be contained
assert set(X_test.index).issubset(y_pred.index)
| [
"[email protected]"
] | |
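The test class above exercises the generic fit/predict contract shared by all sktime forecasters. A condensed sketch of that same calling pattern on a single concrete estimator; NaiveForecaster is used here only as a convenient stand-in and is not singled out by the test module:

```python
# Minimal sketch of the contract the tests above check: fit with a horizon, then predict.
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.utils._testing.series import _make_series

y = _make_series(n_columns=1, n_timepoints=50)
y_train, y_test = temporal_train_test_split(y, train_size=0.75)

forecaster = NaiveForecaster()
forecaster.fit(y_train, fh=[1, 2, 3])   # fh may also be passed later if not required in fit
y_pred = forecaster.predict()
print(y_pred)                           # three steps ahead of y_train's cutoff
```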
2d5ccf17197699d50e0b2fa57a4243eb7ca907aa | c609730a43596a2d3303f072fc97d9cf681fac7b | /cagey/carbuisness/main_currency_supply.py | ed84e5c37083ff51e2afabd4f2216adcf44c254f | [] | no_license | sinnettluo/ChenProject | 5403311c0c7b78c484145e16d692abff00d2a110 | 0e33ecf1683afb22f1deb4bd54294c41aed8a46b | refs/heads/master | 2023-03-22T23:48:08.430178 | 2020-09-02T15:05:02 | 2020-09-02T15:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from scrapy.cmdline import execute
import sys
import os
website = "currency_supply"
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(["scrapy", "crawl", website])
| [
"[email protected]"
] | |
e59eaebb53a1dd0de0208e35718b32e92973811d | b7126fb70f72fea0e7bba6fe2fef6925302ef07b | /tceh5_opp/self_work/task1.py | 735da977c22bdb199e6944c42bfec6b0ac104bb8 | [] | no_license | duk1edev/tceh | 79cd909c5a6221a2ca77d342b917462345140faa | 21649d42488883beb58d709f4a9d1a05c75d2900 | refs/heads/master | 2021-07-12T10:20:22.330005 | 2020-04-29T09:24:08 | 2020-04-29T09:24:08 | 239,434,484 | 0 | 0 | null | 2021-03-20T03:38:26 | 2020-02-10T05:25:33 | Python | UTF-8 | Python | false | false | 1,781 | py | # 1. Create a trash-bin class whose capacity can be set differently
#    for different objects; different kinds of objects can be placed into it.
# 2. Create a packet (bag) class that objects can also be placed into; it has a capacity as well.
# 3. Any stuff class should be placeable into both the trash bin and the packet.
# 4. If the capacity is not sufficient, report that the object cannot be placed.
class Trash:
def __init__(self, set_size):
self.size = set_size
def get_obj(self, obj):
if obj.size > self.size:
print('You could not put this stuff({} size) to that trash, \n'
'trash size is {}'.format(obj.size, self.size))
else:
print('You put the {} size {} to the trash'.format(obj, obj.size))
class Packet(Trash):
def __init__(self, set_size):
self.size = set_size
def get_obj(self, obj):
if obj.size > self.size:
print('You could not put this stuff({} size) to that packet, \n'
'packet size is {}'.format(obj.size, self.size))
else:
print('You put the {} size {} to the packet'.format(obj, obj.size))
class SomeStuff:
def __init__(self, set_size):
self.size = set_size
small_trash = Trash(5)
middle_trash = Trash(10)
big_trash = Trash(50)
small_packet = Packet(3)
middle_packet = Packet(5)
big_packet = Packet(10)
apple = SomeStuff(25)
print(apple.size)
garbage = SomeStuff(50)
small_trash.get_obj(apple)
big_trash.get_obj(garbage)
big_packet.get_obj(garbage) | [
"[email protected]"
] | |
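The `Packet` class in the row above repeats `Trash.get_obj` almost verbatim. One possible refactor (a suggestion only, not part of the original exercise) keeps a single capacity check and varies just the container name:

```python
# Suggested refactor: one shared capacity check for every container type.
class Container:
    name = 'container'

    def __init__(self, set_size):
        self.size = set_size

    def get_obj(self, obj):
        if obj.size > self.size:
            print('You could not put this stuff ({} size) into the {}, '
                  'its size is {}'.format(obj.size, self.name, self.size))
        else:
            print('You put an object of size {} into the {}'.format(obj.size, self.name))


class Trash(Container):
    name = 'trash'


class Packet(Container):
    name = 'packet'
```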
35614a4b8e4a335c54fd174d3cf65ff29c823483 | db9ff8accaa4d8d4a96d3f9122c0fdc5e83ea2a5 | /test/test_price_quantity.py | 12635c2d23b1dcacf3ca517e059fcaba37c32bd5 | [] | no_license | agtt/ebay-openapi-inventory | 4754cdc8b6765acdb34f6b8f89b017ccbc6b1d2b | d990c26f16e811431892ac6401c73c4599c2d414 | refs/heads/master | 2023-06-17T10:53:43.204075 | 2021-07-14T18:32:38 | 2021-07-14T18:32:38 | 386,039,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | """
Inventory API
The Inventory API is used to create and manage inventory, and then to publish and manage this inventory on an eBay marketplace. There are also methods in this API that will convert eligible, active eBay listings into the Inventory API model. # noqa: E501
The version of the OpenAPI document: 1.13.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.offer_price_quantity import OfferPriceQuantity
from openapi_client.model.ship_to_location_availability import ShipToLocationAvailability
globals()['OfferPriceQuantity'] = OfferPriceQuantity
globals()['ShipToLocationAvailability'] = ShipToLocationAvailability
from openapi_client.model.price_quantity import PriceQuantity
class TestPriceQuantity(unittest.TestCase):
"""PriceQuantity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPriceQuantity(self):
"""Test PriceQuantity"""
# FIXME: construct object with mandatory attributes with example values
# model = PriceQuantity() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
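The FIXME in the test above asks for construction with example values. A hedged sketch of what that body might look like; the field names (`sku`, `ship_to_location_availability`, `quantity`) are assumptions based on the imports at the top of the file and the eBay Inventory API documentation, and should be checked against the generated model before use:

```python
# Assumed field names; verify against the generated PriceQuantity model before relying on them.
def testPriceQuantity(self):
    """Test PriceQuantity"""
    availability = ShipToLocationAvailability(quantity=5)
    model = PriceQuantity(
        sku="DEMO-SKU-1",                            # placeholder SKU
        ship_to_location_availability=availability,
    )
    self.assertEqual(model.sku, "DEMO-SKU-1")
```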
bcb87b977ae9f3dda477d957cc6ee78f8f5cdf2e | fbf6fcd3720d1a5f1f01f91c7ecad68f1b296924 | /tools/test_modules.py | 85199d0138cfbbde70f10f93fa006cc06675053a | [
"MIT"
] | permissive | uvavision/DrillDown | 9602ddabd712d14df10e7026db3d7e62e7e4edba | ad0ef773b3af0859e48ea302f4f1d87215b26cef | refs/heads/master | 2022-04-28T21:42:06.366515 | 2022-04-15T12:14:25 | 2022-04-15T12:14:25 | 214,220,415 | 11 | 4 | null | null | null | null | UTF-8 | Python | false | false | 14,358 | py | #!/usr/bin/env python
import _init_paths
import os, sys, cv2, json
import math, PIL, cairo
import numpy as np
import pickle, random
import os.path as osp
from time import time
from config import get_config
from copy import deepcopy
from glob import glob
import matplotlib.pyplot as plt
from vocab import Vocabulary
from utils import *
#######################################################################
from modules.text_encoder import TextEncoder
from modules.region_encoder import RegionEncoder
from modules.image_encoder import ImageEncoder
from modules.context_encoder import ContextEncoder
#######################################################################
from modules.attention import Attention
from modules.tirg_rnn import TIRGRNN
from modules.grounding_loss import GroundingLoss
#######################################################################
from modules.image_model import ImageModel
from modules.region_model import RegionModel
from modules.paragraph_model import ParagraphModel
from modules.image_hred_model import ImageHREDModel
from modules.region_grounding_model import RegionGroundingModel
#######################################################################
import torch, torchtext
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from datasets.vg import vg
from datasets.loader import region_loader, region_collate_fn
from datasets.loader import caption_loader, caption_collate_fn
from datasets.loader import paragraph_loader, paragraph_collate_fn
def test_attention(config):
attention = Attention(config, config.attn_type, 1024, 1024)
h_s = torch.randn(7, 36, 1024)
h_t = torch.randn(7, 5, 1024)
m_s = torch.randn(7, 36).random_(0, 2)
context, scores = attention(h_t, h_s, m_s)
print(context.size(), scores.size())
def test_tirg_rnn(config):
net = TIRGRNN(config, config.n_feature_dim, config.n_feature_dim, config.n_rnn_layers, dropout=0.1)
input_var = np.random.randn(2, 3, config.n_feature_dim)
prev_hidden = np.random.randn(config.n_rnn_layers, 2, config.n_feature_dim)
input_var_th = torch.from_numpy(input_var).float()
prev_hidden_th = torch.from_numpy(prev_hidden).float()
last_layer_hiddens, last_step_hiddens = net(input_var_th, prev_hidden_th)
print('last_layer_hiddens.size()', last_layer_hiddens.size())
print('last_step_hiddens.size()', last_step_hiddens.size())
def test_region_encoder(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionEncoder(config)
for cnt, batched in enumerate(loader):
region_feats = batched['region_feats'].float()
region_clses = batched['region_clses'].long()
print('region_feats', region_feats.size())
print('region_clses', region_clses.size())
img_feats, masked_feats, mm = net(region_feats, region_clses)
print('img_feats', img_feats.size())
if config.subspace_alignment_mode > 0:
print('masked_feats', masked_feats.size())
print('mm', mm.size())
break
def test_image_encoder(config):
db = vg(config, 'test')
loaddb = caption_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=caption_collate_fn)
net = ImageEncoder(config)
for cnt, batched in enumerate(loader):
images = batched['images'].float()
print('images', images.size())
feats = net(images)
print('features', feats.size())
break
def test_text_encoder(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = TextEncoder(config)
for cnt, batched in enumerate(loader):
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].float()
bsize, slen, fsize = sent_inds.size()
print('sent_inds', sent_inds.size())
print('sent_msks', sent_msks.size())
f1, f2, h = net(sent_inds.view(bsize*slen, fsize), sent_msks.view(bsize*slen, fsize))
print(f1.size(), f2.size(), h.size())
break
def test_image_model(config):
db = vg(config, 'test')
loaddb = caption_loader(db)
loader = DataLoader(loaddb, batch_size=config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=caption_collate_fn)
net = ImageModel(config)
for cnt, batched in enumerate(loader):
images = batched['images'].float()
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].long()
img_feats, txt_feats = net(sent_inds, sent_msks, None, images)
print('images', images.size())
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
break
def test_grounding_loss(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionModel(config)
criterion = GroundingLoss(config)
for cnt, batched in enumerate(loader):
scene_inds = batched['scene_inds'].long()[:config.batch_size]
sent_inds = batched['sent_inds'].long()[:config.batch_size]
sent_msks = batched['sent_msks'].long()[:config.batch_size]
region_feats = batched['region_feats'].float()[:config.batch_size]
region_clses = batched['region_clses'].long()[:config.batch_size]
region_masks = batched['region_masks'].float()[:config.batch_size]
src_region_feats = batched['region_feats'].float()[config.batch_size:2*config.batch_size]
src_region_clses = batched['region_clses'].long()[config.batch_size:2*config.batch_size]
src_region_masks = batched['region_masks'].float()[config.batch_size:2*config.batch_size]
img_feats, masked_feats, txt_feats, subspace_masks, sample_logits, sample_indices = \
net(scene_inds, sent_inds, sent_msks,
src_region_feats, src_region_clses, src_region_masks,
region_feats, region_clses, region_masks,
config.explore_mode)
masked_feats = img_feats
sim1 = criterion.compute_batch_mutual_similarity(masked_feats, region_masks, txt_feats)
sim2 = criterion.debug_compute_batch_mutual_similarity(masked_feats, region_masks, txt_feats)
print('sim1', sim1.size())
print('sim2', sim2.size())
print('diff', torch.sum(torch.abs(sim1-sim2)))
txt_masks = txt_feats.new_ones(txt_feats.size(0), txt_feats.size(1))
losses = criterion.forward_loss(masked_feats, region_masks, txt_feats, txt_masks, config.loss_reduction_mode)
print('losses', losses.size())
break
def test_paragraph_model(config):
db = vg(config, 'test')
loaddb = paragraph_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=paragraph_collate_fn)
net = ParagraphModel(config)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
start = time()
scene_inds = batched['scene_inds'].long()[:config.batch_size]
sent_inds = batched['sent_inds'].long()[:config.batch_size]
sent_msks = batched['sent_msks'].long()[:config.batch_size]
region_feats = batched['region_feats'].float()[:config.batch_size]
region_clses = batched['region_clses'].long()[:config.batch_size]
region_masks = batched['region_masks'].float()[:config.batch_size]
img_feats, txt_feats = net(sent_inds, sent_msks, region_feats, region_clses, region_masks)
losses = net.loss(img_feats, region_masks, txt_feats.unsqueeze(1))
print('losses', losses.size(), torch.mean(losses))
metrics, cache_results = net.evaluate(img_feats, region_masks, txt_feats.unsqueeze(1))
print('metrics', metrics)
print('sent_inds', sent_inds.size())
print('sent_msks', sent_msks.size())
print('region_feats', region_feats.size())
print('region_clses', region_clses.size())
print('region_masks', region_masks.size())
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
print('time:', time() - start)
break
def test_region_model(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionModel(config)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
start = time()
scene_inds = batched['scene_inds'].long()[:config.batch_size]
sent_inds = batched['sent_inds'].long()[:config.batch_size]
sent_msks = batched['sent_msks'].long()[:config.batch_size]
region_feats = batched['region_feats'].float()[:config.batch_size]
region_clses = batched['region_clses'].long()[:config.batch_size]
region_masks = batched['region_masks'].float()[:config.batch_size]
src_region_feats = batched['region_feats'].float()[config.batch_size:2*config.batch_size]
src_region_clses = batched['region_clses'].long()[config.batch_size:2*config.batch_size]
src_region_masks = batched['region_masks'].float()[config.batch_size:2*config.batch_size]
img_feats, masked_feats, txt_feats, subspace_masks, sample_logits, sample_indices = \
net(scene_inds, sent_inds, sent_msks,
src_region_feats, src_region_clses, src_region_masks,
region_feats, region_clses, region_masks,
config.explore_mode)
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
if config.subspace_alignment_mode > 0:
print('masked_feats', masked_feats.size())
print('subspace_masks', subspace_masks.size())
if config.instance_dim > 1:
print('sample_logits', sample_logits.size())
print('sample_indices', sample_indices.size())
print('time:', time() - start)
break
def test_image_hred_model(config):
db = vg(config, 'train')
loaddb = caption_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=caption_collate_fn)
net = ImageHREDModel(config)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
images = batched['images'].float()
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].long()
img_feats, txt_feats = net(sent_inds, sent_msks, None, images)
print('images', images.size())
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
loss = net.forward_loss(img_feats, txt_feats)
print(loss)
metrics, caches = net.evaluate(img_feats, txt_feats)
print(metrics)
break
def test_region_grounding_model(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionGroundingModel(config)
if config.pretrained is not None:
pretrained_path = osp.join(config.data_dir, 'caches/region_grounding_ckpts', config.pretrained+'.pkl')
states = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
net.load_state_dict(states['state_dict'], strict=False)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
scene_inds = batched['scene_inds'].long()
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].long()
region_feats = batched['region_feats'].float()
region_clses = batched['region_clses'].long()
region_masks = batched['region_masks'].float()
img_feats, masked_feats, txt_feats, subspace_masks, sample_logits, sample_indices = \
net(scene_inds, sent_inds, sent_msks, None, None, None, region_feats, region_clses, region_masks, config.explore_mode)
if config.instance_dim > 1:
print(sample_indices[0])
# print('sample_logits', sample_logits.size())
# print('sample_indices', sample_indices.size())
txt_masks = txt_feats.new_ones(txt_feats.size(0), txt_feats.size(1))
losses = net.final_loss(img_feats, masked_feats, region_masks, txt_feats, txt_masks, sample_logits, sample_indices)
print('losses', losses.size(), torch.mean(losses))
if config.subspace_alignment_mode > 0:
metrics, cache_results = net.evaluate(masked_feats, region_masks, txt_feats)
else:
metrics, cache_results = net.evaluate(img_feats, region_masks, txt_feats)
print('metrics', metrics)
print('txt_feats', txt_feats.size())
print('img_feats', img_feats.size())
break
if __name__ == '__main__':
config, unparsed = get_config()
np.random.seed(config.seed)
random.seed(config.seed)
torch.manual_seed(config.seed)
if(config.cuda):
torch.cuda.manual_seed_all(config.seed)
prepare_directories(config)
# test_attention(config)
# test_softmax_rnn(config)
# test_image_model(config)
# test_region_model(config)
# test_region_grounding_model(config)
test_paragraph_model(config)
# test_image_hred_model(config)
# test_region_encoder(config)
# test_image_encoder(config)
# test_text_encoder(config)
# test_tirg_rnn(config)
# test_grounding_loss(config) | [
"[email protected]"
] | |
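The module tests above all follow the same pattern: wrap a dataset in a loader class, hand it to a `DataLoader` with a task-specific `collate_fn`, and feed one batch through the network. A stripped-down, repo-independent sketch of that pattern; the toy tensor shapes are arbitrary, not DrillDown's real feature sizes:

```python
# Generic Dataset + custom collate_fn pattern used throughout the tests above.
import torch
from torch.utils.data import Dataset, DataLoader


class ToyRegions(Dataset):
    def __len__(self):
        return 8

    def __getitem__(self, idx):
        return {"region_feats": torch.randn(36, 2048), "scene_ind": idx}


def toy_collate_fn(batch):
    return {
        "region_feats": torch.stack([item["region_feats"] for item in batch]),
        "scene_inds": torch.tensor([item["scene_ind"] for item in batch]),
    }


loader = DataLoader(ToyRegions(), batch_size=4, collate_fn=toy_collate_fn)
batched = next(iter(loader))
print(batched["region_feats"].shape)  # torch.Size([4, 36, 2048])
```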
49ad24efef53d23c86760ee96c78f87e3dbe2cf5 | 7200d065030f2daf00a5249e9e4fe569438c78c7 | /scrapers/dizilab_scraper.py | 76713de8e84af6b17220f3eaed0295e7b7a714f8 | [] | no_license | matt2005/salts | c765b037be1a2bb0e486ae9b30eceaf2b7c3bf14 | 5f71bc71e7b0b480f40d948d5568604dd181b6ad | refs/heads/master | 2020-12-31T04:16:45.574380 | 2015-12-07T22:57:31 | 2015-12-07T22:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,957 | py | """
    SALTS XBMC Addon
    Copyright (C) 2014 tknorris

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import urllib
from salts_lib import kodi
from salts_lib import dom_parser
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
BASE_URL = 'http://dizilab.com'
class Dizilab_Scraper(scraper.Scraper):
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])

    @classmethod
    def get_name(cls):
        return 'Dizilab'

    def resolve_link(self, link):
        return link

    def format_source_label(self, item):
        label = '[%s] %s ' % (item['quality'], item['host'])
        return label

    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            for match in re.finditer('{\s*file\s*:\s*"([^"]+)', html):
                stream_url = match.group(1)
                if 'dizlab' in stream_url.lower():
                    continue
                hoster = {'multi-part': False, 'host': self._get_direct_hostname(stream_url), 'class': self, 'quality': self._gv_get_quality(stream_url), 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                hosters.append(hoster)
        return hosters

    def get_url(self, video):
        return super(Dizilab_Scraper, self)._default_get_url(video)

    def _get_episode_url(self, show_url, video):
        episode_pattern = 'class="episode"\s+href="([^"]+/sezon-%s/bolum-%s)"' % (video.season, video.episode)
        title_pattern = 'class="episode-name"\s+href="(?P<url>[^"]+)">(?P<title>[^<]+)'
        return super(Dizilab_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern)

    def search(self, video_type, title, year):
        search_url = urlparse.urljoin(self.base_url, '/arsiv?limit=&tur=&orderby=&ulke=&order=&yil=&dizi_adi=')
        search_url += urllib.quote_plus(title)
        html = self._http_get(search_url, cache_limit=8)
        results = []
        for item in dom_parser.parse_dom(html, 'div', {'class': 'tv-series-single'}):
            try:
                url = re.search('href="([^"]+)', item).group(1)
            except:
                url = ''
            try:
                match_year = re.search('<span>\s*(\d{4})\s*</span>', item).group(1)
            except:
                match_year = ''
            try:
                match_title = dom_parser.parse_dom(item, 'a', {'class': 'title'})
                match_title = re.search('([^>]+)$', match_title[0]).group(1)
                match_title = match_title.strip()
            except:
                match_title = ''

            if url and match_title and (not year or not match_year or year == match_year):
                result = {'url': self._pathify_url(url), 'title': match_title, 'year': ''}
                results.append(result)

        return results
| [
"[email protected]"
] | |
5804b448d279b66e3077be6b2016ef4e6230d463 | 46279163a543cd8820bdc38133404d79e787c5d2 | /benchmarks/tensorexpr/reduction.py | bc3e4e158a1750a0c9732c91297461f01ff5126b | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | erwincoumans/pytorch | 31738b65e7b998bfdc28d0e8afa7dadeeda81a08 | ae9f39eb580c4d92157236d64548b055f71cf14b | refs/heads/master | 2023-01-23T10:27:33.628897 | 2020-12-06T01:22:00 | 2020-12-06T01:23:40 | 318,930,000 | 5 | 1 | NOASSERTION | 2020-12-06T01:58:57 | 2020-12-06T01:58:56 | null | UTF-8 | Python | false | false | 5,706 | py | from . import benchmark
class ReduceBench(benchmark.Benchmark):
    def __init__(self, mode, device, dtype, case, M, N, K):
        super().__init__(mode, device, dtype)
        self.case = case
        self.M = M
        self.N = N
        self.K = K

        self.inputs = [self.randn(
            [M, N, K], device=device, dtype=dtype, requires_grad=self.requires_grad
        )]
        if case == "row":
            self.dims = [1, 2]
        elif case == "mid":
            self.dims = [0, 2]
        elif case == "col":
            self.dims = [0, 1]
        else:
            raise ValueError("invalid case: %s" % case)

    def forward(self, inputs):
        x = self.add(inputs, 0.001)
        y = self.sum(x, self.dims)
        return y

    def config(self):
        return [self.M, self.N, self.K]

    @staticmethod
    def default_configs():
        return [
            # [512, 512, 512],
            [512, 64, 512],
        ]

    @staticmethod
    def module():
        return "reduce"

    def memory_workload(self):
        if self.mode == "fwd":
            sol_count = 1
            algorithmic_count = 1
        else:
            sol_count = (1) + (1)
            algorithmic_count = 1 + 1

        buffer_size = self.M * self.N * self.K
        return {
            "sol": buffer_size * sol_count,
            "algorithmic": buffer_size * algorithmic_count,
        }


class ReduceRowBench(ReduceBench):
    def __init__(self, mode, device, dtype, M, N, K):
        super(ReduceRowBench, self).__init__(mode, device, dtype, "row", M, N, K)

    @staticmethod
    def module():
        return "reduce_row"


class ReduceMidBench(ReduceBench):
    def __init__(self, mode, device, dtype, M, N, K):
        super(ReduceMidBench, self).__init__(mode, device, dtype, "mid", M, N, K)

    @staticmethod
    def module():
        return "reduce_mid"


class ReduceColBench(ReduceBench):
    def __init__(self, mode, device, dtype, M, N, K):
        super(ReduceColBench, self).__init__(mode, device, dtype, "col", M, N, K)

    @staticmethod
    def module():
        return "reduce_col"


class Reduce2DBench(benchmark.Benchmark):
    '''
    A benchmark class to validate 2 dimensional reduction performance.
    Only a simple add is fused to induce the fuser and isolate reduction perf.
    '''
    def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
        super().__init__(mode, device, dtype)
        self.red_dim = red_dim
        self.dim0 = dim0
        self.dim1 = dim1
        self.inputs = [self.randn(
            [dim0, dim1], device=device, dtype=dtype, requires_grad=self.requires_grad
        )]

        if red_dim != 0 and red_dim != 1:
            raise ValueError("invalid reduction dimension: {}".format(red_dim))

    def forward(self, inputs):
        x = self.add(inputs, 0.001)
        y = self.sum(x, [self.red_dim])
        return y

    def config(self):
        return [self.red_dim, self.dim0, self.dim1]

    @staticmethod
    def default_configs():
        return [
            [1, 640, 524288],
        ]

    @staticmethod
    def module():
        return "reduce2d"

    @staticmethod
    def input_iterable():
        return True

    def memory_workload(self):
        assert self.mode == "fwd", "Only the forward operation is modeled!"

        buffer_size = self.dim0 * self.dim1
        if self.red_dim == 0:
            buffer_size += self.dim1
        else:
            buffer_size += self.dim0
        return {
            "sol": buffer_size,
            "algorithmic": buffer_size,
        }
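

# --- Illustrative sketch (not part of the original benchmark file) ---
# A minimal eager-mode PyTorch equivalent of what these reduce benchmarks
# time: a scalar add followed by a sum over the reduction dims. The helper
# name is an assumption for illustration; shapes follow the default configs.
def _eager_reduce_example():
    import torch

    x = torch.randn(512, 64, 512)         # ReduceBench default [M, N, K]
    row = (x + 0.001).sum(dim=[1, 2])      # "row" case -> shape (512,)

    x2 = torch.randn(640, 1024)            # smaller stand-in for [640, 524288]
    inner = (x2 + 0.001).sum(dim=1)        # Reduce2DInnerBench (red_dim == 1)
    return row, inner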
class Reduce2DInnerBench(Reduce2DBench):
    def __init__(self, mode, device, dtype, dim0, dim1):
        super(Reduce2DInnerBench, self).__init__(mode, device, dtype, 1, dim0, dim1)

    @staticmethod
    def module():
        return "reduce2d_inner"


class Reduce2DOuterBench(Reduce2DBench):
    def __init__(self, mode, device, dtype, dim0, dim1):
        super(Reduce2DOuterBench, self).__init__(mode, device, dtype, 0, dim0, dim1)

    @staticmethod
    def module():
        return "reduce2d_outer"


benchmark.register_benchmark_class(ReduceRowBench)
benchmark.register_benchmark_class(ReduceMidBench)
benchmark.register_benchmark_class(ReduceColBench)
benchmark.register_benchmark_class(Reduce2DInnerBench)
benchmark.register_benchmark_class(Reduce2DOuterBench)


class DynamicReduce2DBench(benchmark.DynamicShape, Reduce2DBench):
    '''
    A benchmark class to validate 2 dimensional reduction performance.
    Only a simple add is fused to induce the fuser and isolate reduction perf.
    '''
    def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
        benchmark.DynamicShape.__init__(self)
        Reduce2DBench.__init__(self, mode, device, dtype, red_dim, dim0, dim1)

    def instantiate_input(self):
        dim0, dim1 = self.rand_shape([self.dim0, self.dim1])

        self.inputs = [self.randn(
            [dim0, dim1], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
        )]

    @staticmethod
    def module():
        return "dynamicreduce2d"


class DynamicReduce2DInnerBench(DynamicReduce2DBench):
    def __init__(self, mode, device, dtype, dim0, dim1):
        super().__init__(mode, device, dtype, 1, dim0, dim1)

    @staticmethod
    def module():
        return "reduce2d_dynamic_inner"


class DynamicReduce2DOuterBench(DynamicReduce2DBench):
    def __init__(self, mode, device, dtype, dim0, dim1):
        super().__init__(mode, device, dtype, 0, dim0, dim1)

    @staticmethod
    def module():
        return "reduce2d_dynamic_outer"


benchmark.register_benchmark_class(DynamicReduce2DInnerBench)
benchmark.register_benchmark_class(DynamicReduce2DOuterBench)
| [
"[email protected]"
] | |
084d8ca89f293bf5398b5ab07d7076af43a5fb8d | 590a0c3a7254b8dac85ab18072dbf766aca7af93 | /Python-Exercise-100/python-exercise-example07.py | 01777ba168c7f8e9c5ee7615fd7642d9f407aaf6 | [
"MIT"
] | permissive | MiracleWong/PythonPractice | 90c66d29a9cdf0200d3dbac946d05f12dd856e91 | 40aecd84045ad18f6aff95d5b8be8e352ca0a726 | refs/heads/master | 2021-08-15T17:19:51.543013 | 2021-06-15T03:59:51 | 2021-06-15T03:59:51 | 98,256,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Source: http://www.runoob.com/python/python-exercise-example7.html
# Copy the contents of list a into a new list b.

a = [1, 2, 4, 5, 5, 6, 7, 7]
b = a[:]   # slicing creates a new (shallow) copy of the list
print(b)
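
# --- Illustrative addition (not in the original exercise file) ---
# Because b = a[:] creates a new list object, later changes to a do not
# affect b; the appended value below is arbitrary.
a.append(9)
print(a)  # [1, 2, 4, 5, 5, 6, 7, 7, 9]
print(b)  # [1, 2, 4, 5, 5, 6, 7, 7]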
| [
"[email protected]"
] | |
69bef76ac68fc60f87f5f5e549027b0bcfae66f7 | 91a2ecfaf5dc6c917ec2fda31f56291103f68ceb | /tests/protos/test_ctc_loss.py | 6da44120062bdda6381ed74e2c0f8225fffc8ae4 | [
"BSD-3-Clause"
] | permissive | MyrtleSoftware/myrtlespeech | 635d1d16d1bd60fb07a4d30edbf9acb61786c13f | 8522048fd37744ffa06827a0cbd202b839a15453 | refs/heads/master | 2021-07-16T14:55:00.479967 | 2020-03-20T14:33:15 | 2020-03-20T14:33:15 | 192,501,300 | 12 | 1 | NOASSERTION | 2020-03-20T14:33:17 | 2019-06-18T08:44:33 | Python | UTF-8 | Python | false | false | 1,042 | py | from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import hypothesis.strategies as st
from myrtlespeech.protos import ctc_loss_pb2
from tests.protos.utils import all_fields_set
# Fixtures and Strategies -----------------------------------------------------
@st.composite
def ctc_losses(
    draw, return_kwargs: bool = False, alphabet_len: Optional[int] = None
) -> Union[
    st.SearchStrategy[ctc_loss_pb2.CTCLoss],
    st.SearchStrategy[Tuple[ctc_loss_pb2.CTCLoss, Dict]],
]:
    """Returns a SearchStrategy for CTCLoss plus maybe the kwargs."""
    kwargs = {}

    end = 1000
    if alphabet_len is not None:
        end = max(0, alphabet_len - 1)
    kwargs["blank_index"] = draw(st.integers(0, end))

    kwargs["reduction"] = draw(
        st.sampled_from(ctc_loss_pb2.CTCLoss.REDUCTION.values())
    )

    all_fields_set(ctc_loss_pb2.CTCLoss, kwargs)

    ctc_loss = ctc_loss_pb2.CTCLoss(**kwargs)

    if not return_kwargs:
        return ctc_loss
    return ctc_loss, kwargs
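

# --- Hedged usage sketch (not part of the original test module) ---
# One way the strategy above might be consumed in a Hypothesis test; the test
# name and the property checked are illustrative assumptions only.
from hypothesis import given


@given(ctc_loss=ctc_losses())
def test_ctc_loss_blank_index_is_non_negative(ctc_loss):
    assert ctc_loss.blank_index >= 0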
| [
"[email protected]"
] | |
6b51b24a86d97f35f69a59c8dbc0e913bf0876c9 | cdf9ba7b329d66a1b664d505332d4a441f6bf075 | /benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_pinned/cmp_mcf/power.py | ba961d5f8f3483e208416648d0c7e4f2c4795df5 | [
"MIT"
] | permissive | TugberkArkose/MLScheduler | 3247c0bbc11c09261a3bad777f3940a465e5f15a | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | refs/heads/master | 2021-03-27T19:11:44.207818 | 2020-03-19T11:32:08 | 2020-03-19T11:32:08 | 92,518,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,592 | py | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.115405,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.19984,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.114614,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.429859,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.114073,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.08077,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00418352,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.030252,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0309397,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.030252,
'Execution Unit/Register Files/Runtime Dynamic': 0.0351232,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0731013,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.213101,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.28615,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000506958,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000506958,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000440908,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000170326,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000444452,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00189928,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00488396,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0297431,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.89192,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0581824,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.101021,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.20366,
'Instruction Fetch Unit/Runtime Dynamic': 0.19573,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0379509,
'L2/Runtime Dynamic': 0.00918222,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.39798,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.571277,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0375566,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0375566,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.57605,
'Load Store Unit/Runtime Dynamic': 0.79405,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0926082,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.185217,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0328669,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0334364,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.117632,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00953991,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.332951,
'Memory Management Unit/Runtime Dynamic': 0.0429763,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 16.7931,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.00590118,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0622644,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.0681656,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 2.39625,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0870089,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.140342,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.07084,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.298191,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0995127,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01747,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00364955,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0263907,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0269906,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0263907,
'Execution Unit/Register Files/Runtime Dynamic': 0.0306402,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0555979,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.162075,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09897,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000458365,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000458365,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000402941,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000158012,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000387723,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00170739,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00426236,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0259468,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.65044,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.050756,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0881269,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.94905,
'Instruction Fetch Unit/Runtime Dynamic': 0.170799,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0321542,
'L2/Runtime Dynamic': 0.007576,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24982,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.497683,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0327632,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0327632,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.40453,
'Load Store Unit/Runtime Dynamic': 0.692023,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0807884,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.161577,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0286721,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0291546,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.102618,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00832216,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.307981,
'Memory Management Unit/Runtime Dynamic': 0.0374767,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.3007,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0039256,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0458316,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0497572,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.0566,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0869202,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.140199,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0707678,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.297887,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0994127,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01728,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00364582,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0263642,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0269631,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0263642,
'Execution Unit/Register Files/Runtime Dynamic': 0.0306089,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.055542,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.16191,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09847,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000457936,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000457936,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000402566,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000157866,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000387327,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00170576,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00425829,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0259203,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.64875,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0507027,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0880371,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.94729,
'Instruction Fetch Unit/Runtime Dynamic': 0.170624,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0321237,
'L2/Runtime Dynamic': 0.00756408,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24879,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.497168,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0327299,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0327298,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.40335,
'Load Store Unit/Runtime Dynamic': 0.691309,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0807063,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.161412,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0286429,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0291248,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.102513,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00831343,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.307826,
'Memory Management Unit/Runtime Dynamic': 0.0374383,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.2973,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0039216,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0457848,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0497064,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.05511,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0868907,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.140151,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0707437,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.297786,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0993778,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01721,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00364458,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.026355,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0269539,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.026355,
'Execution Unit/Register Files/Runtime Dynamic': 0.0305985,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0555225,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.161855,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09831,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000457793,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000457793,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000402441,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000157818,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000387195,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00170522,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00425693,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0259115,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.64819,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0506849,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0880071,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.9467,
'Instruction Fetch Unit/Runtime Dynamic': 0.170566,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0321135,
'L2/Runtime Dynamic': 0.00756057,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24844,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.496997,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0327187,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0327186,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.40295,
'Load Store Unit/Runtime Dynamic': 0.691073,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0806787,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.161357,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0286331,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.029115,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.102479,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00831051,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.307774,
'Memory Management Unit/Runtime Dynamic': 0.0374255,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.2962,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00392027,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0457692,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0496895,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.05462,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 5.739548837198542,
'Runtime Dynamic': 5.739548837198542,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.280118,
'Runtime Dynamic': 0.0738874,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 59.9674,
'Peak Power': 93.0796,
'Runtime Dynamic': 8.63648,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 59.6873,
'Total Cores/Runtime Dynamic': 8.56259,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.280118,
'Total L3s/Runtime Dynamic': 0.0738874,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | [
"[email protected]"
] | |
c03744b393ec5f98ff295969921ddf3de80aecaf | 9c52998e7d92640b82284e7e85bf69205fc94d73 | /SeleniumLearningFiles/SeleniumLearning01/webdrivertest/web04.py | ec6aa9036031cb6a57f01829bff64e05c5c91ab3 | [] | no_license | github653224/GitProjects_SeleniumLearing | b0c57d27fa48b0cd7475f8d8e8b19c57160e65fc | 818b573a3b0f18def98610e59e3c0c6500a675bc | refs/heads/master | 2021-07-20T05:54:46.392948 | 2017-10-27T12:53:41 | 2017-10-27T12:53:41 | 107,764,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time
from random import randint
# Generate a 4-digit verification code and compare it with the user's input.
verify = randint(1000, 9999)
print(u"Generated verification code: %d " % verify)
number = input("Enter the verification code: ")
print(number)
number = int(number)
if number == verify:
    print("Login successful!!")
elif number == 132741:
    # hard-coded master code that also passes the check
    print("Login successful!!")
else:
    print("Input error")
| [
"[email protected]"
] | |
31bda42177c67668b02106a2e58888a61630ed09 | 99e1a15d8f605be456f17608843c309dd8a3260f | /src/Battle/Attack/Steps/Test/suite.py | a11d3df523d7d71da56074941becf66d934c86c9 | [] | no_license | sgtnourry/Pokemon-Project | e53604096dcba939efca358e4177374bffcf0b38 | 3931eee5fd04e18bb1738a0b27a4c6979dc4db01 | refs/heads/master | 2021-01-17T23:02:25.910738 | 2014-04-12T17:46:27 | 2014-04-12T17:46:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | import unittest
from Battle.Attack.Steps.Test.remove_pp_step_test import suite as remove_pp_step_suite
from Battle.Attack.Steps.Test.handle_miss_effects_step_test import suite as handle_miss_effects_step_suite
from Battle.Attack.Steps.Test.handle_contact_step_test import suite as handle_contact_step_suite
from Battle.Attack.Steps.Test.effects_step_test import suite as effects_step_suite
from Battle.Attack.Steps.Test.damage_step_test import suite as damage_step_suite
from Battle.Attack.Steps.Test.announcement_step_test import suite as announcement_step_suite
from Battle.Attack.Steps.Test.hit_step_test import suite as hit_step_suite
from Battle.Attack.Steps.Test.precondition_step_test import suite as precondition_step_suite
suites = [precondition_step_suite,
hit_step_suite,
announcement_step_suite,
damage_step_suite,
effects_step_suite,
handle_contact_step_suite,
handle_miss_effects_step_suite,
remove_pp_step_suite]
suite = unittest.TestSuite(suites) | [
"[email protected]"
] | |
6843646e4bfc8dd6d189f4981122d415672c1403 | 8937c4d452c98699610923f76a395a2247f576df | /preprocess/crop.py | 5b05cb13ad998812b4d8e78a1b99878b47e16046 | [] | no_license | mistycheney/MouseBrainAtlas | 812b204af06ed303f3c12d5c81edef50c8d9d1ed | bffbaa1ede9297084e64fc197716e63d5cb54275 | refs/heads/master | 2020-04-11T13:44:09.632311 | 2018-11-20T22:32:15 | 2018-11-20T22:32:15 | 20,377,173 | 3 | 9 | null | 2017-03-15T19:39:27 | 2014-06-01T12:42:08 | Jupyter Notebook | UTF-8 | Python | false | false | 3,884 | py | #! /usr/bin/env python
import os
import argparse
import sys
import time
import numpy as np
from multiprocess import Pool
sys.path.append(os.path.join(os.environ['REPO_DIR'], 'utilities'))
from utilities2015 import *
from metadata import *
from data_manager import *
from learning_utilities import *
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='')
parser.add_argument("stack", type=str, help="Brain name")
parser.add_argument("versions", type=str, help="json encoded str list")
parser.add_argument("resolutions", type=str, help="json encoded str list")
parser.add_argument("prep_in", type=str, help="")
parser.add_argument("prep_out", type=str, help="")
parser.add_argument("input_crop_json", type=str, help="")
parser.add_argument("output_crop_json", type=str, help="")
parser.add_argument("n_jobs", type=int, help="", default=1)
args = parser.parse_args()
versions = json.loads(args.versions)
if isinstance(versions, str):
versions = [versions]
else:
assert isinstance(versions, list), "Argument versions must be str or str list."
resolutions = json.loads(args.resolutions)
if isinstance(resolutions, str):
resolutions = [resolutions]
else:
assert isinstance(resolutions, list), "Argument resolutions must be str or str list."
n_jobs = args.n_jobs
def crop(stack, img_name, version, resol, x,y,w,h):
input_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=5, resol=resol, version=version, fn=img_name)
output_fp = DataManager.get_image_filepath_v2(stack=stack, fn=img_name, prep_id=2, version=version, resol=resol)
img = imread(input_fp)
save_data(img[y:y+h, x:x+w], output_fp)
for version in versions:
for resol in resolutions:
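        # NOTE: the thumbnail-space crop box (x_tb, y_tb, w_tb, h_tb) is assumed to be
        # defined before this loop (presumably parsed from args.input_crop_json); it is
        # not defined elsewhere in this script.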
if resol == 'raw':
x = x_tb * 32
y = y_tb * 32
w = w_tb * 32
h = h_tb * 32
elif resol == 'thumbnail':
x = x_tb
y = y_tb
w = w_tb
h = h_tb
else:
raise
# input_dir = DataManager.get_image_dir_v2(stack=stack, prep_id=5, version=version, resol='raw')
out_dir = DataManager.get_image_dir_v2(stack=stack, prep_id=2, resol=resol, version=version)
print 'out_dir:', out_dir
# script = os.path.join(REPO_DIR, 'preprocess', 'warp_crop_IM_v3.py')
# ! rm -rf {out_dir}
create_if_not_exists(out_dir)
t = time.time()
pool = Pool(8)
_ = pool.map(lambda img_name: crop(stack=stack, img_name=img_name, version=version, resol=resol,
x=x, y=y, w=w, h=h),
metadata_cache['valid_filenames'][stack])
pool.close()
pool.join()
# for img_name in metadata_cache['valid_filenames'][stack]:
# f(stack=stack, img_name=img_name, version=version, resol=resol,
# x=x, y=y, w=w, h=h)
# run_distributed('convert \"%%(input_fp)s\" -crop %(w)dx%(h)d+%(x)d+%(y)d \"%%(output_fp)s\"' % \
# {'w':w_raw, 'h':h_raw, 'x':x_raw, 'y':y_raw},
# kwargs_list=[{'input_fp': DataManager.get_image_filepath_v2(stack=stack, prep_id=5, resol='raw', version=version, fn=img_name),
# 'output_fp': DataManager.get_image_filepath_v2(stack=stack, fn=img_name, prep_id=2, version=version, resol='raw')}
# for img_name in metadata_cache['valid_filenames'][stack]],
# # for img_name in ['CHATM3_slide35_2018_02_17-S1']],
# argument_type='single',
# jobs_per_node=1,
# local_only=True)
# wait_qsub_complete()
print 'done in', time.time() - t, 'seconds' # 1500s | [
"[email protected]"
] | |
04dd25f2e360e6a0b81d6329398e7373d37c3db2 | ff801544b1979442b886d2d1eaf8480e7d6b0d24 | /main.py | 20bae383952351920f5e31df5cc21b3dcc2b56c3 | [] | no_license | BLimmie/OctoGAN | 7d420cd223ea0dd77dd0dfa1827f12fcd32e9dec | 38bb4d76eb8dea22278da2d496b712c171be080f | refs/heads/master | 2021-05-11T02:11:55.498819 | 2018-01-21T17:34:58 | 2018-01-21T17:34:58 | 118,352,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,747 | py | from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw | fake')
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--imageSize', type=int, default=128, help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=150, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
opt = parser.parse_args()
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.dataset in ['imagenet', 'folder', 'lfw']:
# folder dataset
dataset = dset.ImageFolder(root=opt.dataroot,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'lsun':
dataset = dset.LSUN(db_path=opt.dataroot, classes=['bedroom_train'],
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'cifar10':
dataset = dset.CIFAR10(root=opt.dataroot, download=True,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'fake':
dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),
transform=transforms.ToTensor())
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class _netG(nn.Module):
def __init__(self, ngpu):
super(_netG, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d( nz, ngf * 16, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True),
            # state size. (ngf*16) x 4 x 4
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
            # state size. (ngf*8) x 8 x 8
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
            # state size. (ngf*4) x 16 x 16
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
            # state size. (ngf*2) x 32 x 32
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
            # state size. (ngf) x 64 x 64
nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
            # state size. (nc) x 128 x 128
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
netG = _netG(ngpu)
netG.apply(weights_init)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
class _netD(nn.Module):
def __init__(self, ngpu):
super(_netD, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
            # input is (nc) x 128 x 128
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 64 x 64
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 32 x 32
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 16 x 16
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 8 x 8
nn.Conv2d(ndf * 8, ndf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 16),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*16) x 4 x 4
nn.Conv2d(ndf * 16, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1).squeeze(1)
netD = _netD(ngpu)
netD.apply(weights_init)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCELoss()
input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
label = torch.FloatTensor(opt.batchSize)
real_label = 1
fake_label = 0
if opt.cuda:
netD.cuda()
netG.cuda()
criterion.cuda()
input, label = input.cuda(), label.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
fixed_noise = Variable(fixed_noise)
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
for epoch in range(opt.niter):
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
netD.zero_grad()
real_cpu, _ = data
batch_size = real_cpu.size(0)
if opt.cuda:
real_cpu = real_cpu.cuda()
input.resize_as_(real_cpu).copy_(real_cpu)
label.resize_(batch_size).fill_(real_label)
inputv = Variable(input)
labelv = Variable(label)
output = netD(inputv)
errD_real = criterion(output, labelv)
errD_real.backward()
D_x = output.data.mean()
# train with fake
noise.resize_(batch_size, nz, 1, 1).normal_(0, 1)
noisev = Variable(noise)
fake = netG(noisev)
labelv = Variable(label.fill_(fake_label))
output = netD(fake.detach())
errD_fake = criterion(output, labelv)
errD_fake.backward()
D_G_z1 = output.data.mean()
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
labelv = Variable(label.fill_(real_label)) # fake labels are real for generator cost
output = netD(fake)
errG = criterion(output, labelv)
errG.backward()
D_G_z2 = output.data.mean()
optimizerG.step()
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, opt.niter, i, len(dataloader),
errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
if i % 100 == 0:
vutils.save_image(real_cpu,
'%s/real_samples.png' % opt.outf,
normalize=True)
fake = netG(fixed_noise)
vutils.save_image(fake.data,
'%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch),
normalize=True)
# do checkpointing
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch)) | [
"[email protected]"
] | |
41f2df2137a227386f0dece011dcf1d628037fd7 | ad544b38ec09828cda1b1918f407975bc79bf976 | /missioncontrol/mc/mc/views.py | 82f5e002d54b800f164e42ee9229c4612ff2bd76 | [] | no_license | mattvenn/earth-to-mars | 6de13606f3f8087da40e8ed0543a03e0093c25fb | c2b0064ef87c3d095d231587ee3ef48b00360bfd | refs/heads/master | 2021-01-10T07:29:17.557441 | 2016-03-17T16:34:42 | 2016-03-17T16:34:42 | 45,628,116 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,754 | py | from mc import app
from mc import db
from sqlalchemy.exc import IntegrityError
import datetime
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, jsonify, make_response, send_file
from contextlib import closing
from flask_admin.contrib.sqla import ModelView
import time
from wtforms import TextAreaField, TextField, IntegerField, FloatField, SelectField, PasswordField
from wtforms import validators
from flask_wtf import Form
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from mc.models import Teams, School, Sample, Answers, Questions, GroupGraph, Photo, Panorama
from graphing import submit_graph, update_group_graph, get_group_graph_name
from werkzeug import secure_filename
import os
class SecureView(ModelView):
def is_accessible(self):
if 'logged_in' in session.keys():
return True
def inaccessible_callback(self, name, **kwargs):
# redirect to login page if user doesn't have access
return redirect(url_for('login', next=request.url))
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
# tested
def get_teams():
return Teams.query.all()
class LoginForm(Form):
username = TextField('Username', [validators.Required()])
password = PasswordField('Password', [validators.Required()])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if self.username.data != app.config['USERNAME']:
self.username.errors.append('Unknown username')
return False
if self.password.data != app.config['PASSWORD']:
self.password.errors.append('bad password')
return False
return True
class AnswerForm(Form):
team = QuerySelectField(query_factory=get_teams, allow_blank=True, blank_text=u'Please choose')
answer = TextAreaField('Answer', [validators.Required()])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.team.data:
self.team.errors.append('choose a team')
return False
self.answer = Answers(None, self.answer.data, self.team.data)
return True
class PhotoForm(Form):
team = QuerySelectField(query_factory=get_teams, allow_blank=True, blank_text=u'Please choose')
maxx = app.config['MAX_X']
maxy = app.config['MAX_Y']
x = IntegerField('X', [validators.NumberRange(min=0, max=maxx - 1)])
y = IntegerField('Y', [validators.NumberRange(min=0, max=maxy - 1)])
photo = FileField('Image', validators=[
FileRequired(message="you must choose a photo"),
FileAllowed(['jpg', 'png'], message='only images allowed')
])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.team.data:
self.team.errors.append('choose a team')
return False
return True
class SampleForm(Form):
team = QuerySelectField(query_factory=get_teams, allow_blank=True, blank_text=u'Please choose')
types = app.config['SAMPLE_TYPES']
methane = FloatField('Methane', [validators.NumberRange(min=types['methane']['min'], max=types['methane']['max'])])
temperature = FloatField('Temperature', [validators.NumberRange(min=types['temperature']['min'], max=types['temperature']['max'])])
humidity = FloatField('Humidity', [validators.NumberRange(min=types['humidity']['min'], max=types['humidity']['max'])])
maxx = app.config['MAX_X']
maxy = app.config['MAX_Y']
x = IntegerField('X', [validators.NumberRange(min=0, max=maxx - 1)])
y = IntegerField('Y', [validators.NumberRange(min=0, max=maxy - 1)])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.team.data:
self.team.errors.append('choose a team')
return False
if Sample.query.filter(Sample.x == self.x.data, Sample.y == self.y.data, Sample.team == self.team.data).first():
self.team.errors.append('your team already uploaded this sample')
return False
return True
# tested
def add_school_point(points=1):
school = School.query.order_by(School.timestamp.desc()).first()
if school is not None:
school.points += points
db.session.commit()
# tested
def get_group_id():
try:
group_id = GroupGraph.query.all()[-1].id
except IndexError:
group_id = 0
return group_id
# tested
@app.route('/')
def mission_control():
school = School.query.order_by(School.timestamp.desc()).first()
now = datetime.datetime.now()
end_hour = app.config['END_HOUR']
end_min = app.config['END_MIN']
end_time = datetime.datetime.now().replace(hour=end_hour,minute=end_min,second=0)
delta = end_time - now
mins = delta.total_seconds() / 60
hours = mins / 60
mins = mins % 60
secs = delta.total_seconds() % 60
time_info = { 'now': now.strftime('%H:%M'), 'left': '%02d:%02d' % (hours, mins) }
pan = Panorama.query.first()
pan_info = { 'name': pan.get_pan_name(), 'num': pan.get_num_photos() }
return render_template('mission_control.html', school_info=school, time_info=time_info, pan_info=pan_info, group_id=get_group_id())
# tested
@app.route('/show/samples')
def show_samples():
samples = Sample.query.all()
return render_template('show_samples.html', samples=samples)
# tested
@app.route('/show/graph/<type>')
def show_group_graph(type):
return render_template('show_group_graph.html', type=type, group_id=get_group_id())
# tested
@app.route('/upload/sample', methods=['GET', 'POST'])
def add_sample():
form = SampleForm()
if form.validate_on_submit():
sample = Sample()
form.populate_obj(sample)
db.session.add(sample)
db.session.commit()
add_school_point()
submit_graph(sample) # make a graph
#update_group_graph(form.sample)
flash('sample logged')
return render_template('sample_submitted.html', sample=sample)
return render_template('add_sample.html', form=form)
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
def make_csv(head, list):
import StringIO
import csv
si = StringIO.StringIO()
cw = csv.writer(si)
cw.writerow(head)
for i in list:
cw.writerow(i.get_csv())
return si
def make_csv_response(head, list, name):
si = make_csv(head, list)
response = make_response(si.getvalue())
response.headers["Content-Disposition"] = "attachment; filename=%s" % name
return response
@app.route('/api/questions')
def api_get_questions():
questions = Questions.query.all()
head = Questions.get_csv_head()
return make_csv_response(head, questions,'questions.csv')
@app.route('/api/answers')
def api_get_answers():
answers = Answers.query.all()
head = Answers.get_csv_head()
return make_csv_response(head, answers,'answers.csv')
# build an archive of all the cool data and zip it
@app.route('/api/zipped-data')
def zipped_data():
import zipfile
import io
import json
memory_file = io.BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
for name in app.config['SAMPLE_TYPES'].keys():
graph_name = get_group_graph_name(name, get_group_id())
zf.write(graph_name, name + '.png')
answers = Answers.query.all()
head = Answers.get_csv_head()
answers_csv = make_csv(head, answers)
zf.writestr('answers.csv', answers_csv.getvalue())
questions = Questions.query.all()
head = Questions.get_csv_head()
questions_csv = make_csv(head, questions)
zf.writestr('questions.csv', questions_csv.getvalue())
samples = Sample.query.all()
data = { 'samples' : [sample.serialise() for sample in samples]}
zf.writestr('samples.json', json.dumps(data))
memory_file.seek(0)
return send_file(memory_file, attachment_filename='missioncontrol.zip', as_attachment=True)
# tested
@app.route('/api/team/<name>')
def api_get_team_by_name(name):
name = name.lower()
teams = get_teams()
for team in teams:
if team.name.lower() == name:
return jsonify(team.serialise())
raise InvalidUsage("no team of that name found")
# tested
@app.route('/api/samples')
def api_get_all_samples():
samples = Sample.query.all()
data = { 'samples' : [sample.serialise() for sample in samples]}
return jsonify(data)
# tested
@app.route('/api/sample/<int:sample_id>')
def api_get_sample(sample_id):
sample = Sample.query.get(sample_id)
if not sample:
raise InvalidUsage("no sample of that id found")
return jsonify(sample.serialise())
# tested
@app.route('/api/sample', methods=['POST'])
def api_add_sample():
if not request.json:
raise InvalidUsage("json needed")
form = SampleForm(data = request.get_json())
form.csrf_enabled = False
if not form.validate():
raise InvalidUsage("invalid data", payload=form.errors)
sample = Sample()
form.populate_obj(sample)
db.session.add(sample)
db.session.commit()
#update_group_graph(form.sample)
add_school_point()
return jsonify(sample.serialise()), 201
# tested
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
session['logged_in'] = True
flash('You were logged in')
return redirect('/admin')
return render_template('login.html', form=form)
# tested
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect('/admin')
# tested
@app.route('/answers/<int:question_id>')
def answers(question_id):
question = Questions.query.get(question_id)
return render_template('answer.html', question=question)
# tested
@app.route('/questions/<int:question_id>', methods=['GET', 'POST'])
def questions(question_id):
form = AnswerForm()
question = Questions.query.get(question_id)
if form.validate_on_submit():
form.answer.question = question
db.session.add(form.answer)
db.session.commit()
add_school_point(10)
flash('answer logged')
return redirect(url_for('answers', question_id=question_id))
return render_template('question.html', question=question, form=form)
@app.route('/upload/photo', methods=['GET', 'POST'])
def add_photo():
form = PhotoForm()
if form.validate_on_submit():
filename = secure_filename(form.photo.data.filename)
form.photo.data.save(os.path.join(app.static_folder, 'photos', filename))
photo = Photo()
form.populate_obj(photo)
photo.image_path = filename
db.session.add(photo)
db.session.commit()
pan = Panorama.query.first()
pan.add_to_panorama(photo)
add_school_point()
return render_template('photo_submitted.html', photo=photo)
return render_template('add_photo.html', form=form)
| [
"[email protected]"
] | |
821a36d24596e0ac1a7bce97e1a3d9b9992c271f | 03043b715d2e177dd3ba93078463ce79c33173dc | /NI_DAQmx/models/NI_PXIe_6535.py | ffdfbaabce93ed1ea32f606174fc1da92d542ec7 | [] | no_license | labscript-suite-bitbucket-archive/cavitylab-labscript_devices--forked-from--labscript_suite-labscript_devices | 2efc068eb35ca70e1eecab9c7fec7991fd596c9c | e665d3ee0ce1cfd7fb7cd5c6cc4d783528bc4935 | refs/heads/master | 2020-12-27T02:35:41.710162 | 2019-12-06T20:57:48 | 2019-12-06T20:57:48 | 253,143,395 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,629 | py | #####################################################################
# #
# /NI_DAQmx/models/_subclass_template.py #
# #
# Copyright 2018, Christopher Billington #
# #
# This file is part of the module labscript_devices, in the #
# labscript suite (see http://labscriptsuite.org), and is #
# licensed under the Simplified BSD License. See the license.txt #
# file in the root of the project for the full license. #
# #
#####################################################################
#####################################################################
# WARNING #
# #
# This file is auto-generated, any modifications may be #
# overwritten. See README.txt in this folder for details #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
from labscript_devices.NI_DAQmx.labscript_devices import NI_DAQmx
CAPABILITIES = {
'AI_range': None,
'AI_start_delay': None,
'AO_range': None,
'max_AI_multi_chan_rate': None,
'max_AI_single_chan_rate': None,
'max_AO_sample_rate': None,
'max_DO_sample_rate': 10000000.0,
'min_semiperiod_measurement': None,
'num_AI': 0,
'num_AO': 0,
'num_CI': 0,
'ports': {
'port0': {'num_lines': 8, 'supports_buffered': True},
'port1': {'num_lines': 8, 'supports_buffered': True},
'port2': {'num_lines': 8, 'supports_buffered': True},
'port3': {'num_lines': 8, 'supports_buffered': True},
'port4': {'num_lines': 6, 'supports_buffered': False},
},
'supports_buffered_AO': False,
'supports_buffered_DO': True,
'supports_semiperiod_measurement': False,
}
class NI_PXIe_6535(NI_DAQmx):
description = 'NI-PXIe-6535'
def __init__(self, *args, **kwargs):
# Any provided kwargs take precedent over capabilities
combined_kwargs = CAPABILITIES.copy()
combined_kwargs.update(kwargs)
NI_DAQmx.__init__(self, *args, **combined_kwargs)
| [
"[email protected]"
] | |
37e0fb4dbe4d99d999a4a4ff25c33d7f504d8fc8 | ab574f7511fa15e5ea50a26f26e3e38f7e33505a | /win_2018/scipy/special/_ufuncs_cxx.py | 65fc513447b7d344b151f7ba228174ebe12f7257 | [] | no_license | zclongpop123/maya_python_packages | 49d6b340512a2580bc8c14ae6281ca3f57017acd | 4dd4a48c41749443ac16053d20aec04e9d2db202 | refs/heads/master | 2021-11-30T01:49:41.846727 | 2021-11-17T01:47:08 | 2021-11-17T01:47:08 | 49,186,909 | 16 | 9 | null | 2017-03-07T00:13:41 | 2016-01-07T06:48:35 | Python | UTF-8 | Python | false | false | 288 | py | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, '_ufuncs_cxx.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| [
"[email protected]"
] | |
387635873635283c5290831c6f2104f6d7e1fed8 | aeb2f0bb7b01f87a1b6c65b88b216bed47025fe5 | /experiment/ex_025_predict.py | db89c037080c832fffa5c1b6a6ffee69035c39e7 | [] | no_license | kurupical/riiid | 7e68239cd50243fbb734bf433d60ebd7469cb180 | 7bab580ce03d03873748a6afc91092c11871465f | refs/heads/master | 2023-03-30T04:15:54.109815 | 2021-04-04T01:20:33 | 2021-04-04T01:20:33 | 302,828,112 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,041 | py | from datetime import datetime as dt
from feature_engineering.feature_factory import \
FeatureFactoryManager, \
TargetEncoder, \
CountEncoder, \
MeanAggregator, \
TagsSeparator, \
UserLevelEncoder, \
NUniqueEncoder, \
ShiftDiffEncoder
import pandas as pd
import glob
import os
import tqdm
import lightgbm as lgb
import pickle
import riiideducation
import numpy as np
from logging import Logger, StreamHandler, Formatter
import shutil
import time
import warnings
warnings.filterwarnings("ignore")
model_dir = "../output/ex_025/20201022082802"
data_types_dict = {
'row_id': 'int64',
'timestamp': 'int64',
'user_id': 'int32',
'content_id': 'int16',
'content_type_id': 'int8',
'task_container_id': 'int16',
'user_answer': 'int8',
'answered_correctly': 'int8',
}
prior_columns = ["prior_group_responses", "prior_group_answers_correct"]
def get_logger():
formatter = Formatter("%(asctime)s|%(levelname)s| %(message)s")
logger = Logger(name="log")
handler = StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def run(debug,
model_dir,
kaggle=False):
if kaggle:
files_dir = "/kaggle/input/riiid-split10/*.pickle"
else:
files_dir = "../input/riiid-test-answer-prediction/split10_base/*.pickle"
logger = get_logger()
# environment
env = riiideducation.make_env()
df_question = pd.read_csv("../input/riiid-test-answer-prediction/questions.csv",
dtype={"bundle_id": "int32",
"question_id": "int32",
"correct_answer": "int8",
"part": "int8"})
df_lecture = pd.read_csv("../input/riiid-test-answer-prediction/lectures.csv",
dtype={"lecture_id": "int32",
"tag": "int16",
"part": "int8"})
# model loading
models = []
for model_path in glob.glob(f"{model_dir}/*model*.pickle"):
with open(model_path, "rb") as f:
models.append(pickle.load(f))
# data preprocessing
logger = get_logger()
feature_factory_dict = {}
feature_factory_dict["tags"] = {
"TagsSeparator": TagsSeparator()
}
for column in ["content_id", "user_id", "content_type_id", "prior_question_had_explanation",
"tags1", "tags2", "tags3", "tags4", "tags5", "tags6",
("user_id", "content_type_id"), ("user_id", "prior_question_had_explanation")]:
is_partial_fit = column == "content_id"
is_onebyone = "content_id" in column
if type(column) == str:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=column, onebyone=is_onebyone),
"TargetEncoder": TargetEncoder(column=column, is_partial_fit=is_partial_fit, onebyone=is_onebyone)
}
else:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=list(column), onebyone=is_onebyone),
"TargetEncoder": TargetEncoder(column=list(column), is_partial_fit=is_partial_fit, onebyone=is_onebyone)
}
for column in ["part", ("user_id", "tag"), ("user_id", "part"), ("content_type_id", "part")]:
if type(column) == str:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=column)
}
else:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=list(column))
}
feature_factory_dict["user_id"]["MeanAggregatorTimestamp"] = MeanAggregator(column="user_id",
agg_column="timestamp",
remove_now=False)
feature_factory_dict["user_id"]["MeanAggregatorPriorQuestionElapsedTime"] = MeanAggregator(column="user_id",
agg_column="prior_question_elapsed_time",
remove_now=True)
feature_factory_dict["user_id"]["ShiftDiffEncoder"] = ShiftDiffEncoder(groupby="user_id",
column="timestamp")
feature_factory_dict["content_id"]["MeanAggregatorPriorQuestionElapsedTime"] = MeanAggregator(column="content_id",
agg_column="prior_question_elapsed_time",
remove_now=True)
feature_factory_manager = FeatureFactoryManager(feature_factory_dict=feature_factory_dict,
logger=logger)
for model_id, fname in enumerate(glob.glob(files_dir)):
logger.info(f"loading... {fname}")
df = pd.read_pickle(fname)
df["answered_correctly"] = df["answered_correctly"].replace(-1, np.nan)
df["prior_question_had_explanation"] = df["prior_question_had_explanation"].fillna(-1).astype("int8")
if debug:
df = df.head(1000)
df = pd.concat([pd.merge(df[df["content_type_id"] == 0], df_question,
how="left", left_on="content_id", right_on="question_id"),
pd.merge(df[df["content_type_id"] == 1], df_lecture,
how="left", left_on="content_id", right_on="lecture_id")]).sort_values(["user_id", "timestamp"])
feature_factory_manager.fit(df, is_first_fit=True)
iter_test = env.iter_test()
df_test_prev = pd.DataFrame()
df_test_prev1 = pd.DataFrame()
answered_correctlies = []
user_answers = []
i = 0
t = time.time()
for (df_test, df_sample_prediction) in iter_test:
i += 1
logger.info(f"[time: {int(time.time() - t)}iteration {i}: data_length: {len(df_test)}")
# 前回のデータ更新
if len(df_test_prev) > 0: # 初回のみパスするためのif
answered_correctly = df_test.iloc[0]["prior_group_answers_correct"]
answered_correctly = [int(x) for x in answered_correctly.replace("[", "").replace("'", "").replace("]", "").replace(" ", "").split(",")]
user_answer = df_test.iloc[0]["prior_group_responses"]
user_answer = [int(x) for x in user_answer.replace("[", "").replace("'", "").replace("]", "").replace(" ", "").split(",")]
answered_correctlies.extend(answered_correctly)
user_answers.extend(user_answer)
df_test_prev1["answered_correctly"] = answered_correctly
df_test_prev1["user_answer"] = user_answer
df_test_prev1["answered_correctly"] = df_test_prev1["answered_correctly"].replace(-1, np.nan)
df_test_prev1["prior_question_had_explanation"] = \
df_test_prev1["prior_question_had_explanation"].fillna(-1).astype("int8")
feature_factory_manager.fit(df_test_prev1, partial_predict_mode=True, onebyone_mode=True)
df_test_prev1 = pd.DataFrame()
if debug:
update_record = 50
else:
update_record = 150
# update1
if len(df_test_prev) > update_record:
df_test_prev["answered_correctly"] = answered_correctlies
df_test_prev["user_answer"] = user_answers
# df_test_prev = df_test_prev.drop(prior_columns, axis=1)
df_test_prev["answered_correctly"] = df_test_prev["answered_correctly"].replace(-1, np.nan)
df_test_prev["prior_question_had_explanation"] = df_test_prev["prior_question_had_explanation"].fillna(-1).astype("int8")
feature_factory_manager.fit(df_test_prev, partial_predict_mode=True, onebyone_mode=False)
df_test_prev = pd.DataFrame()
answered_correctlies = []
user_answers = []
        # Fetch and process the current iteration's data
# logger.info(f"[time: {int(time.time() - t)}dataload")
logger.info(f"merge... ")
w_df1 = pd.merge(df_test[df_test["content_type_id"] == 0], df_question, how="left", left_on="content_id",
right_on="question_id")
w_df2 = pd.merge(df_test[df_test["content_type_id"] == 1], df_lecture, how="left", left_on="content_id",
right_on="lecture_id")
df_test = pd.concat([w_df1, w_df2]).sort_values(["user_id", "timestamp"])
df_test["tag"] = df_test["tag"].fillna(-1)
df_test["correct_answer"] = df_test["correct_answer"].fillna(-1)
df_test["bundle_id"] = df_test["bundle_id"].fillna(-1)
logger.info(f"transform... ")
df_test["prior_question_had_explanation"] = df_test["prior_question_had_explanation"].astype("float16").fillna(-1).astype("int8")
df = feature_factory_manager.partial_predict(df_test)
df.columns = [x.replace(" ", "_") for x in df.columns]
logger.info(f"other... ")
# predict
predicts = []
cols = models[0].feature_name()
for model in models:
predicts.append(model.predict(df[cols]))
df["answered_correctly"] = np.array(predicts).transpose().mean(axis=1)
df_sample_prediction = pd.merge(df_sample_prediction[["row_id"]],
df[["row_id", "answered_correctly"]],
how="inner")
env.predict(df_sample_prediction)
df_test_prev = df_test_prev.append(df[cols + ["user_id", "tags"]])
df_test_prev1 = df[cols + ["user_id", "tags"]]
if i < 5:
df_test_prev.to_csv(f"{i}.csv")
if __name__ == "__main__":
run(debug=True,
model_dir=model_dir) | [
"[email protected]"
] | |
139a60ffd6e82195e835f691c53c0f317ab5a8d9 | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/heatmap/_yperiod.py | 6496c7ed1592b867d1b2a5946e177c084910c381 | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 470 | py | import _plotly_utils.basevalidators
class YperiodValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="yperiod", parent_name="heatmap", **kwargs):
super(YperiodValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"ytype": "scaled"}),
**kwargs,
)
| [
"[email protected]"
] | |
d978aee1a03ddbd4eec8a61a6d7792586dbbeb14 | a25aa09af984d08084a395f9b6df427d3756f11a | /35.Search Insert Position.py | 39611cdd7879d9f73747e131d4d9446fec4691dc | [] | no_license | luyihsien/leetcodepy | 31971e851a4ae77942a5d9e3ff07faea6e504c66 | a54bd09f4b28f106196a6cd8a0f9c056bcd237e6 | refs/heads/master | 2020-05-19T13:21:57.854086 | 2019-10-16T14:23:00 | 2019-10-16T14:23:00 | 185,037,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | ''''
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
'''
class Solution:
def searchInsert(self, nums, target):
if len(nums)==0:
return 0
for i in range(len(nums)):
if nums[i]==target:
return i
for i in range(1,len(nums)):
if nums[i]>target and nums[i-1]<target:
return i
if max(nums)<target:
return len(nums)
if min(nums)>target:
return 0
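# Quick sanity check of the linear-scan solution above (inputs follow the
# classic LeetCode examples; this block is illustrative).
if __name__ == '__main__':
    s = Solution()
    print(s.searchInsert([1, 3, 5, 6], 5))  # expected 2
    print(s.searchInsert([1, 3, 5, 6], 2))  # expected 1
    print(s.searchInsert([1, 3, 5, 6], 7))  # expected 4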
'''
Accepted
Details
Runtime: 52 ms, beat 90.74% of Python3 submissions for Search Insert Position
Memory usage: 13.5 MB, beat 96.03% of Python3 submissions for Search Insert Position
'''
| [
"[email protected]"
] | |
713a24a7ccdd51e993b29e4b2f542ce44c4723f6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03448/s790400785.py | 17c0ac19efb39097ef60a9bdde7f5b5bfd5d9764 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | def resolve():
A = int(input())
B = int(input())
C = int(input())
X = int(input())
ans = []
for a in range(A + 1):
for b in range(B + 1):
c = (X - 500 * a - 100 * b) / 50
if c <= C and c >= 0:
ans.append((a, b, c))
print((len(set(ans))))
return
resolve() | [
"[email protected]"
] | |
80fc4b38b7dff6b4f630a8e31f713c5c9b512f3c | 53163d4129930426c2d7aa650cb1b638d1347d21 | /lxmert/lxmert/src/tasks/nlvr2_model.py | ef93474403461f18461d1da85fb8877b6f6b5364 | [
"MIT"
] | permissive | fdsig/Transformer-MM-Explainability | 5e4d9d0c927afd0316311259fc318b325d74628e | accc4dd3491d321948e826079ce85f61bb02e0a6 | refs/heads/main | 2023-09-03T01:21:27.188260 | 2021-11-17T23:56:49 | 2021-11-17T23:56:49 | 433,759,755 | 1 | 0 | MIT | 2021-12-01T09:20:31 | 2021-12-01T09:20:31 | null | UTF-8 | Python | false | false | 1,773 | py | # coding=utf-8
# Copyleft 2019 project LXRT.
import torch.nn as nn
from lxrt.modeling import GeLU, BertLayerNorm
from lxrt.entry import LXRTEncoder
from param import args
class NLVR2Model(nn.Module):
def __init__(self):
super().__init__()
self.lxrt_encoder = LXRTEncoder(
args,
max_seq_length=20
)
self.hid_dim = hid_dim = self.lxrt_encoder.dim
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim * 2, hid_dim * 2),
GeLU(),
BertLayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, 2)
)
self.logit_fc.apply(self.lxrt_encoder.model.init_bert_weights)
def forward(self, feat, pos, sent):
"""
:param feat: b, 2, o, f
:param pos: b, 2, o, 4
:param sent: b, (string)
:param leng: b, (numpy, int)
:return:
"""
# Pairing images and sentences:
# The input of NLVR2 is two images and one sentence. In batch level, they are saved as
# [ [img0_0, img0_1], [img1_0, img1_1], ...] and [sent0, sent1, ...]
# Here, we flat them to
# feat/pos = [ img0_0, img0_1, img1_0, img1_1, ...]
# sent = [ sent0, sent0, sent1, sent1, ...]
sent = sum(zip(sent, sent), ())
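        # e.g. sent = ('a cat', 'two dogs') -> ('a cat', 'a cat', 'two dogs', 'two dogs'),
        # so each duplicated sentence lines up with one of its two flattened images below.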
batch_size, img_num, obj_num, feat_size = feat.size()
assert img_num == 2 and obj_num == 36 and feat_size == 2048
feat = feat.view(batch_size * 2, obj_num, feat_size)
pos = pos.view(batch_size * 2, obj_num, 4)
# Extract feature --> Concat
x = self.lxrt_encoder(sent, (feat, pos))
x = x.view(-1, self.hid_dim*2)
# Compute logit of answers
logit = self.logit_fc(x)
return logit
| [
"[email protected]"
] | |
b1b504761ef386bea3c5ec22159ec1973a0ac635 | d4c47276c8fbd15240aa228eda04ee8e338caf02 | /Python/Python Lesson/Second/Lesson9/Sample8.py | 447d9972d35e1c1f96525406233e419f925a3a61 | [] | no_license | developer579/Practice | a745384450172fb327913c130303ab76492096f1 | 54084468af83afcc44530e757800c8c3678147c1 | refs/heads/main | 2023-05-06T01:36:06.222554 | 2021-06-02T07:04:03 | 2021-06-02T07:04:03 | 324,312,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | import re
ptr = ["TXT","TXT..",".TXT","..TXT"]
str = ["TXT","TXTT","TXTTT","TTXT","TTTXT"]
for valueptr in ptr:
print("------")
pattern = re.compile(valueptr)
for valuestr in str:
res = pattern.search(valuestr)
if res is not None:
m = "o"
else:
m = "x"
        mrs = "(pattern)" + valueptr + "(string)" + valuestr + "(match)" + m
print(mrs) | [
"[email protected]"
] | |
bfc47b482deb0ccf1f3e645d49665369758987ff | 3a3e823f6b94b7eae8a363b0b51b036d2b0a1669 | /metvae/dataset/biom.py | aa3196a0a38243f360389493a4983f3f36972811 | [] | no_license | mortonjt/metvae | 8a28bbbd72ee79d66992bd31bd82af65b83ea819 | f2f241fdedd2f4c045a088727df1f155b9ce9a20 | refs/heads/main | 2022-12-31T16:24:26.014394 | 2020-10-20T23:38:50 | 2020-10-20T23:38:50 | 305,812,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,780 | py | import os
import re
import biom
import math
import logging
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from typing import List
logger = logging.getLogger(__name__)
class BiomDataset(Dataset):
"""Loads a `.biom` file.
Parameters
----------
    table : biom.Table
        Loaded biom feature table
    metadata : pd.DataFrame, optional
        Sample metadata indexed by sample id
    batch_category : str, optional
        Column name for batch indices
"""
def __init__(
self,
table: biom.Table,
metadata: pd.DataFrame = None,
batch_category: str = None,
):
        super().__init__()
self.table = table
self.metadata = metadata
self.batch_category = batch_category
self.populate()
def populate(self):
logger.info("Preprocessing dataset")
if self.metadata is not None:
# match the metadata with the table
ids = set(self.table.ids()) & set(self.metadata.index)
filter_f = lambda v, i, m: i in ids
self.table = self.table.filter(filter_f, axis='sample')
self.metadata = self.metadata.loc[self.table.ids()]
if self.metadata.index.name is None:
raise ValueError('`Index` must have a name either'
'`sampleid`, `sample-id` or #SampleID')
self.index_name = self.metadata.index.name
self.metadata = self.metadata.reset_index()
self.batch_indices = None
if self.batch_category is not None and self.metadata is not None:
batch_cats = np.unique(self.metadata[self.batch_category].values)
batch_cats = pd.Series(
np.arange(len(batch_cats)), index=batch_cats)
self.batch_indices = np.array(
list(map(lambda x: batch_cats.loc[x],
self.metadata[self.batch_category].values)))
logger.info("Finished preprocessing dataset")
def __len__(self) -> int:
return len(self.table.ids())
def __getitem__(self, i):
""" Returns all of the samples for a given subject
Returns
-------
counts : np.array
OTU counts for specified samples.
batch_indices : np.array
Membership ids for batch samples. If not specified, return None.
"""
sample_idx = self.table.ids()[i]
if self.batch_indices is not None:
batch_indices = self.batch_indices[i]
else:
batch_indices = None
counts = self.table.data(id=sample_idx, axis='sample')
return counts, batch_indices
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
start = 0
end = self.__len__()
if worker_info is None: # single-process data loading
for i in range(end):
yield self.__getitem__(i)
else:
            # multi-process loading: split the sample range evenly across workers
            worker_id = worker_info.id
            num_workers = float(worker_info.num_workers)
            per_worker = int(math.ceil((end - start) / num_workers))
iter_start = start + worker_id * per_worker
iter_end = min(iter_start + per_worker, end)
for i in range(iter_start, iter_end):
yield self.__getitem__(i)
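# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of wrapping an in-memory biom.Table with BiomDataset and
# reading one sample; the ids and counts below are made-up values, and real
# tables would normally come from biom.load_table(...).
def _example_biom_dataset_usage():
    data = np.arange(6, dtype=float).reshape(3, 2)             # 3 observations x 2 samples
    table = biom.Table(data, ['o1', 'o2', 'o3'], ['s1', 's2'])
    dataset = BiomDataset(table)                                # no metadata, so no batches
    counts, batch_index = dataset[0]                            # counts for the first sample
    return counts, batch_index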
class BiomBatchDataset(BiomDataset):
"""Loads a `.biom` file.
Parameters
----------
filename : Path
Filepath to biom table
metadata_file : Path
Filepath to sample metadata
batch_differentials : str
Pre-trained batch differentials effects
batch_category : str
Column name in metadata for batch indices
Notes
-----
Important, periods cannot be handled in the labels
in the batch_category. Make sure that these are converted to
hyphens or underscores.
"""
def __init__(
self,
table: biom.Table,
metadata: pd.DataFrame,
batch_differentials : pd.DataFrame,
batch_category: str,
format_columns=True,
):
super(BiomBatchDataset).__init__()
self.table = table
self.metadata = metadata
self.batch_category = batch_category
self.batch_differentials = batch_differentials
self.format_columns = format_columns
self.populate()
def populate(self):
logger.info("Preprocessing dataset")
# Match the metadata with the table
ids = set(self.table.ids()) & set(self.metadata.index)
filter_f = lambda v, i, m: i in ids
self.table = self.table.filter(filter_f, axis='sample')
self.metadata = self.metadata.loc[self.table.ids()]
if self.metadata.index.name is None:
raise ValueError('`Index` must have a name either'
'`sampleid`, `sample-id` or #SampleID')
self.index_name = self.metadata.index.name
self.metadata = self.metadata.reset_index()
# Clean up the batch indexes
if self.format_columns:
if (self.metadata[self.batch_category].dtypes == np.float64 or
self.metadata[self.batch_category].dtypes == np.int64):
# format the batch category column
m = self.metadata[self.batch_category].astype(np.int64)
self.metadata[self.batch_category] = m.astype(np.str)
cols = self.batch_differentials.columns
def regex_f(x):
return re.findall(r"\[([A-Za-z0-9_]+).*\]", x)[0]
cols = list(map(regex_f, cols))
print('columns', cols)
self.batch_differentials.columns = cols
# Retrieve batch labels
batch_cats = np.unique(self.metadata[self.batch_category].values)
batch_cats = pd.Series(
np.arange(len(batch_cats)), index=batch_cats)
self.batch_indices = np.array(
list(map(lambda x: batch_cats.loc[x],
self.metadata[self.batch_category].values)))
# Clean up batch differentials
table_features = set(self.table.ids(axis='observation'))
batch_features = set(self.batch_differentials.index)
ids = table_features & batch_features
filter_f = lambda v, i, m: i in ids
self.table = self.table.filter(filter_f, axis='observation')
table_obs = self.table.ids(axis='observation')
self.batch_differentials = self.batch_differentials.loc[table_obs]
logger.info("Finished preprocessing dataset")
def __getitem__(self, i):
""" Returns all of the samples for a given subject.
Returns
-------
counts : np.array
OTU counts for specified samples.
batch_indices : np.array
Membership ids for batch samples.
"""
sample_idx = self.table.ids()[i]
batch_index = self.batch_indices[i]
counts = self.table.data(id=sample_idx, axis='sample')
batch_diffs = self.batch_differentials
        assert batch_index < batch_diffs.shape[1], f'batch index {batch_index} out of range for {batch_diffs.shape[1]} batch columns'
batch_diffs = np.array(batch_diffs.iloc[:, batch_index].values)
return counts, batch_diffs
def collate_single_f(batch):
counts_list = np.vstack([b[0] for b in batch])
counts = torch.from_numpy(counts_list).float()
return counts
def collate_batch_f(batch):
counts_list = np.vstack([b[0] for b in batch])
batch_diffs = np.vstack([b[1] for b in batch])
counts = torch.from_numpy(counts_list).float()
batch_diffs = torch.from_numpy(batch_diffs).float()
return counts, batch_diffs
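# --- Illustrative usage sketch (not part of the original module) ---
# How the collate functions above are typically wired into a PyTorch DataLoader;
# the table contents and batch size are made-up values for demonstration only.
def _example_dataloader_usage():
    from torch.utils.data import DataLoader
    data = np.arange(20, dtype=float).reshape(5, 4)        # 5 observations x 4 samples
    table = biom.Table(data,
                       ['o%d' % i for i in range(5)],
                       ['s%d' % i for i in range(4)])
    loader = DataLoader(BiomDataset(table), batch_size=2,
                        collate_fn=collate_single_f)
    return next(iter(loader))                               # float tensor of shape (2, 5)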
| [
"[email protected]"
] | |
6e0ae3e9c859c2ff133011147002083abb1e1ecf | 6dfb7fe44b6c5bfb7feb5a101656e3d3402a621f | /simp_py_examples/course/S1800/t105.py | 14b64f55e86d1ce9d76af5b273b6ada48bd93378 | [
"MIT"
] | permissive | kcfkwok2003/Simp_py | 11d6813fac83ab6309eb8efc22fcd8edde5b19b8 | f75e66da01b45dc8688dda602f8b33d4258f0c31 | refs/heads/master | 2021-05-11T00:36:36.872754 | 2018-12-19T01:41:15 | 2018-12-19T01:41:15 | 118,306,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from simp_py import tft
lcd = tft.tft
lcd.clear()
import time
cnt = 10
while cnt >= 0:
  lcd.text(10, 10, 'count: %s ' % cnt)
  cnt -= 1
time.sleep(1)
| [
"[email protected]"
] | |
a5da3fc38c2b91b2122f0fd2cb7e5d2e1f764ad9 | 9dc3ae479c1b5c6941681917151fcb0379f9173d | /CanvasFeatureFlag.py | 7a8e37d3b28a61f52fb91ba58b6f1eb53cf1381a | [] | no_license | cthacker-udel/Python-Canvas-API-Wrapper | bf2400b42b644791f45bbda7ed42e2c03a8d97b2 | 0263c591a2b02197529559346558b9be02f592c3 | refs/heads/master | 2023-08-25T12:01:48.417204 | 2021-10-09T10:49:51 | 2021-10-09T10:49:51 | 388,362,237 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from CanvasClient import CanvasClient
class CanvasFeatureFlags(CanvasClient):
def __init__(self):
self.course_id = None
self.account_id = None
self.user_id = None
self.feature_id = None
self.state = None
def generate_queries(self):
body = {}
if self.state is not None:
body['state'] = self.state
return body
def clear_queries(self):
self.course_id = None
self.account_id = None
self.user_id = None
self.feature_id = None
self.state = None | [
"[email protected]"
] | |
3d613b080afe7af474f8504d12bf40d8034710ab | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/binaryTree2_20200615152326.py | 64f23d35b04053fcbead026e6e8a6c7c2d94f816 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Create a node and assign a value to the node
class Node:
    def __init__(self, data):
        # store this node's value
        self.data = data
        # child links start out empty and are attached later
        self.left = None
        self.right = None
    # Print this node's value only (see the traversal sketch below for the whole tree)
    def printTree(self):
        print(self.data)
root = Node(10)
root.left = Node(2)
root.right = Node(3)
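# --- Illustrative sketch (not in the original snippet) ---
# printTree above only prints one node's value; a recursive in-order traversal
# visits the whole tree (left subtree, node, right subtree).
def print_inorder(node):
    if node is None:
        return
    print_inorder(node.left)
    print(node.data)
    print_inorder(node.right)
print_inorder(root)  # prints 2, 10, 3 for the tree built above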
root.printTree() | [
"[email protected]"
] | |
8a8680338eb791a54e04854473d5d7158ca44726 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/pytype/pytype/tools/merge_pyi/test_data/var_annot.comment.py | 8d3907c0a79e522e7a66e1587e8a8ca132b76a38 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:fbf532cb3bc3376967d6a665559e5b50273ee6371ee9080fcc2f2d7e3592c2eb
size 156
| [
"[email protected]"
] | |
fa634099a27ded13c1952c58524029bb04dfce23 | 41986b7a1b95784f0a6256ae24d5942c70ced4d7 | /prod/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/container/v1alpha1/container_v1alpha1_messages.py | 49c00a4745dfa8067e647185d258367759f8dcfb | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | wakabayashi-seiya/terraform_gcp | ed829a5a21d5d19d6663804ee5d5f7f3d23b4ec4 | f757e56779f33c2fabd8a8eed9c51ff0b897a38f | refs/heads/master | 2021-07-07T21:51:35.993317 | 2020-03-11T05:42:57 | 2020-03-11T05:42:57 | 239,411,772 | 0 | 1 | null | 2021-04-30T21:05:04 | 2020-02-10T02:32:04 | Python | UTF-8 | Python | false | false | 175,511 | py | """Generated message classes for container version v1alpha1.
Builds and manages container-based applications, powered by the open source
Kubernetes technology.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'container'
class AcceleratorConfig(_messages.Message):
r"""AcceleratorConfig represents a Hardware Accelerator request.
Fields:
acceleratorCount: The number of the accelerator cards exposed to an
instance.
acceleratorType: The accelerator type resource name. List of supported
accelerators [here](/compute/docs/gpus)
"""
acceleratorCount = _messages.IntegerField(1)
acceleratorType = _messages.StringField(2)
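# --- Illustrative sketch (not part of the generated module) ---
# Messages in this module are plain protorpc message classes, so they can be
# constructed directly and serialized with the apitools `encoding` helpers
# imported above. The field values here are arbitrary examples, not defaults.
def _example_accelerator_config():
  config = AcceleratorConfig(acceleratorCount=1,
                             acceleratorType='nvidia-tesla-t4')
  return encoding.MessageToJson(config)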
class AddonsConfig(_messages.Message):
r"""Configuration for the addons that can be automatically spun up in the
cluster, enabling additional functionality.
Fields:
cloudBuildConfig: Configuration for the Cloud Build addon.
cloudRunConfig: Configuration for the Cloud Run addon. The `IstioConfig`
addon must be enabled in order to enable Cloud Run. This option can only
be enabled at cluster creation time.
configConnectorConfig: Configuration for the ConfigConnector add-on, a
Kubernetes extension to manage hosted GCP services through the
Kubernetes API
dnsCacheConfig: Configuration for NodeLocalDNS, a dns cache running on
cluster nodes
gcePersistentDiskCsiDriverConfig: Configuration for the GCP Compute
Persistent Disk CSI driver.
horizontalPodAutoscaling: Configuration for the horizontal pod autoscaling
feature, which increases or decreases the number of replica pods a
replication controller has based on the resource usage of the existing
pods.
httpLoadBalancing: Configuration for the HTTP (L7) load balancing
controller addon, which makes it easy to set up HTTP load balancers for
services in a cluster.
istioConfig: Configuration for Istio, an open platform to connect, manage,
and secure microservices.
kalmConfig: Configuration for the KALM addon, which manages the lifecycle
of k8s applications.
kubernetesDashboard: Configuration for the Kubernetes Dashboard. This
addon is deprecated, and will be disabled in 1.15. It is recommended to
use the Cloud Console to manage and monitor your Kubernetes clusters,
workloads and applications. For more information, see:
https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards
networkPolicyConfig: Configuration for NetworkPolicy. This only tracks
whether the addon is enabled or not on the Master, it does not track
whether network policy is enabled for the nodes.
"""
cloudBuildConfig = _messages.MessageField('CloudBuildConfig', 1)
cloudRunConfig = _messages.MessageField('CloudRunConfig', 2)
configConnectorConfig = _messages.MessageField('ConfigConnectorConfig', 3)
dnsCacheConfig = _messages.MessageField('DnsCacheConfig', 4)
gcePersistentDiskCsiDriverConfig = _messages.MessageField('GcePersistentDiskCsiDriverConfig', 5)
horizontalPodAutoscaling = _messages.MessageField('HorizontalPodAutoscaling', 6)
httpLoadBalancing = _messages.MessageField('HttpLoadBalancing', 7)
istioConfig = _messages.MessageField('IstioConfig', 8)
kalmConfig = _messages.MessageField('KalmConfig', 9)
kubernetesDashboard = _messages.MessageField('KubernetesDashboard', 10)
networkPolicyConfig = _messages.MessageField('NetworkPolicyConfig', 11)
class AuthenticatorGroupsConfig(_messages.Message):
r"""Configuration for returning group information from authenticators.
Fields:
enabled: Whether this cluster should return group membership lookups
during authentication using a group of security groups.
securityGroup: The name of the security group-of-groups to be used. Only
relevant if enabled = true.
"""
enabled = _messages.BooleanField(1)
securityGroup = _messages.StringField(2)
class AutoUpgradeOptions(_messages.Message):
r"""AutoUpgradeOptions defines the set of options for the user to control
how the Auto Upgrades will proceed.
Fields:
autoUpgradeStartTime: [Output only] This field is set when upgrades are
about to commence with the approximate start time for the upgrades, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
description: [Output only] This field is set when upgrades are about to
commence with the description of the upgrade.
"""
autoUpgradeStartTime = _messages.StringField(1)
description = _messages.StringField(2)
class AutoprovisioningNodePoolDefaults(_messages.Message):
r"""AutoprovisioningNodePoolDefaults contains defaults for a node pool
created by NAP.
Fields:
management: Specifies the node management options for NAP created node-
pools.
minCpuPlatform: Minimum CPU platform to be used for NAP created node
pools. The instance may be scheduled on the specified or newer CPU
platform. Applicable values are the friendly names of CPU platforms,
such as <code>minCpuPlatform: "Intel Haswell"</code> or
<code>minCpuPlatform: "Intel Sandy Bridge"</code>. For more
information, read [how to specify min CPU
platform](https://cloud.google.com/compute/docs/instances/specify-min-
cpu-platform) To unset the min cpu platform field pass "automatic" as
field value.
oauthScopes: Scopes that are used by NAP when creating node pools. If
oauth_scopes are specified, service_account should be empty.
serviceAccount: The Google Cloud Platform Service Account to be used by
the node VMs. If service_account is specified, scopes should be empty.
upgradeSettings: Specifies the upgrade settings for NAP created node pools
"""
management = _messages.MessageField('NodeManagement', 1)
minCpuPlatform = _messages.StringField(2)
oauthScopes = _messages.StringField(3, repeated=True)
serviceAccount = _messages.StringField(4)
upgradeSettings = _messages.MessageField('UpgradeSettings', 5)
class AvailableVersion(_messages.Message):
r"""AvailableVersion is an additional Kubernetes versions offered to users
who subscribed to the release channel.
Fields:
reason: Reason for availability.
version: Kubernetes version.
"""
reason = _messages.StringField(1)
version = _messages.StringField(2)
class BigQueryDestination(_messages.Message):
r"""Parameters for using BigQuery as the destination of resource usage
export.
Fields:
datasetId: The ID of a BigQuery Dataset.
"""
datasetId = _messages.StringField(1)
class BinaryAuthorization(_messages.Message):
r"""Configuration for Binary Authorization.
Fields:
enabled: Enable Binary Authorization for this cluster. If enabled, all
container images will be validated by Google Binauthz.
"""
enabled = _messages.BooleanField(1)
class CancelOperationRequest(_messages.Message):
r"""CancelOperationRequest cancels a single operation.
Fields:
name: The name (project, location, operation id) of the operation to
cancel. Specified in the format 'projects/*/locations/*/operations/*'.
operationId: Deprecated. The server-assigned `name` of the operation. This
field has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the operation resides.
This field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1)
operationId = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class CidrBlock(_messages.Message):
r"""CidrBlock contains an optional name and one CIDR block.
Fields:
cidrBlock: cidr_block must be specified in CIDR notation.
displayName: display_name is an optional field for users to identify CIDR
blocks.
"""
cidrBlock = _messages.StringField(1)
displayName = _messages.StringField(2)
class ClientCertificateConfig(_messages.Message):
r"""Configuration for client certificates on the cluster.
Fields:
issueClientCertificate: Issue a client certificate.
"""
issueClientCertificate = _messages.BooleanField(1)
class CloudBuildConfig(_messages.Message):
r"""Configuration options for the Cloud Build addon.
Fields:
enabled: Whether the Cloud Build addon is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class CloudNatStatus(_messages.Message):
r"""CloudNatStatus contains the desired state of the cloud nat functionality
on this cluster.
Fields:
enabled: Enables Cloud Nat on this cluster. On an update if
update.desired_cloud_nat_status.enabled = true, The API will check if
any Routers in the cluster's network has Cloud NAT enabled on the pod
range. a. If so, then the cluster nodes will be updated to not perform
SNAT. b. If no NAT configuration exists, a new Router with Cloud NAT
on the secondary range will be created first, and then the nodes
will be updated to no longer do SNAT.
"""
enabled = _messages.BooleanField(1)
class CloudRunConfig(_messages.Message):
r"""Configuration options for the Cloud Run feature.
Fields:
disabled: Whether Cloud Run is enabled for this cluster.
enableAlphaFeatures: Enable alpha features of Cloud Run. These features
are only available to trusted testers.
"""
disabled = _messages.BooleanField(1)
enableAlphaFeatures = _messages.BooleanField(2)
class Cluster(_messages.Message):
r"""A Google Kubernetes Engine cluster.
Enums:
NodeSchedulingStrategyValueValuesEnum: Defines behaviour of k8s scheduler.
StatusValueValuesEnum: [Output only] The current status of this cluster.
Messages:
ResourceLabelsValue: The resource labels for the cluster to use to
annotate any related GCE resources.
Fields:
addonsConfig: Configurations for the various addons available to run in
the cluster.
authenticatorGroupsConfig: Configuration controlling RBAC group membership
information.
autoscaling: Cluster-level autoscaling configuration.
binaryAuthorization: Configuration for Binary Authorization.
clusterIpv4Cidr: The IP address range of the container pods in this
cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one
automatically chosen or specify a `/14` block in `10.0.0.0/8`.
clusterTelemetry: Telemetry integration for the cluster.
conditions: Which conditions caused the current cluster state.
costManagementConfig: Configuration for the fine-grained cost management
feature.
createTime: [Output only] The time the cluster was created, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
currentMasterVersion: The current software version of the master endpoint.
currentNodeCount: [Output only] The number of nodes currently in the
cluster. Deprecated. Call Kubernetes API directly to retrieve node
information.
currentNodeVersion: [Output only] Deprecated, use [NodePool.version
](/kubernetes-
engine/docs/reference/rest/v1alpha1/projects.zones.clusters.nodePool)
instead. The current version of the node software components. If they
are currently at multiple versions because they're in the process of
being upgraded, this reflects the minimum version of all nodes.
databaseEncryption: Configuration of etcd encryption.
databaseEncryptionKeyId: Resource name of a CloudKMS key to be used for
the encryption of secrets in etcd. Ex. projects/kms-
project/locations/global/keyRings/ring-1/cryptoKeys/key-1 Deprecated,
use database_encryption instead.
defaultMaxPodsConstraint: The default constraint on the maximum number of
pods that can be run simultaneously on a node in the node pool of this
cluster. Only honored if cluster created with IP Alias support.
description: An optional description of this cluster.
enableKubernetesAlpha: Kubernetes alpha features are enabled on this
cluster. This includes alpha API groups (e.g. v1alpha1) and features
that may not be production ready in the kubernetes version of the master
and nodes. The cluster has no SLA for uptime and master/node upgrades
are disabled. Alpha enabled clusters are automatically deleted thirty
days after creation.
enableTpu: Enable the ability to use Cloud TPUs in this cluster.
endpoint: [Output only] The IP address of this cluster's master endpoint.
The endpoint can be accessed from the internet at
`https://username:password@endpoint/`. See the `masterAuth` property of
this resource for username and password information.
expireTime: [Output only] The time the cluster will be automatically
deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
initialClusterVersion: The initial Kubernetes version for this cluster.
Valid versions are those found in validMasterVersions returned by
getServerConfig. The version can be upgraded over time; such upgrades
are reflected in currentMasterVersion and currentNodeVersion. Users may
specify either explicit versions offered by Kubernetes Engine or version
aliases, which have the following behavior: - "latest": picks the
highest valid Kubernetes version - "1.X": picks the highest valid
patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid
gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit
Kubernetes version - "","-": picks the default Kubernetes version
initialNodeCount: The number of nodes to create in this cluster. You must
ensure that your Compute Engine <a href="/compute/docs/resource-
quotas">resource quota</a> is sufficient for this number of instances.
You must also have available firewall and routes quota. For requests,
this field should only be used in lieu of a "node_pool" object, since
this configuration (along with the "node_config") will be used to create
a "NodePool" object with an auto-generated name. Do not use this and a
node_pool at the same time. This field is deprecated, use
node_pool.initial_node_count instead.
instanceGroupUrls: Deprecated. Use node_pools.instance_group_urls.
ipAllocationPolicy: Configuration for cluster IP allocation.
labelFingerprint: The fingerprint of the set of labels for this cluster.
legacyAbac: Configuration for the legacy ABAC authorization mode.
location: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/regions-zones/regions-zones#available) or
[region](/compute/docs/regions-zones/regions-zones#available) in which
the cluster resides.
locations: The list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the cluster's nodes
should be located.
loggingService: The logging service the cluster should use to write logs.
Currently available options: * `logging.googleapis.com` - the Google
Cloud Logging service. * `none` - no logs will be exported from the
cluster. * if left as an empty string,`logging.googleapis.com` will be
used.
maintenancePolicy: Configure the maintenance policy for this cluster.
masterAuth: The authentication information for accessing the master
endpoint. If unspecified, the defaults are used: For clusters before
v1.12, if master_auth is unspecified, `username` will be set to "admin",
a random password will be generated, and a client certificate will be
issued.
masterAuthorizedNetworksConfig: The configuration options for master
authorized networks feature.
masterIpv4CidrBlock: The IP prefix in CIDR notation to use for the hosted
master network. This prefix will be used for assigning private IP
addresses to the master or set of masters, as well as the ILB VIP. This
field is deprecated, use private_cluster_config.master_ipv4_cidr_block
instead.
monitoringService: The monitoring service the cluster should use to write
metrics. Currently available options: * `monitoring.googleapis.com` -
the Google Cloud Monitoring service. * `none` - no metrics will be
exported from the cluster. * if left as an empty string,
`monitoring.googleapis.com` will be used.
name: The name of this cluster. The name must be unique within this
project and location (e.g. zone or region), and can be up to 40
characters with the following restrictions: * Lowercase letters,
numbers, and hyphens only. * Must start with a letter. * Must end with a
number or a letter.
network: The name of the Google Compute Engine [network](/compute/docs
/networks-and-firewalls#networks) to which the cluster is connected. If
left unspecified, the `default` network will be used.
networkConfig: Configuration for cluster networking.
networkPolicy: Configuration options for the NetworkPolicy feature.
nodeConfig: Parameters used in creating the cluster's nodes. For requests,
this field should only be used in lieu of a "node_pool" object, since
this configuration (along with the "initial_node_count") will be used to
create a "NodePool" object with an auto-generated name. Do not use this
and a node_pool at the same time. For responses, this field will be
populated with the node configuration of the first node pool. (For
configuration of each node pool, see `node_pool.config`) If
unspecified, the defaults are used. This field is deprecated, use
node_pool.config instead.
nodeIpv4CidrSize: [Output only] The size of the address space on each node
for hosting containers. This is provisioned from within the
`container_ipv4_cidr` range. This field will only be set when cluster is
in route-based network mode.
nodePools: The node pools associated with this cluster. This field should
not be set if "node_config" or "initial_node_count" are specified.
nodeSchedulingStrategy: Defines behaviour of k8s scheduler.
podSecurityPolicyConfig: Configuration for the PodSecurityPolicy feature.
privateCluster: If this is a private cluster setup. Private clusters are
clusters that, by default have no external IP addresses on the nodes and
where nodes and the master communicate over private IP addresses. This
field is deprecated, use private_cluster_config.enable_private_nodes
instead.
privateClusterConfig: Configuration for private cluster.
releaseChannel: Release channel configuration.
resourceLabels: The resource labels for the cluster to use to annotate any
related GCE resources.
resourceUsageExportConfig: Configuration for exporting resource usages.
Resource usage export is disabled when this config unspecified.
resourceVersion: Server-defined resource version (etag).
securityProfile: User selected security profile
selfLink: [Output only] Server-defined URL for the resource.
servicesIpv4Cidr: [Output only] The IP address range of the Kubernetes
services in this cluster, in [CIDR](http://en.wikipedia.org/wiki
/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service
addresses are typically put in the last `/16` from the container CIDR.
shieldedNodes: Shielded Nodes configuration.
status: [Output only] The current status of this cluster.
statusMessage: [Output only] Additional information about the current
status of this cluster, if available. Deprecated, use the field
conditions instead.
subnetwork: The name of the Google Compute Engine
[subnetwork](/compute/docs/subnetworks) to which the cluster is
connected. On output this shows the subnetwork ID instead of the name.
tierSettings: Cluster tier settings.
tpuIpv4CidrBlock: [Output only] The IP address range of the Cloud TPUs in
this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `1.2.3.4/29`).
verticalPodAutoscaling: Cluster-level Vertical Pod Autoscaling
configuration.
workloadIdentityConfig: Configuration for the use of k8s Service Accounts
in GCP IAM policies.
zone: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field is deprecated, use location instead.
"""
class NodeSchedulingStrategyValueValuesEnum(_messages.Enum):
r"""Defines behaviour of k8s scheduler.
Values:
STRATEGY_UNSPECIFIED: Use default scheduling strategy.
PRIORITIZE_LEAST_UTILIZED: Least utilized nodes will be prioritized by
k8s scheduler.
PRIORITIZE_MEDIUM_UTILIZED: Nodes with medium utilization will be
prioritized by k8s scheduler. This option improves interoperability of
scheduler with cluster autoscaler.
"""
STRATEGY_UNSPECIFIED = 0
PRIORITIZE_LEAST_UTILIZED = 1
PRIORITIZE_MEDIUM_UTILIZED = 2
class StatusValueValuesEnum(_messages.Enum):
r"""[Output only] The current status of this cluster.
Values:
STATUS_UNSPECIFIED: Not set.
PROVISIONING: The PROVISIONING state indicates the cluster is being
created.
RUNNING: The RUNNING state indicates the cluster has been created and is
fully usable.
RECONCILING: The RECONCILING state indicates that some work is actively
being done on the cluster, such as upgrading the master or node
software. Details can be found in the `statusMessage` field.
STOPPING: The STOPPING state indicates the cluster is being deleted.
ERROR: The ERROR state indicates the cluster may be unusable. Details
can be found in the `statusMessage` field.
DEGRADED: The DEGRADED state indicates the cluster requires user action
to restore full functionality. Details can be found in the
`statusMessage` field.
"""
STATUS_UNSPECIFIED = 0
PROVISIONING = 1
RUNNING = 2
RECONCILING = 3
STOPPING = 4
ERROR = 5
DEGRADED = 6
@encoding.MapUnrecognizedFields('additionalProperties')
class ResourceLabelsValue(_messages.Message):
r"""The resource labels for the cluster to use to annotate any related GCE
resources.
Messages:
AdditionalProperty: An additional property for a ResourceLabelsValue
object.
Fields:
additionalProperties: Additional properties of type ResourceLabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResourceLabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
addonsConfig = _messages.MessageField('AddonsConfig', 1)
authenticatorGroupsConfig = _messages.MessageField('AuthenticatorGroupsConfig', 2)
autoscaling = _messages.MessageField('ClusterAutoscaling', 3)
binaryAuthorization = _messages.MessageField('BinaryAuthorization', 4)
clusterIpv4Cidr = _messages.StringField(5)
clusterTelemetry = _messages.MessageField('ClusterTelemetry', 6)
conditions = _messages.MessageField('StatusCondition', 7, repeated=True)
costManagementConfig = _messages.MessageField('CostManagementConfig', 8)
createTime = _messages.StringField(9)
currentMasterVersion = _messages.StringField(10)
currentNodeCount = _messages.IntegerField(11, variant=_messages.Variant.INT32)
currentNodeVersion = _messages.StringField(12)
databaseEncryption = _messages.MessageField('DatabaseEncryption', 13)
databaseEncryptionKeyId = _messages.StringField(14)
defaultMaxPodsConstraint = _messages.MessageField('MaxPodsConstraint', 15)
description = _messages.StringField(16)
enableKubernetesAlpha = _messages.BooleanField(17)
enableTpu = _messages.BooleanField(18)
endpoint = _messages.StringField(19)
expireTime = _messages.StringField(20)
initialClusterVersion = _messages.StringField(21)
initialNodeCount = _messages.IntegerField(22, variant=_messages.Variant.INT32)
instanceGroupUrls = _messages.StringField(23, repeated=True)
ipAllocationPolicy = _messages.MessageField('IPAllocationPolicy', 24)
labelFingerprint = _messages.StringField(25)
legacyAbac = _messages.MessageField('LegacyAbac', 26)
location = _messages.StringField(27)
locations = _messages.StringField(28, repeated=True)
loggingService = _messages.StringField(29)
maintenancePolicy = _messages.MessageField('MaintenancePolicy', 30)
masterAuth = _messages.MessageField('MasterAuth', 31)
masterAuthorizedNetworksConfig = _messages.MessageField('MasterAuthorizedNetworksConfig', 32)
masterIpv4CidrBlock = _messages.StringField(33)
monitoringService = _messages.StringField(34)
name = _messages.StringField(35)
network = _messages.StringField(36)
networkConfig = _messages.MessageField('NetworkConfig', 37)
networkPolicy = _messages.MessageField('NetworkPolicy', 38)
nodeConfig = _messages.MessageField('NodeConfig', 39)
nodeIpv4CidrSize = _messages.IntegerField(40, variant=_messages.Variant.INT32)
nodePools = _messages.MessageField('NodePool', 41, repeated=True)
nodeSchedulingStrategy = _messages.EnumField('NodeSchedulingStrategyValueValuesEnum', 42)
podSecurityPolicyConfig = _messages.MessageField('PodSecurityPolicyConfig', 43)
privateCluster = _messages.BooleanField(44)
privateClusterConfig = _messages.MessageField('PrivateClusterConfig', 45)
releaseChannel = _messages.MessageField('ReleaseChannel', 46)
resourceLabels = _messages.MessageField('ResourceLabelsValue', 47)
resourceUsageExportConfig = _messages.MessageField('ResourceUsageExportConfig', 48)
resourceVersion = _messages.StringField(49)
securityProfile = _messages.MessageField('SecurityProfile', 50)
selfLink = _messages.StringField(51)
servicesIpv4Cidr = _messages.StringField(52)
shieldedNodes = _messages.MessageField('ShieldedNodes', 53)
status = _messages.EnumField('StatusValueValuesEnum', 54)
statusMessage = _messages.StringField(55)
subnetwork = _messages.StringField(56)
tierSettings = _messages.MessageField('TierSettings', 57)
tpuIpv4CidrBlock = _messages.StringField(58)
verticalPodAutoscaling = _messages.MessageField('VerticalPodAutoscaling', 59)
workloadIdentityConfig = _messages.MessageField('WorkloadIdentityConfig', 60)
zone = _messages.StringField(61)
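# --- Illustrative sketch (not part of the generated module) ---
# A minimal Cluster message as a client might build it before a create request;
# every value below is a made-up example and only uses fields declared above.
def _example_minimal_cluster():
  return Cluster(
      name='example-cluster',
      description='illustrative example only',
      initialNodeCount=3,
  )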
class ClusterAutoscaling(_messages.Message):
r"""ClusterAutoscaling contains global, per-cluster information required by
Cluster Autoscaler to automatically adjust the size of the cluster and
create/delete node pools based on the current needs.
Enums:
AutoscalingProfileValueValuesEnum: Defines autoscaling behaviour.
Fields:
autoprovisioningLocations: The list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the NodePool's nodes can
be created by NAP.
autoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults
contains defaults for a node pool created by NAP.
autoscalingProfile: Defines autoscaling behaviour.
enableNodeAutoprovisioning: Enables automatic node pool creation and
deletion.
resourceLimits: Contains global constraints regarding minimum and maximum
amount of resources in the cluster.
"""
class AutoscalingProfileValueValuesEnum(_messages.Enum):
r"""Defines autoscaling behaviour.
Values:
PROFILE_UNSPECIFIED: No change to autoscaling configuration.
OPTIMIZE_UTILIZATION: Prioritize optimizing utilization of resources.
BALANCED: Use default (balanced) autoscaling configuration.
"""
PROFILE_UNSPECIFIED = 0
OPTIMIZE_UTILIZATION = 1
BALANCED = 2
autoprovisioningLocations = _messages.StringField(1, repeated=True)
autoprovisioningNodePoolDefaults = _messages.MessageField('AutoprovisioningNodePoolDefaults', 2)
autoscalingProfile = _messages.EnumField('AutoscalingProfileValueValuesEnum', 3)
enableNodeAutoprovisioning = _messages.BooleanField(4)
resourceLimits = _messages.MessageField('ResourceLimit', 5, repeated=True)
class ClusterTelemetry(_messages.Message):
r"""Telemetry integration for the cluster.
Enums:
TypeValueValuesEnum: Type of the integration.
Fields:
type: Type of the integration.
"""
class TypeValueValuesEnum(_messages.Enum):
r"""Type of the integration.
Values:
UNSPECIFIED: Not set.
DISABLED: Monitoring integration is disabled.
ENABLED: Monitoring integration is enabled.
SYSTEM_ONLY: Only system components are monitored and logged.
"""
UNSPECIFIED = 0
DISABLED = 1
ENABLED = 2
SYSTEM_ONLY = 3
type = _messages.EnumField('TypeValueValuesEnum', 1)
class ClusterUpdate(_messages.Message):
r"""ClusterUpdate describes an update to the cluster. Exactly one update can
be applied to a cluster with each request, so at most one field can be
provided.
Fields:
concurrentNodeCount: Controls how many nodes to upgrade in parallel. A
maximum of 20 concurrent nodes is allowed. Deprecated. This feature will
be replaced by an equivalent new feature that gives better control over
concurrency. It is not planned to propagate this field to GA and it will
be eventually removed from the API.
desiredAddonsConfig: Configurations for the various addons available to
run in the cluster.
desiredBinaryAuthorization: The desired configuration options for the
Binary Authorization feature.
desiredCloudNatStatus: The desired status of Cloud NAT for this cluster.
Deprecated: use desired_default_snat_status instead.
desiredClusterAutoscaling: The desired cluster-level autoscaling
configuration.
desiredClusterTelemetry: The desired telemetry integration for the
cluster.
desiredCostManagementConfig: The desired configuration for the fine-
grained cost management feature.
desiredDatabaseEncryption: Configuration of etcd encryption.
desiredDefaultSnatStatus: The desired status of whether to disable default
sNAT for this cluster.
desiredImage: The desired name of the image to use for this node. This is
used to create clusters using a custom image.
desiredImageProject: The project containing the desired image to use for
this node. This is used to create clusters using a custom image.
desiredImageType: The desired image type for the node pool. NOTE: Set the
"desired_node_pool" field as well.
desiredIntraNodeVisibilityConfig: The desired config of Intra-node
visibility.
desiredLocations: The desired list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the cluster's nodes
should be located. Changing the locations a cluster is in will result in
nodes being either created or removed from the cluster, depending on
whether locations are being added or removed. This list must always
include the cluster's primary zone.
desiredLoggingService: The logging service the cluster should use to write
metrics. Currently available options: *
"logging.googleapis.com/kubernetes" - the Google Cloud Logging service
with Kubernetes-native resource model * "logging.googleapis.com" - the
Google Cloud Logging service * "none" - no logs will be exported from
the cluster
desiredMasterAuthorizedNetworksConfig: The desired configuration options
for master authorized networks feature.
desiredMasterVersion: The Kubernetes version to change the master to.
Users may specify either explicit versions offered by Kubernetes Engine
or version aliases, which have the following behavior: - "latest":
picks the highest valid Kubernetes version - "1.X": picks the highest
valid patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest
valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an
explicit Kubernetes version - "-": picks the default Kubernetes version
desiredMonitoringService: The monitoring service the cluster should use to
write metrics. Currently available options: *
"monitoring.googleapis.com/kubernetes" - the Google Cloud Monitoring
service with Kubernetes-native resource model *
"monitoring.googleapis.com" - the Google Cloud Monitoring service *
"none" - no metrics will be exported from the cluster
desiredNodePoolAutoscaling: Autoscaler configuration for the node pool
specified in desired_node_pool_id. If there is only one pool in the
cluster and desired_node_pool_id is not provided then the change applies
to that single node pool.
desiredNodePoolId: The node pool to be upgraded. This field is mandatory
if "desired_node_version", "desired_image_family",
"desired_node_pool_autoscaling", or "desired_workload_metadata_config"
is specified and there is more than one node pool on the cluster.
desiredNodeVersion: The Kubernetes version to change the nodes to
(typically an upgrade). Users may specify either explicit versions
offered by Kubernetes Engine or version aliases, which have the
following behavior: - "latest": picks the highest valid Kubernetes
version - "1.X": picks the highest valid patch+gke.N patch in the 1.X
version - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y
version - "1.X.Y-gke.N": picks an explicit Kubernetes version - "-":
picks the Kubernetes master version
desiredPodSecurityPolicyConfig: The desired configuration options for the
PodSecurityPolicy feature.
desiredPrivateClusterConfig: The desired private cluster configuration.
desiredPrivateIpv6Access: The desired status of Private IPv6 access for
this cluster.
desiredReleaseChannel: The desired release channel configuration.
desiredResourceUsageExportConfig: The desired configuration for exporting
resource usage.
desiredShieldedNodes: Configuration for Shielded Nodes.
desiredVerticalPodAutoscaling: Cluster-level Vertical Pod Autoscaling
configuration.
desiredWorkloadIdentityConfig: Configuration for Workload Identity.
privateClusterConfig: The desired private cluster configuration.
securityProfile: User may change security profile during update
"""
concurrentNodeCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
desiredAddonsConfig = _messages.MessageField('AddonsConfig', 2)
desiredBinaryAuthorization = _messages.MessageField('BinaryAuthorization', 3)
desiredCloudNatStatus = _messages.MessageField('CloudNatStatus', 4)
desiredClusterAutoscaling = _messages.MessageField('ClusterAutoscaling', 5)
desiredClusterTelemetry = _messages.MessageField('ClusterTelemetry', 6)
desiredCostManagementConfig = _messages.MessageField('CostManagementConfig', 7)
desiredDatabaseEncryption = _messages.MessageField('DatabaseEncryption', 8)
desiredDefaultSnatStatus = _messages.MessageField('DefaultSnatStatus', 9)
desiredImage = _messages.StringField(10)
desiredImageProject = _messages.StringField(11)
desiredImageType = _messages.StringField(12)
desiredIntraNodeVisibilityConfig = _messages.MessageField('IntraNodeVisibilityConfig', 13)
desiredLocations = _messages.StringField(14, repeated=True)
desiredLoggingService = _messages.StringField(15)
desiredMasterAuthorizedNetworksConfig = _messages.MessageField('MasterAuthorizedNetworksConfig', 16)
desiredMasterVersion = _messages.StringField(17)
desiredMonitoringService = _messages.StringField(18)
desiredNodePoolAutoscaling = _messages.MessageField('NodePoolAutoscaling', 19)
desiredNodePoolId = _messages.StringField(20)
desiredNodeVersion = _messages.StringField(21)
desiredPodSecurityPolicyConfig = _messages.MessageField('PodSecurityPolicyConfig', 22)
desiredPrivateClusterConfig = _messages.MessageField('PrivateClusterConfig', 23)
desiredPrivateIpv6Access = _messages.MessageField('PrivateIPv6Status', 24)
desiredReleaseChannel = _messages.MessageField('ReleaseChannel', 25)
desiredResourceUsageExportConfig = _messages.MessageField('ResourceUsageExportConfig', 26)
desiredShieldedNodes = _messages.MessageField('ShieldedNodes', 27)
desiredVerticalPodAutoscaling = _messages.MessageField('VerticalPodAutoscaling', 28)
desiredWorkloadIdentityConfig = _messages.MessageField('WorkloadIdentityConfig', 29)
privateClusterConfig = _messages.MessageField('PrivateClusterConfig', 30)
securityProfile = _messages.MessageField('SecurityProfile', 31)
class CompleteIPRotationRequest(_messages.Message):
r"""CompleteIPRotationRequest moves the cluster master back into single-IP
mode.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster id) of the cluster to complete
IP rotation. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ConfigConnectorConfig(_messages.Message):
r"""Configuration options for the Config Connector add-on.
Fields:
enabled: Whether Cloud Connector is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class ConsumptionMeteringConfig(_messages.Message):
r"""Parameters for controlling consumption metering.
Fields:
enabled: Whether to enable consumption metering for this cluster. If
enabled, a second BigQuery table will be created to hold resource
consumption records.
"""
enabled = _messages.BooleanField(1)
class ContainerProjectsAggregatedUsableSubnetworksListRequest(_messages.Message):
r"""A ContainerProjectsAggregatedUsableSubnetworksListRequest object.
Fields:
filter: Filtering currently only supports equality on the networkProjectId
and must be in the form: "networkProjectId=[PROJECTID]", where
`networkProjectId` is the project which owns the listed subnetworks.
This defaults to the parent project ID.
pageSize: The max number of results per page that should be returned. If
the number of available results is larger than `page_size`, a
`next_page_token` is returned which can be used to get the next page of
results in subsequent requests. Acceptable values are 0 to 500,
inclusive. (Default: 500)
pageToken: Specifies a page token to use. Set this to the next_page_token
returned by previous list requests to get the next page of results.
parent: The parent project where subnetworks are usable. Specified in the
format 'projects/*'.
"""
filter = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
parent = _messages.StringField(4, required=True)
class ContainerProjectsLocationsClustersDeleteRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersDeleteRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to delete. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to delete.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsClustersGetJwksRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersGetJwksRequest object.
Fields:
parent: The cluster (project, location, cluster id) to get keys for.
Specified in the format 'projects/*/locations/*/clusters/*'.
"""
parent = _messages.StringField(1, required=True)
class ContainerProjectsLocationsClustersGetRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to retrieve. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to retrieve.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsClustersListRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersListRequest object.
Fields:
parent: The parent (project and location) where the clusters will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides, or
"-" for all zones. This field has been deprecated and replaced by the
parent field.
"""
parent = _messages.StringField(1, required=True)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class ContainerProjectsLocationsClustersNodePoolsDeleteRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersNodePoolsDeleteRequest object.
Fields:
    clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to delete. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to delete. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
nodePoolId = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class ContainerProjectsLocationsClustersNodePoolsGetRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersNodePoolsGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to get. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool. This field has been
deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2, required=True)
nodePoolId = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class ContainerProjectsLocationsClustersNodePoolsListRequest(_messages.Message):
r"""A ContainerProjectsLocationsClustersNodePoolsListRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the parent field.
parent: The parent (project, location, cluster id) where the node pools
will be listed. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
clusterId = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsClustersWellKnownGetOpenidConfigurationRequest(_messages.Message):
r"""A
ContainerProjectsLocationsClustersWellKnownGetOpenidConfigurationRequest
object.
Fields:
parent: The cluster (project, location, cluster id) to get the discovery
document for. Specified in the format
'projects/*/locations/*/clusters/*'.
"""
parent = _messages.StringField(1, required=True)
class ContainerProjectsLocationsGetServerConfigRequest(_messages.Message):
r"""A ContainerProjectsLocationsGetServerConfigRequest object.
Fields:
name: The name (project and location) of the server config to get,
specified in the format 'projects/*/locations/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1, required=True)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class ContainerProjectsLocationsListRequest(_messages.Message):
r"""A ContainerProjectsLocationsListRequest object.
Fields:
parent: Contains the name of the resource requested. Specified in the
format 'projects/*'.
"""
parent = _messages.StringField(1, required=True)
class ContainerProjectsLocationsOperationsGetRequest(_messages.Message):
r"""A ContainerProjectsLocationsOperationsGetRequest object.
Fields:
name: The name (project, location, operation id) of the operation to get.
Specified in the format 'projects/*/locations/*/operations/*'.
operationId: Deprecated. The server-assigned `name` of the operation. This
field has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1, required=True)
operationId = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class ContainerProjectsLocationsOperationsListRequest(_messages.Message):
r"""A ContainerProjectsLocationsOperationsListRequest object.
Fields:
parent: The parent (project and location) where the operations will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for, or `-`
for all zones. This field has been deprecated and replaced by the parent
field.
"""
parent = _messages.StringField(1, required=True)
projectId = _messages.StringField(2)
zone = _messages.StringField(3)
class ContainerProjectsZonesClustersDeleteRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersDeleteRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to delete. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to delete.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesClustersGetRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster to retrieve. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to retrieve.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesClustersListRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersListRequest object.
Fields:
parent: The parent (project and location) where the clusters will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides, or
"-" for all zones. This field has been deprecated and replaced by the
parent field.
"""
parent = _messages.StringField(1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesClustersNodePoolsDeleteRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersNodePoolsDeleteRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to delete. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to delete. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
nodePoolId = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
zone = _messages.StringField(5, required=True)
class ContainerProjectsZonesClustersNodePoolsGetRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersNodePoolsGetRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to get. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool. This field has been
deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1, required=True)
name = _messages.StringField(2)
nodePoolId = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
zone = _messages.StringField(5, required=True)
class ContainerProjectsZonesClustersNodePoolsListRequest(_messages.Message):
r"""A ContainerProjectsZonesClustersNodePoolsListRequest object.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the parent field.
parent: The parent (project, location, cluster id) where the node pools
will be listed. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
clusterId = _messages.StringField(1, required=True)
parent = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesGetServerconfigRequest(_messages.Message):
r"""A ContainerProjectsZonesGetServerconfigRequest object.
Fields:
name: The name (project and location) of the server config to get,
specified in the format 'projects/*/locations/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesOperationsGetRequest(_messages.Message):
r"""A ContainerProjectsZonesOperationsGetRequest object.
Fields:
name: The name (project, location, operation id) of the operation to get.
Specified in the format 'projects/*/locations/*/operations/*'.
operationId: Deprecated. The server-assigned `name` of the operation. This
field has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
name = _messages.StringField(1)
operationId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesOperationsListRequest(_messages.Message):
r"""A ContainerProjectsZonesOperationsListRequest object.
Fields:
parent: The parent (project and location) where the operations will be
listed. Specified in the format 'projects/*/locations/*'. Location "-"
matches all zones and all regions.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for, or `-`
for all zones. This field has been deprecated and replaced by the parent
field.
"""
parent = _messages.StringField(1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class CostManagementConfig(_messages.Message):
r"""Configuration for fine-grained cost management feature.
Fields:
enabled: Whether the feature is enabled or not.
"""
enabled = _messages.BooleanField(1)
class CreateClusterRequest(_messages.Message):
r"""CreateClusterRequest creates a cluster.
Fields:
cluster: A [cluster resource](/container-
engine/reference/rest/v1alpha1/projects.zones.clusters)
parent: The parent (project and location) where the cluster will be
created. Specified in the format 'projects/*/locations/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
cluster = _messages.MessageField('Cluster', 1)
parent = _messages.StringField(2)
projectId = _messages.StringField(3)
zone = _messages.StringField(4)
class CreateNodePoolRequest(_messages.Message):
r"""CreateNodePoolRequest creates a node pool for a cluster.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the parent field.
nodePool: The node pool to create.
parent: The parent (project, location, cluster id) where the node pool
will be created. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the parent field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the parent field.
"""
clusterId = _messages.StringField(1)
nodePool = _messages.MessageField('NodePool', 2)
parent = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class CustomImageConfig(_messages.Message):
r"""CustomImageConfig contains the information
Fields:
image: The name of the image to use for this node.
imageFamily: The name of the image family to use for this node.
imageProject: The project containing the image to use for this node.
"""
image = _messages.StringField(1)
imageFamily = _messages.StringField(2)
imageProject = _messages.StringField(3)
class DailyMaintenanceWindow(_messages.Message):
r"""Time window specified for daily maintenance operations.
Fields:
duration: [Output only] Duration of the time window, automatically chosen
to be smallest possible in the given scenario.
startTime: Time within the maintenance window to start the maintenance
operations. It must be in format "HH:MM", where HH : [00-23] and MM :
[00-59] GMT.
"""
duration = _messages.StringField(1)
startTime = _messages.StringField(2)
class DatabaseEncryption(_messages.Message):
r"""Configuration of etcd encryption.
Enums:
StateValueValuesEnum: Denotes the state of etcd encryption.
Fields:
keyName: Name of CloudKMS key to use for the encryption of secrets in
etcd. Ex. projects/my-project/locations/global/keyRings/my-
ring/cryptoKeys/my-key
state: Denotes the state of etcd encryption.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Denotes the state of etcd encryption.
Values:
UNKNOWN: Should never be set
ENCRYPTED: Secrets in etcd are encrypted.
DECRYPTED: Secrets in etcd are stored in plain text (at etcd level) -
this is unrelated to Google Compute Engine level full disk encryption.
"""
UNKNOWN = 0
ENCRYPTED = 1
DECRYPTED = 2
keyName = _messages.StringField(1)
state = _messages.EnumField('StateValueValuesEnum', 2)
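# Illustrative sketch (not part of the generated API surface): constructing a
# DatabaseEncryption message that requests encryption of etcd secrets with a
# Cloud KMS key. The key name is a placeholder and the helper name is
# hypothetical; it assumes the protorpclite keyword-argument constructor.
def _example_database_encryption():
  """Builds a DatabaseEncryption message with etcd encryption enabled."""
  return DatabaseEncryption(
      state=DatabaseEncryption.StateValueValuesEnum.ENCRYPTED,
      keyName='projects/my-project/locations/global/keyRings/my-ring/'
              'cryptoKeys/my-key')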
class DefaultSnatStatus(_messages.Message):
r"""DefaultSnatStatus contains the desired state of whether default sNAT
should be disabled on the cluster.
Fields:
disabled: Disables cluster default sNAT rules.
"""
disabled = _messages.BooleanField(1)
class DnsCacheConfig(_messages.Message):
r"""Configuration for NodeLocal DNSCache
Fields:
enabled: Whether NodeLocal DNSCache is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class FeatureConfig(_messages.Message):
r"""FeatureConfig is the configuration for a specific feature including the
definition of the feature as well as the tier in which it resides.
Enums:
FeatureValueValuesEnum: The feature that is being configured with this
value.
TierValueValuesEnum: The tier in which the configured feature resides.
Fields:
feature: The feature that is being configured with this value.
tier: The tier in which the configured feature resides.
"""
class FeatureValueValuesEnum(_messages.Enum):
r"""The feature that is being configured with this value.
Values:
DEFAULT_FEATURE: DEFAULT_FEATURE is the default zero value of the
Feature. This value is valid.
VERTICAL_POD_AUTOSCALER: The vertical pod autoscaling feature.
NODE_AUTO_PROVISIONING: The node auto provisioning feature.
BINARY_AUTHORIZATION: The binary authorization feature.
RESOURCE_LABELS: The resource labels feature.
USAGE_METERING: The GKE usage metering feature.
CLOUD_RUN_ON_GKE: The Cloud Run on GKE feature.
"""
DEFAULT_FEATURE = 0
VERTICAL_POD_AUTOSCALER = 1
NODE_AUTO_PROVISIONING = 2
BINARY_AUTHORIZATION = 3
RESOURCE_LABELS = 4
USAGE_METERING = 5
CLOUD_RUN_ON_GKE = 6
class TierValueValuesEnum(_messages.Enum):
r"""The tier in which the configured feature resides.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
feature = _messages.EnumField('FeatureValueValuesEnum', 1)
tier = _messages.EnumField('TierValueValuesEnum', 2)
class GcePersistentDiskCsiDriverConfig(_messages.Message):
r"""Configuration for the GCE PD CSI driver. This option can only be enabled
at cluster creation time.
Fields:
enabled: Whether the GCE PD CSI driver is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class GetJSONWebKeysResponse(_messages.Message):
r"""GetJSONWebKeysResponse is a valid JSON Web Key Set as specififed in rfc
7517
Fields:
cacheHeader: OnePlatform automagically extracts this field and uses it to
set the HTTP Cache-Control header.
keys: The public component of the keys used by the cluster to sign token
requests.
"""
cacheHeader = _messages.MessageField('HttpCacheControlResponseHeader', 1)
keys = _messages.MessageField('Jwk', 2, repeated=True)
class GetOpenIDConfigResponse(_messages.Message):
r"""GetOpenIDConfigResponse is an OIDC discovery document for the cluster.
See the OpenID Connect Discovery 1.0 specification for details.
Fields:
cacheHeader: OnePlatform automagically extracts this field and uses it to
set the HTTP Cache-Control header.
claims_supported: Supported claims.
grant_types: Supported grant types.
id_token_signing_alg_values_supported: supported ID Token signing
Algorithms.
issuer: OIDC Issuer.
jwks_uri: JSON Web Key uri.
response_types_supported: Supported response types.
subject_types_supported: Supported subject types.
"""
cacheHeader = _messages.MessageField('HttpCacheControlResponseHeader', 1)
claims_supported = _messages.StringField(2, repeated=True)
grant_types = _messages.StringField(3, repeated=True)
id_token_signing_alg_values_supported = _messages.StringField(4, repeated=True)
issuer = _messages.StringField(5)
jwks_uri = _messages.StringField(6)
response_types_supported = _messages.StringField(7, repeated=True)
subject_types_supported = _messages.StringField(8, repeated=True)
class HorizontalPodAutoscaling(_messages.Message):
r"""Configuration options for the horizontal pod autoscaling feature, which
increases or decreases the number of replica pods a replication controller
has based on the resource usage of the existing pods.
Fields:
disabled: Whether the Horizontal Pod Autoscaling feature is enabled in the
cluster. When enabled, it ensures that metrics are collected into
Stackdriver Monitoring.
"""
disabled = _messages.BooleanField(1)
class HttpCacheControlResponseHeader(_messages.Message):
r"""RFC-2616: cache control support
Fields:
age: 14.6 response cache age, in seconds since the response is generated
directive: 14.9 request and response directives
expires: 14.21 response cache expires, in RFC 1123 date format
"""
age = _messages.IntegerField(1)
directive = _messages.StringField(2)
expires = _messages.StringField(3)
class HttpLoadBalancing(_messages.Message):
r"""Configuration options for the HTTP (L7) load balancing controller addon,
which makes it easy to set up HTTP load balancers for services in a cluster.
Fields:
disabled: Whether the HTTP Load Balancing controller is enabled in the
cluster. When enabled, it runs a small pod in the cluster that manages
the load balancers.
"""
disabled = _messages.BooleanField(1)
class IPAllocationPolicy(_messages.Message):
r"""Configuration for controlling how IPs are allocated in the cluster.
Fields:
allowRouteOverlap: If true, allow allocation of cluster CIDR ranges that
overlap with certain kinds of network routes. By default we do not allow
cluster CIDR ranges to intersect with any user declared routes. With
allow_route_overlap == true, we allow overlapping with CIDR ranges that
are larger than the cluster CIDR range. If this field is set to true,
then cluster and services CIDRs must be fully-specified (e.g.
`10.96.0.0/14`, but not `/14`), which means: 1) When `use_ip_aliases` is
true, `cluster_ipv4_cidr_block` and `services_ipv4_cidr_block` must
be fully-specified. 2) When `use_ip_aliases` is false,
`cluster.cluster_ipv4_cidr` must be fully-specified.
clusterIpv4Cidr: This field is deprecated, use cluster_ipv4_cidr_block.
clusterIpv4CidrBlock: The IP address range for the cluster pod IPs. If
this field is set, then `cluster.cluster_ipv4_cidr` must be left blank.
This field is only applicable when `use_ip_aliases` is true. Set to
blank to have a range chosen with the default size. Set to /netmask
(e.g. `/14`) to have a range chosen with a specific netmask. Set to a
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific
range to use.
clusterSecondaryRangeName: The name of the secondary range to be used for
the cluster CIDR block. The secondary range will be used for pod IP
addresses. This must be an existing secondary range associated with the
cluster subnetwork. This field is only applicable if use_ip_aliases is
true and create_subnetwork is false.
createSubnetwork: Whether a new subnetwork will be created automatically
for the cluster. This field is only applicable when `use_ip_aliases` is
true.
nodeIpv4Cidr: This field is deprecated, use node_ipv4_cidr_block.
nodeIpv4CidrBlock: The IP address range of the instance IPs in this
cluster. This is applicable only if `create_subnetwork` is true. Set
to blank to have a range chosen with the default size. Set to /netmask
(e.g. `/14`) to have a range chosen with a specific netmask. Set to a
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific
range to use.
servicesIpv4Cidr: This field is deprecated, use services_ipv4_cidr_block.
servicesIpv4CidrBlock: The IP address range of the services IPs in this
cluster. If blank, a range will be automatically chosen with the default
size. This field is only applicable when `use_ip_aliases` is true. Set
to blank to have a range chosen with the default size. Set to /netmask
(e.g. `/14`) to have a range chosen with a specific netmask. Set to a
[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific
range to use.
servicesSecondaryRangeName: The name of the secondary range to be used
for the services CIDR block. The secondary range will be used for
service ClusterIPs. This must be an existing secondary range associated
with the cluster subnetwork. This field is only applicable when
use_ip_aliases is true and create_subnetwork is false.
subnetworkName: A custom subnetwork name to be used if `create_subnetwork`
is true. If this field is empty, then an automatic name will be chosen
for the new subnetwork.
tpuIpv4CidrBlock: The IP address range of the Cloud TPUs in this cluster.
If unspecified, a range will be automatically chosen with the default
size. This field is only applicable when `use_ip_aliases` is true, and
it must not be specified when the `tpu_use_service_networking` is
`true`. Unspecified to have a range chosen with the default size `/20`.
Set to /netmask (e.g. `/14`) to have a range chosen with a specific
netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private
networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick
a specific range to use.
tpuUseServiceNetworking: Enable Cloud TPU's Service Networking mode. In
this mode, the CIDR blocks used by the Cloud TPUs will be allocated and
managed by Service Networking, instead of GKE. This field must be
`false` when `tpu_ipv4_cidr_block` is specified.
useIpAliases: Whether alias IPs will be used for pod IPs in the cluster.
This is used in conjunction with use_routes. It cannot be true if
use_routes is true. If both use_ip_aliases and use_routes are false,
then the server picks the default IP allocation mode.
"""
allowRouteOverlap = _messages.BooleanField(1)
clusterIpv4Cidr = _messages.StringField(2)
clusterIpv4CidrBlock = _messages.StringField(3)
clusterSecondaryRangeName = _messages.StringField(4)
createSubnetwork = _messages.BooleanField(5)
nodeIpv4Cidr = _messages.StringField(6)
nodeIpv4CidrBlock = _messages.StringField(7)
servicesIpv4Cidr = _messages.StringField(8)
servicesIpv4CidrBlock = _messages.StringField(9)
servicesSecondaryRangeName = _messages.StringField(10)
subnetworkName = _messages.StringField(11)
tpuIpv4CidrBlock = _messages.StringField(12)
tpuUseServiceNetworking = _messages.BooleanField(13)
useIpAliases = _messages.BooleanField(14)
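# Illustrative sketch (not part of the generated module): a VPC-native (alias
# IP) allocation policy that lets the server pick pod and services ranges by
# netmask size. Field values are placeholders and the helper name is
# hypothetical.
def _example_ip_allocation_policy():
  """Builds an IPAllocationPolicy using alias IPs with chosen netmask sizes."""
  return IPAllocationPolicy(
      useIpAliases=True,
      createSubnetwork=False,
      clusterIpv4CidrBlock='/14',   # let the server choose a /14 pod range
      servicesIpv4CidrBlock='/20')  # let the server choose a /20 services range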
class IntraNodeVisibilityConfig(_messages.Message):
r"""IntraNodeVisibilityConfig contains the desired config of the intra-node
visibility on this cluster.
Fields:
enabled: Enables intra node visibility for this cluster.
"""
enabled = _messages.BooleanField(1)
class IstioConfig(_messages.Message):
r"""Configuration options for Istio addon.
Enums:
AuthValueValuesEnum: The specified Istio auth mode, either none, or mutual
TLS.
Fields:
auth: The specified Istio auth mode, either none, or mutual TLS.
csmMeshName: DEPRECATED: No longer used.
disabled: Whether Istio is enabled for this cluster.
"""
class AuthValueValuesEnum(_messages.Enum):
r"""The specified Istio auth mode, either none, or mutual TLS.
Values:
AUTH_NONE: auth not enabled
AUTH_MUTUAL_TLS: auth mutual TLS enabled
"""
AUTH_NONE = 0
AUTH_MUTUAL_TLS = 1
auth = _messages.EnumField('AuthValueValuesEnum', 1)
csmMeshName = _messages.StringField(2)
disabled = _messages.BooleanField(3)
class Jwk(_messages.Message):
r"""Jwk is a JSON Web Key as specified in RFC 7517
Fields:
alg: Algorithm.
crv: Used for ECDSA keys.
e: Used for RSA keys.
kid: Key ID.
kty: Key Type.
n: Used for RSA keys.
use: Permitted uses for the public keys.
x: Used for ECDSA keys.
y: Used for ECDSA keys.
"""
alg = _messages.StringField(1)
crv = _messages.StringField(2)
e = _messages.StringField(3)
kid = _messages.StringField(4)
kty = _messages.StringField(5)
n = _messages.StringField(6)
use = _messages.StringField(7)
x = _messages.StringField(8)
y = _messages.StringField(9)
class KalmConfig(_messages.Message):
r"""Configuration options for the KALM addon.
Fields:
enabled: Whether KALM is enabled for this cluster.
"""
enabled = _messages.BooleanField(1)
class KubernetesDashboard(_messages.Message):
r"""Configuration for the Kubernetes Dashboard.
Fields:
disabled: Whether the Kubernetes Dashboard is enabled for this cluster.
"""
disabled = _messages.BooleanField(1)
class LegacyAbac(_messages.Message):
r"""Configuration for the legacy Attribute Based Access Control
authorization mode.
Fields:
enabled: Whether the ABAC authorizer is enabled for this cluster. When
enabled, identities in the system, including service accounts, nodes,
and controllers, will have statically granted permissions beyond those
provided by the RBAC configuration or IAM.
"""
enabled = _messages.BooleanField(1)
class LinuxNodeConfig(_messages.Message):
r"""Parameters that can be configured on Linux nodes.
Messages:
SysctlsValue: The Linux kernel parameters to be applied to the nodes and
all pods running on the nodes. The following parameters are supported.
kernel.pid_max kernel.threads-max fs.inotify.max_queued_events
fs.inotify.max_user_instances fs.inotify.max_user_watches
net.core.netdev_budget net.core.netdev_budget_usecs
net.core.netdev_max_backlog net.core.rmem_default net.core.rmem_max
net.core.wmem_default net.core.wmem_max net.core.optmem_max
net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_mem
net.ipv4.tcp_fin_timeout net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes net.ipv4.tcp_keepalive_time
net.ipv4.tcp_max_orphans net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries
net.ipv4.tcp_tw_reuse net.ipv4.udp_mem net.ipv4.udp_rmem_min
net.ipv4.udp_wmem_min net.netfilter.nf_conntrack_generic_timeout
net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_tcp_timeout_close_wait
net.netfilter.nf_conntrack_tcp_timeout_established
Fields:
sysctls: The Linux kernel parameters to be applied to the nodes and all
pods running on the nodes. The following parameters are supported.
kernel.pid_max kernel.threads-max fs.inotify.max_queued_events
fs.inotify.max_user_instances fs.inotify.max_user_watches
net.core.netdev_budget net.core.netdev_budget_usecs
net.core.netdev_max_backlog net.core.rmem_default net.core.rmem_max
net.core.wmem_default net.core.wmem_max net.core.optmem_max
net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_mem
net.ipv4.tcp_fin_timeout net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes net.ipv4.tcp_keepalive_time
net.ipv4.tcp_max_orphans net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries
net.ipv4.tcp_tw_reuse net.ipv4.udp_mem net.ipv4.udp_rmem_min
net.ipv4.udp_wmem_min net.netfilter.nf_conntrack_generic_timeout
net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_tcp_timeout_close_wait
net.netfilter.nf_conntrack_tcp_timeout_established
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class SysctlsValue(_messages.Message):
r"""The Linux kernel parameters to be applied to the nodes and all pods
running on the nodes. The following parameters are supported.
kernel.pid_max kernel.threads-max fs.inotify.max_queued_events
fs.inotify.max_user_instances fs.inotify.max_user_watches
net.core.netdev_budget net.core.netdev_budget_usecs
net.core.netdev_max_backlog net.core.rmem_default net.core.rmem_max
net.core.wmem_default net.core.wmem_max net.core.optmem_max
net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_mem
net.ipv4.tcp_fin_timeout net.ipv4.tcp_keepalive_intvl
net.ipv4.tcp_keepalive_probes net.ipv4.tcp_keepalive_time
net.ipv4.tcp_max_orphans net.ipv4.tcp_max_syn_backlog
net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_tw_reuse
net.ipv4.udp_mem net.ipv4.udp_rmem_min net.ipv4.udp_wmem_min
net.netfilter.nf_conntrack_generic_timeout net.netfilter.nf_conntrack_max
net.netfilter.nf_conntrack_tcp_timeout_close_wait
net.netfilter.nf_conntrack_tcp_timeout_established
Messages:
AdditionalProperty: An additional property for a SysctlsValue object.
Fields:
additionalProperties: Additional properties of type SysctlsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a SysctlsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
sysctls = _messages.MessageField('SysctlsValue', 1)
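# Illustrative sketch: because SysctlsValue carries unrecognized fields, a plain
# {name: value} dict of kernel parameters has to be expressed as a list of
# AdditionalProperty entries. The helper name and parameter values are
# hypothetical.
def _example_linux_node_config(sysctls_dict):
  """Converts a {name: value} dict into a LinuxNodeConfig message."""
  properties = [
      LinuxNodeConfig.SysctlsValue.AdditionalProperty(key=k, value=v)
      for k, v in sorted(sysctls_dict.items())]
  return LinuxNodeConfig(
      sysctls=LinuxNodeConfig.SysctlsValue(additionalProperties=properties))

# Example: _example_linux_node_config({'net.core.somaxconn': '1024'})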
class ListClustersResponse(_messages.Message):
r"""ListClustersResponse is the result of ListClustersRequest.
Fields:
clusters: A list of clusters in the project in the specified zone, or
across all zones.
missingZones: If any zones are listed here, the list of clusters returned
may be missing those zones.
"""
clusters = _messages.MessageField('Cluster', 1, repeated=True)
missingZones = _messages.StringField(2, repeated=True)
class ListLocationsResponse(_messages.Message):
r"""ListLocationsResponse returns the list of all GKE locations and their
recommendation state.
Fields:
locations: A full list of GKE locations.
nextPageToken: Only return ListLocationsResponse that occur after the
page_token. This value should be populated from the
ListLocationsResponse.next_page_token if that response token was set
(which happens when listing more Locations than fit in a single
ListLocationsResponse). This is currently not used and will be honored
once we use pagination.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListNodePoolsResponse(_messages.Message):
r"""ListNodePoolsResponse is the result of ListNodePoolsRequest.
Fields:
nodePools: A list of node pools for a cluster.
"""
nodePools = _messages.MessageField('NodePool', 1, repeated=True)
class ListOperationsResponse(_messages.Message):
r"""ListOperationsResponse is the result of ListOperationsRequest.
Fields:
missingZones: If any zones are listed here, the list of operations
returned may be missing the operations from those zones.
operations: A list of operations in the project in the specified zone.
"""
missingZones = _messages.StringField(1, repeated=True)
operations = _messages.MessageField('Operation', 2, repeated=True)
class ListUsableSubnetworksResponse(_messages.Message):
r"""ListUsableSubnetworksResponse is the response of
ListUsableSubnetworksRequest.
Fields:
nextPageToken: This token allows you to get the next page of results for
list requests. If the number of results is larger than `page_size`, use
the `next_page_token` as a value for the query parameter `page_token` in
the next request. The value will become empty when there are no more
pages.
subnetworks: A list of usable subnetworks in the specified network
project.
"""
nextPageToken = _messages.StringField(1)
subnetworks = _messages.MessageField('UsableSubnetwork', 2, repeated=True)
class LocalSsdVolumeConfig(_messages.Message):
r"""LocalSsdVolumeConfig is comprised of three fields, count, type, and
format. Count is the number of ssds of this grouping requested, type is the
interface type and is either nvme or scsi, and format is whether the disk is
to be formatted with a filesystem or left for block storage
Enums:
FormatValueValuesEnum: Format of the local SSD (fs/block).
Fields:
count: Number of local SSDs to use
format: Format of the local SSD (fs/block).
type: Local SSD interface to use (nvme/scsi).
"""
class FormatValueValuesEnum(_messages.Enum):
r"""Format of the local SSD (fs/block).
Values:
FORMAT_UNSPECIFIED: Default value
FS: File system formatted
BLOCK: Raw block
"""
FORMAT_UNSPECIFIED = 0
FS = 1
BLOCK = 2
count = _messages.IntegerField(1, variant=_messages.Variant.INT32)
format = _messages.EnumField('FormatValueValuesEnum', 2)
type = _messages.StringField(3)
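# Illustrative sketch: requesting two NVMe local SSDs formatted with a
# filesystem. The helper name is hypothetical and the values are placeholders.
def _example_local_ssd_volume_config():
  """Builds a LocalSsdVolumeConfig for two filesystem-formatted NVMe SSDs."""
  return LocalSsdVolumeConfig(
      count=2,
      type='nvme',
      format=LocalSsdVolumeConfig.FormatValueValuesEnum.FS)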
class Location(_messages.Message):
r"""Location returns the location name, and if the location is recommended
for GKE cluster scheduling.
Enums:
TypeValueValuesEnum: Contains the type of location this Location is for.
Regional or Zonal.
Fields:
name: Contains the name of the resource requested. Specified in the format
'projects/*/locations/*'.
recommended: Recommended is a bool combining the drain state of the
location (i.e., has the region been drained manually?) and the stockout
status of any zone according to Zone Advisor. This will be internal only
for use by pantheon.
type: Contains the type of location this Location is for. Regional or
Zonal.
"""
class TypeValueValuesEnum(_messages.Enum):
r"""Contains the type of location this Location is for. Regional or Zonal.
Values:
LOCATION_TYPE_UNSPECIFIED: LOCATION_TYPE_UNSPECIFIED means the location
type was not determined.
ZONE: A GKE Location where Zonal clusters can be created.
REGION: A GKE Location where Regional clusters can be created.
"""
LOCATION_TYPE_UNSPECIFIED = 0
ZONE = 1
REGION = 2
name = _messages.StringField(1)
recommended = _messages.BooleanField(2)
type = _messages.EnumField('TypeValueValuesEnum', 3)
class MaintenancePolicy(_messages.Message):
r"""MaintenancePolicy defines the maintenance policy to be used for the
cluster.
Fields:
resourceVersion: A hash identifying the version of this policy, so that
updates to fields of the policy won't accidentally undo intermediate
changes (and so that users of the API unaware of some fields won't
accidentally remove other fields). Make a <code>get()</code> request to
the cluster to get the current resource version and include it with
requests to set the policy.
window: Specifies the maintenance window in which maintenance may be
performed.
"""
resourceVersion = _messages.StringField(1)
window = _messages.MessageField('MaintenanceWindow', 2)
class MaintenanceWindow(_messages.Message):
r"""MaintenanceWindow defines the maintenance window to be used for the
cluster.
Messages:
MaintenanceExclusionsValue: Exceptions to maintenance window. Non-
emergency maintenance should not occur in these windows.
Fields:
dailyMaintenanceWindow: DailyMaintenanceWindow specifies a daily
maintenance operation window.
maintenanceExclusions: Exceptions to maintenance window. Non-emergency
maintenance should not occur in these windows.
recurringWindow: RecurringWindow specifies some number of recurring time
periods for maintenance to occur. The time windows may be overlapping.
If no maintenance windows are set, maintenance can occur at any time.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MaintenanceExclusionsValue(_messages.Message):
r"""Exceptions to maintenance window. Non-emergency maintenance should not
occur in these windows.
Messages:
AdditionalProperty: An additional property for a
MaintenanceExclusionsValue object.
Fields:
additionalProperties: Additional properties of type
MaintenanceExclusionsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MaintenanceExclusionsValue object.
Fields:
key: Name of the additional property.
value: A TimeWindow attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('TimeWindow', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
dailyMaintenanceWindow = _messages.MessageField('DailyMaintenanceWindow', 1)
maintenanceExclusions = _messages.MessageField('MaintenanceExclusionsValue', 2)
recurringWindow = _messages.MessageField('RecurringTimeWindow', 3)
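# Illustrative sketch: a MaintenancePolicy that restricts maintenance to a
# daily window starting at 03:00 GMT. The resource_version argument would
# normally come from a prior get() on the cluster; the helper name is
# hypothetical.
def _example_maintenance_policy(resource_version):
  """Builds a MaintenancePolicy with a daily 03:00 GMT maintenance window."""
  window = MaintenanceWindow(
      dailyMaintenanceWindow=DailyMaintenanceWindow(startTime='03:00'))
  return MaintenancePolicy(window=window, resourceVersion=resource_version)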
class MasterAuth(_messages.Message):
r"""The authentication information for accessing the master endpoint.
Authentication can be done using HTTP basic auth or using client
certificates.
Fields:
clientCertificate: [Output only] Base64-encoded public certificate used by
clients to authenticate to the cluster endpoint.
clientCertificateConfig: Configuration for client certificate
authentication on the cluster. For clusters before v1.12, if no
configuration is specified, a client certificate is issued.
clientKey: [Output only] Base64-encoded private key used by clients to
authenticate to the cluster endpoint.
clusterCaCertificate: [Output only] Base64-encoded public certificate that
is the root of trust for the cluster.
password: The password to use for HTTP basic authentication to the master
endpoint. Because the master endpoint is open to the Internet, you
should create a strong password. If a password is provided for cluster
creation, username must be non-empty.
username: The username to use for HTTP basic authentication to the master
endpoint. For clusters v1.6.0 and later, basic authentication can be
disabled by leaving username unspecified (or setting it to the empty
string).
"""
clientCertificate = _messages.StringField(1)
clientCertificateConfig = _messages.MessageField('ClientCertificateConfig', 2)
clientKey = _messages.StringField(3)
clusterCaCertificate = _messages.StringField(4)
password = _messages.StringField(5)
username = _messages.StringField(6)
class MasterAuthorizedNetworksConfig(_messages.Message):
r"""Configuration options for the master authorized networks feature.
Enabled master authorized networks will disallow all external traffic to
access Kubernetes master through HTTPS except traffic from the given CIDR
blocks, Google Compute Engine Public IPs and Google Prod IPs.
Fields:
cidrBlocks: cidr_blocks define up to 50 external networks that could
access Kubernetes master through HTTPS.
enabled: Whether or not master authorized networks is enabled.
"""
cidrBlocks = _messages.MessageField('CidrBlock', 1, repeated=True)
enabled = _messages.BooleanField(2)
class MaxPodsConstraint(_messages.Message):
r"""Constraints applied to pods.
Fields:
maxPodsPerNode: Constraint enforced on the max num of pods per node.
"""
maxPodsPerNode = _messages.IntegerField(1)
class Metric(_messages.Message):
r"""Progress metric is (string, int|float|string) pair.
Fields:
doubleValue: For metrics with floating point value.
intValue: For metrics with integer value.
name: Required. Metric name, e.g., "nodes total", "percent done".
stringValue: For metrics with custom values (ratios, visual progress,
etc.).
"""
doubleValue = _messages.FloatField(1)
intValue = _messages.IntegerField(2)
name = _messages.StringField(3)
stringValue = _messages.StringField(4)
class NetworkConfig(_messages.Message):
r"""Parameters for cluster networking.
Fields:
disableDefaultSnat: Whether the cluster disables default in-node sNAT
rules. In-node sNAT rules will be disabled when this flag is true. When
set to false, default IP masquerade rules will be applied to the nodes
to prevent sNAT on cluster internal traffic. Deprecated. Use
default_snat_status instead
enableCloudNat: Whether GKE Cloud NAT is enabled for this cluster.
Requires that the cluster has already set
IPAllocationPolicy.use_ip_aliases to true. Deprecated: use
disable_default_snat instead.
enableIntraNodeVisibility: Whether Intra-node visibility is enabled for
this cluster. This enables flow logs for same node pod to pod traffic.
enablePrivateIpv6Access: Whether or not Private IPv6 access is enabled.
This enables direct connectivity from GKE pods to Google Cloud services
over gRPC.
enableSharedNetwork: Deprecated: This flag doesn't need to be flipped for
using shared VPC and it has no effect.
network: Output only. The relative name of the Google Compute Engine
network(/compute/docs/networks-and-firewalls#networks) to which the
cluster is connected. Example: projects/my-project/global/networks/my-
network
subnetwork: Output only. The relative name of the Google Compute Engine
[subnetwork](/compute/docs/vpc) to which the cluster is connected.
Example: projects/my-project/regions/us-central1/subnetworks/my-subnet
"""
disableDefaultSnat = _messages.BooleanField(1)
enableCloudNat = _messages.BooleanField(2)
enableIntraNodeVisibility = _messages.BooleanField(3)
enablePrivateIpv6Access = _messages.BooleanField(4)
enableSharedNetwork = _messages.BooleanField(5)
network = _messages.StringField(6)
subnetwork = _messages.StringField(7)
class NetworkPolicy(_messages.Message):
r"""Configuration options for the NetworkPolicy feature.
https://kubernetes.io/docs/concepts/services-networking/networkpolicies/
Enums:
ProviderValueValuesEnum: The selected network policy provider.
Fields:
enabled: Whether network policy is enabled on the cluster.
provider: The selected network policy provider.
"""
class ProviderValueValuesEnum(_messages.Enum):
r"""The selected network policy provider.
Values:
PROVIDER_UNSPECIFIED: Not set
CALICO: Tigera (Calico Felix).
"""
PROVIDER_UNSPECIFIED = 0
CALICO = 1
enabled = _messages.BooleanField(1)
provider = _messages.EnumField('ProviderValueValuesEnum', 2)
class NetworkPolicyConfig(_messages.Message):
r"""Configuration for NetworkPolicy. This only tracks whether the addon is
enabled or not on the Master, it does not track whether network policy is
enabled for the nodes.
Fields:
disabled: Whether NetworkPolicy is enabled for this cluster.
"""
disabled = _messages.BooleanField(1)
class NodeConfig(_messages.Message):
r"""Parameters that describe the nodes in a cluster.
Messages:
LabelsValue: The map of Kubernetes labels (key/value pairs) to be applied
to each node. These will be added in addition to any default label(s) that
Kubernetes may apply to the node. In case of conflict in label keys, the
applied set may differ depending on the Kubernetes version -- it's best
to assume the behavior is undefined and conflicts should be avoided. For
more information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-
objects/labels/
MetadataValue: The metadata key/value pairs assigned to instances in the
cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less
than 128 bytes in length. These are reflected as part of a URL in the
metadata server. Additionally, to avoid ambiguity, keys must not
conflict with any other metadata keys for the project or be one of the
reserved keys: "cluster-location" "cluster-name" "cluster-uid"
"configure-sh" "containerd-configure-sh" "enable-os-login" "gci-
ensure-gke-docker" "gci-metrics-enabled" "gci-update-strategy"
"instance-template" "kube-env" "startup-script" "user-data"
"disable-address-manager" "windows-startup-script-ps1" "common-psm1"
"k8s-node-setup-psm1" "install-ssh-psm1" "user-profile-psm1" "serial-
port-logging-enable" Values are free-form strings, and only have meaning
as interpreted by the image running in the instance. The only
restriction placed on them is that each value's size must be less than
or equal to 32 KB. The total size of all keys and values must be less
than 512 KB.
Fields:
accelerators: A list of hardware accelerators to be attached to each node.
See https://cloud.google.com/compute/docs/gpus for more information
about support for GPUs.
bootDiskKmsKey: The Customer Managed Encryption Key used to encrypt the
boot disk attached to each node in the node pool. This should be of the
form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]
/cryptoKeys/[KEY_NAME]. For more information about protecting resources
with Cloud KMS Keys please see:
https://cloud.google.com/compute/docs/disks/customer-managed-encryption
diskSizeGb: Size of the disk attached to each node, specified in GB. The
smallest allowed disk size is 10GB. If unspecified, the default disk
size is 100GB.
diskType: Type of the disk attached to each node (e.g. 'pd-standard' or
'pd-ssd') If unspecified, the default disk type is 'pd-standard'
imageType: The image type to use for this node. Note that for a given
image type, the latest version of it will be used.
kubeletConfig: Node kubelet configs.
labels: The map of Kubernetes labels (key/value pairs) to be applied to
each node. These will be added in addition to any default label(s) that
Kubernetes may apply to the node. In case of conflict in label keys, the
applied set may differ depending on the Kubernetes version -- it's best
to assume the behavior is undefined and conflicts should be avoided. For
more information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-
objects/labels/
linuxNodeConfig: Parameters that can be configured on Linux nodes.
localSsdCount: The number of local SSD disks to be attached to the node.
The limit for this value is dependent upon the maximum number of disks
available on a machine per zone. See:
https://cloud.google.com/compute/docs/disks/local-ssd for more
information.
localSsdVolumeConfigs: Parameters for using Local SSD with extra options
as hostpath or local volumes
machineType: The name of a Google Compute Engine [machine
type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If
unspecified, the default machine type is `n1-standard-1`.
metadata: The metadata key/value pairs assigned to instances in the
cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less
than 128 bytes in length. These are reflected as part of a URL in the
metadata server. Additionally, to avoid ambiguity, keys must not
conflict with any other metadata keys for the project or be one of the
reserved keys: "cluster-location" "cluster-name" "cluster-uid"
"configure-sh" "containerd-configure-sh" "enable-os-login" "gci-
ensure-gke-docker" "gci-metrics-enabled" "gci-update-strategy"
"instance-template" "kube-env" "startup-script" "user-data"
"disable-address-manager" "windows-startup-script-ps1" "common-psm1"
"k8s-node-setup-psm1" "install-ssh-psm1" "user-profile-psm1" "serial-
port-logging-enable" Values are free-form strings, and only have meaning
as interpreted by the image running in the instance. The only
restriction placed on them is that each value's size must be less than
or equal to 32 KB. The total size of all keys and values must be less
than 512 KB.
minCpuPlatform: Minimum CPU platform to be used by this instance. The
instance may be scheduled on the specified or newer CPU platform.
Applicable values are the friendly names of CPU platforms, such as
<code>minCpuPlatform: "Intel Haswell"</code> or
<code>minCpuPlatform: "Intel Sandy Bridge"</code>. For more
information, read [how to specify min CPU
platform](https://cloud.google.com/compute/docs/instances/specify-min-
cpu-platform)
nodeGroup: The optional node group. Setting this field will assign
instances of this pool to run on the specified node group. This is
useful for running workloads on [sole tenant
nodes](/compute/docs/nodes/)
nodeImageConfig: The node image configuration to use for this node pool.
Note that this is only applicable for node pools using
image_type=CUSTOM.
oauthScopes: The set of Google API scopes to be made available on all of
the node VMs under the "default" service account. The following scopes
are recommended, but not required, and by default are not included: *
`https://www.googleapis.com/auth/compute` is required for mounting
persistent storage on your nodes. *
`https://www.googleapis.com/auth/devstorage.read_only` is required for
communicating with **gcr.io** (the [Google Container Registry
](/container-registry/)). If unspecified, no scopes are added, unless
Cloud Logging or Cloud Monitoring are enabled, in which case their
required scopes will be added.
preemptible: Whether the nodes are created as preemptible VM instances.
See: https://cloud.google.com/compute/docs/instances/preemptible for
more information about preemptible VM instances.
reservationAffinity: The optional reservation affinity. Setting this field
will apply the specified [Zonal Compute
Reservation](/compute/docs/instances/reserving-zonal-resources) to this
node pool.
sandboxConfig: Sandbox configuration for this node.
serviceAccount: The Google Cloud Platform Service Account to be used by
the node VMs. Specify the email address of the Service Account;
otherwise, if no Service Account is specified, the "default" service
account is used.
shieldedInstanceConfig: Shielded Instance options.
tags: The list of instance tags applied to all nodes. Tags are used to
identify valid sources or targets for network firewalls and are
specified by the client during cluster or node pool creation. Each tag
within the list must comply with RFC1035.
taints: List of kubernetes taints to be applied to each node. For more
information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
workloadMetadataConfig: The workload metadata configuration for this node.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""The map of Kubernetes labels (key/value pairs) to be applied to each
node. These will be added in addition to any default label(s) that Kubernetes
may apply to the node. In case of conflict in label keys, the applied set
may differ depending on the Kubernetes version -- it's best to assume the
behavior is undefined and conflicts should be avoided. For more
information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""The metadata key/value pairs assigned to instances in the cluster.
Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
in length. These are reflected as part of a URL in the metadata server.
Additionally, to avoid ambiguity, keys must not conflict with any other
metadata keys for the project or be one of the reserved keys: "cluster-
location" "cluster-name" "cluster-uid" "configure-sh" "containerd-
configure-sh" "enable-os-login" "gci-ensure-gke-docker" "gci-metrics-
enabled" "gci-update-strategy" "instance-template" "kube-env"
"startup-script" "user-data" "disable-address-manager" "windows-
startup-script-ps1" "common-psm1" "k8s-node-setup-psm1" "install-ssh-
psm1" "user-profile-psm1" "serial-port-logging-enable" Values are free-
form strings, and only have meaning as interpreted by the image running in
the instance. The only restriction placed on them is that each value's
size must be less than or equal to 32 KB. The total size of all keys and
values must be less than 512 KB.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Additional properties of type MetadataValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
accelerators = _messages.MessageField('AcceleratorConfig', 1, repeated=True)
bootDiskKmsKey = _messages.StringField(2)
diskSizeGb = _messages.IntegerField(3, variant=_messages.Variant.INT32)
diskType = _messages.StringField(4)
imageType = _messages.StringField(5)
kubeletConfig = _messages.MessageField('NodeKubeletConfig', 6)
labels = _messages.MessageField('LabelsValue', 7)
linuxNodeConfig = _messages.MessageField('LinuxNodeConfig', 8)
localSsdCount = _messages.IntegerField(9, variant=_messages.Variant.INT32)
localSsdVolumeConfigs = _messages.MessageField('LocalSsdVolumeConfig', 10, repeated=True)
machineType = _messages.StringField(11)
metadata = _messages.MessageField('MetadataValue', 12)
minCpuPlatform = _messages.StringField(13)
nodeGroup = _messages.StringField(14)
nodeImageConfig = _messages.MessageField('CustomImageConfig', 15)
oauthScopes = _messages.StringField(16, repeated=True)
preemptible = _messages.BooleanField(17)
reservationAffinity = _messages.MessageField('ReservationAffinity', 18)
sandboxConfig = _messages.MessageField('SandboxConfig', 19)
serviceAccount = _messages.StringField(20)
shieldedInstanceConfig = _messages.MessageField('ShieldedInstanceConfig', 21)
tags = _messages.StringField(22, repeated=True)
taints = _messages.MessageField('NodeTaint', 23, repeated=True)
workloadMetadataConfig = _messages.MessageField('WorkloadMetadataConfig', 24)
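# Illustrative sketch: a minimal NodeConfig with a machine type, disk size, and
# one Kubernetes label expressed through LabelsValue.AdditionalProperty. The
# helper name and label values are hypothetical.
def _example_node_config():
  """Builds a NodeConfig with one Kubernetes label applied to each node."""
  labels = NodeConfig.LabelsValue(additionalProperties=[
      NodeConfig.LabelsValue.AdditionalProperty(key='env', value='staging')])
  return NodeConfig(
      machineType='n1-standard-1',
      diskSizeGb=100,
      labels=labels)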
class NodeKubeletConfig(_messages.Message):
r"""Node kubelet configs. NOTE: This is an Alpha only API.
Fields:
cpuCfsQuota: Enable CPU CFS quota enforcement for containers that specify
CPU limits. If this option is enabled, kubelet uses CFS quota
(https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt) to
enforce container CPU limits. Otherwise, CPU limits will not be enforced
at all. Disable this option to mitigate CPU throttling problems while
still having your pods to be in Guaranteed QoS class by specifying the
CPU limits. The default value is 'true' if unspecified.
cpuCfsQuotaPeriod: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
The string must be a sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms". Valid time units are "ns",
"us" (or "\xb5s"), "ms", "s", "m", "h". The value must be a positive
duration.
cpuManagerPolicy: Control the CPU management policy on the node. See
https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-
policies/ The following values are allowed. - "none": the default,
which represents the existing scheduling behavior. - "static": allows
pods with certain resource characteristics to be granted
increased CPU affinity and exclusivity on the node.
"""
cpuCfsQuota = _messages.BooleanField(1)
cpuCfsQuotaPeriod = _messages.StringField(2)
cpuManagerPolicy = _messages.StringField(3)
class NodeManagement(_messages.Message):
r"""NodeManagement defines the set of node management services turned on for
the node pool.
Fields:
autoRepair: Whether the nodes will be automatically repaired.
autoUpgrade: Whether the nodes will be automatically upgraded.
upgradeOptions: Specifies the Auto Upgrade knobs for the node pool.
"""
autoRepair = _messages.BooleanField(1)
autoUpgrade = _messages.BooleanField(2)
upgradeOptions = _messages.MessageField('AutoUpgradeOptions', 3)
class NodePool(_messages.Message):
r"""NodePool contains the name and configuration for a cluster's node pool.
Node pools are a set of nodes (i.e. VMs), with a common configuration and
specification, under the control of the cluster master. They may have a set
of Kubernetes labels applied to them, which may be used to reference them
during pod scheduling. They may also be resized up or down, to accommodate
the workload.
Enums:
StatusValueValuesEnum: [Output only] The status of the nodes in this pool
instance.
Fields:
autoscaling: Autoscaler configuration for this NodePool. Autoscaler is
enabled only if a valid configuration is present.
conditions: Which conditions caused the current node pool state.
config: The node configuration of the pool.
initialNodeCount: The initial node count for the pool. You must ensure
that your Compute Engine <a href="/compute/docs/resource-
quotas">resource quota</a> is sufficient for this number of instances.
You must also have available firewall and routes quota.
instanceGroupUrls: [Output only] The resource URLs of the [managed
instance groups](/compute/docs/instance-groups/creating-groups-of-
managed-instances) associated with this node pool.
locations: The list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the NodePool's nodes
should be located.
management: NodeManagement configuration for this NodePool.
maxPodsConstraint: The constraint on the maximum number of pods that can
be run simultaneously on a node in the node pool.
name: The name of the node pool.
podIpv4CidrSize: [Output only] The pod CIDR block size per node in this
node pool.
resourceVersion: Server-defined resource version (etag).
selfLink: [Output only] Server-defined URL for the resource.
status: [Output only] The status of the nodes in this pool instance.
statusMessage: [Output only] Additional information about the current
status of this node pool instance, if available. Deprecated, use the
field conditions instead.
upgradeSettings: Upgrade settings control disruption and speed of the
upgrade.
version: The version of Kubernetes running on this node.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""[Output only] The status of the nodes in this pool instance.
Values:
STATUS_UNSPECIFIED: Not set.
PROVISIONING: The PROVISIONING state indicates the node pool is being
created.
RUNNING: The RUNNING state indicates the node pool has been created and
is fully usable.
RUNNING_WITH_ERROR: The RUNNING_WITH_ERROR state indicates the node pool
has been created and is partially usable. Some error state has
occurred and some functionality may be impaired. Customer may need to
reissue a request or trigger a new update.
RECONCILING: The RECONCILING state indicates that some work is actively
being done on the node pool, such as upgrading node software. Details
can be found in the `statusMessage` field.
STOPPING: The STOPPING state indicates the node pool is being deleted.
ERROR: The ERROR state indicates the node pool may be unusable. Details
can be found in the `statusMessage` field.
"""
STATUS_UNSPECIFIED = 0
PROVISIONING = 1
RUNNING = 2
RUNNING_WITH_ERROR = 3
RECONCILING = 4
STOPPING = 5
ERROR = 6
autoscaling = _messages.MessageField('NodePoolAutoscaling', 1)
conditions = _messages.MessageField('StatusCondition', 2, repeated=True)
config = _messages.MessageField('NodeConfig', 3)
initialNodeCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
instanceGroupUrls = _messages.StringField(5, repeated=True)
locations = _messages.StringField(6, repeated=True)
management = _messages.MessageField('NodeManagement', 7)
maxPodsConstraint = _messages.MessageField('MaxPodsConstraint', 8)
name = _messages.StringField(9)
podIpv4CidrSize = _messages.IntegerField(10, variant=_messages.Variant.INT32)
resourceVersion = _messages.StringField(11)
selfLink = _messages.StringField(12)
status = _messages.EnumField('StatusValueValuesEnum', 13)
statusMessage = _messages.StringField(14)
upgradeSettings = _messages.MessageField('UpgradeSettings', 15)
version = _messages.StringField(16)
class NodePoolAutoscaling(_messages.Message):
r"""NodePoolAutoscaling contains information required by cluster autoscaler
to adjust the size of the node pool to the current cluster usage.
Fields:
autoprovisioned: Can this node pool be deleted automatically.
enabled: Is autoscaling enabled for this node pool.
maxNodeCount: Maximum number of nodes in the NodePool. Must be >=
min_node_count. There has to be enough quota to scale up the cluster.
minNodeCount: Minimum number of nodes in the NodePool. Must be >= 1 and <=
max_node_count.
"""
autoprovisioned = _messages.BooleanField(1)
enabled = _messages.BooleanField(2)
maxNodeCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
minNodeCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
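# Illustrative sketch (not part of the generated API surface): one way a
# caller might populate NodePoolAutoscaling. The node counts below are
# hypothetical and must fit within project quota.
def _example_node_pool_autoscaling():
  """Builds a sample NodePoolAutoscaling message (min 1 node, max 5 nodes)."""
  return NodePoolAutoscaling(
      enabled=True,
      minNodeCount=1,   # must be >= 1 and <= maxNodeCount
      maxNodeCount=5)   # must be >= minNodeCount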
class NodeTaint(_messages.Message):
r"""Kubernetes taint is comprised of three fields: key, value, and effect.
Effect can only be one of three types: NoSchedule, PreferNoSchedule or
NoExecute. For more information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
Enums:
EffectValueValuesEnum: Effect for taint.
Fields:
effect: Effect for taint.
key: Key for taint.
value: Value for taint.
"""
class EffectValueValuesEnum(_messages.Enum):
r"""Effect for taint.
Values:
EFFECT_UNSPECIFIED: Not set
NO_SCHEDULE: NoSchedule
PREFER_NO_SCHEDULE: PreferNoSchedule
NO_EXECUTE: NoExecute
"""
EFFECT_UNSPECIFIED = 0
NO_SCHEDULE = 1
PREFER_NO_SCHEDULE = 2
NO_EXECUTE = 3
effect = _messages.EnumField('EffectValueValuesEnum', 1)
key = _messages.StringField(2)
value = _messages.StringField(3)
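# Illustrative sketch (not part of the generated API surface): constructing a
# NodeTaint message. The key/value pair below is a hypothetical example.
def _example_node_taint():
  """Builds a sample NodeTaint that repels pods lacking a matching toleration."""
  return NodeTaint(
      key='dedicated',           # hypothetical taint key
      value='gpu-workloads',     # hypothetical taint value
      effect=NodeTaint.EffectValueValuesEnum.NO_SCHEDULE)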
class Operation(_messages.Message):
r"""This operation resource represents operations that may have happened or
are happening on the cluster. All fields are output only.
Enums:
OperationTypeValueValuesEnum: The operation type.
StatusValueValuesEnum: The current status of the operation.
Fields:
clusterConditions: Which conditions caused the current cluster state.
detail: Detailed operation progress, if available.
endTime: [Output only] The time the operation completed, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
location: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/regions-zones/regions-zones#available) or
[region](/compute/docs/regions-zones/regions-zones#available) in which
the cluster resides.
name: The server-assigned ID for the operation.
nodepoolConditions: Which conditions caused the current node pool state.
operationType: The operation type.
progress: Output only. [Output only] Progress information for an
operation.
selfLink: Server-defined URL for the resource.
startTime: [Output only] The time the operation started, in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
status: The current status of the operation.
statusMessage: Output only. If an error has occurred, a textual
description of the error.
targetLink: Server-defined URL for the target of the operation.
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the operation is taking
place. This field is deprecated, use location instead.
"""
class OperationTypeValueValuesEnum(_messages.Enum):
r"""The operation type.
Values:
TYPE_UNSPECIFIED: Not set.
CREATE_CLUSTER: Cluster create.
DELETE_CLUSTER: Cluster delete.
UPGRADE_MASTER: A master upgrade.
UPGRADE_NODES: A node upgrade.
REPAIR_CLUSTER: Cluster repair.
UPDATE_CLUSTER: Cluster update.
CREATE_NODE_POOL: Node pool create.
DELETE_NODE_POOL: Node pool delete.
SET_NODE_POOL_MANAGEMENT: Set node pool management.
AUTO_REPAIR_NODES: Automatic node pool repair.
AUTO_UPGRADE_NODES: Automatic node upgrade.
SET_LABELS: Set labels.
SET_MASTER_AUTH: Set/generate master auth materials
SET_NODE_POOL_SIZE: Set node pool size.
SET_NETWORK_POLICY: Updates network policy for a cluster.
SET_MAINTENANCE_POLICY: Set the maintenance policy.
UPDATE_IP_ALLOCATION_POLICY: Update cluster IP allocation policy.
"""
TYPE_UNSPECIFIED = 0
CREATE_CLUSTER = 1
DELETE_CLUSTER = 2
UPGRADE_MASTER = 3
UPGRADE_NODES = 4
REPAIR_CLUSTER = 5
UPDATE_CLUSTER = 6
CREATE_NODE_POOL = 7
DELETE_NODE_POOL = 8
SET_NODE_POOL_MANAGEMENT = 9
AUTO_REPAIR_NODES = 10
AUTO_UPGRADE_NODES = 11
SET_LABELS = 12
SET_MASTER_AUTH = 13
SET_NODE_POOL_SIZE = 14
SET_NETWORK_POLICY = 15
SET_MAINTENANCE_POLICY = 16
UPDATE_IP_ALLOCATION_POLICY = 17
class StatusValueValuesEnum(_messages.Enum):
r"""The current status of the operation.
Values:
STATUS_UNSPECIFIED: Not set.
PENDING: The operation has been created.
RUNNING: The operation is currently running.
DONE: The operation is done, either cancelled or completed.
ABORTING: The operation is aborting.
"""
STATUS_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
ABORTING = 4
clusterConditions = _messages.MessageField('StatusCondition', 1, repeated=True)
detail = _messages.StringField(2)
endTime = _messages.StringField(3)
location = _messages.StringField(4)
name = _messages.StringField(5)
nodepoolConditions = _messages.MessageField('StatusCondition', 6, repeated=True)
operationType = _messages.EnumField('OperationTypeValueValuesEnum', 7)
progress = _messages.MessageField('OperationProgress', 8)
selfLink = _messages.StringField(9)
startTime = _messages.StringField(10)
status = _messages.EnumField('StatusValueValuesEnum', 11)
statusMessage = _messages.StringField(12)
targetLink = _messages.StringField(13)
zone = _messages.StringField(14)
class OperationProgress(_messages.Message):
r"""Information about operation (or operation stage) progress.
Enums:
StatusValueValuesEnum: Status of an operation stage. Unset for single-
stage operations.
Fields:
metrics: Progress metric bundle, for example: metrics: [{name: "nodes
done", int_value: 15}, {name: "nodes total", int_value: 32}] or
metrics: [{name: "progress", double_value: 0.56}, {name: "progress
scale", double_value: 1.0}]
name: A non-parameterized string describing an operation stage. Unset for
single-stage operations.
stages: Substages of an operation or a stage.
status: Status of an operation stage. Unset for single-stage operations.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""Status of an operation stage. Unset for single-stage operations.
Values:
STATUS_UNSPECIFIED: Not set.
PENDING: The operation has been created.
RUNNING: The operation is currently running.
DONE: The operation is done, either cancelled or completed.
ABORTING: The operation is aborting.
"""
STATUS_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
ABORTING = 4
metrics = _messages.MessageField('Metric', 1, repeated=True)
name = _messages.StringField(2)
stages = _messages.MessageField('OperationProgress', 3, repeated=True)
status = _messages.EnumField('StatusValueValuesEnum', 4)
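# Illustrative sketch (not part of the generated API surface): the metric
# bundle described in the OperationProgress docstring, expressed as messages.
# This assumes the Metric message defined earlier in this module exposes
# 'name' and 'intValue' fields; the stage name is hypothetical.
def _example_operation_progress():
  """Builds a sample OperationProgress reporting 15 of 32 nodes done."""
  return OperationProgress(
      name='UPGRADING_NODES',  # hypothetical stage name
      status=OperationProgress.StatusValueValuesEnum.RUNNING,
      metrics=[
          Metric(name='nodes done', intValue=15),
          Metric(name='nodes total', intValue=32),
      ])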
class PodSecurityPolicyConfig(_messages.Message):
r"""Configuration for the PodSecurityPolicy feature.
Fields:
enabled: Enable the PodSecurityPolicy controller for this cluster. If
enabled, pods must be valid under a PodSecurityPolicy to be created.
"""
enabled = _messages.BooleanField(1)
class PremiumConfig(_messages.Message):
r"""PremiumConfig is the configuration for all premium features and tiers.
Fields:
features: The features that GKE provides.
tiers: The tiers that are part of the premium offering.
"""
features = _messages.MessageField('FeatureConfig', 1, repeated=True)
tiers = _messages.MessageField('TierConfig', 2, repeated=True)
class PrivateClusterConfig(_messages.Message):
r"""Configuration options for private clusters.
Fields:
enablePeeringRouteSharing: Whether to enable route sharing over the
network peering.
enablePrivateEndpoint: Whether the master's internal IP address is used as
the cluster endpoint.
enablePrivateNodes: Whether nodes have internal IP addresses only. If
enabled, all nodes are given only RFC 1918 private addresses and
communicate with the master via private networking.
masterIpv4CidrBlock: The IP range in CIDR notation to use for the hosted
master network. This range will be used for assigning internal IP
addresses to the master or set of masters, as well as the ILB VIP. This
range must not overlap with any other ranges in use within the cluster's
network.
peeringName: Output only. The peering name in the customer VPC used by
this cluster.
privateEndpoint: Output only. The internal IP address of this cluster's
endpoint.
publicEndpoint: Output only. The external IP address of this cluster's
endpoint.
"""
enablePeeringRouteSharing = _messages.BooleanField(1)
enablePrivateEndpoint = _messages.BooleanField(2)
enablePrivateNodes = _messages.BooleanField(3)
masterIpv4CidrBlock = _messages.StringField(4)
peeringName = _messages.StringField(5)
privateEndpoint = _messages.StringField(6)
publicEndpoint = _messages.StringField(7)
class PrivateIPv6Status(_messages.Message):
r"""PrivateIPv6Status contains the desired state of the IPv6 fast path on
this cluster. Private IPv6 access allows direct high speed communication
from GKE pods to gRPC Google cloud services over IPv6.
Fields:
enabled: Enables private IPv6 access to Google Cloud services for this
cluster.
"""
enabled = _messages.BooleanField(1)
class RecurringTimeWindow(_messages.Message):
r"""Represents an arbitrary window of time that recurs.
Fields:
recurrence: An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3)
for how this window recurs. They go on for the span of time between the
start and end time. For example, to have something repeat every
weekday, you'd use: <code>FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR</code> To
repeat some window daily (equivalent to the DailyMaintenanceWindow):
<code>FREQ=DAILY</code> For the first weekend of every month:
<code>FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU</code> This specifies how
frequently the window starts. Eg, if you wanted to have a 9-5 UTC-4
window every weekday, you'd use something like: <code> start time =
2019-01-01T09:00:00-0400 end time = 2019-01-01T17:00:00-0400
recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR </code> Windows can span
multiple days. Eg, to make the window encompass every weekend from
midnight Saturday till the last minute of Sunday UTC: <code> start
time = 2019-01-05T00:00:00Z end time = 2019-01-07T23:59:00Z
recurrence = FREQ=WEEKLY;BYDAY=SA </code> Note the start and end time's
specific dates are largely arbitrary except to specify duration of the
window and when it first starts. The FREQ values of HOURLY, MINUTELY,
and SECONDLY are not supported.
window: The window of the first recurrence.
"""
recurrence = _messages.StringField(1)
window = _messages.MessageField('TimeWindow', 2)
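# Illustrative sketch (not part of the generated API surface): the weekday
# 09:00-17:00 UTC-4 maintenance window described in the docstring above,
# expressed as a RecurringTimeWindow (TimeWindow is defined later in this
# module; the dates shown only anchor the window's duration and first start).
def _example_recurring_time_window():
  """Builds a sample maintenance window recurring Monday through Friday."""
  return RecurringTimeWindow(
      window=TimeWindow(
          startTime='2019-01-01T09:00:00-04:00',
          endTime='2019-01-01T17:00:00-04:00'),
      recurrence='FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR')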
class ReleaseChannel(_messages.Message):
r"""ReleaseChannel indicates which release channel a cluster is subscribed
to. Release channels are arranged in order of risk and frequency of updates.
When a cluster is subscribed to a release channel, Google maintains both the
master version and the node version. Node auto-upgrade defaults to true and
cannot be disabled. Updates to version related fields (e.g.
current_master_version) return an error.
Enums:
ChannelValueValuesEnum: channel specifies which release channel the
cluster is subscribed to.
Fields:
channel: channel specifies which release channel the cluster is subscribed
to.
"""
class ChannelValueValuesEnum(_messages.Enum):
r"""channel specifies which release channel the cluster is subscribed to.
Values:
UNSPECIFIED: No channel specified.
RAPID: RAPID channel is offered on an early access basis for customers
who want to test new releases before they are qualified for production
use or general availability. New upgrades will occur roughly weekly.
WARNING: Versions available in the RAPID Channel may be subject to
unresolved issues with no known workaround and are not for use with
production workloads or subject to any SLAs.
REGULAR: Clusters subscribed to REGULAR receive versions that are
considered GA quality. REGULAR is intended for production users who
want to take advantage of new features. New upgrades will occur
roughly every few weeks.
STABLE: Clusters subscribed to STABLE receive versions that are known to
be stable and reliable in production. STABLE is intended for
production users who need stability above all else, or for whom
frequent upgrades are too risky. New upgrades will occur roughly every
few months.
"""
UNSPECIFIED = 0
RAPID = 1
REGULAR = 2
STABLE = 3
channel = _messages.EnumField('ChannelValueValuesEnum', 1)
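# Illustrative sketch (not part of the generated API surface): subscribing a
# cluster to the REGULAR release channel.
def _example_release_channel():
  """Builds a ReleaseChannel message selecting the REGULAR channel."""
  return ReleaseChannel(
      channel=ReleaseChannel.ChannelValueValuesEnum.REGULAR)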
class ReleaseChannelConfig(_messages.Message):
r"""ReleaseChannelConfig exposes configuration for a release channel.
Enums:
ChannelValueValuesEnum: The release channel this configuration applies to.
Fields:
availableVersions: List of available versions for the release channel.
channel: The release channel this configuration applies to.
defaultVersion: The default version for newly created clusters on the
channel.
"""
class ChannelValueValuesEnum(_messages.Enum):
r"""The release channel this configuration applies to.
Values:
UNSPECIFIED: No channel specified.
RAPID: RAPID channel is offered on an early access basis for customers
who want to test new releases before they are qualified for production
use or general availability. New upgrades will occur roughly weekly.
WARNING: Versions available in the RAPID Channel may be subject to
unresolved issues with no known workaround and are not for use with
production workloads or subject to any SLAs.
REGULAR: Clusters subscribed to REGULAR receive versions that are
considered GA quality. REGULAR is intended for production users who
want to take advantage of new features. New upgrades will occur
roughly every few weeks.
STABLE: Clusters subscribed to STABLE receive versions that are known to
be stable and reliable in production. STABLE is intended for
production users who need stability above all else, or for whom
frequent upgrades are too risky. New upgrades will occur roughly every
few months.
"""
UNSPECIFIED = 0
RAPID = 1
REGULAR = 2
STABLE = 3
availableVersions = _messages.MessageField('AvailableVersion', 1, repeated=True)
channel = _messages.EnumField('ChannelValueValuesEnum', 2)
defaultVersion = _messages.StringField(3)
class ReservationAffinity(_messages.Message):
r"""[ReservationAffinity](/compute/docs/instances/reserving-zonal-resources)
is the configuration of desired reservation which instances could take
capacity from.
Enums:
ConsumeReservationTypeValueValuesEnum: Corresponds to the type of
reservation consumption.
Fields:
consumeReservationType: Corresponds to the type of reservation
consumption.
key: Corresponds to the label key of a reservation resource. To target a
SPECIFIC_RESERVATION by name, specify "googleapis.com/reservation-name"
as the key and specify the name of your reservation as its value.
values: Corresponds to the label value(s) of reservation resource(s).
"""
class ConsumeReservationTypeValueValuesEnum(_messages.Enum):
r"""Corresponds to the type of reservation consumption.
Values:
UNSPECIFIED: Default value. This should not be used.
NO_RESERVATION: Do not consume from any reserved capacity.
ANY_RESERVATION: Consume any reservation available.
SPECIFIC_RESERVATION: Must consume from a specific reservation. Must
specify key value fields for specifying the reservations.
"""
UNSPECIFIED = 0
NO_RESERVATION = 1
ANY_RESERVATION = 2
SPECIFIC_RESERVATION = 3
consumeReservationType = _messages.EnumField('ConsumeReservationTypeValueValuesEnum', 1)
key = _messages.StringField(2)
values = _messages.StringField(3, repeated=True)
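# Illustrative sketch (not part of the generated API surface): targeting a
# specific zonal reservation by name, using the key documented above.
# 'my-reservation' is a hypothetical reservation name.
def _example_reservation_affinity():
  """Builds a ReservationAffinity that consumes one named reservation."""
  enum = ReservationAffinity.ConsumeReservationTypeValueValuesEnum
  return ReservationAffinity(
      consumeReservationType=enum.SPECIFIC_RESERVATION,
      key='googleapis.com/reservation-name',
      values=['my-reservation'])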
class ResourceLimit(_messages.Message):
r"""Contains information about amount of some resource in the cluster. For
memory, value should be in GB.
Fields:
maximum: Maximum amount of the resource in the cluster.
minimum: Minimum amount of the resource in the cluster.
resourceType: Resource name "cpu", "memory" or gpu-specific string.
"""
maximum = _messages.IntegerField(1)
minimum = _messages.IntegerField(2)
resourceType = _messages.StringField(3)
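# Illustrative sketch (not part of the generated API surface): cluster-wide
# autoscaling limits for CPU and memory; memory amounts are expressed in GB.
# The numbers below are arbitrary examples.
def _example_resource_limits():
  """Builds sample ResourceLimit messages for cpu and memory."""
  return [
      ResourceLimit(resourceType='cpu', minimum=4, maximum=32),
      ResourceLimit(resourceType='memory', minimum=16, maximum=128),  # GB
  ]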
class ResourceUsageExportConfig(_messages.Message):
r"""Configuration for exporting cluster resource usages.
Fields:
bigqueryDestination: Configuration to use BigQuery as usage export
destination.
consumptionMeteringConfig: Configuration to enable resource consumption
metering.
enableNetworkEgressMetering: Whether to enable network egress metering for
this cluster. If enabled, a daemonset will be created in the cluster to
meter network egress traffic.
"""
bigqueryDestination = _messages.MessageField('BigQueryDestination', 1)
consumptionMeteringConfig = _messages.MessageField('ConsumptionMeteringConfig', 2)
enableNetworkEgressMetering = _messages.BooleanField(3)
class RollbackNodePoolUpgradeRequest(_messages.Message):
r"""RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or
Failed NodePool upgrade. This will be an no-op if the last upgrade
successfully completed.
Fields:
clusterId: Deprecated. The name of the cluster to rollback. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to rollback upgrade. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to rollback. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
nodePoolId = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SandboxConfig(_messages.Message):
r"""SandboxConfig contains configurations of the sandbox to use for the
node.
Enums:
TypeValueValuesEnum: Type of the sandbox to use for the node.
Fields:
sandboxType: Type of the sandbox to use for the node (e.g. 'gvisor')
type: Type of the sandbox to use for the node.
"""
class TypeValueValuesEnum(_messages.Enum):
r"""Type of the sandbox to use for the node.
Values:
UNSPECIFIED: Default value. This should not be used.
GVISOR: Run sandbox using gvisor.
"""
UNSPECIFIED = 0
GVISOR = 1
sandboxType = _messages.StringField(1)
type = _messages.EnumField('TypeValueValuesEnum', 2)
class SecurityProfile(_messages.Message):
r"""User selected security profile
Fields:
disableRuntimeRules: Don't apply runtime rules. When set to true, no
objects/deployments will be installed in the cluster to enforce runtime
rules. This is useful to work with config-as-code systems
name: Name with version of the selected security profile. A security
profile name follows kebab-case (a-zA-Z*) and a version is like
MAJOR.MINOR-suffix, where suffix is ([a-zA-Z0-9\-_\.]+), e.g.
default-1.0-gke.0
"""
disableRuntimeRules = _messages.BooleanField(1)
name = _messages.StringField(2)
class ServerConfig(_messages.Message):
r"""Kubernetes Engine service configuration.
Fields:
channels: List of release channel configurations.
defaultClusterVersion: Version of Kubernetes the service deploys by
default.
defaultImageType: Default image type.
premiumConfig: Premium configuration for the service.
validImageTypes: List of valid image types.
validMasterVersions: List of valid master versions.
validNodeVersions: List of valid node upgrade target versions.
"""
channels = _messages.MessageField('ReleaseChannelConfig', 1, repeated=True)
defaultClusterVersion = _messages.StringField(2)
defaultImageType = _messages.StringField(3)
premiumConfig = _messages.MessageField('PremiumConfig', 4)
validImageTypes = _messages.StringField(5, repeated=True)
validMasterVersions = _messages.StringField(6, repeated=True)
validNodeVersions = _messages.StringField(7, repeated=True)
class SetAddonsConfigRequest(_messages.Message):
r"""SetAddonsRequest sets the addons associated with the cluster.
Fields:
addonsConfig: The desired configurations for the various addons available
to run in the cluster.
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to set addons.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
addonsConfig = _messages.MessageField('AddonsConfig', 1)
clusterId = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetLabelsRequest(_messages.Message):
r"""SetLabelsRequest sets the Google Cloud Platform labels on a Google
Container Engine cluster, which will in turn set them for Google Compute
Engine resources used by that cluster
Messages:
ResourceLabelsValue: The labels to set for that cluster.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
labelFingerprint: The fingerprint of the previous set of labels for this
resource, used to detect conflicts. The fingerprint is initially
generated by Kubernetes Engine and changes after every request to modify
or update labels. You must always provide an up-to-date fingerprint hash
when updating or changing labels. Make a <code>get()</code> request to
the resource to get the latest fingerprint.
name: The name (project, location, cluster id) of the cluster to set
labels. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
resourceLabels: The labels to set for that cluster.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ResourceLabelsValue(_messages.Message):
r"""The labels to set for that cluster.
Messages:
AdditionalProperty: An additional property for a ResourceLabelsValue
object.
Fields:
additionalProperties: Additional properties of type ResourceLabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResourceLabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterId = _messages.StringField(1)
labelFingerprint = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
resourceLabels = _messages.MessageField('ResourceLabelsValue', 5)
zone = _messages.StringField(6)
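# Illustrative sketch (not part of the generated API surface): the
# read-modify-write flow implied by labelFingerprint. The fingerprint must
# come from a prior get() of the cluster; the label shown is hypothetical.
def _example_set_labels_request(name, fingerprint_from_get):
  """Builds a SetLabelsRequest applying a single 'env: prod' label."""
  labels = SetLabelsRequest.ResourceLabelsValue(additionalProperties=[
      SetLabelsRequest.ResourceLabelsValue.AdditionalProperty(
          key='env', value='prod'),  # hypothetical label
  ])
  return SetLabelsRequest(
      name=name,  # e.g. 'projects/p/locations/us-central1/clusters/c'
      resourceLabels=labels,
      labelFingerprint=fingerprint_from_get)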
class SetLegacyAbacRequest(_messages.Message):
r"""SetLegacyAbacRequest enables or disables the ABAC authorization
mechanism for a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to update. This field has
been deprecated and replaced by the name field.
enabled: Whether ABAC authorization will be enabled in the cluster.
name: The name (project, location, cluster id) of the cluster to set
legacy abac. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
enabled = _messages.BooleanField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetLocationsRequest(_messages.Message):
r"""SetLocationsRequest sets the locations of the cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
locations: The desired list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the cluster's nodes
should be located. Changing the locations a cluster is in will result in
nodes being either created or removed from the cluster, depending on
whether locations are being added or removed. This list must always
include the cluster's primary zone.
name: The name (project, location, cluster) of the cluster to set
locations. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
locations = _messages.StringField(2, repeated=True)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetLoggingServiceRequest(_messages.Message):
r"""SetLoggingServiceRequest sets the logging service of a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
loggingService: The logging service the cluster should use to write
logs. Currently available options: * "logging.googleapis.com" - the
Google Cloud Logging service * "none" - no logs will be exported from
the cluster
name: The name (project, location, cluster) of the cluster to set logging.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
loggingService = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetMaintenancePolicyRequest(_messages.Message):
r"""SetMaintenancePolicyRequest sets the maintenance policy for a cluster.
Fields:
clusterId: The name of the cluster to update.
maintenancePolicy: The maintenance policy to be set for the cluster. An
empty field clears the existing maintenance policy.
name: The name (project, location, cluster id) of the cluster to set
maintenance policy. Specified in the format
'projects/*/locations/*/clusters/*'.
projectId: The Google Developers Console [project ID or project
number](https://support.google.com/cloud/answer/6158840).
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
clusterId = _messages.StringField(1)
maintenancePolicy = _messages.MessageField('MaintenancePolicy', 2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetMasterAuthRequest(_messages.Message):
r"""SetMasterAuthRequest updates the admin password of a cluster.
Enums:
ActionValueValuesEnum: The exact form of action to be taken on the master
auth.
Fields:
action: The exact form of action to be taken on the master auth.
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to set auth.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
update: A description of the update.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
class ActionValueValuesEnum(_messages.Enum):
r"""The exact form of action to be taken on the master auth.
Values:
UNKNOWN: Operation is unknown and will error out.
SET_PASSWORD: Set the password to a user generated value.
GENERATE_PASSWORD: Generate a new password and set it to that.
SET_USERNAME: Set the username. If an empty username is provided, basic
authentication is disabled for the cluster. If a non-empty username
is provided, basic authentication is enabled, with either a provided
password or a generated one.
"""
UNKNOWN = 0
SET_PASSWORD = 1
GENERATE_PASSWORD = 2
SET_USERNAME = 3
action = _messages.EnumField('ActionValueValuesEnum', 1)
clusterId = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
update = _messages.MessageField('MasterAuth', 5)
zone = _messages.StringField(6)
class SetMonitoringServiceRequest(_messages.Message):
r"""SetMonitoringServiceRequest sets the monitoring service of a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
monitoringService: The monitoring service the cluster should use to write
metrics. Currently available options: * "monitoring.googleapis.com" -
the Google Cloud Monitoring service * "none" - no metrics will be
exported from the cluster
name: The name (project, location, cluster) of the cluster to set
monitoring. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
monitoringService = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetNetworkPolicyRequest(_messages.Message):
r"""SetNetworkPolicyRequest enables/disables network policy for a cluster.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster id) of the cluster to set
networking policy. Specified in the format
'projects/*/locations/*/clusters/*'.
networkPolicy: Configuration options for the NetworkPolicy feature.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
networkPolicy = _messages.MessageField('NetworkPolicy', 3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class SetNodePoolAutoscalingRequest(_messages.Message):
r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of a node
pool.
Fields:
autoscaling: Autoscaling configuration for the node pool.
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool) of the node pool to
set autoscaler settings. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to upgrade. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
autoscaling = _messages.MessageField('NodePoolAutoscaling', 1)
clusterId = _messages.StringField(2)
name = _messages.StringField(3)
nodePoolId = _messages.StringField(4)
projectId = _messages.StringField(5)
zone = _messages.StringField(6)
class SetNodePoolManagementRequest(_messages.Message):
r"""SetNodePoolManagementRequest sets the node management properties of a
node pool.
Fields:
clusterId: Deprecated. The name of the cluster to update. This field has
been deprecated and replaced by the name field.
management: NodeManagement configuration for the node pool.
name: The name (project, location, cluster, node pool id) of the node pool
to set management properties. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to update. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
management = _messages.MessageField('NodeManagement', 2)
name = _messages.StringField(3)
nodePoolId = _messages.StringField(4)
projectId = _messages.StringField(5)
zone = _messages.StringField(6)
class SetNodePoolSizeRequest(_messages.Message):
r"""SetNodePoolSizeRequest sets the size a node pool.
Fields:
clusterId: Deprecated. The name of the cluster to update. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster, node pool id) of the node pool
to set size. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodeCount: The desired node count for the pool.
nodePoolId: Deprecated. The name of the node pool to update. This field
has been deprecated and replaced by the name field.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840).
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
nodeCount = _messages.IntegerField(3, variant=_messages.Variant.INT32)
nodePoolId = _messages.StringField(4)
projectId = _messages.StringField(5)
zone = _messages.StringField(6)
class ShieldedInstanceConfig(_messages.Message):
r"""A set of Shielded Instance options.
Fields:
enableIntegrityMonitoring: Defines whether the instance has integrity
monitoring enabled.
enableSecureBoot: Defines whether the instance has Secure Boot enabled.
"""
enableIntegrityMonitoring = _messages.BooleanField(1)
enableSecureBoot = _messages.BooleanField(2)
class ShieldedNodes(_messages.Message):
r"""Configuration of Shielded Nodes feature.
Fields:
enabled: Whether Shielded Nodes features are enabled on all nodes in this
cluster.
"""
enabled = _messages.BooleanField(1)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class StartIPRotationRequest(_messages.Message):
r"""StartIPRotationRequest creates a new IP for the cluster and then
performs a node upgrade on each node pool to point to the new IP.
Fields:
clusterId: Deprecated. The name of the cluster. This field has been
deprecated and replaced by the name field.
name: The name (project, location, cluster id) of the cluster to start IP
rotation. Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project
number](https://developers.google.com/console/help/new/#projectnumber).
This field has been deprecated and replaced by the name field.
rotateCredentials: Whether to rotate credentials during IP rotation.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
projectId = _messages.StringField(3)
rotateCredentials = _messages.BooleanField(4)
zone = _messages.StringField(5)
class StatusCondition(_messages.Message):
r"""StatusCondition describes why a cluster or a node pool has a certain
status (e.g., ERROR or DEGRADED).
Enums:
CodeValueValuesEnum: Machine-friendly representation of the condition
Fields:
code: Machine-friendly representation of the condition
message: Human-friendly representation of the condition
"""
class CodeValueValuesEnum(_messages.Enum):
r"""Machine-friendly representation of the condition
Values:
UNKNOWN: UNKNOWN indicates a generic condition.
GCE_STOCKOUT: GCE_STOCKOUT indicates that Google Compute Engine
resources are temporarily unavailable.
GKE_SERVICE_ACCOUNT_DELETED: GKE_SERVICE_ACCOUNT_DELETED indicates that
the user deleted their robot service account.
GCE_QUOTA_EXCEEDED: Google Compute Engine quota was exceeded.
SET_BY_OPERATOR: Cluster state was manually changed by an SRE due to a
system logic error.
CLOUD_KMS_KEY_ERROR: Unable to perform an encrypt operation against the
CloudKMS key used for etcd level encryption. More codes TBA
"""
UNKNOWN = 0
GCE_STOCKOUT = 1
GKE_SERVICE_ACCOUNT_DELETED = 2
GCE_QUOTA_EXCEEDED = 3
SET_BY_OPERATOR = 4
CLOUD_KMS_KEY_ERROR = 5
code = _messages.EnumField('CodeValueValuesEnum', 1)
message = _messages.StringField(2)
class TierConfig(_messages.Message):
r"""TierConfig is the configuration for a tier offering. For example the
GKE standard or advanced offerings which contain different levels of
functionality and possibly cost.
Enums:
ParentValueValuesEnum: The tier from which the tier being configured
inherits. The configured tier will inherit all the features from its
parent tier.
TierValueValuesEnum: The tier that is being configured with this value.
Fields:
parent: The tier from which the tier being configured inherits. The
configured tier will inherit all the features from its parent tier.
tier: The tier that is being configured with this value.
"""
class ParentValueValuesEnum(_messages.Enum):
r"""The tier from which the tier being configured inherits. The
configured tier will inherit all the features from its parent tier.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
class TierValueValuesEnum(_messages.Enum):
r"""The tier that is being configured with this value.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
parent = _messages.EnumField('ParentValueValuesEnum', 1)
tier = _messages.EnumField('TierValueValuesEnum', 2)
class TierSettings(_messages.Message):
r"""Cluster tier settings.
Enums:
TierValueValuesEnum: Cluster tier.
Fields:
tier: Cluster tier.
"""
class TierValueValuesEnum(_messages.Enum):
r"""Cluster tier.
Values:
TIER_UNSPECIFIED: TIER_UNSPECIFIED is the default value. If this value
is set during create or update, it defaults to the project level tier
setting.
STANDARD: Represents the standard tier or base Google Kubernetes Engine
offering.
ADVANCED: Represents the advanced tier.
"""
TIER_UNSPECIFIED = 0
STANDARD = 1
ADVANCED = 2
tier = _messages.EnumField('TierValueValuesEnum', 1)
class TimeWindow(_messages.Message):
r"""Represents an arbitrary window of time.
Fields:
endTime: The time that the window ends. The end time should take place
after the start time.
startTime: The time that the window first starts.
"""
endTime = _messages.StringField(1)
startTime = _messages.StringField(2)
class UpdateClusterRequest(_messages.Message):
r"""UpdateClusterRequest updates the settings of a cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
name: The name (project, location, cluster) of the cluster to update.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
update: A description of the update.
updatedCluster: The updated cluster object. This field must be empty if
'update' is set.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
name = _messages.StringField(2)
projectId = _messages.StringField(3)
update = _messages.MessageField('ClusterUpdate', 4)
updatedCluster = _messages.MessageField('Cluster', 5)
zone = _messages.StringField(6)
class UpdateMasterRequest(_messages.Message):
r"""UpdateMasterRequest updates the master of the cluster.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
masterVersion: The Kubernetes version to change the master to. Users may
specify either explicit versions offered by Kubernetes Engine or version
aliases, which have the following behavior: - "latest": picks the
highest valid Kubernetes version - "1.X": picks the highest valid
patch+gke.N patch in the 1.X version - "1.X.Y": picks the highest valid
gke.N patch in the 1.X.Y version - "1.X.Y-gke.N": picks an explicit
Kubernetes version - "-": picks the default Kubernetes version
name: The name (project, location, cluster) of the cluster to update.
Specified in the format 'projects/*/locations/*/clusters/*'.
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840).
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
masterVersion = _messages.StringField(2)
name = _messages.StringField(3)
projectId = _messages.StringField(4)
zone = _messages.StringField(5)
class UpdateNodePoolRequest(_messages.Message):
r"""SetNodePoolVersionRequest updates the version of a node pool.
Fields:
clusterId: Deprecated. The name of the cluster to upgrade. This field has
been deprecated and replaced by the name field.
image: The desired name of the image to use for this node. This is
used to create clusters using a custom image.
imageProject: The project containing the desired image to use for this
node pool. This is used to create clusters using a custom image.
imageType: The desired image type for the node pool.
locations: The desired list of Google Compute Engine
[zones](/compute/docs/zones#available) in which the node pool's nodes
should be located. Changing the locations for a node pool will result in
nodes being either created or removed from the node pool, depending on
whether locations are being added or removed.
name: The name (project, location, cluster, node pool) of the node pool to
update. Specified in the format
'projects/*/locations/*/clusters/*/nodePools/*'.
nodePoolId: Deprecated. The name of the node pool to upgrade. This field
has been deprecated and replaced by the name field.
nodeVersion: The Kubernetes version to change the nodes to (typically an
upgrade). Users may specify either explicit versions offered by
Kubernetes Engine or version aliases, which have the following behavior:
- "latest": picks the highest valid Kubernetes version - "1.X": picks
the highest valid patch+gke.N patch in the 1.X version - "1.X.Y": picks
the highest valid gke.N patch in the 1.X.Y version - "1.X.Y-gke.N":
picks an explicit Kubernetes version - "-": picks the Kubernetes master
version
projectId: Deprecated. The Google Developers Console [project ID or
project number](https://support.google.com/cloud/answer/6158840). This
field has been deprecated and replaced by the name field.
updatedNodePool: The updated node pool object. This field must be empty if
any other node pool field is set (e.g. 'node_version', 'image_type',
'locations', etc.)
upgradeSettings: Upgrade settings control disruption and speed of the
upgrade.
workloadMetadataConfig: The desired workload metadata config for the node
pool.
zone: Deprecated. The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides. This
field has been deprecated and replaced by the name field.
"""
clusterId = _messages.StringField(1)
image = _messages.StringField(2)
imageProject = _messages.StringField(3)
imageType = _messages.StringField(4)
locations = _messages.StringField(5, repeated=True)
name = _messages.StringField(6)
nodePoolId = _messages.StringField(7)
nodeVersion = _messages.StringField(8)
projectId = _messages.StringField(9)
updatedNodePool = _messages.MessageField('NodePool', 10)
upgradeSettings = _messages.MessageField('UpgradeSettings', 11)
workloadMetadataConfig = _messages.MessageField('WorkloadMetadataConfig', 12)
zone = _messages.StringField(13)
class UpgradeSettings(_messages.Message):
r"""These upgrade settings control the level of parallelism and the level of
disruption caused by an upgrade. maxUnavailable controls the number of
nodes that can be simultaneously unavailable. maxSurge controls the number
of additional nodes that can be added to the node pool temporarily for the
time of the upgrade to increase the number of available nodes.
(maxUnavailable + maxSurge) determines the level of parallelism (how many
nodes are being upgraded at the same time). Note: upgrades inevitably
introduce some disruption since workloads need to be moved from old nodes to
new, upgraded ones. Even if maxUnavailable=0, this holds true. (Disruption
stays within the limits of PodDisruptionBudget, if it is configured.) For
example, a 5-node pool is created with maxSurge set to 2 and maxUnavailable
set to 1. During an upgrade, GKE creates 2 upgraded nodes, then brings down
up to 3 existing nodes after the upgraded nodes are ready. GKE will only
bring down 1 node at a time.
Fields:
maxSurge: The maximum number of nodes that can be created beyond the
current size of the node pool during the upgrade process.
maxUnavailable: The maximum number of nodes that can be simultaneously
unavailable during the upgrade process. A node is considered available
if its status is Ready.
"""
maxSurge = _messages.IntegerField(1, variant=_messages.Variant.INT32)
maxUnavailable = _messages.IntegerField(2, variant=_messages.Variant.INT32)
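# Illustrative sketch (not part of the generated API surface): the surge
# upgrade example from the docstring above (5-node pool, maxSurge=2,
# maxUnavailable=1, so at most 3 nodes are in flux at the same time).
def _example_upgrade_settings():
  """Builds the maxSurge=2 / maxUnavailable=1 settings described above."""
  return UpgradeSettings(maxSurge=2, maxUnavailable=1)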
class UsableSubnetwork(_messages.Message):
r"""UsableSubnetwork resource returns the subnetwork name, its associated
network and the primary CIDR range.
Fields:
ipCidrRange: The range of internal addresses that are owned by this
subnetwork.
network: Network Name.
secondaryIpRanges: Secondary IP ranges.
statusMessage: A human readable status message representing the reasons
for cases where the caller cannot use the secondary ranges under the
subnet. For example if the secondary_ip_ranges is empty due to a
permission issue, an insufficient permission message will be given by
status_message.
subnetwork: Subnetwork Name.
"""
ipCidrRange = _messages.StringField(1)
network = _messages.StringField(2)
secondaryIpRanges = _messages.MessageField('UsableSubnetworkSecondaryRange', 3, repeated=True)
statusMessage = _messages.StringField(4)
subnetwork = _messages.StringField(5)
class UsableSubnetworkSecondaryRange(_messages.Message):
r"""Secondary IP range of a usable subnetwork.
Enums:
StatusValueValuesEnum: This field is to determine the status of the
secondary range programmatically.
Fields:
ipCidrRange: The range of IP addresses belonging to this subnetwork
secondary range.
rangeName: The name associated with this subnetwork secondary range, used
when adding an alias IP range to a VM instance.
status: This field is to determine the status of the secondary range
programmatically.
"""
class StatusValueValuesEnum(_messages.Enum):
r"""This field is to determine the status of the secondary range
programmatically.
Values:
UNKNOWN: UNKNOWN is the zero value of the Status enum. It's not a valid
status.
UNUSED: UNUSED denotes that this range is unclaimed by any cluster.
IN_USE_SERVICE: IN_USE_SERVICE denotes that this range is claimed by a
cluster for services. It cannot be used for other clusters.
IN_USE_SHAREABLE_POD: IN_USE_SHAREABLE_POD denotes this range was
created by the network admin and is currently claimed by a cluster for
pods. It can only be used by other clusters as a pod range.
IN_USE_MANAGED_POD: IN_USE_MANAGED_POD denotes this range was created by
Google Kubernetes Engine and is claimed for pods. It cannot be used
for other clusters.
"""
UNKNOWN = 0
UNUSED = 1
IN_USE_SERVICE = 2
IN_USE_SHAREABLE_POD = 3
IN_USE_MANAGED_POD = 4
ipCidrRange = _messages.StringField(1)
rangeName = _messages.StringField(2)
status = _messages.EnumField('StatusValueValuesEnum', 3)
class VerticalPodAutoscaling(_messages.Message):
r"""VerticalPodAutoscaling contains global, per-cluster information required
by Vertical Pod Autoscaler to automatically adjust the resources of pods
controlled by it.
Fields:
enabled: Enables vertical pod autoscaling.
"""
enabled = _messages.BooleanField(1)
class WorkloadIdentityConfig(_messages.Message):
r"""Configuration for the use of k8s Service Accounts in GCP IAM policies.
Fields:
identityNamespace: IAM Identity Namespace to attach all k8s Service
Accounts to.
workloadPool: The workload pool to attach all Kubernetes service accounts
to.
"""
identityNamespace = _messages.StringField(1)
workloadPool = _messages.StringField(2)
class WorkloadMetadataConfig(_messages.Message):
r"""WorkloadMetadataConfig defines the metadata configuration to expose to
workloads on the node pool.
Enums:
ModeValueValuesEnum: Mode is the configuration for how to expose metadata
to workloads running on the node pool.
NodeMetadataValueValuesEnum: NodeMetadata is the configuration for how to
expose metadata to the workloads running on the node.
Fields:
mode: Mode is the configuration for how to expose metadata to workloads
running on the node pool.
nodeMetadata: NodeMetadata is the configuration for how to expose metadata
to the workloads running on the node.
"""
class ModeValueValuesEnum(_messages.Enum):
r"""Mode is the configuration for how to expose metadata to workloads
running on the node pool.
Values:
MODE_UNSPECIFIED: Not set.
GCE_METADATA: Expose all GCE metadata to pods.
GKE_METADATA: Run the GKE Metadata Server on this node. The GKE Metadata
Server exposes a metadata API to workloads that is compatible with the
V1 Compute Metadata APIs exposed by the Compute Engine and App Engine
Metadata Servers. This feature can only be enabled if Workload
Identity is enabled at the cluster level.
"""
MODE_UNSPECIFIED = 0
GCE_METADATA = 1
GKE_METADATA = 2
class NodeMetadataValueValuesEnum(_messages.Enum):
r"""NodeMetadata is the configuration for how to expose metadata to the
workloads running on the node.
Values:
UNSPECIFIED: Not set.
SECURE: Prevent workloads not in hostNetwork from accessing certain VM
metadata, specifically kube-env, which contains Kubelet credentials,
and the instance identity token. Metadata concealment is a temporary
security solution available while the bootstrapping process for
cluster nodes is being redesigned with significant security
improvements. This feature is scheduled to be deprecated in the
future and later removed.
EXPOSE: Expose all VM metadata to pods.
GKE_METADATA_SERVER: Run the GKE Metadata Server on this node. The GKE
Metadata Server exposes a metadata API to workloads that is compatible
with the V1 Compute Metadata APIs exposed by the Compute Engine and
App Engine Metadata Servers. This feature can only be enabled if
Workload Identity is enabled at the cluster level.
"""
UNSPECIFIED = 0
SECURE = 1
EXPOSE = 2
GKE_METADATA_SERVER = 3
mode = _messages.EnumField('ModeValueValuesEnum', 1)
nodeMetadata = _messages.EnumField('NodeMetadataValueValuesEnum', 2)
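# Illustrative sketch (not part of the generated API surface): enabling the
# GKE metadata server on a node pool, which the docstring above notes is
# required when Workload Identity is enabled at the cluster level.
def _example_workload_metadata_config():
  """Builds a WorkloadMetadataConfig that runs the GKE metadata server."""
  return WorkloadMetadataConfig(
      mode=WorkloadMetadataConfig.ModeValueValuesEnum.GKE_METADATA)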
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
# ---- file: /examples/3_NeuralNetworks/convolutional_network.py | repo: 353622088/CapsNet | license: MIT ----
""" Convolutional Neural Network.
Build and train a convolutional neural network with TensorFlow.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
This example is using TensorFlow layers API, see 'convolutional_network_raw'
example for a raw implementation with variables.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import division, print_function, absolute_import
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../tmp/data/", one_hot=False)
import tensorflow as tf
# Training Parameters
learning_rate = 0.001
num_steps = 2000
batch_size = 128
# Network Parameters
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# Create the neural network
def conv_net(x_dict, n_classes, dropout, reuse, is_training):
# Define a scope for reusing the variables
with tf.variable_scope('ConvNet', reuse=reuse):
# TF Estimator input is a dict, in case of multiple inputs
x = x_dict['images']
# MNIST data input is a 1-D vector of 784 features (28*28 pixels)
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer with 32 filters and a kernel size of 5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
# Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(conv2)
# Fully connected layer (in tf contrib folder for now)
fc1 = tf.layers.dense(fc1, 1024)
# Apply Dropout (if is_training is False, dropout is not applied)
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Output layer, class prediction
out = tf.layers.dense(fc1, n_classes)
return out
# Define the model function (following TF Estimator Template)
def model_fn(features, labels, mode):
# Build the neural network
    # Because Dropout has different behavior at training and prediction time, we
# need to create 2 distinct computation graphs that still share the same weights.
logits_train = conv_net(features, num_classes, dropout, reuse=False,
is_training=True)
logits_test = conv_net(features, num_classes, dropout, reuse=True,
is_training=False)
# Predictions
pred_classes = tf.argmax(logits_test, axis=1)
pred_probas = tf.nn.softmax(logits_test)
# If prediction mode, early return
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
# Define loss and optimizer
print(logits_train.shape)
print(labels.shape)
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))
# tf.summary.scalar(name='loss', tensor=loss_op)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op,
global_step=tf.train.get_global_step())
# Evaluate the accuracy of the model
acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
# merge_all_op = tf.summary.merge_all()
# TF Estimators requires to return a EstimatorSpec, that specify
# the different ops for training, evaluating, ...
estim_specs = tf.estimator.EstimatorSpec(
mode=mode,
predictions=pred_classes,
loss=loss_op,
train_op=train_op,
eval_metric_ops={'accuracy': acc_op})
return estim_specs
# Build the Estimator
model = tf.estimator.Estimator(model_fn, model_dir='logdir')
# Define the input function for training
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.train.images}, y=mnist.train.labels,
batch_size=batch_size, num_epochs=None, shuffle=True)
# Train the Model
model.train(input_fn, steps=num_steps)
# Evaluate the Model
# Define the input function for evaluating
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.test.images}, y=mnist.test.labels,
batch_size=batch_size, shuffle=False)
# Use the Estimator 'evaluate' method
e = model.evaluate(input_fn)
print("Testing Accuracy:", e['accuracy'])
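# Optional extension (a sketch, not in the original example): reuse the trained
# Estimator for ad-hoc predictions. `model.predict` yields one class id per
# input image because the EstimatorSpec above uses `predictions=pred_classes`.
pred_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.test.images[:4]}, shuffle=False)
print("Predicted classes:", list(model.predict(pred_input_fn)))
print("True labels: ", mnist.test.labels[:4])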
# ---- file: /pyscutils/scvi_utils.py | repo: saketkc/pyscutils | license: BSD-3-Clause ----
import os
import warnings
warnings.simplefilter("ignore")
import shutil
from typing import Dict, Iterable, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import proplot
import scanpy as sc
import scvi
import seaborn as sns
import torch
import torch.nn as nn
from adjustText import adjust_text
from scvi import set_seed
from scvi.dataset import AnnDatasetFromAnnData
from scvi.models.utils import one_hot
from scvi.inference import UnsupervisedTrainer, load_posterior
from scvi.models.distributions import (
NegativeBinomial,
Poisson,
ZeroInflatedNegativeBinomial,
)
from scvi.models.log_likelihood import log_nb_positive, log_zinb_positive
from scvi.models.modules import DecoderSCVI, Encoder, FCLayers, LinearDecoderSCVI
from scvi.models.vae import LDVAE, VAE
from torch.distributions import Normal
from torch.distributions import kl_divergence as kl
## Modifications from scVI code marked with '################ ===>'
def compute_scvi_latent(
adata: sc.AnnData,
n_latent: int = 50,
n_encoder: int = 1,
n_epochs: int = 200,
lr: float = 1e-3,
use_batches: bool = False,
use_cuda: bool = False,
linear: bool = False,
cell_offset: str = "none",
gene_offset: str = "none",
ldvae_bias: bool = False,
reconstruction_loss: str = "zinb",
hvg_genes=None,
) -> Tuple[scvi.inference.Posterior, np.ndarray]:
"""Train and return a scVI model and sample a latent space
:param adata: sc.AnnData object non-normalized
:param n_latent: dimension of the latent space
:param n_epochs: number of training epochs
:param lr: learning rate
:param use_batches
:param use_cuda
:return: (scvi.Posterior, latent_space)
"""
# Convert easily to scvi dataset
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
# print(scviDataset.X.shape)
# print(scviDataset.X[:10,:5])
# print(scviDataset.raw.X.shape)
if isinstance(scviDataset.X, np.ndarray):
X = scviDataset.X
else:
X = scviDataset.X.toarray()
gene_mean = torch.mean(
torch.from_numpy(X).float().to(torch.cuda.current_device()), dim=1
)
cell_mean = torch.mean(
torch.from_numpy(X).float().to(torch.cuda.current_device()), dim=0
)
# Train a model
if not linear:
vae = VAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
reconstruction_loss=reconstruction_loss,
)
else:
vae = LDVAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers_encoder=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
bias=ldvae_bias,
reconstruction_loss=reconstruction_loss,
)
trainer = UnsupervisedTrainer(vae, scviDataset, train_size=1.0, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs, lr=lr)
# Extract latent space
posterior = trainer.create_posterior(
trainer.model, scviDataset, indices=np.arange(len(scviDataset))
).sequential()
latent, _, _ = posterior.get_latent()
return posterior, latent, vae, trainer
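# Example call (illustrative only): assumes `adata` holds raw counts and that a
# CUDA device is available, since this helper calls torch.cuda.current_device().
#
#     posterior, latent, vae, trainer = compute_scvi_latent(
#         adata, n_latent=30, n_epochs=100, use_cuda=True,
#         reconstruction_loss="nb", cell_offset="none", gene_offset="none")
#     adata.obsm["X_scvi"] = latent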
# Decoder
class DecoderSCVI(nn.Module):
"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
Parameters
----------
n_input
The dimensionality of the input (latent space)
n_output
The dimensionality of the output (data space)
n_cat_list
A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
n_layers
The number of fully-connected hidden layers
n_hidden
The number of nodes per hidden layer
dropout_rate
Dropout rate to apply to each of the hidden layers
Returns
-------
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__()
self.px_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=0,
)
# mean gamma
self.px_scale_decoder = nn.Sequential(
nn.Linear(n_hidden, n_output), nn.Softmax(dim=-1)
)
# dispersion: here we only deal with gene-cell dispersion case
self.px_r_decoder = nn.Linear(n_hidden, n_output)
# dropout
self.px_dropout_decoder = nn.Linear(n_hidden, n_output)
def forward(
self, dispersion: str, z: torch.Tensor, library: torch.Tensor, *cat_list: int
):
"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns parameters for the ZINB distribution of expression
#. If ``dispersion != 'gene-cell'`` then value for that param will be ``None``
Parameters
----------
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
z :
tensor with shape ``(n_input,)``
library
library size
cat_list
list of category membership(s) for this sample
Returns
-------
4-tuple of :py:class:`torch.Tensor`
parameters for the ZINB distribution of expression
"""
# The decoder returns values for the parameters of the ZINB distribution
px = self.px_decoder(z, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
# Clamp to high value: exp(12) ~ 160000 to avoid nans (computational stability)
px_rate = (torch.exp(library)) * px_scale # torch.clamp( , max=12)
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
return px_scale, px_r, px_rate, px_dropout
## Modifications from scVI code marked with '################ ===>'
class DecoderSCVIGeneCell(DecoderSCVI):
"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
Parameters
----------
n_input
The dimensionality of the input (latent space)
n_output
The dimensionality of the output (data space)
n_cat_list
A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
n_layers
The number of fully-connected hidden layers
n_hidden
The number of nodes per hidden layer
dropout_rate
Dropout rate to apply to each of the hidden layers
Returns
-------
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__(n_input, n_output, n_cat_list, n_layers, n_hidden)
def forward(
self,
dispersion: str,
z: torch.Tensor,
library: torch.Tensor,
*cat_list: int,
cell_offset: torch.Tensor,
gene_offset: torch.Tensor,
dispersion_clamp: list,
):
"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns parameters for the ZINB distribution of expression
#. If ``dispersion != 'gene-cell'`` then value for that param will be ``None``
Parameters
----------
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
z :
tensor with shape ``(n_input,)``
library
library size
cat_list
list of category membership(s) for this sample
Returns
-------
4-tuple of :py:class:`torch.Tensor`
parameters for the ZINB distribution of expression
"""
# The decoder returns values for the parameters of the ZINB distribution
px = self.px_decoder(z, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
        # Clamp to high value: exp(12) ~ 160000 to avoid nans (computational stability)
################ ===>
cell_offset = torch.reshape(cell_offset, (cell_offset.shape[0], 1))
px_rate = (
(torch.exp(library) * (cell_offset)) * px_scale * gene_offset
) # torch.clamp( , max=12)
# px_rate = cell_offset #torch.exp(library) + cell_mean * px_scale # torch.clamp( , max=12)
# px_rate = torch.exp(library + cell_mean) * px_scale # torch.clamp( , max=12)
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
if dispersion == "gene-cell" and dispersion_clamp:
px_r = torch.clamp(px_r, min=dispersion_clamp[0], max=dispersion_clamp[1])
return px_scale, px_r, px_rate, px_dropout
class LinearDecoderSCVIGeneCell(nn.Module):
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
use_batch_norm: bool = True,
bias: bool = False,
):
super(LinearDecoderSCVIGeneCell, self).__init__()
# mean gamma
self.factor_regressor = FCLayers(
n_in=n_input,
n_out=n_output,
n_cat_list=n_cat_list,
n_layers=1,
use_relu=False,
use_batch_norm=use_batch_norm,
bias=bias,
dropout_rate=0,
)
# dropout
self.px_dropout_decoder = FCLayers(
n_in=n_input,
n_out=n_output,
n_cat_list=n_cat_list,
n_layers=1,
use_relu=False,
use_batch_norm=use_batch_norm,
bias=bias,
dropout_rate=0,
)
def forward(
self,
dispersion: str,
z: torch.Tensor,
library: torch.Tensor,
*cat_list: int,
cell_offset: torch.Tensor,
gene_offset: torch.Tensor,
):
# The decoder returns values for the parameters of the ZINB distribution
raw_px_scale = self.factor_regressor(z, *cat_list)
px_scale = torch.softmax(raw_px_scale, dim=-1)
px_dropout = self.px_dropout_decoder(z, *cat_list)
##px_rate = torch.exp(library) * px_scale
################ ===>
cell_offset = torch.reshape(cell_offset, (cell_offset.shape[0], 1))
px_rate = (
(torch.exp(library) * cell_offset) * px_scale * gene_offset
) # torch.clamp( , max=12)
px_r = None
return px_scale, px_r, px_rate, px_dropout
# VAEGeneCell model
class VAEGeneCell(nn.Module):
"""Variational auto-encoder model.
    This is an implementation of the scVI model described in [Lopez18]_
Parameters
----------
n_input
Number of input genes
n_batch
Number of batches, if 0, no batch correction is performed.
n_labels
Number of labels
n_hidden
Number of nodes per hidden layer
n_latent
Dimensionality of the latent space
n_layers
Number of hidden layers used for encoder and decoder NNs
dropout_rate
Dropout rate for neural networks
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
log_variational
Log(data+1) prior to encoding for numerical stability. Not normalization.
reconstruction_loss
One of
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
* ``'poisson'`` - Poisson distribution
Examples
--------
>>> gene_dataset = CortexDataset()
>>> vae = VAE(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * False,
... n_labels=gene_dataset.n_labels)
"""
def __init__(
self,
n_input: int,
n_batch: int = 0,
n_labels: int = 0,
n_hidden: int = 128,
n_latent: int = 10,
n_layers: int = 1,
dropout_rate: float = 0.1,
dispersion: str = "gene",
log_variational: bool = True,
reconstruction_loss: str = "zinb",
latent_distribution: str = "normal",
cell_offset: str = "none", ################ ===>
gene_offset: str = "none", ################ ===>
dispersion_clamp: list = [],
beta_disentanglement: float = 1.0,
kl_type: str = "reverse",
):
super().__init__()
self.dispersion = dispersion
self.n_latent = n_latent
self.log_variational = log_variational
self.reconstruction_loss = reconstruction_loss
# Automatically deactivate if useless
self.n_batch = n_batch
self.n_labels = n_labels
self.latent_distribution = latent_distribution
################ ===>
self.cell_offset = cell_offset
self.gene_offset = gene_offset
self.dispersion_clamp = dispersion_clamp
self.beta_disentanglement = beta_disentanglement
self.kl_type = kl_type
if self.dispersion == "gene":
self.px_r = torch.nn.Parameter(torch.randn(n_input))
elif self.dispersion == "gene-batch":
self.px_r = torch.nn.Parameter(torch.randn(n_input, n_batch))
elif self.dispersion == "gene-label":
self.px_r = torch.nn.Parameter(torch.randn(n_input, n_labels))
elif self.dispersion == "gene-cell":
pass
else:
            raise ValueError(
                "dispersion must be one of ['gene', 'gene-batch',"
                " 'gene-label', 'gene-cell'], but input was "
                "{}".format(self.dispersion)
            )
# z encoder goes from the n_input-dimensional data to an n_latent-d
# latent space representation
self.z_encoder = Encoder(
n_input,
n_latent,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
distribution=latent_distribution,
)
# l encoder goes from n_input-dimensional data to 1-d library size
self.l_encoder = Encoder(
n_input, 1, n_layers=1, n_hidden=n_hidden, dropout_rate=dropout_rate
)
# decoder goes from n_latent-dimensional space to n_input-d data
################ ===>
self.decoder = DecoderSCVIGeneCell(
n_latent,
n_input,
n_cat_list=[n_batch],
n_layers=n_layers,
n_hidden=n_hidden,
)
def get_latents(self, x, y=None) -> torch.Tensor:
"""Returns the result of ``sample_from_posterior_z`` inside a list
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
Returns
-------
type
one element list of tensor
"""
return [self.sample_from_posterior_z(x, y)]
def sample_from_posterior_z(
self, x, y=None, give_mean=False, n_samples=5000
) -> torch.Tensor:
"""Samples the tensor of latent values from the posterior
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
give_mean
is True when we want the mean of the posterior distribution rather than sampling (Default value = False)
n_samples
how many MC samples to average over for transformed mean (Default value = 5000)
Returns
-------
type
tensor of shape ``(batch_size, n_latent)``
"""
if self.log_variational:
x = torch.log(1 + x)
qz_m, qz_v, z = self.z_encoder(x, y) # y only used in VAEC
if give_mean:
if self.latent_distribution == "ln":
samples = Normal(qz_m, qz_v.sqrt()).sample([n_samples])
z = self.z_encoder.z_transformation(samples)
z = z.mean(dim=0)
else:
z = qz_m
return z
def sample_from_posterior_l(self, x) -> torch.Tensor:
"""Samples the tensor of library sizes from the posterior
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)``
Returns
-------
type
tensor of shape ``(batch_size, 1)``
"""
if self.log_variational:
x = torch.log(1 + x)
ql_m, ql_v, library = self.l_encoder(x)
return library
def get_sample_scale(
self, x, batch_index=None, y=None, n_samples=1, transform_batch=None
) -> torch.Tensor:
"""Returns the tensor of predicted frequencies of expression
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
batch_index
array that indicates which batch the cells belong to with shape ``batch_size`` (Default value = None)
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
n_samples
number of samples (Default value = 1)
transform_batch
int of batch to transform samples into (Default value = None)
Returns
-------
type
tensor of predicted frequencies of expression with shape ``(batch_size, n_input)``
"""
return self.inference(
x,
batch_index=batch_index,
y=y,
n_samples=n_samples,
transform_batch=transform_batch,
)["px_scale"]
def get_sample_rate(
self, x, batch_index=None, y=None, n_samples=1, transform_batch=None
) -> torch.Tensor:
"""Returns the tensor of means of the negative binomial distribution
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
batch_index
array that indicates which batch the cells belong to with shape ``batch_size`` (Default value = None)
n_samples
number of samples (Default value = 1)
transform_batch
int of batch to transform samples into (Default value = None)
Returns
-------
type
tensor of means of the negative binomial distribution with shape ``(batch_size, n_input)``
"""
return self.inference(
x,
batch_index=batch_index,
y=y,
n_samples=n_samples,
transform_batch=transform_batch,
)["px_rate"]
def get_reconstruction_loss(
self, x, px_rate, px_r, px_dropout, **kwargs
) -> torch.Tensor:
# Reconstruction Loss
px_rate_ = px_rate
if self.reconstruction_loss == "zinb":
reconst_loss = (
-ZeroInflatedNegativeBinomial(
mu=px_rate_, theta=px_r, zi_logits=px_dropout
)
.log_prob(x)
.sum(dim=-1)
)
elif self.reconstruction_loss == "nb":
reconst_loss = (
-NegativeBinomial(mu=px_rate_, theta=px_r).log_prob(x).sum(dim=-1)
)
elif self.reconstruction_loss == "poisson":
reconst_loss = -Poisson(px_rate_).log_prob(x).sum(dim=-1)
return reconst_loss
def inference(
self, x, batch_index=None, y=None, n_samples=1, transform_batch=None, **kwargs
) -> Dict[str, torch.Tensor]:
"""Helper function used in forward pass"""
x_ = x
if self.log_variational:
x_ = torch.log(1 + x_)
# Sampling
qz_m, qz_v, z = self.z_encoder(x_, y)
ql_m, ql_v, library = self.l_encoder(x_)
if n_samples > 1:
qz_m = qz_m.unsqueeze(0).expand((n_samples, qz_m.size(0), qz_m.size(1)))
qz_v = qz_v.unsqueeze(0).expand((n_samples, qz_v.size(0), qz_v.size(1)))
# when z is normal, untran_z == z
untran_z = Normal(qz_m, qz_v.sqrt()).sample()
z = self.z_encoder.z_transformation(untran_z)
ql_m = ql_m.unsqueeze(0).expand((n_samples, ql_m.size(0), ql_m.size(1)))
ql_v = ql_v.unsqueeze(0).expand((n_samples, ql_v.size(0), ql_v.size(1)))
library = Normal(ql_m, ql_v.sqrt()).sample()
if transform_batch is not None:
dec_batch_index = transform_batch * torch.ones_like(batch_index)
else:
dec_batch_index = batch_index
################ ===>
try: # if use_cuda:
cell_offset = torch.ones(x.shape[0]).to(torch.cuda.current_device())
gene_offset = torch.ones(x.shape[1]).to(torch.cuda.current_device())
except:
cell_offset = torch.ones(x.shape[0])
gene_offset = torch.ones(x.shape[1])
if self.cell_offset == "count":
cell_offset = torch.sum(x, dim=1)
elif self.cell_offset == "mean":
cell_offset = torch.mean(x, dim=1)
if self.gene_offset == "count":
gene_offset = torch.sum(x, dim=0)
elif self.gene_offset == "mean":
gene_offset = torch.mean(x, dim=0)
px_scale, px_r, px_rate, px_dropout = self.decoder(
self.dispersion,
z,
library,
dec_batch_index,
y,
cell_offset=cell_offset, ################ ===>
gene_offset=gene_offset, ################ ===>
dispersion_clamp=self.dispersion_clamp,
)
if self.dispersion == "gene-label":
px_r = F.linear(
one_hot(y, self.n_labels), self.px_r
) # px_r gets transposed - last dimension is nb genes
elif self.dispersion == "gene-batch":
px_r = F.linear(one_hot(dec_batch_index, self.n_batch), self.px_r)
elif self.dispersion == "gene":
px_r = self.px_r
px_r = torch.exp(px_r)
return dict(
px_scale=px_scale,
px_r=px_r,
px_rate=px_rate,
px_dropout=px_dropout,
qz_m=qz_m,
qz_v=qz_v,
z=z,
ql_m=ql_m,
ql_v=ql_v,
library=library,
)
def forward(
self, x, local_l_mean, local_l_var, batch_index=None, y=None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Returns the reconstruction loss and the KL divergences
Parameters
----------
x
tensor of values with shape (batch_size, n_input)
local_l_mean
tensor of means of the prior distribution of latent variable l
with shape (batch_size, 1)
local_l_var
            tensor of variances of the prior distribution of latent variable l
with shape (batch_size, 1)
batch_index
array that indicates which batch the cells belong to with shape ``batch_size`` (Default value = None)
y
tensor of cell-types labels with shape (batch_size, n_labels) (Default value = None)
Returns
-------
type
the reconstruction loss and the Kullback divergences
"""
# Parameters for z latent distribution
outputs = self.inference(x, batch_index, y)
qz_m = outputs["qz_m"]
qz_v = outputs["qz_v"]
ql_m = outputs["ql_m"]
ql_v = outputs["ql_v"]
px_rate = outputs["px_rate"]
px_r = outputs["px_r"]
px_dropout = outputs["px_dropout"]
# KL Divergence
mean = torch.zeros_like(qz_m)
scale = torch.ones_like(qz_v)
# only use it on mean
if self.kl_type == "reverse":
kl_divergence_z = kl(
Normal(qz_m, torch.sqrt(qz_v)), Normal(mean, scale)
).sum(dim=1)
elif self.kl_type == "forward":
kl_divergence_z = kl(
Normal(mean, scale), Normal(qz_m, torch.sqrt(qz_v))
).sum(dim=1)
elif self.kl_type == "symmetric":
p_sum_q = Normal(mean + qz_m, scale + torch.sqrt(qz_v))
kl_divergence_z_f = kl(Normal(mean, scale), p_sum_q).sum(dim=1)
kl_divergence_z_r = kl(Normal(qz_m, torch.sqrt(qz_v)), p_sum_q).sum(dim=1)
kl_divergence_z = 0.5 * (kl_divergence_z_f + kl_divergence_z_r)
kl_divergence_l = kl(
Normal(ql_m, torch.sqrt(ql_v)),
Normal(local_l_mean, torch.sqrt(local_l_var)),
).sum(dim=1)
kl_divergence = kl_divergence_z * self.beta_disentanglement
reconst_loss = self.get_reconstruction_loss(
x,
px_rate,
px_r,
px_dropout,
)
return reconst_loss + kl_divergence_l, kl_divergence, 0.0
class LDVAEGeneCell(VAEGeneCell):
"""Linear-decoded Variational auto-encoder model.
Implementation of [Svensson20]_.
This model uses a linear decoder, directly mapping the latent representation
to gene expression levels. It still uses a deep neural network to encode
the latent representation.
Compared to standard VAE, this model is less powerful, but can be used to
inspect which genes contribute to variation in the dataset. It may also be used
for all scVI tasks, like differential expression, batch correction, imputation, etc.
However, batch correction may be less powerful as it assumes a linear model.
Parameters
----------
n_input
Number of input genes
n_batch
Number of batches
n_labels
Number of labels
n_hidden
Number of nodes per hidden layer (for encoder)
n_latent
Dimensionality of the latent space
n_layers_encoder
Number of hidden layers used for encoder NNs
dropout_rate
Dropout rate for neural networks
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
log_variational
Log(data+1) prior to encoding for numerical stability. Not normalization.
reconstruction_loss
One of
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
use_batch_norm
Bool whether to use batch norm in decoder
bias
Bool whether to have bias term in linear decoder
"""
def __init__(
self,
n_input: int,
n_batch: int = 0,
n_labels: int = 0,
n_hidden: int = 128,
n_latent: int = 10,
n_layers_encoder: int = 1,
dropout_rate: float = 0.1,
dispersion: str = "gene",
log_variational: bool = True,
reconstruction_loss: str = "nb",
use_batch_norm: bool = True,
bias: bool = False,
latent_distribution: str = "normal",
cell_offset: str = "none",
gene_offset: str = "none",
):
super().__init__(
n_input,
n_batch,
n_labels,
n_hidden,
n_latent,
n_layers_encoder,
dropout_rate,
dispersion,
log_variational,
reconstruction_loss,
latent_distribution,
cell_offset, ################ ===>
gene_offset, ################ ===>
)
self.use_batch_norm = use_batch_norm
self.z_encoder = Encoder(
n_input,
n_latent,
n_layers=n_layers_encoder,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
distribution=latent_distribution,
)
################ ===>
self.decoder = LinearDecoderSCVIGeneCell(
n_latent,
n_input,
n_cat_list=[n_batch],
use_batch_norm=use_batch_norm,
bias=bias,
)
@torch.no_grad()
def get_loadings(self) -> np.ndarray:
"""Extract per-gene weights (for each Z, shape is genes by dim(Z)) in the linear decoder."""
# This is BW, where B is diag(b) batch norm, W is weight matrix
if self.use_batch_norm is True:
w = self.decoder.factor_regressor.fc_layers[0][0].weight
bn = self.decoder.factor_regressor.fc_layers[0][1]
sigma = torch.sqrt(bn.running_var + bn.eps)
gamma = bn.weight
b = gamma / sigma
bI = torch.diag(b)
loadings = torch.matmul(bI, w)
else:
loadings = self.decoder.factor_regressor.fc_layers[0][0].weight
loadings = loadings.detach().cpu().numpy()
if self.n_batch > 1:
loadings = loadings[:, : -self.n_batch]
return loadings
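# Interpretation sketch (assumes a trained LDVAEGeneCell `ldvae` and the list of
# gene names used for training): the linear decoder's loadings are genes x latent
# dimensions, so each column can be ranked to see which genes drive that factor.
#
#     loadings = ldvae.get_loadings()                  # shape: (n_genes, n_latent)
#     loadings_df = pd.DataFrame(loadings, index=gene_names)
#     top_factor0 = loadings_df[0].abs().sort_values(ascending=False).head(10)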
def compute_scvi_latent(
adata: sc.AnnData,
n_latent: int = 50,
n_encoder: int = 1,
n_epochs: int = 200,
lr: float = 1e-3,
use_batches: bool = False,
use_cuda: bool = False,
linear: bool = False,
cell_offset: str = "none",
gene_offset: str = "none",
ldvae_bias: bool = False,
reconstruction_loss: str = "zinb",
dispersion: str = "gene",
hvg_genes="all",
point_size=10,
dispersion_clamp=[],
beta_disentanglement=1.0,
kl_type="reverse",
) -> Tuple[scvi.inference.Posterior, np.ndarray]:
"""Train and return a scVI model and sample a latent space
:param adata: sc.AnnData object non-normalized
:param n_latent: dimension of the latent space
:param n_epochs: number of training epochs
:param lr: learning rate
:param use_batches
:param use_cuda
:return: (scvi.Posterior, latent_space)
"""
# Convert easily to scvi dataset
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
if isinstance(scviDataset.X, np.ndarray):
X = scviDataset.X
else:
X = scviDataset.X.toarray()
# Train a model
if not linear:
vae = VAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
reconstruction_loss=reconstruction_loss,
dispersion=dispersion,
dispersion_clamp=dispersion_clamp,
beta_disentanglement=beta_disentanglement,
kl_type=kl_type,
)
else:
vae = LDVAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers_encoder=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
bias=ldvae_bias,
reconstruction_loss=reconstruction_loss,
dispersion=dispersion,
)
trainer = UnsupervisedTrainer(vae, scviDataset, train_size=1.0, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs, lr=lr)
# Extract latent space
posterior = trainer.create_posterior(
trainer.model, scviDataset, indices=np.arange(len(scviDataset))
).sequential()
latent, _, _ = posterior.get_latent()
return posterior, latent, vae, trainer
def RunVAE(
adata,
reconstruction_loss,
n_latent=30,
n_encoder=1,
linear=False,
cell_offset="none",
gene_offset="none",
ldvae=False,
ldvae_bias=False,
title_prefix="",
dispersion="gene",
hvg_genes="all",
point_size=5,
n_epochs=200,
lr=1e-3,
batch_size=1000,
use_cuda=False,
legend_loc="on data",
figsize=(10, 5),
legend_fontweight="normal",
sct_cell_pars=None,
outdir=None,
sct_gene_pars=None,
sct_model_pars_fit=None,
dispersion_clamp=[],
beta_disentanglement=1.0,
kl_type="reverse",
):
sct_gene_pars_df = pd.read_csv(sct_gene_pars, sep="\t", index_col=0)
sct_model_pars_fit_df = pd.read_csv(sct_model_pars_fit, sep="\t", index_col=0)
sct_model_paras_withgmean = sct_model_pars_fit_df.join(sct_gene_pars_df)
scvi_posterior, scvi_latent, scvi_vae, scvi_trainer = compute_scvi_latent(
adata,
n_encoder=n_encoder,
n_epochs=n_epochs,
n_latent=n_latent,
use_cuda=use_cuda,
linear=linear,
cell_offset=cell_offset,
gene_offset=gene_offset,
reconstruction_loss=reconstruction_loss,
dispersion=dispersion,
hvg_genes=hvg_genes,
dispersion_clamp=dispersion_clamp,
beta_disentanglement=beta_disentanglement,
kl_type=kl_type,
)
suffix = "_{}_{}_{}_{}".format(
cell_offset, gene_offset, reconstruction_loss, dispersion
)
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
# posterior freq of genes per cell
# scale = scvi_posterior.sequential(batch_size=batch_size).get_sample_scale()
# scale = scale.detach()
scale = scvi_posterior.get_sample_scale()
# batch_size=batch_size
for _ in range(99):
scale += scvi_posterior.get_sample_scale()
scale /= 100
scale_df = pd.DataFrame(scale)
scale_df.index = list(adata.obs_names)
scale_df.columns = list(scviDataset.gene_ids)
scale_df = scale_df.T
scvi_latent_df = pd.DataFrame(scvi_latent)
scvi_latent_df.index = list(adata.obs_names)
if outdir:
os.makedirs(outdir, exist_ok=True)
scale_df.to_csv(
os.path.join(outdir, "SCVI_scale_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
scvi_latent_df.to_csv(
os.path.join(outdir, "SCVI_latent_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
adata.obsm["X_scvi"] = scvi_latent
for gene, gene_scale in zip(adata.var.index, np.squeeze(scale).T):
adata.obs["scale_" + gene] = gene_scale
sc.pp.neighbors(adata, use_rep="X_scvi", n_neighbors=20, n_pcs=30)
sc.tl.umap(adata, min_dist=0.3)
sc.tl.leiden(adata, key_added="X_scvi", resolution=0.8)
X_umap = adata.obsm["X_umap"]
X_umap_df = pd.DataFrame(X_umap)
X_umap_df.index = list(adata.obs_names)
if outdir:
X_umap_df.to_csv(
os.path.join(outdir, "SCVI_Xumap_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
if isinstance(scviDataset.X, np.ndarray):
X = scviDataset.X
else:
X = scviDataset.X.toarray()
try:
X = torch.from_numpy(X).float().to(torch.cuda.current_device())
batch = torch.from_numpy(scviDataset.batch_indices.astype(float)).to(
torch.cuda.current_device()
)
except:
X = torch.from_numpy(X).float()
batch = torch.from_numpy(scviDataset.batch_indices.astype(float))
inference = scvi_vae.inference(X, batch)
# torch.cuda.empty_cache()
if reconstruction_loss == "nb":
reconst_loss = log_nb_positive(
X,
inference["px_rate"],
inference["px_r"],
inference["px_dropout"],
)
elif reconstruction_loss == "zinb":
reconst_loss = log_zinb_positive(
X,
inference["px_rate"],
inference["px_r"],
inference["px_dropout"],
)
gene_loss = np.nansum(reconst_loss.detach().cpu().numpy(), axis=0)
cell_loss = np.nansum(reconst_loss.detach().cpu().numpy(), axis=1)
gene_mean = np.array(adata[:, scviDataset.gene_names].X.mean(0))[0]
if not gene_mean.shape:
# TODO: need to handle this more gracefully
gene_mean = np.array(adata[:, scviDataset.gene_names].X.mean(0))
cell_mean = np.array(adata[:, scviDataset.gene_names].X.mean(1)).flatten()
fig1 = plt.figure(figsize=figsize)
ax = fig1.add_subplot(121)
ax.scatter(
gene_mean, gene_loss, label="Gene", alpha=0.5, color="black", s=point_size
)
gene_loss_df = pd.DataFrame([gene_mean, gene_loss])
gene_loss_df = gene_loss_df.T
gene_loss_df.index = list(scviDataset.gene_names)
gene_loss_df.columns = ["gene_mean", "gene_loss"]
cell_loss_df = pd.DataFrame([cell_mean, cell_loss])
cell_loss_df = cell_loss_df.T
cell_loss_df.index = list(adata.obs_names)
cell_loss_df.columns = ["cell_mean", "cell_loss"]
if outdir:
gene_loss_df.to_csv(
os.path.join(outdir, "SCVI_geneloss_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
cell_loss_df.to_csv(
os.path.join(outdir, "SCVI_cellloss_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
ax.set_xlabel("Mean counts")
    ax.set_ylabel("Reconstruction loss")
ax.legend(scatterpoints=1)
ax = fig1.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
legend_fontweight=legend_fontweight,
ax=ax,
size=point_size,
legend_loc=legend_loc,
)
title = "{} | Genewise | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig1.suptitle(title)
fig1.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
os.makedirs(outdir, exist_ok=True)
fig1.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig1.savefig(os.path.join(outdir, "{}.png".format(title)))
fig2 = plt.figure(figsize=figsize)
ax = fig2.add_subplot(121)
ax.scatter(cell_mean, cell_loss, label="Cell", alpha=0.5, s=point_size)
ax.set_xlabel("Mean counts")
    ax.set_ylabel("Reconstruction loss")
ax.legend(scatterpoints=1)
ax = fig2.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
ax=ax,
legend_loc=legend_loc,
legend_fontweight=legend_fontweight,
size=point_size,
)
title = "{} | Cellwise | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig2.suptitle(title)
fig2.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
fig2.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig2.savefig(os.path.join(outdir, "{}.png".format(title)))
if outdir:
model_name = "{} | Posterior | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
# scVI explicitly asks this path to be empty
shutil.rmtree(
os.path.join(outdir, model_name.replace(" ", "") + ".posterior"),
ignore_errors=True,
)
scvi_posterior.save_posterior(
os.path.join(outdir, model_name.replace(" ", "") + ".posterior")
)
if sct_cell_pars is None:
fig1.show()
fig2.show()
obj_to_return = (
scvi_posterior,
scvi_latent,
scvi_vae,
scvi_trainer,
fig1,
fig2,
None,
)
titles_to_return = (
"posterior",
"latent",
"vae",
"trainer",
"cellwise_plot",
"genewise_plot",
"libsize_plot",
)
return dict(zip(titles_to_return, obj_to_return))
title = "{} | Libsize | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
library_sizes = pd.DataFrame(scvi_posterior.get_stats())
sct_library_sizes = pd.read_csv(sct_cell_pars, sep="\t")
library_sizes.index = adata.obs_names
library_sizes.columns = ["scvi_libsize"]
library_sizes["scvi_loglibsize"] = np.log10(library_sizes["scvi_libsize"])
library_size_df = library_sizes.join(sct_library_sizes)
fig3 = plt.figure(figsize=(10, 5))
ax = fig3.add_subplot(121)
ax.scatter(
library_size_df["log_umi"],
library_size_df["scvi_libsize"],
alpha=0.5,
s=point_size,
)
ax.set_xlabel("log_umi")
ax.set_ylabel("scvi_libsize")
ax = fig3.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
ax=ax,
legend_fontweight=legend_fontweight,
legend_loc=legend_loc,
size=point_size,
)
fig3.suptitle(title)
fig3.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
fig3.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig3.savefig(os.path.join(outdir, "{}.png".format(title)))
fig1.show()
fig2.show()
fig3.show()
means_df = []
dropout_df = []
dispersion_df = []
for tensors in scvi_posterior.sequential(batch_size=batch_size):
sample_batch, _, _, batch_index, labels = tensors
outputs = scvi_posterior.model.inference(
sample_batch, batch_index=batch_index, y=labels
)
px_r = outputs["px_r"].detach().cpu().numpy()
px_rate = outputs["px_rate"].detach().cpu().numpy()
px_dropout = outputs["px_dropout"].detach().cpu().numpy()
dropout_df.append(px_dropout)
dispersion_df.append(px_r)
means_df.append(px_rate)
dropout_df = pd.DataFrame(np.vstack(dropout_df))
dispersion_df = pd.DataFrame(np.vstack(dispersion_df))
means_df = pd.DataFrame(np.vstack(means_df))
means_df.index = list(adata.obs_names)
means_df.columns = list(scviDataset.gene_names)
means_df = means_df.T
dropout_df.index = list(adata.obs_names)
dropout_df.columns = list(scviDataset.gene_names)
dropout_df = dropout_df.T
dispersion_df.index = list(adata.obs_names)
dispersion_df.columns = list(scviDataset.gene_names)
dispersion_df = dispersion_df.T
reconst_loss_df = pd.DataFrame(reconst_loss.detach().cpu().numpy())
reconst_loss_df.index = list(adata.obs_names)
reconst_loss_df.columns = list(scviDataset.gene_names)
reconst_loss_df = reconst_loss_df.T
if outdir:
os.makedirs(outdir, exist_ok=True)
means_df.to_csv(
os.path.join(outdir, "SCVI_means_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
dropout_df.to_csv(
os.path.join(outdir, "SCVI_dropout_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
dispersion_df.to_csv(
os.path.join(outdir, "SCVI_dispersions_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
reconst_loss_df.to_csv(
os.path.join(outdir, "SCVI_reconstloss_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
obj_to_return = (
scvi_posterior,
scvi_latent,
scvi_vae,
scvi_trainer,
fig1,
fig2,
fig3,
)
titles_to_return = (
"posterior",
"latent",
"vae",
"trainer",
"cellwise_plot",
"genewise_plot",
"libsize_plot",
)
sct_gene_pars_df = pd.read_csv(sct_gene_pars, sep="\t", index_col=0)
gene_cell_disp_summary_df = pd.DataFrame(
dispersion_df.median(1), columns=["gene_cell_mean_disp"]
)
merged_df = sct_gene_pars_df.join(gene_cell_disp_summary_df).dropna()
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(121)
ax.scatter(
merged_df["gmean"], merged_df["gene_cell_mean_disp"], alpha=0.5, label="Gene"
)
ax.legend(frameon=False)
ax.set_xlabel("Gene gmean")
ax.set_ylabel("SCVI theta")
merged_df = sct_gene_pars_df.join(sct_model_pars_fit_df)
ax = fig.add_subplot(122)
ax.scatter(merged_df["gmean"], merged_df["theta"], alpha=0.5, label="Gene")
ax.legend(frameon=False) # , loc='upper left')
ax.set_xlabel("Gene gmean")
ax.set_ylabel("SCT theta")
title = "{} | ThetaVSGmean | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig.suptitle(title)
fig.tight_layout()
title = title.replace(" ", "")
if outdir:
fig.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig.savefig(os.path.join(outdir, "{}.png".format(title)))
sct_library_sizes = pd.read_csv(sct_cell_pars, sep="\t")
mean_scvi_disp_df = pd.DataFrame(dispersion_df.mean(1), columns=["scvi_dispersion"])
sct_disp_df = pd.read_csv(
sct_cell_pars.replace("_cell_", "_model_"), sep="\t", index_col=0
)
joined_df = sct_disp_df.join(mean_scvi_disp_df)
title = "{} | Dispersion | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig4 = plt.figure(figsize=(10, 5))
ax = fig4.add_subplot(121)
ax.scatter(joined_df["theta"], joined_df["scvi_dispersion"], alpha=0.5)
ax.axline([0, 0], [1, 1], color="gray", linestyle="dashed")
ax.set_xlabel("SCT theta")
ax.set_ylabel("scVI theta")
ax = fig4.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
ax=ax,
legend_fontweight=legend_fontweight,
legend_loc=legend_loc,
size=point_size,
)
fig4.suptitle(title)
fig4.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
fig4.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig4.savefig(os.path.join(outdir, "{}.png".format(title)))
return dict(zip(titles_to_return, obj_to_return))
def RunSCVI(
counts_dir,
metadata_file,
sct_cell_pars,
outdir,
title_prefix="",
idents_col="phenoid",
reconstruction_loss="nb",
dispersion="gene-cell",
cell_offset="none",
gene_offset="none",
n_encoder=1,
hvg_genes=3000,
ldvae=False,
ldvae_bias=False,
use_cuda=True,
genes_to_exclude_file=None,
lr=1e-3,
kl_type="reverse",
**kwargs,
):
adata = sc.read_10x_mtx(counts_dir)
metadata = pd.read_csv(metadata_file, sep="\t", index_col=0)
adata.obs["named_clusters"] = metadata[idents_col]
n_epochs = np.min([round((20000 / adata.n_obs) * 400), 400])
sct_model_pars_fit = sct_cell_pars.replace("cell_pars", "model_pars_fit")
sct_gene_pars = sct_cell_pars.replace("cell_pars", "gene_attrs")
if genes_to_exclude_file:
genes_to_exclude_df = pd.read_csv(genes_to_exclude_file, sep="\t", index_col=0)
genes_to_exclude = genes_to_exclude_df.index.tolist()
all_genes = adata.var_names
genes_to_keep = list(set(all_genes).difference(genes_to_exclude))
adata = adata[:, genes_to_keep]
results = RunVAE(
adata,
reconstruction_loss,
linear=ldvae,
title_prefix=title_prefix,
n_encoder=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
hvg_genes=hvg_genes,
n_epochs=n_epochs,
lr=lr,
dispersion=dispersion,
use_cuda=use_cuda,
sct_cell_pars=sct_cell_pars,
sct_gene_pars=sct_gene_pars,
sct_model_pars_fit=sct_model_pars_fit,
outdir=outdir,
kl_type=kl_type,
**kwargs,
)
return results
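# Example invocation (all paths below are placeholders, not files shipped with
# the package); RunSCVI reads the 10x matrix, trains the modified VAE and writes
# the per-gene / per-cell diagnostics into `outdir`.
#
#     results = RunSCVI(
#         counts_dir="data/filtered_feature_bc_matrix/",
#         metadata_file="data/metadata.tsv",
#         sct_cell_pars="data/sct_cell_pars.tsv",
#         outdir="results/scvi_run",
#         reconstruction_loss="nb",
#         dispersion="gene-cell",
#         hvg_genes=3000,
#         use_cuda=True)
#     posterior, latent = results["posterior"], results["latent"]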
# ---- file: /clients/python-flask/generated/openapi_server/models/com_adobe_cq_social_commons_emailreply_impl_custom_email_client_provider_properties.py | repo: shinesolutions/swagger-aem-osgi | license: Apache-2.0 ----
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.config_node_property_array import ConfigNodePropertyArray # noqa: F401,E501
from openapi_server.models.config_node_property_integer import ConfigNodePropertyInteger # noqa: F401,E501
from openapi_server import util
class ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, priority_order: ConfigNodePropertyInteger=None, reply_email_patterns: ConfigNodePropertyArray=None): # noqa: E501
"""ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties - a model defined in OpenAPI
:param priority_order: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties. # noqa: E501
:type priority_order: ConfigNodePropertyInteger
:param reply_email_patterns: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties. # noqa: E501
:type reply_email_patterns: ConfigNodePropertyArray
"""
self.openapi_types = {
'priority_order': ConfigNodePropertyInteger,
'reply_email_patterns': ConfigNodePropertyArray
}
self.attribute_map = {
'priority_order': 'priorityOrder',
'reply_email_patterns': 'replyEmailPatterns'
}
self._priority_order = priority_order
self._reply_email_patterns = reply_email_patterns
@classmethod
def from_dict(cls, dikt) -> 'ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The comAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties. # noqa: E501
:rtype: ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties
"""
return util.deserialize_model(dikt, cls)
@property
def priority_order(self) -> ConfigNodePropertyInteger:
"""Gets the priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:return: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:rtype: ConfigNodePropertyInteger
"""
return self._priority_order
@priority_order.setter
def priority_order(self, priority_order: ConfigNodePropertyInteger):
"""Sets the priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:param priority_order: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:type priority_order: ConfigNodePropertyInteger
"""
self._priority_order = priority_order
@property
def reply_email_patterns(self) -> ConfigNodePropertyArray:
"""Gets the reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:return: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:rtype: ConfigNodePropertyArray
"""
return self._reply_email_patterns
@reply_email_patterns.setter
def reply_email_patterns(self, reply_email_patterns: ConfigNodePropertyArray):
"""Sets the reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:param reply_email_patterns: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:type reply_email_patterns: ConfigNodePropertyArray
"""
self._reply_email_patterns = reply_email_patterns
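# Usage sketch (illustrative, not generated code): instances are normally built
# from the JSON payload returned by the AEM OSGi config endpoint; the payload
# keys follow `attribute_map` above and the values shown here are assumptions.
#
#     props = ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.from_dict(
#         {'priorityOrder': {'value': 10}, 'replyEmailPatterns': {'values': ['^>.*']}})
#     print(props.priority_order, props.reply_email_patterns)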
# ---- file: /jobsPortal/jobsPortal/urls.py | repo: shivendra04/DjangoProjects | license: none ----
"""jobsPortal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from jobsApp import views
urlpatterns = [
path('admin/', admin.site.urls),
path('home/', views.home),
path('home/hydjobs/', views.hydjob),
path('home/punejobs/', views.punejob),
path('home/banglorejobs/', views.banglorejob),
path('home/chennaijobs/', views.chennaijob),
]
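# Note (sketch, not part of this repo): each route above needs a matching view
# function in jobsApp/views.py, along the lines of:
#
#     from django.shortcuts import render
#
#     def hydjob(request):
#         return render(request, 'jobsApp/hydjobs.html')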
# ---- file: /src/morphforgeexamples/multicell_simulation/multicell_simulation010.py | repo: unidesigner/morphforge | license: BSD-2-Clause ----
"""[*] Two cells connected with an AMPA synapse"""
# ---- file: /tests/spoof/gs_feature_elision.py | repo: ryanfb/OCRmyPDF | license: MIT ----
#!/usr/bin/env python3
# © 2016 James R. Barlow: github.com/jbarlow83
import sys
import os
from subprocess import check_call
"""Replicate one type of Ghostscript feature elision warning during
PDF/A creation."""
def real_ghostscript(argv):
gs_args = ['gs'] + argv[1:]
os.execvp("gs", gs_args)
return # Not reachable
elision_warning = """GPL Ghostscript 9.20: Setting Overprint Mode to 1
not permitted in PDF/A-2, overprint mode not set"""
def main():
if '--version' in sys.argv:
print('9.20')
        print('SPOOFED: ' + os.path.basename(__file__))
sys.exit(0)
gs_args = ['gs'] + sys.argv[1:]
check_call(gs_args)
if '-sDEVICE=pdfwrite' in sys.argv[1:]:
print(elision_warning)
sys.exit(0)
if __name__ == '__main__':
main()
# ---- file: /HDGiiCmSgJeeu3388_19.py | repo: daniel-reich/turbo-robot | license: none ----
"""
A fuse melts when a current in an electrical device exceeds the fuse's rating,
breaking the circuit and preventing the heat from building up too much (which
can cause a fire). The ideal fuse to choose is **higher** than the device's
current output, yet **as close as possible** to it as well.
Given a list of _fuse ratings_, and the _device's current output_, return
which of the fuses is the best for the device.
### Examples
choose_fuse(["3V", "5V", "12V"], "4.5V") ➞ "5V"
choose_fuse(["5V", "14V", "2V"], "5.5V") ➞ "14V"
choose_fuse(["17V", "15V", "12V"], "9V") ➞ "12V"
### Notes
* You will be given three possible ratings in voltage.
* Fuses may not be in a sorted order.
* Assume that there is a valid fuse in every test case
"""
def choose_fuse(f, c):
f = [int(e[:-1]) for e in f if float(e[:-1]) >= float(c[:-1])]
return str(min(f))+'V'
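# Sanity checks added for illustration; they mirror the docstring examples above.
if __name__ == "__main__":
    assert choose_fuse(["3V", "5V", "12V"], "4.5V") == "5V"
    assert choose_fuse(["5V", "14V", "2V"], "5.5V") == "14V"
    assert choose_fuse(["17V", "15V", "12V"], "9V") == "12V"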
# ---- file: /python3/qr/zbarlight_test.py | repo: ericosur/ericosur-snippet | license: none ----
#!/usr/bin/env python3
# coding: utf-8
'''
apt-get install libzbar-dev
pip install zbarlight
I do not recommend using this module to decode qrcode.
'''
import sys
from PIL import Image
import common
try:
import zbarlight
except ImportError:
    print('need to install zbarlight (python) and libzbar-dev')
sys.exit(1)
def read_image(fn):
''' read image '''
im = None
with open(fn, "rb") as fin:
im = Image.open(fin)
im.load()
return im
def process():
''' process '''
arr = common.get_pngs()
for fn in arr:
print('fn:', fn)
im = read_image(fn)
codes = zbarlight.scan_codes(['qrcode'], im)
# codes in type 'byte'
for s in codes:
print(s)
print(s.decode('utf-8'))
def main():
''' main '''
process()
if __name__ == '__main__':
main()
# ---- file: /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/aio/_storage_management_client_async.py | repo: YijunXieMS/azure-sdk-for-python | licenses: LicenseRef-scancode-generic-cla, LGPL-2.1-or-later, MIT ----
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import StorageManagementClientConfiguration
from .operations_async import Operations
from .operations_async import SkusOperations
from .operations_async import StorageAccountsOperations
from .operations_async import UsagesOperations
from .operations_async import ManagementPoliciesOperations
from .operations_async import PrivateEndpointConnectionsOperations
from .operations_async import PrivateLinkResourcesOperations
from .operations_async import ObjectReplicationPoliciesOperations
from .operations_async import EncryptionScopesOperations
from .operations_async import BlobServicesOperations
from .operations_async import BlobContainersOperations
from .operations_async import FileServicesOperations
from .operations_async import FileSharesOperations
from .operations_async import QueueServicesOperations
from .operations_async import QueueOperations
from .operations_async import TableServicesOperations
from .operations_async import TableOperations
from .. import models
class StorageManagementClient(object):
"""The Azure Storage Management API.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.storage.v2019_06_01.aio.operations_async.Operations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.storage.v2019_06_01.aio.operations_async.SkusOperations
:ivar storage_accounts: StorageAccountsOperations operations
:vartype storage_accounts: azure.mgmt.storage.v2019_06_01.aio.operations_async.StorageAccountsOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.storage.v2019_06_01.aio.operations_async.UsagesOperations
:ivar management_policies: ManagementPoliciesOperations operations
:vartype management_policies: azure.mgmt.storage.v2019_06_01.aio.operations_async.ManagementPoliciesOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.storage.v2019_06_01.aio.operations_async.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.storage.v2019_06_01.aio.operations_async.PrivateLinkResourcesOperations
:ivar object_replication_policies: ObjectReplicationPoliciesOperations operations
:vartype object_replication_policies: azure.mgmt.storage.v2019_06_01.aio.operations_async.ObjectReplicationPoliciesOperations
:ivar encryption_scopes: EncryptionScopesOperations operations
:vartype encryption_scopes: azure.mgmt.storage.v2019_06_01.aio.operations_async.EncryptionScopesOperations
:ivar blob_services: BlobServicesOperations operations
:vartype blob_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.BlobServicesOperations
:ivar blob_containers: BlobContainersOperations operations
:vartype blob_containers: azure.mgmt.storage.v2019_06_01.aio.operations_async.BlobContainersOperations
:ivar file_services: FileServicesOperations operations
:vartype file_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.FileServicesOperations
:ivar file_shares: FileSharesOperations operations
:vartype file_shares: azure.mgmt.storage.v2019_06_01.aio.operations_async.FileSharesOperations
:ivar queue_services: QueueServicesOperations operations
:vartype queue_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.QueueServicesOperations
:ivar queue: QueueOperations operations
:vartype queue: azure.mgmt.storage.v2019_06_01.aio.operations_async.QueueOperations
:ivar table_services: TableServicesOperations operations
:vartype table_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.TableServicesOperations
:ivar table: TableOperations operations
:vartype table: azure.mgmt.storage.v2019_06_01.aio.operations_async.TableOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = StorageManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.skus = SkusOperations(
self._client, self._config, self._serialize, self._deserialize)
self.storage_accounts = StorageAccountsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.management_policies = ManagementPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.object_replication_policies = ObjectReplicationPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.encryption_scopes = EncryptionScopesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.blob_services = BlobServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.blob_containers = BlobContainersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.file_services = FileServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.file_shares = FileSharesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.queue_services = QueueServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.queue = QueueOperations(
self._client, self._config, self._serialize, self._deserialize)
self.table_services = TableServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.table = TableOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "StorageManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
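# Illustrative sketch (not part of the generated client): a minimal example of
# constructing the async client and using it as an async context manager, which
# exercises the __aenter__/__aexit__ methods defined above. DefaultAzureCredential
# comes from the separate azure-identity package and is an assumption here; the
# concrete operation methods live in the *Operations classes imported from
# .operations_async.
async def _example_usage(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential  # assumed installed
    credential = DefaultAzureCredential()
    async with StorageManagementClient(credential, subscription_id) as client:
        # operation groups such as client.storage_accounts or client.skus are
        # available here; see their *Operations classes for concrete methods.
        pass
    await credential.close()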
| [
"[email protected]"
] | |
77842a6aee9b5ded6310e374e78ec44dfddb45bd | d2cb930ed5df0b1b5f7944e00f6f884bf014803d | /douban/twisted-demo.py | fcf677fc5cecf53c84cde258c7d3baea35271f91 | [] | no_license | sixDegree/python-scrapy-demo | 3cae4298b01edab65449cfe9af56b2fa59f4c07d | b66530e54156be8c7877f1fc4d497fd497b6fdda | refs/heads/master | 2020-06-17T03:16:23.038061 | 2019-07-08T09:25:15 | 2019-07-08T09:25:15 | 195,777,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,184 | py | from twisted.internet import reactor # 事件循环(自动终止条件:所有socket都已移除)
from twisted.internet import defer      # defer.Deferred: a special socket-like object (must be fired manually and removed manually)
from twisted.internet import task
import treq                             # used to send asynchronous requests; returns a Deferred object
import time
# The Deferred (delayed-result) mechanism:
# a Deferred represents a value that cannot be obtained immediately
def demo_defer1():
d = defer.Deferred()
print("called:", d.called) # False
print("call...")
d.callback("Hello")
print("called:", d.called) # True
print("result:", d.result) # Hello
def demo_defer2():
def done(v):
print("done called")
return "Hello " + v
d = defer.Deferred()
d.addCallback(done)
print("called:", d.called) # False
print("call...")
d.callback("Tom")
print("called:", d.called) # True
print("result:", d.result) # Hello Tom
def demo_defer3():
def status(*ds):
return [(getattr(d, 'result', 'N/A'), len(d.callbacks)) for d in ds]
def b_callback(arg):
print("b_callback called with arg =", arg)
return b
def on_done(arg):
print("on_done called with arg =", arg)
return arg
a = defer.Deferred()
b = defer.Deferred()
a.addCallback(b_callback).addCallback(on_done)
print(status(a, b)) # [('N/A', 2), ('N/A', 0)]
a.callback(3) # b_callback called with arg = 3
print(status(a, b)) # [(<Deferred at 0x1047a0da0>, 1), ('N/A', 1)]
b.callback(4) # on_done called with arg = 4
print(status(a, b)) # [(4, 0), (None, 0)]
def demo_defer4():
def status(*ds):
return [(getattr(d, 'result', 'N/A'), len(d.callbacks)) for d in ds]
def b_callback(arg):
print("b_callback called with arg =", arg)
return b
def on_done(arg):
print("on_done called with arg =", arg)
return arg
a = defer.Deferred()
b = defer.Deferred()
a.addCallback(b_callback).addCallback(on_done)
print(status(a, b)) # [('N/A', 2), ('N/A', 0)]
b.callback(4)
print(status(a, b)) # [('N/A', 2), (4, 0)]
a.callback(3) # b_callback called with arg = 3
# on_done called with arg = 4
print(status(a, b)) # [(4, 0), (None, 0)]
def demo_defer5():
def on_done(arg):
print("on_done called with arg =", arg)
return arg
dfds = [defer.Deferred() for i in range(5)]
defer.DeferredList(dfds).addCallback(on_done)
for i in range(5):
dfds[i].callback(i)
# on_done called with arg = [(True, 0), (True, 1), (True, 2), (True, 3), (True, 4)]
    # on_done is only called after every deferred in the list has fired (had callback(...) called on it)
def demo_reactor1():
def done(arg):
print("Done", arg)
def defer_task():
print("Start")
d = defer.Deferred()
time.sleep(3)
d.callback("123")
return d
def stop():
reactor.stop()
defer_task().addCallback(done)
reactor.callLater(0, stop)
reactor.run()
def demo_reactor2():
def done(arg):
print("Done", arg)
def all_done(arg):
print("All done", arg)
def defer_task(i):
print("Start", i)
d = defer.Deferred()
d.addCallback(done)
time.sleep(2)
d.callback(i)
return d
def stop():
print("Stop reactor")
reactor.stop()
dfds = defer.DeferredList([defer_task(i) for i in range(5)])
dfds.addCallback(all_done)
reactor.callLater(0, stop)
reactor.run()
def demo_reactor3():
def done(arg):
print("Done", arg)
def all_done(arg):
print("All done", arg)
print("Stop reactor")
reactor.stop()
def defer_task(i):
print("Start", i)
return task.deferLater(reactor, 2, done, i)
dfds = defer.DeferredList([defer_task(i) for i in range(5)])
dfds.addBoth(all_done)
# dfds.addCallback(all_done)
# reactor.callLater(5, stop)
reactor.run()
def demo_treq_get(url):
def get_done(response):
print("get response:", response)
reactor.stop()
treq.get(url).addCallback(get_done)
reactor.run()
def main():
@defer.inlineCallbacks
def my_task1():
print("Start task1")
url = "http://www.baidu.com"
d = treq.get(url.encode('utf-8'))
d.addCallback(parse)
yield d
def my_task2():
print("Start task2")
return task.deferLater(reactor, 2, parse, "200")
    @defer.inlineCallbacks  # must use `yield` in the decorated function
def my_task3():
print("Start task3")
yield task.deferLater(reactor, 2, parse, "400")
def parse(response):
print("parse response:", response)
def all_done(arg):
print("All done", arg)
reactor.stop()
dfds = defer.DeferredList([my_task1(), my_task2(), my_task3(), ])
dfds.addBoth(all_done)
reactor.run()
if __name__ == "__main__":
# demo_defer1()
# demo_defer2()
# demo_defer3()
# demo_defer4()
# demo_defer5()
# demo_reactor1()
# demo_reactor2()
# demo_reactor3()
# demo_treq_get('http://www.baidu.com')
main()
| [
"[email protected]"
] | |
c98bf9af78911012a5d580d8fab568dc0dd4d262 | 5aa0e5f32d529c3321c28d37b0a12a8cf69cfea8 | /client/gui_lib/GUIElement.py | 9e1b3576bea5c0ed0b0177d38d061da26e549710 | [] | no_license | sheepsy90/survive | 26495f1ff2d8247fbb9470882f8be9f5272e7f2c | 0eddf637be0eacd34415761b78fc2c9d50bc1528 | refs/heads/master | 2021-01-09T05:55:16.546762 | 2017-02-03T20:15:28 | 2017-02-03T20:15:28 | 80,864,391 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | import pygame
class GUIElement(object):
TEXT = 2
BUTTON = 1
def __init__(self, name, rect):
self.name = name
self.x, self.y, self.width, self.height = rect
self.is_hover = False
self.gui_handler = None
self.focus = False
self.visible = True
self.z_order = 0
self.titleFont = pygame.font.Font('resources/fonts/VENUSRIS.ttf', 64)
def set_zorder(self, order):
self.z_order = order
def get_zorder(self):
return self.z_order
def get_name(self):
return self.name
def set_hover_state(self, mx, my):
if self.x <= mx <= self.width+self.x and self.y <= my <= self.height+self.y:
self.is_hover = True
else:
self.is_hover = False
def update(self, mx, my, mouse_buttons, events):
self.set_hover_state(mx, my)
def get_rect(self):
return pygame.Rect(self.x, self.y, self.width, self.height)
def is_hover_active(self):
return self.is_hover
def draw(self, renderer):
raise NotImplementedError
def register_gui_handler(self, gui_handler):
self.gui_handler = gui_handler
def enable_focus(self):
self.focus = True
def disable_focus(self):
self.focus = False
def has_focus(self):
return self.focus
def set_visible(self, value):
self.visible = value
def is_visible(self):
return self.visible | [
"[email protected]"
] | |
d6b7f74c1a8958d8c0d2b441c408b1a559b1d5a0 | 1d21b7bc9205c9c2acd8b8fd8ee75dec93e974d4 | /qa/rpc-tests/p2p-acceptblock.py | db03aff39949a8e3e99ec7b3f0a24f9f5da34678 | [
"MIT"
] | permissive | ZioFabry/LINC2 | 494d12be6034b7f5999960e3f3ed62f154be7ab8 | a2e0e06cf68771a82bb1d4da30e0c914c8589bbe | refs/heads/master | 2020-05-22T18:28:27.590171 | 2019-05-13T19:51:49 | 2019-05-13T19:51:49 | 186,471,965 | 0 | 0 | MIT | 2019-05-13T18:10:28 | 2019-05-13T18:10:27 | null | UTF-8 | Python | false | false | 12,328 | py | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b. Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer".  Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LINCD", "lincd"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| [
"[email protected]"
] | |
8bab37daf96d71aa280e74d681d7515f1291bf03 | c9f67529e10eb85195126cfa9ada2e80a834d373 | /lib/python3.5/site-packages/torch/distributions/geometric.py | 1e4b121cd7b4cfcccd548bf86ff634e3392b7ebe | [
"Apache-2.0"
] | permissive | chilung/dllab-5-1-ngraph | 10d6df73ea421bfaf998e73e514972d0cbe5be13 | 2af28db42d9dc2586396b6f38d02977cac0902a6 | refs/heads/master | 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 | Apache-2.0 | 2022-12-08T04:59:31 | 2019-01-13T14:19:16 | Python | UTF-8 | Python | false | false | 2,923 | py | from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, _finfo
from torch.nn.functional import binary_cross_entropy_with_logits
class Geometric(Distribution):
r"""
Creates a Geometric distribution parameterized by `probs`, where `probs` is the probability of success of Bernoulli
    trials. It represents the probability that in k + 1 Bernoulli trials, the first k trials fail
    before a success is seen.
Samples are non-negative integers [0, inf).
Example::
>>> m = Geometric(torch.tensor([0.3]))
>>> m.sample() # underlying Bernoulli has 30% chance 1; 70% chance 0
2
[torch.FloatTensor of size 1]
Args:
        probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
logits (Number, Tensor): the log-odds of sampling `1`.
"""
arg_constraints = {'probs': constraints.unit_interval}
support = constraints.nonnegative_integer
def __init__(self, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
self.probs, = broadcast_all(probs)
if not self.probs.gt(0).all():
raise ValueError('All elements of probs must be greater than 0')
else:
self.logits, = broadcast_all(logits)
probs_or_logits = probs if probs is not None else logits
if isinstance(probs_or_logits, Number):
batch_shape = torch.Size()
else:
batch_shape = probs_or_logits.size()
super(Geometric, self).__init__(batch_shape, validate_args=validate_args)
@property
def mean(self):
return 1. / self.probs - 1.
@property
def variance(self):
return (1. / self.probs - 1.) / self.probs
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
return (u.log() / (-self.probs).log1p()).floor()
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
value, probs = broadcast_all(value, self.probs.clone())
probs[(probs == 1) & (value == 0)] = 0
return value * (-probs).log1p() + self.probs.log()
def entropy(self):
return binary_cross_entropy_with_logits(self.logits, self.probs, reduce=False) / self.probs
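# Illustrative sketch (not part of the original module): a tiny numerical check of
# the identity implemented by log_prob() above, log P(X = k) = k*log(1 - p) + log(p).
# The helper is never called on import; the parameter values are arbitrary examples.
def _geometric_log_prob_check(p=0.3, k=2):
    dist = Geometric(torch.tensor([float(p)]))
    expected = k * torch.log(torch.tensor([1.0 - p])) + torch.log(torch.tensor([float(p)]))
    actual = dist.log_prob(torch.tensor([float(k)]))
    return bool(torch.allclose(actual, expected))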
| [
"[email protected]"
] | |
3387a7b1ab5c092a4be3f73958c4f37a2aec6a5c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02683/s728076842.py | 530d406c4a8a8bf681c980d60d4d26bc44d72770 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | import numpy as np
n,m,x=map(int,input().split())
a=2**64                       # best (minimum) total cost found so far; 2**64 acts as infinity
b=[np.array(list(map(int,input().split())),"i8")for i in range(n)]   # row i: [cost, level_1..level_m]
for i in range(2**n):         # enumerate every subset of the n books as a bitmask
    c=bin(i)[2:]
    c="0"*(n-len(c))+c        # zero-pad the bitmask to length n
    l=np.zeros(m)             # accumulated levels per category
    q=0                       # accumulated cost of the chosen subset
    for j in range(n):
        if c[j]=="1":
            q+=b[j][0]
            l+=b[j][1:]
    if np.min(l)>=x:          # every category reaches at least level x
        a=min(a,q)
if a==2**64:
    print(-1)
else:
print(a) | [
"[email protected]"
] | |
59fafc2d56a1ca1d9d3712f7dfda2784a96c910b | 71c331e4b1e00fa3be03b7f711fcb05a793cf2af | /QA-System-master/SpeechToText_test/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/recaptchaenterprise/v1/recaptchaenterprise_v1_client.py | 79510bf7357cd70baba2a7b3f103d23cabd30234 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | iofh/QA-System | 568228bb0c0adf9ec23b45cd144d61049e720002 | af4a8f1b5f442ddf4905740ae49ed23d69afb0f6 | refs/heads/master | 2022-11-27T23:04:16.385021 | 2020-08-12T10:11:44 | 2020-08-12T10:11:44 | 286,980,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,467 | py | """Generated client library for recaptchaenterprise version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.recaptchaenterprise.v1 import recaptchaenterprise_v1_messages as messages
class RecaptchaenterpriseV1(base_api.BaseApiClient):
"""Generated client library for service recaptchaenterprise version v1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://recaptchaenterprise.googleapis.com/'
MTLS_BASE_URL = 'https://recaptchaenterprise.mtls.googleapis.com/'
_PACKAGE = 'recaptchaenterprise'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'RecaptchaenterpriseV1'
_URL_VERSION = 'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new recaptchaenterprise handle."""
url = url or self.BASE_URL
super(RecaptchaenterpriseV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_assessments = self.ProjectsAssessmentsService(self)
self.projects_keys = self.ProjectsKeysService(self)
self.projects = self.ProjectsService(self)
class ProjectsAssessmentsService(base_api.BaseApiService):
"""Service class for the projects_assessments resource."""
_NAME = 'projects_assessments'
def __init__(self, client):
super(RecaptchaenterpriseV1.ProjectsAssessmentsService, self).__init__(client)
self._upload_configs = {
}
def Annotate(self, request, global_params=None):
r"""Annotates a previously created Assessment to provide additional information.
on whether the event turned out to be authentic or fradulent.
Args:
request: (RecaptchaenterpriseProjectsAssessmentsAnnotateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1AnnotateAssessmentResponse) The response message.
"""
config = self.GetMethodConfig('Annotate')
return self._RunMethod(
config, request, global_params=global_params)
Annotate.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/assessments/{assessmentsId}:annotate',
http_method='POST',
method_id='recaptchaenterprise.projects.assessments.annotate',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:annotate',
request_field='googleCloudRecaptchaenterpriseV1AnnotateAssessmentRequest',
request_type_name='RecaptchaenterpriseProjectsAssessmentsAnnotateRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1AnnotateAssessmentResponse',
supports_download=False,
)
def Create(self, request, global_params=None):
r"""Creates an Assessment of the likelihood an event is legitimate.
Args:
request: (RecaptchaenterpriseProjectsAssessmentsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Assessment) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/assessments',
http_method='POST',
method_id='recaptchaenterprise.projects.assessments.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/assessments',
request_field='googleCloudRecaptchaenterpriseV1Assessment',
request_type_name='RecaptchaenterpriseProjectsAssessmentsCreateRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Assessment',
supports_download=False,
)
class ProjectsKeysService(base_api.BaseApiService):
"""Service class for the projects_keys resource."""
_NAME = 'projects_keys'
def __init__(self, client):
super(RecaptchaenterpriseV1.ProjectsKeysService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new reCAPTCHA Enterprise key.
Args:
request: (RecaptchaenterpriseProjectsKeysCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Key) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys',
http_method='POST',
method_id='recaptchaenterprise.projects.keys.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/keys',
request_field='googleCloudRecaptchaenterpriseV1Key',
request_type_name='RecaptchaenterpriseProjectsKeysCreateRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Key',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes the specified key.
Args:
request: (RecaptchaenterpriseProjectsKeysDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleProtobufEmpty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys/{keysId}',
http_method='DELETE',
method_id='recaptchaenterprise.projects.keys.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RecaptchaenterpriseProjectsKeysDeleteRequest',
response_type_name='GoogleProtobufEmpty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Returns the specified key.
Args:
request: (RecaptchaenterpriseProjectsKeysGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Key) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys/{keysId}',
http_method='GET',
method_id='recaptchaenterprise.projects.keys.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RecaptchaenterpriseProjectsKeysGetRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Key',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Returns the list of all keys that belong to a project.
Args:
request: (RecaptchaenterpriseProjectsKeysListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1ListKeysResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys',
http_method='GET',
method_id='recaptchaenterprise.projects.keys.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1/{+parent}/keys',
request_field='',
request_type_name='RecaptchaenterpriseProjectsKeysListRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1ListKeysResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates the specified key.
Args:
request: (RecaptchaenterpriseProjectsKeysPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Key) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys/{keysId}',
http_method='PATCH',
method_id='recaptchaenterprise.projects.keys.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1/{+name}',
request_field='googleCloudRecaptchaenterpriseV1Key',
request_type_name='RecaptchaenterpriseProjectsKeysPatchRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Key',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(RecaptchaenterpriseV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
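# Illustrative sketch (not part of the generated code): minimal use of the client
# defined above. The project id is a placeholder; the request field name follows
# the method config for projects_keys.List (ordered_params=['parent']).
def _example_list_keys(project_id='my-project'):
    client = RecaptchaenterpriseV1()
    request = messages.RecaptchaenterpriseProjectsKeysListRequest(
        parent='projects/{}'.format(project_id))
    return client.projects_keys.List(request)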
| [
"[email protected]"
] | |
1b9d741b46cbdeed5b3a3bac485cf1c895171822 | 1d38c549c07f43cc26b7353ef95300b934eeed33 | /setup.py | 9475e9b22ed79c0c28f6d00f6eec5f19bf0269e4 | [] | no_license | pooyagheyami/Adel3 | a6354fbc5aa56a9c38a8b724c8d22bea689380a1 | 29e257e19fd6914de0e60c303871321e457a858b | refs/heads/master | 2022-11-07T21:53:13.958369 | 2020-06-12T13:22:55 | 2020-06-12T13:22:55 | 271,803,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,530 | py |
# ======================================================== #
# File automagically generated by GUI2Exe version 0.5.3
# Copyright: (c) 2007-2012 Andrea Gavana
# ======================================================== #
# Let's start with some default (for me) imports...
from distutils.core import setup
from py2exe.build_exe import py2exe
import glob
import os
import zlib
import shutil
# Remove the build folder
shutil.rmtree("build", ignore_errors=True)
class Target(object):
""" A simple class that holds information on our executable file. """
def __init__(self, **kw):
""" Default class constructor. Update as you need. """
self.__dict__.update(kw)
# Ok, let's explain why I am doing that.
# Often, data_files, excludes and dll_excludes (but also resources)
# can be very long list of things, and this will clutter too much
# the setup call at the end of this file. So, I put all the big lists
# here and I wrap them using the textwrap module.
data_files = [('GUI\AuiPanel', ['F:\\Adel2\\GUI\\AuiPanel\\__init__.pyc',
'F:\\Adel2\\GUI\\AuiPanel\\Rev.pyc',
'F:\\Adel2\\GUI\\AuiPanel\\Stat.pyc']),
('GUI\Edit', ['F:\\Adel2\\GUI\\Edit\\__init__.pyc',
'F:\\Adel2\\GUI\\Edit\\accsrh.pyc',
'F:\\Adel2\\GUI\\Edit\\buyit.pyc',
'F:\\Adel2\\GUI\\Edit\\EDA.pyc',
'F:\\Adel2\\GUI\\Edit\\Edacc.pyc',
'F:\\Adel2\\GUI\\Edit\\EDM.pyc',
'F:\\Adel2\\GUI\\Edit\\Edmolk6.pyc',
'F:\\Adel2\\GUI\\Edit\\Edmolk62.pyc',
'F:\\Adel2\\GUI\\Edit\\InAcc3.pyc',
'F:\\Adel2\\GUI\\Edit\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Edit\\Specy.pyc']),
('Database', ['F:\\Adel2\\Database\\__init__.pyc',
'F:\\Adel2\\Database\\ABR.db',
'F:\\Adel2\\Database\\Company.db',
'F:\\Adel2\\Database\\DataGet.pyc',
'F:\\Adel2\\Database\\Main.db',
'F:\\Adel2\\Database\\MDataGet.pyc',
'F:\\Adel2\\Database\\Menu.db',
'F:\\Adel2\\Database\\MenuSet.pyc',
'F:\\Adel2\\Database\\Molk.db',
'F:\\Adel2\\Database\\wxsq2.pyc']),
('GUI', ['F:\\Adel2\\GUI\\__init__.pyc',
'F:\\Adel2\\GUI\\BG.pyc',
'F:\\Adel2\\GUI\\MainMenu.pyc',
'F:\\Adel2\\GUI\\proman.pyc',
'F:\\Adel2\\GUI\\window.pyc']),
('GUI\Input', ['F:\\Adel2\\GUI\\Input\\__init__.pyc',
'F:\\Adel2\\GUI\\Input\\accsrh.pyc',
'F:\\Adel2\\GUI\\Input\\buyit.pyc',
'F:\\Adel2\\GUI\\Input\\IAc.pyc',
'F:\\Adel2\\GUI\\Input\\IMK.pyc',
'F:\\Adel2\\GUI\\Input\\InAcc3.pyc',
'F:\\Adel2\\GUI\\Input\\InM6.pyc',
'F:\\Adel2\\GUI\\Input\\InMolk61.pyc',
'F:\\Adel2\\GUI\\Input\\InMolk62.pyc',
'F:\\Adel2\\GUI\\Input\\Pmenu.pyc',
'F:\\Adel2\\GUI\\Input\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Input\\Specy.pyc']),
('GUI\Program', ['F:\\Adel2\\GUI\\Program\\quit.pyc',
'F:\\Adel2\\GUI\\Program\\DEF.pyc',
'F:\\Adel2\\GUI\\Program\\defin2.pyc',
'F:\\Adel2\\GUI\\Program\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Program\\pro1.pyc',
'F:\\Adel2\\GUI\\Program\\proper.pyc']),
('GUI\Report', ['F:\\Adel2\\GUI\\Report\\__init__.pyc',
'F:\\Adel2\\GUI\\Report\\AD1.pyc',
'F:\\Adel2\\GUI\\Report\\ADftar.pyc',
'F:\\Adel2\\GUI\\Report\\buyit.pyc',
'F:\\Adel2\\GUI\\Report\\MD1.pyc',
'F:\\Adel2\\GUI\\Report\\MD2.pyc',
'F:\\Adel2\\GUI\\Report\\MDftar1.pyc',
'F:\\Adel2\\GUI\\Report\\MDftar4.pyc',
'F:\\Adel2\\GUI\\Report\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Report\\RMolk61.pyc',
'F:\\Adel2\\GUI\\Report\\RMolk62.pyc',
'F:\\Adel2\\GUI\\Report\\Specy.pyc']),
('GUI\Develop', ['F:\\Adel2\\GUI\\Develop\\__init__.pyc',
'F:\\Adel2\\GUI\\Develop\\buyit.pyc',
'F:\\Adel2\\GUI\\Develop\\Pnl0.pyc']),
('GUI\Help', ['F:\\Adel2\\GUI\\Help\\__init__.pyc',
'F:\\Adel2\\GUI\\Help\\about.pyc',
'F:\\Adel2\\GUI\\Help\\Pnl0.pyc']),
('GUI\Connect', ['F:\\Adel2\\GUI\\Connect\\__init__.pyc',
'F:\\Adel2\\GUI\\Connect\\buyit.pyc',
'F:\\Adel2\\GUI\\Connect\\Pnl0.pyc']),
('Config', ['F:\\Adel2\\Config\\__init__.pyc',
'F:\\Adel2\\Config\\config.pyc',
'F:\\Adel2\\Config\\Init.pyc',
'F:\\Adel2\\Config\\program.ini']),
('Utility', ['F:\\Adel2\\Utility\\__init__.pyc',
'F:\\Adel2\\Utility\\Adaad2.pyc',
'F:\\Adel2\\Utility\\adadfa1',
'F:\\Adel2\\Utility\\B1.pyc',
'F:\\Adel2\\Utility\\barcode.png',
'F:\\Adel2\\Utility\\calcu.pyc',
'F:\\Adel2\\Utility\\calculator.bmp',
'F:\\Adel2\\Utility\\calfar01.pyc',
'F:\\Adel2\\Utility\\clacal3.pyc',
'F:\\Adel2\\Utility\\fakey.pyc'])]
includes = ['khayyam', 'wx', 'wx.dataview', 'wx.lib']
excludes = ['_gtkagg', '_tkagg', 'bsddb', 'curses', 'email', 'pywin.debugger',
'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl',
'Tkconstants', 'Tkinter']
packages = ['Config', 'Database', 'GUI', 'GUI.AuiPanel', 'GUI.Connect',
'GUI.Develop', 'GUI.Edit', 'GUI.Help', 'GUI.Input',
'GUI.Program', 'GUI.Report', 'Utility']
dll_excludes = ['libgdk-win32-2.0-0.dll', 'libgobject-2.0-0.dll', 'tcl84.dll',
'tk84.dll']
icon_resources = [(1, 'F:\\Adel2\\Res\\Icons\\f4.ico'), (2, 'F:\\Adel2\\Res\\Icons\\U5.ico')]
bitmap_resources = [(1, 'F:\\Adel2\\Utility\\calculator.bmp')]
other_resources = [(4, 24, 'F:\\Adel2\\Res\\Pics\\B10.jpg'), (5, 24, 'F:\\Adel2\\Res\\Pics\\B11.jpg'),
(6, 24, 'F:\\Adel2\\Res\\Pics\\B13.jpg'),
(7, 24, 'F:\\Adel2\\Res\\Pics\\B14.jpg'),
(8, 24, 'F:\\Adel2\\Res\\Pics\\B16.jpg'),
(1, 24, 'F:\\Adel2\\Res\\Pics\\B6.jpg'),
(2, 24, 'F:\\Adel2\\Res\\Pics\\B7.jpg'),
(3, 24, 'F:\\Adel2\\Res\\Pics\\B8.jpg')]
# This is a place where the user custom code may go. You can do almost
# whatever you want, even modify the data_files, includes and friends
# here as long as they have the same variable name that the setup call
# below is expecting.
# No custom code added
# Ok, now we are going to build our target class.
# I chose this building strategy as it works perfectly for me :-D
GUI2Exe_Target_1 = Target(
# what to build
script = "mainpro.py",
icon_resources = icon_resources,
bitmap_resources = bitmap_resources,
other_resources = other_resources,
dest_base = "mainpro",
version = "0.1",
company_name = "Chashme",
copyright = "Cheshme",
name = "Py2Exe Sample File",
)
# No custom class for UPX compression or Inno Setup script
# That's serious now: we have all (or almost all) the options py2exe
# supports. I put them all even if some of them are usually defaulted
# and not used. Some of them I didn't even know about.
setup(
# No UPX or Inno Setup
data_files = data_files,
options = {"py2exe": {"compressed": 0,
"optimize": 0,
"includes": includes,
"excludes": excludes,
"packages": packages,
"dll_excludes": dll_excludes,
"bundle_files": 3,
"dist_dir": "dist",
"xref": False,
"skip_archive": False,
"ascii": False,
"custom_boot_script": '',
}
},
zipfile = None,
console = [],
windows = [GUI2Exe_Target_1],
service = [],
com_server = [],
ctypes_com_server = []
)
# This is a place where any post-compile code may go.
# You can add as much code as you want, which can be used, for example,
# to clean up your folders or to do some particular post-compilation
# actions.
# No post-compilation code added
# And we are done. That's a setup script :-D
| [
"[email protected]"
] | |
6bf0a913bcc4d96db71c5ad8e16ab1214ef394f8 | 51bd1f17a4e9942128b2c0824d397ebb74067e4c | /py_box3/mkm/chemkin/__init__.py | 9f2981ce6f8ee408e5b347aef0ba9261ee2bc6fb | [] | no_license | jonlym/py_box | 3290db8fab2ca97fbd348d02ae4a3319207ccfb0 | ae5187a433ef654d6b96ee98ca7ab742d83d11d9 | refs/heads/master | 2021-01-19T05:42:10.056427 | 2018-12-20T18:44:01 | 2018-12-20T18:44:01 | 87,442,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,054 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 23 14:57:39 2016
@author: Jonathan Lym
"""
import os
from copy import copy
from py_box3.constants import T0, convert_unit
from ase.io import read
from py_box3.thermo.thermdat import Thermdat
from py_box3.thermo.thermdats import Thermdats
import numpy as np
class Chemkin(object):
def __init__(self,
species = None,
sites = None,
reactions = None,
BEPs = None,
LSRs = None,
DOEs = None,
GAs = None,
SAs = None,
StatpQ = None,
reactor_type = 1,
n_runs = 1,
multi_input = True,
standard_T_and_P = True,
Ts = [],
Ps = [],
Qs = [],
SA_Vs = [],
T_rise = 0.,
isothermal = True,
linear_T_ramp = False,
external_T = 923.,
heat_transfer_area_to_volume = 3.571,
heat_transfer_coefficient = 0.,
TPD_ramp = 0.,
MARI = '',
reactant = '',
volume = 100.,
nnodes = 1,
ttout = 1.e-2,
rtime = 1.e-4,
ntdec = 10.,
save_transient = False,
set_equation_tolerance = True,
absolute_tolerance = 1.e-10,
relative_tolerance = 1.e-8,
non_negative_composition = True,
restart_max = 0,
use_iterative_solver = False,
upper_bandwidth = 0,
lower_bandwidth = 0,
use_coverage_effects = False,
use_binding_energy_corrections = False,
use_BEPs = False,
use_LSRs = False,
use_different_activation_energy = False,
use_omega = False,
omega = 0.5,
T_ref = 1.,
reaction_path_analysis_mode = 1,
verbose_reaction_path_analysis = False,
reaction_path_analysis_T = 900.,
sensitivity_analysis = False,
design_of_experiments = False):
#Objects
self.species = species
self.sites = sites
self.reactions = reactions
self.BEPs = BEPs
self.LSRs = LSRs
self.DOEs = DOEs
self.GAs = GAs
self.SAs = SAs
self.StatpQ = StatpQ
#Reactor parameters
self.reactor_type = reactor_type
self.n_runs = n_runs
self.multi_input = multi_input
self.standard_T_and_P = standard_T_and_P
self.Ts = Ts
self.Ps = Ps
self.Qs = Qs
self.SA_Vs = SA_Vs
self.T_rise = T_rise
self.external_T = external_T
self.heat_transfer_area_to_volume = heat_transfer_area_to_volume
self.heat_transfer_coefficient = heat_transfer_coefficient
self.TPD_ramp = TPD_ramp
self.MARI = MARI
self.reactant = reactant
self.volume = volume
#Reactor Options
self.isothermal = isothermal
self.linear_T_ramp = linear_T_ramp
#Solver options
self.nnodes = nnodes
self.ttout = ttout
self.rtime = rtime
self.ntdec = ntdec
self.save_transient = save_transient
self.set_equation_tolerance = set_equation_tolerance
self.absolute_tolerance = absolute_tolerance
self.relative_tolerance = relative_tolerance
self.non_negative_composition = non_negative_composition
self.restart_max = restart_max
self.use_iterative_solver = use_iterative_solver
self.upper_bandwidth = upper_bandwidth
self.lower_bandwidth = lower_bandwidth
#Reaction options
self.use_coverage_effects = use_coverage_effects
self.use_binding_energy_corrections = use_binding_energy_corrections
self.use_BEPs = use_BEPs
self.use_LSRs = use_LSRs
self.use_different_activation_energy = use_different_activation_energy
self.use_omega = use_omega
self.omega = omega
self.T_ref = T_ref
#Output options
self.reaction_path_analysis_mode = reaction_path_analysis_mode
self.verbose_reaction_path_analysis = verbose_reaction_path_analysis
self.reaction_path_analysis_T = reaction_path_analysis_T
self.sensitivity_analysis = sensitivity_analysis
self.design_of_experiments = design_of_experiments
    @classmethod
    def from_INP(cls, path = '.'):
        #NOTE: Sites, Species, Reactions, BEPs, LSRs, DOEs, GAs, SAs and StatpQ
        #must be importable from their corresponding py_box3 modules for this to work.
        sites = Sites.from_surf_inp(path = os.path.join(path, 'surf.inp'))
        species = Species.from_thermdat(path = os.path.join(path, 'thermdat'))
        species.get_sites(path = os.path.join(path, 'surf.inp'))
        gas_reactions = Reactions.from_gas_inp(path = os.path.join(path, 'gas.inp'))
        surf_reactions = Reactions.from_surf_inp(path = os.path.join(path, 'surf.inp'))
        #extend() mutates in place and returns None, so keep a reference to the copy
        reactions = copy(gas_reactions)
        reactions.extend(copy(surf_reactions))
        input_dict = cls.read_tube_inp(path = os.path.join(path, 'tube.inp'), return_dict = True)
        #Optional Objects
        if input_dict['use_BEPs']:
            input_dict['BEPs'] = BEPs.from_BEP_inp(path = os.path.join(path, 'BEP.inp'))
        if input_dict['use_LSRs']:
            input_dict['LSRs'] = LSRs.from_Scale_inp(path = os.path.join(path, 'Scale.inp'))
        if input_dict['design_of_experiments']:
            input_dict['DOEs'] = DOEs.from_DOE_inp(path = os.path.join(path, 'DOE.inp'))
        if input_dict.get('use_GAs'):
            #read_tube_inp does not currently set 'use_GAs', hence the .get()
            input_dict['GAs'] = GAs.from_GA_inp(path = os.path.join(path, 'GA.inp'))
        if input_dict['sensitivity_analysis']:
            input_dict['SAs'] = SAs.from_SA_inp(path = os.path.join(path, 'SA.inp'))
        if input_dict['use_binding_energy_corrections']:
            input_dict['StatpQ'] = StatpQ.from_StatpQ_inp(path = os.path.join(path, 'StatpQ.inp'))
        if input_dict['multi_input']:
            #read_T_flow_inp is expected to be provided elsewhere and to return (Ts, Ps, Qs, SA_Vs)
            (input_dict['Ts'], input_dict['Ps'], input_dict['Qs'], input_dict['SA_Vs']) = \
                cls.read_T_flow_inp(path = os.path.join(path, 'T_flow.inp'))
        if input_dict['use_different_activation_energy']:
            reactions.read_EAs_inp(path = os.path.join(path, 'EAs.inp'))
            reactions.read_EAg_inp(path = os.path.join(path, 'EAg.inp'))
        return cls(species = species, sites = sites, reactions = reactions, **input_dict)
    @staticmethod
    def read_tube_inp(path = 'tube.inp', return_dict = True):
tube_dict = dict()
with open(path, 'r') as f_ptr:
i = 0
for line in f_ptr:
#Skip lines
if '!' == line[0] or 'EOF' in line:
continue
data = [x for x in line.replace('\n', '').split(' ') if x != '']
if i == 0:
tube_dict['reactor_type'] = int(data[0])
tube_dict['n_runs'] = int(data[1])
tube_dict['multi_input'] = char_to_boolean(data[2])
elif i == 1:
tube_dict['standard_T_and_P'] = char_to_boolean(data[0])
tube_dict['Ts'] = [float(data[1])]
tube_dict['Ps'] = [float(data[2])]
tube_dict['Qs'] = [float(data[3])]
tube_dict['SA_Vs'] = [float(data[4])]
tube_dict['T_rise'] = float(data[5])
elif i == 2:
tube_dict['isothermal'] = char_to_boolean(data[0])
tube_dict['linear_T_ramp'] = int(data[1])
elif i == 3:
tube_dict['external_T'] = float(data[0])
tube_dict['heat_transfer_area_to_volume'] = float(data[1])
tube_dict['heat_transfer_coefficient'] = float(data[2])
tube_dict['TPD_ramp'] = float(data[3])
elif i == 4:
tube_dict['MARI'] = data[0]
tube_dict['reactant'] = data[1]
elif i == 5:
tube_dict['volume'] = float(data[0])
tube_dict['nnodes'] = int(data[1])
tube_dict['ttout'] = float(data[2])
tube_dict['rtime'] = float(data[3])
tube_dict['ntdec'] = int(data[4])
tube_dict['save_transient'] = char_to_boolean(data[5])
elif i == 6:
tube_dict['set_equation_tolerance'] = char_to_boolean(data[0])
tube_dict['absolute_tolerance'] = float(data[1])
tube_dict['relative_tolerance'] = float(data[2])
tube_dict['non_negative_composition'] = char_to_boolean(data[3])
tube_dict['restart_max'] = int(data[4])
elif i == 7:
if data[0] == '0':
tube_dict['use_iterative_solver'] = False
elif data[0] == '1':
tube_dict['use_iterative_solver'] = True
else:
raise Exception('Invalid value for iSolver, {}'.format(data[0]))
tube_dict['upper_bandwidth'] = int(data[1])
tube_dict['lower_bandwidth'] = int(data[2])
elif i == 8:
tube_dict['use_coverage_effects'] = char_to_boolean(data[0])
tube_dict['use_binding_energy_corrections'] = char_to_boolean(data[1])
tube_dict['use_BEPs'] = char_to_boolean(data[2])
if data[3] == '0':
tube_dict['use_LSRs'] = False
elif data[3] == '3':
tube_dict['use_LSRs'] = True
else:
raise Exception('Invalid value for iScale, {}'.format(data[3]))
tube_dict['use_different_activation_energy'] = char_to_boolean(data[4])
tube_dict['use_omega'] = char_to_boolean(data[5])
tube_dict['omega'] = float(data[6])
tube_dict['T_ref'] = float(data[7])
elif i == 9:
tube_dict['reaction_path_analysis_mode'] = int(data[0])
tube_dict['verbose_reaction_path_analysis'] = char_to_boolean(data[1])
tube_dict['reaction_path_analysis_T'] = float(data[2])
tube_dict['sensitivity_analysis'] = char_to_boolean(data[3])
tube_dict['design_of_experiments'] = char_to_boolean(data[4])
i += 1
return tube_dict
def write_tube_inp(self, path = 'tube.inp'):
lines = []
lines.append('!irxtr (0=UHV/mol. beam, 1=batch, 2=cstr, 3=pfr) nruns MultiInput')
#lines.append('{}{}{}{}{}'.format(self.reactor_type))
lines.append('!lstp t[K] p[atm] velo[cm3/s] abyv[cm-1] trise[K]')
lines.append('!liso(yes=T,no=F) itpd (0=no, 1=UHV, 2=High Pressure) (itpd overrides liso)')
lines.append('!text aextbyv htc ramp [K/s]')
lines.append('!MARI Reactant')
lines.append('!rlen[cm3] nnodes ttout [s] rtime [s] ntdec ltra (F=only SS saved, T=transient saved)')
lines.append('!ltol abstol reltol NonNeg(F/T: constraints off/on) restart_max (<=0 means no limit)')
lines.append('!iSolver (0/1: iterative solver off/on) mu ml (upper/lower bandwidths for Krylov solver)')
lines.append('!lcov lStatpQ lBEP iScale lEA lomega omega Tref_beta (0: Tref=300K; 1: Tref=1K)')
lines.append('!mrpa verbose_rpa trpa lsen lDOE')
lines.append('EOF')
with open(path, 'w') as f_ptr:
f_ptr.write(lines[0])
def char_to_boolean(character):
if character.lower() == 't':
return True
elif character.lower() == 'f':
return False
else:
raise Exception('Invalid character, {}'.format(character))
def boolean_to_char(boolean):
if boolean:
return 'T'
else:
return 'F'
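# Illustrative sketch (not part of the original module): the usual entry point is
# the from_INP() classmethod, which assembles a Chemkin object from a directory of
# Chemkin-style input files (thermdat, gas.inp, surf.inp, tube.inp, ...). The
# directory path below is a placeholder.
def _example_load(input_dir='./inputs'):
    chemkin = Chemkin.from_INP(path=input_dir)
    # settings parsed from tube.inp are exposed as attributes on the instance
    return chemkin.reactor_type, chemkin.Ts, chemkin.Ps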
| [
"[email protected]"
] | |
6aaba7d662a21da85d2ba3e6b178f7ecf8d58cd2 | e7b07f173a8bc0d36e046c15df7bbe3d18d49a33 | /parse.py | 9d1894ef9159fb1b51738dbba15b24d5bcb61bc0 | [] | no_license | jcarbaugh/makeitwrk | 82b6e8079b118e8d668b2e6858096a54da33d5a8 | 83801b19c120b4cf728b8342c4933fefe54b54d8 | refs/heads/master | 2020-04-06T04:55:56.785930 | 2011-08-26T19:09:27 | 2011-08-26T19:09:27 | 2,275,931 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,029 | py | #!/usr/bin/env python
from struct import pack, unpack
import sys
CHUNK_TYPES = {
1: 'TRACK_CHUNK',
2: 'STREAM_CHUNK',
4: 'METER_CHUNK',
5: 'TEMPO_CHUNK',
6: 'SYSEX_CHUNK',
7: 'MEMRGN_CHUNK',
10: 'TIMEBASE_CHUNK',
# variables
3: 'VARS_CHUNK',
26: 'VARS_CHUNK_VAR',
# device stuff
33: 'DEVICES',
# track stuff?
36: 'TRACK_NAME?',
54: 'TRACK_PORT',
45: 'TRACK_DATA?',
255: 'END_CHUNK',
}
def solomon(arr, parts):
for i in range(0, parts * 8, 8):
yield arr[i:i+8]
def chunk_reader(wrkfile):
if wrkfile.read(8) != b'CAKEWALK':
raise ValueError('invalid file format')
wrkfile.read(1) # byte I don't care about
mm_version = wrkfile.read(2)
major = ord(mm_version[1])
minor = ord(mm_version[0])
version = "%i.%i" % (major, minor)
yield ('VERSION_CHUNK', 2, None, version)
while 1:
ch_type_data = wrkfile.read(1)[0]
ch_type = CHUNK_TYPES.get(ch_type_data, ch_type_data)
if ch_type == 'END_CHUNK':
break
ch_len = unpack('i', wrkfile.read(4))[0]
ch_data_offset = wrkfile.tell()
#print(ch_data_offset)
ch_data = wrkfile.read(ch_len)
yield (ch_type, ch_len, ch_data)
yield ('END_CHUNK', None, None, None)
wrkfile.close()
if __name__ == '__main__':
for chunk in chunk_reader(sys.stdin):
print(chunk)
# if chunk[0] == 'TRACK_NAME?':
# (tnum, tname_len) = unpack('HB', chunk[2][:3])
# tname = chunk[2][3:3+tname_len].decode('utf-8')
# print("[%02i] %s" % (tnum, tname))
# elif chunk[0] == 'TRACK_DATA?':
# (tnum, schunks) = unpack('=HxH', chunk[2][:5])
# print(' ', '------------')
# for s in solomon(chunk[2][7:], schunks):
# print(' ', unpack('8B', s))
"""
__TRACK_DATA__
#2 ?? CNT- ???? 16---------------
0900 00 0700 0000 B649 009023641E00 D449 009028643C00 104A 00902B643C00 4C4A 009029643C00 884A 009023641E00 A64A 009023641E00 E24A 009023641E00
0900 00 0700 0000 1E4B 009023641E00 3C4B 009028643C00 784B 00902B643C00 B44B 009029643C00 F04B 009023641E00 0E4C 009023641E00 4A4C 009023641E00
(30, 75, 0, 144, 35, 100, 30, 0)
submeasure . . . .
measure. . . .
? . . . .
? . . .
nt? . .
? .
-----?
------------------------------------
0000 00 0800 0000 E010 009045643C00 1C11 009045643C00 5811 00904C643C00 9411 009045643C00 D011 00904D643C00 0C12 00904C643C00 4812 009048643C00 8412 009045643C00
0200 00 1400 0000 8016 00902664E001 3417 009026643C00 7017 009026647800 E817 009026647800 2418 009026643C00 6018 00902264E001 1419 009022643C00 5019 009022647800 C819 009022647800041A009022643C00401A00901F64E001F41A00901F643C00301B00901F647800A81B00901F647800E41B00901F643C00201C00902164E001D41C009021643C00101D009021647800881D009021647800C41D009021643C00
__TRACK_NAME__
#2 L2 NAME* INSTRUMENT?
0000 05 4F7267616E FFFF 1500 FFFFFFFF 00000000000000 0A 0000000000
O R G A N
0100 0B 536C617020426173732031 FFFF 2500 FFFFFFFF 00000000000000 0A 0000010000
S L A P B A S S 1
0200 0B 536C617020426173732032 FFFF 2400 FFFFFFFF 00000000000000 FE 0000020000
S L A P B A S S 2
0300 0C 4869676820537472696E6773 FFFF 2C00 FFFFFFFF 00000000000000 0A 0000030000
H I G H S T R I N G S
0900 05 4472756D73 FFFF FFFF FFFFFFFF 00000000000000 0A 0000090000
D R U M S
-------------------------------------------
0000 05 4472756D73 FFFF FFFF FFFFFFFF 00000000000000 0A 0000090000
D R U M S
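
  A sketch (field names are informal) of how one TRACK_NAME record with the
  layout above could be decoded, mirroring the commented-out code in __main__:

      tnum, tname_len = unpack('HB', record[:3])
      tname = record[3:3 + tname_len].decode('utf-8')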
""" | [
"[email protected]"
] | |
602bf5ff185fae424574e01f0d60bafdc9fad426 | 9d032e9864ebda8351e98ee7950c34ce5168b3b6 | /301.py | 10f8978082ea2c4ee7bbac60f631a00e920d68cf | [] | no_license | snpushpi/P_solving | e0daa4809c2a3612ba14d7bff49befa7e0fe252b | 9980f32878a50c6838613d71a8ee02f492c2ce2c | refs/heads/master | 2022-11-30T15:09:47.890519 | 2020-08-16T02:32:49 | 2020-08-16T02:32:49 | 275,273,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | '''
Remove the minimum number of invalid parentheses in order to make the input string valid. Return all possible results.
Note: The input string may contain letters other than the parentheses ( and ).
Example 1:
Input: "()())()"
Output: ["()()()", "(())()"]
Example 2:
Input: "(a)())()"
Output: ["(a)()()", "(a())()"]
Example 3:
Input: ")("
Output: [""]
'''
def validstring(string):
count = 0
for char in string:
if char=='(':
count+=1
elif char==')':
count-=1
if count<0:
return False
return (count==0)
def main(input_string):
l = len(input_string)
queue = [input_string]
visited = set()
visited.add(input_string)
level = False
result = []
while queue:
new_str = queue.pop(0)
if validstring(new_str):
result.append(new_str)
level= True
if level:
continue
for i in range(len(new_str)):
if not (new_str[i]==')' or new_str[i]=='('):
continue
part_string = new_str[:i]+new_str[i+1:]
if part_string not in visited:
visited.add(part_string)
queue.append(part_string)
return result
print(main("()())()"))
| [
"[email protected]"
] | |
0285e95057b21742ade89d9041421eb988eb90fb | d79c152d072edd6631e22f886c8beaafe45aab04 | /nicolock/products/rest_urls.py | d58d9a92a31372b447067ee3dd7508ef1d810182 | [] | no_license | kabroncelli/Nicolock | 764364de8aa146721b2678c14be808a452d7a363 | 4c4343a9117b7eba8cf1daf7241de549b9a1be3b | refs/heads/master | 2020-03-11T11:02:43.074373 | 2018-04-18T17:38:33 | 2018-04-18T17:38:33 | 129,959,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import rest_views as views
urlpatterns = [
url(
regex=r'^products/(?P<pk>\d+)/$',
view=views.ProductDetail.as_view(),
name='product-detail'
),
url(
regex=r'^products/(?P<pk>\d+)/like/$',
view=views.ProductLike.as_view(),
name='product-like'
),
url(
regex=r'^categories/$',
view=views.CategoryList.as_view(),
name='category-list'
),
url(
regex=r'^categories/(?P<pk>\d+)/$',
view=views.CategoryDetail.as_view(),
name='category-detail'
),
]
| [
"[email protected]"
] | |
d3527c75633bd397f54893cab6262bed50e53879 | d17d65a3ee48b307a46a0b95a05f04131668edbe | /TestSuite/runner.py | 6a172fc2702d50f5b6f0558a2beab1d4f677a319 | [] | no_license | qlcfj001/ui_test | 28fa370a6f912b2ff9a551c681d35a452c57ee02 | 25020af19d84c9c2b1bad02aca89cc881e828bbb | refs/heads/master | 2023-06-15T18:10:02.177702 | 2021-07-15T06:35:10 | 2021-07-15T06:35:10 | 386,012,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | from Page.Base import base
from pageobjct.SearcH import Searchpage
from selenium.webdriver.common.by import By
#from TestSuite.Variablelayer.Variable import *
import time
import unittest
leave='成都'
leave_data="2021-07-20"
arrive='北京'
arrive_data='2021-07-30'
aa=Searchpage()
aa.search7(leave=leave, leave_data=leave_data, arrive=arrive, arrive_data=arrive_data)
| [
"[email protected]"
] | |
a4d250d72f94be4c124927e70b0c139ad9f85f9d | f8fbf0b0cc919d7d4d7c79532cc5434552d75eb8 | /docs/0.18.1/_static/notebooks/modeling.py | a5bdb272476813045d22aca2f06eddfb47942841 | [] | no_license | adonath/gammapy-docs | ae8571c6aa76d231ac54c93fb3c8968f9f79993b | 32b605d623abdcd2e82c30bcbf07ef30d259783a | refs/heads/main | 2023-02-25T05:24:53.211005 | 2022-10-13T00:09:12 | 2022-10-13T00:11:33 | 550,476,516 | 0 | 0 | null | 2022-10-12T20:45:50 | 2022-10-12T20:45:49 | null | UTF-8 | Python | false | false | 14,807 | py | #!/usr/bin/env python
# coding: utf-8
# # Modeling and fitting
#
#
# ## Prerequisites
#
# - Knowledge of spectral analysis to produce 1D On-Off datasets, [see the following tutorial](spectrum_analysis.ipynb)
# - Reading of pre-computed datasets [see the MWL tutorial](analysis_mwl.ipynb)
# - General knowledge on statistics and optimization methods
#
# ## Proposed approach
#
# This is a hands-on tutorial to `~gammapy.modeling`, showing how the model, dataset and fit classes work together. As an example we are going to work with HESS data of the Crab Nebula and show in particular how to:
# - perform a spectral analysis
# - use different fitting backends
# - access covariance matrix information and parameter errors
# - compute likelihood profile
# - compute confidence contours
#
# See also: [Models gallery tutorial](models.ipynb) and `docs/modeling/index.rst`.
#
#
# ## The setup
# In[ ]:
import numpy as np
from astropy import units as u
import matplotlib.pyplot as plt
import scipy.stats as st
from gammapy.modeling import Fit
from gammapy.datasets import Datasets, SpectrumDatasetOnOff
from gammapy.modeling.models import LogParabolaSpectralModel, SkyModel
from gammapy.visualization.utils import plot_contour_line
from itertools import combinations
# ## Model and dataset
#
# First we define the source model, here we need only a spectral model for which we choose a log-parabola
# In[ ]:
crab_spectrum = LogParabolaSpectralModel(
amplitude=1e-11 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
alpha=2.3,
beta=0.2,
)
crab_spectrum.alpha.max = 3
crab_spectrum.alpha.min = 1
crab_model = SkyModel(spectral_model=crab_spectrum, name="crab")
# The data and background are read from pre-computed ON/OFF datasets of HESS observations, for simplicity we stack them together.
# Then we set the model and fit range to the resulting dataset.
# In[ ]:
datasets = []
for obs_id in [23523, 23526]:
dataset = SpectrumDatasetOnOff.from_ogip_files(
f"$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{obs_id}.fits"
)
datasets.append(dataset)
dataset_hess = Datasets(datasets).stack_reduce(name="HESS")
# Set model and fit range
dataset_hess.models = crab_model
e_min = 0.66 * u.TeV
e_max = 30 * u.TeV
dataset_hess.mask_fit = dataset_hess.counts.geom.energy_mask(e_min, e_max)
# ## Fitting options
#
#
#
# First let's create a `Fit` instance:
# In[ ]:
fit = Fit([dataset_hess], store_trace=True)
# By default the fit is performed using MINUIT; you can select alternative optimizers and set their options using the `optimize_opts` argument of the `Fit.run()` method. In addition we have specified that the trace of parameter values of the fit should be stored.
#
# Note that, for now, covariance matrix and errors are computed only when fitting with MINUIT. However, depending on the problem, other optimizers can perform better, so sometimes it can be useful to run a pre-fit with alternative optimization methods.
#
# For the "scipy" backend the available options are desribed in detail here:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
# In[ ]:
get_ipython().run_cell_magic('time', '', 'scipy_opts = {"method": "L-BFGS-B", "options": {"ftol": 1e-4, "gtol": 1e-05}}\nresult_scipy = fit.run(backend="scipy", optimize_opts=scipy_opts)')
# For the "sherpa" backend you can choose the optimization algorithm between method = {"simplex", "levmar", "moncar", "gridsearch"}.
# Those methods are described and compared in detail on http://cxc.cfa.harvard.edu/sherpa/methods/index.html.
# The available options of the optimization methods are described on the following page https://cxc.cfa.harvard.edu/sherpa/methods/opt_methods.html
# In[ ]:
get_ipython().run_cell_magic('time', '', 'sherpa_opts = {"method": "simplex", "ftol": 1e-3, "maxfev": int(1e4)}\nresults_simplex = fit.run(backend="sherpa", optimize_opts=sherpa_opts)')
# For the "minuit" backend see https://iminuit.readthedocs.io/en/latest/reference.html for a detailed description of the available options. If there is an entry ‘migrad_opts’, those options will be passed to [iminuit.Minuit.migrad](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.migrad). Additionnaly you can set the fit tolerance using the [tol](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.tol
# ) option. The minimization will stop when the estimated distance to the minimum is less than 0.001*tol (by default tol=0.1). The [strategy](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.strategy) option changes the speed and accuracy of the optimizer: 0 fast, 1 default, 2 slow but accurate. If you want more reliable error estimates, you should run the final fit with strategy 2.
#
# In[ ]:
get_ipython().run_cell_magic('time', '', 'minuit_opts = {"tol": 0.001, "strategy": 1}\nresult_minuit = fit.run(backend="minuit", optimize_opts=minuit_opts)')
# ## Fit quality assessment
#
# There are various ways to check the convergence and quality of a fit. Among them:
#
# - Refer to the automatically-generated results dictionary
# In[ ]:
print(result_scipy)
# In[ ]:
print(results_simplex)
# In[ ]:
print(result_minuit)
# - Check the trace of the fit e.g. in case the fit did not converge properly
# In[ ]:
result_minuit.trace
# - Check that the fitted values and errors for all parameters are reasonable, and no fitted parameter value is "too close" - or even outside - its allowed min-max range
# In[ ]:
result_minuit.parameters.to_table()
# - Plot fit statistic profiles for all fitted parameters, using `~gammapy.modeling.Fit.stat_profile()`. For a good fit and error estimate each profile should be parabolic
# In[ ]:
total_stat = result_minuit.total_stat
for par in dataset_hess.models.parameters:
if par.frozen is False:
profile = fit.stat_profile(parameter=par)
plt.plot(
profile[f"{par.name}_scan"], profile["stat_scan"] - total_stat
)
plt.xlabel(f"{par.unit}")
plt.ylabel("Delta TS")
plt.title(f"{par.name}: {par.value} +- {par.error}")
plt.show()
plt.close()
# - Inspect model residuals. Those can always be accessed using `~Dataset.residuals()`, which will return an array in case the fitted `Dataset` is a `SpectrumDataset` and a full cube in case of a `MapDataset`. For more details, we refer here to the dedicated fitting tutorials: [analysis_3d.ipynb](analysis_3d.ipynb) (for `MapDataset` fitting) and [spectrum_analysis.ipynb](spectrum_analysis.ipynb) (for `SpectrumDataset` fitting).
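# A minimal sketch (assuming the `residuals()` method mentioned above) to inspect them directly for the stacked ON/OFF dataset used in this tutorial:
# In[ ]:
residuals = dataset_hess.residuals()
print(residuals)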
# ## Covariance and parameter errors
#
# After the fit the covariance matrix is attached to the model. You can get the error on a specific parameter by accessing the `.error` attribute:
# In[ ]:
crab_model.spectral_model.alpha.error
# As an example, this step is needed to produce a butterfly plot showing the envelope of the model taking into account parameter uncertainties.
# In[ ]:
energy_range = [1, 10] * u.TeV
crab_spectrum.plot(energy_range=energy_range, energy_power=2)
ax = crab_spectrum.plot_error(energy_range=energy_range, energy_power=2)
# ## Confidence contours
#
#
# In most studies, one wishes to estimate parameter distributions using observed sample data.
# A 1-dimensional confidence interval gives an estimated range of values which is likely to include an unknown parameter.
# A confidence contour is a 2-dimensional generalization of a confidence interval, often represented as an ellipsoid around the best-fit value.
#
# Gammapy offers two ways of computing confidence contours, in the dedicated methods `Fit.minos_contour()` and `Fit.stat_profile()`. In the following sections we will describe them.
# An important point to keep in mind is: *what does an $N\sigma$ confidence contour really mean?* The answer is that it represents the points of the parameter space for which the model likelihood is $N\sigma$ above the minimum. But one always has to keep in mind that **1 standard deviation in two dimensions has a smaller coverage probability than 68%**, and similarly for all other levels. In particular, in 2 dimensions the probability enclosed by the $N\sigma$ confidence contour is $P(N)=1-e^{-N^2/2}$.
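# As a quick numerical check of that formula (a small sketch using the imports above), the 2-dimensional coverage probabilities of the first few contours are:
# In[ ]:
for n_sigma in [1, 2, 3]:
    coverage = 1 - np.exp(-n_sigma ** 2 / 2)
    print(f"{n_sigma} sigma contour encloses {coverage:.1%} of the probability")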
# ### Computing contours using `Fit.minos_contour()`
# After the fit, MINUIT offers the possibility to compute the confidence contours.
# gammapy provides an interface to this functionality through the `Fit` object using the `minos_contour` method.
# Here we define a function to automate the contour production for the different parameters and confidence levels (expressed in terms of sigma):
# In[ ]:
def make_contours(fit, result, npoints, sigmas):
cts_sigma = []
for sigma in sigmas:
contours = dict()
for par_1, par_2 in combinations(["alpha", "beta", "amplitude"], r=2):
contour = fit.minos_contour(
result.parameters[par_1],
result.parameters[par_2],
numpoints=npoints,
sigma=sigma,
)
contours[f"contour_{par_1}_{par_2}"] = {
par_1: contour[par_1].tolist(),
par_2: contour[par_2].tolist(),
}
cts_sigma.append(contours)
return cts_sigma
# Now we can compute a few contours.
# In[ ]:
get_ipython().run_cell_magic('time', '', 'sigma = [1, 2]\ncts_sigma = make_contours(fit, result_minuit, 10, sigma)')
# Then we prepare some aliases and annotations in order to make the plotting nicer.
# In[ ]:
pars = {
"phi": r"$\phi_0 \,/\,(10^{-11}\,{\rm TeV}^{-1} \, {\rm cm}^{-2} {\rm s}^{-1})$",
"alpha": r"$\alpha$",
"beta": r"$\beta$",
}
panels = [
{
"x": "alpha",
"y": "phi",
"cx": (lambda ct: ct["contour_alpha_amplitude"]["alpha"]),
"cy": (
lambda ct: np.array(1e11)
* ct["contour_alpha_amplitude"]["amplitude"]
),
},
{
"x": "beta",
"y": "phi",
"cx": (lambda ct: ct["contour_beta_amplitude"]["beta"]),
"cy": (
lambda ct: np.array(1e11)
* ct["contour_beta_amplitude"]["amplitude"]
),
},
{
"x": "alpha",
"y": "beta",
"cx": (lambda ct: ct["contour_alpha_beta"]["alpha"]),
"cy": (lambda ct: ct["contour_alpha_beta"]["beta"]),
},
]
# Finally we produce the confidence contours figures.
# In[ ]:
fig, axes = plt.subplots(1, 3, figsize=(16, 5))
colors = ["m", "b", "c"]
for p, ax in zip(panels, axes):
xlabel = pars[p["x"]]
ylabel = pars[p["y"]]
for ks in range(len(cts_sigma)):
plot_contour_line(
ax,
p["cx"](cts_sigma[ks]),
p["cy"](cts_sigma[ks]),
lw=2.5,
color=colors[ks],
label=f"{sigma[ks]}" + r"$\sigma$",
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.legend()
plt.tight_layout()
# ### Computing contours using `Fit.stat_surface()`
# This alternative method for the computation of confidence contours, although more time consuming than `Fit.minos_contour()`, is expected to be more stable. It consists of a generalization of `Fit.stat_profile()` to a 2-dimensional parameter space. The algorithm is very simple:
# - First, passing two arrays of parameters values, a 2-dimensional discrete parameter space is defined;
# - For each node of the parameter space, the two parameters of interest are frozen. This way, a likelihood value ($-2\mathrm{ln}\,\mathcal{L}$, actually) is computed, by either freezing (default) or fitting all nuisance parameters;
# - Finally, a 2-dimensional surface of $-2\mathrm{ln}(\mathcal{L})$ values is returned.
# Using that surface, one can easily compute a surface of $TS = -2\Delta\mathrm{ln}(\mathcal{L})$ and compute confidence contours.
#
# Let's see it step by step.
# First of all, we can notice that this method is "backend-agnostic", meaning that it can be run with MINUIT, sherpa or scipy as fitting tools. Here we will stick with MINUIT, which is the default choice:
# In[ ]:
optimize_opts = {"backend": "minuit", "print_level": 0}
# As an example, we can compute the confidence contour for the `alpha` and `beta` parameters of the `dataset_hess`. Here we define the parameter space:
# In[ ]:
result = result_minuit
par_1 = result.parameters["alpha"]
par_2 = result.parameters["beta"]
x = par_1
y = par_2
x_values = np.linspace(1.55, 2.7, 20)
y_values = np.linspace(-0.05, 0.55, 20)
# Then we run the algorithm, by choosing `reoptimize=False` for the sake of time saving. In real life applications, we strongly recommend to use `reoptimize=True`, so that all free nuisance parameters will be fit at each grid node. This is the correct way, statistically speaking, of computing confidence contours, but is expected to be time consuming.
# In[ ]:
stat_surface = fit.stat_surface(
x, y, x_values, y_values, reoptimize=False, **optimize_opts
)
# In order to easily inspect the results, we can convert the $-2\mathrm{ln}(\mathcal{L})$ surface to a surface of statistical significance (in units of Gaussian standard deviations from the surface minimum):
# In[ ]:
# Compute TS
TS = stat_surface["stat_scan"] - result.total_stat
# In[ ]:
# Compute the corresponding statistical significance surface
gaussian_sigmas = np.sqrt(TS.T)
# Notice that, as explained before, $1\sigma$ contour obtained this way will not contain 68% of the probability, but rather
# In[ ]:
# Compute the corresponding statistical significance surface
# p_value = 1 - st.chi2(df=1).cdf(TS)
# gaussian_sigmas = st.norm.isf(p_value / 2).T
# Finally, we can plot the surface values together with contours:
# In[ ]:
fig, ax = plt.subplots(figsize=(8, 6))
# We choose to plot 1 and 2 sigma confidence contours
levels = [1, 2]
contours = plt.contour(gaussian_sigmas, levels=levels, colors="white")
plt.clabel(contours, fmt="%.0f$\,\sigma$", inline=3, fontsize=15)
im = plt.imshow(
gaussian_sigmas,
extent=[0, len(x_values) - 1, 0, len(y_values) - 1],
origin="lower",
)
fig.colorbar(im)
plt.xticks(range(len(x_values)), np.around(x_values, decimals=2), rotation=45)
plt.yticks(range(len(y_values)), np.around(y_values, decimals=2));
# Note that, if computed with `reoptimize=True`, this plot would be completely consistent with the third panel of the plot produced with `Fit.minos_contour` (try!).
# Finally, always remember that confidence contours are approximations. In particular, when the parameter range boundaries are close to the contour lines, the statistical meaning of the contours is not well defined. That's why we advise always choosing a parameter space that can contain the contours you're interested in.
# In[ ]:
| [
"[email protected]"
] | |
06d3b8b17c46a0ae3faf7387123f73c73bea8d78 | 4766d241bbc736e070f79a6ae6a919a8b8bb442d | /20200215Python-China/0094. Binary Tree Inorder Traversal.py | 08893a77b8777c433e17edf90f755b8b4b58c958 | [] | no_license | yangzongwu/leetcode | f7a747668b0b5606050e8a8778cc25902dd9509b | 01f2edd79a1e922bfefecad69e5f2e1ff3a479e5 | refs/heads/master | 2021-07-08T06:45:16.218954 | 2020-07-18T10:20:24 | 2020-07-18T10:20:24 | 165,957,437 | 10 | 8 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | '''
Given a binary tree, return the inorder traversal of its nodes' values.
Example:
Input: [1,null,2,3]
1
\
2
/
3
Output: [1,3,2]
Follow up: Recursive solution is trivial, could you do it iteratively?
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def inorderTraversal(self, root: TreeNode) -> List[int]:
rep=[]
self.getInOrderTra(root,rep)
return rep
def getInOrderTra(self,root,rep):
if not root:
return
self.getInOrderTra(root.left,rep)
rep.append(root.val)
self.getInOrderTra(root.right,rep)
| [
"[email protected]"
] | |
552fea4e7e4a404550ffa6236bc4c30f22f33e18 | 3f9f7c73bb2f9da31c586d2b64e2cc94f35239dc | /django-polls/polls/tests/test_models.py | 94b7c24fbee98fcaf5c51ee69dd5ad670600b45b | [
"MIT"
] | permissive | jsterling23/DPY_Refresher | eb57e37d4bbad14143800719668b990b459fb56d | 4646b7ebd79ba853f5ccc172183f41257cc12b60 | refs/heads/master | 2020-03-23T19:11:32.626731 | 2018-07-29T01:17:49 | 2018-07-29T01:17:49 | 141,959,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | from django.test import TestCase
import datetime
from django.utils import timezone
from ..models import Question
from django.urls import reverse
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
# method should return false for future dated questions.
time = timezone.now() + datetime.timedelta(days=1, seconds=1)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_past_question(self):
# method should return false for past dated questions.
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
past_question = Question(pub_date=time)
self.assertIs(past_question.was_published_recently(), False)
def test_was_published_recently_with_current_question(self):
# should return True for current question
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
current_question = Question(pub_date=time)
self.assertIs(current_question.was_published_recently(), True)
| [
"[email protected]"
] | |
e5fefc6b8e0ec0d00e467d6808038193d92e8aa7 | 683b73e0c95c755a08e019529aed3ff1a8eb30f8 | /machina/apps/forum_moderation/__init__.py | f1911a14dbd6195e896b647fa949fa08a0c6abce | [
"BSD-3-Clause"
] | permissive | DrJackilD/django-machina | b3a7be9da22afd457162e0f5a147a7ed5802ade4 | 76858921f2cd247f3c1faf4dc0d9a85ea99be3e1 | refs/heads/master | 2020-12-26T08:19:09.838794 | 2016-03-11T03:55:25 | 2016-03-11T03:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | # -*- coding: utf-8 -*-
# Standard library imports
# Third party imports
# Local application / specific library imports
default_app_config = 'machina.apps.forum_moderation.registry_config.ModerationRegistryConfig'
| [
"[email protected]"
] | |
4ab4fec920df659a95a12694df60fd03dfca6791 | 08bfc8a1f8e44adc624d1f1c6250a3d9635f99de | /SDKs/swig/Examples/test-suite/python/abstract_virtual_runme.py | 2a8411578017fc06324e210386ddd29a61e19eb8 | [] | no_license | Personwithhat/CE_SDKs | cd998a2181fcbc9e3de8c58c7cc7b2156ca21d02 | 7afbd2f7767c9c5e95912a1af42b37c24d57f0d4 | refs/heads/master | 2020-04-09T22:14:56.917176 | 2019-07-04T00:19:11 | 2019-07-04T00:19:11 | 160,623,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | version https://git-lfs.github.com/spec/v1
oid sha256:fce41bedc93abe3933ce0f2546b68f02a08faf0778e211b1ba7b30a7f3909ed8
size 50
| [
"[email protected]"
] | |
c7ebc6f32e1358ed20f23dc25b3df7d6a66daf88 | 4aeaca4c58858125e844aad1cd988182201b5120 | /crane/files/timeHistoryParser.py | be957dd91e6668776b4c071a376eeffa2a646763 | [] | no_license | tkarna/crane | f18442a010af0909b7f5af9358cf9080ca1dd1e4 | b8313d0373d8206685d81aadccc425e432c6a010 | refs/heads/master | 2020-05-21T23:39:07.707777 | 2017-11-16T15:58:14 | 2017-11-16T15:58:14 | 53,163,424 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | """
Read SELFE time history (.th) files to a data container.
Jesse Lopez - 2016-04-15
"""
import datetime
import argparse
import numpy as np
from crane.data import timeArray
from crane.data import dataContainer
class thParser(object):
def __init__(self, filename, start_time):
self.filename = filename
self.start_date = start_time
self.time = None
self.data = None
def readFile(self):
"""Read time history file."""
th = np.loadtxt(self.filename)
self.time = timeArray.simulationToEpochTime(th[:, 0], self.start_date)
self.data = th[:, 1]
def genDataContainer(self, variable='variable', station='bvao',
depth='0', bracket='A', save=False):
"""Generate data container."""
x = y = z = 0
coordSys = ''
meta = {}
meta['tag'] = 'timeHistory'
meta['variable'] = variable
meta['location'] = station
meta['msldepth'] = depth
meta['bracket'] = bracket
dc = dataContainer.dataContainer.fromTimeSeries(
self.time, self.data, fieldNames=[variable],
x=x, y=y, z=z, timeFormat='epoch', coordSys=coordSys,
metaData=meta)
if save:
fname = './'+station+'_'+variable+'_'+'0'+'_'+self.start_date.strftime('%Y-%m-%d')+'.nc'
print fname
dc.saveAsNetCDF(fname)
return dc
def parseCommandLine():
parser = argparse.ArgumentParser(description='Read time history to dataContainer.')
parser.add_argument('filepath', type=str, help='Path to time history file.')
parser.add_argument('starttime', type=str, help='Start time of simulation YYYY-MM-DD')
parser.add_argument('variable', type=str, help='Variable name (e.g. - salinity, temp, turbidity)')
parser.add_argument('station', type=str, help='Station name (e.g. - saturn01, tpoin)')
parser.add_argument('depth', type=str, help='Station depth (e.g. - 0.1, 4.0)')
parser.add_argument('bracket', type=str, help='Bracket (e.g. - F, A, R)')
args = parser.parse_args()
st = datetime.datetime.strptime(args.starttime, '%Y-%m-%d')
th = thParser(args.filepath, st)
th.readFile()
th.genDataContainer(args.variable, args.station, args.depth, args.bracket, True)
if __name__ == '__main__':
parseCommandLine()
| [
"[email protected]"
] | |
075c8636339cb3b08aa5c4c3815994408a005e38 | 853d7bd91f4ba254fba0ff28f2e0a3eb2b74fa48 | /errata_tool/release.py | b5c1211cb9a8c86556c758725ad9297bc11a9fbb | [
"MIT"
] | permissive | smunilla/errata-tool | b07614daeceda4a1bfc18ce59679be0a93bb084f | 91bdfb17f15308b46298210fbb2fe5af786276bc | refs/heads/master | 2020-04-10T00:18:12.471123 | 2018-11-19T17:33:02 | 2018-11-28T15:40:08 | 160,681,680 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,800 | py | from __future__ import print_function
import sys
from datetime import date
from errata_tool import ErrataConnector
from errata_tool.product import Product
from errata_tool.product_version import ProductVersion
from errata_tool.user import User
class NoReleaseFoundError(Exception):
pass
class MultipleReleasesFoundError(Exception):
pass
class ReleaseCreationError(Exception):
pass
class Release(ErrataConnector):
def __init__(self, **kwargs):
if 'id' not in kwargs and 'name' not in kwargs:
raise ValueError('missing release "id" or "name" kwarg')
self.id = kwargs.get('id')
self.name = kwargs.get('name')
self.refresh()
def refresh(self):
url = self._url + '/api/v1/releases?'
if self.id is not None:
url += 'filter[id]=%s' % self.id
elif self.name is not None:
url += 'filter[name]=%s' % self.name
result = self._get(url)
if len(result['data']) < 1:
raise NoReleaseFoundError()
if len(result['data']) > 1:
# it's possible to accidentally have identically named releases,
# see engineering RT 461783
raise MultipleReleasesFoundError()
self.data = result['data'][0]
self.id = self.data['id']
self.name = self.data['attributes']['name']
self.description = self.data['attributes']['description']
self.type = self.data['attributes']['type']
self.is_active = self.data['attributes']['is_active']
self.enabled = self.data['attributes']['enabled']
self.blocker_flags = self.data['attributes']['blocker_flags']
self.is_pdc = self.data['attributes']['is_pdc']
self.product_versions = self.data['relationships']['product_versions']
self.url = self._url + '/release/show/%d' % self.id
# For displaying in scripts/logs:
self.edit_url = self._url + '/release/edit/%d' % self.id
def advisories(self):
"""
Find all advisories for this release.
:returns: a list of dicts, one per advisory.
For example:
[{
"id": 32972,
"advisory_name": "RHSA-2018:0546",
"product": "Red Hat Ceph Storage",
"release": "rhceph-3.0",
"synopsis": "Important: ceph security update",
"release_date": None,
"qe_owner": "[email protected]",
"qe_group": "RHC (Ceph) QE",
"status": "SHIPPED_LIVE",
"status_time": "March 15, 2018 18:29"
}]
"""
url = '/release/%d/advisories.json' % self.id
return self._get(url)
@classmethod
def create(klass, name, product, product_versions, type, program_manager,
default_brew_tag, blocker_flags, ship_date=None):
"""
Create a new release in the ET.
See https://bugzilla.redhat.com/1401608 for background.
Note this method enforces certain conventions:
* Always disables PDC for a release
* Always creates the releases as "enabled"
* Always allows multiple advisories per package
* Description is always the combination of the product's own
description (for example "Red Hat Ceph Storage") with the number
from the latter part of the release's name. So a new "rhceph-3.0"
release will have a description "Red Hat Ceph Storage 3.0".
:param name: short name for this release, eg "rhceph-3.0"
:param product: short name, eg. "RHCEPH".
:param product_versions: list of names, eg. ["RHEL-7-CEPH-3"]
:param type: "Zstream" or "QuarterlyUpdate"
:param program_manager: for example "anharris" (Drew Harris, Ceph PgM)
:param default_brew_tag: for example "ceph-3.0-rhel-7-candidate"
:param blocker_flags: for example, "ceph-3.0"
:param ship_date: date formatted as strftime("%Y-%b-%d"). For example,
"2017-Nov-17". If ommitted, the ship_date will
be set to today's date. (This can always be updated
later to match the ship date value in Product
Pages.)
"""
product = Product(product)
(_, number) = name.split('-', 1)
description = '%s %s' % (product.description, number)
program_manager = User(program_manager)
product_version_ids = set([])
for pv_name in product_versions:
pv = ProductVersion(pv_name)
product_version_ids.add(pv.id)
if ship_date is None:
today = date.today()
ship_date = today.strftime("%Y-%b-%d")
et = ErrataConnector()
url = et._url + '/release/create'
payload = {
'type': type,
'release[allow_blocker]': 0,
'release[allow_exception]': 0,
'release[allow_pkg_dupes]': 1,
'release[allow_shadow]': 0,
'release[blocker_flags]': blocker_flags,
'release[default_brew_tag]': default_brew_tag,
'release[description]': description,
'release[enable_batching]': 0,
'release[enabled]': 1,
'release[is_deferred]': 0,
'release[is_pdc]': 0,
'release[name]': name,
'release[product_id]': product.id,
'release[product_version_ids][]': product_version_ids,
'release[program_manager_id]': program_manager.id,
'release[ship_date]': ship_date,
'release[type]': type,
}
result = et._post(url, data=payload)
if (sys.version_info > (3, 0)):
body = result.text
else:
# Found during live testing:
# UnicodeEncodeError: 'ascii' codec can't encode character u'\xe1'
# in position 44306: ordinal not in range(128)
# Not sure why there was a non-ascii character in the ET's HTTP
# response, but this fixes it.
body = result.text.encode('utf-8')
if result.status_code != 200:
# help with debugging:
print(body)
result.raise_for_status()
# We can get a 200 HTTP status_code here even when the POST failed to
# create the release in the ET database. (This happens, for example, if
# there are no Approved Components defined in Bugzilla for the release
# flag, and the ET hits Bugzilla's XMLRPC::FaultException.)
if 'field_errors' in body:
print(body)
raise ReleaseCreationError('see field_errors <div>')
return klass(name=name)
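

# A minimal usage sketch; the values mirror the docstring example for
# Release.create() above, and a live Errata Tool connection with valid
# credentials would be required for this to actually run:
#
#   release = Release.create(
#       name='rhceph-3.0',
#       product='RHCEPH',
#       product_versions=['RHEL-7-CEPH-3'],
#       type='Zstream',
#       program_manager='anharris',
#       default_brew_tag='ceph-3.0-rhel-7-candidate',
#       blocker_flags='ceph-3.0',
#       ship_date='2017-Nov-17',
#   )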
| [
"[email protected]"
] | |
b9470a6364fcb617b3b2bbeb23ef97dce22221d7 | de6fb3a55196b6bd36a4fda0e08ad658679fb7a1 | /optin_manager/src/python/openflow/common/utils/formfields.py | adec249dc39015d89a6d299354718c9fd0f8e896 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | dana-i2cat/felix | 4a87af639e4c7db686bfa03f1ae4ce62711615e3 | 059ed2b3308bda2af5e1942dc9967e6573dd6a53 | refs/heads/master | 2021-01-02T23:12:43.840754 | 2016-02-04T10:04:24 | 2016-02-04T10:04:24 | 17,132,912 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | '''
Created on Jul 17, 2010
@author: jnaous
'''
from django import forms
from expedient.common.utils import validators
class MACAddressField(forms.CharField):
"""
A MAC Address form field.
"""
default_error_messages = {
'invalid': u'Enter a valid MAC address in "xx:xx:xx:xx:xx:xx" format.',
}
default_validators = [validators.validate_mac_address]
| [
"[email protected]"
] | |
22214c4cf02d9139ebf68302682f68b55190d51e | 3a7adfdcf7a5048045c8e95a93369a1796cfd532 | /conftest.py | 377ddc7028f2964dd5cf5621a68dc74e7967e513 | [
"BSD-3-Clause"
] | permissive | theGreenJedi/nixpy | e06025077d5d224a7d051532ebfbd48845339c58 | 40b5ecdaa9b074c7bf73137d1a94cb84fcbae5be | refs/heads/master | 2022-02-01T15:14:22.133157 | 2019-06-03T09:10:57 | 2019-06-03T09:10:57 | 197,896,640 | 1 | 0 | null | 2019-07-20T07:37:03 | 2019-07-20T07:37:02 | null | UTF-8 | Python | false | false | 808 | py | import pytest
import tempfile
from nixio.test.xcompat.compile import maketests
BINDIR = tempfile.mkdtemp(prefix="nixpy-tests-")
def pytest_addoption(parser):
parser.addoption("--nix-compat", action="store_true", default=False,
help=("Run nix compatibility tests "
"(requires NIX library)"))
@pytest.fixture
def bindir(request):
return BINDIR
def pytest_collection_modifyitems(config, items):
if config.getoption("--nix-compat"):
print("Compiling NIX compatibility tests")
maketests(BINDIR)
return
skip_compat = pytest.mark.skip(
reason="Use --nix-compat option to run compatibility tests"
)
for item in items:
if "compatibility" in item.keywords:
item.add_marker(skip_compat)
| [
"[email protected]"
] | |
6a337ebcad790f7341970c4a3e71d1686f6229c6 | 333b405c1775475ddfa9ed3f4fa05c06b4c2e3f2 | /cv2/cvbackup/mycv_0.510464.py | c1b80110eb76fc4413a5cbbc9977af4cd86de47d | [] | no_license | daxiongshu/network | b77d5bb73dd353537f7687e61855d982cbd34464 | 842a778d310410ae39e58925257a9e9960ef560a | refs/heads/master | 2020-04-15T16:11:31.101188 | 2016-02-16T01:32:21 | 2016-02-16T01:32:21 | 51,798,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py | from xgb_classifier import xgb_classifier
import pandas as pd
import numpy as np
import pickle
from sklearn.ensemble import AdaBoostClassifier,ExtraTreesClassifier,RandomForestRegressor
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_auc_score, f1_score, log_loss, make_scorer
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC,SVC
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split,KFold,StratifiedKFold
from math import log, exp, sqrt,factorial
import numpy as np
from scipy import sparse
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
def rmsle(y,yp):
return (np.mean((yp-y)**2))**0.5
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
predictions = np.clip(y_pred, eps, 1 - eps)
# normalize row sums to 1
predictions /= predictions.sum(axis=1)[:, np.newaxis]
actual = np.zeros(y_pred.shape)
n_samples = actual.shape[0]
#y_true-=1
actual[np.arange(n_samples), y_true.astype(int)] = 1
vectsum = np.sum(actual * np.log(predictions))
loss = -1.0 / n_samples * vectsum
return loss
def new_clf_train_predict(X,y,Xt):
clf=single_model()
clf.fit(X,y)
return clf.predict_proba(Xt)
def cut(yp):
yp[yp<0]=0
yp[yp>7]=7
yp=yp.astype(int)
return yp
def kfold_cv(X_train, y_train,k):
kf = StratifiedKFold(y_train,n_folds=k)
xx=[]
zz=[]
ypred=np.zeros((y_train.shape[0],3))
for train_index, test_index in kf:
X_train_cv, X_test_cv = X_train[train_index,:],X_train[test_index,:]
y_train_cv, y_test_cv = y_train[train_index],y_train[test_index]
clf=xgb_classifier(eta=0.1,gamma=0,col=0.4,min_child_weight=1,depth=7,num_round=160)
y_pred=clf.multi(X_train_cv,y_train_cv,X_test_cv,3,y_test=y_test_cv)
xx.append(multiclass_log_loss(y_test_cv,y_pred))
print xx[-1]#,y_pred.shape,zz[-1]
ypred[test_index]=y_pred
print xx
print 'average:',np.mean(xx),'std',np.std(xx)
return ypred,np.mean(xx)
mem = Memory("./mycache")
@mem.cache
def get_data(name):
data = load_svmlight_file(name)
return data[0], data[1]
X, _ = get_data('../sparse/rebuild1.svm')
X1, _ =get_data('../sparse/rebuild2.svm')
X2, _ = get_data('../sparse/rebuild3.svm')
X3, _ =get_data('../sparse/rebuild4.svm')
X4, _ =get_data('../sparse/rebuild5.svm')
X5, _ =get_data('../sparse/rebuild6.svm')
xx=[]
xx.append(np.sum(X.todense(),axis=1))
xx.append(np.sum(X1.todense(),axis=1))
xx.append(np.sum(X2.todense(),axis=1))
xx.append(np.sum(X3.todense(),axis=1))
xx.append(np.sum(X4.todense(),axis=1))
xx.append(np.std(X.todense(),axis=1))
xx.append(np.std(X1.todense(),axis=1))
xx.append(np.std(X2.todense(),axis=1))
xx.append(np.std(X3.todense(),axis=1))
xx.append(np.std(X4.todense(),axis=1))
#xx.append(np.sum(sparse.hstack([X,X1,X2,X3,X4],format='csr').todense(),axis=1))
#xx.append(np.max(X.todense(),axis=1)-np.min(X.todense(),axis=1))
#xx.append(np.max(X1.todense(),axis=1)-np.min(X1.todense(),axis=1))
#xx.append(np.max(X2.todense(),axis=1)-np.min(X2.todense(),axis=1))
#xx.append(np.max(X3.todense(),axis=1)-np.min(X3.todense(),axis=1))
#xx.append(np.max(X4.todense(),axis=1)-np.min(X4.todense(),axis=1))
xx=np.hstack(xx)
X=sparse.hstack([X,X1,X2,X3,X4,xx,pickle.load(open('../explore/X2.p'))],format='csr').todense()
train=pd.read_csv('../explore/train1.csv')
idname='id'
label='fault_severity'
idx=train[idname].as_matrix()
y=np.array(train[label])
import pickle
X=np.hstack([X,train.drop([label,idname],axis=1).as_matrix()])
#X=np.hstack([X,train[['location','volume']].as_matrix()])
print X.shape, y.shape
from scipy.stats import pearsonr
xx=[]
for i in X.T:
score=pearsonr(np.array(i.T).ravel(),y)[0]
if np.abs(score)>1e-2:
xx.append(np.array(i.T).ravel())
X=np.array(xx).T
print X.shape, y.shape
yp,score=kfold_cv(X,y,4)
print X.shape, y.shape
print yp.shape
s=pd.DataFrame({idname:idx,'predict_0':yp[:,0],'predict_1':yp[:,1],'predict_2':yp[:,2],'real':y})
s.to_csv('va.csv',index=False)
import subprocess
cmd='cp mycv.py cvbackup/mycv_%f.py'%(score)
subprocess.call(cmd,shell=True)
cmd='cp va.csv cvbackup/va_%f.csv'%(score)
subprocess.call(cmd,shell=True)
| [
"[email protected]"
] | |
d39dbb85f0ea8a843010ed2ff417e14430ec8b04 | ae381913c23385f004b82161624097645ba8c4c8 | /Huaxian_eemd/projects/plot_decompositions.py | 8dbd45db6556f91e1ce3f8e7adbb1107c6385152 | [
"MIT"
] | permissive | zjy8006/MonthlyRunoffForecastByAutoReg | aa37910fdc66276d0df9d30af6885209d4a4ebfc | 661fcb5dcdfbbb2ec6861e1668a035b50e69f7c2 | refs/heads/master | 2020-12-12T05:25:48.768993 | 2020-08-20T07:21:12 | 2020-08-20T07:21:12 | 259,588,564 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import pandas as pd
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path)
from tools.plot_utils import plot_decompositions
signal = pd.read_csv(root_path+'/Huaxian_eemd/data/EEMD_TRAIN.csv')
plot_decompositions(signal)
| [
"[email protected]"
] | |
613939625c016e2ed72cd4b6885baa6b413b8c7e | 5946112229fe1d9a04b7536f076a656438fcd05b | /dev_env/lib/python3.8/site-packages/pygments/styles/rrt.py | 2b1908794c8703c74074b3c356e1d1022988809b | [] | no_license | Gear-Droid/openCV_study_project | 3b117967eb8a28bb0c90088e1556fbc1d306a98b | 28c9a494680c4a280f87dd0cc87675dfb2262176 | refs/heads/main | 2023-05-14T14:27:42.284265 | 2021-06-05T00:16:09 | 2021-06-05T00:16:09 | 307,807,458 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | # -*- coding: utf-8 -*-
"""
pygments.styles.rrt
~~~~~~~~~~~~~~~~~~~
pygments "rrt" theme, based on Zap and Emacs defaults.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Comment, Name, Keyword, String
class RrtStyle(Style):
"""
Minimalistic "rrt" theme, based on Zap and Emacs defaults.
"""
background_color = '#000000'
highlight_color = '#0000ff'
styles = {
Comment: '#00ff00',
Name.Function: '#ffff00',
Name.Variable: '#eedd82',
Name.Constant: '#7fffd4',
Keyword: '#ff0000',
Comment.Preproc: '#e5e5e5',
String: '#87ceeb',
Keyword.Type: '#ee82ee',
}
| [
"[email protected]"
] | |
4af4c6c67883138cb403bc55c20a57a17f3abf94 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_143/ch40_2020_03_25_11_34_14_842288.py | 7b4cd03ca35996a28aee9136ab7f8fc3ef414f7a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | def soma_valores(s):
    # Assumed intent (hypothetical fix): return the running (cumulative) sums of s;
    # the original indexed an undefined list y and walked past the end of s.
    y = []
    total = 0
    for valor in s:
        total += valor
        y.append(total)
return y | [
"[email protected]"
] | |
1400cc7e36dc1608eda6cf944b667fb37a1ea0b3 | b19dfd6a3ba5d107d110fb936de2e91d1d92bb99 | /venv/lib/python3.7/site-packages/Satchmo-0.9.3-py3.7.egg/shipping/modules/ups/config.py | 5c8e90a363eefc21999a9a0da571173a720a91b8 | [] | no_license | siddhant3030/djangoecommerce | d8f5b21f29d17d2979b073fd9389badafc993b5c | b067cb1155c778fece4634d0a98631a0646dacff | refs/heads/master | 2022-12-13T15:28:39.229377 | 2019-09-28T10:30:02 | 2019-09-28T10:30:02 | 207,240,716 | 2 | 1 | null | 2022-12-11T01:34:25 | 2019-09-09T06:35:36 | Python | UTF-8 | Python | false | false | 3,913 | py | from decimal import Decimal
from django.utils.translation import ugettext_lazy as _
from livesettings.values import StringValue,ConfigurationGroup,BooleanValue,DecimalValue,MultipleStringValue
from livesettings.functions import config_register_list,config_get
SHIP_MODULES = config_get('SHIPPING', 'MODULES')
SHIP_MODULES.add_choice(('shipping.modules.ups', 'UPS'))
SHIPPING_GROUP = ConfigurationGroup('shipping.modules.ups',
_('UPS Shipping Settings'),
requires = SHIP_MODULES,
ordering = 101)
config_register_list(
StringValue(SHIPPING_GROUP,
'XML_KEY',
description=_("UPS XML Access Key"),
help_text=_("XML Access Key Provided by UPS"),
default=""),
StringValue(SHIPPING_GROUP,
'USER_ID',
description=_("UPS User ID"),
help_text=_("User ID provided by UPS site."),
default=""),
StringValue(SHIPPING_GROUP,
'ACCOUNT',
description=_("UPS Account Number"),
help_text=_("UPS Account Number."),
default=""),
StringValue(SHIPPING_GROUP,
'USER_PASSWORD',
description=_("UPS User Password"),
help_text=_("User password provided by UPS site."),
default=""),
MultipleStringValue(SHIPPING_GROUP,
'UPS_SHIPPING_CHOICES',
description=_("UPS Shipping Choices Available to customers. These are valid domestic codes only."),
choices = (
(('01', 'Next Day Air')),
(('02', 'Second Day Air')),
(('03', 'Ground')),
(('12', '3 Day Select')),
(('13', 'Next Day Air Saver')),
(('14', 'Next Day Air Early AM')),
(('59', '2nd Day Air AM')),
),
default = ('03',)),
DecimalValue(SHIPPING_GROUP,
'HANDLING_FEE',
description=_("Handling Fee"),
help_text=_("The cost of packaging and getting the package off"),
default=Decimal('0.00')),
StringValue(SHIPPING_GROUP,
'SHIPPING_CONTAINER',
description=_("Type of container used to ship product."),
choices = (
(('00', 'Unknown')),
(('01', 'UPS LETTER')),
(('02', 'PACKAGE / CUSTOMER SUPPLIED')),
),
default = "00"),
BooleanValue(SHIPPING_GROUP,
'SINGLE_BOX',
description=_("Single Box?"),
help_text=_("Use just one box and ship by weight? If no then every item will be sent in its own box."),
default=True),
BooleanValue(SHIPPING_GROUP,
'TIME_IN_TRANSIT',
description=_("Time in Transit?"),
help_text=_("Use the UPS Time In Transit API? It is slower but delivery dates are more accurate."),
default=False),
StringValue(SHIPPING_GROUP,
'PICKUP_TYPE',
description=_("UPS Pickup option."),
choices = (
(('01', 'DAILY PICKUP')),
(('03', 'CUSTOMER COUNTER')),
(('06', 'ONE TIME PICKUP')),
(('07', 'ON CALL PICKUP')),
),
default = "07"),
BooleanValue(SHIPPING_GROUP,
'LIVE',
description=_("Access production UPS server"),
help_text=_("Use this when your store is in production."),
default=False),
StringValue(SHIPPING_GROUP,
'CONNECTION',
description=_("Submit to URL"),
help_text=_("Address to submit live transactions."),
default="https://onlinetools.ups.com/ups.app/xml/Rate"),
StringValue(SHIPPING_GROUP,
'CONNECTION_TEST',
description=_("Submit to TestURL"),
help_text=_("Address to submit test transactions."),
default="https://wwwcie.ups.com/ups.app/xml/Rate"),
BooleanValue(SHIPPING_GROUP,
'VERBOSE_LOG',
description=_("Verbose logs"),
help_text=_("Send the entire request and response to the log - for debugging help when setting up UPS."),
default=False)
)
| [
"[email protected]"
] | |
4da4aa68a0cd83d1a57b20435439e06bad9395a2 | fc6f709f916fcd201938157990c77fa9202eefa7 | /model/optimizer.py | 4a9ee5afce8f27d52a2e33ea778b94ad326ffc29 | [
"MIT"
] | permissive | chenchy/StyleSpeech | 441ffd6d71ac0269d205ad66c9536fe00cb5267c | e0e4ad25681f9ecc2a01ba1b87cbe0c59472b792 | refs/heads/main | 2023-05-27T21:39:04.790584 | 2021-06-13T10:32:03 | 2021-06-13T11:26:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,650 | py | import torch
import numpy as np
class ScheduledOptimMain:
""" A simple wrapper class for learning rate scheduling """
def __init__(self, model, train_config, model_config, current_step):
self._optimizer = torch.optim.Adam(
model.parameters(),
betas=train_config["optimizer"]["betas"],
eps=train_config["optimizer"]["eps"],
weight_decay=train_config["optimizer"]["weight_decay"],
)
self.n_warmup_steps = train_config["optimizer"]["warm_up_step"]
self.anneal_steps = train_config["optimizer"]["anneal_steps"]
self.anneal_rate = train_config["optimizer"]["anneal_rate"]
self.current_step = current_step
self.init_lr = np.power(model_config["transformer"]["encoder_hidden"], -0.5)
def step_and_update_lr(self):
self._update_learning_rate()
self._optimizer.step()
def zero_grad(self):
# print(self.init_lr)
self._optimizer.zero_grad()
def load_state_dict(self, path):
self._optimizer.load_state_dict(path)
def _get_lr_scale(self):
lr = np.min(
[
np.power(self.current_step, -0.5),
np.power(self.n_warmup_steps, -1.5) * self.current_step,
]
)
for s in self.anneal_steps:
if self.current_step > s:
lr = lr * self.anneal_rate
return lr
def _update_learning_rate(self):
""" Learning rate scheduling per step """
self.current_step += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group["lr"] = lr
class ScheduledOptimDisc:
""" A simple wrapper class for learning rate scheduling """
def __init__(self, model, train_config):
self._optimizer = torch.optim.Adam(
model.parameters(),
betas=train_config["optimizer"]["betas"],
eps=train_config["optimizer"]["eps"],
weight_decay=train_config["optimizer"]["weight_decay"],
)
self.init_lr = train_config["optimizer"]["lr_disc"]
self._init_learning_rate()
def step_and_update_lr(self):
self._optimizer.step()
def zero_grad(self):
# print(self.init_lr)
self._optimizer.zero_grad()
def load_state_dict(self, path):
self._optimizer.load_state_dict(path)
def _init_learning_rate(self):
lr = self.init_lr
for param_group in self._optimizer.param_groups:
param_group["lr"] = lr
| [
"[email protected]"
] | |
2512edb155a767f6b7f93f15c00b755dc45ef923 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/toontown/toon/DistributedToon.py | d9f2c0b0c6e00991934003b88c5ad0845ba6deeb | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 139,568 | py | from subprocess import Popen
import sys
from panda3d.core import *
from libotp import *
from toontown.toonbase.ToontownGlobals import *
from direct.actor import Actor
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from otp.otpbase import OTPGlobals
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from otp.avatar import DistributedPlayer
from otp.avatar import Avatar, DistributedAvatar
from otp.speedchat import SCDecoders
from otp.chat import TalkAssistant
import Toon
from direct.task.Task import Task
from direct.distributed import DistributedSmoothNode
from direct.distributed import DistributedObject
from direct.fsm import ClassicFSM
from toontown.hood import ZoneUtil
from toontown.distributed import DelayDelete
from toontown.distributed.DelayDeletable import DelayDeletable
from direct.showbase import PythonUtil
from toontown.catalog import CatalogItemList
from toontown.catalog import CatalogItem
import TTEmote
from toontown.shtiker.OptionsPage import speedChatStyles
from toontown.fishing import FishCollection
from toontown.fishing import FishTank
from toontown.suit import SuitDNA
from toontown.coghq import CogDisguiseGlobals
from toontown.toonbase import TTLocalizer
import Experience, InventoryNew
from toontown.speedchat import TTSCDecoders
from toontown.chat import ToonChatGarbler
from toontown.chat import ResistanceChat
from direct.distributed.MsgTypes import *
from toontown.effects.ScavengerHuntEffects import *
from toontown.estate import FlowerCollection
from toontown.estate import FlowerBasket
from toontown.estate import GardenGlobals
from toontown.estate import DistributedGagTree
from toontown.golf import GolfGlobals
from toontown.parties.PartyGlobals import InviteStatus, PartyStatus
from toontown.parties.PartyInfo import PartyInfo
from toontown.parties.InviteInfo import InviteInfo
from toontown.parties.PartyReplyInfo import PartyReplyInfoBase
from toontown.parties.SimpleMailBase import SimpleMailBase
from toontown.parties import PartyGlobals
from toontown.friends import FriendHandle
import time, operator
from direct.interval.IntervalGlobal import Sequence, Wait, Func, Parallel, SoundInterval
from toontown.distributed import DelayDelete
from otp.otpbase import OTPLocalizer
from direct.showbase.InputStateGlobal import inputState
from toontown.avatar import ToontownAvatarUtils
from toontown.toon import NPCToons
from toontown.battle.BattleProps import globalPropPool
from toontown.char import CharDNA
import random, copy, webbrowser
if base.wantKarts:
from toontown.racing.KartDNA import *
class DistributedToon(DistributedPlayer.DistributedPlayer, Toon.Toon, DistributedSmoothNode.DistributedSmoothNode, DelayDeletable):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedToon')
partyNotify = DirectNotifyGlobal.directNotify.newCategory('DistributedToon_Party')
chatGarbler = ToonChatGarbler.ToonChatGarbler()
gmNameTag = None
def __init__(self, cr, bFake=False):
try:
self.DistributedToon_initialized
return
except:
self.DistributedToon_initialized = 1
DistributedPlayer.DistributedPlayer.__init__(self, cr)
Toon.Toon.__init__(self)
DistributedSmoothNode.DistributedSmoothNode.__init__(self, cr)
self.bFake = bFake
self.kart = None
self._isGM = False
self._gmType = None
self.trophyScore = 0
self.trophyStar = None
self.trophyStarSpeed = 0
self.safeZonesVisited = []
self.NPCFriendsDict = {}
self.earnedExperience = None
self.track = None
self.effect = None
self.maxCarry = 0
self.disguisePageFlag = 0
self.sosPageFlag = 0
self.disguisePage = None
self.sosPage = None
self.gardenPage = None
self.cogTypes = [0,
0,
0,
0]
self.cogLevels = [0,
0,
0,
0]
self.cogParts = [0,
0,
0,
0]
self.cogMerits = [0,
0,
0,
0]
self.savedCheesyEffect = CENormal
self.savedCheesyHoodId = 0
self.savedCheesyExpireTime = 0
if hasattr(base, 'wantPets') and base.wantPets:
self.petTrickPhrases = []
self.petDNA = None
self.customMessages = []
self.resistanceMessages = []
self.cogSummonsEarned = []
self.catalogNotify = ToontownGlobals.NoItems
self.mailboxNotify = ToontownGlobals.NoItems
self.simpleMailNotify = ToontownGlobals.NoItems
self.inviteMailNotify = ToontownGlobals.NoItems
self.catalogScheduleCurrentWeek = 0
self.catalogScheduleNextTime = 0
self.monthlyCatalog = CatalogItemList.CatalogItemList()
self.weeklyCatalog = CatalogItemList.CatalogItemList()
self.backCatalog = CatalogItemList.CatalogItemList()
self.onOrder = CatalogItemList.CatalogItemList(store=CatalogItem.Customization | CatalogItem.DeliveryDate)
self.onGiftOrder = CatalogItemList.CatalogItemList(store=CatalogItem.Customization | CatalogItem.DeliveryDate)
self.mailboxContents = CatalogItemList.CatalogItemList(store=CatalogItem.Customization)
self.deliveryboxContentsContents = CatalogItemList.CatalogItemList(store=CatalogItem.Customization | CatalogItem.GiftTag)
self.awardMailboxContents = CatalogItemList.CatalogItemList(store=CatalogItem.Customization)
self.onAwardOrder = CatalogItemList.CatalogItemList(store=CatalogItem.Customization | CatalogItem.DeliveryDate)
self.splash = None
self.tossTrack = None
self.pieTracks = {}
self.splatTracks = {}
self.lastTossedPie = 0
self.clothesTopsList = []
self.clothesBottomsList = []
self.hatList = []
self.glassesList = []
self.backpackList = []
self.shoesList = []
self.tunnelTrack = None
self.tunnelPivotPos = [-14, -6, 0]
self.tunnelCenterOffset = 9.0
self.tunnelCenterInfluence = 0.6
self.pivotAngle = 135
self.posIndex = 0
self.houseId = 0
self.money = 0
self.bankMoney = 0
self.maxMoney = 0
self.maxBankMoney = 0
self.emblems = [0, 0]
self.maxNPCFriends = 16
self.petId = 0
self.bPetTutorialDone = False
self.bFishBingoTutorialDone = False
self.bFishBingoMarkTutorialDone = False
self.accessories = []
if base.wantKarts:
self.kartDNA = [
-1] * getNumFields()
self.flowerCollection = None
self.shovel = 0
self.shovelSkill = 0
self.shovelModel = None
self.wateringCan = 0
self.wateringCanSkill = 0
self.wateringCanModel = None
self.gardenSpecials = []
self.unlimitedSwing = 0
self.soundSequenceList = []
self.boardingParty = None
self.__currentDialogue = None
self.mail = None
self.invites = []
self.hostedParties = []
self.partiesInvitedTo = []
self.partyReplyInfoBases = []
self.gmState = 0
self.gmNameTagEnabled = 0
self.gmNameTagColor = 'whiteGM'
self.gmNameTagString = ''
self._lastZombieContext = None
self.carActive = False
self.carInterest = None
self.activeIntervals = {}
self.locked = False
self.muted = False
self.transitioning = False
self.cogIndex = -1
self.immortalMode = False
self.unlimitedGags = False
self.instaKill = False
self.cage = None
self.cageCameraNode = None
self.unlocks = []
return
def disable(self):
for soundSequence in self.soundSequenceList:
soundSequence.finish()
self.soundSequenceList = []
self._stopZombieCheck()
if self.boardingParty:
self.boardingParty.demandDrop()
self.boardingParty = None
self.carActive = False
self.updateCarActive()
self.ignore('clientCleanup')
self.stopAnimations()
self.clearCheesyEffect()
self.stopBlink()
self.stopSmooth()
self.stopLookAroundNow()
self.setGhostMode(0)
if self.track != None:
self.track.finish()
DelayDelete.cleanupDelayDeletes(self.track)
self.track = None
if self.effect != None:
self.effect.destroy()
self.effect = None
if self.splash != None:
self.splash.destroy()
self.splash = None
if self.emote != None:
self.emote.finish()
self.emote = None
self.cleanupPies()
if self.isDisguised:
self.takeOffSuit()
if self.tunnelTrack:
self.tunnelTrack.finish()
self.tunnelTrack = None
self.setTrophyScore(0)
self.removeGMIcon()
self.cleanupIntervals()
if self.doId in self.cr.toons:
del self.cr.toons[self.doId]
if self.cage:
self.cage.removeNode()
if self.cageCameraNode:
self.cageCameraNode.removeNode()
DistributedPlayer.DistributedPlayer.disable(self)
return
def delete(self):
try:
self.DistributedToon_deleted
except:
self.DistributedToon_deleted = 1
del self.safeZonesVisited
DistributedPlayer.DistributedPlayer.delete(self)
Toon.Toon.delete(self)
DistributedSmoothNode.DistributedSmoothNode.delete(self)
def generate(self):
DistributedPlayer.DistributedPlayer.generate(self)
DistributedSmoothNode.DistributedSmoothNode.generate(self)
self.cr.toons[self.doId] = self
if base.cr.trophyManager != None:
base.cr.trophyManager.d_requestTrophyScore()
self.startBlink()
self.startSmooth()
self.accept('clientCleanup', self._handleClientCleanup)
return
def announceGenerate(self):
DistributedPlayer.DistributedPlayer.announceGenerate(self)
if self.animFSM.getCurrentState().getName() == 'off':
self.setAnimState('neutral')
self._startZombieCheck()
self.updateCarActive()
def _handleClientCleanup(self):
if self.track != None:
DelayDelete.cleanupDelayDeletes(self.track)
return
def setDNAString(self, dnaString):
Toon.Toon.setDNAString(self, dnaString)
base.cr.discordManager.setSmallImage(base.cr.discordManager.getSmallImage())
def setDNA(self, dna):
if base.cr.newsManager:
if base.cr.newsManager.isHolidayRunning(ToontownGlobals.SPOOKY_BLACK_CAT):
black = 26
heads = ['cls',
'css',
'csl',
'cll']
dna.setTemporary(random.choice(heads), black, black, black)
else:
dna.restoreTemporary(self.style)
oldHat = self.getHat()
oldGlasses = self.getGlasses()
oldBackpack = self.getBackpack()
oldShoes = self.getShoes()
self.setHat(0, 0, 0)
self.setGlasses(0, 0, 0)
self.setBackpack(0, 0, 0)
self.setShoes(0, 0, 0)
Toon.Toon.setDNA(self, dna)
self.setHat(*oldHat)
self.setGlasses(*oldGlasses)
self.setBackpack(*oldBackpack)
self.setShoes(*oldShoes)
def setMagicDNA(self, hp):
self.sendUpdate('setMagicDNA', [hp])
def setMagicHeadAccessories(self, h1, h2, g1, g2):
self.sendUpdate('setMagicHeadAccessories', [h1, h2, g1, g2])
def setMagicBodyAccessories(self, b1, b2, s1, s2):
self.sendUpdate('setMagicBodyAccessories', [b1, b2, s1, s2])
def setHat(self, idx, textureIdx, colorIdx):
Toon.Toon.setHat(self, idx, textureIdx, colorIdx)
def setGlasses(self, idx, textureIdx, colorIdx):
Toon.Toon.setGlasses(self, idx, textureIdx, colorIdx)
def setBackpack(self, idx, textureIdx, colorIdx):
Toon.Toon.setBackpack(self, idx, textureIdx, colorIdx)
def setShoes(self, idx, textureIdx, colorIdx):
Toon.Toon.setShoes(self, idx, textureIdx, colorIdx)
def setGM(self, type):
wasGM = self._isGM
self._isGM = type != 0
self._gmType = None
if self._isGM:
self._gmType = type - 1
if self._isGM != wasGM:
self._handleGMName()
return
def setExperience(self, experience):
self.experience = Experience.Experience(experience, self)
if self.inventory:
self.inventory.updateGUI()
def setInventory(self, inventoryNetString):
if not self.inventory:
self.inventory = InventoryNew.InventoryNew(self, inventoryNetString)
self.inventory.updateInvString(inventoryNetString)
def setLastHood(self, lastHood):
self.lastHood = lastHood
def setBattleId(self, battleId):
self.battleId = battleId
messenger.send('ToonBattleIdUpdate', [self.doId])
def b_setSCToontask(self, taskId, toNpcId, toonProgress, msgIndex):
self.setSCToontask(taskId, toNpcId, toonProgress, msgIndex)
self.d_setSCToontask(taskId, toNpcId, toonProgress, msgIndex)
return
def d_setSCToontask(self, taskId, toNpcId, toonProgress, msgIndex):
messenger.send('wakeup')
self.sendUpdate('setSCToontask', [taskId,
toNpcId,
toonProgress,
msgIndex])
def setSCToontask(self, taskId, toNpcId, toonProgress, msgIndex):
if self.doId in base.localAvatar.ignoreList:
return
chatString = TTSCDecoders.decodeTTSCToontaskMsg(taskId, toNpcId, toonProgress, msgIndex)
if chatString:
self.setChatAbsolute(chatString, CFSpeech | CFQuicktalker | CFTimeout)
def b_setSCSinging(self, msgIndex):
self.setSCSinging(msgIndex)
self.d_setSCSinging(msgIndex)
return
def d_setSCSinging(self, msgIndex):
messenger.send('wakeup')
self.sendUpdate('setSCSinging', [msgIndex])
def sendLogSuspiciousEvent(self, msg):
localAvatar.sendUpdate('logSuspiciousEvent', ['%s for %s' % (msg, self.doId)])
def setSCSinging(self, msgIndex):
self.sendUpdate('logSuspiciousEvent', ['invalid msgIndex in setSCSinging: %s from %s' % (msgIndex, self.doId)])
def d_reqSCResistance(self, msgIndex):
messenger.send('wakeup')
nearbyPlayers = self.getNearbyPlayers(ResistanceChat.EFFECT_RADIUS)
self.sendUpdate('reqSCResistance', [msgIndex, nearbyPlayers])
def getNearbyPlayers(self, radius, includeSelf=True):
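        """Return the doIds of all other DistributedToon objects within
        radius of this toon; include our own doId when includeSelf is True."""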
nearbyToons = []
toonIds = self.cr.getObjectsOfExactClass(DistributedToon)
for toonId, toon in toonIds.items():
if toon is not self:
dist = toon.getDistance(self)
if dist < radius:
nearbyToons.append(toonId)
if includeSelf:
nearbyToons.append(self.doId)
return nearbyToons
def setSCResistance(self, msgIndex, nearbyToons=[]):
chatString = TTSCDecoders.decodeTTSCResistanceMsg(msgIndex)
if chatString:
self.setChatAbsolute(chatString, CFSpeech | CFTimeout)
ResistanceChat.doEffect(msgIndex, self, nearbyToons)
def d_battleSOS(self, requesterId, sendToId=None):
        if not base.cr.isFriend(sendToId):
return
self.sendUpdate('battleSOS', [requesterId], sendToId)
def battleSOS(self, requesterId):
if not base.cr.isFriend(requesterId):
return
else:
avatar = base.cr.identifyAvatar(requesterId)
if isinstance(avatar, DistributedToon) or isinstance(avatar, FriendHandle.FriendHandle):
self.setSystemMessage(requesterId, TTLocalizer.MovieSOSWhisperHelp % avatar.getName(), whisperType=WhisperPopup.WTBattleSOS)
elif avatar is not None:
self.notify.warning('got battleSOS from non-toon %s' % requesterId)
return
def getDialogueArray(self, *args):
return Toon.Toon.getDialogueArray(self, *args)
def setDefaultShard(self, shard):
self.defaultShard = shard
def setDefaultZone(self, zoneId):
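        """Record the zone this toon should spawn into by default, remapping
        legacy 3xxxx zone ids and falling back to Toontown Central when the
        hood is unavailable, unpaid, or its phase is not downloaded."""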
if zoneId >= 30000 and zoneId < 40000:
zoneId = zoneId + 2000
try:
hoodPhase = base.cr.hoodMgr.getPhaseFromHood(zoneId)
except:
self.defaultZone = ToontownCentral
return
if ZoneUtil.getCanonicalHoodId(zoneId) == FunnyFarm:
self.defaultZone = ToontownCentral
return
if not base.cr.isPaid() or launcher and not launcher.getPhaseComplete(hoodPhase):
self.defaultZone = ToontownCentral
else:
self.defaultZone = zoneId
def setShtickerBook(self, string):
pass
def setAsGM(self, state):
self.notify.debug('Setting GM State: %s' % state)
DistributedPlayer.DistributedPlayer.setAsGM(self, state)
def d_updateGMNameTag(self):
self.refreshName()
def updateGMNameTag(self, tagString, color, state):
try:
unicode(tagString, 'utf-8')
except UnicodeDecodeError:
self.sendUpdate('logSuspiciousEvent', ['invalid GM name tag: %s from %s' % (tagString, self.doId)])
return
def refreshName(self):
return
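        # NOTE: the early return above disables the GM nametag refresh below;
        # the rest of this method is never reached.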
self.notify.debug('Refreshing GM Nametag String: %s Color: %s State: %s' % (self.gmNameTagString, self.gmNameTagColor, self.gmNameTagEnabled))
if hasattr(self, 'nametag') and self.gmNameTagEnabled:
self.setDisplayName(self.gmNameTagString)
self.setName(self.gmNameTagString)
self.trophyStar1 = loader.loadModel('models/misc/smiley')
self.trophyStar1.reparentTo(self.nametag.getNameIcon())
self.trophyStar1.setScale(1)
self.trophyStar1.setZ(2.25)
self.trophyStar1.setColor(Vec4(0.75, 0.75, 0.75, 0.75))
self.trophyStar1.setTransparency(1)
self.trophyStarSpeed = 15
else:
taskMgr.add(self.__refreshNameCallBack, self.uniqueName('refreshNameCallBack'))
def __starSpin1(self, task):
now = globalClock.getFrameTime()
r = now * 90 % 360.0
self.trophyStar1.setH(r)
return Task.cont
def __refreshNameCallBack(self, task):
if hasattr(self, 'nametag') and self.nametag.getName() != '':
self.refreshName()
return Task.done
else:
return Task.cont
def setTalk(self, fromAV, fromAC, avatarName, chat, mods, flags, raw):
if base.cr.avatarFriendsManager.checkIgnored(fromAV):
self.d_setWhisperIgnored(fromAV)
return
else:
if fromAV in self.ignoreList:
self.d_setWhisperIgnored(fromAV)
return
if base.config.GetBool('want-sleep-reply-on-regular-chat', 0):
if base.localAvatar.sleepFlag == 1:
self.sendUpdate('setSleepAutoReply', [base.localAvatar.doId], fromAV)
newText, scrubbed = self.scrubTalk(chat, mods, raw)
self.displayTalk(newText)
base.talkAssistant.receiveOpenTalk(fromAV, avatarName, fromAC, None, newText)
return
def isAvFriend(self, avId):
return base.cr.isFriend(avId) or base.cr.playerFriendsManager.isAvatarOwnerPlayerFriend(avId)
def setTalkWhisper(self, fromAV, fromAC, avatarName, chat, mods, flags, raw):
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(fromAV):
return
if base.cr.avatarFriendsManager.checkIgnored(fromAV):
self.d_setWhisperIgnored(fromAV)
return
else:
if fromAV in self.ignoreList:
self.d_setWhisperIgnored(fromAV)
return
if base.config.GetBool('ignore-whispers', 0):
return
if base.localAvatar.sleepFlag == 1:
if not base.cr.identifyAvatar(fromAV) == base.localAvatar:
self.sendUpdate('setSleepAutoReply', [base.localAvatar.doId], fromAV)
newText, scrubbed = self.scrubTalk(chat, mods, raw)
self.displayTalkWhisper(fromAV, avatarName, chat, mods, raw)
base.talkAssistant.receiveWhisperTalk(fromAV, avatarName, fromAC, None, self.doId, self.getName(), newText)
return
def setSleepAutoReply(self, fromId):
pass
def _isValidWhisperSource(self, source):
return isinstance(source, FriendHandle.FriendHandle) or isinstance(source, DistributedToon)
def setWhisperSCEmoteFrom(self, fromId, emoteId):
handle = base.cr.identifyFriend(fromId)
if handle == None:
return
else:
if not self._isValidWhisperSource(handle):
self.notify.warning('setWhisperSCEmoteFrom non-toon %s' % fromId)
return
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(fromId):
return
if base.cr.avatarFriendsManager.checkIgnored(fromId):
self.d_setWhisperIgnored(fromId)
return
if base.localAvatar.sleepFlag == 1:
if not base.cr.identifyAvatar(fromId) == base.localAvatar:
self.sendUpdate('setSleepAutoReply', [base.localAvatar.doId], fromId)
chatString = SCDecoders.decodeSCEmoteWhisperMsg(emoteId, handle.getName())
if chatString:
self.displayWhisper(fromId, chatString, WhisperPopup.WTEmote)
base.talkAssistant.receiveAvatarWhisperSpeedChat(TalkAssistant.SPEEDCHAT_EMOTE, emoteId, fromId)
return
def setWhisperSCFrom(self, fromId, msgIndex):
handle = base.cr.identifyFriend(fromId)
if handle == None:
return
else:
if not self._isValidWhisperSource(handle):
self.notify.warning('setWhisperSCFrom non-toon %s' % fromId)
return
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(fromId):
return
if base.cr.avatarFriendsManager.checkIgnored(fromId):
self.d_setWhisperIgnored(fromId)
return
if fromId in self.ignoreList:
self.d_setWhisperIgnored(fromId)
return
if base.localAvatar.sleepFlag == 1:
if not base.cr.identifyAvatar(fromId) == base.localAvatar:
self.sendUpdate('setSleepAutoReply', [base.localAvatar.doId], fromId)
chatString = SCDecoders.decodeSCStaticTextMsg(msgIndex)
if chatString:
self.displayWhisper(fromId, chatString, WhisperPopup.WTQuickTalker)
base.talkAssistant.receiveAvatarWhisperSpeedChat(TalkAssistant.SPEEDCHAT_NORMAL, msgIndex, fromId)
return
def setWhisperSCCustomFrom(self, fromId, msgIndex):
handle = base.cr.identifyFriend(fromId)
if handle == None:
return
else:
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(fromId):
return
return DistributedPlayer.DistributedPlayer.setWhisperSCCustomFrom(self, fromId, msgIndex)
def whisperSCToontaskTo(self, taskId, toNpcId, toonProgress, msgIndex, sendToId):
messenger.send('wakeup')
self.sendUpdate('setWhisperSCToontaskFrom', [self.doId,
taskId,
toNpcId,
toonProgress,
msgIndex], sendToId)
def setWhisperSCToontaskFrom(self, fromId, taskId, toNpcId, toonProgress, msgIndex):
sender = base.cr.identifyFriend(fromId)
if sender == None:
return
else:
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(fromId):
return
if fromId in self.ignoreList:
self.d_setWhisperIgnored(fromId)
chatString = TTSCDecoders.decodeTTSCToontaskMsg(taskId, toNpcId, toonProgress, msgIndex)
if chatString:
self.displayWhisper(fromId, chatString, WhisperPopup.WTQuickTalker)
return
def setMaxNPCFriends(self, max):
max &= 32767
if max != self.maxNPCFriends:
self.maxNPCFriends = max
messenger.send(self.uniqueName('maxNPCFriendsChange'))
else:
self.maxNPCFriends = max
def getMaxNPCFriends(self):
return self.maxNPCFriends
def getNPCFriendsDict(self):
return self.NPCFriendsDict
def setNPCFriendsDict(self, NPCFriendsList):
NPCFriendsDict = {}
for friendPair in NPCFriendsList:
npcFriends = NPCToons.loadCards(returnDict=True)
if friendPair[0] not in npcFriends:
continue
NPCFriendsDict[friendPair[0]] = friendPair[1]
self.NPCFriendsDict = NPCFriendsDict
def setMaxAccessories(self, max):
self.maxAccessories = max
def getMaxAccessories(self):
return self.maxAccessories
def setHatList(self, clothesList):
self.hatList = clothesList
def getHatList(self):
return self.hatList
def setGlassesList(self, clothesList):
self.glassesList = clothesList
def getGlassesList(self):
return self.glassesList
def setBackpackList(self, clothesList):
self.backpackList = clothesList
def getBackpackList(self):
return self.backpackList
def setShoesList(self, clothesList):
self.shoesList = clothesList
def getShoesList(self):
return self.shoesList
def isTrunkFull(self, extraAccessories=0):
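        """Accessories are packed three ints apiece (type, texture, colour),
        hence dividing the combined list length by 3 to count items."""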
numAccessories = (len(self.hatList) + len(self.glassesList) + len(self.backpackList) + len(self.shoesList)) / 3
return numAccessories + extraAccessories >= self.maxAccessories
def setMaxClothes(self, max):
self.maxClothes = max
def getMaxClothes(self):
return self.maxClothes
def getClothesTopsList(self):
return self.clothesTopsList
def setClothesTopsList(self, clothesList):
self.clothesTopsList = clothesList
def getClothesBottomsList(self):
return self.clothesBottomsList
def setClothesBottomsList(self, clothesList):
self.clothesBottomsList = clothesList
def catalogGenClothes(self, avId):
if avId == self.doId:
self.generateToonClothes()
self.loop('neutral')
def catalogGenAccessories(self, avId):
if avId == self.doId:
self.generateToonAccessories()
self.loop('neutral')
def isClosetFull(self, extraClothes=0):
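        """Tops are packed four ints per item and bottoms two, hence the
        division by 4 and 2 when counting outfits."""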
numClothes = len(self.clothesTopsList) / 4 + len(self.clothesBottomsList) / 2
return numClothes + extraClothes >= self.maxClothes
def setMaxHp(self, hitPoints):
DistributedPlayer.DistributedPlayer.setMaxHp(self, hitPoints)
if self.inventory:
self.inventory.updateGUI()
def setHp(self, hp):
DistributedPlayer.DistributedPlayer.setHp(self, hp)
if self.isDisguised:
self.suit.currHP = self.hp
self.suit.maxHP = self.maxHp
if self.maxHp == self.hp:
self.suit.corpMedallion.show()
self.suit.healthBar.hide()
else:
self.suit.corpMedallion.hide()
self.suit.healthBar.show()
self.suit.updateHealthBar(self.hp, True, True)
def died(self):
messenger.send(self.uniqueName('died'))
if self.isLocal():
target_sz = ZoneUtil.getSafeZoneId(self.defaultZone)
place = self.cr.playGame.getPlace()
if place and place.fsm:
place.fsm.request('died', [
{'loader': ZoneUtil.getLoaderName(target_sz), 'where': ZoneUtil.getWhereName(target_sz, 1),
'how': 'teleportIn',
'hoodId': target_sz,
'zoneId': target_sz,
'shardId': None,
'avId': -1,
'battle': 1}])
return
def setInterface(self, string):
pass
def setZonesVisited(self, hoods):
self.safeZonesVisited = hoods
def setHoodsVisited(self, hoods):
self.hoodsVisited = hoods
if ToontownGlobals.SellbotHQ in hoods or ToontownGlobals.CashbotHQ in hoods or ToontownGlobals.LawbotHQ in hoods:
self.setDisguisePageFlag(1)
def wrtReparentTo(self, parent):
DistributedSmoothNode.DistributedSmoothNode.wrtReparentTo(self, parent)
def setTutorialAck(self, tutorialAck):
self.tutorialAck = tutorialAck
def setEarnedExperience(self, earnedExp):
self.earnedExperience = earnedExp
def b_setTunnelIn(self, endX, tunnelOrigin):
timestamp = globalClockDelta.getFrameNetworkTime()
pos = tunnelOrigin.getPos(render)
h = tunnelOrigin.getH(render)
self.setTunnelIn(timestamp, endX, pos[0], pos[1], pos[2], h)
self.d_setTunnelIn(timestamp, endX, pos[0], pos[1], pos[2], h)
def d_setTunnelIn(self, timestamp, endX, x, y, z, h):
self.sendUpdate('setTunnelIn', [timestamp,
endX,
x,
y,
z,
h])
def setTunnelIn(self, timestamp, endX, x, y, z, h):
t = globalClockDelta.networkToLocalTime(timestamp)
self.handleTunnelIn(t, endX, x, y, z, h)
def getTunnelInToonTrack(self, endX, tunnelOrigin):
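        """Build the interval that pivots the toon around the tunnel mouth and
        then walks it straight out to endX in front of the tunnel."""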
pivotNode = tunnelOrigin.attachNewNode(self.uniqueName('pivotNode'))
pivotNode.setPos(*self.tunnelPivotPos)
pivotNode.setHpr(0, 0, 0)
pivotY = pivotNode.getY(tunnelOrigin)
endY = 5.0
straightLerpDur = abs(endY - pivotY) / ToonForwardSpeed
pivotDur = 2.0
pivotLerpDur = pivotDur * (90.0 / self.pivotAngle)
self.reparentTo(pivotNode)
self.setPos(0, 0, 0)
self.setX(tunnelOrigin, endX)
targetX = self.getX()
self.setX(self.tunnelCenterOffset + (targetX - self.tunnelCenterOffset) * (1.0 - self.tunnelCenterInfluence))
self.setHpr(tunnelOrigin, 0, 0, 0)
pivotNode.setH(-self.pivotAngle)
return Sequence(Wait(0.8), Parallel(LerpHprInterval(pivotNode, pivotDur, hpr=Point3(0, 0, 0), name=self.uniqueName('tunnelInPivot')), Sequence(Wait(pivotDur - pivotLerpDur), LerpPosInterval(self, pivotLerpDur, pos=Point3(targetX, 0, 0), name=self.uniqueName('tunnelInPivotLerpPos')))), Func(self.wrtReparentTo, render), Func(pivotNode.removeNode), LerpPosInterval(self, straightLerpDur, pos=Point3(endX, endY, 0.1), other=tunnelOrigin, name=self.uniqueName('tunnelInStraightLerp')))
def handleTunnelIn(self, startTime, endX, x, y, z, h):
self.stopSmooth()
tunnelOrigin = render.attachNewNode('tunnelOrigin')
tunnelOrigin.setPosHpr(x, y, z, h, 0, 0)
self.tunnelTrack = Sequence(self.getTunnelInToonTrack(endX, tunnelOrigin), Func(tunnelOrigin.removeNode), Func(self.startSmooth))
tOffset = globalClock.getFrameTime() - (startTime + self.smoother.getDelay())
if tOffset < 0.0:
self.tunnelTrack = Sequence(Wait(-tOffset), self.tunnelTrack)
self.tunnelTrack.start()
else:
self.tunnelTrack.start(tOffset)
def b_setTunnelOut(self, startX, startY, tunnelOrigin):
timestamp = globalClockDelta.getFrameNetworkTime()
pos = tunnelOrigin.getPos(render)
h = tunnelOrigin.getH(render)
self.setTunnelOut(timestamp, startX, startY, pos[0], pos[1], pos[2], h)
self.d_setTunnelOut(timestamp, startX, startY, pos[0], pos[1], pos[2], h)
def d_setTunnelOut(self, timestamp, startX, startY, x, y, z, h):
self.sendUpdate('setTunnelOut', [timestamp,
startX,
startY,
x,
y,
z,
h])
def setTunnelOut(self, timestamp, startX, startY, x, y, z, h):
t = globalClockDelta.networkToLocalTime(timestamp)
self.handleTunnelOut(t, startX, startY, x, y, z, h)
def getTunnelOutToonTrack(self, startX, startY, tunnelOrigin):
startPos = self.getPos(tunnelOrigin)
startHpr = self.getHpr(tunnelOrigin)
reducedAvH = PythonUtil.fitDestAngle2Src(startHpr[0], 180)
pivotNode = tunnelOrigin.attachNewNode(self.uniqueName('pivotNode'))
pivotNode.setPos(*self.tunnelPivotPos)
pivotNode.setHpr(0, 0, 0)
pivotY = pivotNode.getY(tunnelOrigin)
straightLerpDur = abs(startY - pivotY) / ToonForwardSpeed
pivotDur = 2.0
pivotLerpDur = pivotDur * (90.0 / self.pivotAngle)
def getTargetPos(self=self):
pos = self.getPos()
return Point3(self.tunnelCenterOffset + (pos[0] - self.tunnelCenterOffset) * (1.0 - self.tunnelCenterInfluence), pos[1], pos[2])
return Sequence(Parallel(LerpPosInterval(self, straightLerpDur, pos=Point3(startX, pivotY, 0.1), startPos=startPos, other=tunnelOrigin, name=self.uniqueName('tunnelOutStraightLerp')), LerpHprInterval(self, straightLerpDur * 0.8, hpr=Point3(reducedAvH, 0, 0), startHpr=startHpr, other=tunnelOrigin, name=self.uniqueName('tunnelOutStraightLerpHpr'))), Func(self.wrtReparentTo, pivotNode), Parallel(LerpHprInterval(pivotNode, pivotDur, hpr=Point3(-self.pivotAngle, 0, 0), name=self.uniqueName('tunnelOutPivot')), LerpPosInterval(self, pivotLerpDur, pos=getTargetPos, name=self.uniqueName('tunnelOutPivotLerpPos'))), Func(self.wrtReparentTo, render), Func(pivotNode.removeNode))
def handleTunnelOut(self, startTime, startX, startY, x, y, z, h):
tunnelOrigin = render.attachNewNode('tunnelOrigin')
tunnelOrigin.setPosHpr(x, y, z, h, 0, 0)
self.tunnelTrack = Sequence(Func(self.stopSmooth), self.getTunnelOutToonTrack(startX, startY, tunnelOrigin), Func(self.detachNode), Func(tunnelOrigin.removeNode))
tOffset = globalClock.getFrameTime() - (startTime + self.smoother.getDelay())
if tOffset < 0.0:
self.tunnelTrack = Sequence(Wait(-tOffset), self.tunnelTrack)
self.tunnelTrack.start()
else:
self.tunnelTrack.start(tOffset)
def enterTeleportOut(self, *args, **kw):
Toon.Toon.enterTeleportOut(self, *args, **kw)
if self.track:
self.track.delayDelete = DelayDelete.DelayDelete(self, 'enterTeleportOut')
def exitTeleportOut(self):
if self.track != None:
DelayDelete.cleanupDelayDeletes(self.track)
Toon.Toon.exitTeleportOut(self)
return
def b_setAnimState(self, animName, animMultiplier=1.0, callback=None, extraArgs=[]):
self.d_setAnimState(animName, animMultiplier, None, extraArgs)
self.setAnimState(animName, animMultiplier, None, None, callback, extraArgs)
return
def d_setAnimState(self, animName, animMultiplier=1.0, timestamp=None, extraArgs=[]):
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setAnimState', [animName, animMultiplier, timestamp])
def setAnimState(self, animName, animMultiplier=1.0, timestamp=None, animType=None, callback=None, extraArgs=[]):
if not animName or animName == 'None':
return
if timestamp == None:
ts = 0.0
else:
ts = globalClockDelta.localElapsedTime(timestamp)
if base.config.GetBool('check-invalid-anims', True):
if animMultiplier > 1.0 and animName in ('neutral', ):
animMultiplier = 1.0
if self.animFSM.getStateNamed(animName):
self.animFSM.request(animName, [animMultiplier,
ts,
callback,
extraArgs])
self.cleanupPieInHand()
return
def b_setEmoteState(self, animIndex, animMultiplier):
self.setEmoteState(animIndex, animMultiplier)
self.d_setEmoteState(animIndex, animMultiplier)
def d_setEmoteState(self, animIndex, animMultiplier):
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setEmoteState', [animIndex, animMultiplier, timestamp])
def setEmoteState(self, animIndex, animMultiplier, timestamp=None):
if animIndex == TTEmote.EmoteClear:
return
else:
if timestamp == None:
ts = 0.0
else:
ts = globalClockDelta.localElapsedTime(timestamp)
callback = None
extraArgs = []
extraArgs.insert(0, animIndex)
self.doEmote(animIndex, animMultiplier, ts, callback, extraArgs)
return
def setCogStatus(self, cogStatusList):
self.cogs = cogStatusList
def setCogCount(self, cogCountList):
self.cogCounts = cogCountList
if hasattr(self, 'suitPage'):
self.suitPage.updatePage()
def setCogRadar(self, radar):
self.cogRadar = radar
if hasattr(self, 'suitPage'):
self.suitPage.updateCogRadarButtons(radar)
def setBuildingRadar(self, radar):
self.buildingRadar = radar
if hasattr(self, 'suitPage'):
self.suitPage.updateBuildingRadarButtons(radar)
def setCogTypes(self, types):
self.cogTypes = types
if self.disguisePage:
self.disguisePage.updatePage()
def setCogLevels(self, levels):
self.cogLevels = levels
if self.disguisePage:
self.disguisePage.updatePage()
def getCogLevels(self):
return self.cogLevels
def setCogParts(self, parts):
self.cogParts = parts
if self.disguisePage:
self.disguisePage.updatePage()
def getCogParts(self):
return self.cogParts
def setCogMerits(self, merits):
self.cogMerits = merits
if self.disguisePage:
self.disguisePage.updatePage()
def readyForPromotion(self, dept):
merits = base.localAvatar.cogMerits[dept]
totalMerits = CogDisguiseGlobals.getTotalMerits(self, dept)
if merits >= totalMerits:
return 1
else:
return 0
def setCogIndex(self, index, cogType=0):
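        """index -1 removes any disguise; otherwise look up the suit head for
        that department from self.cogTypes (or an 'extra' suit for higher
        indices) and put the suit on, using the rental path when cogType asks
        for it."""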
self.cogIndex = (index, cogType)
if self.cogIndex[0] == -1:
if self.isDisguised:
self.takeOffSuit()
else:
if -1 <= index <= 3:
cogIndex = self.cogTypes[index] + SuitDNA.suitsPerDept * index
cog = SuitDNA.suitHeadTypes[cogIndex]
else:
cog = SuitDNA.extraSuitsIndex2Head.get(index)
if cogType in ToontownGlobals.PutOnSuitRental:
self.putOnSuit(index, cogType=cogType, rental=True)
else:
self.putOnSuit(cog, cogType=cogType)
def getCogIndex(self):
return self.cogIndex
def setCharIndex(self, index):
if index == -1:
if self.isClassicChar:
self.becomeToon()
else:
self.becomeChar(index)
def setTPose(self):
if self.isDisguised:
self.updateToonDNA(self.style, 1, True)
self.generateToonAccessories()
suitType = self.suit.style.name
cogType = self.isCog
if self.suit.isRental:
index = ToontownGlobals.CogDepts.index(self.suit.style.dept)
self.putOnSuit(suitType=index, setDisplayName=True, cogType=cogType, rental=True, tpose=True)
else:
self.putOnSuit(suitType=suitType, setDisplayName=True, cogType=cogType, tpose=True)
elif self.isClassicChar:
charType = CharDNA.charTypes.index(self.char.style.name)
self.becomeChar(charType, True)
else:
self.updateToonDNA(self.style, 1, True)
self.generateToonAccessories()
def setMuzzle(self, muzzle):
self.hideNormalMuzzle()
self.hideSurpriseMuzzle()
self.hideSadMuzzle()
self.hideSmileMuzzle()
self.hideAngryMuzzle()
self.hideLaughMuzzle()
if muzzle == 0:
self.showNormalMuzzle()
elif muzzle == 1:
self.showSurpriseMuzzle()
elif muzzle == 2:
self.showSadMuzzle()
elif muzzle == 3:
self.showSmileMuzzle()
elif muzzle == 4:
self.showAngryMuzzle()
elif muzzle == 5:
self.showLaughMuzzle()
def setEyes(self, eyes):
Toon.Toon.setEyes(self, eyes)
def isCog(self):
if self.cogIndex[0] == -1:
return 0
else:
return 1
def setDisguisePageFlag(self, flag):
if flag and hasattr(self, 'book'):
self.loadDisguisePages()
self.disguisePageFlag = flag
def setSosPageFlag(self, flag):
if flag and hasattr(self, 'book'):
self.loadSosPages()
self.sosPageFlag = flag
def setFishCollection(self, genusList, speciesList, weightList):
self.fishCollection = FishCollection.FishCollection()
self.fishCollection.makeFromNetLists(genusList, speciesList, weightList)
def getFishCollection(self):
return self.fishCollection
def setMaxFishTank(self, maxTank):
self.maxFishTank = maxTank
def getMaxFishTank(self):
return self.maxFishTank
def setFishTank(self, genusList, speciesList, weightList):
self.fishTank = FishTank.FishTank()
self.fishTank.makeFromNetLists(genusList, speciesList, weightList)
messenger.send(self.uniqueName('fishTankChange'))
def getFishTank(self):
return self.fishTank
def isFishTankFull(self):
return len(self.fishTank) >= self.maxFishTank
def setFishingRod(self, rodId):
self.fishingRod = rodId
def getFishingRod(self):
return self.fishingRod
def setFishingTrophies(self, trophyList):
self.fishingTrophies = trophyList
def getFishingTrophies(self):
return self.fishingTrophies
def setQuests(self, flattenedQuests):
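        """Quests arrive as one flat list; regroup it into 5-int quest
        descriptors and notify listeners when this is the local toon."""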
questList = []
questLen = 5
for i in xrange(0, len(flattenedQuests), questLen):
questList.append(flattenedQuests[i:i + questLen])
self.quests = questList
if self == base.localAvatar:
messenger.send('questsChanged')
def setQuestCarryLimit(self, limit):
self.questCarryLimit = limit
if self == base.localAvatar:
messenger.send('questsChanged')
def getQuestCarryLimit(self):
return self.questCarryLimit
def d_requestDeleteQuest(self, questDesc):
self.sendUpdate('requestDeleteQuest', [list(questDesc)])
def setMaxCarry(self, maxCarry):
self.maxCarry = maxCarry
if self.inventory:
self.inventory.updateGUI()
def getMaxCarry(self):
return self.maxCarry
def setCheesyEffect(self, effect, hoodId, expireTime):
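        """Store the cheesy effect, the hood it is limited to and its expiry
        (apparently minutes of server time), then apply or lerp it in
        depending on how far generation has progressed."""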
self.savedCheesyEffect = effect
self.savedCheesyHoodId = hoodId
self.savedCheesyExpireTime = expireTime
if self == base.localAvatar:
self.notify.debug('setCheesyEffect(%s, %s, %s)' % (effect, hoodId, expireTime))
if effect != ToontownGlobals.CENormal:
serverTime = time.time() + self.cr.getServerDelta()
duration = expireTime * 60 - serverTime
if duration < 0:
self.notify.debug('effect should have expired %s ago.' % PythonUtil.formatElapsedSeconds(-duration))
else:
self.notify.debug('effect will expire in %s.' % PythonUtil.formatElapsedSeconds(duration))
if self.activeState == DistributedObject.ESGenerated:
self.reconsiderCheesyEffect(lerpTime=0.5)
else:
self.reconsiderCheesyEffect()
def reconsiderCheesyEffect(self, lerpTime=0):
effect = self.savedCheesyEffect
hoodId = self.savedCheesyHoodId
if not self.cr.areCheesyEffectsAllowed():
effect = CENormal
if hoodId != 0:
try:
currentHoodId = base.cr.playGame.hood.id
except:
currentHoodId = None
if hoodId == 1:
if currentHoodId == ToontownGlobals.ToontownCentral:
effect = CENormal
elif currentHoodId != None and currentHoodId != hoodId:
effect = CENormal
if self.ghostMode:
effect = CEGhost
self.applyCheesyEffect(effect, lerpTime=lerpTime)
return
def setGhostMode(self, flag):
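        """Toggle ghost mode: re-evaluate the applied cheesy effect, swap the
        collision mask and, for the local toon, the ghost/walk controls."""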
if self.ghostMode != flag:
self.ghostMode = flag
if not hasattr(self, 'cr'):
return
if self.activeState <= DistributedObject.ESDisabled:
self.notify.debug('not applying cheesy effect to disabled Toon')
elif self.activeState == DistributedObject.ESGenerating:
self.reconsiderCheesyEffect()
elif self.activeState == DistributedObject.ESGenerated:
self.reconsiderCheesyEffect(lerpTime=0.5)
else:
self.notify.warning('unknown activeState: %s' % self.activeState)
self.showNametag2d()
self.showNametag3d()
if hasattr(self, 'collNode'):
if self.ghostMode:
self.collNode.setCollideMask(ToontownGlobals.GhostBitmask)
else:
self.collNode.setCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.PieBitmask)
if self.isLocal():
if self.ghostMode:
self.useGhostControls()
else:
self.useWalkControls()
if hasattr(base, 'wantPets') and base.wantPets:
def setPetTrickPhrases(self, petTricks):
self.petTrickPhrases = petTricks
if self.isLocal():
messenger.send('petTrickPhrasesChanged')
def setCustomMessages(self, customMessages):
self.customMessages = customMessages
if self.isLocal():
messenger.send('customMessagesChanged')
def setResistanceMessages(self, resistanceMessages):
self.resistanceMessages = resistanceMessages
if self.isLocal():
messenger.send('resistanceMessagesChanged')
def getResistanceMessageCharges(self, textId):
msgs = self.resistanceMessages
for i in xrange(len(msgs)):
if msgs[i][0] == textId:
return msgs[i][1]
return 0
def setCatalogSchedule(self, currentWeek, nextTime):
self.catalogScheduleCurrentWeek = currentWeek
self.catalogScheduleNextTime = nextTime
if self.isLocal():
self.notify.debug('setCatalogSchedule(%s, %s)' % (currentWeek, nextTime))
if nextTime:
serverTime = time.time() + self.cr.getServerDelta()
duration = nextTime * 60 - serverTime
self.notify.debug('next catalog in %s.' % PythonUtil.formatElapsedSeconds(duration))
def setCatalog(self, monthlyCatalog, weeklyCatalog, backCatalog):
self.monthlyCatalog = CatalogItemList.CatalogItemList(monthlyCatalog)
self.weeklyCatalog = CatalogItemList.CatalogItemList(weeklyCatalog)
self.backCatalog = CatalogItemList.CatalogItemList(backCatalog)
if self.catalogNotify == ToontownGlobals.NewItems:
self.catalogNotify = ToontownGlobals.OldItems
def setCatalogNotify(self, catalogNotify, mailboxNotify):
if len(self.weeklyCatalog) + len(self.monthlyCatalog) == 0:
catalogNotify = ToontownGlobals.NoItems
if len(self.mailboxContents) == 0:
mailboxNotify = ToontownGlobals.NoItems
self.catalogNotify = catalogNotify
self.mailboxNotify = mailboxNotify
if self.isLocal():
self.gotCatalogNotify = 1
self.refreshOnscreenButtons()
print 'local'
def setDeliverySchedule(self, onOrder):
self.onOrder = CatalogItemList.CatalogItemList(onOrder, store=CatalogItem.Customization | CatalogItem.DeliveryDate)
if self == base.localAvatar:
nextTime = self.onOrder.getNextDeliveryDate()
if nextTime != None:
serverTime = time.time() + self.cr.getServerDelta()
duration = nextTime * 60 - serverTime
self.notify.debug('next delivery in %s.' % PythonUtil.formatElapsedSeconds(duration))
messenger.send('setDeliverySchedule-%s' % self.doId)
return
def setMailboxContents(self, mailboxContents):
self.mailboxContents = CatalogItemList.CatalogItemList(mailboxContents, store=CatalogItem.Customization)
messenger.send('setMailboxContents-%s' % self.doId)
def setAwardSchedule(self, onOrder):
self.onAwardOrder = CatalogItemList.CatalogItemList(onOrder, store=CatalogItem.Customization | CatalogItem.DeliveryDate)
if self == base.localAvatar:
nextTime = self.onAwardOrder.getNextDeliveryDate()
if nextTime != None:
serverTime = time.time() + self.cr.getServerDelta()
duration = nextTime * 60 - serverTime
self.notify.debug('next delivery in %s.' % PythonUtil.formatElapsedSeconds(duration))
messenger.send('setAwardSchedule-%s' % self.doId)
return
def setAwardMailboxContents(self, awardMailboxContents):
self.notify.debug('Setting awardMailboxContents to %s.' % awardMailboxContents)
self.awardMailboxContents = CatalogItemList.CatalogItemList(awardMailboxContents, store=CatalogItem.Customization)
self.notify.debug('awardMailboxContents is %s.' % self.awardMailboxContents)
messenger.send('setAwardMailboxContents-%s' % self.doId)
def setAwardNotify(self, awardNotify):
self.notify.debug('setAwardNotify( %s )' % awardNotify)
self.awardNotify = awardNotify
if self.isLocal():
self.gotCatalogNotify = 1
self.refreshOnscreenButtons()
def setGiftSchedule(self, onGiftOrder):
self.onGiftOrder = CatalogItemList.CatalogItemList(onGiftOrder, store=CatalogItem.Customization | CatalogItem.DeliveryDate)
if self == base.localAvatar:
nextTime = self.onGiftOrder.getNextDeliveryDate()
if nextTime != None:
serverTime = time.time() + self.cr.getServerDelta()
duration = nextTime * 60 - serverTime
self.notify.debug('next delivery in %s.' % PythonUtil.formatElapsedSeconds(duration))
return
def playSplashEffect(self, x, y, z):
if localAvatar.zoneId not in [ToontownGlobals.DonaldsDock, ToontownGlobals.OutdoorZone] and (not hasattr(localAvatar, 'inEstate') or localAvatar.inEstate != 1):
if random.random() < 0.1:
self.sendLogSuspiciousEvent('AvatarHackWarning! playing hacked splash effect')
return
from toontown.effects import Splash
if self.splash == None:
self.splash = Splash.Splash(render)
self.splash.setPos(x, y, z)
self.splash.setScale(2)
self.splash.play()
place = base.cr.playGame.getPlace()
if place:
if hasattr(place.loader, 'submergeSound'):
base.playSfx(place.loader.submergeSound, node=self)
return
def d_playSplashEffect(self, x, y, z):
self.sendUpdate('playSplashEffect', [x, y, z])
def setTrackAccess(self, trackArray):
self.trackArray = trackArray
if self.inventory:
self.inventory.updateGUI()
def getTrackAccess(self):
return self.trackArray
def hasTrackAccess(self, track):
return self.trackArray[track]
def setTrackProgress(self, trackId, progress):
self.trackProgressId = trackId
self.trackProgress = progress
if hasattr(self, 'trackPage'):
self.trackPage.updatePage()
def getTrackProgress(self):
return [
self.trackProgressId, self.trackProgress]
def getTrackProgressAsArray(self, maxLength=15):
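        """Expand the trackProgress bitfield into a list of maxLength 0/1
        digits, least significant bit first."""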
shifts = map(operator.rshift, maxLength * [self.trackProgress], xrange(maxLength - 1, -1, -1))
digits = map(operator.mod, shifts, maxLength * [2])
digits.reverse()
return digits
def setTeleportAccess(self, teleportZoneArray):
self.teleportZoneArray = teleportZoneArray
def getTeleportAccess(self):
return self.teleportZoneArray
def hasTeleportAccess(self, zoneId):
return zoneId in self.teleportZoneArray
def setQuestHistory(self, questList):
self.questHistory = questList
def getQuestHistory(self):
return self.questHistory
def setRewardHistory(self, rewardTier, rewardList):
self.rewardTier = rewardTier
self.rewardHistory = rewardList
def getRewardHistory(self):
return (
self.rewardTier, self.rewardHistory)
def doSmoothTask(self, task):
self.smoother.computeAndApplySmoothPosHpr(self, self)
self.setSpeed(self.smoother.getSmoothForwardVelocity(), self.smoother.getSmoothRotationalVelocity())
return Task.cont
def d_setParent(self, parentToken):
DistributedSmoothNode.DistributedSmoothNode.d_setParent(self, parentToken)
def setEmoteAccess(self, bits):
self.emoteAccess = bits
if self == base.localAvatar:
messenger.send('emotesChanged')
def b_setHouseId(self, id):
self.setHouseId(id)
self.d_setHouseId(id)
def d_setHouseId(self, id):
self.sendUpdate('setHouseId', [id])
def setHouseId(self, id):
self.houseId = id
def getHouseId(self):
return self.houseId
def setPosIndex(self, index):
self.posIndex = index
def getPosIndex(self):
return self.posIndex
def b_setSpeedChatStyleIndex(self, index):
realIndexToSend = 0
if type(index) == type(0) and 0 <= index and index < len(speedChatStyles):
realIndexToSend = index
else:
base.cr.centralLogger.writeClientEvent('Hacker alert b_setSpeedChatStyleIndex invalid')
self.setSpeedChatStyleIndex(realIndexToSend)
self.d_setSpeedChatStyleIndex(realIndexToSend)
return
def d_setSpeedChatStyleIndex(self, index):
realIndexToSend = 0
if type(index) == type(0) and 0 <= index and index < len(speedChatStyles):
realIndexToSend = index
else:
base.cr.centralLogger.writeClientEvent('Hacker alert d_setSpeedChatStyleIndex invalid')
self.sendUpdate('setSpeedChatStyleIndex', [realIndexToSend])
def setSpeedChatStyleIndex(self, index):
realIndexToUse = 0
if type(index) == type(0) and 0 <= index and index < len(speedChatStyles):
realIndexToUse = index
else:
base.cr.centralLogger.writeClientEvent('Hacker victim setSpeedChatStyleIndex invalid attacking toon = %d' % self.doId)
self.speedChatStyleIndex = realIndexToUse
nameKey, arrowColor, rolloverColor, frameColor = speedChatStyles[realIndexToUse]
self.nametag.setQtColor(VBase4(frameColor[0], frameColor[1], frameColor[2], 1))
if self.isLocal():
messenger.send('SpeedChatStyleChange', [])
def getSpeedChatStyleIndex(self):
return self.speedChatStyleIndex
def setMaxMoney(self, maxMoney):
self.maxMoney = maxMoney
def getMaxMoney(self):
return self.maxMoney
def setMoney(self, money):
if money != self.money:
self.money = money
messenger.send(self.uniqueName('moneyChange'), [self.money])
def getMoney(self):
return self.money
def setMaxBankMoney(self, maxMoney):
self.maxBankMoney = maxMoney
def getMaxBankMoney(self):
return self.maxBankMoney
def setBankMoney(self, money):
self.bankMoney = money
messenger.send(self.uniqueName('bankMoneyChange'), [self.bankMoney])
def getBankMoney(self):
return self.bankMoney
def getTotalMoney(self):
return self.getBankMoney() + self.getMoney()
def setEmblems(self, emblems):
if self.emblems != emblems:
self.emblems = emblems
messenger.send(self.uniqueName('emblemsChange'), [self.emblems])
def getEmblems(self):
return self.emblems
def isEnoughEmblemsToBuy(self, itemEmblemPrices):
for emblemIndex, emblemPrice in enumerate(itemEmblemPrices):
if emblemIndex >= len(self.emblems):
return False
if self.emblems[emblemIndex] < emblemPrice:
return False
return True
def isEnoughMoneyAndEmblemsToBuy(self, moneyPrice, itemEmblemPrices):
if self.getTotalMoney() < moneyPrice:
return False
for emblemIndex, emblemPrice in enumerate(itemEmblemPrices):
if emblemIndex >= len(self.emblems):
return False
if self.emblems[emblemIndex] < emblemPrice:
return False
return True
def presentPie(self, x, y, z, h, p, r, timestamp32):
if self.numPies <= 0:
return
else:
if not launcher.getPhaseComplete(5):
return
lastTossTrack = Sequence()
if self.tossTrack:
lastTossTrack = self.tossTrack
tossTrack = None
ts = globalClockDelta.localElapsedTime(timestamp32, bits=32)
ts -= self.smoother.getDelay()
ival = self.getPresentPieInterval(x, y, z, h, p, r)
if ts > 0:
startTime = ts
lastTossTrack.finish()
else:
ival = Sequence(Wait(-ts), ival)
lastTossTrack.finish()
startTime = 0
ival = Sequence(ival)
ival.start(startTime)
self.tossTrack = ival
return
def tossPie(self, x, y, z, h, p, r, sequence, power, timestamp32):
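        """Replay a pie toss reported over the network: decrement the pie
        count, rebuild the toss and fly intervals and start them offset by the
        network timestamp and smoother delay."""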
if self.numPies <= 0:
return
else:
if self.numPies != ToontownGlobals.FullPies:
self.setNumPies(self.numPies - 1)
self.lastTossedPie = globalClock.getFrameTime()
if not launcher.getPhaseComplete(5):
return
lastTossTrack = Sequence()
if self.tossTrack:
lastTossTrack = self.tossTrack
tossTrack = None
lastPieTrack = Sequence()
if sequence in self.pieTracks:
lastPieTrack = self.pieTracks[sequence]
del self.pieTracks[sequence]
ts = globalClockDelta.localElapsedTime(timestamp32, bits=32)
ts -= self.smoother.getDelay()
toss, pie, flyPie = self.getTossPieInterval(x, y, z, h, p, r, power)
if ts > 0:
startTime = ts
lastTossTrack.finish()
lastPieTrack.finish()
else:
toss = Sequence(Wait(-ts), toss)
pie = Sequence(Wait(-ts), pie)
lastTossTrack.finish()
lastPieTrack.finish()
startTime = 0
self.tossTrack = toss
toss.start(startTime)
pie = Sequence(pie, Func(self.pieFinishedFlying, sequence))
self.pieTracks[sequence] = pie
pie.start(startTime)
return
def pieFinishedFlying(self, sequence):
if sequence in self.pieTracks:
del self.pieTracks[sequence]
def pieFinishedSplatting(self, sequence):
if sequence in self.splatTracks:
del self.splatTracks[sequence]
def pieSplat(self, x, y, z, sequence, pieCode, timestamp32):
if self.isLocal():
return
elapsed = globalClock.getFrameTime() - self.lastTossedPie
if elapsed > 30:
return
if not launcher.getPhaseComplete(5):
return
lastPieTrack = Sequence()
if sequence in self.pieTracks:
lastPieTrack = self.pieTracks[sequence]
del self.pieTracks[sequence]
if sequence in self.splatTracks:
lastSplatTrack = self.splatTracks[sequence]
del self.splatTracks[sequence]
lastSplatTrack.finish()
ts = globalClockDelta.localElapsedTime(timestamp32, bits=32)
ts -= self.smoother.getDelay()
splat = self.getPieSplatInterval(x, y, z, pieCode)
splat = Sequence(Func(messenger.send, 'pieSplat', [self, pieCode]), splat)
if ts > 0:
startTime = ts
lastPieTrack.finish()
else:
splat = Sequence(Wait(-ts), splat)
startTime = 0
splat = Sequence(splat, Func(self.pieFinishedSplatting, sequence))
self.splatTracks[sequence] = splat
splat.start(startTime)
def cleanupPies(self):
for track in self.pieTracks.values():
track.finish()
self.pieTracks = {}
for track in self.splatTracks.values():
track.finish()
self.splatTracks = {}
self.cleanupPieInHand()
def cleanupPieInHand(self):
if self.tossTrack:
self.tossTrack.finish()
self.tossTrack = None
self.cleanupPieModel()
return
def setNumPies(self, numPies):
self.numPies = numPies
if self.isLocal():
self.updatePieButton()
if numPies == 0:
self.interruptPie()
def setPieType(self, pieType):
self.pieType = pieType
if self.isLocal():
self.updatePieButton()
def setTrophyScore(self, score):
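        """Update the trophy score and swap the nametag star; each pair of
        TrophyStarLevels thresholds selects a larger, brighter star and the
        upper threshold of the pair makes it spin."""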
self.trophyScore = score
if self.trophyStar != None:
self.trophyStar.removeNode()
self.trophyStar = None
if self.trophyStarSpeed != 0:
taskMgr.remove(self.uniqueName('starSpin'))
self.trophyStarSpeed = 0
if hasattr(self, 'gmIcon') and self.gmIcon:
return
else:
if self.trophyScore >= ToontownGlobals.TrophyStarLevels[4]:
self.trophyStar = loader.loadModel('phase_3.5/models/gui/name_star')
self.trophyStar.reparentTo(self.nametag.getNameIcon())
self.trophyStar.setScale(2)
self.trophyStar.setZ(2)
self.trophyStar.setColor(ToontownGlobals.TrophyStarColors[4])
self.trophyStarSpeed = 15
if self.trophyScore >= ToontownGlobals.TrophyStarLevels[5]:
taskMgr.add(self.__starSpin, self.uniqueName('starSpin'))
elif self.trophyScore >= ToontownGlobals.TrophyStarLevels[2]:
self.trophyStar = loader.loadModel('phase_3.5/models/gui/name_star')
self.trophyStar.reparentTo(self.nametag.getNameIcon())
self.trophyStar.setScale(1.5)
self.trophyStar.setZ(1.6)
self.trophyStar.setColor(ToontownGlobals.TrophyStarColors[2])
self.trophyStarSpeed = 10
if self.trophyScore >= ToontownGlobals.TrophyStarLevels[3]:
taskMgr.add(self.__starSpin, self.uniqueName('starSpin'))
elif self.trophyScore >= ToontownGlobals.TrophyStarLevels[0]:
self.trophyStar = loader.loadModel('phase_3.5/models/gui/name_star')
self.trophyStar.reparentTo(self.nametag.getNameIcon())
self.trophyStar.setScale(1.5)
self.trophyStar.setZ(1.6)
self.trophyStar.setColor(ToontownGlobals.TrophyStarColors[0])
self.trophyStarSpeed = 8
if self.trophyScore >= ToontownGlobals.TrophyStarLevels[1]:
taskMgr.add(self.__starSpin, self.uniqueName('starSpin'))
return
def __starSpin(self, task):
now = globalClock.getFrameTime()
r = now * self.trophyStarSpeed % 360.0
self.trophyStar.setR(r)
return Task.cont
def getZoneId(self):
place = base.cr.playGame.getPlace()
if place:
return place.getZoneId()
        else:
            return None
def getRequestID(self):
return CLIENT_GET_AVATAR_DETAILS
def announceBingo(self):
self.setChatAbsolute(TTLocalizer.FishBingoBingo, CFSpeech | CFTimeout)
def squish(self, damage, noAnim=False):
if self == base.localAvatar:
if not noAnim:
base.cr.playGame.getPlace().fsm.request('squished')
self.stunToon()
self.setZ(self.getZ(render) + 0.025)
def d_squish(self, damage):
self.sendUpdate('squish', [damage])
def b_squish(self, damage, noAnim=False):
if not self.isStunned:
self.squish(damage, noAnim)
self.d_squish(damage)
self.playDialogueForString('!')
def getShadowJoint(self):
return Toon.Toon.getShadowJoint(self)
if base.wantKarts:
def hasKart(self):
return self.kartDNA[KartDNA.bodyType] != -1
def getKartDNA(self):
return self.kartDNA
def setTickets(self, numTickets):
self.tickets = numTickets
def getTickets(self):
return self.tickets
def getAccessoryByType(self, accType):
return self.kartDNA[accType]
def setCurrentKart(self, avId):
self.kartId = avId
def releaseKart(self):
self.kartId = None
return
def setKartBodyType(self, bodyType):
self.kartDNA[KartDNA.bodyType] = bodyType
def getKartBodyType(self):
return self.kartDNA[KartDNA.bodyType]
def setKartBodyColor(self, bodyColor):
self.kartDNA[KartDNA.bodyColor] = bodyColor
def getKartBodyColor(self):
return self.kartDNA[KartDNA.bodyColor]
def setKartAccessoryColor(self, accColor):
self.kartDNA[KartDNA.accColor] = accColor
def getKartAccessoryColor(self):
return self.kartDNA[KartDNA.accColor]
def setKartEngineBlockType(self, ebType):
self.kartDNA[KartDNA.ebType] = ebType
def getKartEngineBlockType(self):
return self.kartDNA[KartDNA.ebType]
def setKartSpoilerType(self, spType):
self.kartDNA[KartDNA.spType] = spType
def getKartSpoilerType(self):
return self.kartDNA[KartDNA.spType]
def setKartFrontWheelWellType(self, fwwType):
self.kartDNA[KartDNA.fwwType] = fwwType
def getKartFrontWheelWellType(self):
return self.kartDNA[KartDNA.fwwType]
def setKartBackWheelWellType(self, bwwType):
self.kartDNA[KartDNA.bwwType] = bwwType
def getKartBackWheelWellType(self):
return self.kartDNA[KartDNA.bwwType]
def setKartRimType(self, rimsType):
self.kartDNA[KartDNA.rimsType] = rimsType
def setKartDecalType(self, decalType):
self.kartDNA[KartDNA.decalType] = decalType
def getKartDecalType(self):
return self.kartDNA[KartDNA.decalType]
def getKartRimType(self):
return self.kartDNA[KartDNA.rimsType]
def setKartAccessoriesOwned(self, accessories):
while len(accessories) < 16:
accessories.append(-1)
self.accessories = accessories
def getKartAccessoriesOwned(self):
owned = copy.deepcopy(self.accessories)
while InvalidEntry in owned:
owned.remove(InvalidEntry)
return owned
def requestKartDNAFieldUpdate(self, dnaField, fieldValue):
self.notify.debug('requestKartDNAFieldUpdate - dnaField %s, fieldValue %s' % (dnaField, fieldValue))
self.sendUpdate('updateKartDNAField', [dnaField, fieldValue])
def requestAddOwnedAccessory(self, accessoryId):
self.notify.debug('requestAddOwnedAccessor - purchased accessory %s' % accessoryId)
self.sendUpdate('addOwnedAccessory', [accessoryId])
def requestRemoveOwnedAccessory(self, accessoryId):
self.notify.debug('requestRemoveOwnedAccessor - removed accessory %s' % accessoryId)
self.sendUpdate('removeOwnedAccessory', [accessoryId])
def setKartingTrophies(self, trophyList):
self.kartingTrophies = trophyList
def getKartingTrophies(self):
return self.kartingTrophies
def setKartingHistory(self, history):
self.kartingHistory = history
def getKartingHistory(self):
return self.kartingHistory
def setKartingPersonalBest(self, bestTimes):
self.kartingPersonalBest = bestTimes
def getKartingPersonalBest(self):
return self.kartingPersonalBest
def setKartingPersonalBest2(self, bestTimes2):
self.kartingPersonalBest2 = bestTimes2
def getKartingPersonalBest2(self):
return self.kartingPersonalBest2
def getKartingPersonalBestAll(self):
return self.kartingPersonalBest + self.kartingPersonalBest2
if hasattr(base, 'wantPets') and base.wantPets:
def setPetId(self, petId):
self.petId = petId
if petId == 0:
self.petDNA = None
elif self.isLocal():
base.cr.addPetToFriendsMap()
return
def getPetId(self):
return self.petId
def getPetId(self):
return self.petId
def hasPet(self):
return self.petId != 0
def b_setPetTutorialDone(self, bDone):
self.d_setPetTutorialDone(bDone)
self.setPetTutorialDone(bDone)
def d_setPetTutorialDone(self, bDone):
self.sendUpdate('setPetTutorialDone', [bDone])
def setPetTutorialDone(self, bDone):
self.bPetTutorialDone = bDone
def b_setFishBingoTutorialDone(self, bDone):
self.d_setFishBingoTutorialDone(bDone)
self.setFishBingoTutorialDone(bDone)
def d_setFishBingoTutorialDone(self, bDone):
self.sendUpdate('setFishBingoTutorialDone', [bDone])
def setFishBingoTutorialDone(self, bDone):
self.bFishBingoTutorialDone = bDone
def b_setFishBingoMarkTutorialDone(self, bDone):
self.d_setFishBingoMarkTutorialDone(bDone)
self.setFishBingoMarkTutorialDone(bDone)
def d_setFishBingoMarkTutorialDone(self, bDone):
self.sendUpdate('setFishBingoMarkTutorialDone', [bDone])
def setFishBingoMarkTutorialDone(self, bDone):
self.bFishBingoMarkTutorialDone = bDone
def b_setPetMovie(self, petId, flag):
self.d_setPetMovie(petId, flag)
self.setPetMovie(petId, flag)
def d_setPetMovie(self, petId, flag):
self.sendUpdate('setPetMovie', [petId, flag])
def setPetMovie(self, petId, flag):
pass
def lookupPetDNA(self):
if self.petId and not self.petDNA:
from toontown.pets import PetDetail
PetDetail.PetDetail(self.petId, self.__petDetailsLoaded)
def __petDetailsLoaded(self, pet):
self.petDNA = pet.style
def trickOrTreatTargetMet(self, beanAmount):
if self.effect:
self.effect.stop()
self.effect = TrickOrTreatTargetEffect(beanAmount)
self.effect.play()
def trickOrTreatMilestoneMet(self):
if self.effect:
self.effect.stop()
self.effect = TrickOrTreatMilestoneEffect()
self.effect.play()
def winterCarolingTargetMet(self, beanAmount):
if self.effect:
self.effect.stop()
self.effect = WinterCarolingEffect(beanAmount)
self.effect.play()
def d_reqCogSummons(self, type, suitIndex):
if type == 'single':
pass
elif type == 'building':
pass
elif type == 'invasion':
pass
self.sendUpdate('reqCogSummons', [type, suitIndex])
def cogSummonsResponse(self, returnCode, suitIndex, doId):
messenger.send('cog-summons-response', [returnCode, suitIndex, doId])
def setCogSummonsEarned(self, cogSummonsEarned):
self.cogSummonsEarned = cogSummonsEarned
def getCogSummonsEarned(self):
return self.cogSummonsEarned
def hasCogSummons(self, suitIndex, type=None):
summons = self.getCogSummonsEarned()
curSetting = summons[suitIndex]
if type == 'single':
return curSetting & 1
if type == 'building':
return curSetting & 2
if type == 'invasion':
return curSetting & 4
return curSetting
def setFlowerCollection(self, speciesList, varietyList):
self.flowerCollection = FlowerCollection.FlowerCollection()
self.flowerCollection.makeFromNetLists(speciesList, varietyList)
def getFlowerCollection(self):
return self.flowerCollection
def setMaxFlowerBasket(self, maxFlowerBasket):
self.maxFlowerBasket = maxFlowerBasket
def getMaxFlowerBasket(self):
return self.maxFlowerBasket
def isFlowerBasketFull(self):
return len(self.flowerBasket) >= self.maxFlowerBasket
def setFlowerBasket(self, speciesList, varietyList):
self.flowerBasket = FlowerBasket.FlowerBasket()
self.flowerBasket.makeFromNetLists(speciesList, varietyList)
messenger.send('flowerBasketUpdated')
def getFlowerBasket(self):
return self.flowerBasket
def setShovel(self, shovelId):
self.shovel = shovelId
def attachShovel(self):
self.shovelModel = self.getShovelModel()
self.shovelModel.reparentTo(self.rightHand)
return self.shovelModel
def detachShovel(self):
if self.shovelModel:
self.shovelModel.removeNode()
def getShovelModel(self):
shovels = loader.loadModel('phase_5.5/models/estate/shovels')
shovelId = ['A',
'B',
'C',
'D'][self.shovel]
shovel = shovels.find('**/shovel' + shovelId)
shovel.setH(-90)
shovel.setP(216)
shovel.setX(0.2)
shovel.detachNode()
shovels.removeNode()
return shovel
def setShovelSkill(self, skillLevel):
self.shovelSkill = skillLevel
def getBoxCapability(self):
return GardenGlobals.getShovelPower(self.shovel, self.shovelSkill)
def setWateringCan(self, wateringCanId):
self.wateringCan = wateringCanId
def attachWateringCan(self):
self.wateringCanModel = self.getWateringCanModel()
self.wateringCanModel.reparentTo(self.rightHand)
return self.wateringCanModel
def detachWateringCan(self):
if self.wateringCanModel:
self.wateringCanModel.removeNode()
def getWateringCanModel(self):
scalePosHprsTable = ((0.25, 0.1, 0, 0.2, -90, -125, -45),
(0.2, 0.0, 0.25, 0.2, -90, -125, -45),
(0.2, 0.2, 0.1, 0.2, -90, -125, -45),
(0.2, 0.0, 0.25, 0.2, -90, -125, -45))
cans = loader.loadModel('phase_5.5/models/estate/watering_cans')
canId = ['A',
'B',
'C',
'D'][self.wateringCan]
can = cans.find('**/water_can' + canId)
can.setScale(scalePosHprsTable[self.wateringCan][0])
can.setPos(scalePosHprsTable[self.wateringCan][1], scalePosHprsTable[self.wateringCan][2], scalePosHprsTable[self.wateringCan][3])
can.setHpr(scalePosHprsTable[self.wateringCan][4], scalePosHprsTable[self.wateringCan][5], scalePosHprsTable[self.wateringCan][6])
can.detachNode()
cans.removeNode()
if hasattr(base, 'rwc'):
if base.rwc:
if hasattr(self, 'wateringCan2'):
self.wateringCan2.removeNode()
self.wateringCan2 = can.copyTo(self.rightHand)
            else:
                if hasattr(self, 'wateringCan2'):
                    self.wateringCan2.removeNode()
return can
def setWateringCanSkill(self, skillLevel):
self.wateringCanSkill = skillLevel
def setGardenSpecials(self, specials):
self.gardenSpecials = specials
if hasattr(self, 'gardenPage') and self.gardenPage:
self.gardenPage.updatePage()
def getGardenSpecials(self):
return self.gardenSpecials
def getMyTrees(self):
treeDict = self.cr.getObjectsOfClass(DistributedGagTree.DistributedGagTree)
trees = []
for tree in treeDict.values():
if tree.getOwnerId() == self.doId:
trees.append(tree)
if not trees:
pass
return trees
def isTreePlanted(self, track, level):
trees = self.getMyTrees()
for tree in trees:
if tree.gagTrack == track and tree.gagLevel == level:
return True
return False
def doIHaveRequiredTrees(self, track, level):
trees = self.getMyTrees()
trackAndLevelList = []
for tree in trees:
trackAndLevelList.append((tree.gagTrack, tree.gagLevel))
haveRequired = True
for curLevel in xrange(level):
testTuple = (
track, curLevel)
if testTuple not in trackAndLevelList:
haveRequired = False
break
return haveRequired
def setTrackBonusLevel(self, trackArray):
self.trackBonusLevel = trackArray
if self.inventory:
self.inventory.updateGUI()
def getTrackBonusLevel(self, track=None):
if track == None:
return self.trackBonusLevel
else:
return self.trackBonusLevel[track]
return
def checkGagBonus(self, track, level):
trackBonus = self.getTrackBonusLevel(track)
return trackBonus >= level
def setGardenTrophies(self, trophyList):
self.gardenTrophies = trophyList
def getGardenTrophies(self):
return self.gardenTrophies
def useSpecialResponse(self, returnCode):
messenger.send('use-special-response', [returnCode])
def setGardenStarted(self, bStarted):
self.gardenStarted = bStarted
def getGardenStarted(self):
return self.gardenStarted
def sendToGolfCourse(self, zoneId):
print 'sending to golfCourse'
hoodId = self.cr.playGame.hood.hoodId
golfRequest = {'loader': 'safeZoneLoader', 'where': 'golfcourse',
'how': 'teleportIn',
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None,
'avId': -1}
base.cr.playGame.getPlace().requestLeave(golfRequest)
return
def getGolfTrophies(self):
return self.golfTrophies
def getGolfCups(self):
return self.golfCups
def setGolfHistory(self, history):
self.golfHistory = history
self.golfTrophies = GolfGlobals.calcTrophyListFromHistory(self.golfHistory)
self.golfCups = GolfGlobals.calcCupListFromHistory(self.golfHistory)
if hasattr(self, 'book'):
self.addGolfPage()
def getGolfHistory(self):
return self.golfHistory
def hasPlayedGolf(self):
retval = False
for historyValue in self.golfHistory:
if historyValue:
retval = True
break
return retval
def setPackedGolfHoleBest(self, packedHoleBest):
unpacked = GolfGlobals.unpackGolfHoleBest(packedHoleBest)
self.setGolfHoleBest(unpacked)
def setGolfHoleBest(self, holeBest):
self.golfHoleBest = holeBest
def getGolfHoleBest(self):
return self.golfHoleBest
def setGolfCourseBest(self, courseBest):
self.golfCourseBest = courseBest
def getGolfCourseBest(self):
return self.golfCourseBest
def setUnlimitedSwing(self, unlimitedSwing):
self.unlimitedSwing = unlimitedSwing
def getUnlimitedSwing(self):
return self.unlimitedSwing
def getPinkSlips(self):
if hasattr(self, 'pinkSlips'):
return self.pinkSlips
else:
return 0
def setPinkSlips(self, pinkSlips):
self.pinkSlips = pinkSlips
def setAccess(self, access):
self.setGameAccess(access)
self.setDisplayName(self.getName())
def setGameAccess(self, access):
self.gameAccess = access
def getGameAccess(self):
if hasattr(self, 'gameAccess'):
return self.gameAccess
else:
return 0
def setDisplayName(self, str):
if not self.isDisguised:
self.setFancyNametag(name=str)
else:
self.removeFancyNametag()
Avatar.Avatar.setDisplayName(self, str)
def setFancyNametag(self, name=None):
if name == None:
name = self.getName()
if self.getNametagStyle() == 100:
self.setFont(ToontownGlobals.getToonFont())
else:
self.setFont(ToontownGlobals.getNametagFont(self.getNametagStyle()))
Avatar.Avatar.setDisplayName(self, name)
self.setFont(ToontownGlobals.getToonFont())
return
def removeFancyNametag(self):
self.nametag.clearShadow()
def getNametagStyle(self):
if hasattr(self, 'nametagStyle'):
return self.nametagStyle
else:
return 0
def setNametagStyle(self, nametagStyle):
if base.config.GetBool('want-nametag-avids', 0):
nametagStyle = 0
self.nametagStyle = nametagStyle
self.setDisplayName(self.getName())
def getAvIdName(self):
paidStr = PythonUtil.choice(self.getGameAccess() == OTPGlobals.AccessFull, 'P', 'F')
return '%s\n%s (%s)' % (self.getName(), self.doId, paidStr)
def getTTSVolume(self):
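        """Rough text-to-speech volume: map this toon's offset from the local
        avatar onto 0-100 so that nearby toons speak louder."""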
avatarPos = self.getPos(base.localAvatar)
result = int(round((avatarPos[0] + avatarPos[1]) / 2))
if result > 100:
result = 100
elif result < 0:
result = 0
volumeList = range(100, -1, -1)
return volumeList[result]
def playCurrentDialogue(self, dialogue, chatFlags, interrupt=1):
reality = False
if chatFlags & CFExclaim == 512:
reality = True
if interrupt and self.__currentDialogue is not None:
self.__currentDialogue.stop()
self.__currentDialogue = dialogue
if dialogue:
base.playSfx(dialogue, node=self)
elif chatFlags & CFSpeech != 0 or chatFlags & CFExclaim == 512:
if self.nametag.getNumChatPages() > 0:
self.playDialogueForString(self.nametag.getChat(), exclaim=reality)
if self.soundChatBubble != None:
base.playSfx(self.soundChatBubble, node=self)
elif self.nametag.getChatStomp() > 0:
self.playDialogueForString(self.nametag.getStompText(), self.nametag.getStompDelay(), exclaim=reality)
return
def playDialogueForString(self, chatString, delay=0.0, exclaim=False):
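        """Classify the chat string (question, exclamation, special or a
        random statement) and bucket its length, then hand off to
        playDialogue."""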
if len(chatString) == 0:
return
searchString = chatString.lower()
if searchString.find(OTPLocalizer.DialogSpecial) >= 0:
type = 'special'
elif searchString.find(OTPLocalizer.DialogExclamation) >= 0 or exclaim:
type = 'exclamation'
elif searchString.find(OTPLocalizer.DialogQuestion) >= 0:
type = 'question'
elif random.randint(0, 1):
type = 'statementA'
else:
type = 'statementB'
stringLength = len(chatString)
if stringLength <= OTPLocalizer.DialogLength1:
length = 1
elif stringLength <= OTPLocalizer.DialogLength2:
length = 2
elif stringLength <= OTPLocalizer.DialogLength3:
length = 3
else:
length = 4
self.playDialogue(type, length, chatString, delay)
def playDialogue(self, type, length, chatString='', delay=0.0):
if base.textToSpeech:
chatString = chatString.replace('WLDisplay', '')
soundSequence = Sequence(Wait(delay), Func(self.playTTS, chatString))
self.soundSequenceList.append(soundSequence)
soundSequence.start()
self.cleanUpSoundList()
return
else:
dialogueArray = self.getDialogueArray()
if dialogueArray == None:
return
sfxIndex = None
if type == 'statementA' or type == 'statementB':
if length == 1:
sfxIndex = 0
elif length == 2:
sfxIndex = 1
elif length >= 3:
sfxIndex = 2
elif type == 'question':
sfxIndex = 3
elif type == 'exclamation':
sfxIndex = 4
elif type == 'special':
sfxIndex = 5
else:
                self.notify.error('unrecognized dialogue type: %s' % type)
if sfxIndex != None and sfxIndex < len(dialogueArray) and dialogueArray[sfxIndex] != None:
soundSequence = Sequence(Wait(delay), SoundInterval(dialogueArray[sfxIndex], node=None, listenerNode=base.localAvatar, loop=0, volume=1.0))
self.soundSequenceList.append(soundSequence)
soundSequence.start()
self.cleanUpSoundList()
return
def playTTS(self, chatString):
try:
animalType = self.style.getType()
if self.getTTSVolume() == 0:
return
if sys.platform == 'darwin':
if animalType in ToontownGlobals.Species2Voice.keys():
voice = ToontownGlobals.Species2Voice[animalType]
else:
voice = ToontownGlobals.DefaultVoice
Popen(['say', voice, chatString])
else:
if animalType in ToontownGlobals.Species2Pitch.keys():
pitch = '-p' + str(ToontownGlobals.Species2Pitch[animalType])
else:
pitch = '-p' + str(ToontownGlobals.DefaultPitch)
volume = '-a' + str(self.getTTSVolume())
Popen([base.textToSpeechPath, pitch, volume, '-ven', chatString])
return
except:
base.resetTextToSpeech()
self.setSystemMessage(0, TTLocalizer.TextToSpeechWarning)
return
def cleanUpSoundList(self):
removeList = []
for soundSequence in self.soundSequenceList:
if soundSequence.isStopped():
removeList.append(soundSequence)
for soundSequence in removeList:
self.soundSequenceList.remove(soundSequence)
def sendLogMessage(self, message):
self.sendUpdate('logMessage', [message])
def setChatAbsolute(self, chatString, chatFlags, dialogue=None, interrupt=1, quiet=0):
DistributedAvatar.DistributedAvatar.setChatAbsolute(self, chatString, chatFlags, dialogue, interrupt)
def setChatMuted(self, chatString, chatFlags, dialogue=None, interrupt=1, quiet=0):
self.nametag.setChat(chatString, chatFlags)
self.playCurrentDialogue(dialogue, chatFlags - CFSpeech, interrupt)
def displayTalk(self, chatString, mods=None):
flags = CFSpeech | CFTimeout
if base.talkAssistant.isThought(chatString):
flags = CFThought
chatString = base.talkAssistant.removeThoughtPrefix(chatString)
elif base.talkAssistant.isExclaim(chatString):
flags = CFExclaim | CFTimeout
chatString = base.talkAssistant.removeExclaimPrefix(chatString)
self.nametag.setChat(chatString, flags)
if base.toonChatSounds:
self.playCurrentDialogue(None, flags, interrupt=1)
return
def setMail(self, mail):
DistributedToon.partyNotify.debug('setMail called with %d mail items' % len(mail))
self.mail = []
for i in xrange(len(mail)):
oneMailItem = mail[i]
newMail = SimpleMailBase(*oneMailItem)
self.mail.append(newMail)
def setSimpleMailNotify(self, simpleMailNotify):
DistributedToon.partyNotify.debug('setSimpleMailNotify( %s )' % simpleMailNotify)
self.simpleMailNotify = simpleMailNotify
if self.isLocal():
self.gotCatalogNotify = 1
self.refreshOnscreenButtons()
def setInviteMailNotify(self, inviteMailNotify):
DistributedToon.partyNotify.debug('setInviteMailNotify( %s )' % inviteMailNotify)
self.inviteMailNotify = inviteMailNotify
if self.isLocal():
self.gotCatalogNotify = 1
self.refreshOnscreenButtons()
def setInvites(self, invites):
DistributedToon.partyNotify.debug('setInvites called passing in %d invites.' % len(invites))
self.invites = []
for i in xrange(len(invites)):
oneInvite = invites[i]
newInvite = InviteInfo(*oneInvite)
self.invites.append(newInvite)
def updateInviteMailNotify(self):
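        # Recount the invites shown in the mailbox and set the invite notify
        # state to NewItems / OldItems / NoItems based on how many are unread
        # or read-but-not-replied.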
invitesInMailbox = self.getInvitesToShowInMailbox()
newInvites = 0
readButNotRepliedInvites = 0
for invite in invitesInMailbox:
if invite.status == PartyGlobals.InviteStatus.NotRead:
newInvites += 1
elif invite.status == PartyGlobals.InviteStatus.ReadButNotReplied:
readButNotRepliedInvites += 1
if __dev__:
partyInfo = self.getOnePartyInvitedTo(invite.partyId)
if not partyInfo:
                    self.notify.error('party info not found in partiesInvitedTo, partyId = %s' % str(invite.partyId))
if newInvites:
self.setInviteMailNotify(ToontownGlobals.NewItems)
elif readButNotRepliedInvites:
self.setInviteMailNotify(ToontownGlobals.OldItems)
else:
self.setInviteMailNotify(ToontownGlobals.NoItems)
def getInvitesToShowInMailbox(self):
result = []
for invite in self.invites:
appendInvite = True
if invite.status == InviteStatus.Accepted or invite.status == InviteStatus.Rejected:
appendInvite = False
if appendInvite:
partyInfo = self.getOnePartyInvitedTo(invite.partyId)
if not partyInfo:
appendInvite = False
if appendInvite:
if partyInfo.status == PartyGlobals.PartyStatus.Cancelled:
appendInvite = False
if appendInvite:
endDate = partyInfo.endTime.date()
curDate = base.cr.toontownTimeManager.getCurServerDateTime().date()
if endDate < curDate:
appendInvite = False
if appendInvite:
result.append(invite)
return result
def getNumInvitesToShowInMailbox(self):
result = len(self.getInvitesToShowInMailbox())
return result
def setHostedParties(self, hostedParties):
DistributedToon.partyNotify.debug('setHostedParties called passing in %d parties.' % len(hostedParties))
self.hostedParties = []
for i in xrange(len(hostedParties)):
hostedInfo = hostedParties[i]
newParty = PartyInfo(*hostedInfo)
self.hostedParties.append(newParty)
def setPartiesInvitedTo(self, partiesInvitedTo):
DistributedToon.partyNotify.debug('setPartiesInvitedTo called passing in %d parties.' % len(partiesInvitedTo))
self.partiesInvitedTo = []
for i in xrange(len(partiesInvitedTo)):
partyInfo = partiesInvitedTo[i]
newParty = PartyInfo(*partyInfo)
self.partiesInvitedTo.append(newParty)
self.updateInviteMailNotify()
def getOnePartyInvitedTo(self, partyId):
result = None
for i in xrange(len(self.partiesInvitedTo)):
partyInfo = self.partiesInvitedTo[i]
if partyInfo.partyId == partyId:
result = partyInfo
break
return result
def getInviteForPartyId(self, partyId):
result = None
for invite in self.invites:
if invite.partyId == partyId:
result = invite
break
return result
def setPartyReplies(self, replies):
DistributedToon.partyNotify.debug('setPartyReplies called passing in %d parties.' % len(replies))
self.partyReplyInfoBases = []
for i in xrange(len(replies)):
partyReply = replies[i]
repliesForOneParty = PartyReplyInfoBase(*partyReply)
self.partyReplyInfoBases.append(repliesForOneParty)
def setPartyCanStart(self, partyId):
DistributedToon.partyNotify.debug('setPartyCanStart called passing in partyId=%s' % partyId)
for partyInfo in self.hostedParties:
if partyInfo.partyId == partyId:
partyInfo.status = PartyGlobals.PartyStatus.CanStart
from toontown.shtiker import EventsPage
if hasattr(self, 'eventsPage') and base.localAvatar.book.entered and base.localAvatar.book.isOnPage(self.eventsPage) and self.eventsPage.getMode() == EventsPage.EventsPage_Host:
base.localAvatar.eventsPage.loadHostedPartyInfo()
if hasattr(self, 'displaySystemClickableWhisper'):
self.displaySystemClickableWhisper(0, TTLocalizer.PartyCanStart, whisperType=WhisperPopup.WTSystem)
else:
self.setSystemMessage(0, TTLocalizer.PartyCanStart)
def setPartyStatus(self, partyId, newStatus):
        DistributedToon.partyNotify.debug('setPartyStatus called passing in partyId=%s status=%s' % (partyId, newStatus))
found = False
for partyInfo in self.hostedParties:
if partyInfo.partyId == partyId:
partyInfo.status = newStatus
found = True
break
for partyInfo in self.partiesInvitedTo:
if partyInfo.partyId == partyId:
partyInfo.status = newStatus
found = True
from toontown.shtiker import EventsPage
if hasattr(self, 'eventsPage') and base.localAvatar.book.entered and base.localAvatar.book.isOnPage(self.eventsPage) and self.eventsPage.getMode() == EventsPage.EventsPage_Invited:
base.localAvatar.eventsPage.loadInvitations()
if newStatus == PartyStatus.Started and hasattr(self, 'displaySystemClickableWhisper'):
invite = self.getInviteForPartyId(partyId)
if invite:
name = ' '
host = base.cr.identifyAvatar(partyInfo.hostId)
if host:
name = host.getName()
if invite.status == InviteStatus.Accepted:
displayStr = TTLocalizer.PartyHasStartedAcceptedInvite % TTLocalizer.GetPossesive(name)
self.displaySystemClickableWhisper(-1, displayStr, whisperType=WhisperPopup.WTSystem)
else:
displayStr = TTLocalizer.PartyHasStartedNotAcceptedInvite % TTLocalizer.GetPossesive(name)
self.setSystemMessage(partyInfo.hostId, displayStr, whisperType=WhisperPopup.WTSystem)
break
if not found:
self.notify.warning("setPartyCanStart can't find partyId=% status=%d" % (partyId, newStatus))
def announcePartyStarted(self, partyId):
DistributedToon.partyNotify.debug('announcePartyStarted')
return
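        # NOTE: the early return above makes the loop below unreachable; the
        # whisper notifications appear to be intentionally disabled.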
for partyReplyInfo in self.partyReplyInfoBases:
if partyReplyInfo.partyId == partyId:
for singleReply in partyReplyInfo.replies:
toonId = singleReply.inviteeId
if base.cr.isFriend(toonId):
if base.cr.isFriendOnline(toonId):
if singleReply.status == InviteStatus.Accepted:
self.whisperSCTo(5302, toonId, 0)
else:
self.whisperSCTo(5302, toonId, 0)
def updateInvite(self, inviteKey, newStatus):
DistributedToon.partyNotify.debug('updateInvite( inviteKey=%d, newStatus=%s )' % (inviteKey, InviteStatus.getString(newStatus)))
for invite in self.invites:
if invite.inviteKey == inviteKey:
invite.status = newStatus
self.updateInviteMailNotify()
break
def updateReply(self, partyId, inviteeId, newStatus):
DistributedToon.partyNotify.debug('updateReply( partyId=%d, inviteeId=%d, newStatus=%s )' % (partyId, inviteeId, InviteStatus.getString(newStatus)))
for partyReplyInfoBase in self.partyReplyInfoBases:
if partyReplyInfoBase.partyId == partyId:
for reply in partyReplyInfoBase.replies:
if reply.inviteeId == inviteeId:
reply.status = newStatus
break
def scrubTalk(self, message, mods, raw):
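        # Scrub a chat message against the whitelist: the ranges in `mods` are
        # masked with \x07 first, flagged words are garbled and/or wrapped in
        # WLDisplay markup depending on the friends-list true-friend chat flags,
        # and the method returns (newText, scrubbed).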
scrubbed = 0
text = copy.copy(message)
for mod in mods:
index = mod[0]
length = mod[1] - mod[0] + 1
newText = text[0:index] + length * '\x07' + text[index + length:]
text = newText
for friendId, flags in self.friendsList:
if flags & ToontownGlobals.FriendChat:
text = copy.copy(raw)
if not self.isLocal() and self.playerType in [NametagGroup.CCNormal, NametagGroup.CCFreeChat]:
text = copy.copy(raw)
words = text.split(' ')
newwords = []
i = 0
for word in words:
if word == '':
newwords.append(word)
elif word == '.' and len(words) == 1:
newwords.append(word)
elif (word.startswith('.') or word.startswith('!')) and len(word) > 1 and i == 0:
if word[0] == '\x07' or len(word) > 1 and word[1] == '\x07':
newwords.append(word[0] + '\x01WLDisplay\x01' + self.chatGarbler.garbleSingle(self, word) + '\x02')
else:
flag = 0
for friendId, flags in self.friendsList:
if not flags & ToontownGlobals.FriendChat:
flag = 1
if flag:
newwords.append(word[0] + '\x01WLDisplay\x01' + word[1:] + '\x02')
else:
newwords.append(word)
scrubbed = 1
elif word[0] == '\x07' or len(word) > 1 and word[1] == '\x07':
newwords.append('\x01WLDisplay\x01' + self.chatGarbler.garbleSingle(self, word) + '\x02')
scrubbed = 1
elif base.whiteList.isWord(word):
newwords.append(word)
else:
flag = 0
for friendId, flags in self.friendsList:
if not flags & ToontownGlobals.FriendChat:
flag = 1
if flag:
scrubbed = 1
newwords.append('\x01WLDisplay\x01' + word + '\x02')
else:
newwords.append(word)
i += 1
newText = (' ').join(newwords)
return (
newText, scrubbed)
def replaceBadWords(self, text):
words = text.split(' ')
newwords = []
for word in words:
if word == '':
newwords.append(word)
elif word[0] == '\x07':
newwords.append('\x01WLRed\x01' + self.chatGarbler.garbleSingle(self, word) + '\x02')
elif base.whiteList.isWord(word):
newwords.append(word)
else:
newwords.append('\x01WLRed\x01' + word + '\x02')
newText = (' ').join(newwords)
return newText
def toonUp(self, hpGained, hasInteractivePropBonus=False):
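        # Apply a heal of hpGained (never exceeding maxHp), show the floating
        # "+N" text when HP was actually gained, then broadcast the HP change.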
if self.hp == None or hpGained < 0:
return
oldHp = self.hp
if self.hp + hpGained <= 0:
self.hp += hpGained
else:
self.hp = min(max(self.hp, 0) + hpGained, self.maxHp)
hpGained = self.hp - max(oldHp, 0)
if hpGained > 0:
self.showHpText(hpGained, hasInteractivePropBonus=hasInteractivePropBonus)
self.hpChange(quietly=0)
return
def showHpText(self, number, bonus=0, scale=1, hasInteractivePropBonus=False):
if self.HpTextEnabled and not self.ghostMode:
if number != 0:
if self.hpText:
self.hideHpText()
self.HpTextGenerator.setFont(OTPGlobals.getSignFont())
if number < 0:
self.HpTextGenerator.setText(str(number))
else:
hpGainedStr = '+' + str(number)
if hasInteractivePropBonus:
hpGainedStr += '\n' + TTLocalizer.InteractivePropTrackBonusTerms[0]
self.HpTextGenerator.setText(hpGainedStr)
self.HpTextGenerator.clearShadow()
self.HpTextGenerator.setAlign(TextNode.ACenter)
if bonus == 1:
r = 1.0
g = 1.0
b = 0
a = 1
elif bonus == 2:
r = 1.0
g = 0.5
b = 0
a = 1
elif number < 0:
r = 0.9
g = 0
b = 0
a = 1
else:
r = 0
g = 0.9
b = 0
a = 1
self.HpTextGenerator.setTextColor(r, g, b, a)
self.hpTextNode = self.HpTextGenerator.generate()
self.hpText = self.attachNewNode(self.hpTextNode)
self.hpText.setScale(scale)
self.hpText.setBillboardPointEye()
self.hpText.setBin('fixed', 100)
self.hpText.setPos(0, 0, self.height / 2)
seq = Sequence(self.hpText.posInterval(1.0, Point3(0, 0, self.height + 1.5), blendType='easeOut'), Wait(0.85), self.hpText.colorInterval(0.1, Vec4(r, g, b, 0)), Func(self.hideHpText))
seq.start()
def setAnimPlayRate(self, rate):
if self.getIsTransformed():
actor = self.getActiveTransformation()
actor.setPlayRate(rate, self.playingAnim)
else:
self.setPlayRate(rate, self.playingAnim)
if rate == 1:
self.forcedRate = -1
else:
self.forcedRate = rate
def setName(self, name='unknownDistributedAvatar'):
DistributedPlayer.DistributedPlayer.setName(self, name)
self._handleGMName(name)
base.cr.discordManager.setSmallImageText(base.cr.discordManager.getSmallImageText())
def _handleGMName(self, name=None):
if not name:
name = self.name
self.setDisplayName(name)
if self._isGM:
self.setGMIcon(self._gmType)
else:
self.removeGMIcon()
self.setNametagStyle(self.getNametagStyle())
def setGMIcon(self, gmType=None):
if hasattr(self, 'gmIcon') and self.gmIcon:
return
if not gmType:
gmType = self._gmType
iconInfo = (
('phase_3.5/models/gui/tt_m_gui_gm_toontroop_whistle', '**/*whistleIcon*', 'phase_3.5/maps/gamegui_palette_3clla_1.jpg',
4),
('phase_3.5/models/gui/tt_m_gui_gm_toonResistance_fist', '**/*fistIcon*', 'phase_3.5/maps/gamegui_palette_3clla_1.jpg',
4),
('phase_3.5/models/gui/tt_m_gui_gm_toontroop_getConnected', '**/*whistleIcon*', 'phase_3.5/maps/gamegui_palette_3clla_1.jpg',
4),
('phase_3.5/models/gui/tt_m_gui_gm_toontroop_whistle', '**/*whistleIcon*', 'phase_3.5/maps/gamegui_palette_3clla_2.jpg',
4),
('phase_3.5/models/gui/tt_m_gui_gm_toonResistance_fist', '**/*fistIcon*', 'phase_3.5/maps/gamegui_palette_3clla_2.jpg',
4),
('phase_3.5/models/gui/tt_m_gui_gm_toontroop_getConnected', '**/*whistleIcon*', 'phase_3.5/maps/gamegui_palette_3clla_2.jpg',
4),
('phase_3.5/models/gui/tt_m_gui_gm_toonResistance_fist', '**/*fistIcon*', 'phase_3.5/maps/gamegui_palette_3clla_3.jpg',
4),
('phase_3.5/models/gui/tt_m_gui_gm_toontroop_getConnected', '**/*whistleIcon*', 'phase_3.5/maps/gamegui_palette_3clla_3.jpg',
4))
if gmType > len(iconInfo) - 1:
return
modelName, searchString, texture, scale = iconInfo[gmType]
icons = loader.loadModel(modelName)
self.gmIcon = icons.find(searchString)
ts = self.gmIcon.findTextureStage('*')
tex = loader.loadTexture(texture)
self.gmIcon.setTexture(ts, tex, 1)
self.gmIcon.setScale(scale)
self.gmIcon.reparentTo(self.nametag.getNameIcon())
self.setTrophyScore(self.trophyScore)
self.gmIcon.setZ(-2.5)
self.gmIcon.setY(0.0)
self.gmIcon.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.gmIcon.setTransparency(1)
self.gmIconInterval = LerpHprInterval(self.gmIcon, 3.0, Point3(0, 0, 0), Point3(-360, 0, 0))
self.gmIconInterval.loop()
def setGMPartyIcon(self):
gmType = self._gmType
iconInfo = ('phase_3.5/models/gui/tt_m_gui_gm_toonResistance_fist', 'phase_3.5/models/gui/tt_m_gui_gm_toontroop_whistle',
'phase_3.5/models/gui/tt_m_gui_gm_toonResistance_fist', 'phase_3.5/models/gui/tt_m_gui_gm_toontroop_getConnected')
if gmType > len(iconInfo) - 1:
return
self.gmIcon = loader.loadModel(iconInfo[gmType])
self.gmIcon.reparentTo(self.nametag.getNameIcon())
self.gmIcon.setScale(3.25)
self.setTrophyScore(self.trophyScore)
self.gmIcon.setZ(1.0)
self.gmIcon.setY(0.0)
self.gmIcon.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.gmIcon.setTransparency(1)
self.gmIconInterval = LerpHprInterval(self.gmIcon, 3.0, Point3(0, 0, 0), Point3(-360, 0, 0))
self.gmIconInterval.loop()
def removeGMIcon(self):
if hasattr(self, 'gmIconInterval') and self.gmIconInterval:
self.gmIconInterval.finish()
del self.gmIconInterval
if hasattr(self, 'gmIcon') and self.gmIcon:
self.gmIcon.detachNode()
del self.gmIcon
def _startZombieCheck(self):
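        # Start the periodic "zombie" check: roughly every 60s ask the
        # TimeManager whether this avatar is still present on the district and
        # hide it if the check comes back negative (see _zombieCheckResult).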
self._zombieCheckSerialGen = SerialNumGen(random.randrange(2147483648L))
taskMgr.doMethodLater(2.0 + 60.0 * random.random(), self._doZombieCheck, self._getZombieCheckTaskName())
def _stopZombieCheck(self):
taskMgr.remove(self._getZombieCheckTaskName())
def _getZombieCheckTaskName(self):
return self.uniqueName('zombieCheck')
def _doZombieCheck(self, task=None):
self._lastZombieContext = self._zombieCheckSerialGen.next()
self.cr.timeManager.checkAvOnDistrict(self, self._lastZombieContext)
taskMgr.doMethodLater(60.0, self._doZombieCheck, self._getZombieCheckTaskName())
def _zombieCheckResult(self, context, present):
if context == self._lastZombieContext:
print '_zombieCheckResult[%s]: %s' % (self.doId, present)
if not present:
self.notify.warning('hiding av %s because they are not on the district!' % self.doId)
self.setParent(OTPGlobals.SPHidden)
def setFriendsList(self, friendsList):
DistributedPlayer.DistributedPlayer.setFriendsList(self, friendsList)
for friendId, trueFriend in self.friendsList:
if (
friendId, trueFriend) in self.oldFriendsList:
continue
friend = self.cr.doId2do.get(friendId)
if friend:
base.cr.ttoffFriendsManager.friendOnline(friendId, 0, 0, False)
for friendPair in self.oldFriendsList:
if friendPair in self.friendsList:
continue
if type(friendPair) == tuple:
friendId = friendPair[0]
else:
friendId = friendPair
friend = self.cr.doId2do.get(friendId)
if not friend:
continue
if hasattr(base.localAvatar, 'inEstate') and base.localAvatar.inEstate:
base.cr.estateMgr.removeFriend(self.getDoId(), friendId)
def setImmortalMode(self, flag):
        self.immortalMode = flag
messenger.send(self.uniqueName('magicWordChange'), [1, flag])
def getImmortalMode(self):
return self.immortalMode
def setUnlimitedGags(self, flag):
self.unlimitedGags = flag
messenger.send(self.uniqueName('magicWordChange'), [0, flag])
def getUnlimitedGags(self):
return self.unlimitedGags
def setInstaKill(self, flag):
self.instaKill = flag
messenger.send(self.uniqueName('magicWordChange'), [2, flag])
def getInstaKill(self):
return self.instaKill
def setRun(self):
if self.isLocal():
inputState.set('debugRunning', inputState.isSet('debugRunning') is not True)
def generateRainbow(self):
intervalName = 'RainbowSeq'
if self.activeIntervals.has_key(intervalName):
self.destroyRainbow()
return
red = (1.0, 0.0, 0.0, 1.0)
orange = (0.898, 0.42, 0.024, 1.0)
yellow = (0.945, 0.957, 0.259, 1.0)
green = (0.0, 1.0, 0.0, 1.0)
blue = (0.0, 0.0, 1.0, 1.0)
indigo = (0.247, 0.0, 1.0, 1.0)
violet = (0.498, 0.0, 1.0, 1.0)
rainbowSeq = Parallel()
for node in (render, render2d, aspect2d):
rainbowSeq.append(Sequence(LerpColorScaleInterval(node, 0.5, red), LerpColorScaleInterval(node, 0.5, orange), LerpColorScaleInterval(node, 0.5, yellow), LerpColorScaleInterval(node, 0.5, green), LerpColorScaleInterval(node, 0.5, blue), LerpColorScaleInterval(node, 0.5, indigo), LerpColorScaleInterval(node, 0.5, violet)))
rainbowSeq.loop()
intervalName = 'RainbowSeq'
self.storeInterval(rainbowSeq, intervalName)
def destroyRainbow(self):
intervalName = 'RainbowSeq'
self.clearInterval(intervalName)
for node in (render, render2d, aspect2d):
node.clearColorScale()
def generateFanfare(self):
from toontown.battle import Fanfare
fanfare = Sequence(Fanfare.makeFanfare(0, self)[0])
fanfare.start()
def generateTrolley(self, timestamp):
station = loader.loadModel('phase_4/models/modules/trolley_station_TT')
trolley = station.find('**/trolley_car')
trolley.setZ(100)
trolley.reparentTo(self)
station.removeNode()
dropSfx = loader.loadSfx('phase_5/audio/sfx/cogbldg_drop.ogg')
landSfx = loader.loadSfx('phase_5/audio/sfx/AA_drop_boat_cog.ogg')
trolleySfx = loader.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_bell_for_trolley.ogg')
fadeSfx = loader.loadSfx('phase_4/audio/sfx/SZ_trolley_bell.ogg')
magicTrolleySeq = Sequence(Func(base.playSfx, dropSfx), Parallel(trolley.scaleInterval(7, (1,
1,
1)), trolley.posInterval(7, (0,
0,
0))), Func(self.setAnimState, 'Squish'), Func(base.playSfx, landSfx), Func(base.playSfx, trolleySfx, 0, 1, 1.5), trolley.posInterval(0.1, (0,
0,
0.5)), trolley.posInterval(0.1, (0,
0,
0)), Wait(0.4), Func(base.playSfx, fadeSfx, 0, 1, 1.5), trolley.scaleInterval(1, (0,
0,
0)), Func(trolley.removeNode), Wait(1.3), Func(self.setAnimState, 'neutral'))
ts = globalClockDelta.localElapsedTime(timestamp)
magicTrolleySeq.start(ts)
def generateBrowserEasterEgg(self, index):
if not index:
webbrowser.open('https://www.infowars.com/')
elif index == 1:
webbrowser.open('https://www.msnbc.com/')
webbrowser.open('https://www.cnn.com/')
def generateGreenEffect(self, character='f', toonId=0):
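        # Magic-word "green" effect: depending on `character`/`toonId`, fly in
        # a squad of cogs, a suited copy of this toon, another toon, or a
        # walking panda; most variants deliver the GreenEffectPhase line before
        # cleanupGreenEffect() removes them.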
intervalName = 'GreenSeq'
cogTypes = [
TTLocalizer.SellbotP.lower(), TTLocalizer.CashbotP.lower(), TTLocalizer.LawbotP.lower(), TTLocalizer.BossbotP.lower()]
if character in cogTypes:
cogFlyInPos = ToontownGlobals.GreenEffectMassFlyPositions
cogList = ToontownGlobals.GreenEffectMassFlyCogs
seq = Parallel()
for x in range(len(cogFlyInPos)):
cog = ToontownAvatarUtils.createCog(cogList[cogTypes.index(character)][x], self.getX() + cogFlyInPos[x][0], self.getY() + cogFlyInPos[x][1], self.getZ(), 0, 0, 0, parent=hidden)
cogFlyIn = cog.beginSupaFlyMove(VBase3(self.getX() + cogFlyInPos[x][0], self.getY() + cogFlyInPos[x][1], self.getZ()), 1, 'flyIn')
cogSeq = Sequence(Func(cog.addActive), Func(cog.headsUp, self), Func(cog.reparentTo, render), cogFlyIn, Func(cog.setChatAbsolute, TTLocalizer.GreenEffectPhase, CFSpeech | CFTimeout), ActorInterval(cog, 'victory'), Func(cog.loop, 'neutral'), Wait(1), Func(self.cleanupGreenEffect, cog))
seq.append(cogSeq)
seq.start()
self.storeInterval(seq, intervalName)
return
if toonId == 2:
if self.isDisguised:
if self.isCog not in ToontownGlobals.PutOnSuitToonHead:
cog = ToontownAvatarUtils.createCog(self.suit.style.name, 0, 8, self.getZ(self), self.getH(), 0, 0, parent=self, isSkelecog=self.suit.isSkeleton, isWaiter=self.suit.isWaiter, isVirtual=self.suit.isVirtual, isSkeleRevive=self.suit.isSkeleRevive, colorType=self.nametag.getColorCode(), level=self.cogLevels[SuitDNA.suitDepts.index(SuitDNA.getSuitDept(self.suit.style.name))] + 1)
cog.wrtReparentTo(hidden)
cogFlyIn = cog.beginSupaFlyMove(VBase3(cog.getX(), cog.getY(), cog.getZ()), 1, 'flyIn')
seq = Sequence(Func(cog.addActive), Func(cog.headsUp, self), Func(cog.reparentTo, render), cogFlyIn, Func(cog.setChatAbsolute, TTLocalizer.GreenEffectPhase, CFSpeech | CFTimeout), ActorInterval(cog, 'victory'), Func(cog.loop, 'neutral'), Wait(1), Func(self.cleanupGreenEffect, cog))
seq.start()
self.storeInterval(seq, intervalName)
return
else:
toon = ToontownAvatarUtils.createUniqueToon(self.getName(), self.style.asTuple(), self.hat, self.glasses, self.backpack, self.shoes, 0, 8, self.getZ(self), self.getH(), parent=self, isDisguised=True, suitType=self.suit.style.name, suitDept=self.suit.style.dept, isWaiter=self.suit.isWaiter, isRental=self.suit.isRental, colorType=self.nametag.getColorCode(), cogLevels=self.getCogLevels(), cheesyEffect=self.cheesyEffect)
toon.wrtReparentTo(hidden)
cogFlyIn = toon.getSuitTeleport(moveIn=1, startPos=(toon.getX(), toon.getY(), toon.getZ()))
seq = Sequence(Func(toon.addActive), Func(toon.headsUp, self), Func(toon.reparentTo, render), cogFlyIn, Func(toon.setChatAbsolute, TTLocalizer.GreenEffectPhase, CFSpeech | CFTimeout), ActorInterval(toon.suit, 'victory'), Func(toon.suit.loop, 'neutral'), Wait(1), Func(self.cleanupGreenEffect, toon, 1))
seq.start()
self.storeInterval(seq, intervalName)
return
else:
toon = ToontownAvatarUtils.createUniqueToon(self.getName(), self.style.asTuple(), self.hat, self.glasses, self.backpack, self.shoes, 0, 5, self.getZ(self), self.getH(), 0, 0, parent=self, colorType=self.nametag.getColorCode(), cheesyEffect=self.cheesyEffect, nametagStyle=self.nametagStyle)
toon.wrtReparentTo(hidden)
if toon.style.getAnimal() == 'bear':
angryToonSFX = loader.loadSfx('phase_3.5/audio/dial/AV_bear_exclaim.ogg')
else:
angryToonSFX = loader.loadSfx('phase_3.5/audio/sfx/avatar_emotion_angry.ogg')
toonTeleportIn = Sequence(Func(toon.animFSM.request, 'TeleportIn'), Wait(1.517), Func(toon.animFSM.request, 'neutral'))
seq = Sequence(Parallel(Func(toon.reparentTo, render), Func(toon.addActive)), Func(toon.headsUp, self), toonTeleportIn, Func(toon.setChatAbsolute, OTPLocalizer.SpeedChatStaticTextToontown.get(905), CFSpeech | CFTimeout), Parallel(SoundInterval(angryToonSFX, loop=1, node=toon), Sequence(Func(toon.angryEyes), Func(toon.blinkEyes), ActorInterval(toon, 'angry'), Func(toon.normalEyes), Func(toon.blinkEyes), Func(toon.loop, 'neutral')), Wait(3)), Func(toon.setChatAbsolute, TTLocalizer.GreenEffectPhase, CFSpeech | CFTimeout), ActorInterval(toon, 'hypnotize'), Func(self.cleanupGreenEffect, toon, 1))
seq.start()
self.storeInterval(seq, intervalName)
return
else:
if toonId != 0:
toon = ToontownAvatarUtils.createToon(toonId, 0, 5, self.getZ(self), self.getH(), 0, 0, parent=self)
toon.wrtReparentTo(hidden)
if toon.style.getAnimal() == 'bear':
angryToonSFX = loader.loadSfx('phase_3.5/audio/dial/AV_bear_exclaim.ogg')
else:
angryToonSFX = loader.loadSfx('phase_3.5/audio/sfx/avatar_emotion_angry.ogg')
toonTeleportIn = Sequence(Func(toon.animFSM.request, 'TeleportIn'), Wait(1.517), Func(toon.animFSM.request, 'neutral'))
seq = Sequence(Parallel(Func(toon.reparentTo, render), Func(toon.addActive)), Func(toon.headsUp, self), toonTeleportIn, Func(toon.setChatAbsolute, OTPLocalizer.SpeedChatStaticTextToontown.get(905), CFSpeech | CFTimeout), Parallel(SoundInterval(angryToonSFX, loop=1, node=toon), Sequence(Func(toon.angryEyes), Func(toon.blinkEyes), ActorInterval(toon, 'angry'), Func(toon.normalEyes), Func(toon.blinkEyes), Func(toon.loop, 'neutral')), Wait(3)), Func(toon.setChatAbsolute, TTLocalizer.GreenEffectPhase, CFSpeech | CFTimeout), ActorInterval(toon, 'hypnotize'), Func(self.cleanupGreenEffect, toon, 1))
seq.start()
self.storeInterval(seq, intervalName)
return
else:
if character == 'panda':
panda = Actor.Actor('phase_3/models/char/panda', {'walk': 'phase_3/models/char/panda-walk'})
panda.setBlend(frameBlend=base.settings.getBool('game', 'smooth-animations', False))
panda.setTransparency(1)
panda.setPosHpr(self.getX(), self.getY(), self.getZ(), self.getH() - 180, 0, 0)
panda.setScale(0.5)
walkNode = NodePath('Panda3DWalkNode')
walkNode.setPosHpr(self.getX(), self.getY(), self.getZ(), self.getH() - 180, 0, 0)
seq = Sequence(Func(panda.reparentTo, render), Func(panda.loop, 'walk'), Parallel(LerpColorScaleInterval(panda, 1.0, colorScale=VBase4(1, 1, 1, 1), startColorScale=VBase4(1, 1, 1, 0)), LerpPosInterval(panda, 5.0, (0,
-25,
0), other=walkNode), Sequence(Wait(4), LerpScaleInterval(panda, 1.0, 0))), Func(self.cleanupGreenEffect, panda, 2, walkNode))
seq.start()
self.storeInterval(seq, intervalName)
return
cog = ToontownAvatarUtils.createCog(character, 0, 8, self.getZ(self), self.getH(), 0, 0, parent=self)
cog.wrtReparentTo(hidden)
cogFlyIn = cog.beginSupaFlyMove(VBase3(cog.getX(), cog.getY(), cog.getZ()), 1, 'flyIn')
seq = Sequence(Func(cog.addActive), Func(cog.headsUp, self), Func(cog.reparentTo, render), cogFlyIn, Func(cog.setChatAbsolute, TTLocalizer.GreenEffectPhase, CFSpeech | CFTimeout), ActorInterval(cog, 'victory'), Func(cog.loop, 'neutral'), Wait(1), Func(self.cleanupGreenEffect, cog))
seq.start()
self.storeInterval(seq, intervalName)
return
def cleanupGreenEffect(self, character, type=0, node=None):
if character:
if type == 1:
if character.isDisguised:
if self.isCog != 0 and self.isCog != 5 and self.isCog != 9:
cogFlyOut = character.beginSupaFlyMove(VBase3(character.getX(), character.getY(), character.getZ()), 0, 'flyOut')
seq = Sequence(cogFlyOut, Func(character.reparentTo, hidden), Func(character.cleanup), Func(character.removeActive), Func(character.removeNode))
else:
cogFlyOut = character.getSuitTeleport(moveIn=0)
seq = Sequence(cogFlyOut, Func(character.reparentTo, hidden), Func(character.cleanup), Func(character.removeActive), Func(character.removeNode))
else:
seq = Sequence(Func(character.animFSM.request, 'TeleportOut'), Wait(character.getDuration('teleport') + 1.0), Func(character.reparentTo, hidden), Func(character.stopBlink), Func(character.cleanup), Func(character.removeActive), Func(character.removeNode))
elif type == 2:
seq = Sequence(Func(character.reparentTo, hidden), Func(character.cleanup), Func(character.removeNode), Func(node.removeNode))
else:
cogFlyOut = character.beginSupaFlyMove(VBase3(character.getX(), character.getY(), character.getZ()), 0, 'flyOut')
seq = Sequence(cogFlyOut, Func(character.reparentTo, hidden), Func(character.cleanup), Func(character.removeActive), Func(character.removeNode))
seq.start()
def cleanupGreenEffectIntervals(self):
intervalName = 'GreenSeq'
for key in self.activeIntervals.keys():
if intervalName in key:
self.clearInterval(key)
def generateSnapEffect(self):
from toontown.battle import BattleParticles
from toontown.battle import MovieSuitAttacks
headEffect = BattleParticles.createParticleEffect('RubOut', color=(0, 0, 0,
1))
torsoEffect = BattleParticles.createParticleEffect('RubOut', color=(0, 0, 0,
1))
legsEffect = BattleParticles.createParticleEffect('RubOut', color=(0, 0, 0,
1))
animal = self.style.getAnimal()
bodyScale = ToontownGlobals.toonBodyScales[animal]
def toonFacePoint(toon, zOffset=0, parent=render):
pnt = toon.getPos(parent)
pnt.setZ(pnt[2] + toon.shoulderHeight + 0.3 + zOffset)
return Point3(pnt)
headEffectHeight = toonFacePoint(self).getZ()
legsHeight = ToontownGlobals.legHeightDict[self.style.legs] * bodyScale
torsoEffectHeight = ToontownGlobals.torsoHeightDict[self.style.torso] * bodyScale / 2 + legsHeight
legsEffectHeight = legsHeight / 2
effectX = headEffect.getX()
effectY = headEffect.getY()
headEffect.setPos(effectX, effectY - 1.5, headEffectHeight)
torsoEffect.setPos(effectX, effectY - 1, torsoEffectHeight)
legsEffect.setPos(effectX, effectY - 0.6, legsEffectHeight)
headParts = self.getHeadParts()
torsoParts = self.getTorsoParts()
legsParts = self.getLegsParts()
headTrack = MovieSuitAttacks.getPartTrack(headEffect, 0, 2.0, [headEffect, self, 0])
torsoTrack = MovieSuitAttacks.getPartTrack(torsoEffect, 0, 2.0, [torsoEffect, self, 0])
legsTrack = MovieSuitAttacks.getPartTrack(legsEffect, 0, 2.0, [legsEffect, self, 0])
def hideParts(parts):
track = Parallel()
for partNum in xrange(0, parts.getNumPaths()):
nextPart = parts.getPath(partNum)
track.append(Func(nextPart.setTransparency, 1))
track.append(LerpFunctionInterval(nextPart.setAlphaScale, fromData=1, toData=0, duration=2.0))
return track
def showParts(parts):
track = Sequence()
for partNum in xrange(0, parts.getNumPaths()):
nextPart = parts.getPath(partNum)
track.append(LerpFunctionInterval(nextPart.setAlphaScale, fromData=0, toData=1, duration=2.0))
track.append(Func(nextPart.clearTransparency))
return track
snap = Sequence(Wait(2.5), Parallel(hideParts(headParts), hideParts(torsoParts), hideParts(legsParts), headTrack, torsoTrack, legsTrack), Wait(2), Parallel(showParts(headParts), showParts(torsoParts), showParts(legsParts)))
snap.start()
def generateOboeEffect(self):
oboe = base.loader.loadSfx('phase_14.5/audio/sfx/oboe.ogg')
base.playSfx(oboe, node=self)
def generateCage(self, doAnim=True):
if self.getLocked():
self.cage = loader.loadModel('phase_14/models/props/outpost_cage')
self.cage.setScale(0.01)
self.cageCameraNode = self.attachNewNode(self.uniqueName('cageCameraNode'))
self.cageCameraNode.setZ(100)
self.cageCameraNode.wrtReparentTo(render)
self.cage.reparentTo(self.cageCameraNode)
if self.isLocal():
base.localAvatar.stopUpdateSmartCamera()
base.camera.reparentTo(self.cageCameraNode)
base.camera.setPosHpr(7.5, 15, 4, 150, 0, 0)
else:
collisions = self.cage.findAllMatches('**/+CollisionNode')
if collisions:
for coll in collisions:
coll.stash()
if doAnim:
dropSfx = loader.loadSfx('phase_5/audio/sfx/cogbldg_drop.ogg')
dropSfx.setPlayRate(2)
landSfx = loader.loadSfx('phase_5/audio/sfx/AA_drop_bigweight.ogg')
cageSeq = Sequence(Func(self.setAnimState, 'neutral'), Func(base.playSfx, dropSfx), Parallel(self.cage.scaleInterval(3.5, (0.2,
0.2,
0.2)), self.cageCameraNode.posInterval(3.5, (self.getX(), self.getY(), self.getZ()))), Func(self.setZ, self.getZ() + 1), Func(base.playSfx, landSfx))
else:
self.cage.setScale(0.2, 0.2, 0.2)
self.cageCameraNode.reparentTo(self)
self.cageCameraNode.setZ(-1)
cageSeq = None
else:
if self.isLocal():
base.camera.reparentTo(base.localAvatar)
base.localAvatar.startUpdateSmartCamera()
if not self.cageCameraNode:
return
kapow = globalPropPool.getProp('kapow')
kapow.setBillboardPointWorld(2)
kapow.setScale(0.75)
kapow.setZ(2)
kapow.reparentTo(self.cageCameraNode)
boomSfx = loader.loadSfx('phase_3.5/audio/sfx/ENC_cogfall_apart.ogg')
cageSeq = Parallel(Parallel(SoundInterval(boomSfx, node=kapow, volume=1), ActorInterval(kapow, 'kapow')), Sequence(Wait(0.75), Func(kapow.removeNode), Func(self.cageCameraNode.removeNode)))
if cageSeq:
cageSeq.start()
self.storeInterval(cageSeq, 'cageSeq')
return
def setLocked(self, locked):
self.locked = locked
if not self.isLocal():
if locked and not self.isGenerated():
self.generateCage(False)
return
if self.isGenerated():
if locked:
self.disableAvatarControls()
self.collisionsOff()
self.disableSleeping()
self.obscureFriendsListButton(1)
self.hideClarabelleGui()
self.laffMeter.hide()
self.book.hideButton()
self.ignoreOnscreenHooks()
base.localAvatar.setTeleportAvailable(0)
base.localAvatar.setTeleportAllowed(0)
base.cr.playGame.getPlace().walkStateData.toggleBook('disable')
if base.cr.propGenerator:
base.cr.propGenerator.disableHotkey()
else:
self.collisionsOn()
self.enableAvatarControls()
self.enableSleeping()
self.obscureFriendsListButton(-1)
self.refreshOnscreenButtons()
self.laffMeter.show()
self.book.showButton()
self.acceptOnscreenHooks()
base.localAvatar.setTeleportAvailable(1)
base.localAvatar.setTeleportAllowed(1)
base.cr.playGame.getPlace().walkStateData.toggleBook('enable')
if base.cr.propGenerator:
base.cr.propGenerator.enableHotkey()
def getLocked(self):
return self.locked
def setMuted(self, muted, timed):
self.muted = muted
if muted:
if timed:
if timed > 1:
message = TTLocalizer.MutedTimedPlural % timed
else:
message = TTLocalizer.MutedTimedSingular % timed
else:
message = TTLocalizer.MutedTrue
else:
message = TTLocalizer.MutedFalse
self.setSystemMessage(0, message, WhisperPopup.WTEmote)
def getMuted(self):
return self.muted
def setTransitioning(self, transitioning):
self.transitioning = transitioning
def getTransitioning(self):
return self.transitioning
def playSound(self, sound, loop=0):
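        # Resolve `sound` (an .ogg extension is appended) against the phase_*
        # bgm, dial and sfx search paths; bgm hits are played as music,
        # dial/sfx hits as sound effects, and a warning is printed otherwise.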
soundWithExt = sound + '.ogg'
bgmPhases = [3, 3.5, 4, 5.5, 6, 7, 8, 9, 10, 11, 12, 13, 14.5]
dialPhases = [3, 3.5, 4, 5.5, 6, 8]
sfxPhases = [3, 3.5, 4, 5, 5.5, 6, 8, 9, 10, 11, 12, 13, 14.5]
bgmSearchPath = DSearchPath()
for phase in bgmPhases:
bgmSearchPath.appendDirectory('/phase_' + str(phase) + '/audio/bgm')
dialSearchPath = DSearchPath()
for phase in dialPhases:
dialSearchPath.appendDirectory('/phase_' + str(phase) + '/audio/dial')
sfxSearchPath = DSearchPath()
for phase in sfxPhases:
sfxSearchPath.appendDirectory('/phase_' + str(phase) + '/audio/sfx')
filename = Filename(soundWithExt)
found = vfs.resolveFilename(filename, bgmSearchPath)
if found:
music = base.loader.loadMusic(filename.getFullpath())
base.playMusic(music, looping=loop, volume=0.8)
if not music.getLoop():
taskMgr.doMethodLater(music.length() + 1, self.playZoneMusic, self.taskName('play-zone-music'))
else:
found = vfs.resolveFilename(filename, dialSearchPath)
if not found:
found = vfs.resolveFilename(filename, sfxSearchPath)
if not found:
self.notify.warning('%s not found on:' % soundWithExt)
print bgmSearchPath
print dialSearchPath
print sfxSearchPath
else:
sfx = base.loader.loadSfx(filename.getFullpath())
base.playSfx(sfx, looping=loop, volume=0.8)
def playZoneMusic(self, task):
place = base.cr.playGame.getPlace()
if place:
base.playMusic(place.loader.music, looping=1, volume=0.8)
return task.done
def doTeleport(self, hood):
place = base.cr.playGame.getPlace()
if place:
place.doTeleport(hood)
def setToonScale(self, scale):
previousScale = self.toonScale
self.toonScale = scale
scaleTime = abs(previousScale - scale) / 2
scaleSeq = self._Toon__doToonScale(scale, scaleTime)
if self.isLocal():
scaleSeq.append(Sequence(Func(self.initCameraPositions), Func(self.resetCameraPosition)))
scaleSeq.start()
def getToonScale(self):
return self.toonScale
def setCarActive(self, carActive):
self.carActive = carActive
if self.isGenerated():
self.updateCarActive()
def getCarActive(self):
return self.carActive
def canRaceHere(self):
if self.getHp() <= 10:
return False
place = base.cr.playGame.place
if not place:
return False
from toontown.safezone.Playground import Playground
from toontown.town.Street import Street
from toontown.coghq.CogHQExterior import CogHQExterior
from toontown.coghq.FactoryExterior import FactoryExterior
from toontown.coghq.LawbotOfficeExterior import LawbotOfficeExterior
return isinstance(place, Playground) or isinstance(place, CogHQExterior) or isinstance(place, Street) or isinstance(place, FactoryExterior) or isinstance(place, LawbotOfficeExterior)
def updateCarActive(self):
if self.carActive:
if not self.carInterest and self.canRaceHere():
self.getDustCloud(0.0, scale=0.8).start()
self.carInterest = base.cr.addInterest(self.doId, [100], 'kart-%d' % self.doId)
else:
if self.carInterest:
if self.isGenerated():
self.getDustCloud(0.0, scale=0.8).start()
base.cr.removeInterest(self.carInterest)
self.carInterest = None
return
def setLoop(self, anim, start, end, part):
start = start if start != -1 else None
end = end if end != -1 else None
part = part if part else None
if self.getIsTransformed():
geom = self.getActiveTransformation()
geom.loop(anim, fromFrame=start, toFrame=end, partName=part)
else:
self.loop(anim, fromFrame=start, toFrame=end, partName=part)
return
def setPingPong(self, anim, start, end, part):
start = start if start != -1 else None
end = end if end != -1 else None
part = part if part else None
if self.getIsTransformed():
geom = self.getActiveTransformation()
geom.pingpong(anim, fromFrame=start, toFrame=end, partName=part)
else:
self.pingpong(anim, fromFrame=start, toFrame=end, partName=part)
return
def setPose(self, anim, frame, part):
part = part if part else None
if self.getIsTransformed():
geom = self.getActiveTransformation()
geom.pose(anim, frame, part)
else:
self.pose(anim, frame, part)
return
def storeInterval(self, interval, name):
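        # Remember a running interval under `name` (a counter is appended on
        # name clashes) so clearInterval/cleanupIntervals can stop it later.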
if name in self.activeIntervals:
name = name + str(len(self.activeIntervals.keys()))
self.activeIntervals[name] = interval
def cleanupIntervals(self):
for interval in self.activeIntervals.values():
interval.finish()
DelayDelete.cleanupDelayDeletes(interval)
self.activeIntervals = {}
def clearInterval(self, name, finish=1):
if self.activeIntervals.has_key(name):
ival = self.activeIntervals[name]
if finish:
ival.finish()
else:
ival.pause()
if self.activeIntervals.has_key(name):
DelayDelete.cleanupDelayDeletes(ival)
del self.activeIntervals[name]
else:
self.notify.debug('interval: %s already cleared' % name)
def finishInterval(self, name):
if self.activeIntervals.has_key(name):
interval = self.activeIntervals[name]
interval.finish()
def isPlayerControlled(self):
return True
def setUnlocks(self, unlocks):
self.unlocks = unlocks
def getUnlocks(self):
return self.unlocks | [
"[email protected]"
] | |
eb388016f65246c4c31124d34d29159a438dc564 | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/CSS/juejin_1927.py | 4cf0fa0be8441db15c31d26e93685b0b19eb0256 | [] | no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78,514 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6844903865737822216", "article_info": {"article_id": "6844903865737822216", "user_id": "1380642335503256", "category_id": "6809637767543259144", "tag_ids": [6809640392770715656, 6809640407484334093, 6809640614175604744, 6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903865737822216", "cover_image": "", "is_gfw": 0, "title": "[译] 将第三方动画库集成到项目中 — 第 1 部分", "brief_content": "创建以 CSS 为基础的动画可能是一个挑战。它们可能是复杂且耗时的。你是否需要在时间紧迫的情况下调整出一个完美的动画(库)来推进项目?这时,你该考虑使用一个拥有现成的动画插件的第三方 CSS 动画库。可是,你仍然会想:它们是什么?它们提供什么?我如何使用它们? 我们来看看吧。 …", "is_english": 0, "is_original": 1, "user_index": 8.7806176006279, "original_type": 0, "original_author": "", "content": "", "ctime": "1560509321", "mtime": "1599966935", "rtime": "1560509321", "draft_id": "6845076341839101965", "view_count": 578, "collect_count": 2, "digg_count": 4, "comment_count": 0, "hot_index": 32, "is_hot": 0, "rank_index": 0.00010607, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1380642335503256", "user_name": "掘金翻译计划", "company": "掘金", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/cf1ff385ef84b2ef6001a7caa39476f7~300x300.image", "level": 3, "description": "可能是世界最大最好的英译中技术社区,最懂读者和译者的翻译平台 🤔", "followee_count": 25, "follower_count": 3400, "post_article_count": 55, "digg_article_count": 63, "got_digg_count": 1372, "got_view_count": 311309, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 1, "power": 4485, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546515, "tag_id": "6809640392770715656", "tag_name": "HTML", "color": "#E44D25", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f18965b2a0ef9cac862e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239419, "mtime": 1631683077, "id_type": 9, "tag_alias": "", "post_article_count": 6109, "concern_user_count": 240134}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, 
"concern_user_count": 527704}, {"id": 2546676, "tag_id": "6809640614175604744", "tag_name": "掘金翻译计划", "color": "#0081ff", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/95f7e8be776556ab8d82.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1454716787, "mtime": 1631689800, "id_type": 9, "tag_alias": "", "post_article_count": 2502, "concern_user_count": 42848}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903865737822216, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": {"org_type": 1, "org_id": "6930489296285597696", "online_version_id": 6937212594310610981, "latest_version_id": 6937212594310610981, "power": 10141, "ctime": 1613630284, "mtime": 1631692819, "audit_status": 2, "status": 0, "org_version": {"version_id": "6937212594310610981", "icon": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/9763b1fa556f4cbd8ced21b60d3ed40c~tplv-k3u1fbpfcp-watermark.image", "background": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/2254bf401c3444129f8e3612c4b16308~tplv-k3u1fbpfcp-watermark.image", "name": "掘金翻译计划", "introduction": "# 掘金翻译计划\n\n\n[掘金翻译计划](https://juejin.im/tag/%E6%8E%98%E9%87%91%E7%BF%BB%E8%AF%91%E8%AE%A1%E5%88%92) 是一个翻译优质互联网技术文章的社区,文章来源为 [掘金](https://juejin.im) 上的英文分享文章。内容覆盖[区块链](#区块链)、[人工智能](#ai--deep-learning--machine-learning)、[Android](#android)、[iOS](#ios)、[前端](#前端)、[后端](#后端)、[设计](#设计)、[产品](#产品)、[算法](https://github.com/xitu/gold-miner/blob/master/algorithm.md)和[其他](#其他)等领域,以及各大型优质 [官方文档及手册](#官方文档及手册),读者为热爱新技术的新锐开发者。\n\n掘金翻译计划目前翻译完成 [2027](#近期文章列表) 余篇文章,官方文档及手册 [13](#官方文档及手册) 个,共有 [1000](https://github.com/xitu/gold-miner/wiki/%E8%AF%91%E8%80%85%E7%A7%AF%E5%88%86%E8%A1%A8) 余名译者贡献翻译和校对。\n\n# 官方指南\n\n[**推荐优质英文文章到掘金翻译计划**](https://github.com/xitu/gold-miner/issues/new/choose)\n\n<!--\nhttps://github.com/xitu/gold-miner/issues/new?title=推荐优秀英文文章&body=-%20原文链接:推荐文章前%20Google%20一下,尽量保证本文未被翻译%0A-%20简要介绍:介绍一下好不好啦,毕竟小编也看不太懂哎_(:з」∠)_)\n-->\n\n### 翻译计划译者教程\n\n1. [如何参与翻译](https://github.com/xitu/gold-miner/wiki/%E5%A6%82%E4%BD%95%E5%8F%82%E4%B8%8E%E7%BF%BB%E8%AF%91)\n2. [关于如何提交翻译以及后续更新的教程](https://github.com/xitu/gold-miner/wiki/%E5%85%B3%E4%BA%8E%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4%E7%BF%BB%E8%AF%91%E4%BB%A5%E5%8F%8A%E5%90%8E%E7%BB%AD%E6%9B%B4%E6%96%B0%E7%9A%84%E6%95%99%E7%A8%8B)\n3. [如何参与校对及校对的正确姿势](https://github.com/xitu/gold-miner/wiki/%E5%8F%82%E4%B8%8E%E6%A0%A1%E5%AF%B9%E7%9A%84%E6%AD%A3%E7%A1%AE%E5%A7%BF%E5%8A%BF)\n4. [文章分享到掘金指南](https://github.com/xitu/gold-miner/wiki/%E5%88%86%E4%BA%AB%E5%88%B0%E6%8E%98%E9%87%91%E6%8C%87%E5%8D%97)\n5. 
[译文排版规则指北](https://github.com/xitu/gold-miner/wiki/%E8%AF%91%E6%96%87%E6%8E%92%E7%89%88%E8%A7%84%E5%88%99%E6%8C%87%E5%8C%97)\n6.[积分兑换:小礼物列表](https://github.com/xitu/gold-miner/wiki/%E7%A7%AF%E5%88%86%E5%85%91%E6%8D%A2)\n\n\n\n\n", "weibo_link": "", "github_link": "https://github.com/xitu/gold-miner", "homepage_link": "", "ctime": 1615486318, "mtime": 1615486318, "org_id": "6930489296285597696", "brief_introduction": "一个帮助开发者成长的社区", "introduction_preview": "掘金翻译计划\n掘金翻译计划 是一个翻译优质互联网技术文章的社区,文章来源为 掘金 上的英文分享文章。内容覆盖区块链、人工智能、Android、iOS、前端、后端、设计、产品、算法和其他等领域,以及各大型优质 官方文档及手册,读者为热爱新技术的新锐开发者。\n掘金翻译计划目前翻译完成 2027 余篇文章,官方文档及手册 13 个,共有 1000 余名译者贡献翻译和校对。\n官方指南\n推荐优质英文文章到掘金翻译计划\n翻译计划译者教程\n\n如何参与翻译\n关于如何提交翻译以及后续更新的教程\n如何参与校对及校对的正确姿势\n文章分享到掘金指南\n译文排版规则指北\n6.积分兑换:小礼物列表\n"}, "follower_count": 1080, "article_view_count": 504149, "article_digg_count": 5100}, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903542549905416", "article_info": {"article_id": "6844903542549905416", "user_id": "2488950053713863", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640407484334093], "visible_level": 0, "link_url": "https://www.w3cplus.com/css/aspect-ratio.html", "cover_image": "", "is_gfw": 0, "title": "CSS实现长宽比的几种方案", "brief_content": "", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1514429375", "mtime": "1598443303", "rtime": "1514429375", "draft_id": "0", "view_count": 740, "collect_count": 16, "digg_count": 41, "comment_count": 0, "hot_index": 78, "is_hot": 0, "rank_index": 0.00010604, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2488950053713863", "user_name": "tony1915", "company": "", "job_title": "前端工程师、PHP工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/mosaic-legacy/3791/5070639578~300x300.image", "level": 1, "description": "", "followee_count": 12, "follower_count": 0, "post_article_count": 6, "digg_article_count": 16, "got_digg_count": 41, "got_view_count": 1667, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 5, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", 
"back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6844903542549905416, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903849723969544", "article_info": {"article_id": "6844903849723969544", "user_id": "2664871915159992", "category_id": "6809637767543259144", "tag_ids": [6809640392770715656, 6809640407484334093, 6809640614175604744, 6809640398105870343, 6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903849723969544", "cover_image": "", "is_gfw": 0, "title": "[译] 为什么 HTML 中复选框样式难写 — 本文给你答案", "brief_content": "在当今世界,大多数网页开发者认为掌握 JavaScript 是优先选择,这理所当然,因为 JS 是浏览器脚本语言。虽然 HTML 和 CSS 决定网站的样式,但是 JS 凭借它能调用 HTML 和 CSS API,优良性能以及它的多功能性,深受网页开发者喜爱。像 React、V…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1558602504", "mtime": "1598505345", "rtime": "1558612678", "draft_id": "6845076318858510343", "view_count": 737, "collect_count": 8, "digg_count": 6, "comment_count": 0, "hot_index": 42, "is_hot": 0, "rank_index": 0.00010561, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2664871915159992", "user_name": "JiLanlan", "company": "", "job_title": "前端", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/414db4da0d7bba57c2bba82f4f61daab~300x300.image", "level": 1, "description": "这个人很懒", "followee_count": 25, "follower_count": 9, "post_article_count": 1, "digg_article_count": 17, "got_digg_count": 6, "got_view_count": 737, "post_shortmsg_count": 19, "digg_shortmsg_count": 20, "isfollowed": false, "favorable_author": 0, "power": 13, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546515, "tag_id": "6809640392770715656", "tag_name": "HTML", "color": "#E44D25", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f18965b2a0ef9cac862e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239419, "mtime": 1631683077, "id_type": 9, "tag_alias": "", "post_article_count": 6109, "concern_user_count": 240134}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546676, "tag_id": "6809640614175604744", "tag_name": "掘金翻译计划", "color": "#0081ff", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/95f7e8be776556ab8d82.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1454716787, "mtime": 1631689800, "id_type": 9, "tag_alias": "", "post_article_count": 2502, "concern_user_count": 42848}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903849723969544, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903506370002958", "article_info": {"article_id": "6844903506370002958", "user_id": "413072061127479", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640562514198535, 6809640394175971342, 6809640956946546702], "visible_level": 0, "link_url": "https://css-tricks.com/creating-vue-js-transitions-animations/", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2017/10/26/adeb3e07b36267bd0bc6899754a96842~tplv-t2oaga2asx-image.image", "is_gfw": 1, "title": "[英] 如何使用 Vue.js 里的 Transition 和 Animation", "brief_content": "好用", "is_english": 1, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1508989799", "mtime": "1598617695", "rtime": "1508989799", "draft_id": "0", "view_count": 1081, "collect_count": 8, "digg_count": 28, "comment_count": 0, "hot_index": 82, "is_hot": 0, "rank_index": 0.00010511, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "413072061127479", "user_name": "阴明", "company": "TikTok", "job_title": "PM", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/26ad64c3c75daaff920a9eae4a6a7fa1~300x300.image", "level": 4, "description": "🌈 Crazy Monster!", "followee_count": 847, "follower_count": 78189, "post_article_count": 961, "digg_article_count": 3875, "got_digg_count": 33540, "got_view_count": 1805862, "post_shortmsg_count": 1359, "digg_shortmsg_count": 7099, "isfollowed": false, "favorable_author": 1, "power": 7189, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, 
"promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546639, "tag_id": "6809640562514198535", "tag_name": "动效", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/b08d8f2616e6e236fd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1441745685, "mtime": 1631632027, "id_type": 9, "tag_alias": "", "post_article_count": 1137, "concern_user_count": 60264}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546923, "tag_id": "6809640956946546702", "tag_name": "Element", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/2c670995272515e48cea.svg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489435335, "mtime": 1631690287, "id_type": 9, "tag_alias": "", "post_article_count": 1925, "concern_user_count": 15229}], "user_interact": {"id": 6844903506370002958, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6977353622891790366", "article_info": {"article_id": "6977353622891790366", "user_id": "1918010987388829", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/39e4eaeb23ec44019017374f1125cec7~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "CSS基础知识要点(Y5)", "brief_content": "1.【border】边框(布局时会占用空间) 1.1设置边框 1.2边框方向 1.3边框宽度 1.4圆角边框 2.【outline】轮廓线(不影响布局) (同边框一致)", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1624541928", "mtime": "1624608507", "rtime": "1624608507", "draft_id": "6977348211337855007", "view_count": 54, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 2, "is_hot": 0, "rank_index": 0.00010498, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1918010987388829", "user_name": "Web程序贵", "company": "南京巅峰数据服务有限公司", "job_title": "前端开发实习生", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/6790a317242054936b8b900663fa6fc5~300x300.image", "level": 1, "description": "前端小白,只会基础知识。", "followee_count": 5, "follower_count": 6, "post_article_count": 25, "digg_article_count": 8, "got_digg_count": 17, "got_view_count": 1455, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 31, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": 
false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6977353622891790366, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903645373267981", "article_info": {"article_id": "6844903645373267981", "user_id": "1275089218971021", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "https://zhuanlan.zhihu.com/p/40148221", "cover_image": "", "is_gfw": 0, "title": "使用CSS Grid的九大误区", "brief_content": "在Web世界中,大家都知道,使用任何一项新技术都易于犯错,特别是像CSS Grid这样的与过去有很大变化的东西。初学者或者有一定经验的Web开发人员,都无法一时之间就能把控所有。@Jen Simmons录制了一个视频,向大家阐述了使用CSS Grid的九大误区,也是使用CSS …", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1532409305", "mtime": "1598460652", "rtime": "1532409305", "draft_id": "0", "view_count": 875, "collect_count": 10, "digg_count": 19, "comment_count": 0, "hot_index": 62, "is_hot": 0, "rank_index": 0.00010467, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1275089218971021", "user_name": "清蒸不是水煮", "company": "前东家是掘金", "job_title": "前沸点运营", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/7/2/16459c1e94f61f29~tplv-t2oaga2asx-image.image", "level": 3, "description": "how r u today,掘金沸点了解下", "followee_count": 431, "follower_count": 13067, "post_article_count": 92, "digg_article_count": 2555, "got_digg_count": 2045, "got_view_count": 209280, "post_shortmsg_count": 267, "digg_shortmsg_count": 8818, "isfollowed": false, "favorable_author": 0, "power": 2713, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": 
"https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903645373267981, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903958452895752", "article_info": {"article_id": "6844903958452895752", "user_id": "2418581312908605", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903958452895752", "cover_image": "", "is_gfw": 0, "title": "CSS梳理之流的破坏float与BFC", "brief_content": "“流”,文档流,是CSS中最基本的定位和布局机制。 浏览器中的元素默认的从左到右(内联元素),从上到下(块级元素)如同流水一般堆砌的布局方式。 被设置了float属性的元素呈现包裹性,即其自身的width不是默认撑满父元素,而是和height属性一样由子元素决定。 脱离文档流,…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1570500757", "mtime": "1600147960", "rtime": "1570527267", "draft_id": "6845076485842141191", "view_count": 612, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 33, "is_hot": 0, "rank_index": 0.00010457, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2418581312908605", "user_name": "一不小心就😍 😞 😒", "company": "jsd", "job_title": "前端工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/16c69bd772ce8163f5b~tplv-t2oaga2asx-image.image", "level": 1, "description": "js、css、html一把梭", "followee_count": 5, "follower_count": 4, "post_article_count": 6, "digg_article_count": 3, "got_digg_count": 22, "got_view_count": 4855, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 70, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903958452895752, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6902797258434019336", "article_info": {"article_id": "6902797258434019336", "user_id": "4195392104709821", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "2d和3d变换", "brief_content": "transform-origin将当前变换原点(x1,y1,z1)变为(x1+x2,y1+y2,z1+z2)%pxem水平方向取值,相对元素的宽,leftcenterbottom垂直方向取值,相对元素", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1607182834", "mtime": "1607223129", "rtime": "1607223129", "draft_id": "6902793496462098445", "view_count": 170, "collect_count": 2, "digg_count": 2, "comment_count": 0, "hot_index": 10, "is_hot": 0, "rank_index": 0.00010438, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4195392104709821", "user_name": "moonlightop", "company": "", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/c69001f5df60f71b0dea1a46c99115d5~300x300.image", "level": 1, "description": "一个前端小白", "followee_count": 62, "follower_count": 3, "post_article_count": 32, "digg_article_count": 40, "got_digg_count": 19, "got_view_count": 3597, "post_shortmsg_count": 0, "digg_shortmsg_count": 19, "isfollowed": false, "favorable_author": 0, "power": 55, "study_point": 1130, "university": {"university_id": "6888594362209402887", "name": "广州大学", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 2, "select_event_count": 0, "select_online_course_count": 0, "identity": 1, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6902797258434019336, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903846930546695", "article_info": {"article_id": "6844903846930546695", "user_id": "694547079240798", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903846930546695", "cover_image": "", 
"is_gfw": 0, "title": "移动端适配--meta标签玩的是什么", "brief_content": "基本一直都在做移动端的开发,rem布局也写了很久,不过对于实现的原理有些模棱两可的盲点,自己总结一下留着以后回顾。 本文分以下几个层面,主打用最最通俗的语言来阐述。 viewport是什么?翻译过来就是视窗的意思,只不过在移动端,视窗稍微有点绕。在解释这个之前,不得不引出几个词…", "is_english": 0, "is_original": 1, "user_index": 0.65875638216419, "original_type": 0, "original_author": "", "content": "", "ctime": "1558275477", "mtime": "1599905366", "rtime": "1558275940", "draft_id": "6845076314139918350", "view_count": 746, "collect_count": 10, "digg_count": 4, "comment_count": 0, "hot_index": 41, "is_hot": 0, "rank_index": 0.00010411, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "694547079240798", "user_name": "Ace7523", "company": "58·转转", "job_title": "前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/2/7/168c5bafba01c7e8~tplv-t2oaga2asx-image.image", "level": 2, "description": "一个前端", "followee_count": 6, "follower_count": 258, "post_article_count": 15, "digg_article_count": 0, "got_digg_count": 517, "got_view_count": 35896, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 875, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903846930546695, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903551945146381", "article_info": {"article_id": "6844903551945146381", "user_id": "958429868601117", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903551945146381", "cover_image": "", "is_gfw": 0, "title": "【前端Talkking】CSS3系列-css3之线性渐变初探", "brief_content": "入行前端一年多的时间,想提高自己的css技术水平,于是在网上看了些关于css的书籍,想买几本比较好的css书籍啃啃,找来找去,终于找到了《CSS揭秘》这本书。入手这本书后,从开始看到后面,发现书中的很多效果都可以使用渐变来实现,于是,我对渐变产生了兴趣,决定好好掌握css3中的…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1516243112", "mtime": "1598687005", "rtime": "1516243112", "draft_id": "6845075367766540296", "view_count": 774, "collect_count": 18, "digg_count": 37, "comment_count": 0, "hot_index": 75, "is_hot": 0, "rank_index": 0.00010406, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "958429868601117", "user_name": "micstone", 
"company": "", "job_title": "前端工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/d38a1911eeb141a4525bd8219e57ba81~300x300.image", "level": 2, "description": "", "followee_count": 63, "follower_count": 38, "post_article_count": 14, "digg_article_count": 42, "got_digg_count": 242, "got_view_count": 8691, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 257, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903551945146381, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903602125799431", "article_info": {"article_id": "6844903602125799431", "user_id": "958429868601117", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903602125799431", "cover_image": "", "is_gfw": 0, "title": "【前端Talkking】CSS系列——CSS深入理解之absolute定位", "brief_content": "1. 
写在前面 本篇将要介绍的绝对定位absolute属性和此前介绍的CSS系列——CSS深入理解之float浮动有着几分的相似性,可以认为两者是兄弟关系,都具有“包裹性”、“高度塌陷”、“块状化”的特性,它们在很多场合都可以互相替代。很多人可能有这样的疑问:一个属性名是“po…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1525501365", "mtime": "1599468445", "rtime": "1525748308", "draft_id": "6845075421478780936", "view_count": 846, "collect_count": 12, "digg_count": 25, "comment_count": 0, "hot_index": 67, "is_hot": 0, "rank_index": 0.00010393, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "958429868601117", "user_name": "micstone", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/d38a1911eeb141a4525bd8219e57ba81~300x300.image", "level": 2, "description": "", "followee_count": 63, "follower_count": 38, "post_article_count": 14, "digg_article_count": 42, "got_digg_count": 242, "got_view_count": 8691, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 257, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903602125799431, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6946083880734556167", "article_info": {"article_id": "6946083880734556167", "user_id": "1530949760190653", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "css布局----flex弹性布局(移动端完美解决方案)", "brief_content": "Flexible Box 模型,通常被称为 flexbox,是一种一维的布局模型。它给 flexbox 的子元素之间提供了强大的空间分布和对齐能力。本文给出了 flexbox 的主要特性。 我们说 flexbox 是一种一维的布局,是因为一个 flexbox 一次只能处理一个维…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1617261260", "mtime": "1617268073", "rtime": "1617268073", "draft_id": "6946083236044865544", "view_count": 64, "collect_count": 1, "digg_count": 2, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00010386, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1530949760190653", "user_name": "李不要熬夜", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/d2ea4973fef32ce3195174fc72296119~300x300.image", "level": 1, 
"description": "", "followee_count": 2, "follower_count": 2, "post_article_count": 73, "digg_article_count": 0, "got_digg_count": 20, "got_view_count": 4330, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 63, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6946083880734556167, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903656135852046", "article_info": {"article_id": "6844903656135852046", "user_id": "3298190613550462", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://www.zcfy.cc/article/everything-you-need-to-know-about-css-variables", "cover_image": "", "is_gfw": 0, "title": "关于CSS变量你需要知道的一切", "brief_content": "这是我新书的第一章(PDF和Mobi格式都有)。 多数编程语言都支持变量。但是CSS从最初就一直缺少对原生变量的功能支持。 你写CSS吧?那你就没办法用变量。不过,你还可以用Sass这样的预编译器。 Sass这样的预编译器就把变量作为一个巨大的卖点,一个尝试新东西的原因。你知道…", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1534158219", "mtime": "1598463380", "rtime": "1534158219", "draft_id": "0", "view_count": 620, "collect_count": 8, "digg_count": 29, "comment_count": 0, "hot_index": 60, "is_hot": 0, "rank_index": 0.00010366, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3298190613550462", "user_name": "众成翻译", "company": "", "job_title": "翻译,求知的另一种表达", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/3/30/16275a86e4bb52a2~tplv-t2oaga2asx-image.image", "level": 2, "description": "众成翻译官方账号", "followee_count": 35, "follower_count": 7819, "post_article_count": 567, "digg_article_count": 235, "got_digg_count": 8123, "got_view_count": 268377, "post_shortmsg_count": 1, "digg_shortmsg_count": 6, "isfollowed": false, "favorable_author": 0, "power": 416, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", 
"rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903656135852046, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903655770947597", "article_info": {"article_id": "6844903655770947597", "user_id": "1398234520749448", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342, 6809641113071124493, 6809640482725953550], "visible_level": 0, "link_url": "https://juejin.im/post/6844903655770947597", "cover_image": "", "is_gfw": 0, "title": "BFC 初体验", "brief_content": "BFC(Block Formatting Context),中文翻译为“块格式化上下文”。它有一个明显的特性,那就是如果一个元素拥有了 BFC 特性,那么它内部元素不受外部元素的影响,外部元素也不会受其内部元素的影响。 先来看看 MDN 对 BFC 的解释。 好像也没具体的说什…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1534134794", "mtime": "1599566968", "rtime": "1534140750", "draft_id": "6845075593206202382", "view_count": 802, "collect_count": 19, "digg_count": 19, "comment_count": 1, "hot_index": 60, "is_hot": 0, "rank_index": 0.00010363, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1398234520749448", "user_name": "yyzclyang", "company": "平安", "job_title": "前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/168e08f23f2be8bbb96~tplv-t2oaga2asx-image.image", "level": 3, "description": "", "followee_count": 1, "follower_count": 51, "post_article_count": 27, "digg_article_count": 2, "got_digg_count": 675, "got_view_count": 40454, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 1079, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": 
"6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2547036, "tag_id": "6809641113071124493", "tag_name": "容器", "color": "", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/153024333442233331e5dd27e1829e0d4f73b714ce46b.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1530152271, "mtime": 1631452898, "id_type": 9, "tag_alias": "docker", "post_article_count": 2140, "concern_user_count": 4343}, {"id": 2546581, "tag_id": "6809640482725953550", "tag_name": "程序员", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/63baec1130bde0284e98.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1438712834, "mtime": 1631686409, "id_type": 9, "tag_alias": "", "post_article_count": 16341, "concern_user_count": 275512}], "user_interact": {"id": 6844903655770947597, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6902063717236604936", "article_info": {"article_id": "6902063717236604936", "user_id": "4195392104709821", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "box-shadow和text-shadow", "brief_content": "box盒模型定义参考文章:https://zhuanlan.zhihu.com/p/291489867盒子内部盒子外部content-box(标准盒模型)boxWidth=width+border2+", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1607012831", "mtime": "1607057355", "rtime": "1607057355", "draft_id": "6902062718887395341", "view_count": 167, "collect_count": 1, "digg_count": 2, "comment_count": 0, "hot_index": 10, "is_hot": 0, "rank_index": 0.00010347, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4195392104709821", "user_name": "moonlightop", "company": "", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/c69001f5df60f71b0dea1a46c99115d5~300x300.image", "level": 1, "description": "一个前端小白", "followee_count": 62, "follower_count": 3, "post_article_count": 32, "digg_article_count": 40, "got_digg_count": 19, "got_view_count": 3597, "post_shortmsg_count": 0, "digg_shortmsg_count": 19, "isfollowed": false, "favorable_author": 0, "power": 55, "study_point": 1130, "university": {"university_id": "6888594362209402887", "name": "广州大学", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 2, "select_event_count": 0, "select_online_course_count": 0, "identity": 1, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", 
"tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6902063717236604936, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903568923688967", "article_info": {"article_id": "6844903568923688967", "user_id": "3562073403955902", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640407484334093, 6809640381920051207, 6809640625856577549], "visible_level": 0, "link_url": "https://www.liayal.com/article/5a96599bca0de01ec9713c43", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/3/2/161e571ddff83cc6~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "初探CSS Grid布局", "brief_content": "在CSS3中纳入的Flex Box布局给前端开发者带来了极大的便利,它的强大是有目共睹的。很多以前需要以前复查代码实现的布局,现在通过 Flex Box 很容易就实现。而在下一版本的CSS规范中正在讨论纳入一个更加强大的布局系统,它就是今天要说的: CSS Grid Layou…", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1519972901", "mtime": "1598689120", "rtime": "1520219199", "draft_id": "0", "view_count": 812, "collect_count": 15, "digg_count": 30, "comment_count": 1, "hot_index": 71, "is_hot": 0, "rank_index": 0.0001031, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3562073403955902", "user_name": "MaelWeb", "company": "记小栈", "job_title": "Web攻城狮", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/168e08807ec8e0f3c6b~tplv-t2oaga2asx-image.image", "level": 0, "description": "游走在技术与艺术的边缘地带,偶是一枚前端攻城狮!", "followee_count": 3, "follower_count": 3, "post_article_count": 13, "digg_article_count": 10, "got_digg_count": 236, "got_view_count": 10228, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 0, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", 
"back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546507, "tag_id": "6809640381920051207", "tag_name": "Chrome", "color": "#4586F2", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/084db5f7bc6a239be270.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234593, "mtime": 1631675564, "id_type": 9, "tag_alias": "", "post_article_count": 2663, "concern_user_count": 131553}, {"id": 2546683, "tag_id": "6809640625856577549", "tag_name": "浏览器", "color": "#47ebc7", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/baf3558e2acdfa623201.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1460153459, "mtime": 1631677186, "id_type": 9, "tag_alias": "", "post_article_count": 3341, "concern_user_count": 28324}], "user_interact": {"id": 6844903568923688967, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6859685285127520269", "article_info": {"article_id": "6859685285127520269", "user_id": "1011206429616429", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/1f473330ce8d4127a0219a933de1f60f~tplv-k3u1fbpfcp-zoom-1.image", "is_gfw": 0, "title": "Re:从零开始の CSS 学习笔记——布局(下)", "brief_content": "container 中的子元素,与列数一一对应。且默认只占据第一行。所以添加 border 后 效果如下图所示", "is_english": 0, "is_original": 1, "user_index": 1.419022582702909, "original_type": 0, "original_author": "", "content": "", "ctime": "1597144973", "mtime": "1597210097", "rtime": "1597210097", "draft_id": "6859683107088531469", "view_count": 255, "collect_count": 0, "digg_count": 2, "comment_count": 0, "hot_index": 14, "is_hot": 0, "rank_index": 0.000103, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1011206429616429", "user_name": "SamRock", "company": "", "job_title": "随缘更新", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/7b99cc610e9bcf77091bfbe7c6414a2e~300x300.image", "level": 2, "description": "这人在尝试成为段子手/键盘侠/杠精失败后,终于回来老老实实的搬砖了 ~", "followee_count": 16, "follower_count": 29, "post_article_count": 38, "digg_article_count": 2, "got_digg_count": 58, "got_view_count": 12004, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 178, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6859685285127520269, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6901614433013858318", "article_info": {"article_id": "6901614433013858318", "user_id": "3799550220589549", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS盒模型", "brief_content": "在 CSS 中,所有的元素都被一个个的“盒子(box)”包围。每一个盒子包括内容(content)、内边距(padding)、边框(border)、外边距(margin),这些元素共同组成盒模型。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1606907364", "mtime": "1606972920", "rtime": "1606963918", "draft_id": "6901488070235947022", "view_count": 178, "collect_count": 0, "digg_count": 2, "comment_count": 0, "hot_index": 10, "is_hot": 0, "rank_index": 0.00010296, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3799550220589549", "user_name": "远方的朋友", "company": "", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/mosaic-legacy/3793/3131589739~300x300.image", "level": 1, "description": "", "followee_count": 1, "follower_count": 0, "post_article_count": 23, "digg_article_count": 1, "got_digg_count": 5, "got_view_count": 1269, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 17, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6901614433013858318, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903552637206535", "article_info": {"article_id": "6844903552637206535", "user_id": "4388906147515367", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640858137133064, 6809640394175971342, 6809640381920051207], "visible_level": 0, "link_url": "https://juejin.im/post/6844903552637206535", 
"cover_image": "", "is_gfw": 0, "title": "奇舞周刊第 244 期:Web 前端中的 AR 开发技术", "brief_content": "“在使用 React Native 开发中,我们熟练的采用 JavaScript 的方式发送请求的方式发送一个请求到服务端,但是处理这个请求的过程其实和处理 Web 应用中发送的请求的过程是不一样的。” 科普文一篇,前端方向的 AR 技术总结。 “我们需要明确的一点是: 重构不…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1516349545", "mtime": "1598444833", "rtime": "1516349545", "draft_id": "6845075368387280904", "view_count": 765, "collect_count": 9, "digg_count": 36, "comment_count": 0, "hot_index": 74, "is_hot": 0, "rank_index": 0.0001028, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4388906147515367", "user_name": "奇舞精选", "company": "奇虎360", "job_title": "前端", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/8235463c80ecb19922c8ee10c40a1ca6~300x300.image", "level": 4, "description": "《奇舞精选》是由奇舞团维护的前端技术社区。除周五外,每天向大家推荐一篇前端相关技术文章,每周五向大家推送汇总的奇舞周刊。", "followee_count": 17, "follower_count": 7188, "post_article_count": 35, "digg_article_count": 39, "got_digg_count": 6416, "got_view_count": 246181, "post_shortmsg_count": 0, "digg_shortmsg_count": 5, "isfollowed": false, "favorable_author": 1, "power": 8858, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 1, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546852, "tag_id": "6809640858137133064", "tag_name": "ECharts", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/31cb8ecaf02c4a8f966b.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489098685, "mtime": 1631675169, "id_type": 9, "tag_alias": "", "post_article_count": 882, "concern_user_count": 13707}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546507, "tag_id": "6809640381920051207", "tag_name": "Chrome", "color": "#4586F2", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/084db5f7bc6a239be270.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234593, "mtime": 1631675564, "id_type": 9, "tag_alias": "", "post_article_count": 2663, "concern_user_count": 131553}], "user_interact": {"id": 6844903552637206535, "omitempty": 2, 
"user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": {"org_type": 1, "org_id": "6930504142150434824", "online_version_id": 6932281596162605059, "latest_version_id": 6932281596162605059, "power": 20675, "ctime": 1613635306, "mtime": 1631692819, "audit_status": 2, "status": 0, "org_version": {"version_id": "6932281596162605059", "icon": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/067e419907a94f4aa1099af8d9ba3ed5~tplv-k3u1fbpfcp-watermark.image", "background": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/051403ec6e1c482fa371dea0ab6b6475~tplv-k3u1fbpfcp-watermark.image", "name": "奇舞团", "introduction": "奇舞团(75.team)是360集团最大的大前端团队,代表集团参与W3C和Ecma会员(TC39)工作,拥有Web前端、服务端、Android、iOS、设计等岗位人员,旗下的品牌和产品有SpriteJS、ThinkJS、MeshJS、Chimee、Firekylin、QMap、QCharts、JARVIS、QiShare、aTaller、声享、即视、众成翻译、奇舞学院、奇舞周刊等。\n\n奇舞团是一个向内赋能的团队。我们为新人设定合适的成长计划和方案,提供工程师、培训师和翻译官等多种角色定位,提供丰富的专业力、通用力和领导力培训课程;\n\n奇舞团是一个产生影响的团队。我们通过奇舞周刊传播Web前端技术,通过QiShare传播移动端技术,通过众成翻译传播高质量图书内容,通过成员和业务传播团队文化。\n\n奇舞团是一个开放和欢乐的团队,有着丰富和浓厚的团队文化。 每周一晚上,我们在泛前端分享会上交流知识; 每月末周四下午,我们在员工生日趴上喝茶吃瓜; 每月末周五上午,我们在周会上总结工作和认识新人; 每季度末,我们在颁奖会上为季度之星鼓掌; 每季度,我们都有机会在大草原上策马,在轰趴馆里玩耍,在农家院里打牌,携手朝山顶出发; 在每年7月5日,我们会拍一张全家福。", "weibo_link": "http://weibo.com/u/2565405913", "github_link": "", "homepage_link": "https://75.team/", "ctime": 1614052668, "mtime": 1614052668, "org_id": "6930504142150434824", "brief_introduction": "360奇舞团(奇虎75Team)是奇虎360公司技术中台前端工程师 + 部分特约嘉宾 组成的一个前端团队。 在这里,我们一起工作学习、一起沉淀、一起分享、一起为前端贡献影响。 开放是我们的特色,快乐是我们的使命。", "introduction_preview": "奇舞团(75.team)是360集团最大的大前端团队,代表集团参与W3C和Ecma会员(TC39)工作,拥有Web前端、服务端、Android、iOS、设计等岗位人员,旗下的品牌和产品有SpriteJS、ThinkJS、MeshJS、Chimee、Firekylin、QMap、QCharts、JARVIS、QiShare、aTaller、声享、即视、众成翻译、奇舞学院、奇舞周刊等。\n奇舞团是一个向内赋能的团队。我们为新人设定合适的成长计划和方案,提供工程师、培训师和翻译官等多种角色定位,提供丰富的专业力、通用力和领导力培训课程;\n奇舞团是一个产生影响的团队。我们通过奇舞周刊传播Web前端技术,通过QiShare传播移动端技术,通过众成翻译传播高质量图书内容,通过成员和业务传播团队文化。\n奇舞团是一个开放和欢乐的团队,有着丰富和浓厚的团队文化。 每周一晚上,我们在泛前端分享会上交流知识; 每月末周四下午,我们在员工生日趴上喝茶吃瓜; 每月末周五上午,我们在周会上总结工作和认识新人; 每季度末,我们在颁奖会上为季度之星鼓掌; 每季度,我们都有机会在大草原上策马,在轰趴馆里玩耍,在农家院里打牌,携手朝山顶出发; 在每年7月5日,我们会拍一张全家福。"}, "follower_count": 8019, "article_view_count": 894502, "article_digg_count": 11730}, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}, {"article_id": "6844903590843121671", "article_info": {"article_id": "6844903590843121671", "user_id": "800100193937240", "category_id": "6809637767543259144", "tag_ids": [6809640837895585805, 6809640398105870343, 6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "https://codeburst.io/front-end-v-back-end-explained-by-waiting-tables-at-a-restaurant-174000a4498d", "cover_image": "", "is_gfw": 0, "title": "只要你去过餐厅就能理解的前后端通俗解释", "brief_content": "If you have ever visited a sit-down restaurant, then you can understand the difference between front-end and back-end in web development. 
W…", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1523602862", "mtime": "1599463623", "rtime": "1523604632", "draft_id": "0", "view_count": 818, "collect_count": 8, "digg_count": 24, "comment_count": 4, "hot_index": 68, "is_hot": 0, "rank_index": 0.00010279, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "800100193937240", "user_name": "误入理工科的疯子", "company": "", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/a48c3088705b3cb173ade00b5f3974a4~300x300.image", "level": 2, "description": "我们生活中最大的现实就是超现实", "followee_count": 1, "follower_count": 185, "post_article_count": 18, "digg_article_count": 50, "got_digg_count": 261, "got_view_count": 35314, "post_shortmsg_count": 0, "digg_shortmsg_count": 4, "isfollowed": false, "favorable_author": 0, "power": 515, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546837, "tag_id": "6809640837895585805", "tag_name": "服务器", "color": "#a3abad", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/be1879c7e9983dab0049.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489042149, "mtime": 1631666741, "id_type": 9, "tag_alias": "", "post_article_count": 10408, "concern_user_count": 20830}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903590843121671, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045801020402403015006738"}], "cursor": "eyJ2IjoiNzAwNzgwMzIxNDc1ODE1MDE3NSIsImkiOjQyMjB9", "count": 4601, "has_more": true} | [
"[email protected]"
] | |
5b8aced9977d9f12adf0d4b703c3e25b1e55c899 | e16911f1fae7bf90f405e055e0f90731ae8c8042 | /etc/st2packgen/files/actions/lib/k8sbase.py | 89df63259b4fbf47136ae2a8cdf29077dfb9461e | [] | no_license | bobhenkel/stackstorm-kubernetes | 87136448434b1a6c821cfeb757f88833ca8ecf02 | 32b8538597bc5290a18cefadbf98fea7f8bb38bd | refs/heads/master | 2021-04-25T22:06:36.392650 | 2017-11-02T04:30:02 | 2017-11-02T04:30:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | from __future__ import absolute_import
from pyswagger.core import BaseClient
from requests import Session, Request
import six
import json
import base64
class Client(BaseClient):
# declare supported schemes here
__schemes__ = set(['http', 'https'])
def __init__(self, config=None, auth=None, send_opt=None, extraheaders=None):
""" constructor
:param auth pyswagger.SwaggerAuth: auth info used when requesting
:param send_opt dict: options used in requests.send, ex verify=False
"""
super(Client, self).__init__(auth)
if send_opt is None:
send_opt = {}
self.__s = Session()
self.__send_opt = send_opt
self.extraheaders = extraheaders
auth = base64.b64encode(config['user'] + ":" + config['password'])
self.authhead = {"authorization": "Basic " + auth}
def request(self, req_and_resp, opt):
# passing to parent for default patching behavior,
# applying authorizations, ...etc.
req, resp = super(Client, self).request(req_and_resp, opt)
req.prepare(scheme=self.prepare_schemes(req).pop(), handle_files=False)
req._patch(opt)
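        # repack pyswagger's file parameters into the (name, (filename, fileobj[, content-type]))
        # tuples that requests expects for multipart uploads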
file_obj = []
def append(name, obj):
f = obj.data or open(obj.filename, 'rb')
if 'Content-Type' in obj.header:
file_obj.append((name, (obj.filename, f, obj.header['Content-Type'])))
else:
file_obj.append((name, (obj.filename, f)))
for k, v in six.iteritems(req.files):
if isinstance(v, list):
for vv in v:
append(k, vv)
else:
append(k, v)
rq = Request(
method=req.method.upper(),
url=req.url,
params=req.query,
data=req.data,
headers=req.header,
files=file_obj
)
rq = self.__s.prepare_request(rq)
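        # force the basic-auth header built in __init__ onto the prepared request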
rq.headers.update(self.authhead)
rs = self.__s.send(rq, stream=True, **self.__send_opt)
myresp = {}
myresp['status'] = rs.status_code
myresp['data'] = json.loads(rs.content.rstrip())
# myresp['headers'] = rs.headers
return myresp
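# Minimal usage sketch: the spec URL, credentials and operation name below are placeholders
# (not real endpoints); only the general wiring of a pyswagger App with this Client is shown.
#     from pyswagger import App
#     config = {'user': 'admin', 'password': 'changeme'}        # hypothetical credentials
#     app = App.create('https://k8s.example.net/swaggerapi/')   # hypothetical swagger spec URL
#     client = Client(config=config)
#     result = client.request(app.op['listNamespacedPod'](namespace='default'), opt={})
#     print(result['status'], result['data'])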
| [
"[email protected]"
] | |
c06bcf0c5bf8278caf07c0496ba1c817c184ba8d | 3d2e5d1092acccfb73c07d68b6beeffc44b3f776 | /imitation/src/environments/simulation/pybullet_env.py | 10ef9e12e56c2333e0813282dd5bdfe598ed1611 | [] | no_license | MatthijsBiondina/WorldModels | f6cbcfe5349da7119329ef10831810d1b85c9d02 | ab468f1aa978e3aa4e05174db24922085d1e33b1 | refs/heads/master | 2022-12-22T11:54:46.040828 | 2020-09-23T11:41:48 | 2020-09-23T11:41:48 | 248,212,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | import gym
import pybulletgym
import numpy as np
from src.environments.general.environment_template import Environment
from src.utils import config as cfg
_ = pybulletgym
PREP_VECTORS = {'InvertedPendulumSwingupPyBulletEnv-v0': np.array([1, 0.2, 1, 1, 0.067], dtype=np.float16)}
def preprocess_observation(obs):
"""
:param obs: unprocessed observation
:return: normalized observation
"""
return np.clip(obs * PREP_VECTORS[cfg.env_name], -1., 1.)
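# Worked example (editor's note): with the pendulum vector above, an unprocessed observation
# [0.5, 2.0, 0.3, -0.2, 10.0] is scaled elementwise to roughly [0.5, 0.4, 0.3, -0.2, 0.67]
# and then clipped to the [-1, 1] range.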
class SimEnv(Environment):
def __init__(self, save_loc: str):
super().__init__(save_loc)
self.env = gym.make(cfg.env_name)
self.t = 0
self.actions = [np.zeros(self.action_size)] * cfg.latency
def reset(self):
"""
Reset environment
:return: observation at t=0
"""
self.t = 0
self.actions = [np.zeros(self.action_size)] * cfg.latency
return preprocess_observation(self.env.reset())
def step(self, action: np.ndarray):
"""
Perform action and observe next state. Action is repeated 'action_repeat' times.
:param action: the action to take
:return: next observation, reward, terminal state
"""
obs, done = None, None
reward = 0
self.actions.append(action)
for k in range(cfg.action_repeat):
obs, reward_k, done, _ = self.env.step(self.actions[0])
            reward += reward_k
            self.t += 1  # advance the step counter so the episode-length cap below can take effect
            done = done or self.t == cfg.max_episode_length
if done:
break
self.actions.pop(0)
return preprocess_observation(obs), reward, done
def render(self) -> np.ndarray:
"""
Renders the environment to RGB array
:return: frame capture of environment
"""
return self.env.render(mode='rgb_array')
def close(self):
"""
Cleanup
:return: n/a
"""
self.env.close()
def sample_random_action(self) -> np.ndarray:
"""
Sample an action randomly from a uniform distribution over all valid actions
:return: random action
"""
return self.env.action_space.sample()
@property
def obs_size(self) -> int:
"""
GETTER METHOD
:return: size of observations in this environment
"""
return self.env.observation_space.shape[0]
@property
def action_size(self):
"""
GETTER METHOD
:return: size of actions in this environment
"""
return self.env.action_space.shape[0]
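# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Assumes cfg.env_name is one of the keys in PREP_VECTORS and that the save location exists.
#
#   env = SimEnv(save_loc='/tmp/runs')
#   obs = env.reset()
#   done = False
#   while not done:
#       obs, reward, done = env.step(env.sample_random_action())
#   env.close()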
| [
"[email protected]"
] | |
c8a58abf83afbf6366b65b7dc1ee8f6a5d6ef831 | 24ffbd64e1892ab633ca785e969ccef43f17a9f2 | /picomotor/devices/h2_yr.py | efa1098cd7f197e7875e4fee3720cf40bfa6fb58 | [] | no_license | yesrgang/labrad_tools.srq | e29fcbfc4f5228955de1faddab6a66df52ccdd03 | 0dfbf2609d2f7a7e499167decedb0d9ea3677978 | refs/heads/master | 2021-06-18T19:59:21.448762 | 2021-02-04T22:03:49 | 2021-02-04T22:03:49 | 155,478,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from picomotor.devices.nf8742.device import NF8742
class Motor(NF8742):
socket_address = ('192.168.1.20', 23)
controller_axis = 4
Device = Motor
| [
"[email protected]"
] | |
cf26d52e9926a5a057a1fb70657bda084f53ef49 | 60b1f668808de2b82c2fcb62b07b45bb165219f2 | /egoi-api/models/form.py | 4e4d4d5517af495318cbbc38c7b97704ef21786d | [] | no_license | andersonmiguel/Egoi | 6d37bf7a3a7555e764f7a6e792b3ef1c68fe8e20 | b5f59f9b33ea94e170f4e7e26c6a37a78d2874c2 | refs/heads/master | 2022-06-21T07:18:44.920786 | 2020-05-04T17:29:02 | 2020-05-04T17:29:02 | 261,250,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,120 | py | # coding: utf-8
"""
APIv3 (Beta)
# Introduction Just a quick peek!!! This is our new version of API. Remember, it is not stable yet!!! But we invite you play with it and give us your feedback ;) # Getting Started E-goi can be integrated with many environments and programming languages via our REST API. We've created a developer focused portal to give your organization a clear and quick overview of how to integrate with E-goi. The developer portal focuses on scenarios for integration and flow of events. We recommend familiarizing yourself with all of the content in the developer portal, before start using our rest API. The E-goi APIv3 is served over HTTPS. To ensure data privacy, unencrypted HTTP is not supported. Request data is passed to the API by POSTing JSON objects to the API endpoints with the appropriate parameters. BaseURL = api.egoiapp.com # RESTful Services This API supports 5 HTTP methods: * <b>GET</b>: The HTTP GET method is used to **read** (or retrieve) a representation of a resource. * <b>POST</b>: The POST verb is most-often utilized to **create** new resources. * <b>PATCH</b>: PATCH is used for **modify** capabilities. The PATCH request only needs to contain the changes to the resource, not the complete resource * <b>PUT</b>: PUT is most-often utilized for **update** capabilities, PUT-ing to a known resource URI with the request body containing the newly-updated representation of the original resource. * <b>DELETE</b>: DELETE is pretty easy to understand. It is used to **delete** a resource identified by a URI. # Authentication We use a custom authentication method, you will need a apikey that you can find in your account settings. Below you will see a curl example to get your account information: #!/bin/bash curl -X GET 'https://api.egoiapp.com/my-account' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' Here you can see a curl Post example with authentication: #!/bin/bash curl -X POST 'http://api.egoiapp.com/tags' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' \\ -H 'Content-Type: application/json' \\ -d '{`name`:`Your custom tag`,`color`:`#FFFFFF`}' # SDK Get started quickly with E-goi with our integration tools. Our SDK is a modern open source library that makes it easy to integrate your application with E-goi services. * <b><a href='https://github.com/E-goi/sdk-java'>Java</a></b> * <b><a href='https://github.com/E-goi/sdk-php'>PHP</a></b> * <b><a href='https://github.com/E-goi/sdk-python'>Python</a></b> <security-definitions/> # noqa: E501
The version of the OpenAPI document: 3.0.0-beta
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
# NOTE: the package directory is named "egoi-api", which is not importable as-is; the
# underscore spelling below is an assumed correction of the generated import.
from egoi_api.configuration import Configuration
class Form(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'form_id': 'int',
'internal_title': 'str',
'title': 'str',
'language': 'Language',
'list_id': 'int',
'default': 'bool',
'owner': 'int',
'status': 'str',
'created': 'datetime',
'updated': 'datetime'
}
attribute_map = {
'form_id': 'form_id',
'internal_title': 'internal_title',
'title': 'title',
'language': 'language',
'list_id': 'list_id',
'default': 'default',
'owner': 'owner',
'status': 'status',
'created': 'created',
'updated': 'updated'
}
def __init__(self, form_id=None, internal_title='$request.body#/title', title=None, language=None, list_id=None, default=None, owner=None, status=None, created=None, updated=None, local_vars_configuration=None): # noqa: E501
"""Form - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._form_id = None
self._internal_title = None
self._title = None
self._language = None
self._list_id = None
self._default = None
self._owner = None
self._status = None
self._created = None
self._updated = None
self.discriminator = None
if form_id is not None:
self.form_id = form_id
if internal_title is not None:
self.internal_title = internal_title
self.title = title
if language is not None:
self.language = language
if list_id is not None:
self.list_id = list_id
if default is not None:
self.default = default
if owner is not None:
self.owner = owner
if status is not None:
self.status = status
if created is not None:
self.created = created
if updated is not None:
self.updated = updated
@property
def form_id(self):
"""Gets the form_id of this Form. # noqa: E501
:return: The form_id of this Form. # noqa: E501
:rtype: int
"""
return self._form_id
@form_id.setter
def form_id(self, form_id):
"""Sets the form_id of this Form.
:param form_id: The form_id of this Form. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
form_id is not None and form_id < 1): # noqa: E501
raise ValueError("Invalid value for `form_id`, must be a value greater than or equal to `1`") # noqa: E501
self._form_id = form_id
@property
def internal_title(self):
"""Gets the internal_title of this Form. # noqa: E501
Internal title of the form # noqa: E501
:return: The internal_title of this Form. # noqa: E501
:rtype: str
"""
return self._internal_title
@internal_title.setter
def internal_title(self, internal_title):
"""Sets the internal_title of this Form.
Internal title of the form # noqa: E501
:param internal_title: The internal_title of this Form. # noqa: E501
:type: str
"""
self._internal_title = internal_title
@property
def title(self):
"""Gets the title of this Form. # noqa: E501
Title of the form # noqa: E501
:return: The title of this Form. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this Form.
Title of the form # noqa: E501
:param title: The title of this Form. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and title is None: # noqa: E501
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
self._title = title
@property
def language(self):
"""Gets the language of this Form. # noqa: E501
:return: The language of this Form. # noqa: E501
:rtype: Language
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this Form.
:param language: The language of this Form. # noqa: E501
:type: Language
"""
self._language = language
@property
def list_id(self):
"""Gets the list_id of this Form. # noqa: E501
:return: The list_id of this Form. # noqa: E501
:rtype: int
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this Form.
:param list_id: The list_id of this Form. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
list_id is not None and list_id < 1): # noqa: E501
raise ValueError("Invalid value for `list_id`, must be a value greater than or equal to `1`") # noqa: E501
self._list_id = list_id
@property
def default(self):
"""Gets the default of this Form. # noqa: E501
True if this is the default form in the list, false otherwise # noqa: E501
:return: The default of this Form. # noqa: E501
:rtype: bool
"""
return self._default
@default.setter
def default(self, default):
"""Sets the default of this Form.
True if this is the default form in the list, false otherwise # noqa: E501
:param default: The default of this Form. # noqa: E501
:type: bool
"""
self._default = default
@property
def owner(self):
"""Gets the owner of this Form. # noqa: E501
:return: The owner of this Form. # noqa: E501
:rtype: int
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this Form.
:param owner: The owner of this Form. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
owner is not None and owner < 1): # noqa: E501
raise ValueError("Invalid value for `owner`, must be a value greater than or equal to `1`") # noqa: E501
self._owner = owner
@property
def status(self):
"""Gets the status of this Form. # noqa: E501
Status of the form # noqa: E501
:return: The status of this Form. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Form.
Status of the form # noqa: E501
:param status: The status of this Form. # noqa: E501
:type: str
"""
allowed_values = ["active", "unpublished", "cloned", "deleted"] # noqa: E501
if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def created(self):
"""Gets the created of this Form. # noqa: E501
The date and time # noqa: E501
:return: The created of this Form. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this Form.
The date and time # noqa: E501
:param created: The created of this Form. # noqa: E501
:type: datetime
"""
self._created = created
@property
def updated(self):
"""Gets the updated of this Form. # noqa: E501
The date and time # noqa: E501
:return: The updated of this Form. # noqa: E501
:rtype: datetime
"""
return self._updated
@updated.setter
def updated(self, updated):
"""Sets the updated of this Form.
The date and time # noqa: E501
:param updated: The updated of this Form. # noqa: E501
:type: datetime
"""
self._updated = updated
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Form):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Form):
return True
return self.to_dict() != other.to_dict()
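# --- Illustrative usage (editor's sketch, not part of the generated model) ---
# Only 'title' is required; the other keyword arguments mirror the attributes documented above.
#
#   form = Form(title='Newsletter signup', internal_title='signup-2020', list_id=1)
#   payload = form.to_dict()   # plain dict, ready for JSON serialization
#   print(form)                # readable dump via to_str()/pprint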
| [
"[email protected]"
] | |
092db6afd0b046dcf1485a91be052fd57d5c502e | a177931c2914cc9820c578add9d57aa6c75084ce | /tips/customHTML/test_genTABHTML.py | cfd92464403354ae73e44a3df5bc666a81d2eb93 | [] | no_license | zhangshoug/others | 45d94f96701362cb077eb994c27295247a6fb712 | 3a8a8366f2598a5e88b44d18d346e81f4eef659e | refs/heads/master | 2022-12-18T22:37:13.505543 | 2020-09-28T08:54:28 | 2020-09-28T08:54:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,021 | py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: test_genTABHTML
Description : tab css style test
Author : pchaos
date: 2019/9/9
-------------------------------------------------
Change Activity:
2019/9/9:
-------------------------------------------------
"""
import unittest
from unittest import TestCase
from .genTabHTML import genTABHTML
class TestGenTABHTML(TestCase):
def test_genHTML(self):
        # List of file names to generate. The template file is template.html; each output's
        # data file is the output file name + ".ini".
flist = ["main.htm", "main_tech.htm", "hacker.html"]
# inifile = '{}.ini'.format(flist[0])
renderList = []
for fn in flist:
inifile = '{}.ini'.format(fn)
gh = genTABHTML()
# gh.outputFilename = fn
gh.outputFilename = "test"
gh.iniFilename = inifile
try:
templateFile = "customHTML/template.tab.table.html"
of, render = gh.genHTML(None,
# of, render = gh.genHTML("a{}".format(fn),
title=fn.split(".")[0],
prettify=False,
template=templateFile)
except Exception as e:
templateFile = "template.tab.table.html"
of, render = gh.genHTML(None,
# of, render = gh.genHTML("a{}".format(fn),
title=fn.split(".")[0],
prettify=False,
template=templateFile)
print("输出文件完成 {}".format(of))
# print(render)
self.assertTrue(len(render) > 100)
renderList.append(render)
print(renderList)
# main
inifile = '{}.ini'.format(flist[0])
gh = genTABHTML()
# gh.outputFilename = fn
gh.iniFilename = inifile
try:
templateFile = "template.tab.html"
render = gh.renders(renderList,
prettify=True,
# template="customHTML/template.tab.html",
template=templateFile,
title="Main")
except Exception as e:
templateFile = "customHTML/template.tab.html"
render = gh.renders(renderList,
prettify=True,
# template="customHTML/template.tab.html",
template=templateFile,
title="Main")
saveText = ""
for r in render:
saveText += r
gh.save('main.htm', saveText)
print("输出文件完成 {}".format(render))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6b7ec47b7dfaed08aeefb1d1ec11acaff71addf7 | 447e9ec821dc7505cc9b73fb7abeb220fe2b3a86 | /rvpy/logistic.py | 2d66e011e93fb9f8e4dc0e7ab086276b4445ba04 | [
"MIT"
] | permissive | timbook/rvpy | ecd574f91ed50fd47b6ead8517954f01e33c03a7 | 301fd61df894d4b300176e287bf9e725378c38eb | refs/heads/master | 2020-03-19T04:01:49.283213 | 2018-12-18T19:21:07 | 2018-12-18T19:21:07 | 135,788,512 | 1 | 0 | MIT | 2018-12-18T19:21:08 | 2018-06-02T04:55:39 | Python | UTF-8 | Python | false | false | 3,722 | py | import numpy as np
from math import log, exp
from scipy.stats import logistic, fisk
from . import distribution
class Logistic(distribution.Distribution):
"""
Logistic Distribution using the following parameterization:
f(x | loc, scale) = exp(-z) / (s * (1 + exp(-z))^2)
where z = (x - loc) / scale
Parameters
----------
    loc : float
Location parameter
scale : float, positive
Scale parameter
Methods
-------
exp()
Transforms self to LogLogistic
Relationships
-------------
Let X be Logistic, a, b float. Then:
* aX + b is Logistic
* exp(X) is Log-Logistic
"""
def __init__(self, loc=0, scale=1):
"""
Parameters
----------
        loc : float
Location parameter
scale : float, positive
Scale parameter
"""
assert scale > 0, "scale parameter must be positive"
# Parameters
self.loc = loc
self.scale = scale
# Scipy backend
self.sp = logistic(loc=loc, scale=scale)
super().__init__()
def __repr__(self):
return f"Logistic(loc={self.loc}, scale={self.scale})"
def __add__(self, other):
if isinstance(other, (int, float)):
return Logistic(self.loc + other, self.scale)
else:
raise TypeError(f"Can't add or subtract objects of type {type(other)} to Logistic")
def __mul__(self, other):
if isinstance(other, (int, float)):
            # keep the scale positive when multiplying by a negative constant
            return Logistic(other * self.loc, abs(other) * self.scale)
else:
raise TypeError(f"Can't multiply objects of type {type(other)} by Logistic")
def __truediv__(self, other):
if isinstance(other, (int, float)):
return self.__mul__(1/other)
else:
raise TypeError(f"Can't divide objects of type {type(other)} by Logistic")
def exp(self):
return LogLogistic(alpha=exp(self.loc), beta=1/self.scale)
# TODO: Gumbel - Gumbel = Logistic
class LogLogistic(distribution.Distribution):
"""
LogLogistic Distribution using the following parameterization:
f(x | a, b) = (b/a) * (x/a)^(b-1) / (1 + (x/a)^b)^2
Parameters
----------
alpha : float, positive
Scale parameter
beta : float, positive
Shape parameter
Methods
-------
log()
Transforms self to Logistic
Relationships
-------------
Let X be LogLogistic, k > 0 float. Then:
* kX is LogLogistic
* log(X) is Logistic
"""
def __init__(self, alpha, beta):
"""
Parameters
----------
alpha : float, positive
Scale parameter
beta : float, positive
Shape parameter
"""
assert alpha > 0, "alpha must be positive"
        assert beta > 0, "beta must be positive"
# Parameters
self.alpha = alpha
self.beta = beta
# Scipy backend
self.sp = fisk(c=beta, scale=alpha)
super().__init__()
def __repr__(self):
return f"LogLogistic(alpha={self.alpha}, beta={self.beta})"
def __mul__(self, other):
if isinstance(other, (int, float)):
return LogLogistic(other*self.alpha, self.beta)
else:
raise TypeError(f"Can't multiply objects of type {type(other)} by LogLogistic")
def __truediv__(self, other):
if isinstance(other, (int, float)):
return self.__mul__(1/other)
else:
raise TypeError(f"Can't divide objects of type {type(other)} by LogLogistic")
def log(self):
return Logistic(loc=np.log(self.alpha), scale=1/self.beta)
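# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Demonstrates the documented relationship exp(Logistic) -> LogLogistic and back.
#
#   X = Logistic(loc=1.0, scale=2.0)
#   Y = X.exp()    # LogLogistic(alpha=e**1.0, beta=0.5)
#   Z = Y.log()    # Logistic(loc=1.0, scale=2.0) again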
| [
"[email protected]"
] | |
df016bf13355458c6083ae6c2005a1cebd3ceecb | 7b6313d1c4e0e8a5bf34fc8ac163ad446bc69354 | /datastructure and algorithms/[hackerrank]The Hurdle Race.py | 5bcab2ab43d0415da1bf267cba2ff15bee29380b | [] | no_license | menuka-maharjan/competitive_programming | c6032ae3ddcbc974e0e62744989a2aefa30864b2 | 22d0cea0f96d8bd6dc4d81b146ba20ea627022dd | refs/heads/master | 2023-05-01T05:23:09.641733 | 2021-05-23T16:22:21 | 2021-05-23T16:22:21 | 332,250,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | nk=input().split()
n=int(nk[0])
k=int(nk[1])
l=list(map(int,input().rstrip().split()))
x=max(l)
if((x-k)>=0):
print(x-k)
else:
print(0)
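# Editor's note: the answer is how far the tallest hurdle exceeds the natural jump height k,
# i.e. max(0, max(l) - k) doses of potion; the branch above prints 0 when no potion is needed.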
| [
"[email protected]"
] | |
85596fb3ff870c316d4d7b3553f515d5d673f9b9 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007/desktop/kde/autostart/actions.py | 5bd7b2827ebfb6bdfc4093743e2fb7ed2daacc96 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import kde
def setup():
kde.configure()
def build():
kde.make()
def install():
kde.install() | [
"[email protected]"
] | |
677993bbfd1033c8a7be8606b387754616bdceda | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_tree_isomorphism.py | 3082365a4bb61f2d8c99fcddb56c72e2af1d0aeb | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,443 | py | import networkx as nx
import random
import time
from networkx.classes.function import is_directed
from networkx.algorithms.isomorphism.tree_isomorphism import (
rooted_tree_isomorphism,
tree_isomorphism,
)
# have this work for graph
# given two trees (either the directed or undirected)
# transform t2 according to the isomorphism
# and confirm it is identical to t1
# randomize the order of the edges when constructing
def check_isomorphism(t1, t2, isomorphism):
# get the name of t1, given the name in t2
mapping = {v2: v1 for (v1, v2) in isomorphism}
# these should be the same
d1 = is_directed(t1)
d2 = is_directed(t2)
assert d1 == d2
edges_1 = []
for (u, v) in t1.edges():
if d1:
edges_1.append((u, v))
else:
# if not directed, then need to
# put the edge in a consistent direction
if u < v:
edges_1.append((u, v))
else:
edges_1.append((v, u))
edges_2 = []
for (u, v) in t2.edges():
# translate to names for t1
u = mapping[u]
v = mapping[v]
if d2:
edges_2.append((u, v))
else:
if u < v:
edges_2.append((u, v))
else:
edges_2.append((v, u))
return sorted(edges_1) == sorted(edges_2)
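# Editor's note: `isomorphism` is a list of (node_in_t1, node_in_t2) pairs, e.g.
# check_isomorphism(t1, t2, [("a", "n"), ("b", "q"), ...]) returns True only when relabelling
# t2 through that mapping reproduces exactly the edge set of t1.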
def test_hardcoded():
print("hardcoded test")
# define a test problem
edges_1 = [
("a", "b"),
("a", "c"),
("a", "d"),
("b", "e"),
("b", "f"),
("e", "j"),
("e", "k"),
("c", "g"),
("c", "h"),
("g", "m"),
("d", "i"),
("f", "l"),
]
edges_2 = [
("v", "y"),
("v", "z"),
("u", "x"),
("q", "u"),
("q", "v"),
("p", "t"),
("n", "p"),
("n", "q"),
("n", "o"),
("o", "r"),
("o", "s"),
("s", "w"),
]
# there are two possible correct isomorphisms
# it currently returns isomorphism1
# but the second is also correct
isomorphism1 = [
("a", "n"),
("b", "q"),
("c", "o"),
("d", "p"),
("e", "v"),
("f", "u"),
("g", "s"),
("h", "r"),
("i", "t"),
("j", "y"),
("k", "z"),
("l", "x"),
("m", "w"),
]
# could swap y and z
isomorphism2 = [
("a", "n"),
("b", "q"),
("c", "o"),
("d", "p"),
("e", "v"),
("f", "u"),
("g", "s"),
("h", "r"),
("i", "t"),
("j", "z"),
("k", "y"),
("l", "x"),
("m", "w"),
]
t1 = nx.Graph()
t1.add_edges_from(edges_1)
root1 = "a"
t2 = nx.Graph()
t2.add_edges_from(edges_2)
root2 = "n"
isomorphism = sorted(rooted_tree_isomorphism(t1, root1, t2, root2))
# is correct by hand
assert (isomorphism == isomorphism1) or (isomorphism == isomorphism2)
# check algorithmically
assert check_isomorphism(t1, t2, isomorphism)
# try again as digraph
t1 = nx.DiGraph()
t1.add_edges_from(edges_1)
root1 = "a"
t2 = nx.DiGraph()
t2.add_edges_from(edges_2)
root2 = "n"
isomorphism = sorted(rooted_tree_isomorphism(t1, root1, t2, root2))
# is correct by hand
assert (isomorphism == isomorphism1) or (isomorphism == isomorphism2)
# check algorithmically
assert check_isomorphism(t1, t2, isomorphism)
# randomly swap a tuple (a,b)
def random_swap(t):
(a, b) = t
if random.randint(0, 1) == 1:
return (a, b)
else:
return (b, a)
# given a tree t1, create a new tree t2
# that is isomorphic to t1, with a known isomorphism
# and test that our algorithm found the right one
def positive_single_tree(t1):
assert nx.is_tree(t1)
nodes1 = [n for n in t1.nodes()]
# get a random permutation of this
nodes2 = nodes1.copy()
random.shuffle(nodes2)
# this is one isomorphism, however they may be multiple
# so we don't necessarily get this one back
someisomorphism = [(u, v) for (u, v) in zip(nodes1, nodes2)]
# map from old to new
map1to2 = {u: v for (u, v) in someisomorphism}
# get the edges with the transformed names
edges2 = [random_swap((map1to2[u], map1to2[v])) for (u, v) in t1.edges()]
# randomly permute, to ensure we're not relying on edge order somehow
random.shuffle(edges2)
# so t2 is isomorphic to t1
t2 = nx.Graph()
t2.add_edges_from(edges2)
# lets call our code to see if t1 and t2 are isomorphic
isomorphism = tree_isomorphism(t1, t2)
# make sure we got a correct solution
# although not necessarily someisomorphism
assert len(isomorphism) > 0
assert check_isomorphism(t1, t2, isomorphism)
# run positive_single_tree over all the
# non-isomorphic trees for k from 4 to maxk
# k = 4 is the first level that has more than 1 non-isomorphic tree
# k = 13 takes about 2.86 seconds to run on my laptop
# larger values run slow down significantly
# as the number of trees grows rapidly
def test_positive(maxk=14):
print("positive test")
for k in range(2, maxk + 1):
start_time = time.time()
trial = 0
for t in nx.nonisomorphic_trees(k):
positive_single_tree(t)
trial += 1
print(k, trial, time.time() - start_time)
# test the trivial case of a single node in each tree
# note that nonisomorphic_trees doesn't work for k = 1
def test_trivial():
print("trivial test")
# back to an undirected graph
t1 = nx.Graph()
t1.add_node("a")
root1 = "a"
t2 = nx.Graph()
t2.add_node("n")
root2 = "n"
isomorphism = rooted_tree_isomorphism(t1, root1, t2, root2)
assert isomorphism == [("a", "n")]
assert check_isomorphism(t1, t2, isomorphism)
# test another trivial case where the two graphs have
# different numbers of nodes
def test_trivial_2():
print("trivial test 2")
edges_1 = [("a", "b"), ("a", "c")]
edges_2 = [("v", "y")]
t1 = nx.Graph()
t1.add_edges_from(edges_1)
t2 = nx.Graph()
t2.add_edges_from(edges_2)
isomorphism = tree_isomorphism(t1, t2)
# they cannot be isomorphic,
# since they have different numbers of nodes
assert isomorphism == []
# the function nonisomorphic_trees generates all the non-isomorphic
# trees of a given size. Take each pair of these and verify that
# they are not isomorphic
# k = 4 is the first level that has more than 1 non-isomorphic tree
# k = 11 takes about 4.76 seconds to run on my laptop
# larger values run slow down significantly
# as the number of trees grows rapidly
def test_negative(maxk=11):
print("negative test")
for k in range(4, maxk + 1):
test_trees = list(nx.nonisomorphic_trees(k))
start_time = time.time()
trial = 0
for i in range(len(test_trees) - 1):
for j in range(i + 1, len(test_trees)):
trial += 1
assert tree_isomorphism(test_trees[i], test_trees[j]) == []
print(k, trial, time.time() - start_time)
| [
"[email protected]"
] | |
a3c03bb30d7ab9d2444696500ece8c13bfd13edd | 2fabea234735beefc980b77b213fcb0dfb394980 | /tensorflow_probability/python/math/sparse_test.py | aca018215524f5574b3df657c781c4d51d85533d | [
"Apache-2.0"
] | permissive | tarrou/probability | 0eee452b525a6e6b3c7c98d467468e47f07e861b | d4d80a1c04ad0b3e98758ebc3f7f82887274384d | refs/heads/master | 2020-08-08T11:16:42.441268 | 2019-12-06T17:35:17 | 2019-12-06T17:35:17 | 213,819,828 | 0 | 0 | Apache-2.0 | 2019-10-09T04:20:19 | 2019-10-09T04:20:19 | null | UTF-8 | Python | false | false | 6,549 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sparse ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
def _assert_sparse_tensor_value(test_case_instance, expected, actual):
test_case_instance.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case_instance.assertAllEqual(expected.indices, actual.indices)
test_case_instance.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case_instance.assertAllEqual(expected.values, actual.values)
test_case_instance.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case_instance.assertAllEqual(expected.dense_shape, actual.dense_shape)
@test_util.run_all_in_graph_and_eager_modes
class SparseTest(test_case.TestCase):
# Copied (with modifications) from:
# tensorflow/contrib/layers/python/ops/sparse_ops.py.
def test_dense_to_sparse_1d(self):
st = tfp.math.dense_to_sparse([1, 0, 2, 0])
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.int32)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([1, 2], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_1d_float(self):
st = tfp.math.dense_to_sparse([1.5, 0.0, 2.3, 0.0])
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.float32)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllClose([1.5, 2.3], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_1d_bool(self):
st = tfp.math.dense_to_sparse([True, False, True, False])
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.bool)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([True, True], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_1d_str(self):
st = tfp.math.dense_to_sparse([b'qwe', b'', b'ewq', b''])
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.object)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([b'qwe', b'ewq'], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_1d_str_special_ignore(self):
st = tfp.math.dense_to_sparse(
[b'qwe', b'', b'ewq', b''], ignore_value=b'qwe')
result = self.evaluate(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.object)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[1], [2], [3]], result.indices)
self.assertAllEqual([b'', b'ewq', b''], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_2d(self):
st = tfp.math.dense_to_sparse([[1, 2, 0, 0], [3, 4, 5, 0]])
result = self.evaluate(st)
self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5], result.values)
self.assertAllEqual([2, 4], result.dense_shape)
def test_dense_to_sparse_3d(self):
st = tfp.math.dense_to_sparse(
[[[1, 2, 0, 0],
[3, 4, 5, 0]],
[[7, 8, 0, 0],
[9, 0, 0, 0]]])
result = self.evaluate(st)
self.assertAllEqual(
[[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.dense_shape)
def test_dense_to_sparse_unknown_1d_shape(self):
tensor = tf1.placeholder_with_default(
np.array([0, 100, 0, 3], np.int32), shape=[None])
st = tfp.math.dense_to_sparse(tensor)
result = self.evaluate(st)
self.assertAllEqual([[1], [3]], result.indices)
self.assertAllEqual([100, 3], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_unknown_3d_shape(self):
tensor = tf1.placeholder_with_default(
np.array([[[1, 2, 0, 0], [3, 4, 5, 0]], [[7, 8, 0, 0], [9, 0, 0, 0]]],
np.int32),
shape=[None, None, None])
st = tfp.math.dense_to_sparse(tensor)
result = self.evaluate(st)
self.assertAllEqual(
[[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[0, 1, 2],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.dense_shape)
def test_dense_to_sparse_unknown_rank(self):
ph = tf1.placeholder_with_default(
np.array([[1, 2, 0, 0], [3, 4, 5, 0]], np.int32), shape=None)
st = tfp.math.dense_to_sparse(ph)
result = self.evaluate(st)
self.assertAllEqual(
[[0, 0],
[0, 1],
[1, 0],
[1, 1],
[1, 2]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5], result.values)
self.assertAllEqual([2, 4], result.dense_shape)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
f37f65c77fc2cbe630313fe9779572d9243628eb | 96aa2367affe0dff353e1aaac8713ded087c5f68 | /utils/spiderPlot_SA.py | 335ed09082b623795670281ed3731ae77c81e7d3 | [
"Apache-2.0"
] | permissive | NMTHydro/Recharge | 0fcca9a72b631d6c3834c62b84dfb096da6cb210 | bbc1a05add92064acffeffb19f04e370b99a7918 | refs/heads/develop | 2020-05-21T17:39:37.702622 | 2020-04-08T17:10:40 | 2020-04-08T17:10:40 | 60,631,952 | 8 | 1 | null | 2016-10-26T17:01:21 | 2016-06-07T17:13:30 | Python | UTF-8 | Python | false | false | 5,333 | py | # ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance
# with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =================================IMPORTS=======================================
import os
import matplotlib.pyplot as plt
from matplotlib import rc
from numpy import linspace, array, add, multiply, set_printoptions
from pandas import read_pickle, set_option, options
def round_to_value(number, roundto):
return round(number / roundto) * roundto
rc('mathtext', default='regular')
set_option('display.max_rows', None)
set_option('display.max_columns', None)
set_option('display.width', None)
set_option('display.precision', 3)
options.display.float_format = '${:,.2f}'.format
set_printoptions(threshold=3000, edgeitems=5000, precision=3)
set_option('display.height', None)
set_option('display.max_rows', None)
TEMPS = range(-5, 6)
ALL_PCT = [x * 0.1 for x in range(5, 16)]
ndvi_range = linspace(0.9, 1.7, 11)
NDVI_RANGE = array([round_to_value(x, 0.05) for x in ndvi_range])
def make_spider_plot(dataframe, ndvi, all_pct, temps, fig_path=None, show=False):
display_pct = [(int(x)) for x in add(multiply(all_pct, 100.0), -100)]
dfs = os.listdir(dataframe)
print 'pickled dfs: {}'.format(dfs)
filename = '_basic_sensitivity_2.pkl'
if filename in dfs:
df = read_pickle(os.path.join(dataframe, filename))
df.to_csv(os.path.join(fig_path, 'sample_df_basic_2.csv'))
pass
print df
xx = 1
for index, row in df.iterrows():
fig = plt.figure(xx, figsize=(20, 10))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
ax3 = ax1.twiny()
fig.subplots_adjust(bottom=0.2)
print 'shape temps: {}, shape row[0]: {}'.format(len(temps), len(row[0]))
ax2.plot(temps, row[0], 'black', label='Temperature (+/- 5 deg C)', marker='8')
ax1.plot(display_pct, row[1], 'blue', label='Precipitation (+/- 50%)', marker='8')
ax1.plot(display_pct, row[2], 'purple', label='Reference Evapotranspiration (+/- 50%)', marker='8')
ax1.plot(display_pct, row[3], 'brown', label='Total Available Water (+/- 50%)', marker='8')
        ax3.plot(ndvi, row[4], 'green', linestyle='-.', label='Normalized Difference Vegetation\n'
                                                              ' Index Conversion Factor (0.9 - 1.7)', marker='8')
ax1.plot(display_pct, row[5], 'red', label='Soil Hydraulic Conductivity (+/- 50%)', marker='8')
ax1.set_xlabel(r"Parameter Change (%)", fontsize=16)
ax1.set_ylabel(r"Total Recharge in 14-Year Simulation (mm)", fontsize=16)
ax2.set_xlabel(r"Temperature Change (C)", fontsize=16)
ax2.xaxis.set_ticks_position("bottom")
ax2.xaxis.set_label_position("bottom")
ax2.spines["bottom"].set_position(("axes", -0.15))
ax2.set_frame_on(True)
ax2.patch.set_visible(False)
for sp in ax2.spines.itervalues():
sp.set_visible(False)
ax2.spines['bottom'].set_visible(True)
ax3.set_xlabel(r"NDVI to Crop Coefficient Conversion Factor", fontsize=16)
ax3.xaxis.set_ticks_position("top")
ax3.xaxis.set_label_position("top")
# ax3.spines["top"].set_position(("axes", 1.0))
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
for sp in ax3.spines.itervalues():
sp.set_visible(False)
ax3.spines['top'].set_visible(True)
plt.title('Variation of ETRM Pysical Parameters at {}'.format(str(index).replace('_', ' ')),
y=1.08, fontsize=20)
handle1, label1 = ax1.get_legend_handles_labels()
handle2, label2 = ax2.get_legend_handles_labels()
handle3, label3 = ax3.get_legend_handles_labels()
handles, labels = handle1 + handle2 + handle3, label1 + label2 + label3
ax1.legend(handles, labels, loc=0)
if show:
plt.show()
# if fig_path:
# plt.savefig(os.path.join(fig_path, '{}_spider'.format(index)), dpi=600, ext='jpg', close=True,
# verbose=True)
plt.close(fig)
if __name__ == '__main__':
root = os.path.join('F:\\', 'ETRM_Inputs')
sensitivity = os.path.join(root, 'sensitivity_analysis')
pickles = os.path.join(sensitivity, 'pickled')
figure_save_path = os.path.join(sensitivity, 'figures')
make_spider_plot(pickles, NDVI_RANGE, ALL_PCT, TEMPS, figure_save_path, show=True)
# ========================== EOF ==============================================
| [
"[email protected]"
] | |
516e00001cc17c4e8ab48673154d9f69351bbfe1 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/task_run_request.py | 2f2ed7a707c8b543f090be7f386215b7b75e10ce | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,824 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .run_request import RunRequest
class TaskRunRequest(RunRequest):
"""The parameters for a task run request.
All required parameters must be populated in order to send to Azure.
:param is_archive_enabled: The value that indicates whether archiving is
enabled for the run or not. Default value: False .
:type is_archive_enabled: bool
:param type: Required. Constant filled by server.
:type type: str
:param task_name: Required. The name of task against which run has to be
queued.
:type task_name: str
:param values: The collection of overridable values that can be passed
when running a task.
:type values:
list[~azure.mgmt.containerregistry.v2018_09_01.models.SetValue]
"""
_validation = {
'type': {'required': True},
'task_name': {'required': True},
}
_attribute_map = {
'is_archive_enabled': {'key': 'isArchiveEnabled', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'task_name': {'key': 'taskName', 'type': 'str'},
'values': {'key': 'values', 'type': '[SetValue]'},
}
def __init__(self, **kwargs):
super(TaskRunRequest, self).__init__(**kwargs)
self.task_name = kwargs.get('task_name', None)
self.values = kwargs.get('values', None)
self.type = 'TaskRunRequest'
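# --- Illustrative usage (editor's sketch, not part of the generated model) ---
# 'task_name' is required; 'values' takes SetValue items from the same models package
# (the argument names below are assumptions for the example).
#
#   request = TaskRunRequest(task_name='nightly-build',
#                            values=[SetValue(name='tag', value='v1.2.3')],
#                            is_archive_enabled=True)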
| [
"[email protected]"
] | |
1b5849466318aa075976375e01fa22fddd690edc | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_network_interface_load_balancers_operations.py | e42bd6eccf89e6b11dbf117b8ae8f3bcc1bcf2ca | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 5,600 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceLoadBalancersOperations(object):
"""NetworkInterfaceLoadBalancersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkInterfaceLoadBalancerListResult"]
"""List all load balancers in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceLoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.NetworkInterfaceLoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceLoadBalancerListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceLoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'} # type: ignore
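# --- Illustrative usage (editor's sketch, not part of the generated client) ---
# The operations group is normally reached through a NetworkManagementClient instance;
# the client and resource names below are assumptions for the example.
#
#   pager = network_client.network_interface_load_balancers.list('my-rg', 'my-nic')
#   for load_balancer in pager:
#       print(load_balancer.name)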
| [
"[email protected]"
] | |
9070f9ba6596fb792ae2d17601a5a9c0581820c3 | fd8405ac0a5d062907c153f2f2e3569571366539 | /irbisbooks/core/urls.py | 17e44ae4a60722f69bb0d5da5d79b7b2b8dec070 | [] | no_license | ri-gilfanov/irbis-books | aab471833035ae51088bccfb0806b863aaba3468 | 0b2a32013ab7f0c0d167e0864a7cb858e8e75add | refs/heads/master | 2021-01-25T13:19:07.818513 | 2018-03-02T09:47:06 | 2018-03-02T09:47:06 | 121,642,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.book_search, name='book_search'),
path('book_download/', views.book_download, name='book_download'),
] | [
"[email protected]"
] | |
b103132e0bee93fd37295128ccea5a1e416e708e | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/django/db/models/sql/compiler.py | 27b8cc343b29121d30713bacbde5e9dfc595aef5 | [
"MIT",
"GPL-3.0-only"
] | permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 67,037 | py | import collections
import functools
import re
import warnings
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, NotSupportedError
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.inspect import func_supports_parameter
FORCE = object()
class SQLCompiler:
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
# Skip References to the select clause, as all expressions in the
# select clause are already part of the group by.
if not expr.contains_aggregate and not is_ref:
expressions.extend(expr.get_source_expressions())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.base_table):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key
}
expressions = [pk] + [
expr for expr in expressions
if expr in having or (
getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key and expr.target.model._meta.managed
}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
- related_klass_infos: [f, klass_info] to descent into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
cols = self.get_default_columns()
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col, select_format=True)
except EmptyResultSet:
# Select a predicate that's always False.
sql, params = '0', ()
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
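# Illustrative shape of the return values (hypothetical query selecting one
# model column plus one annotation named 'total'):
#     ret         -> [(Col(...), ('"app_book"."id"', []), None),
#                     (Count(...), ('COUNT("app_book"."id")', []), 'total')]
#     klass_info  -> {'model': Book, 'select_fields': [0]}
#     annotations -> {'total': 1}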
def get_order_by(self):
"""
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for field in ordering:
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = order == 'DESC'
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT clause
order_by.append((
OrderBy(self.query.annotations[col], descending=descending),
False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query._extra or col not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
if self.query.combinator:
src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias:
continue
if src == sel_expr:
expr.set_source_expressions([RawSQL('%d' % (idx + 1), ())])
break
else:
raise DatabaseError('ORDER BY term does not match any column in the result set.')
resolved = expr.resolve_expression(
self.query, allow_joins=True, reuse=None)
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
if (without_ordering, tuple(params)) in seen:
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format is FORCE or (select_format and not self.query.subquery):
return node.output_field.select_format(self, sql, params)
return sql, params
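# Illustrative example of the vendor hook honoured by compile() (assumes a
# custom Func subclass): an as_<vendor>() method takes precedence over the
# generic as_sql() for that backend.
#
#     class Position(Func):
#         function = 'POSITION'
#
#         def as_mysql(self, compiler, connection):
#             return super().as_sql(compiler, connection, function='LOCATE')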
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection)
for query in self.query.combined_queries if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.')
if compiler.get_order_by():
raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.')
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query.set_values((
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
))
parts += (compiler.as_sql(),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == 'union' or (combinator == 'difference' and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == 'union':
combinator_sql += ' ALL'
braces = '({})' if features.supports_slicing_ordering_in_compound else '{}'
sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts))
result = [' {} '.format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
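# Illustrative output (assuming qs1.union(qs2)): the combined statement looks
# roughly like '(SELECT ...) UNION (SELECT ...)', with the parentheses only
# emitted when the backend supports slicing/ordering in compound statements,
# and 'UNION ALL' emitted when all=True.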
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, 'supports_select_{}'.format(combinator)):
raise NotSupportedError('{} is not supported on this database backend.'.format(combinator))
result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
result = ['SELECT']
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result += [', '.join(out_cols), 'FROM', *from_]
params.extend(f_params)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with '
'select_for_update on this database backend.'
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
# If it's a NOWAIT/SKIP LOCKED/OF query but the backend
# doesn't support it, raise NotSupportedError to prevent a
# possible deadlock.
if nowait and not self.connection.features.has_select_for_update_nowait:
raise NotSupportedError('NOWAIT is not supported on this database backend.')
elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
raise NotSupportedError('SKIP LOCKED is not supported on this database backend.')
elif of and not self.connection.features.has_select_for_update_of:
raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.')
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
)
if for_update_part and self.connection.features.for_update_after_from:
result.append(for_update_part)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
order_by = order_by or self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if self.query.explain_query:
result.insert(0, self.connection.ops.explain_query_prefix(
self.query.explain_format,
**self.query.explain_options
))
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limit_offset:
result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark))
if for_update_part and not self.connection.features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if not alias and with_col_aliases:
alias = 'col%d' % index
if alias:
sub_selects.append("%s.%s" % (
self.connection.ops.quote_name('subquery'),
self.connection.ops.quote_name(alias),
))
else:
select_clone = select.relabeled_clone({select.alias: 'subquery'})
subselect, subparams = select_clone.as_sql(self, self.connection)
sub_selects.append(subselect)
sub_params.extend(subparams)
return 'SELECT %s FROM (%s) subquery' % (
', '.join(sub_selects),
' '.join(result),
), tuple(sub_params + params)
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
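# Illustrative usage (assumes an existing queryset qs):
#     compiler = qs.query.get_compiler(using='default')
#     sql, params = compiler.as_sql()
#     # e.g. sql    == 'SELECT "app_book"."id", "app_book"."title" '
#     #                'FROM "app_book" WHERE "app_book"."title" = %s'
#     #      params == ('foo',)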
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Return a list of column expressions (Col instances), one per concrete
field, suitable for inclusion in the query's SELECT clause.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == 'DESC'
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
get_order_by() and get_distinct() must produce the same target columns
for the same input, as the join prefixes they set up must match.
Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices, self.query._filtered_relations)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'local_setter': f.set_cached_value,
'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins([related_field_name], opts, root_alias)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': f.remote_field.set_cached_value,
'remote_setter': f.set_cached_value,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias)
model = join_opts.model
alias = joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
def local_setter(obj, from_obj):
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(obj, from_obj):
setattr(from_obj, name, obj)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': local_setter,
'remote_setter': remote_setter,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select, opts=model._meta, root_alias=alias,
cur_depth=cur_depth + 1, requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield 'self'
else:
field = klass_info['field']
if klass_info['reverse']:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get('related_klass_infos', [])
)
result = []
invalid_names = []
for name in self.query.select_for_update_of:
parts = [] if name == 'self' else name.split(LOOKUP_SEP)
klass_info = self.klass_info
for part in parts:
for related_klass_info in klass_info.get('related_klass_infos', []):
field = related_klass_info['field']
if related_klass_info['reverse']:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
select_index = klass_info['select_fields'][0]
col = self.select[select_index][0]
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: %s.' % (
', '.join(invalid_names),
', '.join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
convs = []
for conv in (backend_converters + field_converters):
if func_supports_parameter(conv, 'context'):
warnings.warn(
'Remove the context parameter from %s.%s(). Support for it '
'will be removed in Django 3.0.' % (
conv.__self__.__class__.__name__,
conv.__name__,
),
RemovedInDjango30Warning,
)
conv = functools.partial(conv, context={})
convs.append(conv)
converters[i] = (convs, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch and not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
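# Illustrative note on result types (hypothetical compiler instance):
#     compiler.execute_sql(MULTI)   # iterator over fetchmany() chunks of rows
#     compiler.execute_sql(SINGLE)  # one row, truncated to col_count, or None
#     compiler.execute_sql(CURSOR)  # the raw cursor; the caller must close it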
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
for row in result[0]:
if not isinstance(row, str):
yield ' '.join(str(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
return_id = False
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
if value.contains_over_clause:
raise FieldError('Window expressions are not allowed in this query.')
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
fields = self.query.fields or [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.return_id and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
if self.return_id and self.connection.features.can_return_id_from_insert:
if self.connection.features.can_return_ids_from_bulk_insert:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += [r_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, return_id=False):
assert not (
return_id and len(self.query.objs) != 1 and
not self.connection.features.can_return_ids_from_bulk_insert
)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not return_id:
return
if self.connection.features.can_return_ids_from_bulk_insert and len(self.query.objs) > 1:
return self.connection.ops.fetch_returned_insert_ids(cursor)
if self.connection.features.can_return_id_from_insert:
assert len(self.query.objs) == 1
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(
cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column
)
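# Illustrative flow (assumes a single model instance being saved): Model.save()
# builds an InsertQuery and invokes this compiler; with return_id=True the new
# primary key is read back via the backend's insert-return support when
# available, and via last_insert_id() otherwise.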
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.base_table)]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
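# Illustrative result (hypothetical queryset delete): a single-table delete
# compiles to roughly
#     DELETE FROM "app_book" WHERE "app_book"."id" IN (%s, %s, ...)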
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
if val.contains_over_clause:
raise FieldError('Window expressions are not allowed in this query.')
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
table = self.query.base_table
result = [
'UPDATE %s SET' % qn(table),
', '.join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation, select_format=FORCE)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
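# Illustrative use (e.g. a sliced aggregate such as
# qs[:10].aggregate(total=Sum('price'))): the inner query is kept intact and
# wrapped, giving roughly
#     SELECT SUM("price") FROM (SELECT ... LIMIT 10) subquery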
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
| [
"[email protected]"
] | |
42d0987e6e1898a0e5f60a297e7db42a013fab6d | bcf332d2f6ef6970cfaa480400a112ecee3f16b8 | /stage07-artist2/s1level42.py | c5f34c2ae0814db387a0d43027c8ee7cd714f9b1 | [
"Unlicense"
] | permissive | skilstak/code-dot-org-python | e1907d29f3727060e5064a5eefd68a0f9f4f5c70 | ba127124386ecfdc20bd84592b3c271f8205d748 | refs/heads/master | 2020-04-04T19:34:23.531210 | 2015-07-10T12:39:19 | 2015-07-10T12:39:19 | 26,862,410 | 7 | 4 | null | 2014-11-21T20:28:20 | 2014-11-19T13:24:30 | Python | UTF-8 | Python | false | false | 465 | py | """Stage 7: Puzzle 8 of 11
Here's the solution to the previous puzzle. Can you add just 2 more
lines of code to complete the drawing?
"""
import sys
sys.path.append('..')
import codestudio
artist = codestudio.load('s1level42')
artist.speed = 'faster'
a = artist
for count2 in range(10):
artist.color = artist.random_color()
for count in range(4):
artist.move_forward(20)
artist.turn_right(90)
artist.move_forward(20)
artist.check()
| [
"[email protected]"
] | |
950b22a78a928e4427896cec1ba0d7c4cac4e011 | 6a4bfff7fcd78a0057401652c7f80d9a95a67267 | /painless_redirects/tests/test_models.py | 2f5b98013047caa595a23ef12657abfbbafe3877 | [
"MIT"
] | permissive | benzkji/django-painless-redirects | 25987ff984830be7e45b4d0af9a9cd0046beabe7 | 153721486b214ddd5365b6ac5769129562254dd5 | refs/heads/master | 2023-05-24T14:23:53.783400 | 2020-06-22T10:35:29 | 2020-06-22T10:35:29 | 22,944,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | """Tests for the models of the painless_redirects app."""
from django.test import TestCase
from . import factories
class RedirectModelTestCase(TestCase):
def test_model(self):
obj = factories.RedirectFactory()
self.assertTrue(obj.pk)
def test_redirect_value(self):
obj = factories.RedirectFactory()
self.assertEqual(obj.redirect_value('http'), "/the-new-path/")
obj.new_site = factories.SiteFactory()
self.assertEqual(obj.redirect_value('https'), "https://%s/the-new-path/" % obj.new_site.domain)
| [
"[email protected]"
] | |
9f99434b0414a1ef779501b64fddd6cde711ca08 | 93022749a35320a0c5d6dad4db476b1e1795e318 | /issm/giaivins.py | 8b3e6e1be28e45ec640be9f57bc01bb251bc69f2 | [
"BSD-3-Clause"
] | permissive | pf4d/issm_python | 78cd88e9ef525bc74e040c1484aaf02e46c97a5b | 6bf36016cb0c55aee9bf3f7cf59694cc5ce77091 | refs/heads/master | 2022-01-17T16:20:20.257966 | 2019-07-10T17:46:31 | 2019-07-10T17:46:31 | 105,887,661 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | py | from issm.fielddisplay import fielddisplay
from issm.project3d import project3d
from issm.checkfield import checkfield
from issm.WriteData import WriteData
class giaivins(object):
"""
GIA class definition
Usage:
giaivins=giaivins();
"""
def __init__(self): # {{{
self.mantle_viscosity = float('NaN');
self.lithosphere_thickness = float('NaN');
self.cross_section_shape = 0;
#set defaults
self.setdefaultparameters()
#}}}
def __repr__(self): # {{{
string=' giaivins solution parameters:'
string="%s\n%s"%(string,fielddisplay(self,'mantle_viscosity','mantle viscosity constraints (NaN means no constraint) (Pa s)'))
string="%s\n%s"%(string,fielddisplay(self,'lithosphere_thickness','lithosphere thickness constraints (NaN means no constraint) (m)'))
string="%s\n%s"%(string,fielddisplay(self,'cross_section_shape',"1: square-edged, 2: elliptical-edged surface"))
return string
#}}}
def extrude(self,md): # {{{
self.mantle_viscosity=project3d(md,'vector',self.mantle_viscosity,'type','node')
self.lithosphere_thickness=project3d(md,'vector',self.lithosphere_thickness,'type','node')
return self
#}}}
def setdefaultparameters(self): # {{{
self.cross_section_shape=1;
return self
#}}}
def checkconsistency(self,md,solution,analyses): # {{{
# Early return
if ('GiaAnalysis' not in analyses):
return md
md = checkfield(md,'fieldname','gia.mantle_viscosity','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices],'>',0)
md = checkfield(md,'fieldname','gia.lithosphere_thickness','NaN',1,'Inf',1,'size',[md.mesh.numberofvertices],'>',0)
md = checkfield(md,'fieldname','gia.cross_section_shape','numel',[1],'values',[1,2])
#be sure that if we are running a masstransport ice flow model coupled with giaivins, that thickness forcings
#are not provided into the future.
return md
# }}}
def marshall(self,prefix,md,fid): # {{{
WriteData(fid,prefix,'object',self,'fieldname','mantle_viscosity','format','DoubleMat','mattype',1);
WriteData(fid,prefix,'object',self,'fieldname','lithosphere_thickness','format','DoubleMat','mattype',1,'scale',10.**3.);
WriteData(fid,prefix,'object',self,'fieldname','cross_section_shape','format','Integer');
# }}}
| [
"[email protected]"
] | |
fa4c4bebb84eeea7871eaf044e4ec0be599f769c | 3d9506b859cdbf38a21549cd3d64b69ecde7674e | /GoogleCodeJam/2020KickstartRoundB/BusRoute.py | b7cceed2c849cd5b217cc8829a02467223137486 | [] | no_license | bradykim7/Algorithm | 1ae4c6e4e6d72687b660ddf0768a9174cc8d7b8c | 053210a1205f4e62b367f85b65dcb60fcad74008 | refs/heads/master | 2022-06-25T04:46:55.265058 | 2022-06-17T08:08:52 | 2022-06-17T08:08:52 | 233,500,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | import sys;
if __name__=='__main__':
t = int(input());
for i in range(t):
nd = input().split();
n=int(nd[0]); d=int(nd[1]);
ans =d;
x= list(map(int,input().rstrip().split()));
# Work backwards from the last bus, keeping a running day: each earlier
# bus must depart on or before the day chosen for the bus after it.
for j in reversed(x):
    ans -= ans % j
print('Case #%d: %d'%(i+1,ans))
| [
"[email protected]"
] | |
9d55ea5cb4addbc1cc6d2fe4e49086c6505e4551 | a75f9cf4f03b01f8e7cc12d311434beca1b233e5 | /vstools/writers.py | fbfa81a2230924a07bc92e36d66720df61542f97 | [
"Apache-2.0"
] | permissive | libyal/vstools | 3169dbf62be79eb309f9d23a06e068cb3bd1ed81 | f251133b39131735576baad2077bc47821e9b99b | refs/heads/main | 2023-04-13T06:54:29.549207 | 2023-04-10T10:41:06 | 2023-04-10T10:41:06 | 95,857,124 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 75,608 | py | # -*- coding: utf-8 -*-
"""Project and solution file writer classes."""
import abc
import re
from vstools import definitions
class FileWriter(object):
"""File writer."""
def __init__(self, encoding='utf-8', end_of_line='\r\n'):
"""Initializes a file writer.
Args:
encoding (str): encoding.
end_of_line (str): end of line.
"""
super(FileWriter, self).__init__()
self._encoding = encoding
self._end_of_line = end_of_line
self._file = None
def Close(self):
"""Closes the project file."""
self._file.close()
def Open(self, filename):
"""Opens the project file.
Args:
filename (str): path of the file.
"""
# Using binary mode to make sure to write Windows/DOS end of lines.
self._file = open(filename, 'wb') # pylint: disable=consider-using-with
def WriteBinaryData(self, data):
"""Writes binary data.
Args:
data (bytes): binary data.
"""
self._file.write(data)
def WriteLine(self, line):
"""Writes a line."""
line = ''.join([line, self._end_of_line])
line = line.encode(self._encoding)
self.WriteBinaryData(line)
def WriteLines(self, lines):
"""Writes lines."""
for line in lines:
self.WriteLine(line)
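# Illustrative usage (hypothetical output path):
#     writer = VS2008ProjectFileWriter()
#     writer.Open('example.vcproj')
#     writer.WriteHeader()
#     ...
#     writer.WriteFooter()
#     writer.Close()
# WriteLine() encodes each line (utf-8 by default) and appends '\r\n'; the file
# is opened in binary mode to guarantee Windows/DOS line endings.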
class VSProjectFileWriter(FileWriter):
"""Visual Studio project file writer."""
def __init__(self, encoding='utf-8', end_of_line='\r\n'):
"""Initializes a Visual Studio project file writer.
Args:
encoding (str): encoding.
end_of_line (str): end of line.
"""
super(VSProjectFileWriter, self).__init__(
encoding=encoding, end_of_line=end_of_line)
@abc.abstractmethod
def WriteFooter(self):
"""Writes a file footer."""
@abc.abstractmethod
def WriteHeader(self):
"""Writes a file header."""
class VS2008ProjectFileWriter(VSProjectFileWriter):
"""Visual Studio 2008 project file writer."""
_CONFIGURATION_OPTIONS = [
('ConfigurationType', 'output_type', False),
('CharacterSet', 'character_set', False),
('ManagedExtensions', 'managed_extensions', True),
('WholeProgramOptimization', 'whole_program_optimization', True),
]
_TOOL_COMPILER_CONFIGURATION_OPTIONS = [
('Optimization', 'optimization', True),
('AdditionalIncludeDirectories', 'include_directories', False),
('PreprocessorDefinitions', 'preprocessor_definitions', False),
('BasicRuntimeChecks', 'basic_runtime_checks', True),
('SmallerTypeCheck', 'smaller_type_check', True),
('RuntimeLibrary', 'runtime_library', False),
('UsePrecompiledHeader', 'precompiled_header', True),
('WarningLevel', 'warning_level', False),
('WarnAsError', 'warning_as_error', True),
('Detect64BitPortabilityProblems',
'detect_64bit_portability_problems', True),
('DebugInformationFormat', 'debug_information_format', True),
('CompileAs', 'compile_as', False),
]
_TOOL_LIBRARIAN_CONFIGURATION_OPTIONS = [
('OutputFile', 'librarian_output_file', False),
('ModuleDefinitionFile', 'librarian_module_definition_file', False),
('IgnoreAllDefaultLibraries', 'librarian_ignore_defaults', False),
]
_TOOL_LINKER_CONFIGURATION_OPTIONS1 = [
# ('AdditionalDependencies', 'additional_dependencies', True),
('OutputFile', 'linker_output_file', True),
('LinkIncremental', 'link_incremental', True),
]
_TOOL_LINKER_CONFIGURATION_OPTIONS2 = [
# ('AdditionalLibraryDirectories', 'library_directories', False),
('GenerateDebugInformation', 'generate_debug_information', True),
('SubSystem', 'sub_system', True),
('OptimizeReferences', 'optimize_references', True),
('EnableCOMDATFolding', 'enable_comdat_folding', True),
('RandomizedBaseAddress', 'randomized_base_address', True),
('DataExecutionPrevention', 'data_execution_prevention', True),
('TargetMachine', 'target_machine', True),
('ImportLibrary', 'import_library', True),
]
def __init__(self):
"""Initializes a Visual Studio project file writer."""
super(VS2008ProjectFileWriter, self).__init__()
self._version = 2008
def _WriteConfiguration(self, project_configuration):
"""Writes the project configuration.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self.WriteLine('\t\t<Configuration')
self.WriteLine('\t\t\tName="{0:s}|{1:s}"'.format(
project_configuration.name, project_configuration.platform))
self.WriteLines([
'\t\t\tOutputDirectory="$(SolutionDir)$(ConfigurationName)"',
'\t\t\tIntermediateDirectory="$(ConfigurationName)"'])
for definition, name, is_optional in self._CONFIGURATION_OPTIONS:
self._WriteConfigurationOption(
project_configuration, definition, name, is_optional, 3)
self.WriteLine('\t\t\t>')
tools = [
('VCPreBuildEventTool', []),
('VCCustomBuildTool', []),
('VCXMLDataGeneratorTool', []),
('VCWebServiceProxyGeneratorTool', []),
('VCMIDLTool', []),
('VCCLCompilerTool', self._TOOL_COMPILER_CONFIGURATION_OPTIONS),
('VCManagedResourceCompilerTool', []),
('VCResourceCompilerTool', []),
('VCPreLinkEventTool', []),
]
# TODO: add "librarian values set" to project configuration?
if project_configuration.librarian_output_file:
tool = ('VCLibrarianTool', self._TOOL_LIBRARIAN_CONFIGURATION_OPTIONS)
tools.append(tool)
for name, configuration_options in tools:
self._WriteConfigurationTool(
project_configuration, name, configuration_options)
if project_configuration.linker_values_set:
self._WriteConfigurationLinkerTool(project_configuration)
tools = [('VCALinkTool', [])]
if project_configuration.linker_values_set:
tools.append(('VCManifestTool', []))
tools.extend([
('VCXDCMakeTool', []),
('VCBscMakeTool', []),
('VCFxCopTool', [])
])
if project_configuration.linker_values_set:
tools.append(('VCAppVerifierTool', []))
tools.append(('VCPostBuildEventTool', []))
for name, configuration_options in tools:
self._WriteConfigurationTool(
project_configuration, name, configuration_options)
self.WriteLine('\t\t</Configuration>')
def _WriteConfigurationLinkerTool(self, project_configuration):
"""Writes the project configuration linker tool.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self._WriteConfigurationToolHeader('VCLinkerTool')
if project_configuration.additional_dependencies:
self.WriteLine('\t\t\t\tAdditionalDependencies="{0:s}"'.format(
' '.join(sorted(project_configuration.additional_dependencies))))
for definition, name, is_optional in (
self._TOOL_LINKER_CONFIGURATION_OPTIONS1):
self._WriteConfigurationOption(
project_configuration, definition, name, is_optional, 4)
library_directories = ['"$(OutDir)"']
library_directories.extend(project_configuration.library_directories)
library_directories = ';'.join(library_directories)
self.WriteLine('\t\t\t\tAdditionalLibraryDirectories="{0:s}"'.format(
library_directories))
for definition, name, is_optional in (
self._TOOL_LINKER_CONFIGURATION_OPTIONS2):
self._WriteConfigurationOption(
project_configuration, definition, name, is_optional, 4)
self._WriteConfigurationToolFooter()
def _WriteConfigurationOption(
self, project_configuration, definition, name, is_optional,
indentation_level):
"""Parses a configuration option.
An optional configuration option will not be written when its configuration
value is not set.
Args:
project_configuration (VSProjectConfiguration): project configuration.
definition (str): definition of the configuration value in file.
name (str): name of the configuration value in the project information.
is_optional (bool): True if the configuration option is optional.
indentation_level (int): indentation level.
"""
configuration_value = getattr(project_configuration, name, '')
if name == 'include_directories':
configuration_value = ';'.join(configuration_value)
if not is_optional or configuration_value:
indentation = '\t' * indentation_level
line = '{0:s}{1:s}="{2:s}"'.format(
indentation, definition, configuration_value)
self.WriteLine(line)
def _WriteConfigurationTool(
self, project_configuration, name, configuration_options):
"""Writes a project configuration tool.
Args:
project_configuration (VSProjectConfiguration): project configuration.
name (str): name of the tool.
configuration_options (list[tuple[str, str, bool]]): configuration
options defined as a tuple of definition, name and is optional.
"""
self._WriteConfigurationToolHeader(name)
# pylint: disable=redefined-argument-from-local
for definition, name, is_optional in configuration_options:
self._WriteConfigurationOption(
project_configuration, definition, name, is_optional, 4)
self._WriteConfigurationToolFooter()
def _WriteConfigurationToolFooter(self):
"""Writes the project configuration tool footer."""
self.WriteLine('\t\t\t/>')
def _WriteConfigurationToolHeader(self, name):
"""Writes the project configuration tool header.
Args:
name (str): name of the tool.
"""
self.WriteLines([
'\t\t\t<Tool',
'\t\t\t\tName="{0:s}"'.format(name)])
def _WriteHeaderFiles(self, header_files):
"""Writes the header files.
Args:
header_files (list[str]): header filenames.
"""
self.WriteLines([
'\t\t<Filter',
'\t\t\tName="Header Files"',
'\t\t\tFilter="h;hpp;hxx;hm;inl;inc;xsd"',
'\t\t\tUniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"',
'\t\t\t>'])
for filename in header_files:
self.WriteLine('\t\t\t<File')
self.WriteLine('\t\t\t\tRelativePath="{0:s}"'.format(filename))
self.WriteLines([
'\t\t\t\t>',
'\t\t\t</File>'])
self.WriteLine('\t\t</Filter>')
def _WriteResourceFiles(self, resource_files):
"""Writes the resource files.
Args:
resource_files (list[str]): resource filenames.
"""
self.WriteLines([
'\t\t<Filter',
'\t\t\tName="Resource Files"',
('\t\t\tFilter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;'
'resx;tiff;tif;png;wav"'),
'\t\t\tUniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"',
'\t\t\t>'])
for filename in resource_files:
self.WriteLine('\t\t\t<File')
self.WriteLine('\t\t\t\tRelativePath="{0:s}"'.format(filename))
self.WriteLines([
'\t\t\t\t>',
'\t\t\t</File>'])
self.WriteLine('\t\t</Filter>')
def _WriteSourceFiles(self, source_files):
"""Writes the source files.
Args:
source_files (list[str]): source filenames.
"""
self.WriteLines([
'\t\t<Filter',
'\t\t\tName="Source Files"',
'\t\t\tFilter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"',
'\t\t\tUniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"',
'\t\t\t>'])
for filename in source_files:
self.WriteLine('\t\t\t<File')
self.WriteLine('\t\t\t\tRelativePath="{0:s}"'.format(filename))
self.WriteLines([
'\t\t\t\t>',
'\t\t\t</File>'])
self.WriteLine('\t\t</Filter>')
def WriteConfigurations(self, project_configurations):
"""Writes the configurations.
Args:
project_configurations (VSConfigurations): configurations.
"""
self.WriteLine('\t<Configurations>')
for project_configuration in project_configurations.GetSorted():
self._WriteConfiguration(project_configuration)
self.WriteLine('\t</Configurations>')
self.WriteLines([
'\t<References>',
'\t</References>'])
# pylint: disable=unused-argument
def WriteDependencies(self, dependencies, solution_projects_by_guid):
"""Writes the dependencies.
Args:
dependencies (list[str]): GUIDs of the dependencies.
solution_projects_by_guid (dict[str, VSSolutionProject]): projects
per lower case GUID.
"""
return
def WriteFiles(self, source_files, header_files, resource_files):
"""Writes the files.
Args:
source_files (list[str]): source filenames.
header_files (list[str]): header filenames.
resource_files (list[str]): resource filenames.
"""
self.WriteLine('\t<Files>')
self._WriteSourceFiles(source_files)
self._WriteHeaderFiles(header_files)
self._WriteResourceFiles(resource_files)
self.WriteLine('\t</Files>')
self.WriteLines([
'\t<Globals>',
'\t</Globals>'])
def WriteFooter(self):
"""Writes a file footer."""
self.WriteLine('</VisualStudioProject>')
def WriteHeader(self):
"""Writes a file header."""
self.WriteLine('<?xml version="1.0" encoding="Windows-1252"?>')
# pylint: disable=unused-argument
def WriteProjectConfigurations(self, project_configurations):
"""Writes the project configurations.
Args:
project_configurations (VSConfigurations): configurations.
"""
return
def WriteProjectInformation(self, project_information):
"""Writes the project information.
Args:
project_information (VSProjectInformation): project information.
"""
self.WriteLines([
'<VisualStudioProject',
'\tProjectType="Visual C++"',
'\tVersion="9,00"'])
self.WriteLine('\tName="{0:s}"'.format(project_information.name))
self.WriteLine('\tProjectGUID="{{{0:s}}}"'.format(
project_information.guid.upper()))
self.WriteLine(
'\tRootNamespace="{0:s}"'.format(project_information.root_name_space))
if project_information.keyword:
self.WriteLine(
'\tKeyword="{0:s}"'.format(project_information.keyword))
    # Also seen 196613. These values appear to encode the .NET target
    # framework version as major * 65536 + minor: 131072 (0x00020000) would
    # be version 2.0 and 196613 (0x00030005) version 3.5.
self.WriteLines([
'\tTargetFrameworkVersion="131072"',
'\t>'])
# TODO: handle platforms.
self.WriteLines([
'\t<Platforms>',
'\t\t<Platform',
'\t\t\tName="Win32"',
'\t\t/>',
'\t</Platforms>'])
self.WriteLines([
'\t<ToolFiles>',
'\t</ToolFiles>'])
class VS2010ProjectFileWriter(VSProjectFileWriter):
"""Visual Studio 2010 project file writer."""
def __init__(self):
"""Initializes a Visual Studio project file writer."""
super(VS2010ProjectFileWriter, self).__init__()
self._project_file_version = '10.0.40219.1'
self._tools_version = '4.0'
self._version = 2010
def _WriteClCompileSection(self, project_configuration):
"""Writes the CLCompile section.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
include_directories = ';'.join(project_configuration.include_directories)
include_directories = re.sub(r'"', r'', include_directories)
if include_directories and include_directories[-1] != ';':
include_directories = '{0:s};'.format(
include_directories)
include_directories = '{0:s}%(AdditionalIncludeDirectories)'.format(
include_directories)
preprocessor_definitions = project_configuration.preprocessor_definitions
if preprocessor_definitions and preprocessor_definitions[-1] != ';':
preprocessor_definitions = '{0:s};'.format(preprocessor_definitions)
preprocessor_definitions = '{0:s}%(PreprocessorDefinitions)'.format(
preprocessor_definitions)
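    # Illustrative example (added note): preprocessor definitions of
    # 'WIN32;NDEBUG' are written as 'WIN32;NDEBUG;%(PreprocessorDefinitions)'
    # so that MSBuild appends the inherited definitions instead of replacing
    # them.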
self.WriteLine(' <ClCompile>')
if project_configuration.optimization != '':
self.WriteLine(' <Optimization>{0:s}</Optimization>'.format(
project_configuration.optimization_string))
if project_configuration.enable_intrinsic_functions != '':
self.WriteLine((
' <IntrinsicFunctions>{0:s}</IntrinsicFunctions>').format(
project_configuration.enable_intrinsic_functions))
if project_configuration.whole_program_optimization:
self.WriteLine((
' <WholeProgramOptimization>{0:s}'
'</WholeProgramOptimization>').format(
project_configuration.whole_program_optimization_string))
self.WriteLine((
' <AdditionalIncludeDirectories>{0:s}'
'</AdditionalIncludeDirectories>').format(include_directories))
self.WriteLine((
' <PreprocessorDefinitions>{0:s}'
'</PreprocessorDefinitions>').format(preprocessor_definitions))
if project_configuration.basic_runtime_checks != '':
self.WriteLine((
' <BasicRuntimeChecks>{0:s}'
'</BasicRuntimeChecks>').format(
project_configuration.basic_runtime_checks_string))
if project_configuration.smaller_type_check != '':
self.WriteLine((
' <SmallerTypeCheck>{0:s}</SmallerTypeCheck>').format(
project_configuration.smaller_type_check))
self.WriteLine((
' <RuntimeLibrary>{0:s}</RuntimeLibrary>').format(
project_configuration.runtime_librarian_string))
if project_configuration.enable_function_level_linking != '':
self.WriteLine((
' <FunctionLevelLinking>{0:s}</FunctionLevelLinking>').format(
project_configuration.enable_function_level_linking))
if project_configuration.precompiled_header != '':
      # A value of 0 is represented by an opening and closing tag on
      # separate lines.
if project_configuration.precompiled_header == '0':
self.WriteLines([
' <PrecompiledHeader>',
' </PrecompiledHeader>'])
else:
self.WriteLine((
' <PrecompiledHeader>{0:s}</PrecompiledHeader>').format(
project_configuration.precompiled_header_string))
self.WriteLine(' <WarningLevel>{0:s}</WarningLevel>'.format(
project_configuration.warning_level_string))
if project_configuration.warning_as_error:
self.WriteLine((
' <TreatWarningAsError>{0:s}'
'</TreatWarningAsError>').format(
project_configuration.warning_as_error))
if project_configuration.debug_information_format != '':
      # A value of 0 is represented by an opening and closing tag on
      # separate lines.
if project_configuration.debug_information_format == '0':
self.WriteLines([
' <DebugInformationFormat>',
' </DebugInformationFormat>'])
else:
self.WriteLine((
' <DebugInformationFormat>{0:s}'
'</DebugInformationFormat>').format(
project_configuration.debug_information_format_string))
if project_configuration.compile_as:
self.WriteLine(' <CompileAs>{0:s}</CompileAs>'.format(
project_configuration.compile_as_string))
self.WriteLine(' </ClCompile>')
def _WriteConfigurationPropertyGroup(self, project_configuration):
"""Writes the configuration property group.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self._WriteConfigurationPropertyGroupHeader(project_configuration)
self.WriteLine(' <ConfigurationType>{0:s}</ConfigurationType>'.format(
project_configuration.output_type_string))
if project_configuration.character_set:
self.WriteLine(' <CharacterSet>{0:s}</CharacterSet>'.format(
project_configuration.character_set_string))
if project_configuration.managed_extensions == '1':
self.WriteLine(' <CLRSupport>true</CLRSupport>')
if project_configuration.whole_program_optimization:
self.WriteLine((
' <WholeProgramOptimization>{0:s}'
'</WholeProgramOptimization>').format(
project_configuration.whole_program_optimization_string))
platform_toolset = project_configuration.GetPlatformToolset(self._version)
if platform_toolset:
self.WriteLine(' <PlatformToolset>{0:s}</PlatformToolset>'.format(
platform_toolset))
self._WriteConfigurationPropertyGroupFooter()
def _WriteConfigurationPropertyGroupFooter(self):
"""Writes the configuration property group footer."""
self.WriteLine(' </PropertyGroup>')
def _WriteConfigurationPropertyGroupHeader(self, project_configuration):
"""Writes the configuration property group header.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self.WriteLine((
' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'" Label="Configuration">').format(
project_configuration.name, project_configuration.platform))
def _WriteHeaderFiles(self, header_files):
"""Writes the header files.
Args:
header_files (list[str]): header filenames.
"""
if header_files:
self.WriteLine(' <ItemGroup>')
for filename in header_files:
self.WriteLine(' <ClInclude Include="{0:s}" />'.format(filename))
self.WriteLine(' </ItemGroup>')
def _WriteItemDefinitionGroup(self, project_configuration):
"""Writes the item definition group.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self._WriteItemDefinitionGroupHeader(project_configuration)
# Write the compiler specific section.
self._WriteClCompileSection(project_configuration)
# Write the librarian specific section.
if project_configuration.librarian_output_file:
self._WriteLibrarianSection(project_configuration)
# Write the linker specific section.
if (project_configuration.linker_values_set or
project_configuration.output_type == (
definitions.OUTPUT_TYPE_APPLICATION)):
self._WriteLinkerSection(project_configuration)
self._WriteItemDefinitionGroupFooter()
def _WriteItemDefinitionGroupFooter(self):
"""Writes the item definition group header."""
self.WriteLine(' </ItemDefinitionGroup>')
def _WriteItemDefinitionGroupHeader(self, project_configuration):
"""Writes the item definition group header.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self.WriteLine((
' <ItemDefinitionGroup Condition="\'$(Configuration)|'
'$(Platform)\'==\'{0:s}|{1:s}\'">').format(
project_configuration.name, project_configuration.platform))
def _WriteLibrarianSection(self, project_configuration):
"""Writes the librarian section.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
librarian_output_file = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)',
project_configuration.librarian_output_file)
self.WriteLines([
' <Lib>',
' <OutputFile>{0:s}</OutputFile>'.format(librarian_output_file)])
if project_configuration.module_definition_file != '':
self.WriteLine((
' <ModuleDefinitionFile>{0:s}'
'</ModuleDefinitionFile>').format(
project_configuration.module_definition_file))
else:
self.WriteLines([
' <ModuleDefinitionFile>',
' </ModuleDefinitionFile>'])
if project_configuration.librarian_ignore_defaults != '':
self.WriteLine((
' <IgnoreAllDefaultLibraries>{0:s}'
'</IgnoreAllDefaultLibraries>').format(
project_configuration.librarian_ignore_defaults))
self.WriteLine(' </Lib>')
def _WriteLinkerSection(self, project_configuration):
"""Writes the linker section.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self.WriteLine(' <Link>')
# Visual Studio will convert an empty additional dependencies value.
if project_configuration.additional_dependencies:
additional_dependencies = ';'.join(
sorted(project_configuration.additional_dependencies))
additional_dependencies = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)', additional_dependencies)
if additional_dependencies and additional_dependencies[-1] != ';':
additional_dependencies = '{0:s};'.format(additional_dependencies)
additional_dependencies = '{0:s}%(AdditionalDependencies)'.format(
additional_dependencies)
self.WriteLine((
' <AdditionalDependencies>{0:s}'
'</AdditionalDependencies>').format(
additional_dependencies))
if project_configuration.linker_output_file:
linker_output_file = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)',
project_configuration.linker_output_file)
self.WriteLine(' <OutputFile>{0:s}</OutputFile>'.format(
linker_output_file))
if project_configuration.module_definition_file != '':
self.WriteLine((
' <ModuleDefinitionFile>{0:s}'
'</ModuleDefinitionFile>').format(
project_configuration.module_definition_file))
if project_configuration.library_directories:
library_directories = ';'.join(project_configuration.library_directories)
library_directories = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)', library_directories)
library_directories = re.sub(r'"', r'', library_directories)
if library_directories and library_directories[-1] != ';':
library_directories = '{0:s};'.format(library_directories)
library_directories = '{0:s}%(AdditionalLibraryDirectories)'.format(
library_directories)
self.WriteLine((
' <AdditionalLibraryDirectories>{0:s}'
'</AdditionalLibraryDirectories>').format(
library_directories))
if project_configuration.generate_debug_information != '':
self.WriteLine((
' <GenerateDebugInformation>{0:s}'
'</GenerateDebugInformation>').format(
project_configuration.generate_debug_information))
if project_configuration.sub_system != '':
self.WriteLine(' <SubSystem>{0:s}</SubSystem>'.format(
project_configuration.sub_system_string))
if project_configuration.optimize_references == '0':
self.WriteLines([
' <OptimizeReferences>',
' </OptimizeReferences>'])
elif project_configuration.optimize_references != '':
self.WriteLine((
' <OptimizeReferences>{0:s}</OptimizeReferences>').format(
project_configuration.optimize_references_string))
if project_configuration.enable_comdat_folding == '0':
self.WriteLines([
' <EnableCOMDATFolding>',
' </EnableCOMDATFolding>'])
elif project_configuration.enable_comdat_folding != '':
self.WriteLine((
' <EnableCOMDATFolding>{0:s}</EnableCOMDATFolding>').format(
project_configuration.enable_comdat_folding_string))
if project_configuration.randomized_base_address != '':
self.WriteLine((
' <RandomizedBaseAddress>{0:s}'
'</RandomizedBaseAddress>').format(
project_configuration.randomized_base_address_string))
if project_configuration.fixed_base_address == '0':
self.WriteLines([
' <FixedBaseAddress>',
' </FixedBaseAddress>'])
if project_configuration.data_execution_prevention != '':
      # A value of 0 is represented by an opening and closing tag on
      # separate lines.
if project_configuration.data_execution_prevention == '0':
self.WriteLines([
' <DataExecutionPrevention>',
' </DataExecutionPrevention>'])
else:
self.WriteLine((
' <DataExecutionPrevention>{0:s}'
'</DataExecutionPrevention>').format(
project_configuration.data_execution_prevention_string))
if project_configuration.import_library:
import_library = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)',
project_configuration.import_library)
self.WriteLine(' <ImportLibrary>{0:s}</ImportLibrary>'.format(
import_library))
if project_configuration.target_machine != '':
self.WriteLine(' <TargetMachine>{0:s}</TargetMachine>'.format(
project_configuration.target_machine_string))
self.WriteLine(' </Link>')
def _WriteOutIntDirConditions(
self, configuration_name, project_configurations):
"""Writes the OutDir and IntDir conditions.
Args:
configuration_name (str): name of the configuration.
project_configurations (VSConfigurations): configurations.
"""
for configuration_platform in sorted(project_configurations.platforms):
project_configuration = project_configurations.GetByIdentifier(
configuration_name, configuration_platform)
if len(project_configurations.platforms) == 1:
self.WriteLine((
' <OutDir Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">$(SolutionDir)$(Configuration)\\'
'</OutDir>').format(
project_configuration.name, project_configuration.platform))
else:
self.WriteLine((
' <OutDir Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">$(SolutionDir)$(Configuration)\\$(Platform)\\'
'</OutDir>').format(
project_configuration.name, project_configuration.platform))
for configuration_platform in sorted(project_configurations.platforms):
project_configuration = project_configurations.GetByIdentifier(
configuration_name, configuration_platform)
if len(project_configurations.platforms) == 1:
self.WriteLine((
' <IntDir Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">$(Configuration)\\</IntDir>').format(
project_configuration.name, project_configuration.platform))
else:
self.WriteLine((
' <IntDir Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">$(Configuration)\\$(Platform)\\</IntDir>').format(
project_configuration.name, project_configuration.platform))
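    # Added note: a single emitted OutDir element has the form (illustrative,
    # written on one line):
    #   <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>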
def _WriteOutIntDirPropertyGroups(self, project_configurations):
"""Writes the OutDir and IntDir property groups.
Args:
project_configurations (VSConfigurations): configurations.
"""
self.WriteLines([
' <PropertyGroup>',
' <_ProjectFileVersion>{0:s}</_ProjectFileVersion>'.format(
self._project_file_version)])
# Mimic Visual Studio behavior and output the configurations
# in platforms by name.
for configuration_name in sorted(project_configurations.names):
self._WriteOutIntDirConditions(configuration_name, project_configurations)
for configuration_platform in sorted(project_configurations.platforms):
project_configuration = project_configurations.GetByIdentifier(
configuration_name, configuration_platform)
if project_configuration.link_incremental != '':
self.WriteLine((
' <LinkIncremental Condition="\'$(Configuration)|'
'$(Platform)\'==\'{0:s}|{1:s}\'">{2:s}</LinkIncremental>').format(
project_configuration.name, project_configuration.platform,
project_configuration.link_incremental_string))
self.WriteLine(' </PropertyGroup>')
def _WriteResourceFiles(self, resource_files):
"""Writes the resource files.
Args:
resource_files (list[str]): resource filenames.
"""
if resource_files:
self.WriteLine(' <ItemGroup>')
for filename in resource_files:
self.WriteLine(' <ResourceCompile Include="{0:s}" />'.format(
filename))
self.WriteLine(' </ItemGroup>')
def _WriteSourceFiles(self, source_files):
"""Writes the source files.
Args:
source_files (list[str]): source filenames.
"""
if source_files:
self.WriteLine(' <ItemGroup>')
for filename in source_files:
self.WriteLine(' <ClCompile Include="{0:s}" />'.format(filename))
self.WriteLine(' </ItemGroup>')
def WriteConfigurations(self, project_configurations):
"""Writes the configurations.
Args:
project_configurations (VSConfigurations): configurations.
"""
self.WriteLine(
' <Import Project="$(VCTargetsPath)\\Microsoft.Cpp.Default.props" />')
# Mimic Visual Studio behavior and output the configurations
# in reverse order of name.
for project_configuration in project_configurations.GetSorted(reverse=True):
self._WriteConfigurationPropertyGroup(project_configuration)
self.WriteLines([
' <Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props" />',
' <ImportGroup Label="ExtensionSettings">',
' </ImportGroup>'])
# Mimic Visual Studio behavior and output the configurations
    # in reverse order of name.
for project_configuration in project_configurations.GetSorted(reverse=True):
self.WriteLines([
(' <ImportGroup Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'" Label="PropertySheets">'.format(
project_configuration.name, project_configuration.platform)),
(' <Import Project="$(UserRootDir)\\Microsoft.Cpp.$(Platform)'
'.user.props" Condition="exists(\'$(UserRootDir)\\Microsoft.Cpp'
'.$(Platform).user.props\')" Label="LocalAppDataPlatform" />'),
' </ImportGroup>'])
self.WriteLine(' <PropertyGroup Label="UserMacros" />')
self._WriteOutIntDirPropertyGroups(project_configurations)
for project_configuration in project_configurations.GetSorted():
self._WriteItemDefinitionGroup(project_configuration)
def WriteDependencies(self, dependencies, solution_projects_by_guid):
"""Writes the dependencies.
Args:
dependencies (list[str]): GUIDs of the dependencies.
solution_projects_by_guid (dict[str, VSSolutionProject]): projects
per lower case GUID.
"""
if dependencies:
self.WriteLine(' <ItemGroup>')
dependencies_by_name = {}
# Mimic Visual Studio behavior and output the dependencies in order
# of name (perhaps filename?).
for dependency_guid in dependencies:
dependency_project = solution_projects_by_guid[dependency_guid]
dependencies_by_name[dependency_project.name] = dependency_project
for dependency_name in sorted(dependencies_by_name):
dependency_project = dependencies_by_name[dependency_name]
dependency_filename = '..\\{0:s}.vcxproj'.format(
dependency_project.filename)
dependency_guid = dependency_project.guid.lower()
self.WriteLines([
(' <ProjectReference Include="{0:s}">').format(
dependency_filename),
' <Project>{{{0:s}}}</Project>'.format(dependency_guid),
' <ReferenceOutputAssembly>false</ReferenceOutputAssembly>',
' </ProjectReference>'])
self.WriteLine(' </ItemGroup>')
def WriteFiles(self, source_files, header_files, resource_files):
"""Writes the files.
Args:
source_files (list[str]): source filenames.
header_files (list[str]): header filenames.
resource_files (list[str]): resource filenames.
"""
self._WriteSourceFiles(source_files)
self._WriteHeaderFiles(header_files)
self._WriteResourceFiles(resource_files)
def WriteFooter(self):
"""Writes a file footer."""
self.WriteLines([
' <Import Project="$(VCTargetsPath)\\Microsoft.Cpp.targets" />',
' <ImportGroup Label="ExtensionTargets">',
' </ImportGroup>'])
# The last line has no \r\n.
self._file.write(b'</Project>')
def WriteHeader(self):
"""Writes a file header."""
self._file.write(b'\xef\xbb\xbf')
self.WriteLines([
'<?xml version="1.0" encoding="utf-8"?>',
('<Project DefaultTargets="Build" ToolsVersion="{0:s}" '
'xmlns="http://schemas.microsoft.com/developer/msbuild/2003">').format(
self._tools_version)])
def WriteProjectConfigurations(self, project_configurations):
"""Writes the project configurations.
Args:
project_configurations (VSConfigurations): configurations.
"""
self.WriteLine(' <ItemGroup Label="ProjectConfigurations">')
for project_configuration in project_configurations.GetSorted():
self.WriteLine(' <ProjectConfiguration Include="{0:s}|{1:s}">'.format(
project_configuration.name, project_configuration.platform))
self.WriteLine(' <Configuration>{0:s}</Configuration>'.format(
project_configuration.name))
self.WriteLine(' <Platform>{0:s}</Platform>'.format(
project_configuration.platform))
self.WriteLine(' </ProjectConfiguration>')
self.WriteLine(' </ItemGroup>')
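    # Added note: the emitted item group has the form (illustrative):
    #   <ProjectConfiguration Include="Release|Win32">
    #     <Configuration>Release</Configuration>
    #     <Platform>Win32</Platform>
    #   </ProjectConfiguration>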
def WriteProjectInformation(self, project_information):
"""Writes the project information.
Args:
project_information (VSProjectInformation): project information.
"""
self.WriteLine(' <PropertyGroup Label="Globals">')
self.WriteLine(' <ProjectGuid>{{{0:s}}}</ProjectGuid>'.format(
project_information.guid))
self.WriteLine(' <RootNamespace>{0:s}</RootNamespace>'.format(
project_information.root_name_space))
if project_information.keyword:
self.WriteLine(' <Keyword>{0:s}</Keyword>'.format(
project_information.keyword))
self.WriteLine(' </PropertyGroup>')
class VS2012ProjectFileWriter(VS2010ProjectFileWriter):
"""Visual Studio 2012 project file writer."""
def __init__(self):
"""Initializes a Visual Studio project file writer."""
super(VS2012ProjectFileWriter, self).__init__()
self._project_file_version = '11.0.61030.0'
self._tools_version = '4.0'
self._version = 2012
def _WriteClCompileSection(self, project_configuration):
"""Writes the CLCompile section.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
include_directories = ';'.join(project_configuration.include_directories)
include_directories = re.sub(r'"', r'', include_directories)
if include_directories and include_directories[-1] != ';':
include_directories = '{0:s};'.format(
include_directories)
include_directories = '{0:s}%(AdditionalIncludeDirectories)'.format(
include_directories)
preprocessor_definitions = project_configuration.preprocessor_definitions
if preprocessor_definitions and preprocessor_definitions[-1] != ';':
preprocessor_definitions = '{0:s};'.format(preprocessor_definitions)
preprocessor_definitions = '{0:s}%(PreprocessorDefinitions)'.format(
preprocessor_definitions)
self.WriteLine(' <ClCompile>')
if project_configuration.optimization != '':
self.WriteLine(' <Optimization>{0:s}</Optimization>'.format(
project_configuration.optimization_string))
if project_configuration.enable_intrinsic_functions != '':
self.WriteLine((
' <IntrinsicFunctions>{0:s}</IntrinsicFunctions>').format(
project_configuration.enable_intrinsic_functions))
self.WriteLine((
' <AdditionalIncludeDirectories>{0:s}'
'</AdditionalIncludeDirectories>').format(include_directories))
self.WriteLine((
' <PreprocessorDefinitions>{0:s}'
'</PreprocessorDefinitions>').format(preprocessor_definitions))
if project_configuration.basic_runtime_checks != '':
self.WriteLine((
' <BasicRuntimeChecks>{0:s}'
'</BasicRuntimeChecks>').format(
project_configuration.basic_runtime_checks_string))
if project_configuration.smaller_type_check != '':
self.WriteLine((
' <SmallerTypeCheck>{0:s}</SmallerTypeCheck>').format(
project_configuration.smaller_type_check))
self.WriteLine((
' <RuntimeLibrary>{0:s}</RuntimeLibrary>').format(
project_configuration.runtime_librarian_string))
if project_configuration.enable_function_level_linking != '':
self.WriteLine((
' <FunctionLevelLinking>{0:s}</FunctionLevelLinking>').format(
project_configuration.enable_function_level_linking))
if project_configuration.precompiled_header != '':
# A value of 0 is represented by an empty XML tag.
if project_configuration.precompiled_header == '0':
self.WriteLine(' <PrecompiledHeader />')
else:
self.WriteLine((
' <PrecompiledHeader>{0:s}</PrecompiledHeader>').format(
project_configuration.precompiled_header_string))
self.WriteLine(' <WarningLevel>{0:s}</WarningLevel>'.format(
project_configuration.warning_level_string))
if project_configuration.warning_as_error:
self.WriteLine((
' <TreatWarningAsError>{0:s}'
'</TreatWarningAsError>').format(
project_configuration.warning_as_error))
if project_configuration.debug_information_format != '':
# A value of 0 is represented by an empty XML tag.
if project_configuration.debug_information_format == '0':
self.WriteLine(' <DebugInformationFormat />')
else:
self.WriteLine((
' <DebugInformationFormat>{0:s}'
'</DebugInformationFormat>').format(
project_configuration.debug_information_format_string))
if project_configuration.compile_as:
self.WriteLine(' <CompileAs>{0:s}</CompileAs>'.format(
project_configuration.compile_as_string))
self.WriteLine(' </ClCompile>')
def _WriteConfigurationPropertyGroup(self, project_configuration):
"""Writes the configuration property group.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self._WriteConfigurationPropertyGroupHeader(project_configuration)
self.WriteLine(' <ConfigurationType>{0:s}</ConfigurationType>'.format(
project_configuration.output_type_string))
platform_toolset = project_configuration.GetPlatformToolset(self._version)
if platform_toolset:
self.WriteLine(' <PlatformToolset>{0:s}</PlatformToolset>'.format(
platform_toolset))
if project_configuration.character_set:
self.WriteLine(' <CharacterSet>{0:s}</CharacterSet>'.format(
project_configuration.character_set_string))
if project_configuration.managed_extensions == '1':
self.WriteLine(' <CLRSupport>true</CLRSupport>')
if project_configuration.whole_program_optimization:
self.WriteLine((
' <WholeProgramOptimization>{0:s}'
'</WholeProgramOptimization>').format(
project_configuration.whole_program_optimization_string))
self._WriteConfigurationPropertyGroupFooter()
def _WriteItemDefinitionGroup(self, project_configuration):
"""Writes the item definition group.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self._WriteItemDefinitionGroupHeader(project_configuration)
# Write the compiler specific section.
self._WriteClCompileSection(project_configuration)
# Write the librarian specific section.
if project_configuration.librarian_output_file:
self._WriteLibrarianSection(project_configuration)
# Write the linker specific section.
if (project_configuration.linker_values_set or
project_configuration.output_type == (
definitions.OUTPUT_TYPE_APPLICATION)):
self._WriteLinkerSection(project_configuration)
self._WriteItemDefinitionGroupFooter()
def _WriteLibrarianSection(self, project_configuration):
"""Writes the librarian section.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
librarian_output_file = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)',
project_configuration.librarian_output_file)
self.WriteLines([
' <Lib>',
' <OutputFile>{0:s}</OutputFile>'.format(librarian_output_file)])
if project_configuration.module_definition_file != '':
self.WriteLine((
' <ModuleDefinitionFile>{0:s}'
'</ModuleDefinitionFile>').format(
project_configuration.module_definition_file))
else:
self.WriteLine(' <ModuleDefinitionFile />')
if project_configuration.librarian_ignore_defaults != '':
self.WriteLine((
' <IgnoreAllDefaultLibraries>{0:s}'
'</IgnoreAllDefaultLibraries>').format(
project_configuration.librarian_ignore_defaults))
self.WriteLine(' </Lib>')
def _WriteLinkerSection(self, project_configuration):
"""Writes the linker section.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self.WriteLine(' <Link>')
# Visual Studio will convert an empty additional dependencies value.
if project_configuration.additional_dependencies:
additional_dependencies = ';'.join(
sorted(project_configuration.additional_dependencies))
additional_dependencies = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)', additional_dependencies)
if additional_dependencies and additional_dependencies[-1] != ';':
additional_dependencies = '{0:s};'.format(additional_dependencies)
additional_dependencies = (
'{0:s}%(AdditionalDependencies)').format(
additional_dependencies)
self.WriteLine((
' <AdditionalDependencies>{0:s}'
'</AdditionalDependencies>').format(
additional_dependencies))
if project_configuration.linker_output_file:
linker_output_file = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)',
project_configuration.linker_output_file)
self.WriteLine(' <OutputFile>{0:s}</OutputFile>'.format(
linker_output_file))
if project_configuration.module_definition_file != '':
self.WriteLine((
' <ModuleDefinitionFile>{0:s}'
'</ModuleDefinitionFile>').format(
project_configuration.module_definition_file))
if project_configuration.library_directories:
library_directories = ';'.join(project_configuration.library_directories)
library_directories = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)', library_directories)
library_directories = re.sub(r'"', r'', library_directories)
if library_directories and library_directories[-1] != ';':
library_directories = '{0:s};'.format(library_directories)
library_directories = (
'{0:s}%(AdditionalLibraryDirectories)').format(
library_directories)
self.WriteLine((
' <AdditionalLibraryDirectories>{0:s}'
'</AdditionalLibraryDirectories>').format(
library_directories))
if project_configuration.generate_debug_information != '':
self.WriteLine((
' <GenerateDebugInformation>{0:s}'
'</GenerateDebugInformation>').format(
project_configuration.generate_debug_information))
if project_configuration.sub_system != '':
self.WriteLine(' <SubSystem>{0:s}</SubSystem>'.format(
project_configuration.sub_system_string))
if project_configuration.optimize_references == '0':
self.WriteLine(' <OptimizeReferences />')
elif project_configuration.optimize_references != '':
self.WriteLine((
' <OptimizeReferences>{0:s}</OptimizeReferences>').format(
project_configuration.optimize_references_string))
if project_configuration.enable_comdat_folding == '0':
self.WriteLine(' <EnableCOMDATFolding />')
elif project_configuration.enable_comdat_folding != '':
self.WriteLine((
' <EnableCOMDATFolding>{0:s}</EnableCOMDATFolding>').format(
project_configuration.enable_comdat_folding_string))
if project_configuration.randomized_base_address != '':
self.WriteLine((
' <RandomizedBaseAddress>{0:s}'
'</RandomizedBaseAddress>').format(
project_configuration.randomized_base_address_string))
if project_configuration.fixed_base_address == '0':
# A value of 0 is represented by an empty XML tag.
self.WriteLine(' <FixedBaseAddress />')
if project_configuration.data_execution_prevention != '':
# A value of 0 is represented by an empty XML tag.
if project_configuration.data_execution_prevention == '0':
self.WriteLine(' <DataExecutionPrevention />')
else:
self.WriteLine((
' <DataExecutionPrevention>{0:s}'
'</DataExecutionPrevention>').format(
project_configuration.data_execution_prevention_string))
if (project_configuration.target_machine != '' and
project_configuration.linker_values_set):
self.WriteLine(' <TargetMachine>{0:s}</TargetMachine>'.format(
project_configuration.target_machine_string))
if project_configuration.import_library:
import_library = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)',
project_configuration.import_library)
self.WriteLine(' <ImportLibrary>{0:s}</ImportLibrary>'.format(
import_library))
self.WriteLine(' </Link>')
def _WriteOutIntDirConditions(
self, configuration_name, project_configurations):
"""Writes the OutDir and IntDir conditions.
Args:
configuration_name (str): name of the configuration.
project_configurations (VSConfigurations): configurations.
"""
for configuration_platform in sorted(project_configurations.platforms):
project_configuration = project_configurations.GetByIdentifier(
configuration_name, configuration_platform)
if len(project_configurations.platforms) == 1:
self.WriteLines([
(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">').format(
project_configuration.name, project_configuration.platform),
' <OutDir>$(SolutionDir)$(Configuration)\\</OutDir>',
' <IntDir>$(Configuration)\\</IntDir>'])
else:
self.WriteLines([
(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">').format(
project_configuration.name, project_configuration.platform),
(' <OutDir>$(SolutionDir)$(Configuration)\\$(Platform)\\'
'</OutDir>'),
' <IntDir>$(Configuration)\\$(Platform)\\</IntDir>'])
if project_configuration.linker_values_set:
self.WriteLine(' <LinkIncremental>false</LinkIncremental>')
self.WriteLine(' </PropertyGroup>')
def _WriteOutIntDirPropertyGroups(self, project_configurations):
"""Writes the OutDir and IntDir property groups.
Args:
project_configurations (VSConfigurations): configurations.
"""
self.WriteLines([
' <PropertyGroup>',
' <_ProjectFileVersion>{0:s}</_ProjectFileVersion>'.format(
self._project_file_version),
' </PropertyGroup>'])
# Mimic Visual Studio behavior and output the configurations
# in platforms by name.
for configuration_name in sorted(project_configurations.names):
self._WriteOutIntDirConditions(configuration_name, project_configurations)
# for configuration_platform in sorted(project_configurations.platforms):
# project_configuration = project_configurations.GetByIdentifier(
# configuration_name, configuration_platform)
# if project_configuration.link_incremental != '':
# self.WriteLine((
# ' <LinkIncremental Condition="\'$(Configuration)|'
# '$(Platform)\'==\'{0:s}|{1:s}\'">{2:s}'
# '</LinkIncremental>').format(
# project_configuration.name, project_configuration.platform,
# project_configuration.link_incremental_string))
class VS2013ProjectFileWriter(VS2010ProjectFileWriter):
"""Visual Studio 2013 project file writer."""
def __init__(self):
"""Initializes a Visual Studio project file writer."""
super(VS2013ProjectFileWriter, self).__init__()
self._project_file_version = '12.0.21005.1'
self._tools_version = '12.0'
self._version = 2013
class VS2015ProjectFileWriter(VS2012ProjectFileWriter):
"""Visual Studio 2015 project file writer."""
def __init__(self):
"""Initializes a Visual Studio project file writer."""
super(VS2015ProjectFileWriter, self).__init__()
self._project_file_version = '14.0.25431.1'
self._tools_version = '14.0'
self._version = 2015
def _WriteOutIntDirConditions(
self, configuration_name, project_configurations):
"""Writes the OutDir and IntDir conditions.
Args:
configuration_name (str): name of the configuration.
project_configurations (VSConfigurations): configurations.
"""
for configuration_platform in sorted(project_configurations.platforms):
project_configuration = project_configurations.GetByIdentifier(
configuration_name, configuration_platform)
if len(project_configurations.platforms) == 1:
self.WriteLines([
(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">').format(
project_configuration.name, project_configuration.platform),
' <OutDir>$(SolutionDir)$(Configuration)\\</OutDir>',
' <IntDir>$(Configuration)\\</IntDir>'])
else:
self.WriteLines([
(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">').format(
project_configuration.name, project_configuration.platform),
(' <OutDir>$(SolutionDir)$(Configuration)\\$(Platform)\\'
'</OutDir>'),
' <IntDir>$(Configuration)\\$(Platform)\\</IntDir>'])
self.WriteLine(' </PropertyGroup>')
class VS2017ProjectFileWriter(VS2012ProjectFileWriter):
"""Visual Studio 2017 project file writer."""
def __init__(self):
"""Initializes a Visual Studio project file writer."""
super(VS2017ProjectFileWriter, self).__init__()
self._project_file_version = '15.0.26730.3'
self._tools_version = '15.0'
self._version = 2017
def _WriteItemDefinitionGroup(self, project_configuration):
"""Writes the item definition group.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self._WriteItemDefinitionGroupHeader(project_configuration)
# Write the compiler specific section.
self._WriteClCompileSection(project_configuration)
# Write the librarian specific section.
if project_configuration.librarian_output_file:
self._WriteLibrarianSection(project_configuration)
# Write the linker specific section.
if (project_configuration.linker_values_set or
project_configuration.output_type == (
definitions.OUTPUT_TYPE_APPLICATION)):
self._WriteLinkerSection(project_configuration)
self._WriteItemDefinitionGroupFooter()
def _WriteLinkerSection(self, project_configuration):
"""Writes the linker section.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self.WriteLine(' <Link>')
# Visual Studio will convert an empty additional dependencies value.
if project_configuration.additional_dependencies:
additional_dependencies = ';'.join(
sorted(project_configuration.additional_dependencies))
additional_dependencies = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)', additional_dependencies)
if additional_dependencies and additional_dependencies[-1] != ';':
additional_dependencies = '{0:s};'.format(additional_dependencies)
additional_dependencies = '{0:s}%(AdditionalDependencies)'.format(
additional_dependencies)
self.WriteLine((
' <AdditionalDependencies>{0:s}'
'</AdditionalDependencies>').format(
additional_dependencies))
if project_configuration.linker_output_file:
linker_output_file = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)',
project_configuration.linker_output_file)
self.WriteLine(' <OutputFile>{0:s}</OutputFile>'.format(
linker_output_file))
if project_configuration.module_definition_file != '':
self.WriteLine((
' <ModuleDefinitionFile>{0:s}'
'</ModuleDefinitionFile>').format(
project_configuration.module_definition_file))
if project_configuration.library_directories:
library_directories = ';'.join(project_configuration.library_directories)
library_directories = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)', library_directories)
library_directories = re.sub(r'"', r'', library_directories)
if library_directories and library_directories[-1] != ';':
library_directories = '{0:s};'.format(library_directories)
library_directories = '{0:s}%(AdditionalLibraryDirectories)'.format(
library_directories)
self.WriteLine((
' <AdditionalLibraryDirectories>{0:s}'
'</AdditionalLibraryDirectories>').format(
library_directories))
if project_configuration.generate_debug_information != '':
self.WriteLine((
' <GenerateDebugInformation>{0:s}'
'</GenerateDebugInformation>').format(
project_configuration.generate_debug_information))
if project_configuration.sub_system != '':
self.WriteLine(' <SubSystem>{0:s}</SubSystem>'.format(
project_configuration.sub_system_string))
if project_configuration.optimize_references == '0':
self.WriteLines([
' <OptimizeReferences>',
' </OptimizeReferences>'])
elif project_configuration.optimize_references != '':
self.WriteLine((
' <OptimizeReferences>{0:s}</OptimizeReferences>').format(
project_configuration.optimize_references_string))
if project_configuration.enable_comdat_folding == '0':
self.WriteLines([
' <EnableCOMDATFolding>',
' </EnableCOMDATFolding>'])
elif project_configuration.enable_comdat_folding != '':
self.WriteLine((
' <EnableCOMDATFolding>{0:s}</EnableCOMDATFolding>').format(
project_configuration.enable_comdat_folding_string))
if project_configuration.randomized_base_address != '':
self.WriteLine((
' <RandomizedBaseAddress>{0:s}'
'</RandomizedBaseAddress>').format(
project_configuration.randomized_base_address_string))
if project_configuration.fixed_base_address == '0':
self.WriteLines([
' <FixedBaseAddress>',
' </FixedBaseAddress>'])
if project_configuration.data_execution_prevention != '':
      # A value of 0 is represented by an opening and closing tag on
      # separate lines.
if project_configuration.data_execution_prevention == '0':
self.WriteLines([
' <DataExecutionPrevention>',
' </DataExecutionPrevention>'])
else:
self.WriteLine((
' <DataExecutionPrevention>{0:s}'
'</DataExecutionPrevention>').format(
project_configuration.data_execution_prevention_string))
if project_configuration.import_library:
import_library = re.sub(
r'[$][(]OutDir[)]\\', r'$(OutDir)',
project_configuration.import_library)
self.WriteLine(' <ImportLibrary>{0:s}</ImportLibrary>'.format(
import_library))
if project_configuration.target_machine != '':
self.WriteLine(' <TargetMachine>{0:s}</TargetMachine>'.format(
project_configuration.target_machine_string))
if project_configuration.output_type != definitions.OUTPUT_TYPE_APPLICATION:
self.WriteLine(
' <ImportLibrary>$(OutDir)$(ProjectName).lib</ImportLibrary>')
self.WriteLine(' </Link>')
def _WriteOutIntDirConditions(
self, configuration_name, project_configurations):
"""Writes the OutDir and IntDir conditions.
Args:
configuration_name (str): name of the configuration.
project_configurations (VSConfigurations): configurations.
"""
for configuration_platform in sorted(project_configurations.platforms):
project_configuration = project_configurations.GetByIdentifier(
configuration_name, configuration_platform)
if len(project_configurations.platforms) == 1:
self.WriteLines([
(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">').format(
project_configuration.name, project_configuration.platform),
' <OutDir>$(SolutionDir)$(Configuration)\\</OutDir>',
' <IntDir>$(Configuration)\\</IntDir>'])
else:
self.WriteLines([
(' <PropertyGroup Condition="\'$(Configuration)|$(Platform)\'=='
'\'{0:s}|{1:s}\'">').format(
project_configuration.name, project_configuration.platform),
(' <OutDir>$(SolutionDir)$(Configuration)\\$(Platform)\\'
'</OutDir>'),
' <IntDir>$(Configuration)\\$(Platform)\\</IntDir>'])
if project_configuration.output_type == (
definitions.OUTPUT_TYPE_APPLICATION):
self.WriteLine(' <LinkIncremental>false</LinkIncremental>')
self.WriteLine(' </PropertyGroup>')
def WriteHeader(self):
"""Writes a file header."""
self.WriteLines([
'<?xml version="1.0" encoding="utf-8"?>',
('<Project DefaultTargets="Build" ToolsVersion="{0:s}" '
'xmlns="http://schemas.microsoft.com/developer/msbuild/2003">').format(
self._tools_version)])
class VS2019ProjectFileWriter(VS2017ProjectFileWriter):
"""Visual Studio 2019 project file writer."""
def __init__(self):
"""Initializes a Visual Studio project file writer."""
super(VS2019ProjectFileWriter, self).__init__()
self._project_file_version = '16.0.33423.256'
self._tools_version = '15.0'
self._version = 2019
class VS2022ProjectFileWriter(VS2017ProjectFileWriter):
"""Visual Studio 2022 project file writer."""
def __init__(self):
"""Initializes a Visual Studio project file writer."""
super(VS2022ProjectFileWriter, self).__init__()
self._project_file_version = '17.0.33516.290'
self._tools_version = 'Current'
self._version = 2022
def _WriteConfigurationPropertyGroup(self, project_configuration):
"""Writes the configuration property group.
Args:
project_configuration (VSProjectConfiguration): project configuration.
"""
self._WriteConfigurationPropertyGroupHeader(project_configuration)
self.WriteLine(' <ConfigurationType>{0:s}</ConfigurationType>'.format(
project_configuration.output_type_string))
    if not project_configuration.GetPlatformToolset(self._version):
      # Fall back to the Visual Studio 2022 (v143) toolset when the project
      # configuration does not define one; a configured toolset is written
      # further below, so only a single PlatformToolset element is emitted.
      self.WriteLine(' <PlatformToolset>v143</PlatformToolset>')
if project_configuration.character_set:
self.WriteLine(' <CharacterSet>{0:s}</CharacterSet>'.format(
project_configuration.character_set_string))
if project_configuration.managed_extensions == '1':
self.WriteLine(' <CLRSupport>true</CLRSupport>')
if project_configuration.whole_program_optimization:
self.WriteLine((
' <WholeProgramOptimization>{0:s}'
'</WholeProgramOptimization>').format(
project_configuration.whole_program_optimization_string))
platform_toolset = project_configuration.GetPlatformToolset(self._version)
if platform_toolset:
self.WriteLine(' <PlatformToolset>{0:s}</PlatformToolset>'.format(
platform_toolset))
self._WriteConfigurationPropertyGroupFooter()
def WriteProjectInformation(self, project_information):
"""Writes the project information.
Args:
project_information (VSProjectInformation): project information.
"""
self.WriteLine(' <PropertyGroup Label="Globals">')
self.WriteLine(' <VCProjectVersion>17.0</VCProjectVersion>')
self.WriteLine(' <ProjectGuid>{{{0:s}}}</ProjectGuid>'.format(
project_information.guid))
self.WriteLine(' <RootNamespace>{0:s}</RootNamespace>'.format(
project_information.root_name_space))
if project_information.keyword:
self.WriteLine(' <Keyword>{0:s}</Keyword>'.format(
project_information.keyword))
self.WriteLine(' </PropertyGroup>')
class VSSolutionFileWriter(FileWriter):
"""Visual Studio solution file writer."""
def _WriteProjectConfigurationPlatforms(
self, solution_configurations, solution_projects):
"""Writes the project configuration platforms.
Args:
solution_configurations (VSConfigurations): configurations.
solution_projects (list[VSSolutionProject]): projects.
"""
if solution_configurations.number_of_configurations > 0:
self.WriteLine(
'\tGlobalSection(ProjectConfigurationPlatforms) = postSolution')
for configuration_platform in sorted(solution_configurations.platforms):
for solution_project in solution_projects:
for configuration_name in sorted(solution_configurations.names):
configuration = solution_configurations.GetByIdentifier(
configuration_name, configuration_platform)
self.WriteLine((
'\t\t{{{0:s}}}.{1:s}|{2:s}.ActiveCfg = {1:s}|{2:s}').format(
solution_project.guid.upper(), configuration.name,
configuration.platform))
self.WriteLine((
'\t\t{{{0:s}}}.{1:s}|{2:s}.Build.0 = {1:s}|{2:s}').format(
solution_project.guid.upper(), configuration.name,
configuration.platform))
self.WriteLine('\tEndGlobalSection')
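      # Added note: the emitted lines have the form (illustrative
      # configuration and placeholder GUID):
      #   {PROJECT-GUID}.Release|Win32.ActiveCfg = Release|Win32
      #   {PROJECT-GUID}.Release|Win32.Build.0 = Release|Win32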
# pylint: disable=unused-argument
def _WriteSolutionConfigurationPlatforms(
self, solution_configurations, solution_projects):
"""Writes the solution configuration platforms.
Args:
solution_configurations (VSConfigurations): configurations.
solution_projects (list[VSSolutionProject]): projects.
"""
if solution_configurations.number_of_configurations > 0:
self.WriteLine(
'\tGlobalSection(SolutionConfigurationPlatforms) = preSolution')
for configuration_platform in sorted(solution_configurations.platforms):
for configuration_name in sorted(solution_configurations.names):
configuration = solution_configurations.GetByIdentifier(
configuration_name, configuration_platform)
self.WriteLine('\t\t{0:s}|{1:s} = {0:s}|{1:s}'.format(
configuration.name, configuration.platform))
self.WriteLine('\tEndGlobalSection')
def _WriteSolutionProperties(self):
"""Writes the solution properties."""
self.WriteLines([
'\tGlobalSection(SolutionProperties) = preSolution',
'\t\tHideSolutionNode = FALSE',
'\tEndGlobalSection'])
@abc.abstractmethod
def WriteHeader(self):
"""Writes a file header."""
@abc.abstractmethod
def WriteProject(self, solution_project):
"""Writes a project section.
Args:
solution_project (VSSolutionProject): project.
"""
def WriteProjects(self, solution_projects):
"""Writes the projects.
Args:
solution_projects (list[VSSolutionProject]): projects.
"""
for solution_project in solution_projects:
self.WriteProject(solution_project)
class VS2008SolutionFileWriter(VSSolutionFileWriter):
"""Visual Studio 2008 solution file writer."""
def WriteConfigurations(self, solution_configurations, solution_projects):
"""Writes the configurations.
Args:
solution_configurations (VSConfigurations): configurations.
solution_projects (list[VSSolutionProject]): projects.
"""
self.WriteLine('Global')
self._WriteSolutionConfigurationPlatforms(
solution_configurations, solution_projects)
self._WriteProjectConfigurationPlatforms(
solution_configurations, solution_projects)
self._WriteSolutionProperties()
self.WriteLine('EndGlobal')
def WriteHeader(self):
"""Writes a file header."""
self.WriteBinaryData(b'\xef\xbb\xbf\r\n')
self.WriteLines([
'Microsoft Visual Studio Solution File, Format Version 10.00',
'# Visual C++ Express 2008'])
def WriteProject(self, solution_project):
"""Writes a project section.
Args:
solution_project (VSSolutionProject): project.
"""
solution_project_filename = '{0:s}.vcproj'.format(
solution_project.filename)
self.WriteLine((
'Project("{{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}}") = "{0:s}", '
'"{1:s}", "{{{2:s}}}"').format(
solution_project.name, solution_project_filename,
solution_project.guid.upper()))
if solution_project.dependencies:
self.WriteLine(
'\tProjectSection(ProjectDependencies) = postProject')
for dependency_guid in solution_project.dependencies:
self.WriteLine('\t\t{{{0:s}}} = {{{0:s}}}'.format(
dependency_guid.upper()))
self.WriteLine('\tEndProjectSection')
self.WriteLine('EndProject')
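    # Added note: a written project section has the form (illustrative name
    # and placeholder GUID, first line emitted as a single line):
    #   Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "example", "example.vcproj", "{PROJECT-GUID}"
    #   EndProject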
class VS2010SolutionFileWriter(VSSolutionFileWriter):
"""Visual Studio 2010 solution file writer."""
def WriteConfigurations(self, solution_configurations, solution_projects):
"""Writes the configurations.
Args:
solution_configurations (VSConfigurations): configurations.
solution_projects (list[VSSolutionProject]): projects.
"""
self.WriteLine('Global')
self._WriteSolutionConfigurationPlatforms(
solution_configurations, solution_projects)
self._WriteProjectConfigurationPlatforms(
solution_configurations, solution_projects)
self._WriteSolutionProperties()
self.WriteLine('EndGlobal')
def WriteHeader(self):
"""Writes a file header."""
self.WriteBinaryData(b'\xef\xbb\xbf\r\n')
self.WriteLines([
'Microsoft Visual Studio Solution File, Format Version 11.00',
'# Visual C++ Express 2010'])
def WriteProject(self, solution_project):
"""Writes a project section.
Args:
solution_project (VSSolutionProject): project.
"""
solution_project_filename = '{0:s}.vcxproj'.format(
solution_project.filename)
self.WriteLine((
'Project("{{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}}") = "{0:s}", '
'"{1:s}", "{{{2:s}}}"').format(
solution_project.name, solution_project_filename,
solution_project.guid.upper()))
self.WriteLine('EndProject')
class VS2012SolutionFileWriter(VS2010SolutionFileWriter):
"""Visual Studio 2012 solution file writer."""
def WriteHeader(self):
"""Writes a file header."""
self.WriteBinaryData(b'\xef\xbb\xbf\r\n')
self.WriteLines([
'Microsoft Visual Studio Solution File, Format Version 12.00',
'# Visual Studio Express 2012 for Windows Desktop'])
def WriteProject(self, solution_project):
"""Writes a project section.
Args:
solution_project (VSSolutionProject): project.
"""
solution_project_filename = '{0:s}.vcxproj'.format(
solution_project.filename)
self.WriteLine((
'Project("{{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}}") = "{0:s}", '
'"{1:s}", "{{{2:s}}}"').format(
solution_project.name, solution_project_filename,
solution_project.guid.upper()))
# TODO: what about:
# '\tProjectSection(ProjectDependencies) = postProject'
# '\t\t{%GUID%} = {%GUID}'
# '\tEndProjectSection'
self.WriteLine('EndProject')
class VS2013SolutionFileWriter(VS2010SolutionFileWriter):
"""Visual Studio 2013 solution file writer."""
def WriteHeader(self):
"""Writes a file header."""
self.WriteBinaryData(b'\xef\xbb\xbf\r\n')
self.WriteLines([
'Microsoft Visual Studio Solution File, Format Version 12.00',
'# Visual Studio Express 2013 for Windows Desktop',
'VisualStudioVersion = 12.0.21005.1',
'MinimumVisualStudioVersion = 10.0.40219.1'])
class VS2015SolutionFileWriter(VS2010SolutionFileWriter):
"""Visual Studio 2015 solution file writer."""
def WriteHeader(self):
"""Writes a file header."""
self.WriteBinaryData(b'\xef\xbb\xbf\r\n')
self.WriteLines([
'Microsoft Visual Studio Solution File, Format Version 12.00',
'# Visual Studio 14',
'VisualStudioVersion = 14.0.25420.1',
'MinimumVisualStudioVersion = 10.0.40219.1'])
class VS2017SolutionFileWriter(VS2010SolutionFileWriter):
"""Visual Studio 2017 solution file writer."""
def _WriteExtensibilityGlobals(self):
"""Writes the extensibility globals."""
# TODO: determine if GUID is unique.
self.WriteLines([
'\tGlobalSection(ExtensibilityGlobals) = postSolution',
'\t\tSolutionGuid = {E41FC29C-7FE6-4F98-85AD-1ED968E86446}',
'\tEndGlobalSection'])
def WriteConfigurations(self, solution_configurations, solution_projects):
"""Writes the configurations.
Args:
solution_configurations (VSConfigurations): configurations.
solution_projects (list[VSSolutionProject]): projects.
"""
self.WriteLine('Global')
self._WriteSolutionConfigurationPlatforms(
solution_configurations, solution_projects)
self._WriteProjectConfigurationPlatforms(
solution_configurations, solution_projects)
self._WriteSolutionProperties()
# self._WriteExtensibilityGlobals()
self.WriteLine('EndGlobal')
def WriteHeader(self):
"""Writes a file header."""
self.WriteBinaryData(b'\xef\xbb\xbf\r\n')
self.WriteLines([
'Microsoft Visual Studio Solution File, Format Version 12.00',
'# Visual Studio 15',
'VisualStudioVersion = 15.0.26730.10',
'MinimumVisualStudioVersion = 10.0.40219.1'])
class VS2019SolutionFileWriter(VS2017SolutionFileWriter):
"""Visual Studio 2019 solution file writer."""
def WriteHeader(self):
"""Writes a file header."""
self.WriteBinaryData(b'\xef\xbb\xbf\r\n')
self.WriteLines([
'Microsoft Visual Studio Solution File, Format Version 12.00',
'# Visual Studio Version 16',
'VisualStudioVersion = 16.0.33423.256',
'MinimumVisualStudioVersion = 10.0.40219.1'])
class VS2022SolutionFileWriter(VS2017SolutionFileWriter):
"""Visual Studio 2022 solution file writer."""
def WriteHeader(self):
"""Writes a file header."""
self.WriteBinaryData(b'\xef\xbb\xbf\r\n')
self.WriteLines([
'Microsoft Visual Studio Solution File, Format Version 12.00',
'# Visual Studio Version 17',
'VisualStudioVersion = 17.5.33516.290',
'MinimumVisualStudioVersion = 10.0.40219.1'])
| [
"[email protected]"
] | |
5b98146395ad29c6511925bbc47a3402f1251fa2 | 1e168ced1a4bdb53967021e082b98027aea9d38a | /1.알고리즘정리/정렬/삽입정렬.py | 6e0f94afc79ed7d33b51a468d14c6182e85e3d68 | [] | no_license | vvspearlvvs/CodingTest | 3ebf921308570ac11eb87e6660048ccfcaf90ce4 | fc61b71d955f73ef8710f792d008bc671614ef7a | refs/heads/main | 2023-07-13T15:57:11.312519 | 2021-08-25T02:15:28 | 2021-08-25T02:15:28 | 354,232,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | #삽입정렬
arr = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]
for i in range(len(arr)):
for j in range(i,0,-1):
        if arr[j]<arr[j-1]: # move one position to the left
arr[j],arr[j-1]=arr[j-1],arr[j]
else:
break
print(arr)
print("최종")
print(arr)
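# Added note: the print inside the outer loop shows the list after each pass
# of the insertion sort; the final print shows the fully sorted list:
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]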
| [
"[email protected]"
] | |
8b0bcb3eb0687fab864e824994d9b70939870f5d | 5bcee9248d0bdebb134c61b4d0a3f3113337a569 | /lesson_0902/01_lists.py | 816ff09874e0073dca2b2f3d1f0fd9d842bcbb7b | [] | no_license | 100ballovby/6V_Lesson | c2edbc652ea2ebec07eeed60060c16ae4b4792e4 | 4b6dfda323a628558bd63bd5569960004fc335dd | refs/heads/master | 2023-05-08T07:49:14.569854 | 2021-05-25T06:40:53 | 2021-05-25T06:40:53 | 330,888,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | '''
A list is an ordered data structure enclosed in square
brackets. Its elements are separated by commas.
To create a list, think up a name for it, put the assignment sign (=)
and open square brackets.
my_list = [1, 26, 15, 5.6, 'hello, Andrey']
'''
cars = ['audi', 'mercedes', 'toyota', 'skoda', 'seat']
# I want to print the whole list
print(cars)
# I want to print the toyota from the list
print(cars[2])
print(cars[-1])  # print the last element of the list
import random  # the random module produces randomness
print('My first car was', cars[random.randint(0, 4)])
# randint(a, b) - return a random number (random int)
# in the range from a to b
print(random.randint(-100, 100))
| [
"[email protected]"
] | |
b6683e488f292d0548f63346115c9b555ac19d7a | b7c1e5d140c3c41e86f206047145f7f296fed53a | /Textbook/Chapter 5/pandasSeriesVsDataFrame.py | e8417f1cc0a8b2c5317aff757d4ee250887236df | [
"MIT"
] | permissive | jlcatonjr/Learn-Python-for-Stats-and-Econ | c2fbe29b324e70ceb832beafdd42d0accb37d9f9 | 194671592937562e08c92e0ef5f4793d4911701c | refs/heads/master | 2023-05-11T17:17:05.934290 | 2023-05-10T20:12:10 | 2023-05-10T20:12:10 | 148,912,065 | 22 | 21 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | #pandasSeriesVsDataFrame.py
import numpy as np
import pandas as pd
dataDict = {"range":np.arange(10)}
dataSeries = pd.Series(dataDict)
print(dataSeries)
print(dataSeries["range"])
dataDF=pd.DataFrame(dataDict)
print(dataDF)
print(dataDF["range"])
print(dataDF["range"][5:9])
#print(dataDF.loc[5:9]) | [
"[email protected]"
] | |
7baa26a26fc7ed616e1f4cfa37d283d39e72ebf3 | bbdd7f44884844cd0f7332d63945852dc2b53083 | /mypy_drf_plugin/transformers/fields.py | f4f8a10b2f9cc833f0b0e6cedc3fe13340f2fdf9 | [
"MIT"
] | permissive | private-forks/djangorestframework-stubs | e258e1dfc2af80fdf93322338ea3ce5452087e2d | 18427718c913f3d23ef7a4636c8205df42999cf2 | refs/heads/master | 2020-04-25T09:11:04.067894 | 2019-02-24T22:25:03 | 2019-02-24T22:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | from mypy.nodes import TypeInfo, Var
from mypy.plugin import FunctionContext
from mypy.types import AnyType, Instance, Type, TypeOfAny
from mypy_django_plugin import helpers
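# These hooks fill in the generic parameter of DRF serializer field types: the
# stub-only '_pyi_private_get_type' attribute supplies the type a field yields,
# wrapped in Optional when allow_null=True is passed to the field constructor.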
def get_private_descriptor_type(type_info: TypeInfo, private_field_name: str, is_nullable: bool) -> Type:
if not type_info.has_readable_member(private_field_name):
return AnyType(TypeOfAny.unannotated)
node = type_info.get(private_field_name).node
if isinstance(node, Var):
descriptor_type = node.type
if is_nullable:
descriptor_type = helpers.make_optional(descriptor_type)
return descriptor_type
return AnyType(TypeOfAny.unannotated)
def fill_parameters_of_descriptor_methods_from_private_attributes(ctx: FunctionContext) -> Type:
default_return_type = ctx.default_return_type
if not isinstance(default_return_type, Instance):
return default_return_type
is_nullable = bool(helpers.parse_bool(helpers.get_argument_by_name(ctx, 'allow_null')))
get_type = get_private_descriptor_type(default_return_type.type, '_pyi_private_get_type',
is_nullable=is_nullable)
return helpers.reparametrize_instance(default_return_type, [get_type])
| [
"[email protected]"
] | |
e02299e147fabe086c8864cff41d59b0059baa48 | 4da0c8906c9cd671e3a4bee3a6ee801a353e3d9a | /Water/Water/urls.py | 8ce00454b8099894f86046e7d4be2dfd650f7cf9 | [] | no_license | avpakh/GVK | 2a5a699caa8a986a3fd0dadbe2160fc9da5bf193 | ac8b8d8ad5cd5ef8485e98cd532a29cd420e0cae | refs/heads/master | 2020-06-13T10:35:36.663668 | 2017-01-06T09:01:42 | 2017-01-06T09:01:42 | 75,392,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | """Water URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from watres import urls as watres_url
from watstat import urls as watstat_url
from watres import views
from django.conf.urls.static import static
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$',views.index_view),
url(r'^watres/',include(watres_url)),
url(r'^watstat/',include(watstat_url)),
]
if settings.DEBUG:
if settings.MEDIA_ROOT:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
| [
"[email protected]"
] | |
64acd726fc80f2bd6451b0e36ae4cde1f625e944 | 8c2de4da068ba3ed3ce1adf0a113877385b7783c | /hyperion/torch/trainers/xvector_trainer.py | 190b2a30b1c2f28d38d0c6999040ce4ae6a76f9f | [
"Apache-2.0"
] | permissive | hyperion-ml/hyperion | a024c718c4552ba3a03aae2c2ca1b8674eaebc76 | c4c9eee0acab1ba572843373245da12d00dfffaa | refs/heads/master | 2023-08-28T22:28:37.624139 | 2022-03-25T16:28:08 | 2022-03-25T16:28:08 | 175,275,679 | 55 | 20 | Apache-2.0 | 2023-09-13T15:35:46 | 2019-03-12T18:40:19 | Python | UTF-8 | Python | false | false | 5,015 | py | """
Copyright 2019 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import os
from collections import OrderedDict as ODict
import logging
import torch
import torch.nn as nn
from ..utils import MetricAcc
from .torch_trainer import TorchTrainer
class XVectorTrainer(TorchTrainer):
"""Trainer to train x-vector style models.
Attributes:
model: x-Vector model object.
optim: pytorch optimizer object or options dict
epochs: max. number of epochs
exp_path: experiment output path
cur_epoch: current epoch
grad_acc_steps: gradient accumulation steps to simulate larger batch size.
device: cpu/gpu device
metrics: extra metrics to compute besides cxe.
lrsched: learning rate scheduler object or options dict
loggers: LoggerList object, loggers write training progress to std. output and file.
If None, it uses default loggers.
ddp: if True use distributed data parallel training
ddp_type: type of distributed data parallel in (ddp, oss_ddp, oss_shared_ddp)
loss: if None, it uses cross-entropy
train_mode: training mode in ['train', 'ft-full', 'ft-last-layer']
use_amp: uses mixed precision training.
log_interval: number of optim. steps between log outputs
use_tensorboard: use tensorboard logger
use_wandb: use wandb logger
wandb: wandb dictionary of options
grad_clip: norm to clip gradients, if 0 there is no clipping
grad_clip_norm: norm type to clip gradients
swa_start: epoch to start doing swa
swa_lr: SWA learning rate
swa_anneal_epochs: SWA learning rate anneal epochs
cpu_offload: CPU offload of gradients when using fully sharded ddp
"""
def __init__(
self,
model,
optim={},
epochs=100,
exp_path="./train",
cur_epoch=0,
grad_acc_steps=1,
device=None,
metrics=None,
lrsched=None,
loggers=None,
ddp=False,
ddp_type="ddp",
loss=None,
train_mode="train",
use_amp=False,
log_interval=10,
use_tensorboard=False,
use_wandb=False,
wandb={},
grad_clip=0,
grad_clip_norm=2,
swa_start=0,
swa_lr=1e-3,
swa_anneal_epochs=10,
cpu_offload=False,
):
if loss is None:
loss = nn.CrossEntropyLoss()
super().__init__(
model,
loss,
optim,
epochs,
exp_path,
cur_epoch=cur_epoch,
grad_acc_steps=grad_acc_steps,
device=device,
metrics=metrics,
lrsched=lrsched,
loggers=loggers,
ddp=ddp,
ddp_type=ddp_type,
train_mode=train_mode,
use_amp=use_amp,
log_interval=log_interval,
use_tensorboard=use_tensorboard,
use_wandb=use_wandb,
wandb=wandb,
grad_clip=grad_clip,
grad_clip_norm=grad_clip_norm,
swa_start=swa_start,
swa_lr=swa_lr,
swa_anneal_epochs=swa_anneal_epochs,
cpu_offload=cpu_offload,
)
def train_epoch(self, data_loader):
"""Training epoch loop
Args:
data_loader: pytorch data loader returning features and class labels.
"""
self.model.update_loss_margin(self.cur_epoch)
metric_acc = MetricAcc(device=self.device)
batch_metrics = ODict()
self.set_train_mode()
for batch, (data, target) in enumerate(data_loader):
self.loggers.on_batch_begin(batch)
if batch % self.grad_acc_steps == 0:
self.optimizer.zero_grad()
data, target = data.to(self.device), target.to(self.device)
batch_size = data.shape[0]
with self.amp_autocast():
output = self.model(data, target, **self.amp_args)
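                # dividing by grad_acc_steps keeps the accumulated gradient equivalent
                # to a single update computed on the larger effective batch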
loss = self.loss(output, target).mean() / self.grad_acc_steps
if self.use_amp:
self.grad_scaler.scale(loss).backward()
else:
loss.backward()
if (batch + 1) % self.grad_acc_steps == 0:
if self.lr_scheduler is not None and not self.in_swa:
self.lr_scheduler.on_opt_step()
self.update_model()
batch_metrics["loss"] = loss.item() * self.grad_acc_steps
for k, metric in self.metrics.items():
batch_metrics[k] = metric(output, target)
metric_acc.update(batch_metrics, batch_size)
logs = metric_acc.metrics
logs["lr"] = self._get_lr()
self.loggers.on_batch_end(logs=logs, batch_size=batch_size)
logs = metric_acc.metrics
logs = ODict(("train_" + k, v) for k, v in logs.items())
logs["lr"] = self._get_lr()
return logs
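# Minimal usage sketch (illustrative only): the model class, data loaders and the
# fit() entry point are assumptions about the surrounding code base, not APIs
# guaranteed by this module.
#
#   model = SomeXVectorModel(...)
#   trainer = XVectorTrainer(model, optim={"opt_type": "adam", "lr": 1e-3},
#                            epochs=50, exp_path="./exp/xvector", device="cuda")
#   trainer.fit(train_loader, val_loader)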
| [
"[email protected]"
] | |
f352ec7987f6f9addb4cc8a333cc19463e602697 | 5332fef91e044555e605bb37cbef7c4afeaaadb0 | /hy-data-analysis-with-python-2020/part02-e06_file_count/test/test_file_count.py | c7d3f00f44cd8f760c403784983ad6ec08d26a70 | [] | no_license | nopomi/hy-data-analysis-python-2019 | f3baa96bbe9b6ee7f0b3e6f6b8b0f3adfc3b6cc8 | 464685cb377cfdeee890a008fbfbd9ed6e3bcfd0 | refs/heads/master | 2021-07-10T16:16:56.592448 | 2020-08-16T18:27:38 | 2020-08-16T18:27:38 | 185,044,621 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py | #!/usr/bin/env python3
import sys
import unittest
from unittest.mock import patch
from itertools import repeat
from tmc import points
from tmc.utils import load, get_out
module_name="src.file_count"
file_count = load(module_name, "file_count")
main = load(module_name, "main")
class FileCount(unittest.TestCase):
@points('p02-06.1')
def test_first(self):
l, w, c = file_count("src/test.txt")
self.assertEqual(l, 8, msg="Wrong number of lines for file 'test.txt'!")
self.assertEqual(w, 105, msg="Wrong number of words for file 'test.txt'!")
self.assertEqual(c, 647, msg="Wrong number of characters for file 'test.txt'!")
@points('p02-06.1')
def test_calls(self):
with patch('builtins.open', side_effect=open) as o:
file_count("src/test.txt")
o.assert_called_once()
@points('p02-06.2')
def test_main(self):
orig_argv = sys.argv
n = 7
sys.argv[1:] = ["file%i" % i for i in range(n)]
with patch('src.file_count.file_count', side_effect=repeat((0,0,0))) as fc:
main()
self.assertEqual(fc.call_count, n,
msg="Wrong number of calls to function 'file_count' for %i command line parameters!" % n)
result = get_out().split('\n')
for i, line in enumerate(result):
self.assertEqual(line.strip(), "0\t0\t0\tfile%i" % i,
msg="Wrong result on line %i!" % i)
sys.argv = orig_argv
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0340fad6844580f9a0ff3797769971efcc2f644a | 52a4d869976a97498bdf56a8d0ff92cac138a136 | /Bioinformatics Textbook Track/Chapter 1/rosalind_ba1d.py | 4e6d4b0953bb2d76fa147c0368a4f8c3ded360aa | [] | no_license | aakibinesar/Rosalind | d726369a787d848cc378976b886189978a60a3a5 | 375bbdbfb16bf11b2f980701bbd0ba74a1605cdb | refs/heads/master | 2022-08-18T09:36:00.941080 | 2020-05-24T18:49:38 | 2020-05-24T18:49:38 | 264,722,651 | 0 | 0 | null | 2020-05-17T17:51:03 | 2020-05-17T17:40:59 | null | UTF-8 | Python | false | false | 747 | py | def occurrences(genome, sub):
"""
:param genome: genome for processing
    :param sub: pattern for which we find indexes of occurrences
:return: list of indexes
"""
start = 0
indexes = []
while True:
start = genome.find(sub, start)
        if start >= 0:  # find() returns -1 when there are no more matches; a match at index 0 is valid
indexes.append(start)
else:
break
start += 1
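        # advancing by one (not len(sub)) lets overlapping occurrences be reported too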
return indexes
def read_data_from(file_name):
with open(file_name, "r") as file:
pattern = file.readline().strip()
genome = file.readline().strip()
return genome, pattern
if __name__ == "__main__":
genome, pattern = read_data_from("rosalind_ba1d.txt")
indexes = occurrences(genome, pattern)
for ind in indexes:
print ind, | [
"[email protected]"
] | |
f1c755702c61d3a4c3f5e88391da6a3096250b2f | 5399dd4580ea3f528753bc8b52a981743d62f8bb | /keras/keras36_hist3_wine.py | 6844fef8e2c4a5ad39b62167985de24abdf45314 | [] | no_license | iwillbeaprogramer/Study | 3ac7c118ffe3981d78b4ad263cb62432eae13970 | 3bfe571da5bbfc545b994e5878e217f9306bde14 | refs/heads/main | 2023-05-07T16:31:05.564973 | 2021-05-27T14:50:00 | 2021-05-27T14:50:00 | 324,044,441 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler,OneHotEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
early_stopping = EarlyStopping(monitor='loss',patience=10)
datasets = load_wine()
x = datasets.data
y = datasets.target
encoder = OneHotEncoder()
y = encoder.fit_transform(y.reshape(-1,1)).toarray()
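# the integer wine labels (0-2) are now one-hot rows, matching the 3-unit softmax output and categorical_crossentropy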
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2)
x_train,x_val,y_train,y_val = train_test_split(x_train,y_train,test_size=0.2)
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)  # reuse the statistics fitted on the training set
x_val = scaler.transform(x_val)    # (re-fitting on test/val would leak and distort the scale)
model = Sequential()
model.add(Dense(128,activation='relu',input_dim=13))
model.add(Dense(64,activation='relu'))
model.add(Dense(32,activation='relu'))
model.add(Dense(16,activation='relu'))
model.add(Dense(8,activation='relu'))
model.add(Dense(3,activation='softmax'))
model.compile(loss = 'categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
hist = model.fit(x_train,y_train,validation_data=(x_val,y_val),epochs=300,batch_size=4,callbacks=[early_stopping])
loss = model.evaluate(x_test,y_test,batch_size=4)
y_pred = model.predict(x_test)
print('loss : ',loss[0],'\naccuracy : ',loss[1])
'''
DNN
loss : 3.391478821868077e-05
accuracy : 1.0
'''
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('loss & acc')
plt.ylabel('loss, acc')
plt.xlabel('epochs')
plt.legend(['train_loss','val_loss','train_acc','val_acc'])
plt.show() | [
"[email protected]"
] |