Dataset schema (column dtype and observed value stats):

| column | dtype | observed stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable ⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable ⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable ⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |
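A minimal sketch of reading records with this schema, assuming the dump comes from a Hugging Face dataset served in streaming mode; the dataset id below is a placeholder, not the actual name:

```python
# Hypothetical: stream records with the schema above; "org/dataset-name"
# is a placeholder id, not part of the original dump.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds:
    # Each record pairs repo/commit metadata with the full file text.
    print(row["repo_name"], row["path"], row["length_bytes"])
    source_code = row["content"]
    break
```

The data rows below follow this schema: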
blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
03d4807bf6ae79a977ee60b6b4de35c94aeb6e7f | 88a5dae03f0304d3fb7add71855d2ddc6d8e28e3 | /main/ext/__init__.py | 362e9cace53732e41d9341d5e951472eba630fbc | [
"Apache-2.0"
] | permissive | huangpd/Shape | eabb59781ac6a055f7b7036fef926023cbcd4882 | fddbbb765e353584752066f7c839293ebd10c4df | refs/heads/master | 2020-03-26T13:04:22.224367 | 2018-05-10T09:06:10 | 2018-05-10T09:06:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | #-*-coding:utf-8-*-
from flask_bcrypt import Bcrypt
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_login import LoginManager

bcrypt = Bcrypt()
bootstrap = Bootstrap()
mail = Mail()
login_manager = LoginManager()
login_manager.login_view = "auth.login_index"
login_manager.session_protection = "strong"
login_manager.login_message = "Log in to access more features"
login_manager.login_message_category = "info"
| [
"[email protected]"
] | |
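A sketch of how these extensions are typically bound to an application via the factory pattern; `create_app` and the config path are illustrative assumptions, not part of the original file:

```python
# Hypothetical app factory wiring for the extensions defined above.
from flask import Flask

def create_app(config_object="config.Config"):  # config path is a placeholder
    app = Flask(__name__)
    app.config.from_object(config_object)
    bcrypt.init_app(app)
    bootstrap.init_app(app)
    mail.init_app(app)
    login_manager.init_app(app)
    return app
```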
8426f5e2a7f3115533abb324288bc031ba59ff53 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/guestconfiguration/outputs.py | b1d2bbd2207b1aaffbc05852618b9e218ea32400 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,362 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'AssignmentInfoResponse',
'AssignmentReportResourceComplianceReasonResponse',
'AssignmentReportResourceResponse',
'AssignmentReportResponse',
'ConfigurationInfoResponse',
'ConfigurationParameterResponse',
'ConfigurationSettingResponse',
'GuestConfigurationAssignmentPropertiesResponse',
'GuestConfigurationNavigationResponse',
'VMInfoResponse',
]
@pulumi.output_type
class AssignmentInfoResponse(dict):
"""
Information about the guest configuration assignment.
"""
def __init__(__self__, *,
name: str,
configuration: Optional['outputs.ConfigurationInfoResponse'] = None):
"""
Information about the guest configuration assignment.
:param str name: Name of the guest configuration assignment.
:param 'ConfigurationInfoResponseArgs' configuration: Information about the configuration.
"""
pulumi.set(__self__, "name", name)
if configuration is not None:
pulumi.set(__self__, "configuration", configuration)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the guest configuration assignment.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def configuration(self) -> Optional['outputs.ConfigurationInfoResponse']:
"""
Information about the configuration.
"""
return pulumi.get(self, "configuration")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AssignmentReportResourceComplianceReasonResponse(dict):
"""
Reason and code for the compliance of the guest configuration assignment resource.
"""
def __init__(__self__, *,
code: str,
phrase: str):
"""
Reason and code for the compliance of the guest configuration assignment resource.
:param str code: Code for the compliance of the guest configuration assignment resource.
:param str phrase: Reason for the compliance of the guest configuration assignment resource.
"""
pulumi.set(__self__, "code", code)
pulumi.set(__self__, "phrase", phrase)
@property
@pulumi.getter
def code(self) -> str:
"""
Code for the compliance of the guest configuration assignment resource.
"""
return pulumi.get(self, "code")
@property
@pulumi.getter
def phrase(self) -> str:
"""
Reason for the compliance of the guest configuration assignment resource.
"""
return pulumi.get(self, "phrase")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AssignmentReportResourceResponse(dict):
"""
The guest configuration assignment resource.
"""
def __init__(__self__, *,
compliance_status: str,
properties: Any,
resource_id: str,
reasons: Optional[Sequence['outputs.AssignmentReportResourceComplianceReasonResponse']] = None):
"""
The guest configuration assignment resource.
:param str compliance_status: A value indicating compliance status of the machine for the assigned guest configuration.
:param Any properties: Properties of a guest configuration assignment resource.
:param str resource_id: Name of the guest configuration assignment resource setting.
:param Sequence['AssignmentReportResourceComplianceReasonResponseArgs'] reasons: Compliance reason and reason code for a resource.
"""
pulumi.set(__self__, "compliance_status", compliance_status)
pulumi.set(__self__, "properties", properties)
pulumi.set(__self__, "resource_id", resource_id)
if reasons is not None:
pulumi.set(__self__, "reasons", reasons)
@property
@pulumi.getter(name="complianceStatus")
def compliance_status(self) -> str:
"""
A value indicating compliance status of the machine for the assigned guest configuration.
"""
return pulumi.get(self, "compliance_status")
@property
@pulumi.getter
def properties(self) -> Any:
"""
Properties of a guest configuration assignment resource.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> str:
"""
Name of the guest configuration assignment resource setting.
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter
def reasons(self) -> Optional[Sequence['outputs.AssignmentReportResourceComplianceReasonResponse']]:
"""
Compliance reason and reason code for a resource.
"""
return pulumi.get(self, "reasons")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AssignmentReportResponse(dict):
def __init__(__self__, *,
compliance_status: str,
end_time: str,
id: str,
operation_type: str,
report_id: str,
start_time: str,
assignment: Optional['outputs.AssignmentInfoResponse'] = None,
resources: Optional[Sequence['outputs.AssignmentReportResourceResponse']] = None,
vm: Optional['outputs.VMInfoResponse'] = None):
"""
:param str compliance_status: A value indicating compliance status of the machine for the assigned guest configuration.
:param str end_time: End date and time of the guest configuration assignment compliance status check.
:param str id: ARM resource id of the report for the guest configuration assignment.
:param str operation_type: Type of report, Consistency or Initial
:param str report_id: GUID that identifies the guest configuration assignment report under a subscription, resource group.
:param str start_time: Start date and time of the guest configuration assignment compliance status check.
:param 'AssignmentInfoResponseArgs' assignment: Configuration details of the guest configuration assignment.
:param Sequence['AssignmentReportResourceResponseArgs'] resources: The list of resources for which guest configuration assignment compliance is checked.
:param 'VMInfoResponseArgs' vm: Information about the VM.
"""
pulumi.set(__self__, "compliance_status", compliance_status)
pulumi.set(__self__, "end_time", end_time)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "operation_type", operation_type)
pulumi.set(__self__, "report_id", report_id)
pulumi.set(__self__, "start_time", start_time)
if assignment is not None:
pulumi.set(__self__, "assignment", assignment)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if vm is not None:
pulumi.set(__self__, "vm", vm)
@property
@pulumi.getter(name="complianceStatus")
def compliance_status(self) -> str:
"""
A value indicating compliance status of the machine for the assigned guest configuration.
"""
return pulumi.get(self, "compliance_status")
@property
@pulumi.getter(name="endTime")
def end_time(self) -> str:
"""
End date and time of the guest configuration assignment compliance status check.
"""
return pulumi.get(self, "end_time")
@property
@pulumi.getter
def id(self) -> str:
"""
ARM resource id of the report for the guest configuration assignment.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="operationType")
def operation_type(self) -> str:
"""
Type of report, Consistency or Initial
"""
return pulumi.get(self, "operation_type")
@property
@pulumi.getter(name="reportId")
def report_id(self) -> str:
"""
GUID that identifies the guest configuration assignment report under a subscription, resource group.
"""
return pulumi.get(self, "report_id")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> str:
"""
Start date and time of the guest configuration assignment compliance status check.
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter
def assignment(self) -> Optional['outputs.AssignmentInfoResponse']:
"""
Configuration details of the guest configuration assignment.
"""
return pulumi.get(self, "assignment")
@property
@pulumi.getter
def resources(self) -> Optional[Sequence['outputs.AssignmentReportResourceResponse']]:
"""
The list of resources for which guest configuration assignment compliance is checked.
"""
return pulumi.get(self, "resources")
@property
@pulumi.getter
def vm(self) -> Optional['outputs.VMInfoResponse']:
"""
Information about the VM.
"""
return pulumi.get(self, "vm")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationInfoResponse(dict):
"""
Information about the configuration.
"""
def __init__(__self__, *,
name: str,
version: str):
"""
Information about the configuration.
:param str name: Name of the configuration.
:param str version: Version of the configuration.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the configuration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def version(self) -> str:
"""
Version of the configuration.
"""
return pulumi.get(self, "version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationParameterResponse(dict):
"""
Represents a configuration parameter.
"""
def __init__(__self__, *,
name: Optional[str] = None,
value: Optional[str] = None):
"""
Represents a configuration parameter.
:param str name: Name of the configuration parameter.
:param str value: Value of the configuration parameter.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the configuration parameter.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
Value of the configuration parameter.
"""
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationSettingResponse(dict):
"""
Configuration setting of LCM (Local Configuration Manager).
"""
def __init__(__self__, *,
action_after_reboot: Optional[str] = None,
allow_module_overwrite: Optional[str] = None,
configuration_mode: Optional[str] = None,
configuration_mode_frequency_mins: Optional[float] = None,
reboot_if_needed: Optional[str] = None,
refresh_frequency_mins: Optional[float] = None):
"""
Configuration setting of LCM (Local Configuration Manager).
:param str action_after_reboot: Specifies what happens after a reboot during the application of a configuration. The possible values are ContinueConfiguration and StopConfiguration
:param str allow_module_overwrite: If true - new configurations downloaded from the pull service are allowed to overwrite the old ones on the target node. Otherwise, false
:param str configuration_mode: Specifies how the LCM(Local Configuration Manager) actually applies the configuration to the target nodes. Possible values are ApplyOnly, ApplyAndMonitor, and ApplyAndAutoCorrect.
:param float configuration_mode_frequency_mins: How often, in minutes, the current configuration is checked and applied. This property is ignored if the ConfigurationMode property is set to ApplyOnly. The default value is 15.
:param str reboot_if_needed: Set this to true to automatically reboot the node after a configuration that requires reboot is applied. Otherwise, you will have to manually reboot the node for any configuration that requires it. The default value is false. To use this setting when a reboot condition is enacted by something other than DSC (such as Windows Installer), combine this setting with the xPendingReboot module.
:param float refresh_frequency_mins: The time interval, in minutes, at which the LCM checks a pull service to get updated configurations. This value is ignored if the LCM is not configured in pull mode. The default value is 30.
"""
if action_after_reboot is not None:
pulumi.set(__self__, "action_after_reboot", action_after_reboot)
if allow_module_overwrite is not None:
pulumi.set(__self__, "allow_module_overwrite", allow_module_overwrite)
if configuration_mode is not None:
pulumi.set(__self__, "configuration_mode", configuration_mode)
if configuration_mode_frequency_mins is None:
configuration_mode_frequency_mins = 15
if configuration_mode_frequency_mins is not None:
pulumi.set(__self__, "configuration_mode_frequency_mins", configuration_mode_frequency_mins)
if reboot_if_needed is None:
reboot_if_needed = 'False'
if reboot_if_needed is not None:
pulumi.set(__self__, "reboot_if_needed", reboot_if_needed)
if refresh_frequency_mins is None:
refresh_frequency_mins = 30
if refresh_frequency_mins is not None:
pulumi.set(__self__, "refresh_frequency_mins", refresh_frequency_mins)
@property
@pulumi.getter(name="actionAfterReboot")
def action_after_reboot(self) -> Optional[str]:
"""
Specifies what happens after a reboot during the application of a configuration. The possible values are ContinueConfiguration and StopConfiguration
"""
return pulumi.get(self, "action_after_reboot")
@property
@pulumi.getter(name="allowModuleOverwrite")
def allow_module_overwrite(self) -> Optional[str]:
"""
If true - new configurations downloaded from the pull service are allowed to overwrite the old ones on the target node. Otherwise, false
"""
return pulumi.get(self, "allow_module_overwrite")
@property
@pulumi.getter(name="configurationMode")
def configuration_mode(self) -> Optional[str]:
"""
Specifies how the LCM(Local Configuration Manager) actually applies the configuration to the target nodes. Possible values are ApplyOnly, ApplyAndMonitor, and ApplyAndAutoCorrect.
"""
return pulumi.get(self, "configuration_mode")
@property
@pulumi.getter(name="configurationModeFrequencyMins")
def configuration_mode_frequency_mins(self) -> Optional[float]:
"""
How often, in minutes, the current configuration is checked and applied. This property is ignored if the ConfigurationMode property is set to ApplyOnly. The default value is 15.
"""
return pulumi.get(self, "configuration_mode_frequency_mins")
@property
@pulumi.getter(name="rebootIfNeeded")
def reboot_if_needed(self) -> Optional[str]:
"""
Set this to true to automatically reboot the node after a configuration that requires reboot is applied. Otherwise, you will have to manually reboot the node for any configuration that requires it. The default value is false. To use this setting when a reboot condition is enacted by something other than DSC (such as Windows Installer), combine this setting with the xPendingReboot module.
"""
return pulumi.get(self, "reboot_if_needed")
@property
@pulumi.getter(name="refreshFrequencyMins")
def refresh_frequency_mins(self) -> Optional[float]:
"""
The time interval, in minutes, at which the LCM checks a pull service to get updated configurations. This value is ignored if the LCM is not configured in pull mode. The default value is 30.
"""
return pulumi.get(self, "refresh_frequency_mins")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GuestConfigurationAssignmentPropertiesResponse(dict):
"""
Guest configuration assignment properties.
"""
def __init__(__self__, *,
assignment_hash: str,
compliance_status: str,
last_compliance_status_checked: str,
latest_report_id: str,
provisioning_state: str,
target_resource_id: str,
context: Optional[str] = None,
guest_configuration: Optional['outputs.GuestConfigurationNavigationResponse'] = None,
latest_assignment_report: Optional['outputs.AssignmentReportResponse'] = None):
"""
Guest configuration assignment properties.
:param str assignment_hash: Combined hash of the configuration package and parameters.
:param str compliance_status: A value indicating compliance status of the machine for the assigned guest configuration.
:param str last_compliance_status_checked: Date and time when last compliance status was checked.
:param str latest_report_id: Id of the latest report for the guest configuration assignment.
:param str provisioning_state: The provisioning state, which only appears in the response.
:param str target_resource_id: VM resource Id.
:param str context: The source which initiated the guest configuration assignment. Ex: Azure Policy
:param 'GuestConfigurationNavigationResponseArgs' guest_configuration: The guest configuration to assign.
:param 'AssignmentReportResponseArgs' latest_assignment_report: Last reported guest configuration assignment report.
"""
pulumi.set(__self__, "assignment_hash", assignment_hash)
pulumi.set(__self__, "compliance_status", compliance_status)
pulumi.set(__self__, "last_compliance_status_checked", last_compliance_status_checked)
pulumi.set(__self__, "latest_report_id", latest_report_id)
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "target_resource_id", target_resource_id)
if context is not None:
pulumi.set(__self__, "context", context)
if guest_configuration is not None:
pulumi.set(__self__, "guest_configuration", guest_configuration)
if latest_assignment_report is not None:
pulumi.set(__self__, "latest_assignment_report", latest_assignment_report)
@property
@pulumi.getter(name="assignmentHash")
def assignment_hash(self) -> str:
"""
Combined hash of the configuration package and parameters.
"""
return pulumi.get(self, "assignment_hash")
@property
@pulumi.getter(name="complianceStatus")
def compliance_status(self) -> str:
"""
A value indicating compliance status of the machine for the assigned guest configuration.
"""
return pulumi.get(self, "compliance_status")
@property
@pulumi.getter(name="lastComplianceStatusChecked")
def last_compliance_status_checked(self) -> str:
"""
Date and time when last compliance status was checked.
"""
return pulumi.get(self, "last_compliance_status_checked")
@property
@pulumi.getter(name="latestReportId")
def latest_report_id(self) -> str:
"""
Id of the latest report for the guest configuration assignment.
"""
return pulumi.get(self, "latest_report_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> str:
"""
VM resource Id.
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter
def context(self) -> Optional[str]:
"""
The source which initiated the guest configuration assignment. Ex: Azure Policy
"""
return pulumi.get(self, "context")
@property
@pulumi.getter(name="guestConfiguration")
def guest_configuration(self) -> Optional['outputs.GuestConfigurationNavigationResponse']:
"""
The guest configuration to assign.
"""
return pulumi.get(self, "guest_configuration")
@property
@pulumi.getter(name="latestAssignmentReport")
def latest_assignment_report(self) -> Optional['outputs.AssignmentReportResponse']:
"""
Last reported guest configuration assignment report.
"""
return pulumi.get(self, "latest_assignment_report")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GuestConfigurationNavigationResponse(dict):
"""
Guest configuration is an artifact that encapsulates DSC configuration and its dependencies. The artifact is a zip file containing DSC configuration (as MOF) and dependent resources and other dependencies like modules.
"""
def __init__(__self__, *,
content_hash: str,
content_uri: str,
configuration_parameter: Optional[Sequence['outputs.ConfigurationParameterResponse']] = None,
configuration_setting: Optional['outputs.ConfigurationSettingResponse'] = None,
kind: Optional[str] = None,
name: Optional[str] = None,
version: Optional[str] = None):
"""
Guest configuration is an artifact that encapsulates DSC configuration and its dependencies. The artifact is a zip file containing DSC configuration (as MOF) and dependent resources and other dependencies like modules.
:param str content_hash: Combined hash of the guest configuration package and configuration parameters.
:param str content_uri: Uri of the storage where guest configuration package is uploaded.
:param Sequence['ConfigurationParameterResponseArgs'] configuration_parameter: The configuration parameters for the guest configuration.
:param 'ConfigurationSettingResponseArgs' configuration_setting: The configuration setting for the guest configuration.
:param str kind: Kind of the guest configuration. For example:DSC
:param str name: Name of the guest configuration.
:param str version: Version of the guest configuration.
"""
pulumi.set(__self__, "content_hash", content_hash)
pulumi.set(__self__, "content_uri", content_uri)
if configuration_parameter is not None:
pulumi.set(__self__, "configuration_parameter", configuration_parameter)
if configuration_setting is not None:
pulumi.set(__self__, "configuration_setting", configuration_setting)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="contentHash")
def content_hash(self) -> str:
"""
Combined hash of the guest configuration package and configuration parameters.
"""
return pulumi.get(self, "content_hash")
@property
@pulumi.getter(name="contentUri")
def content_uri(self) -> str:
"""
Uri of the storage where guest configuration package is uploaded.
"""
return pulumi.get(self, "content_uri")
@property
@pulumi.getter(name="configurationParameter")
def configuration_parameter(self) -> Optional[Sequence['outputs.ConfigurationParameterResponse']]:
"""
The configuration parameters for the guest configuration.
"""
return pulumi.get(self, "configuration_parameter")
@property
@pulumi.getter(name="configurationSetting")
def configuration_setting(self) -> Optional['outputs.ConfigurationSettingResponse']:
"""
The configuration setting for the guest configuration.
"""
return pulumi.get(self, "configuration_setting")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of the guest configuration. For example:DSC
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the guest configuration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
Version of the guest configuration.
"""
return pulumi.get(self, "version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMInfoResponse(dict):
"""
Information about the VM.
"""
def __init__(__self__, *,
id: str,
uuid: str):
"""
Information about the VM.
:param str id: Azure resource Id of the VM.
:param str uuid: UUID(Universally Unique Identifier) of the VM.
"""
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "uuid", uuid)
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id of the VM.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def uuid(self) -> str:
"""
UUID(Universally Unique Identifier) of the VM.
"""
return pulumi.get(self, "uuid")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
b4e76b67a52d7e11e271463c76c756cd39c39301 | f09978f2a0850278255bd198222cd3990cb0c687 | /gear/schema.py | 9e678012c38374b5baee61fdf28ff22143a7874c | [] | no_license | szpone/climbing-gear | 0e4e53b99a0b550c0e172af21c2c9e08e2c3f1ba | 78ab13b97b4b66464859b95ba6e5ed8587d5e60c | refs/heads/master | 2022-12-12T11:08:57.277056 | 2019-06-05T16:06:02 | 2019-06-05T16:06:02 | 185,016,538 | 1 | 0 | null | 2022-11-22T03:49:28 | 2019-05-05T10:30:11 | Python | UTF-8 | Python | false | false | 514 | py | import graphene
from graphene_django.types import DjangoObjectType, ObjectType
from .models import Gear
class GearType(DjangoObjectType):
class Meta:
model = Gear
class Query(ObjectType):
gear = graphene.Field(GearType, id=graphene.Int())
gears = graphene.List(GearType)
    def resolve_gear(self, info, id=None):
        # The resolver's argument name must match the field argument declared
        # above ("id"); the original "gear_id" parameter would raise a
        # TypeError when the query supplies id.
        return Gear.objects.filter(id=id).first()
def resolve_gears(self, info, **kwargs):
return Gear.objects.all()
schema = graphene.Schema(query=Query)
| [
"[email protected]"
] | |
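A quick sketch of exercising the schema above; it assumes `Gear` exposes an integer primary key, and note that graphene returns ID-typed values as strings:

```python
# Execute a query directly against the schema (e.g. from a shell or a test).
result = schema.execute('{ gear(id: 1) { id } }')
if result.errors:
    print(result.errors)
else:
    print(result.data)  # e.g. {'gear': {'id': '1'}} when the row exists
```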
4eee374d40da98978fa6eead0dbd109ebd17f59e | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2449/60657/249828.py | b6f2e07860c983d2311d854da47037a89843a79d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | A = input().split(',')
B = input()
def judge(A, B):
    # Return the index of B in A, or -1 when B is absent.
    if B in A:
        return A.index(B)
    return -1
print(judge(A, B))
"[email protected]"
] | |
7b91e3b074f85271a746505ec2100144aaa01af3 | d7641647d67d110e08997767e85bbea081c2537b | /bitmovin_api_sdk/models/filter.py | 6836a5d837fc6e5d357ec0e2fa2c5884394e48d2 | [
"MIT"
] | permissive | aachenmax/bitmovin-api-sdk-python | d3ded77c459852cbea4927ff28c2a4ad39e6026a | 931bcd8c4695a7eb224a7f4aa5a189ba2430e639 | refs/heads/master | 2022-11-16T08:59:06.830567 | 2020-07-06T07:16:51 | 2020-07-06T07:16:51 | 267,538,689 | 0 | 1 | MIT | 2020-07-06T07:16:52 | 2020-05-28T08:44:44 | Python | UTF-8 | Python | false | false | 1,781 | py | # coding: utf-8
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.bitmovin_resource import BitmovinResource
import pprint
class Filter(BitmovinResource):
discriminator_value_class_map = {
'CROP': 'CropFilter',
'CONFORM': 'ConformFilter',
'WATERMARK': 'WatermarkFilter',
'ENHANCED_WATERMARK': 'EnhancedWatermarkFilter',
'ROTATE': 'RotateFilter',
'DEINTERLACE': 'DeinterlaceFilter',
'AUDIO_MIX': 'AudioMixFilter',
'DENOISE_HQDN3D': 'DenoiseHqdn3dFilter',
'TEXT': 'TextFilter',
'UNSHARP': 'UnsharpFilter',
'SCALE': 'ScaleFilter',
'INTERLACE': 'InterlaceFilter',
'AUDIO_VOLUME': 'AudioVolumeFilter',
'EBU_R128_SINGLE_PASS': 'EbuR128SinglePassFilter'
}
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
if hasattr(super(Filter, self), "to_dict"):
result = super(Filter, self).to_dict()
for k, v in iteritems(self.discriminator_value_class_map):
if v == type(self).__name__:
result['type'] = k
break
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Filter):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
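A small sketch of what the discriminator map above does during serialization; `CropFilter` stands in for any mapped subclass and its import path is an assumption:

```python
# Hypothetical: a subclass listed in discriminator_value_class_map gets its
# discriminator value written into the "type" key by Filter.to_dict().
from bitmovin_api_sdk.models.crop_filter import CropFilter  # path assumed

crop = CropFilter()
assert crop.to_dict()["type"] == "CROP"
```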
f59eef689da00fb5d14fdfaddf69c05fcdb4d412 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/models/lab_account_fragment.py | 0e3a2fb1fa1897771c1c81ee386226f6730a5827 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 2,376 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class LabAccountFragment(Resource):
"""Represents a lab account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The identifier of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:param enabled_region_selection: Represents if region selection is enabled
:type enabled_region_selection: bool
:param provisioning_state: The provisioning status of the resource.
:type provisioning_state: str
:param unique_identifier: The unique immutable identifier of a resource
(Guid).
:type unique_identifier: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'enabled_region_selection': {'key': 'properties.enabledRegionSelection', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LabAccountFragment, self).__init__(**kwargs)
self.enabled_region_selection = kwargs.get('enabled_region_selection', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.unique_identifier = kwargs.get('unique_identifier', None)
| [
"[email protected]"
] | |
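A minimal construction sketch; msrest-generated models such as this one accept the documented parameters as keyword arguments:

```python
# Hypothetical values; only parameters documented in the docstring are used.
fragment = LabAccountFragment(
    location="westus",
    tags={"env": "dev"},
    enabled_region_selection=True,
)
print(fragment.enabled_region_selection)  # True
```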
d82cf9e821ecf30bd91d020d422728952809a303 | 597ed154876611a3d65ca346574f4696259d6e27 | /dbaas/workflow/steps/tests/test_vm_step.py | 1f05feed7c79364c570f0ed132f5da3578825a91 | [] | permissive | soitun/database-as-a-service | 41984d6d2177734b57d726cd3cca7cf0d8c5f5d6 | 1282a46a9437ba6d47c467f315b5b6a3ac0af4fa | refs/heads/master | 2023-06-24T17:04:49.523596 | 2018-03-15T19:35:10 | 2018-03-15T19:35:10 | 128,066,738 | 0 | 0 | BSD-3-Clause | 2022-05-10T22:39:58 | 2018-04-04T13:33:42 | Python | UTF-8 | Python | false | false | 1,661 | py | from mock import patch
from physical.tests.factory import HostFactory, EnvironmentFactory
from ..util.vm import VmStep, MigrationWaitingBeReady
from . import TestBaseStep
@patch('workflow.steps.util.vm.get_credentials_for', return_value=True)
@patch('workflow.steps.util.vm.CloudStackProvider', return_value=object)
class VMStepTests(TestBaseStep):
def setUp(self):
super(VMStepTests, self).setUp()
self.host = self.instance.hostname
def test_environment(self, *args, **kwargs):
vm_step = VmStep(self.instance)
self.assertEqual(vm_step.environment, self.environment)
def test_host(self, *args, **kwargs):
vm_step = VmStep(self.instance)
self.assertEqual(vm_step.host, self.host)
@patch('workflow.steps.util.vm.get_credentials_for', return_value=True)
@patch('workflow.steps.util.vm.CloudStackProvider', return_value=object)
class VMStepTestsMigration(TestBaseStep):
def setUp(self):
super(VMStepTestsMigration, self).setUp()
self.host = self.instance.hostname
self.future_host = HostFactory()
self.host.future_host = self.future_host
self.host.save()
self.environment_migrate = EnvironmentFactory()
self.environment.migrate_environment = self.environment_migrate
self.environment.save()
def test_environment(self, *args, **kwargs):
vm_step = MigrationWaitingBeReady(self.instance)
self.assertEqual(vm_step.environment, self.environment_migrate)
def test_host(self, *args, **kwargs):
vm_step = MigrationWaitingBeReady(self.instance)
self.assertEqual(vm_step.host, self.future_host)
| [
"[email protected]"
] | |
1dfee621f2c8bf35b8a73f7fbbb1a64d238e125a | bbb21bb79c8c3efbad3dd34ac53fbd6f4590e697 | /week3/TODO/TODO/settings.py | 947cd11c197d9ed2bf30a09cd9c4016007788b22 | [] | no_license | Nusmailov/BFDjango | b14c70c42da9cfcb68eec6930519da1d0b1f53b6 | cab7f0da9b03e9094c21efffc7ab07e99e629b61 | refs/heads/master | 2020-03-28T21:11:50.706778 | 2019-01-21T07:19:19 | 2019-01-21T07:19:19 | 149,136,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py | """
Django settings for TODO project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mitb8&^*0ibt!u_xqe1!tjzumo65hy@cnxt-z#+9+p@m$u8qnn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TODO.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TODO.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
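Following the SECURITY WARNING comments in this settings module, a common hardening sketch (not part of the original file) reads secrets from the environment instead of hard-coding them; the variable names are assumptions:

```python
# Hypothetical hardening for production deployments.
import os

SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "dev-only-insecure-key")
DEBUG = os.environ.get("DJANGO_DEBUG", "0") == "1"
```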
cdc0900d3b2677c0be666cbdd443353d5385757e | 64ffb2e803a19e5dc75ec8fa0f277609d34e0cc7 | /dynamodb/update_counter_atomically.py | ddfbf9dfad90311a0332a3cbd1a990c0830b6c51 | [] | no_license | arunpa0206/awstraining | 687bc4206dfd65693039c525e8a4ff39d14e89d5 | 2eae2353b75a2774f9f47b40d76d63c7f9e08bb4 | refs/heads/master | 2021-05-10T15:06:48.652021 | 2019-08-20T10:36:29 | 2019-08-20T10:36:29 | 118,538,574 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from __future__ import print_function # Python 2/3 compatibility
import boto3
import json
import decimal
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
dynamodb = boto3.resource('dynamodb', region_name='us-west-2', endpoint_url="http://localhost:8000")
table = dynamodb.Table('Movies')
title = "The Big New Movie"
year = 2015
response = table.update_item(
Key={
'year': year,
'title': title
},
UpdateExpression="set info.rating = info.rating + :val",
ExpressionAttributeValues={
':val': decimal.Decimal(1)
},
ReturnValues="UPDATED_NEW"
)
print("UpdateItem succeeded:")
print(json.dumps(response, indent=4, cls=DecimalEncoder))
| [
"[email protected]"
] | |
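A companion sketch reusing `table`, `year`, and `title` from the script above: the decrement is guarded with a ConditionExpression so the counter cannot drop below zero, using only standard boto3/botocore calls:

```python
# Conditional atomic decrement; fails cleanly instead of going negative.
from botocore.exceptions import ClientError

try:
    response = table.update_item(
        Key={'year': year, 'title': title},
        UpdateExpression="set info.rating = info.rating - :val",
        ConditionExpression="info.rating >= :val",
        ExpressionAttributeValues={':val': decimal.Decimal(1)},
        ReturnValues="UPDATED_NEW",
    )
except ClientError as e:
    # A failed condition means the rating was already below :val.
    if e.response['Error']['Code'] != 'ConditionalCheckFailedException':
        raise
```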
303d2e67444557cb4fd051f1250a360cb9ef821c | 892c7bd301eeadf57b546f039faf499448112ddc | /organizacion/migrations/0004_escuelacampo.py | 0f8af8ca46dffde4815519fa6295128bd78c2024 | [
"MIT"
] | permissive | ErickMurillo/aprocacaho | beed9c4b031cf26a362e44fc6a042b38ab246c27 | eecd216103e6b06e3ece174c89d911f27b50585a | refs/heads/master | 2022-11-23T15:03:32.687847 | 2019-07-01T19:16:37 | 2019-07-01T19:16:37 | 53,867,804 | 0 | 1 | MIT | 2022-11-22T01:02:51 | 2016-03-14T15:23:39 | HTML | UTF-8 | Python | false | false | 903 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-04 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organizacion', '0003_auto_20160803_2128'),
]
operations = [
migrations.CreateModel(
name='EscuelaCampo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=200)),
('organizacion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organizacion.Organizacion')),
],
options={
'verbose_name': 'Escuela de campo',
'verbose_name_plural': 'Escuelas de campo',
},
),
]
| [
"[email protected]"
] | |
1b595d943f2f3026a02236c4b6a6caade13ea718 | ea6b3b74c8f1ff9333c5d4b06a0e4dd9bbdb3bba | /tests/protocol/test_json_protocol.py | 5a79da739005fd27e01a88e27d335942a77b03c2 | [
"MIT"
] | permissive | sgalkina/venom | d495d296a388afcb25525491bbbe590bfd258a05 | e372ab9002e71ba4e2422aabd02143e4f1247dba | refs/heads/master | 2021-01-23T03:27:17.239289 | 2017-03-24T15:05:56 | 2017-03-24T15:05:56 | 86,077,951 | 0 | 0 | null | 2017-03-24T14:40:46 | 2017-03-24T14:40:46 | null | UTF-8 | Python | false | false | 5,448 | py | from unittest import SkipTest
from unittest import TestCase
from venom import Message
from venom.common import StringValue, IntegerValue, BoolValue, NumberValue
from venom.exceptions import ValidationError
from venom.fields import String, Number, Field, Repeat
from venom.protocol import JSON
class Foo(Message):
string = String()
parent = Field('tests.protocol.test_json_protocol.Foo')
string_value = Field(StringValue)
class JSONProtocolTestCase(TestCase):
def test_encode_message(self):
class Pet(Message):
sound = String()
protocol = JSON(Pet)
self.assertEqual(protocol.encode(Pet('hiss!')), {'sound': 'hiss!'})
self.assertEqual(protocol.decode({'sound': 'meow'}), Pet('meow'))
self.assertEqual(protocol.decode({}), Pet())
with self.assertRaises(ValidationError) as e:
protocol.decode('bad')
self.assertEqual(e.exception.description, "'bad' is not of type 'object'")
self.assertEqual(e.exception.path, [])
@SkipTest
def test_encode_message_field_attribute(self):
# NOTE: removed support for field attributes.
class Pet(Message):
size = Number(attribute='weight')
protocol = JSON(Pet)
pet = Pet()
pet.size = 2.5
self.assertEqual(protocol.encode(pet), {'weight': 2.5})
self.assertEqual(protocol.decode({'weight': 2.5}), Pet(2.5))
def test_encode_repeat_field(self):
class Pet(Message):
sounds = Repeat(String())
protocol = JSON(Pet)
self.assertEqual(protocol.encode(Pet(['hiss!', 'slither'])), {'sounds': ['hiss!', 'slither']})
self.assertEqual(protocol.decode({'sounds': ['meow', 'purr']}), Pet(['meow', 'purr']))
self.assertEqual(protocol.decode({}), Pet())
self.assertEqual(protocol.encode(Pet()), {})
with self.assertRaises(ValidationError) as e:
protocol.decode({'sounds': 'meow, purr'})
self.assertEqual(e.exception.description, "'meow, purr' is not of type 'list'")
self.assertEqual(e.exception.path, ['sounds'])
def test_validation_field_string(self):
class Foo(Message):
string = String()
protocol = JSON(Foo)
with self.assertRaises(ValidationError) as e:
protocol.decode({'string': None})
self.assertEqual(e.exception.description, "None is not of type 'str'")
self.assertEqual(e.exception.path, ['string'])
def test_validation_path(self):
protocol = JSON(Foo)
with self.assertRaises(ValidationError) as e:
protocol.decode({'string': 42})
self.assertEqual(e.exception.description, "42 is not of type 'str'")
self.assertEqual(e.exception.path, ['string'])
# FIXME With custom encoding/decoding for values this won't happen.
with self.assertRaises(ValidationError) as e:
protocol.decode({'string_value': {'value': None}})
self.assertEqual(e.exception.description, "{'value': None} is not of type 'str'")
self.assertEqual(e.exception.path, ['string_value'])
with self.assertRaises(ValidationError) as e:
protocol.decode({'parent': {'string_value': 42}})
self.assertEqual(e.exception.description, "42 is not of type 'str'")
self.assertEqual(e.exception.path, ['parent', 'string_value'])
def test_unpack_invalid_json(self):
class Pet(Message):
sound = String()
protocol = JSON(Pet)
with self.assertRaises(ValidationError) as e:
protocol.unpack(b'')
self.assertEqual(e.exception.description, "Invalid JSON: Expected object or value")
self.assertEqual(e.exception.path, [])
with self.assertRaises(ValidationError) as e:
protocol.unpack(b'fs"ad')
def test_pack(self):
class Pet(Message):
sound = String()
protocol = JSON(Pet)
self.assertEqual(protocol.pack(Pet()), b'{}')
self.assertEqual(protocol.pack(Pet('hiss!')), b'{"sound":"hiss!"}')
def test_string_value(self):
protocol = JSON(StringValue)
self.assertEqual(protocol.encode(StringValue('hiss!')), 'hiss!')
self.assertEqual(protocol.decode('hiss!'), StringValue('hiss!'))
self.assertEqual(protocol.pack(StringValue()), b'""')
self.assertEqual(protocol.pack(StringValue('hiss!')), b'"hiss!"')
with self.assertRaises(ValidationError):
protocol.decode(42)
def test_integer_value(self):
protocol = JSON(IntegerValue)
self.assertEqual(protocol.encode(IntegerValue(2)), 2)
self.assertEqual(protocol.decode(2), IntegerValue(2))
with self.assertRaises(ValidationError):
protocol.decode('hiss!')
def test_number_value(self):
protocol = JSON(NumberValue)
self.assertEqual(protocol.encode(NumberValue(2.5)), 2.5)
self.assertEqual(protocol.decode(2.5), NumberValue(2.5))
with self.assertRaises(ValidationError):
protocol.decode('hiss!')
def test_bool_value(self):
protocol = JSON(BoolValue)
self.assertEqual(protocol.encode(BoolValue()), False)
self.assertEqual(protocol.encode(BoolValue(True)), True)
self.assertEqual(protocol.decode(False), BoolValue(False))
with self.assertRaises(ValidationError):
protocol.decode('hiss!')
| [
"[email protected]"
] | |
3129d119bb1773e4909ac9e1ecf759cef0cad06e | 539789516d0d946e8086444bf4dc6f44d62758c7 | /inference/python/inference.py | 7fc210e7978a31556f20ba12d8a1baa22d2ff6c4 | [] | no_license | hoangcuong2011/etagger | ad05ca0c54f007f54f73d39dc539c3737d5acacf | 611da685d72da207870ddb3dc403b530c859d603 | refs/heads/master | 2020-05-03T15:15:33.395186 | 2019-03-28T01:40:21 | 2019-03-28T01:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,683 | py | from __future__ import print_function
import sys
import os
path = os.path.dirname(os.path.abspath(__file__)) + '/../..'
sys.path.append(path)
import time
import argparse
import tensorflow as tf
import numpy as np
# for LSTMBlockFusedCell(), https://github.com/tensorflow/tensorflow/issues/23369
tf.contrib.rnn
# for QRNN
try: import qrnn
except: sys.stderr.write('import qrnn, failed\n')
from embvec import EmbVec
from config import Config
from token_eval import TokenEval
from chunk_eval import ChunkEval
from input import Input
def load_frozen_graph(frozen_graph_filename, prefix='prefix'):
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
op_dict=None,
producer_op_list=None,
name=prefix,
)
return graph
def inference(config, frozen_pb_path):
"""Inference for bucket
"""
# load graph
graph = load_frozen_graph(frozen_pb_path)
for op in graph.get_operations():
sys.stderr.write(op.name + '\n')
# create session with graph
# if graph is optimized by tensorRT, then
# from tensorflow.contrib import tensorrt as trt
# gpu_ops = tf.GPUOptions(per_process_gpu_memory_fraction = 0.50)
gpu_ops = tf.GPUOptions()
'''
session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_ops)
'''
session_conf = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False,
gpu_options=gpu_ops,
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
sess = tf.Session(graph=graph, config=session_conf)
# mapping placeholders and tensors
p_is_train = graph.get_tensor_by_name('prefix/is_train:0')
p_sentence_length = graph.get_tensor_by_name('prefix/sentence_length:0')
p_input_data_pos_ids = graph.get_tensor_by_name('prefix/input_data_pos_ids:0')
p_input_data_chk_ids = graph.get_tensor_by_name('prefix/input_data_chk_ids:0')
p_input_data_word_ids = graph.get_tensor_by_name('prefix/input_data_word_ids:0')
p_input_data_wordchr_ids = graph.get_tensor_by_name('prefix/input_data_wordchr_ids:0')
t_logits_indices = graph.get_tensor_by_name('prefix/logits_indices:0')
    t_sentence_lengths = graph.get_tensor_by_name('prefix/sentence_lengths:0')
    # Note: only the glove placeholders are mapped above; when config.emb_class
    # includes 'elmo' or 'bert', the p_elmo_*/p_bert_* placeholders referenced
    # below must also be fetched from the graph here.
num_buckets = 0
total_duration_time = 0.0
bucket = []
while 1:
try: line = sys.stdin.readline()
except KeyboardInterrupt: break
if not line: break
line = line.strip()
if not line and len(bucket) >= 1:
start_time = time.time()
# Build input data
inp = Input(bucket, config, build_output=False)
feed_dict = {p_input_data_pos_ids: inp.example['pos_ids'],
p_input_data_chk_ids: inp.example['chk_ids'],
p_is_train: False,
p_sentence_length: inp.max_sentence_length}
feed_dict[p_input_data_word_ids] = inp.example['word_ids']
feed_dict[p_input_data_wordchr_ids] = inp.example['wordchr_ids']
if 'elmo' in config.emb_class:
feed_dict[p_elmo_input_data_wordchr_ids] = inp.example['elmo_wordchr_ids']
if 'bert' in config.emb_class:
feed_dict[p_bert_input_data_token_ids] = inp.example['bert_token_ids']
feed_dict[p_bert_input_data_token_masks] = inp.example['bert_token_masks']
feed_dict[p_bert_input_data_segment_ids] = inp.example['bert_segment_ids']
if 'elmo' in config.emb_class:
feed_dict[p_bert_input_data_elmo_indices] = inp.example['bert_elmo_indices']
logits_indices, sentence_lengths = sess.run([t_logits_indices, t_sentence_lengths], feed_dict=feed_dict)
tags = config.logit_indices_to_tags(logits_indices[0], sentence_lengths[0])
for i in range(len(bucket)):
if 'bert' in config.emb_class:
j = inp.example['bert_wordidx2tokenidx'][0][i]
out = bucket[i] + ' ' + tags[j]
else:
out = bucket[i] + ' ' + tags[i]
sys.stdout.write(out + '\n')
sys.stdout.write('\n')
bucket = []
duration_time = time.time() - start_time
out = 'duration_time : ' + str(duration_time) + ' sec'
sys.stderr.write(out + '\n')
num_buckets += 1
total_duration_time += duration_time
if line : bucket.append(line)
if len(bucket) != 0:
start_time = time.time()
# Build input data
inp = Input(bucket, config, build_output=False)
        # This block previously referenced an undefined `model` object; it must
        # use the placeholders fetched from the frozen graph, exactly as in the
        # bucket loop above.
        feed_dict = {p_input_data_pos_ids: inp.example['pos_ids'],
                     p_input_data_chk_ids: inp.example['chk_ids'],
                     p_is_train: False,
                     p_sentence_length: inp.max_sentence_length}
        feed_dict[p_input_data_word_ids] = inp.example['word_ids']
        feed_dict[p_input_data_wordchr_ids] = inp.example['wordchr_ids']
        if 'elmo' in config.emb_class:
            feed_dict[p_elmo_input_data_wordchr_ids] = inp.example['elmo_wordchr_ids']
        if 'bert' in config.emb_class:
            feed_dict[p_bert_input_data_token_ids] = inp.example['bert_token_ids']
            feed_dict[p_bert_input_data_token_masks] = inp.example['bert_token_masks']
            feed_dict[p_bert_input_data_segment_ids] = inp.example['bert_segment_ids']
            if 'elmo' in config.emb_class:
                feed_dict[p_bert_input_data_elmo_indices] = inp.example['bert_elmo_indices']
logits_indices, sentence_lengths = sess.run([t_logits_indices, t_sentence_lengths], feed_dict=feed_dict)
tags = config.logit_indices_to_tags(logits_indices[0], sentence_lengths[0])
for i in range(len(bucket)):
if 'bert' in config.emb_class:
j = inp.example['bert_wordidx2tokenidx'][0][i]
out = bucket[i] + ' ' + tags[j]
else:
out = bucket[i] + ' ' + tags[i]
sys.stdout.write(out + '\n')
sys.stdout.write('\n')
duration_time = time.time() - start_time
out = 'duration_time : ' + str(duration_time) + ' sec'
tf.logging.info(out)
num_buckets += 1
total_duration_time += duration_time
out = 'total_duration_time : ' + str(total_duration_time) + ' sec' + '\n'
out += 'average processing time / bucket : ' + str(total_duration_time / num_buckets) + ' sec'
tf.logging.info(out)
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--emb_path', type=str, help='path to word embedding vector + vocab(.pkl)', required=True)
parser.add_argument('--wrd_dim', type=int, help='dimension of word embedding vector', required=True)
parser.add_argument('--word_length', type=int, default=15, help='max word length')
parser.add_argument('--frozen_path', type=str, help='path to frozen model(ex, ./exported/ner_frozen.pb)', required=True)
args = parser.parse_args()
tf.logging.set_verbosity(tf.logging.INFO)
args.restore = None
config = Config(args, is_training=False, emb_class='glove', use_crf=True)
inference(config, args.frozen_path)
| [
"[email protected]"
] | |
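For reference, the script above reads whitespace-separated token lines from stdin, treats blank lines as bucket boundaries, and writes each line back with a predicted tag appended; a typical invocation (file paths are placeholders) is `python inference.py --emb_path glove.emb.pkl --wrd_dim 300 --frozen_path ./exported/ner_frozen.pb < input.txt > output.txt`.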
da0de991295a250dbfc4238a27b5f8573f7770a8 | 48c6b58e07891475a2c60a8afbbbe6447bf527a7 | /src/tests/control/test_orders.py | 74c7007fa3a989b7c5f5133361a5abbc1a3dcc33 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | awg24/pretix | a9f86fe2dd1f3269734ed39b3ea052ef292ff110 | b1d67a48601838bac0d4e498cbe8bdcd16013d60 | refs/heads/master | 2021-01-15T23:40:28.582518 | 2015-08-27T14:05:18 | 2015-08-27T14:05:18 | 42,126,402 | 1 | 0 | null | 2015-09-08T16:58:52 | 2015-09-08T16:58:51 | null | UTF-8 | Python | false | false | 3,084 | py | from datetime import timedelta
from decimal import Decimal
import pytest
from django.utils.timezone import now
from pretix.base.models import (
Event, EventPermission, Item, Order, OrderPosition, Organizer,
OrganizerPermission, User,
)
@pytest.fixture
def env():
o = Organizer.objects.create(name='Dummy', slug='dummy')
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), plugins='pretix.plugins.banktransfer'
)
user = User.objects.create_user('[email protected]', '[email protected]', 'dummy')
EventPermission.objects.create(
event=event,
user=user,
can_view_orders=True,
can_change_orders=True
)
o = Order.objects.create(
code='FOO', event=event,
user=user, status=Order.STATUS_PENDING,
datetime=now(), expires=now() + timedelta(days=10),
total=0, payment_provider='banktransfer'
)
ticket = Item.objects.create(event=event, name='Early-bird ticket',
category=None, default_price=23,
admission=True)
event.settings.set('attendee_names_asked', True)
OrderPosition.objects.create(
order=o,
item=ticket,
variation=None,
price=Decimal("14"),
attendee_name="Peter"
)
return event, user, o
@pytest.mark.django_db
def test_order_list(client, env):
client.login(identifier='[email protected]', password='dummy')
response = client.get('/control/event/dummy/dummy/orders/')
assert 'FOO' in response.rendered_content
response = client.get('/control/event/dummy/dummy/orders/?user=peter')
assert 'FOO' not in response.rendered_content
response = client.get('/control/event/dummy/dummy/orders/?user=dummy')
assert 'FOO' in response.rendered_content
response = client.get('/control/event/dummy/dummy/orders/?status=p')
assert 'FOO' not in response.rendered_content
response = client.get('/control/event/dummy/dummy/orders/?status=n')
assert 'FOO' in response.rendered_content
@pytest.mark.django_db
def test_order_detail(client, env):
client.login(identifier='[email protected]', password='dummy')
response = client.get('/control/event/dummy/dummy/orders/FOO/')
assert 'Early-bird' in response.rendered_content
assert 'Peter' in response.rendered_content
@pytest.mark.django_db
def test_order_transition_cancel(client, env):
client.login(identifier='[email protected]', password='dummy')
client.post('/control/event/dummy/dummy/orders/FOO/transition', {
'status': 'c'
})
o = Order.objects.current.get(identity=env[2].identity)
assert o.status == Order.STATUS_CANCELLED
@pytest.mark.django_db
def test_order_transition_to_paid_success(client, env):
client.login(identifier='[email protected]', password='dummy')
client.post('/control/event/dummy/dummy/orders/FOO/transition', {
'status': 'p'
})
o = Order.objects.current.get(identity=env[2].identity)
assert o.status == Order.STATUS_PAID
| [
"[email protected]"
] | |
56ac5f435c1505b586a07d2bf83a64eff2564702 | 60b5a9a8b519cb773aca004b7217637f8a1a0526 | /customer/urls.py | 2bf028b3dc13eed4b4e9793741b3f478b4d5d355 | [] | no_license | malep2007/dag-bragan-erp-backend | 76ce90c408b21b0bda73c6dd972e2f77b7f21b1f | e98182af2848a6533ddd28c586649a8fee1dc695 | refs/heads/master | 2021-08-11T01:29:27.864747 | 2019-01-15T17:46:26 | 2019-01-15T17:46:26 | 151,831,965 | 0 | 0 | null | 2021-06-10T20:56:21 | 2018-10-06T11:10:12 | Python | UTF-8 | Python | false | false | 437 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.CustomerListView.as_view(), name='index'),
path('<int:pk>/', views.CustomerDetailView.as_view(), name="detail"),
path('edit/<int:pk>/', views.CustomerUpdateView.as_view(), name='edit'),
path('add/', views.CustomerCreateView.as_view(), name='add'),
path('delete/<int:pk>/', views.CustomerDeleteView.as_view(), name='delete'),
]
| [
"[email protected]"
] | |
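A small sketch of resolving these named routes (the unused `reverse` import was dropped above); this assumes the URLconf is included without an application namespace, so the URL prefixes shown are placeholders:

```python
# Hypothetical: reverse the named routes declared above.
from django.urls import reverse

detail_url = reverse('detail', args=[1])      # e.g. '/customers/1/'
edit_url = reverse('edit', kwargs={'pk': 1})  # e.g. '/customers/edit/1/'
```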
a1b2d1e62a5a9c0b2e499246d138951a2a9f20f9 | 64a80df5e23b195eaba7b15ce207743e2018b16c | /Downloads/adafruit-circuitpython-bundle-py-20201107/lib/adafruit_wsgi/wsgi_app.py | 44171f51cfa9e62e9b7fdc09a66fc806d95d7b4a | [] | no_license | aferlazzo/messageBoard | 8fb69aad3cd7816d4ed80da92eac8aa2e25572f5 | f9dd4dcc8663c9c658ec76b2060780e0da87533d | refs/heads/main | 2023-01-27T20:02:52.628508 | 2020-12-07T00:37:17 | 2020-12-07T00:37:17 | 318,548,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,807 | py | # The MIT License (MIT)
#
# Copyright (c) 2019 Matthew Costi for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`wsgi_app`
================================================================================
CircuitPython framework for creating WSGI server compatible web applications.
This does *not* include server implementation, which is necessary in order
to create a web application with this library.
* Circuit Python implementation of an WSGI Server for ESP32 devices:
https://github.com/adafruit/Adafruit_CircuitPython_ESP32SPI.git
* Author(s): Matthew Costi
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import re
from adafruit_wsgi.request import Request
__version__ = "1.1.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_WSGI.git"
class WSGIApp:
"""
The base WSGI Application class.
"""
def __init__(self):
self._routes = []
self._variable_re = re.compile("^<([a-zA-Z]+)>$")
def __call__(self, environ, start_response):
"""
Called whenever the server gets a request.
The environ dict has details about the request per wsgi specification.
Call start_response with the response status string and headers as a list of tuples.
Return a single item list with the item being your response data string.
"""
status = ""
headers = []
resp_data = []
request = Request(environ)
match = self._match_route(request.path, request.method.upper())
if match:
args, route = match
status, headers, resp_data = route["func"](request, *args)
start_response(status, headers)
return resp_data
def on_request(self, methods, rule, request_handler):
"""
Register a Request Handler for a particular HTTP method and path.
request_handler will be called whenever a matching HTTP request is received.
request_handler should accept the following args:
(Dict environ)
request_handler should return a tuple in the shape of:
(status, header_list, data_iterable)
:param list methods: the methods of the HTTP request to handle
:param str rule: the path rule of the HTTP request
:param func request_handler: the function to call
"""
regex = "^"
rule_parts = rule.split("/")
for part in rule_parts:
var = self._variable_re.match(part)
if var:
# If named capture groups ever become a thing, use this regex instead
# regex += "(?P<" + var.group("var") + r">[a-zA-Z0-9_-]*)\/"
regex += r"([a-zA-Z0-9_-]+)\/"
else:
regex += part + r"\/"
regex += "?$" # make last slash optional and that we only allow full matches
self._routes.append(
(re.compile(regex), {"methods": methods, "func": request_handler})
)
def route(self, rule, methods=None):
"""
A decorator to register a route rule with an endpoint function.
if no methods are provided, default to GET
"""
if not methods:
methods = ["GET"]
return lambda func: self.on_request(methods, rule, func)
def _match_route(self, path, method):
for matcher, route in self._routes:
match = matcher.match(path)
if match and method in route["methods"]:
return (match.groups(), route)
return None
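# A minimal usage sketch (the `web_app` name and the LED route are illustrative
# assumptions, not part of this module); a WSGI-compatible server would then
# invoke `web_app` once per request:
#
#   web_app = WSGIApp()
#
#   @web_app.route("/led/<state>")
#   def led(request, state):
#       return ("200 OK", [("Content-Type", "text/plain")], ["LED " + state])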
| [
"[email protected]"
] | |
2ebfb27d864daa8609758160bd3ee3c6122b704a | e147827b4f6fbc4dd862f817e9d1a8621c4fcedc | /apps/doc/views.py | ab450d855ef75b700dd41676295f6518652efa34 | [] | no_license | Jsummer121/DjangoBolgProject | ba3ebe27a1ac67439de67b9f10c17d1c16e43f84 | d64f9579d29ac5e3979d40303e84f4be6852fa96 | refs/heads/master | 2023-01-30T16:26:33.566665 | 2020-12-15T12:00:16 | 2020-12-15T12:00:16 | 321,654,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | from django.shortcuts import render
from django.views import View
from django.http import HttpResponse, FileResponse, StreamingHttpResponse, Http404
from doc import models
import requests
from django.utils.encoding import escape_uri_path
from Django_pro.settings import DOC_FILE_URL
def doc(request):
docs = models.Doc.objects.only('image_url', 'desc', 'title').filter(is_delete=False)
return render(request, 'doc/docDownload.html', context={'docs': docs})
class DocDownload(View):
def get(self, request, doc_id):
doc_file = models.Doc.objects.only('file_url').filter(is_delete=False, id=doc_id).first()
if doc_file:
# /media/流畅的Python.pdf
doc_url = doc_file.file_url
# http://192.168.216.137:8000/media/流畅的Python.pdf
doc_url = DOC_FILE_URL + doc_url
# a = requests.get(doc_url)
            # res = HttpResponse(a)  # the line below is the one-line shorthand
res = FileResponse(requests.get(doc_url))
ex_name = doc_url.split('.')[-1] # pdf
if not ex_name:
                raise Http404('Invalid file name')
else:
ex_name = ex_name.lower()
if ex_name == 'pdf':
res['Content-type'] = 'application/pdf'
elif ex_name == 'doc':
                res['Content-type'] = 'application/msword'
elif ex_name == 'ppt':
res['Content-type'] = 'application/powerpoint'
else:
                raise Http404('Unsupported file format')
            doc_filename = escape_uri_path(doc_url.split('/')[-1])
            # "attachment" saves/downloads the file; "inline" would display it in the browser
res["Content-Disposition"] = "attachment; filename*=UTF-8''{}".format(doc_filename)
return res
else:
raise Http404('文档不存在')
| [
"[email protected]"
] | |
8329fa5bea57d4f6278bd16ce249d56f50672bc7 | 2c872fedcdc12c89742d10c2f1c821eed0470726 | /pyNet/day06/code/test_poll.py | 5f18a3bc99a060b68f525c9f9e72ac31b6e740a3 | [] | no_license | zuigehulu/AID1811 | 581c3c7a37df9fa928bc632e4891fc9bafe69201 | 10cab0869875290646a9e5d815ff159d0116990e | refs/heads/master | 2020-04-19T16:33:04.174841 | 2019-01-30T07:58:24 | 2019-01-30T07:58:24 | 168,307,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | from multiprocessing import Pool
from time import sleep
L = [1,2,3,4,5]
def a(ne):
print(ne**2)
sleep(2)
if __name__ == '__main__':  # guard needed on platforms that spawn worker processes
    pool = Pool()
    for i in L:
        pool.apply_async(a, (i,))
    pool.close()
    pool.join()
"[email protected]"
] | |
0a5a579dd0a6d232526835dc574518dcbab6e108 | 8393f28f390e222b0429fc4f3f07590f86333d8d | /linux-stuff/bin/svn-merge-meld | 7c7beb77f18c8c2b9a4dbfe1bd016a679f58f12d | [] | no_license | jmangelson/settings | fe118494252da35b175d159bbbef118f22b189fb | df9291f8947ba1ceb7c83a731dfbe9e775ce5add | refs/heads/master | 2021-01-16T17:39:24.105679 | 2015-02-20T01:17:26 | 2015-02-20T01:17:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | #!/usr/bin/env python
# svn merge-tool python wrapper for meld
import sys
import subprocess
# path to meld ($ which meld)
meld = "/usr/bin/meld"
log = False
f = open('/tmp/svn-merge-meld.log', 'a')
def main():
if log:
f.write("call: %r\n" % sys.argv)
# file paths
base = sys.argv[1]
theirs = sys.argv[2]
mine = sys.argv[3]
merged = sys.argv[4]
partial = sys.argv[5]
# the call to meld
cmd = [meld, mine, theirs, merged]
# Call meld, making sure it exits correctly
subprocess.check_call(cmd)
try:
main()
except Exception as e:
print "Oh noes, an error: %r" % e
if log:
f.write("Error: %r\n" % e)
sys.exit(-1)
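# To have Subversion invoke this wrapper during interactive merges, point
# merge-tool-cmd at it in ~/.subversion/config (the path is an example):
#   [helpers]
#   merge-tool-cmd = ~/bin/svn-merge-meld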
| [
"devnull@localhost"
] | devnull@localhost |
|
55b1ef245e9a7cb31d87bfd61a9576b63fdc7fdc | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res/scripts/client/gui/scaleform/daapi/view/lobby/cybersport/staticformationunitview.py | 8a3ccb32d12fe54c88e9e5068baa81fe574abd77 | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 14,220 | py | # 2015.11.18 11:53:51 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/cyberSport/StaticFormationUnitView.py
import BigWorld
from UnitBase import UNIT_OP
from gui import makeHtmlString
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.shared.formatters import text_styles, icons
from gui.shared.utils.functions import makeTooltip
from gui.Scaleform.daapi.view.lobby.profile.ProfileUtils import ProfileUtils
from gui.Scaleform.daapi.view.lobby.rally.vo_converters import makeVehicleVO
from gui.Scaleform.daapi.view.lobby.rally.ActionButtonStateVO import ActionButtonStateVO
from gui.Scaleform.daapi.view.lobby.rally import vo_converters, rally_dps
from gui.Scaleform.daapi.view.meta.StaticFormationUnitMeta import StaticFormationUnitMeta
from gui.Scaleform.locale.CYBERSPORT import CYBERSPORT
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.clubs import events_dispatcher as club_events
from gui.clubs.club_helpers import ClubListener
from gui.clubs.settings import getLadderChevron64x64, getLadderBackground
from gui.prb_control import settings
from gui.prb_control.context import unit_ctx
from gui.prb_control.settings import REQUEST_TYPE
from gui.shared import g_itemsCache
from gui.shared.view_helpers.emblems import ClubEmblemsHelper
from gui.game_control.battle_availability import isHourInForbiddenList
from helpers import int2roman
class StaticFormationUnitView(StaticFormationUnitMeta, ClubListener, ClubEmblemsHelper):
ABSENT_VALUES = '--'
def __init__(self):
super(StaticFormationUnitView, self).__init__()
self.__extra = self.unitFunctional.getExtra()
self.__clubDBID = self.__extra.clubDBID
def getCoolDownRequests(self):
requests = super(StaticFormationUnitView, self).getCoolDownRequests()
requests.extend((REQUEST_TYPE.CLOSE_SLOT, REQUEST_TYPE.CHANGE_RATED))
return requests
def onClubEmblem64x64Received(self, clubDbID, emblem):
if emblem:
self.as_setTeamIconS(self.getMemoryTexturePath(emblem))
def onClubMembersChanged(self, members):
self.__updateHeader()
self._updateMembersData()
def onClubUpdated(self, club):
self.__updateHeader()
def onAccountClubStateChanged(self, state):
self.__updateHeader()
def onAccountClubRestrictionsChanged(self):
self.__updateHeader()
def onClubNameChanged(self, name):
self.__updateHeader()
def onClubLadderInfoChanged(self, ladderInfo):
self.__updateHeader()
def onClubsSeasonStateChanged(self, seasonState):
self.__updateHeader()
def onStatusChanged(self):
self.__updateHeader()
def __makeLegionnairesCountString(self, unit):
legionnairesString = makeHtmlString('html_templates:lobby/cyberSport/staticFormationUnitView', 'legionnairesCount', {'cur': unit.getLegionaryCount(),
'max': unit.getLegionaryMaxCount()})
return legionnairesString
def onUnitPlayerRolesChanged(self, pInfo, pPermissions):
functional = self.unitFunctional
_, unit = functional.getUnit()
if self._candidatesDP is not None:
self._candidatesDP.rebuild(functional.getCandidates())
self.as_setLegionnairesCountS(False, self.__makeLegionnairesCountString(unit))
self.__updateHeader()
self._updateMembersData()
self.__updateTotalData()
return
def onUnitFlagsChanged(self, flags, timeLeft):
functional = self.unitFunctional
pInfo = functional.getPlayerInfo()
isCreator = pInfo.isCreator()
if isCreator and flags.isOpenedStateChanged():
self.as_setOpenedS(flags.isOpened(), vo_converters.makeStaticFormationStatusLbl(flags))
if flags.isChanged():
self._updateMembersData()
else:
self._setActionButtonState()
def onUnitSettingChanged(self, opCode, value):
if opCode == UNIT_OP.SET_COMMENT:
self.as_setCommentS(self.unitFunctional.getCensoredComment())
elif opCode in [UNIT_OP.CLOSE_SLOT, UNIT_OP.OPEN_SLOT]:
functional = self.unitFunctional
_, unit = functional.getUnit()
unitFlags = functional.getFlags()
slotState = functional.getSlotState(value)
pInfo = functional.getPlayerInfo()
canAssign, vehicles = pInfo.canAssignToSlot(value)
canTakeSlot = not (pInfo.isLegionary() and unit.isClub())
vehCount = len(vehicles)
slotLabel = vo_converters.makeStaticSlotLabel(unitFlags, slotState, pInfo.isCreator(), vehCount, pInfo.isLegionary(), unit.isRated())
if opCode == UNIT_OP.CLOSE_SLOT:
self.as_closeSlotS(value, settings.UNIT_CLOSED_SLOT_COST, slotLabel)
else:
self.as_openSlotS(value, canAssign and canTakeSlot, slotLabel, vehCount)
self.__updateTotalData()
self._setActionButtonState()
def onUnitVehicleChanged(self, dbID, vInfo):
functional = self.unitFunctional
pInfo = functional.getPlayerInfo(dbID=dbID)
if pInfo.isInSlot:
slotIdx = pInfo.slotIdx
if not vInfo.isEmpty():
vehicleVO = makeVehicleVO(g_itemsCache.items.getItemByCD(vInfo.vehTypeCD), functional.getRosterSettings().getLevelsRange())
slotCost = vInfo.vehLevel
else:
slotState = functional.getSlotState(slotIdx)
vehicleVO = None
if slotState.isClosed:
slotCost = settings.UNIT_CLOSED_SLOT_COST
else:
slotCost = 0
self.as_setMemberVehicleS(slotIdx, slotCost, vehicleVO)
self.__updateTotalData()
if pInfo.isCurrentPlayer() or functional.getPlayerInfo().isCreator():
self._setActionButtonState()
return
def onUnitMembersListChanged(self):
functional = self.unitFunctional
_, unit = functional.getUnit()
if self._candidatesDP is not None:
self._candidatesDP.rebuild(functional.getCandidates())
self.as_setLegionnairesCountS(False, self.__makeLegionnairesCountString(unit))
self.__updateHeader()
self._updateMembersData()
self.__updateTotalData()
return
def onUnitExtraChanged(self, extra):
self.__extra = self.unitFunctional.getExtra()
self.__updateHeader()
self._updateMembersData()
self.__updateTotalData()
def onUnitRejoin(self):
super(StaticFormationUnitView, self).onUnitRejoin()
functional = self.unitFunctional
_, unit = functional.getUnit()
if self._candidatesDP is not None:
self._candidatesDP.rebuild(functional.getCandidates())
self.as_setLegionnairesCountS(False, self.__makeLegionnairesCountString(unit))
self.__updateHeader()
self._updateMembersData()
self.__updateTotalData()
return
def toggleStatusRequest(self):
self.requestToOpen(not self.unitFunctional.getFlags().isOpened())
def initCandidatesDP(self):
self._candidatesDP = rally_dps.StaticFormationCandidatesDP()
self._candidatesDP.init(self.app, self.as_getCandidatesDPS(), self.unitFunctional.getCandidates())
def rebuildCandidatesDP(self):
self._candidatesDP.rebuild(self.unitFunctional.getCandidates())
def setRankedMode(self, isRated):
self.sendRequest(unit_ctx.ChangeRatedUnitCtx(isRated, 'prebattle/change_settings'))
def showTeamCard(self):
club_events.showClubProfile(self.__clubDBID)
def onSlotsHighlihgtingNeed(self, databaseID):
functional = self.unitFunctional
availableSlots = list(functional.getPlayerInfo(databaseID).getAvailableSlots(True))
pInfo = functional.getPlayerInfo(dbID=databaseID)
if not pInfo.isInSlot and pInfo.isLegionary():
_, unit = functional.getUnit()
if unit.isRated():
self.as_highlightSlotsS([])
return []
if unit.getLegionaryCount() >= unit.getLegionaryMaxCount():
legionariesSlots = unit.getLegionarySlots().values()
self.as_highlightSlotsS(legionariesSlots)
return legionariesSlots
self.as_highlightSlotsS(availableSlots)
return availableSlots
def _updateRallyData(self):
functional = self.unitFunctional
data = vo_converters.makeStaticFormationUnitVO(functional, unitIdx=functional.getUnitIdx(), app=self.app)
self.as_updateRallyS(data)
def _setActionButtonState(self):
self.as_setActionButtonStateS(ActionButtonStateVO(self.unitFunctional))
def _getVehicleSelectorDescription(self):
return CYBERSPORT.WINDOW_VEHICLESELECTOR_INFO_UNIT
def _populate(self):
super(StaticFormationUnitView, self)._populate()
self.startClubListening(self.__clubDBID)
settings = self.unitFunctional.getRosterSettings()
self._updateVehiclesLabel(int2roman(settings.getMinLevel()), int2roman(settings.getMaxLevel()))
self.__updateHeader()
_, unit = self.unitFunctional.getUnit()
self.as_setLegionnairesCountS(False, self.__makeLegionnairesCountString(unit))
self._updateVehiclesLabel(int2roman(settings.getMinLevel()), int2roman(settings.getMaxLevel()))
self.clubsCtrl.getAvailabilityCtrl().onStatusChanged += self.onStatusChanged
def _dispose(self):
self.ABSENT_VALUES = None
self.__extra = None
self.stopClubListening(self.__clubDBID)
self.clubsCtrl.getAvailabilityCtrl().onStatusChanged -= self.onStatusChanged
super(StaticFormationUnitView, self)._dispose()
return
def __updateHeader(self):
club = self.clubsCtrl.getClub(self.__clubDBID)
canSetRanked = club is not None and club.getPermissions().canSetRanked()
seasonState = self.clubsCtrl.getSeasonState()
modeLabel = ''
modeTooltip = ''
modeTooltipType = ''
isFixedMode = True
isModeTooltip = False
if self.__extra.isRatedBattle:
isFixedMode = not canSetRanked
if canSetRanked:
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_SETUNRANKEDMODE
else:
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_RANKEDMODE
elif seasonState.isSuspended():
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONPAUSED
isModeTooltip = True
modeTooltipType = TOOLTIPS_CONSTANTS.COMPLEX
modeTooltip = makeTooltip(CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONPAUSEDTOOLTIP_HEADER, CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONPAUSEDTOOLTIP_BODY)
elif seasonState.isFinished():
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONFINISHED
isModeTooltip = True
modeTooltipType = TOOLTIPS_CONSTANTS.COMPLEX
modeTooltip = makeTooltip(CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONFINISHEDTOOLTIP_HEADER, CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONFINISHEDTOOLTIP_BODY)
elif canSetRanked:
isFixedMode = False
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_SETRANKEDMODE
if len(modeLabel):
if canSetRanked and seasonState.isActive() or self.__extra.isRatedBattle:
modeLabel = text_styles.neutral(modeLabel)
else:
modeLabel = text_styles.standard(modeLabel)
if isHourInForbiddenList(self.clubsCtrl.getAvailabilityCtrl().getForbiddenHours()):
modeLabel = '{0}{1}'.format(icons.alert(), text_styles.main(CYBERSPORT.LADDERREGULATIONS_WARNING))
isFixedMode = True
isModeTooltip = True
modeTooltipType = TOOLTIPS_CONSTANTS.LADDER_REGULATIONS
bgSource = RES_ICONS.MAPS_ICONS_LIBRARY_CYBERSPORT_LEAGUERIBBONS_UNRANKED
battles = self.ABSENT_VALUES
winRate = self.ABSENT_VALUES
leagueIcon = getLadderChevron64x64()
enableWinRateTF = False
if club is not None:
clubTotalStats = club.getTotalDossier().getTotalStats()
battles = BigWorld.wg_getNiceNumberFormat(clubTotalStats.getBattlesCount())
division = club.getLadderInfo().division
leagueIcon = getLadderChevron64x64(division)
winRateValue = ProfileUtils.getValueOrUnavailable(clubTotalStats.getWinsEfficiency())
if winRateValue != ProfileUtils.UNAVAILABLE_VALUE:
enableWinRateTF = True
winRate = ProfileUtils.formatFloatPercent(winRateValue)
else:
winRate = self.ABSENT_VALUES
if self.__extra.isRatedBattle:
bgSource = getLadderBackground(division)
self.requestClubEmblem64x64(club.getClubDbID(), club.getEmblem64x64())
self.as_setHeaderDataS({'clubId': self.__extra.clubDBID,
'teamName': self.__extra.clubName,
'isRankedMode': bool(self.__extra.isRatedBattle),
'battles': battles,
'winRate': winRate,
'enableWinRateTF': enableWinRateTF,
'leagueIcon': leagueIcon,
'isFixedMode': isFixedMode,
'modeLabel': modeLabel,
'modeTooltip': modeTooltip,
'bgSource': bgSource,
'modeTooltipType': modeTooltipType,
'isModeTooltip': isModeTooltip})
return
def __updateTotalData(self):
functional = self.unitFunctional
unitStats = functional.getStats()
canDoAction, restriction = functional.validateLevels(stats=unitStats)
self.as_setTotalLabelS(canDoAction, vo_converters.makeTotalLevelLabel(unitStats, restriction), unitStats.curTotalLevel)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\cybersport\staticformationunitview.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:53:52 Central Europe (standard time)
| [
"[email protected]"
] | |
0a551818b8e85dd12f84691ab34b3df1f13c138e | 14ddda0c376f984d2a3f7dcd0ca7aebb7c49648d | /bnn_mcmc_examples/examples/mlp/noisy_xor/setting2/mcmc/metropolis_hastings/pilot_visual_summary.py | 34d2bb1186d2c1da6be83394dc47fe6431283c68 | [
"MIT"
] | permissive | papamarkou/bnn_mcmc_examples | 62dcd9cc0cf57cda39aa46c2f2f237bbcd2d35bb | 7bb4ecfb33db4c30a8e61e31f528bda0efb24e3d | refs/heads/main | 2023-07-12T20:51:28.302981 | 2021-08-22T13:06:17 | 2021-08-22T13:06:17 | 316,554,634 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | # %% Import packages
import kanga.plots as ps
from kanga.chains import ChainArray
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.mcmc.constants import diagnostic_iter_thres
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.mcmc.metropolis_hastings.constants import sampler_output_pilot_path
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.model import model
# %% Load chain array
chain_array = ChainArray.from_file(keys=['sample', 'accepted'], path=sampler_output_pilot_path)
# %% Drop burn-in samples
chain_array.vals['sample'] = chain_array.vals['sample'][diagnostic_iter_thres:, :]
chain_array.vals['accepted'] = chain_array.vals['accepted'][diagnostic_iter_thres:]
# %% Plot traces of simulated chain
for i in range(model.num_params()):
ps.trace(
chain_array.get_param(i),
title=r'Traceplot of $\theta_{{{}}}$'.format(i+1),
xlabel='Iteration',
ylabel='Parameter value'
)
# %% Plot running means of simulated chain
for i in range(model.num_params()):
ps.running_mean(
chain_array.get_param(i),
title=r'Running mean plot of parameter $\theta_{{{}}}$'.format(i+1),
xlabel='Iteration',
ylabel='Running mean'
)
# %% Plot histograms of marginals of simulated chain
for i in range(model.num_params()):
ps.hist(
chain_array.get_param(i),
bins=30,
density=True,
title=r'Histogram of parameter $\theta_{{{}}}$'.format(i+1),
xlabel='Parameter value',
ylabel='Parameter relative frequency'
)
| [
"[email protected]"
] | |
44dc7ace3c96940a36a7ea468124c78e03900455 | 1620e0af4a522db2bac16ef9c02ac5b5a4569d70 | /Ekeopara_Praise/Phase 2/LIST/Day44 Tasks/Task4.py | e3f303c59954e7bec5cf6fa62b4f49925de56d80 | [
"MIT"
] | permissive | Ekeopara-Praise/python-challenge-solutions | cda07902c9ffc09ba770ae7776e5e01026406a05 | 068b67c05524b5c5a0d6084315eca3424c768421 | refs/heads/master | 2022-12-15T15:29:03.031583 | 2020-09-25T06:46:27 | 2020-09-25T06:46:27 | 263,758,530 | 2 | 0 | null | 2020-05-13T22:37:33 | 2020-05-13T22:37:32 | null | UTF-8 | Python | false | false | 141 | py | '''4. Write a Python program to concatenate elements of a list. '''
num = ['1', '2', '3', '4', '5']
print('-'.join(num))
print(''.join(num)) | [
"[email protected]"
] | |
46b56de9bf7ead1838fe58206ae4c91ce5bcfbb2 | 00792a90bfa302af8614f4a5f955c071ed320acf | /apps/control_params/tests.py | c93cb528dd87a34cb25ffb1ba372511159998b42 | [] | no_license | elcolie/HT6MInterface | dceb8f5e9b501b8836904559bd40259ccfe49085 | 04abf3cc73618c1cf059fa67da8a043ec9fb43b3 | refs/heads/master | 2022-02-04T08:02:49.023460 | 2021-01-21T06:55:39 | 2021-01-21T06:55:39 | 123,398,906 | 0 | 0 | null | 2022-01-21T20:20:29 | 2018-03-01T07:30:16 | JavaScript | UTF-8 | Python | false | false | 4,395 | py | from django.test import TestCase
from apps.control_params.api.serializers import ControlParameterSerializer
from apps.control_params.models import ControlParameter
from apps.heating_params.models import HeatingParameter
class TestControlParameter(TestCase):
def setUp(self):
pass
def test_blank_initial_data(self):
"""If no supply then serializer will be stuck at `heating_params`"""
serializer = ControlParameterSerializer(data={})
assert False is serializer.is_valid()
def test_control_param_serializer_number_mismatch(self):
data = {
'no_break_point': 3,
'max_run_time': 10,
'heating_params': [
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "E",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "H",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
]
}
serializer = ControlParameterSerializer(data=data)
detail = f"Heating params count is mismatch with given number of break point"
assert False is serializer.is_valid()
assert detail == str(serializer.errors.get('heating_params')[0])
def test_control_param_serializer(self):
data = {
'no_break_point': 3,
'max_run_time': 10,
'heating_params': [
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "E",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "H",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "E",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
]
}
serializer = ControlParameterSerializer(data=data)
is_valid = serializer.is_valid()
serializer.save()
assert is_valid is serializer.is_valid()
assert 3 == HeatingParameter.objects.count()
assert 1 == ControlParameter.objects.count()
| [
"[email protected]"
] | |
07feee452428ecf97bd5edc3add50468a4a465d2 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_309/ch26_2019_08_19_14_12_37_089425.py | 8dadd9e96c3304269c01917bc7478e247c45840a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | dias = int(input("dias:"))
horas = int(input("horas:"))
minutos = int(input("minutos:"))
segundos = int(input("segundos:"))
print((dias*86.400 + horas*3.600 + minutos*60 + segundos))
| [
"[email protected]"
] | |
ef8433f6bae0df2f57342d5ef4f9efcd844ecde0 | ddf1267a1a7cb01e70e3b12ad4a7bfaf291edb3e | /src/search/tasks.py | 2c428bb843cb84de7aa107d3c9693be9e16496f7 | [
"MIT"
] | permissive | Garinmckayl/researchhub-backend | 46a17513c2c9928e51db4b2ce5a5b62df453f066 | cd135076d9a3b49a08456f7ca3bb18ff35a78b95 | refs/heads/master | 2023-06-17T04:37:23.041787 | 2021-05-18T01:26:46 | 2021-05-18T01:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,339 | py | from oauth.utils import get_orcid_works, check_doi_in_works
from paper.models import Paper
from paper.utils import download_pdf
from researchhub.celery import app
from utils.orcid import orcid_api
from user.models import Author
from purchase.models import Wallet
VALID_LICENSES = []
@app.task
def download_pdf_by_license(item, paper_id):
try:
licenses = item['license']
for license in licenses:
if license in VALID_LICENSES:
pdf, filename = get_pdf_and_filename(item['links'])
paper = Paper.objects.get(pk=paper_id)
paper.file.save(filename, pdf)
paper.save(update_fields=['file'])
break
except Exception:
pass
def get_pdf_and_filename(links):
for link in links:
if link['content-type'] == 'application/pdf':
return download_pdf(link['URL'])
return None, None
@app.task
def create_authors_from_crossref(crossref_authors, paper_id, paper_doi):
paper = None
try:
paper = Paper.objects.get(pk=paper_id)
except Paper.DoesNotExist:
pass
for crossref_author in crossref_authors:
try:
first_name = crossref_author['given']
last_name = crossref_author['family']
except KeyError:
break
affiliation = None
if len(crossref_author['affiliation']) > 0:
FIRST = 0
affiliation = crossref_author['affiliation'][FIRST]['name']
try:
orcid_id = crossref_author['ORCID'].split('/')[-1]
get_or_create_orcid_author(orcid_id, first_name, last_name, paper)
except KeyError:
orcid_authors = search_orcid_author(
first_name,
last_name,
affiliation
)
for orcid_author in orcid_authors:
works = get_orcid_works(orcid_author)
if (len(works) > 0) and check_doi_in_works(paper_doi, works):
create_orcid_author(orcid_author, paper)
def search_orcid_author(given_names, family_name, affiliation=None):
matches = []
try:
author_name_results = orcid_api.search_by_name(
given_names,
family_name
)
authors = author_name_results.json()['result']
if authors is not None:
for author in authors:
uid = author['orcid-identifier']['path']
author_id_results = orcid_api.search_by_id(uid)
matches.append(author_id_results.json())
except Exception as e:
print(e)
return matches
def create_orcid_author(orcid_author, paper):
name = orcid_author['person']['name']
first_name = name['given-names']['value']
last_name = name['family-name']['value']
orcid_id = orcid_author['orcid-identifier']['path']
get_or_create_orcid_author(orcid_id, first_name, last_name, paper)
def get_or_create_orcid_author(orcid_id, first_name, last_name, paper):
    author, created = Author.objects.get_or_create(
orcid_id=orcid_id,
defaults={
'first_name': first_name,
'last_name': last_name,
}
)
    wallet, _ = Wallet.objects.get_or_create(
author=author
)
if paper is not None:
paper.authors.add(author)
| [
"[email protected]"
] | |
3fa9322ab882012f8dd6fc64efa180bbd27ec444 | f0856e60a095ce99ec3497b3f27567803056ac60 | /keras/keras19~31[scaler, CNN(GAP,DNN)]/keras31_cifar100_3_Djsull.py | 9150dcc697bf72ace67e7f4f1b9da8a5c55e6d9b | [] | no_license | hjuju/TF_Study-HAN | dcbac17ce8b8885f5fb7d7f554230c2948fda9ac | c0faf98380e7f220868ddf83a9aaacaa4ebd2c2a | refs/heads/main | 2023-09-04T09:13:33.212258 | 2021-10-27T08:00:49 | 2021-10-27T08:00:49 | 384,371,952 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,418 | py | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, QuantileTransformer,MaxAbsScaler, PowerTransformer, OneHotEncoder
from tensorflow.keras.datasets import cifar100
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, Conv2D, Flatten, MaxPool2D
from keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping
from icecream import ic
import time
#1. Data preprocessing
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train = x_train.reshape(50000, 32 * 32 * 3)
x_test = x_test.reshape(10000, 32 * 32 * 3) # reshape to 2D
ic(x_train)
# # x_train = x_train/255.
# # x_test = x_test/255.
# y_train = to_categorical(y_train)
# y_test = to_categorical(y_test)
# scaler = StandardScaler()
# x_train = scaler.fit_transform(x_train) # fit on x_train only; scaler.fit(x_train) + scaler.transform(x_train) in one line
# x_test = scaler.transform(x_test)
# x_train = x_train.reshape(50000, 32, 32, 3)
# x_test = x_test.reshape(10000, 32 ,32, 3) # back to 4D after scaling
# # print(np.unique(y_train)) # [0 1 2 3 4 5 6 7 8 9]
# # one = OneHotEncoder() # the input shape must be 2D
# # y_train = y_train.reshape(-1,1) # convert to 2D
# # y_test = y_test.reshape(-1,1)
# # one.fit(y_train)
# # y_train = one.transform(y_train).toarray() # (50000, 100)
# # y_test = one.transform(y_test).toarray() # (10000, 100)
# # to_categorical -> even with labels 3,4,6,8, classes 0,1,2 are auto-generated (more flexible about shape)
# # with labels 3, 4, 5, 6, 7 they stay 3,4,5,6,7 (the shape must be 2D)
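# A tiny illustration of the difference (assumed values, not from this run):
#   to_categorical([3, 4]) creates one-hot columns for classes 0..4
#   OneHotEncoder encodes only the categories it saw and needs y.reshape(-1, 1)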
# #2. Modeling
# model = Sequential()
# model.add(Conv2D(filters=128, kernel_size=(2, 2), padding='valid', activation='relu', input_shape=(32, 32, 3)))
# model.add(Conv2D(128, (2, 2), padding='same', activation='relu'))
# model.add(MaxPool2D())
# model.add(Conv2D(128, (2, 2), padding='valid', activation='relu'))
# model.add(Conv2D(128, (2, 2), padding='same', activation='relu'))
# model.add(MaxPool2D())
# model.add(Conv2D(64, (2, 2), activation='relu'))
# model.add(Conv2D(64, (2, 2), padding='same', activation='relu')) # 큰사이즈 아닌 이상 4,4 까지 올라가지 않음
# model.add(MaxPool2D()) # 556개 / 나가는 데이터를 확인해서 레이의 노드 개수 구성
# model.add(Flatten())
# model.add(Dense(128, activation='relu'))
# model.add(Dense(128, activation='relu'))
# model.add(Dense(128, activation='relu'))
# model.add(Dense(100, activation='softmax'))
# #3. Compile, train
# es = EarlyStopping(monitor='val_loss', patience=10, mode='auto', verbose=1)
# model.compile(loss='categorical_crossentropy', optimizer='adam',
# metrics=['acc'])
# start = time.time()
# hist = model.fit(x_train, y_train, epochs=100, batch_size=64,
# validation_split=0.25, callbacks=[es])
# 걸린시간 = round((time.time() - start) /60,1)
# #4. evaluating, prediction
# loss = model.evaluate(x_test, y_test, batch_size=128)
# print('loss = ', loss[0])
# print('accuracy = ', loss[1])
# ic(f'{걸린시간}분')
# import matplotlib.pyplot as plt
# plt.figure(figsize=(9,5))
# #1
# plt.subplot(2,1,1) # two plots in a 2-row, 1-column grid
# plt.plot(hist.history['loss'], marker='.', c='red', label='loss')
# plt.plot(hist.history['val_loss'], marker='.', c='blue', label='val_loss')
# plt.grid()
# plt.title('loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(loc='upper right')
# #2
# plt.subplot(2,1,2) # two plots in a 2-row, 1-column grid
# plt.plot(hist.history['acc'])
# plt.plot(hist.history['val_acc'])
# plt.grid()
# plt.title('acc')
# plt.ylabel('acc')
# plt.xlabel('epoch')
# plt.legend(['acc', 'val_acc'])
# plt.show()
# '''
# loss = 3.0406737327575684
# accuracy = 0.3928000032901764
# batch_size=64, validation_split=0.25
# loss = 5.080616474151611
# accuracy = 0.33799999952316284
# ic| f'{걸린시간}분': '3.5분'
# model revised / patience=7, epochs=100, batch_size=64, validation_split=0.25
# loss = 2.777371406555176
# accuracy = 0.376800000667572
# ''' | [
"[email protected]"
] | |
c97e9f32dd8b94b6bb3365179ef73965eccd8be5 | bedae10cbaf676d8f309fa593028558d9a6e9c6b | /Algorithm/Easy/1000+/1206NextGreaterElementI.py | dfcffec1d8a09564dfb341ed4eb30870284fee73 | [
"MIT"
] | permissive | MartinYan623/Lint-Code | 5800d61a54f87306c25ff2e3d535145312b42c66 | 57d2fa441d6496234615736e3f55d0b71aaa51dc | refs/heads/master | 2021-06-06T13:51:19.587424 | 2021-04-21T12:23:19 | 2021-04-21T12:23:19 | 139,412,536 | 0 | 0 | null | 2020-08-08T10:28:52 | 2018-07-02T08:18:11 | Python | UTF-8 | Python | false | false | 627 | py | class Solution:
"""
@param nums1: an array
@param nums2: an array
@return: find all the next greater numbers for nums1's elements in the corresponding places of nums2
"""
def nextGreaterElement(self, nums1, nums2):
# Write your code here
for i in range(len(nums1)):
            index = nums2.index(nums1[i])
            flag = False
            for j in range(index + 1, len(nums2)):
                if nums2[j] > nums1[i]:
                    flag = True
                    nums1[i] = nums2[j]
                    break
            if not flag:
                nums1[i] = -1
return nums1
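# Example (from the classic problem statement; illustrative only):
#   Solution().nextGreaterElement([4, 1, 2], [1, 3, 4, 2])  # -> [-1, 3, -1]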
| [
"[email protected]"
] | |
964de307289972354a1b551f7c32d12f000e98d4 | 95ec5d4d14516be1a1fdcc8bd1fb29279dfaff3c | /settings.py | 513790faf5a80aaabdf246439d26eae875211e35 | [] | no_license | gitter-badger/dev4gov.org | 16e25621a81552a6458cdd21cb96f17c7e222350 | a03165e921d0e76ad4283c970b3e0540f7d53c75 | refs/heads/master | 2021-01-18T06:52:15.360799 | 2011-08-21T09:01:55 | 2011-08-21T09:01:55 | 41,620,266 | 0 | 0 | null | 2015-08-30T07:25:32 | 2015-08-30T07:25:32 | null | UTF-8 | Python | false | false | 5,041 | py | # Django settings for dev4gov_org project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'f4x@q6h+!nk6&=nf#ro5hh(p-%!ohxm_s70dyd7e@1@7@t)s3g'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'dev4gov_org.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"[email protected]"
] | |
d8600af0b88b95f8cda4ccde3d48eef8e17c2e47 | 436177bf038f9941f67e351796668700ffd1cef2 | /venv/Lib/site-packages/sklearn/tests/test_calibration.py | b8585c22bb36278fb772ff8f40c7129b07a1ad2e | [] | no_license | python019/matplotlib_simple | 4359d35f174cd2946d96da4d086026661c3d1f9c | 32e9a8e773f9423153d73811f69822f9567e6de4 | refs/heads/main | 2023-08-22T18:17:38.883274 | 2021-10-07T15:55:50 | 2021-10-07T15:55:50 | 380,471,961 | 29 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,088 | py | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.model_selection import LeaveOneOut, train_test_split
from sklearn.utils._testing import (assert_array_almost_equal,
assert_almost_equal,
assert_array_equal,
assert_raises, ignore_warnings)
from sklearn.utils.extmath import softmax
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@pytest.fixture(scope="module")
def data():
X, y = make_classification(
n_samples=200, n_features=6, random_state=42
)
return X, y
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration(data, method, ensemble):
# Test calibration objects with isotonic and sigmoid
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble)
assert_raises(ValueError, cal_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
# Note that this fit overwrites the fit on the entire training
# set
cal_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf,
prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_cal_clf,
1 - prob_pos_cal_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss((y_test + 1) % 2,
prob_pos_cal_clf_relabeled))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_bad_method(data, ensemble):
# Check only "isotonic" and "sigmoid" are accepted as methods
X, y = data
clf = LinearSVC()
clf_invalid_method = CalibratedClassifierCV(
clf, method="foo", ensemble=ensemble
)
with pytest.raises(ValueError):
clf_invalid_method.fit(X, y)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_regressor(data, ensemble):
# `base-estimator` should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
X, y = data
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), ensemble=ensemble)
with pytest.raises(RuntimeError):
clf_base_regressor.fit(X, y)
def test_calibration_default_estimator(data):
# Check base_estimator default is LinearSVC
X, y = data
calib_clf = CalibratedClassifierCV(cv=2)
calib_clf.fit(X, y)
base_est = calib_clf.calibrated_classifiers_[0].base_estimator
assert isinstance(base_est, LinearSVC)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_cv_splitter(data, ensemble):
# Check when `cv` is a CV splitter
X, y = data
splits = 5
kfold = KFold(n_splits=splits)
calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble)
assert isinstance(calib_clf.cv, KFold)
assert calib_clf.cv.n_splits == splits
calib_clf.fit(X, y)
expected_n_clf = splits if ensemble else 1
assert len(calib_clf.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_sample_weight(data, method, ensemble):
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(
base_estimator, method=method, ensemble=ensemble
)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert diff > 0.1
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_parallel_execution(data, method, ensemble):
"""Test parallel calibration"""
X, y = data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
base_estimator = LinearSVC(random_state=42)
cal_clf_parallel = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=2, ensemble=ensemble
)
cal_clf_parallel.fit(X_train, y_train)
probs_parallel = cal_clf_parallel.predict_proba(X_test)
cal_clf_sequential = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=1, ensemble=ensemble
)
cal_clf_sequential.fit(X_train, y_train)
probs_sequential = cal_clf_sequential.predict_proba(X_test)
assert_allclose(probs_parallel, probs_sequential)
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize('seed', range(2))
def test_calibration_multiclass(method, ensemble, seed):
def multiclass_brier(y_true, proba_pred, n_classes):
Y_onehot = np.eye(n_classes)[y_true]
return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]
# Test calibration for multiclass with classifier that implements
# only decision function.
clf = LinearSVC(random_state=7)
X, y = make_blobs(n_samples=500, n_features=100, random_state=seed,
centers=10, cluster_std=15.0)
# Use an unbalanced dataset by collapsing 8 clusters into one class
# to make the naive calibration based on a softmax more unlikely
# to work.
y[y > 2] = 2
n_classes = np.unique(y).shape[0]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
# Check probabilities sum to 1
assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that the dataset is not too trivial, otherwise it's hard
# to get interesting calibration data during the internal
# cross-validation loop.
assert 0.65 < clf.score(X_test, y_test) < 0.95
# Check that the accuracy of the calibrated model is never degraded
# too much compared to the original classifier.
assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)
# Check that Brier loss of calibrated classifier is smaller than
# loss obtained by naively turning OvR decision function to
# probabilities via a softmax
uncalibrated_brier = \
multiclass_brier(y_test, softmax(clf.decision_function(X_test)),
n_classes=n_classes)
calibrated_brier = multiclass_brier(y_test, probas,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
clf = RandomForestClassifier(n_estimators=30, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
uncalibrated_brier = multiclass_brier(y_test, clf_probs,
n_classes=n_classes)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
calibrated_brier = multiclass_brier(y_test, cal_clf_probs,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
# Check error if clf not prefit
unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
with pytest.raises(NotFittedError):
unfit_clf.fit(X_calib, y_calib)
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = cal_clf.predict_proba(this_X_test)
y_pred = cal_clf.predict(this_X_test)
prob_pos_cal_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
def test_calibration_ensemble_false(data, method):
# Test that `ensemble=False` is the same as using predictions from
# `cross_val_predict` to train calibrator.
X, y = data
clf = LinearSVC(random_state=7)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False)
cal_clf.fit(X, y)
cal_probas = cal_clf.predict_proba(X)
# Get probas manually
unbiased_preds = cross_val_predict(
clf, X, y, cv=3, method='decision_function'
)
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
else:
calibrator = _SigmoidCalibration()
calibrator.fit(unbiased_preds, y)
# Use `clf` fit on all data
clf.fit(X, y)
clf_df = clf.decision_function(X)
manual_probas = calibrator.predict(clf_df)
assert_allclose(cal_probas[:, 1], manual_probas)
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
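    # With two uniform bins, predictions {0, 0.1, 0.2} land in the first bin
    # (mean prediction 0.1, fraction of positives 0) and {0.8, 0.9, 1.0} in
    # the second (mean 0.9, fraction 1), which the asserts below check.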
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert len(prob_true) == len(prob_pred)
assert len(prob_true) == 2
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
# test that quantiles work as expected
y_true2 = np.array([0, 0, 0, 0, 1, 1])
y_pred2 = np.array([0., 0.1, 0.2, 0.5, 0.9, 1.])
prob_true_quantile, prob_pred_quantile = calibration_curve(
y_true2, y_pred2, n_bins=2, strategy='quantile')
assert len(prob_true_quantile) == len(prob_pred_quantile)
assert len(prob_true_quantile) == 2
assert_almost_equal(prob_true_quantile, [0, 2 / 3])
assert_almost_equal(prob_pred_quantile, [0.1, 0.8])
# Check that error is raised when invalid strategy is selected
assert_raises(ValueError, calibration_curve, y_true2, y_pred2,
strategy='percentile')
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_nan_imputer(ensemble):
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', SimpleImputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(
clf, cv=2, method='isotonic', ensemble=ensemble
)
clf_c.fit(X, y)
clf_c.predict(X)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_prob_sum(ensemble):
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5,
n_classes=num_classes)
clf = LinearSVC(C=1.0, random_state=7)
clf_prob = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_less_classes(ensemble):
# Test to check calibration works fine when train set in a test-train
# split does not contain all classes
# Since this test uses LOO, at each iteration train set will not contain a
# class label
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0, random_state=7)
cal_clf = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
cal_clf.fit(X, y)
for i, calibrated_classifier in \
enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
if ensemble:
# Check that the unobserved class has proba=0
assert_array_equal(proba[:, i], np.zeros(len(y)))
# Check for all other classes proba>0
assert np.all(proba[:, :i] > 0)
assert np.all(proba[:, i + 1:] > 0)
else:
# Check `proba` are all 1/n_classes
assert np.allclose(proba, 1 / proba.shape[0])
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('X', [np.random.RandomState(42).randn(15, 5, 2),
np.random.RandomState(42).randn(15, 5, 2, 6)])
def test_calibration_accepts_ndarray(X):
"""Test that calibration accepts n-dimensional arrays as input"""
y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]
class MockTensorClassifier(BaseEstimator):
"""A toy estimator that accepts tensor inputs"""
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def decision_function(self, X):
# toy decision function that just needs to have the right shape:
return X.reshape(X.shape[0], -1).sum(axis=1)
calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
# we should be able to fit this classifier with no error
calibrated_clf.fit(X, y)
@pytest.fixture
def text_data():
text_data = [
{'state': 'NY', 'age': 'adult'},
{'state': 'TX', 'age': 'adult'},
{'state': 'VT', 'age': 'child'},
]
text_labels = [1, 0, 1]
return text_data, text_labels
@pytest.fixture
def text_data_pipeline(text_data):
X, y = text_data
pipeline_prefit = Pipeline([
('vectorizer', DictVectorizer()),
('clf', RandomForestClassifier())
])
return pipeline_prefit.fit(X, y)
def test_calibration_pipeline(text_data, text_data_pipeline):
# Test that calibration works in prefit pipeline with transformer,
# where `X` is not array-like, sparse matrix or dataframe at the start.
# See https://github.com/scikit-learn/scikit-learn/issues/8710
X, y = text_data
clf = text_data_pipeline
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
calib_clf.fit(X, y)
# Check attributes are obtained from fitted estimator
assert_array_equal(calib_clf.classes_, clf.classes_)
msg = "'CalibratedClassifierCV' object has no attribute"
with pytest.raises(AttributeError, match=msg):
calib_clf.n_features_in_
@pytest.mark.parametrize('clf, cv', [
pytest.param(LinearSVC(C=1), 2),
pytest.param(LinearSVC(C=1), 'prefit'),
])
def test_calibration_attributes(clf, cv):
# Check that `n_features_in_` and `classes_` attributes created properly
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
if cv == 'prefit':
clf = clf.fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv=cv)
calib_clf.fit(X, y)
if cv == 'prefit':
assert_array_equal(calib_clf.classes_, clf.classes_)
assert calib_clf.n_features_in_ == clf.n_features_in_
else:
classes = LabelEncoder().fit(y).classes_
assert_array_equal(calib_clf.classes_, classes)
assert calib_clf.n_features_in_ == X.shape[1]
# FIXME: remove in 1.1
def test_calibrated_classifier_cv_deprecation(data):
# Check that we raise the proper deprecation warning if accessing
# `calibrators_` from the `_CalibratedClassifier`.
X, y = data
calib_clf = CalibratedClassifierCV(cv=2).fit(X, y)
with pytest.warns(FutureWarning):
calibrators = calib_clf.calibrated_classifiers_[0].calibrators_
for clf1, clf2 in zip(
calibrators, calib_clf.calibrated_classifiers_[0].calibrators
):
assert clf1 is clf2
| [
"[email protected]"
] | |
6c35b1cfd607a05efda8da84959d5075ad4cce77 | 4db21365bd1f78d0c3258efba0af2cb10696fa32 | /main/settings.py | 073da02a715681d860283236b176447a2fa5284c | [] | no_license | gichimux/beegee_cms | e62a2da86cc23395f3ce8a1dc3041dc9742a2315 | c4395f6d0bc334cb4158208d6d2a124c70da9ed0 | refs/heads/master | 2020-08-07T21:11:53.345827 | 2019-10-08T08:53:52 | 2019-10-08T08:53:52 | 213,588,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | """
Django settings for main project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-^@!%3l)0af)45%2l=mdl8zspo$y1ob6ntx^^c*-v&=g&u!vmk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
96dbfb206fea6616d529302a4dd2d8b79d04dcdb | 4e8876d7b29cf9fb05849da77553b8a7e3783bdc | /src/plugins/processing/algs/gdal/contour.py | e25947294d997251bea2fcf065aa480f5e025270 | [] | no_license | hydrology-tep/hep-qgis-plugin-lite | 48477f504b6fc1a9a9446c7c7f5666f4b2ccfee7 | 781cbaa1b3e9331de6741dd44a22322048ab176c | refs/heads/master | 2021-03-27T17:01:18.284421 | 2018-06-27T12:09:58 | 2018-06-27T12:09:58 | 70,825,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,908 | py | # -*- coding: utf-8 -*-
"""
***************************************************************************
contour.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class contour(GdalAlgorithm):
INPUT_RASTER = 'INPUT_RASTER'
OUTPUT_VECTOR = 'OUTPUT_VECTOR'
INTERVAL = 'INTERVAL'
FIELD_NAME = 'FIELD_NAME'
EXTRA = 'EXTRA'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'contour.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Contour')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Extraction')
self.addParameter(ParameterRaster(self.INPUT_RASTER,
self.tr('Input layer'), False))
self.addParameter(ParameterNumber(self.INTERVAL,
self.tr('Interval between contour lines'), 0.0,
99999999.999999, 10.0))
self.addParameter(ParameterString(self.FIELD_NAME,
self.tr('Attribute name (if not set, no elevation attribute is attached)'),
'ELEV', optional=True))
self.addParameter(ParameterString(self.EXTRA,
self.tr('Additional creation parameters'), '', optional=True))
self.addOutput(OutputVector(self.OUTPUT_VECTOR,
self.tr('Contours')))
def getConsoleCommands(self):
output = self.getOutputValue(self.OUTPUT_VECTOR)
interval = unicode(self.getParameterValue(self.INTERVAL))
fieldName = unicode(self.getParameterValue(self.FIELD_NAME))
extra = self.getParameterValue(self.EXTRA)
if extra is not None:
extra = unicode(extra)
arguments = []
if len(fieldName) > 0:
arguments.append('-a')
arguments.append(fieldName)
arguments.append('-i')
arguments.append(interval)
driver = GdalUtils.getVectorDriverFromFileName(output)
arguments.append('-f')
arguments.append(driver)
if extra and len(extra) > 0:
arguments.append(extra)
arguments.append(self.getParameterValue(self.INPUT_RASTER))
arguments.append(output)
return ['gdal_contour', GdalUtils.escapeAndJoin(arguments)]
| [
"[email protected]"
] | |
ab5d8fbd62d3448fb69cf6581a66121ca6459a25 | 459929ce79538ec69a6f8c32e608f4e484594d68 | /venv/Lib/site-packages/virtualbox/__about__.py | 600822f21eb32a6edbdfa087453d0b2e1ea10fc2 | [
"Apache-2.0"
] | permissive | yychai97/Kubernetes | ec2ef2a98a4588b7588a56b9d661d63222278d29 | 2955227ce81bc21f329729737b5c528b02492780 | refs/heads/master | 2023-07-02T18:36:41.382362 | 2021-08-13T04:20:27 | 2021-08-13T04:20:27 | 307,412,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | __title__ = "virtualbox"
__author__ = "Michael Dorman"
__author_email__ = "[email protected]"
__maintainer__ = "Seth Michael Larson"
__maintainer_email__ = "[email protected]"
__version__ = "2.1.1"
__license__ = "Apache-2.0"
__url__ = "https://github.com/sethmlarson/virtualbox-python"
| [
"[email protected]"
] | |
4749bf6ccf6bd5a56d395c5462ac67cbfea6b435 | 7936ebf5b94c3d153fb55248b52db2eff724427c | /11/homework11/zhihu_top100.py | 6fb64a0e8b94e74945b6a87d6f31271cd6307984 | [
"MIT"
] | permissive | xiaodongzi/pytohon_teach_material | f9e95f7b294a9e49d86d1a8e25cbef5efef3aaf7 | 13ed128a993637d0203f1f8c5419d781d7212883 | refs/heads/master | 2021-05-30T09:48:16.898483 | 2016-01-24T17:02:34 | 2016-01-24T17:02:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # coding: utf-8
import requests
from pyquery import PyQuery as pq
question_num = 1
page_num = 1
to_stop = False
scrap_questions_num = 100
while True:
url = "http://www.zhihu.com/topic/19776749/top-answers?page=%d" % (page_num)
res = requests.get(url)
# print res.encoding
for p in pq(res.content).find('.feed-main'):
# print type(p)
        print question_num, '. ', pq(p).find('.question_link').text()
        relative_link = pq(p).find('.question_link').attr('href')
        absolute_link = 'http://www.zhihu.com' + relative_link
        print ' link: ', absolute_link
        print ' vote: ', pq(p).find('.zm-item-vote-count').text()
        print ' answer summary'
        print ' ', pq(p).find('.zh-summary').text()[:-4]
print '-' * 60
print
if question_num == scrap_questions_num:
to_stop = True
break
question_num += 1
page_num += 1
    if to_stop:
break | [
"[email protected]"
] | |
0bf78b5a94b1e07dee662b8e341ee34aea435e03 | 54857571461a579bed30cee27871aaa5fe396bcc | /nltk-0.9.7/src/nltk/inference/inference.py | 0b6d64c2a35e50e6cfaa2627aae6c30fe56517a5 | [] | no_license | ahmedBazaz/affective-text-classification | 78375182e800b39e0e309e8b469e273c0d9590f0 | 719e9b26e60863c620662564fb9cfeafc004777f | refs/heads/master | 2021-01-10T14:50:01.100274 | 2009-01-09T03:59:01 | 2009-01-09T03:59:01 | 48,296,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | # Natural Language Toolkit: Interface to Theorem Provers
#
# Author: Dan Garrette <[email protected]>
# Ewan Klein <[email protected]>
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from nltk.sem import logic
import api
import tableau
import prover9
import mace
import resolution
"""
A wrapper module that calls theorem provers and model builders.
"""
def get_prover(goal=None, assumptions=None, prover_name=None):
"""
@param goal: Input expression to prove
@type goal: L{logic.Expression}
@param assumptions: Input expressions to use as assumptions in the proof
@type assumptions: L{list} of logic.Expression objects
"""
if not prover_name:
prover_name = 'Prover9'
if prover_name.lower() == 'tableau':
return api.BaseProverCommand(tableau.Tableau(), goal, assumptions)
elif prover_name.lower() == 'prover9':
return prover9.Prover9Command(goal, assumptions)
elif prover_name.lower() == 'resolution':
return resolution.ResolutionCommand(goal, assumptions)
raise Exception('\'%s\' is not a valid prover name' % prover_name)
def get_model_builder(goal=None, assumptions=None, model_builder_name=None):
"""
@param goal: Input expression to prove
@type goal: L{logic.Expression}
@param assumptions: Input expressions to use as assumptions in the proof
@type assumptions: L{list} of logic.Expression objects
"""
if not model_builder_name:
model_builder_name = 'Mace'
if model_builder_name.lower() == 'mace':
return mace.MaceCommand(goal, assumptions)
def get_parallel_prover_builder(goal=None, assumptions=None,
prover_name='', model_builder_name=''):
prover = get_prover(prover_name=prover_name)
model_builder = get_model_builder(model_builder_name=model_builder_name)
return api.ParallelProverBuilderCommand(prover.get_prover(),
model_builder.get_model_builder(),
goal, assumptions)
def demo():
lp = logic.LogicParser()
a = lp.parse(r'some x.(man(x) and walks(x))')
b = lp.parse(r'some x.(walks(x) and man(x))')
bicond = logic.IffExpression(a, b)
print "Trying to prove:\n '%s <-> %s'" % (a, b)
print 'tableau: %s' % get_prover(bicond, prover_name='tableau').prove()
print 'Prover9: %s' % get_prover(bicond, prover_name='Prover9').prove()
print '\n'
lp = logic.LogicParser()
a = lp.parse(r'all x.(man(x) -> mortal(x))')
b = lp.parse(r'man(socrates)')
c1 = lp.parse(r'mortal(socrates)')
c2 = lp.parse(r'-mortal(socrates)')
print get_prover(c1, [a,b], 'prover9').prove()
print get_prover(c2, [a,b], 'prover9').prove()
print get_model_builder(c1, [a,b], 'mace').build_model()
print get_model_builder(c2, [a,b], 'mace').build_model()
print get_parallel_prover_builder(c1, [a,b]).prove(True)
print get_parallel_prover_builder(c1, [a,b]).build_model(True)
if __name__ == '__main__':
demo()
| [
"tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883"
] | tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883 |
f36f09b4e05bbc16b9f5367879c5ca25aebf7d66 | bd55c7d73a95caed5f47b0031264ec05fd6ff60a | /apps/qa/migrations/0006_coupon_vendor_branch.py | 35b01a8e9fea5a66447f1b448de6613892793c36 | [] | no_license | phonehtetpaing/ebdjango | 3c8610e2d96318aff3b1db89480b2f298ad91b57 | 1b77d7662ec2bce9a6377690082a656c8e46608c | refs/heads/main | 2023-06-26T13:14:55.319687 | 2021-07-21T06:04:58 | 2021-07-21T06:04:58 | 381,564,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # Generated by Django 2.0.5 on 2019-03-12 08:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0123_auto_20190312_1736'),
('qa', '0005_auto_20190312_1732'),
]
operations = [
migrations.AddField(
model_name='coupon',
name='vendor_branch',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='coupon_vendor_branch', to='core.VendorBranch', verbose_name='vendor_branch'),
),
]
| [
"[email protected]"
] | |
5c15afa29895acb8165f67f96d1744092f542d33 | ed269e9a4d9d6bfbb833381b7aef65a23f391fe2 | /比赛/5479. 千位分隔数.py | f95b565d2780cc9f0cda6a36ec21c68410b1d997 | [] | no_license | Comyn-Echo/leeCode | fcff0d4c4c10209a47bd7c3204e3f64565674c91 | 67e9daecb7ffd8f7bcb2f120ad892498b1219327 | refs/heads/master | 2023-04-28T17:35:52.963069 | 2021-05-19T01:52:16 | 2021-05-19T01:52:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | class Solution(object):
def thousandSeparator(self, n):
"""
:type n: int
:rtype: str
"""
ans =""
res= n % 1000
n = n // 1000
ans = str(res) + ans
if n ==0:
return ans
if len(str(res)) == 2:
ans = "0" + ans
elif len(str(res)) ==1:
ans = "00" + ans
while n !=0:
res = n % 1000
n = n // 1000
ans = str(res) +"." + ans
if n == 0:
return ans
if len(str(res)) == 2:
ans = "0" + ans
elif len(str(res)) ==1:
ans = "00" + ans
return ans
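# For reference, a compact alternative (a sketch assuming Python 3's format
# mini-language is available): group with commas, then swap them for dots.
#   "{:,}".format(1234567).replace(",", ".")  # -> '1.234.567'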
ans = Solution.thousandSeparator(None, 7)
print(ans) | [
"[email protected]"
] | |
a38219cf230e02a4b51f77acdf5bb58c8c66cc5d | c2ae65792af1fab2e7843303ef90790819f872e8 | /SampleCodes/Webview/v3/lib/python3.5/copyreg.py | 3ba6ec1ff37b3f84594bbbe694537c660c2574bd | [] | no_license | behappyyoung/PythonSampleCodes | 47c224ca76ce509a03c8b75ef6b4bf7f49ebdd7f | f7640467273fa8ea3c7e443e798737ca5bcea6f9 | refs/heads/master | 2023-03-15T00:53:21.034605 | 2023-02-13T17:12:32 | 2023-02-13T17:12:32 | 26,919,763 | 3 | 3 | null | 2023-03-07T12:45:21 | 2014-11-20T15:57:16 | Python | UTF-8 | Python | false | false | 97 | py | /usr/local/Cellar/python3/3.5.2/Frameworks/Python.framework/Versions/3.5/lib/python3.5/copyreg.py | [
"[email protected]"
] | |
172a86f3c38e5011aa0bf1ac25cc91867d724c2f | 9e5353ba6e50f77a40a765bd494d8bfb990c8922 | /stream_backend/api/serializers.py | d5946df70464fd9e7b8cffcfad2c351823f30c86 | [] | no_license | admiralbolt/stream-stuff | d9e24f1d78ac142416525b9b42cc53ef0bc4712a | 29cfa96f9e8d40c531362aced47ebacadccbe759 | refs/heads/master | 2023-08-05T00:02:17.812991 | 2021-09-23T05:47:16 | 2021-09-23T05:47:16 | 261,022,447 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | import json
from rest_framework import serializers
from api import models
class JsonSerializer(serializers.Field):
"""Custom serializer for json fields.
Internally json fields are represented as a string.
    Externally they are exposed as parsed JSON.
"""
def to_representation(self, value):
return json.loads(value) if value else []
def to_internal_value(self, data):
return json.dumps(data)
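    # Round-trip sketch (values chosen purely for illustration):
    #   to_representation('{"pos": [1, 2]}') -> {'pos': [1, 2]}
    #   to_internal_value({'pos': [1, 2]})   -> '{"pos": [1, 2]}'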
class CustomEmoteSerializer(serializers.ModelSerializer):
class Meta:
model = models.CustomEmote
fields = "__all__"
class KeyValueSerializer(serializers.ModelSerializer):
"""Serialize a key value pair.
In theory we could use a json serialized field here but I've found that just
doing the translation by hand works better.
"""
class Meta:
model = models.KeyValue
fields = "__all__"
class ScriptSerializer(serializers.ModelSerializer):
"""Serialize a script model."""
class Meta:
model = models.Script
fields = "__all__"
class SoundSerializer(serializers.ModelSerializer):
"""Serialize a sound model."""
class Meta:
model = models.Sound
fields = "__all__"
class TwitchClipSerializer(serializers.ModelSerializer):
"""Serialize dat boi."""
class Meta:
model = models.TwitchClip
fields = "__all__"
| [
"[email protected]"
] | |
3794ad4e6c4c29f51277e6c3db63938934199c94 | 912b3b5321c7e26887af94cf2f97e4892c8c956a | /Day6/1_os模块.py | e6c1d55e65f6ce1f83dbdb50a2a2369a9e7f34ed | [] | no_license | nmap1208/2016-python-oldboy | a3a614694aead518b86bcb75127e1ed2ef94604a | 873820e30aeb834b6a95bae66259506955436097 | refs/heads/master | 2021-05-31T04:43:14.636250 | 2016-05-06T01:24:39 | 2016-05-06T01:24:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | # -*- coding:utf-8 -*-
import os
# os.popen returns a file-like object wrapping the command's stdout.
a = os.popen('ls')
print(type(a))
print(a.read())
# os.system runs the command directly and returns its exit status;
# the command's output goes straight to the terminal.
b = os.system('ls')
print(b)
c = os.stat('1_os模块.py')
print(c)
print(os.path.isfile('.'))
print(os.path.isdir('.'))
print()
print(os.path.isfile('1_os模块.py'))
print(os.path.isdir('1_os模块.py')) | [
"[email protected]"
] | |
e01e2b05fabcddca2a5a6ff51953f8e148933344 | 34ddec647d6ad357c1527cf713eaeaee4eb575aa | /2020/24/part1.py | 15437d944874572ab3349f6d824f88d3d20bf217 | [
"Unlicense"
] | permissive | cheshyre/advent-of-code | 98327c564f6b401244778aaf9a16043000b4d85e | 7ecb827745bd59e6ad249707bd976888006f935c | refs/heads/master | 2022-12-21T15:53:38.789228 | 2022-12-20T20:07:28 | 2022-12-20T20:07:28 | 75,426,961 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | import os
import hex_grid
cur_dir = os.path.dirname(os.path.abspath(__file__))
tiles_count = {}
with open(f"{cur_dir}/input") as f:
for line in f:
instrs = hex_grid.parse_instructions(line)
point = (0, 0)
for i in instrs:
point = hex_grid.apply_instruction_to_point(point, i)
        # Each instruction line flips one tile; flipping twice restores
        # white, so membership in tiles_count marks the black tiles.
        if point in tiles_count:
            del tiles_count[point]
        else:
            tiles_count[point] = 1
print(f"There are {len(tiles_count)} black tiles.")
| [
"[email protected]"
] | |
8ca1f76025a6c70f3e1501bb42a2497806635dcd | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_TransformerDecoderLayer_base.py | a05bd02f5bfde173c39458efb57dde220590c836 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 668 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_TransformerDecoderLayer_base():
"""test TransformerDecoderLayer_base"""
jit_case = JitTrans(case=yml.get_case_info("TransformerDecoderLayer_base"))
jit_case.jit_run()
| [
"[email protected]"
] | |
7779863b118dff78c2699620fdef4105a1714c2c | ad2777c974326177b7036f023301c19e7ecbf4e8 | /rolld.py | c25440d5a316afa3b13b57ddfd90b978c8491674 | [] | no_license | lraulin/bin | c67f5f52667a4d63e4ceace8837750e0e5dc2287 | a67026b920fea5d8731c47bad448f977f245a58d | refs/heads/master | 2021-01-22T04:15:08.948736 | 2018-06-21T00:20:10 | 2018-06-21T00:20:10 | 92,446,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | import random
def roll(max):
roll1 = max * random.random()
| [
"[email protected]"
] | |
9f96bd3e842b17ffff0232b9c3744b778aa03a07 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_None/trend_MovingAverage/cycle_0/ar_/test_artificial_32_None_MovingAverage_0__20.py | 4d34289521e410c56e4feef0b93dd14485083f72 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 262 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"[email protected]"
] | |
00054f224feac895bdeb59caf0cd9aa1a4ec7ba7 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/13102401.py | 51ba130c83268a3466ef39a7a7bdf749d0a89dca | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/13102401.py generated: Fri, 27 Mar 2015 15:48:14
#
# Event Type: 13102401
#
# ASCII decay Descriptor: [B_s0 -> rho+ K-]cc
#
from Configurables import Generation
Generation().EventType = 13102401
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_rho+K-=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13102401
| [
"[email protected]"
] | |
2a95a869e7d772ab128482d441931e4fa0c543aa | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /FGzWE8vNyxtTrw3Qg_9.py | 5ad6edc044c49e09b4fc47b751fcc79350dfb72e | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py |
def get_nbrs(grid, r, c):
nbrs = [[r+dr, c+dc] for dr, dc in [[-1,0],[0,1],[1,0],[0,-1]]]
return [[nr, nc] for nr, nc in nbrs if 0<=nr<len(grid) and 0<=nc<len(grid[0]) and grid[nr][nc]==1]
def is_region(grid, r, c):
if grid[r][c] != 1: return False
# set all interconnected cells in region to 0
# using backtracking to cells with multiple neighbours
stack = []
while True:
grid[r][c] = 0
nbrs = get_nbrs(grid, r, c)
if not nbrs:
if not stack: break
r, c = stack.pop()
else:
if len(nbrs) > 1: stack.append([r, c])
r, c = nbrs[0]
return True
def num_regions(grid):
return sum(1 for r in range(len(grid)) for c in range(len(grid[0])) if is_region(grid, r, c))
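# Illustrative check (hypothetical grid, not part of the original file;
# note num_regions mutates the grid it is given):
#   num_regions([[1, 0, 1],
#                [1, 0, 0],
#                [0, 0, 1]])  # -> 3 connected regions of 1s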
| [
"[email protected]"
] | |
1af7478a5ccc39c7e8958468814792161a1bd6df | 70c3cf5f0c58b0074b33f653500604b5f4f7e198 | /rm_scraping/scrape.py | 4de5cfa3e045ca42c9b60e3faf2c82cac2d44c8e | [] | no_license | colinmorris/wiki-controversial-titles | 659a7264c7fe652b696e20414acbd74a4cb1b3f6 | b089c08655527e10624ecd912a0058fd1f150778 | refs/heads/master | 2020-06-03T05:04:07.017575 | 2019-07-27T22:32:34 | 2019-07-27T22:32:34 | 191,450,697 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,391 | py | import csv
import os
import mwclient
import argparse
import pandas as pd
from RM import RM
from constants import *
FLUSH_EVERY = 50
LIMIT = 0
NEXT_ID = 0
def scrape_rms_for_title(title, f_fail, debug=0):
global NEXT_ID
pg = wiki.pages[title]
section_ix = 1
while 1:
try:
section = pg.text(section=section_ix)
except KeyError:
break
if RM.section_is_rm(section):
try:
yield RM(section, title, debug=debug, id=NEXT_ID)
except Exception as e:
row = '{}\t{}\n'.format(title, section_ix)
f_fail.write(row)
print('Exception:', e)
else:
NEXT_ID += 1
section_ix += 1
def flush_rms(rms, rm_w, votes_w, pols_w):
rm_w.writerows(rm.row for rm in rms)
vote_rows = []
pol_rows = []
for rm in rms:
for vote in rm.votes:
vote['rm_id'] = rm.id
vote_rows.extend(rm.votes)
for user, counts in rm.user_to_policies.items():
for pol, n in counts.items():
row = dict(user=user, pol=pol, n=n, rm_id=rm.id)
pol_rows.append(row)
votes_w.writerows(vote_rows)
pols_w.writerows(pol_rows)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--clobber', action='store_true', help='Overwrite existing csv files')
parser.add_argument('-r', '--title-re',
help='Regex to add as an intitle filter to search query')
parser.add_argument('--invert-titlematch', action='store_true',
help='Invert the intitle filter')
args = parser.parse_args()
if args.clobber:
fresh = True
else:
try:
st = os.stat('rms.csv')
except FileNotFoundError:
fresh = True
else:
fresh = st.st_size == 0
extant_pages = set()
if not fresh:
df = pd.read_csv('rms.csv')
NEXT_ID = df['id'].max() + 1
print("Found existing files. Appending. Ids starting at {}".format(NEXT_ID))
extant_pages = set(df['talkpage'].values)
oflag = 'w' if fresh else 'a'
frm = open('rms.csv', oflag)
fvotes = open('votes.csv', oflag)
fpols = open('pols.csv', oflag)
out_rm = csv.DictWriter(frm, RM.COLS)
out_votes = csv.DictWriter(fvotes, RM.VOTE_COLS)
out_pols = csv.DictWriter(fpols, RM.POL_COLS)
writers = [out_rm, out_votes, out_pols]
if fresh:
for wr in writers:
wr.writeheader()
wiki = mwclient.Site(('https', 'en.wikipedia.org'))
query = 'insource:/"{}"/'.format(RMTOP)
if args.title_re:
query += ' {}intitle:/{}/'.format(
('-' if args.invert_titlematch else ''),
args.title_re
)
results = wiki.search(query, namespace=1)
rms = []
failures = []
f_fail = open('failures.tsv', oflag)
i_pg = 0
i_rm = 0
skipped = 0
for result in results:
# Don't rescrape pages we've already done.
if result['title'] in extant_pages:
skipped += 1
continue
for rm in scrape_rms_for_title(result['title'], f_fail):
rms.append(rm)
i_rm += 1
if len(rms) >= FLUSH_EVERY:
flush_rms(rms, out_rm, out_votes, out_pols)
rms = []
if LIMIT and i_rm >= LIMIT:
print("Reached limit. rms={}. Stopping".format(i_rm))
break
i_pg += 1
if i_pg % 100 == 0:
print("i_pg = {}; skipped = {}".format(i_pg, skipped))
if rms:
flush_rms(rms, out_rm, out_votes, out_pols)
for f in [frm, fvotes, fpols, f_fail]:
f.close()
print("Skipped {} pages".format(skipped))
| [
"[email protected]"
] | |
d6cbdb0585782c2794ba7450f08232c03959e33d | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/alan-hicks/django-dmarc/dmarc/views.py | 9a3cac700e01af8df8f7ac8922d8369c5b52f135 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 1,280 | py | #----------------------------------------------------------------------
# Copyright (c) 2015, Persistent Objects Ltd http://p-o.co.uk/
#
# License: BSD
#----------------------------------------------------------------------
"""
DMARC views
http://dmarc.org/resources/specification/
"""
from django.contrib.admin.views.decorators import staff_member_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from dmarc.models import Report
# Create your views here.
@staff_member_required
def dmarc_report(request):
report_list = Report.objects.select_related(
'reporter',
).prefetch_related(
'records__results'
).order_by('-date_begin', 'reporter__org_name').all()
paginator = Paginator(report_list, 2)
page = request.GET.get('page')
try:
reports = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
reports = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
reports = paginator.page(paginator.num_pages)
context = {
"reports": reports,
}
return render(request, 'dmarc/report.html', context)
| [
"[email protected]"
] | |
7e618e38824b5016cc3f2a51fcaa867cf87c2493 | a6f7b9c9cdfbc44af3c1c332abc94450cbd0e61b | /binpack/apps.py | cfdc666405675fd847c0dec63cbed9680510946a | [] | no_license | igorpejic/visapi | fe2e03a22d2e55e9fe7b31a2b21f098a83743c4d | 29564eb69efb691f7c27e45a4265dc803efcac8b | refs/heads/master | 2022-12-11T09:21:21.459796 | 2020-12-18T05:48:57 | 2020-12-18T05:48:57 | 224,496,779 | 9 | 1 | null | 2022-12-08T04:22:42 | 2019-11-27T18:51:07 | Jupyter Notebook | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class BinpackConfig(AppConfig):
name = 'binpack'
| [
"[email protected]"
] | |
0e58b2eb4476360bd160080cb9a03e7fcad7a6e2 | 490ffe1023a601760ae7288e86723f0c6e366bba | /kolla-docker/zun-ui/zun_ui/enabled/_2340_admin_container_providervms_panel.py | 4812c633141e50e75d7f5283c994e5efb453fe51 | [
"Apache-2.0"
] | permissive | bopopescu/Cloud-User-Management | 89696a5ea5d2f95191327fbeab6c3e400bbfb2b8 | 390988bf4915a276c7bf8d96b62c3051c17d9e6e | refs/heads/master | 2022-11-19T10:09:36.662906 | 2018-11-07T20:28:31 | 2018-11-07T20:28:31 | 281,786,345 | 0 | 0 | null | 2020-07-22T21:26:07 | 2020-07-22T21:26:06 | null | UTF-8 | Python | false | false | 964 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'container.providervms'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'container'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'admin'
# Python panel class of the PANEL to be added.
ADD_PANEL = 'zun_ui.content.container.providervms.panel.Providervms'
| [
"[email protected]"
] | |
6133de21acc69badb689577af432bce59a5def07 | 14cef240063145bba81d7ac4bd25ed671585527c | /core/database/crud/bottify_user.py | 4433dceb8bad52c68591d531e46bc649e45080ee | [] | no_license | Kroonjay/Bottify | f139d3cf6753c36b85ec061888a88c9f82dfd715 | c30c9cf924d19d053b0f678eb9d69143398ea83a | refs/heads/main | 2023-07-30T02:10:08.878698 | 2021-09-29T16:30:35 | 2021-09-29T16:30:35 | 411,117,108 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,716 | py | import logging
from databases import Database
from uuid import UUID
from core.security.password import get_password_hash
from core.models.user import BottifyUserInModel, BottifyUserModel
from core.database.tables.bottify_user import get_bottify_user_table
from core.database.helpers import build_model_from_row
user_table = get_bottify_user_table()
async def read_user_by_id(database: Database, user_id: int):
query = user_table.select().where(user_table.c.id == user_id).limit(1)
row = await database.fetch_one(query)
return build_model_from_row(row, BottifyUserModel)
async def read_user_by_guid(database: Database, guid_in: UUID):
if isinstance(guid_in, UUID):
user_guid = guid_in
elif isinstance(guid_in, str):
try:
user_guid = UUID(guid_in)
except ValueError as e:
logging.error(f"Read User by Guid:Failed to Parse UUID from String")
return None
else:
logging.error(
f"Read User by Guid:User GUID must be either UUID or String:Got: {type(guid_in)}"
)
return None
query = user_table.select().where(user_table.c.guid == user_guid).limit(1)
row = await database.fetch_one(query)
return build_model_from_row(row, BottifyUserModel)
async def read_user_by_username(database: Database, username: str):
if not isinstance(username, str):
logging.error(
f"Read User by Username:Username Must be type String:Got: {type(username)}"
)
query = user_table.select().where(user_table.c.username == username).limit(1)
row = await database.fetch_one(query)
return build_model_from_row(row, BottifyUserModel)
async def create_user(database: Database, user_in: BottifyUserInModel):
query = user_table.insert()
hashed_password = get_password_hash(user_in.password)
success = False
if not hashed_password:
logging.error(
f"Create User Error:Failed to Hash Password:User Data: {user_in.json()}"
)
return success
user_data = user_in.dict(exclude={"password"})
user_data.update({"hashed_password": hashed_password})
await database.execute(query, values=user_data)
success = True
return success
async def read_users(database: Database, limit: int):
if not isinstance(limit, int):
logging.error(
f"Read Users Error:Limit Param Must be an Integer:Got: {type(limit)}"
)
query = user_table.select().limit(limit)
users = []
async for row in database.iterate(query):
users.append(build_model_from_row(row, BottifyUserModel))
if not users:
logging.error(f"Read Users Error:Failed to Read Any Users")
return users
| [
"[email protected]"
] | |
6ec3308ca74aee29ace51e8fb3b39a143120e86f | 29eacf3b29753d65d8ec0ab4a60ea1f7ddecbd68 | /tests/api_workflow/test_api_workflow_selection.py | 73147fd03b6bbd08ffaf12ded248a6f812a0cb81 | [
"MIT"
] | permissive | lightly-ai/lightly | 5b655fe283b7cc2ddf1d7f5bd098603fc1cce627 | 5650ee8d4057139acf8aa10c884d5d5cdc2ccb17 | refs/heads/master | 2023-08-17T11:08:00.135920 | 2023-08-16T12:43:02 | 2023-08-16T12:43:02 | 303,705,119 | 2,473 | 229 | MIT | 2023-09-14T14:47:16 | 2020-10-13T13:02:56 | Python | UTF-8 | Python | false | false | 7,897 | py | from typing import List
import pytest
from pytest_mock import MockerFixture
from lightly.active_learning.config.selection_config import SelectionConfig
from lightly.api import ApiWorkflowClient, api_workflow_selection
from lightly.openapi_generated.swagger_client.models import (
JobResultType,
JobState,
JobStatusData,
JobStatusDataResult,
SamplingCreateRequest,
SamplingMethod,
TagData,
)
from tests.api_workflow import utils
def _get_tags(dataset_id: str, tag_name: str = "just-a-tag") -> List[TagData]:
return [
TagData(
id=utils.generate_id(),
dataset_id=dataset_id,
prev_tag_id=None,
bit_mask_data="0x1",
name=tag_name,
tot_size=4,
created_at=1577836800,
changes=[],
)
]
def _get_sampling_create_request(tag_name: str = "new-tag") -> SamplingCreateRequest:
return SamplingCreateRequest(
new_tag_name=tag_name,
method=SamplingMethod.RANDOM,
config={},
)
def test_selection__tag_exists(mocker: MockerFixture) -> None:
tag_name = "some-tag"
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient,
"get_all_tags",
return_value=_get_tags(dataset_id=utils.generate_id(), tag_name=tag_name),
)
client = ApiWorkflowClient()
with pytest.raises(RuntimeError) as exception:
client.selection(selection_config=SelectionConfig(name=tag_name))
assert (
str(exception.value) == "There already exists a tag with tag_name some-tag"
)
def test_selection__no_tags(mocker: MockerFixture) -> None:
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=[])
client = ApiWorkflowClient()
with pytest.raises(RuntimeError) as exception:
client.selection(selection_config=SelectionConfig(name="some-tag"))
assert str(exception.value) == "There exists no initial-tag for this dataset."
def test_selection(mocker: MockerFixture) -> None:
tag_name = "some-tag"
dataset_id = utils.generate_id()
mocker.patch("time.sleep")
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
)
mocker.patch.object(
ApiWorkflowClient,
"_create_selection_create_request",
return_value=_get_sampling_create_request(),
)
mocked_selection_api = mocker.MagicMock()
mocked_sampling_response = mocker.MagicMock()
mocked_sampling_response.job_id = utils.generate_id()
mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
mocked_jobs_api = mocker.MagicMock()
mocked_get_job_status = mocker.MagicMock(
return_value=JobStatusData(
id=utils.generate_id(),
wait_time_till_next_poll=1,
created_at=0,
status=JobState.FINISHED,
result=JobStatusDataResult(type=JobResultType.SAMPLING, data="new-tag-id"),
)
)
mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
mocked_tags_api = mocker.MagicMock()
client = ApiWorkflowClient()
client._selection_api = mocked_selection_api
client._jobs_api = mocked_jobs_api
client._tags_api = mocked_tags_api
client._dataset_id = dataset_id
client.embedding_id = "embedding-id"
client.selection(selection_config=SelectionConfig(name=tag_name))
mocked_get_job_status.assert_called_once()
mocked_tags_api.get_tag_by_tag_id.assert_called_once_with(
dataset_id=dataset_id, tag_id="new-tag-id"
)
def test_selection__job_failed(mocker: MockerFixture) -> None:
dataset_id = utils.generate_id()
job_id = "some-job-id"
mocker.patch("time.sleep")
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
)
mocker.patch.object(
ApiWorkflowClient,
"_create_selection_create_request",
return_value=_get_sampling_create_request(),
)
mocked_selection_api = mocker.MagicMock()
mocked_sampling_response = mocker.MagicMock()
mocked_sampling_response.job_id = job_id
mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
mocked_jobs_api = mocker.MagicMock()
mocked_get_job_status = mocker.MagicMock(
return_value=JobStatusData(
id=utils.generate_id(),
wait_time_till_next_poll=1,
created_at=0,
status=JobState.FAILED,
error="bad job",
)
)
mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
client = ApiWorkflowClient()
client._selection_api = mocked_selection_api
client._jobs_api = mocked_jobs_api
client._dataset_id = dataset_id
client.embedding_id = "embedding-id"
with pytest.raises(RuntimeError) as exception:
client.selection(selection_config=SelectionConfig(name="some-tag"))
assert str(exception.value) == (
"Selection job with job_id some-job-id failed with error bad job"
)
def test_selection__too_many_errors(mocker: MockerFixture) -> None:
dataset_id = utils.generate_id()
job_id = "some-job-id"
mocker.patch("time.sleep")
mocked_print = mocker.patch("builtins.print")
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
)
mocker.patch.object(
ApiWorkflowClient,
"_create_selection_create_request",
return_value=_get_sampling_create_request(),
)
mocked_selection_api = mocker.MagicMock()
mocked_sampling_response = mocker.MagicMock()
mocked_sampling_response.job_id = job_id
mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
mocked_jobs_api = mocker.MagicMock()
mocked_get_job_status = mocker.MagicMock(
side_effect=[Exception("surprise!") for _ in range(20)]
)
mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
client = ApiWorkflowClient()
client._selection_api = mocked_selection_api
client._jobs_api = mocked_jobs_api
client._dataset_id = dataset_id
client.embedding_id = "embedding-id"
with pytest.raises(Exception) as exception:
client.selection(selection_config=SelectionConfig(name="some-tag"))
assert str(exception.value) == "surprise!"
mocked_print.assert_called_once_with(
"Selection job with job_id some-job-id could not be started "
"because of error: surprise!"
)
def test_upload_scores(mocker: MockerFixture) -> None:
dataset_id = utils.generate_id()
tags = _get_tags(dataset_id=dataset_id, tag_name="initial-tag")
tag_id = tags[0].id
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient,
"get_all_tags",
return_value=tags,
)
mocker.patch.object(
api_workflow_selection, "_parse_active_learning_scores", return_value=[1]
)
mocked_api = mocker.MagicMock()
mocked_create_score = mocked_api.create_or_update_active_learning_score_by_tag_id
client = ApiWorkflowClient()
client._scores_api = mocked_api
client._dataset_id = dataset_id
mocked_create_score.reset_mock()
client.upload_scores(al_scores={"score_type": [1, 2, 3]}, query_tag_id=tag_id)
mocked_create_score.assert_called_once()
kwargs = mocked_create_score.call_args[1]
assert kwargs.get("tag_id") == tag_id
| [
"[email protected]"
] | |
8fe975eac45d0cbc7088a107247e236f4fea121b | 79a836022275b94b687325ae36980cafe6d66788 | /setup.py | 18eba6b9644a02f8b6a1d99711326daac0f2de62 | [] | no_license | reminder-bot/start | 1194adede56c46b587e27b003c0c401ceb7b9056 | 33c613d5a9c168635ad221d864e25d27c726ae5a | refs/heads/master | 2020-03-21T23:34:21.716780 | 2018-08-30T20:13:14 | 2018-08-30T20:13:14 | 139,195,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | import os
import configparser
try:
os.mkdir('../DATA')
except FileExistsError:
pass
files = ['todos']
contents = ['{}']
for fn, content in zip(files, contents):
if fn + '.json' in os.listdir('../DATA/'):
continue
f = open('../DATA/' + fn + '.json', 'w')
f.write(content)
f.close()
if 'config.ini' not in os.listdir('..'):
config = configparser.ConfigParser()
config['DEFAULT'] = {
'token' : 'token',
'dbl_token' : 'discordbotslist token',
'patreon_server' : 'serverid',
'patreon_enabled' : 'yes',
'strings_location' : './languages/'
}
config['WEB'] = {
'DISCORD_OAUTH_CLIENT_ID' : 'id',
'DISCORD_OAUTH_CLIENT_SECRET' : 'secretkey',
'SECRET' : 'secretkey'
}
config['MYSQL'] = {
'user' : 'username',
'passwd' : 'password',
'host' : 'localhost',
'database' : 'reminders'
'database_sfx' : 'soundfx'
}
with open('../config.ini', 'w') as f:
config.write(f)
| [
"[email protected]"
] | |
d7420989469dab17d9f1146e6f856d16c343fb1e | 054eefaa17157b32869ea986347b3e539d2bf06b | /big_o_coding/Green_06/midterm1.py | bbd16356c363c790e981dd3ec7c049cf0c48699b | [] | no_license | baocogn/self-learning | f2cb2f45f05575b6d195fc3c407daf4edcfe7d0e | f50a3946966354c793cac6b28d09cb5dba2ec57a | refs/heads/master | 2021-07-12T23:32:14.728163 | 2019-02-10T14:24:46 | 2019-02-10T14:24:46 | 143,170,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | n = int(input())
result = 1
# Tiered fare: the first unit costs 15000, units 2-5 cost 13500 each,
# units from 6 on cost 11000 each; totals for 12+ units get a 10% discount.
if n == 1:
    result = 1 * 15000
elif 2 <= n <= 5:
    result = 1 * 15000 + (n - 1) * 13500
elif 6 <= n <= 11:
    result = 1 * 15000 + 4 * 13500 + (n - 5) * 11000
elif n >= 12:
    result = int((1 * 15000 + 4 * 13500 + (n - 5) * 11000) * (1 - 0.1))
print(result) | [
"[email protected]"
] | |
addce9e9601f6db6495755d3b9f0ef59ec7bae2b | ac6a1789722de5e37be54b39b964beef005d111d | /rest_registration/utils/validation.py | d23aac4f20409a39eba749fbb9ac6225315dc012 | [
"MIT"
] | permissive | sunbeamer/django-rest-registration | cd194ccf152c62802ca6f7d7a048764da8aadf8a | dd25b84d0151630659da4c2c17ed48d26238e006 | refs/heads/master | 2023-03-29T10:39:06.225559 | 2021-03-25T23:21:46 | 2021-03-25T23:21:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,971 | py | import functools
from collections import OrderedDict
from collections.abc import Mapping
from typing import Any, Callable, Dict, Iterable, List
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError as DjangoValidationError
from django.utils.translation import gettext as _
from rest_framework.exceptions import ErrorDetail, ValidationError
from rest_framework.settings import api_settings
from rest_registration.utils.users import (
build_initial_user,
get_user_by_verification_id
)
Validator = Callable[[Any], None]
def wrap_validation_error_with_field(field_name: str):
def decorator(func: Validator):
@functools.wraps(func)
def wrapper(value: Any) -> None:
try:
func(value)
except ValidationError as exc:
raise ValidationError({field_name: exc.detail}) from None
return wrapper
return decorator
@wrap_validation_error_with_field('password_confirm')
def validate_user_password_confirm(user_data: Dict[str, Any]) -> None:
if user_data['password'] != user_data['password_confirm']:
raise ValidationError(ErrorDetail(
_("Passwords don't match"),
code='passwords-do-not-match'),
)
@wrap_validation_error_with_field('password')
def validate_user_password(user_data: Dict[str, Any]) -> None:
password = user_data['password']
user = build_initial_user(user_data)
return _validate_user_password(password, user)
@wrap_validation_error_with_field('password')
def validate_password_with_user_id(user_data: Dict[str, Any]) -> None:
password = user_data['password']
user_id = user_data['user_id']
user = get_user_by_verification_id(user_id, require_verified=False)
return _validate_user_password(password, user)
def _validate_user_password(password, user) -> None:
try:
validate_password(password, user=user)
except DjangoValidationError as exc:
raise ValidationError(list(exc.messages)) from None
def run_validators(validators: Iterable[Validator], value: Any) -> None:
fields_errors = OrderedDict() # type: Dict[str, Any]
non_field_errors = [] # type: List[Any]
for validator in validators:
try:
validator(value)
except ValidationError as exc:
if isinstance(exc.detail, Mapping):
for field_name, field_errors in exc.detail.items():
fields_errors.setdefault(field_name, []).extend(
field_errors)
elif isinstance(exc.detail, list):
non_field_errors.extend(exc.detail)
if fields_errors:
errors = {}
errors.update(fields_errors)
errors.setdefault(
api_settings.NON_FIELD_ERRORS_KEY, []).extend(non_field_errors)
raise ValidationError(errors)
if non_field_errors:
raise ValidationError(non_field_errors)
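# Usage sketch (hypothetical payload; field-level errors come back keyed
# by field name, the rest under the non-field-errors key):
#   run_validators([validate_user_password_confirm],
#                  {"password": "a", "password_confirm": "b"})
#   -> raises ValidationError({"password_confirm": ["Passwords don't match"], ...})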
| [
"[email protected]"
] | |
64cbfec3f3acc0e1d61883835c5f39fc1e73c1c0 | 9845815f0ff30819d6504adcac96b45deb865697 | /forestConfigs/runForest_PbPb_MIX_75X_JECv4_localVoronoi.py | 0ba7b44a0af6b2ba9e08c237c363a9fe1e412f27 | [] | no_license | dgulhan/forestConfigs | 8efc4dc5f2341e877ae46dca8d9ae3dbe2d5895d | 743178fa48457f6b6bdd49c9337931a4299b3994 | refs/heads/master | 2021-01-19T11:22:51.780050 | 2015-12-04T17:50:36 | 2015-12-04T17:50:36 | 22,839,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,648 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process('HiForest')
process.options = cms.untracked.PSet(
# wantSummary = cms.untracked.bool(True)
#SkipEvent = cms.untracked.vstring('ProductNotFound')
)
#####################################################################################
# HiForest labelling info
#####################################################################################
process.load("HeavyIonsAnalysis.JetAnalysis.HiForest_cff")
process.HiForest.inputLines = cms.vstring("HiForest V3",)
import subprocess
version = subprocess.Popen(["(cd $CMSSW_BASE/src && git describe --tags)"], stdout=subprocess.PIPE, shell=True).stdout.read()
if version == '':
version = 'no git info'
process.HiForest.HiForestVersion = cms.untracked.string(version)
#####################################################################################
# Input source
#####################################################################################
process.source = cms.Source("PoolSource",
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = cms.untracked.vstring(
"file:step3_10.root"
))
# Number of events we want to process, -1 = all events
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10))
#####################################################################################
# Load Global Tag, Geometry, etc.
#####################################################################################
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.Geometry.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.DigiToRaw_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
# PbPb 53X MC
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc_HIon', '')
################
###Centrality###
################
process.load("RecoHI.HiCentralityAlgos.CentralityBin_cfi")
process.centralityBin.Centrality = cms.InputTag("hiCentrality")
process.centralityBin.centralityVariable = cms.string("HFtowers")
# process.centralityBin.nonDefaultGlauberModel = cms.string("HydjetDrum5")
# process.centralityBin.nonDefaultGlauberModel = cms.string("HydjetDrum5")
process.GlobalTag.toGet.extend([
cms.PSet(record = cms.string("HeavyIonRcd"),
tag = cms.string("CentralityTable_HFtowers200_HydjetDrum5_v750x02_mc"),
connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS"),
label = cms.untracked.string("HFtowers")
),
])
##########
###JEC####
##########
process.load("CondCore.DBCommon.CondDBCommon_cfi")
from CondCore.DBCommon.CondDBSetup_cfi import *
process.jec = cms.ESSource("PoolDBESSource",
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(0)
),
timetype = cms.string('runnumber'),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK1Calo_offline'),
label = cms.untracked.string('AK1Calo_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK2Calo_offline'),
label = cms.untracked.string('AK2Calo_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK3Calo_offline'),
label = cms.untracked.string('AK3Calo_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK4Calo_offline'),
label = cms.untracked.string('AK4Calo_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK5Calo_offline'),
label = cms.untracked.string('AK5Calo_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK6Calo_offline'),
label = cms.untracked.string('AK6Calo_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK1PF_offline'),
label = cms.untracked.string('AK1PF_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK2PF_offline'),
label = cms.untracked.string('AK2PF_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK3PF_offline'),
label = cms.untracked.string('AK3PF_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK4PF_offline'),
label = cms.untracked.string('AK4PF_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK5PF_offline'),
label = cms.untracked.string('AK5PF_offline')
),
cms.PSet(
record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_HI_PythiaCUETP8M1_5020GeV_753p1_v4_AK6PF_offline'),
label = cms.untracked.string('AK6PF_offline')
),
## here you add as many jet types as you need
## note that the tag name is specific for the particular sqlite file
),
connect = cms.string('sqlite:HI_PythiaCUETP8M1_5020GeV_753p1_v4.db')
# uncomment above tag lines and this comment to use MC JEC
# connect = cms.string('sqlite:Summer12_V7_MC.db')
)
## add an es_prefer statement to resolve a possible conflict from simultaneous connection to a global tag
process.es_prefer_jec = cms.ESPrefer('PoolDBESSource','jec')
##############
###Gen jets###
##############
process.load('RecoHI.HiJetAlgos.HiGenJets_cff')
process.load('RecoJets.Configuration.GenJetParticles_cff')
process.akHiGenJets = cms.Sequence(
process.genParticlesForJets +
process.ak1HiGenJets +
process.ak2HiGenJets +
process.ak3HiGenJets +
process.ak4HiGenJets +
process.ak5HiGenJets +
process.ak6HiGenJets)
#################
###Voronoi#######
#################
process.load("CondCore.DBCommon.CondDBCommon_cfi")
from CondCore.DBCommon.CondDBSetup_cfi import *
process.uetable = cms.ESSource("PoolDBESSource",
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(0)
),
timetype = cms.string('runnumber'),
toGet = cms.VPSet(
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("UETable_PF_v00_mc"),
#label = cms.untracked.string("UETable_PF")
label = cms.untracked.string("UETable_PFMarta")
),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("UETable_Calo_v00_mc"),
#label = cms.untracked.string("UETable_PF")
label = cms.untracked.string("UETable_CaloMarta")
)
),
connect = cms.string('sqlite:output.db')
# uncomment above tag lines and this comment to use MC JEC
# connect = cms.string('sqlite:Summer12_V7_MC.db')
)
## add an es_prefer statement to resolve a possible conflict from simultaneous connection to a global tag
process.es_prefer_uetable = cms.ESPrefer('PoolDBESSource','uetable')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.HiReRecoJets_HI_cff')
process.voronoiBackgroundPF.tableLabel = cms.string("UETable_PFMarta")
process.voronoiBackgroundCalo.tableLabel = cms.string("UETable_CaloMarta")
#####################################################################################
# Define tree output
#####################################################################################
process.TFileService = cms.Service("TFileService",
fileName=cms.string("HiForest.root"))
#####################################################################################
# Additional Reconstruction and Analysis: Main Body
#####################################################################################
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu1CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs1CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs1PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu1PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu2CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs2CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs2PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu2PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu3CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs3CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs3PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu3PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu4CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs4CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs4PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu4PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu5CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs5CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs5PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu5PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu6CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs6CaloJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs6PFJetSequence_PbPb_mc_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu6PFJetSequence_PbPb_mc_cff')
process.jetSequences = cms.Sequence(process.voronoiBackgroundPF+
process.voronoiBackgroundCalo+
process.akPu1CaloJetSequence +
process.akVs1CaloJetSequence +
process.akVs1PFJetSequence +
process.akPu1PFJetSequence +
process.akPu2CaloJetSequence +
process.akVs2CaloJetSequence +
process.akVs2PFJetSequence +
process.akPu2PFJetSequence +
process.akPu3CaloJetSequence +
process.akVs3CaloJetSequence +
process.akVs3PFJetSequence +
process.akPu3PFJetSequence +
process.akPu4CaloJetSequence +
process.akVs4CaloJetSequence +
process.akVs4PFJetSequence +
process.akPu4PFJetSequence +
process.akPu5CaloJetSequence +
process.akVs5CaloJetSequence +
process.akVs5PFJetSequence +
process.akPu5PFJetSequence +
process.akPu6CaloJetSequence +
process.akVs6CaloJetSequence +
process.akVs6PFJetSequence +
process.akPu6PFJetSequence
)
process.load('HeavyIonsAnalysis.EventAnalysis.hievtanalyzer_mc_cfi')
process.hiEvtAnalyzer.doMC = cms.bool(False) #the gen info dataformat has changed in 73X, we need to update hiEvtAnalyzer code
process.load('HeavyIonsAnalysis.EventAnalysis.hltanalysis_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.HiGenAnalyzer_cfi')
#####################################################################################
# To be cleaned
process.load('HeavyIonsAnalysis.JetAnalysis.ExtraTrackReco_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_MC_cff')
process.load("HeavyIonsAnalysis.TrackAnalysis.METAnalyzer_cff")
process.load("HeavyIonsAnalysis.JetAnalysis.pfcandAnalyzer_cfi")
process.load('HeavyIonsAnalysis.JetAnalysis.rechitanalyzer_cfi')
process.rechitAna = cms.Sequence(process.rechitanalyzer+process.pfTowers)
process.pfcandAnalyzer.skipCharged = False
process.pfcandAnalyzer.pfPtMin = 0
#####################################################################################
#########################
# Track Analyzer
#########################
process.anaTrack.qualityStrings = cms.untracked.vstring(['highPurity','tight','loose'])
process.pixelTrack.qualityStrings = cms.untracked.vstring('highPurity')
process.hiTracks.cut = cms.string('quality("highPurity")')
# set track collection to iterative tracking
process.anaTrack.trackSrc = cms.InputTag("hiGeneralTracks")
# clusters missing in recodebug - to be resolved
process.anaTrack.doPFMatching = True
process.anaTrack.pfCandSrc = cms.InputTag("particleFlowTmp")
process.anaTrack.doSimVertex = True
process.anaTrack.doSimTrack = True
# process.ppTrack.fillSimTrack = True
process.load("SimTracker.TrackAssociation.trackingParticleRecoTrackAsssociation_cff")
process.tpRecoAssocGeneralTracks = process.trackingParticleRecoTrackAsssociation.clone()
process.tpRecoAssocGeneralTracks.label_tr = cms.InputTag("hiGeneralTracks")
process.quickTrackAssociatorByHits.ComponentName = cms.string('quickTrackAssociatorByHits')
process.quickTrackAssociatorByHits.SimToRecoDenominator = cms.string('reco')
process.quickTrackAssociatorByHits.Cut_RecoToSim = cms.double(0.5)
process.quickTrackAssociatorByHits.Quality_SimToReco = cms.double(0.0)
#####################
# Photons
#####################
process.load('HeavyIonsAnalysis.PhotonAnalysis.ggHiNtuplizer_cfi')
process.ggHiNtuplizer.genParticleSrc = cms.InputTag("genParticles")
process.ggHiNtuplizerGED = process.ggHiNtuplizer.clone(recoPhotonSrc = cms.InputTag('gedPhotonsTmp'),
recoPhotonHiIsolationMap = cms.InputTag('photonIsolationHIProducerGED')
)
#####################
# HYDJET RECO file didn't have ak2GenJets and ak6GenJets as input, so removed them
# and ran our own hiGenJets sequence
# from RecoHI.HiJetAlgos.HiGenJets_cff import ak3HiGenJets, ak4HiGenJets
# from RecoJets.Configuration.GenJetParticles_cff import genParticlesForJets
# genParticlesForJets.ignoreParticleIDs += cms.vuint32( 12,14,16)
# process.hiSelectGenJets = cms.Sequence(
# genParticlesForJets +
# ak3HiGenJets +
# ak4HiGenJets
# )
process.HiGenParticleAna.genParticleSrc = cms.untracked.InputTag("genParticles")
process.load("GeneratorInterface.HiGenCommon.HeavyIon_cff")
process.ana_step = cms.Path(process.heavyIon*
process.hltanalysis *
#temp process.hltobject *
process.centralityBin *
process.hiEvtAnalyzer*
process.HiGenParticleAna*
process.quickTrackAssociatorByHits*
process.tpRecoAssocGeneralTracks + #used in HiPFJetAnalyzer
process.akHiGenJets +
process.hiReRecoCaloJets +
process.hiReRecoPFJets +
process.jetSequences +
process.ggHiNtuplizer +
process.ggHiNtuplizerGED +
process.pfcandAnalyzer +
process.rechitAna +
process.HiForest +
# process.cutsTPForFak +
# process.cutsTPForEff +
process.anaTrack
#process.pixelTrack
)
process.load('HeavyIonsAnalysis.JetAnalysis.EventSelection_cff')
process.phltJetHI = cms.Path( process.hltJetHI )
process.pcollisionEventSelection = cms.Path(process.collisionEventSelection)
process.load('CommonTools.RecoAlgos.HBHENoiseFilterResultProducer_cfi')
process.pHBHENoiseFilterResultProducer = cms.Path( process.HBHENoiseFilterResultProducer )
process.phfCoincFilter = cms.Path(process.hfCoincFilter )
process.phfCoincFilter3 = cms.Path(process.hfCoincFilter3 )
process.pprimaryVertexFilter = cms.Path(process.primaryVertexFilter )
process.phltPixelClusterShapeFilter = cms.Path(process.siPixelRecHits*process.hltPixelClusterShapeFilter )
process.phiEcalRecHitSpikeFilter = cms.Path(process.hiEcalRecHitSpikeFilter )
process.pAna = cms.EndPath(process.skimanalysis)
# Customization
# =============================================================================
# /new_project/fastsklearnfeature/interactiveAutoML/new_bench/multiobjective/
#   metalearning/openml_data/private_models/randomforest/sam_node.py
#   (repo: pratyushagnihotri/DFS)
# =============================================================================
''' A class defining the nodes in our Differentially Private Random Decision Forest '''
from collections import defaultdict
import random
import numpy as np
import math
from scipy import stats # for Exponential Mechanism
class node:
def __init__(self, parent_node, split_value_from_parent, splitting_attribute, tree_level, id, children,
svfp_numer=None):
self._parent_node = parent_node
self._split_value_from_parent = split_value_from_parent
self._svfp_numer = svfp_numer
self._splitting_attribute = splitting_attribute
# self._level = tree_level # comment out unless needed. saves memory.
# self._id = id # comment out unless needed. saves memory.
self._children = children
self._class_counts = defaultdict(int)
self._noisy_majority = None
self._empty = 0 # 1 if leaf and has no records
self._sensitivity = -1.0
def add_child(self, child_node):
self._children.append(child_node)
def increment_class_count(self, class_value):
self._class_counts[class_value] += 1
def set_noisy_majority(self, epsilon, class_values):
if not self._noisy_majority and not self._children: # to make sure this code is only run once per leaf
for val in class_values:
if val not in self._class_counts: self._class_counts[val] = 0
if max([v for k, v in self._class_counts.items()]) < 1:
self._empty = 1
self._noisy_majority = random.choice([k for k, v in self._class_counts.items()])
return 0 # we dont want to count purely random flips
else:
all_counts = sorted([v for k, v in self._class_counts.items()], reverse=True)
count_difference = all_counts[0] - all_counts[1]
self._sensitivity = math.exp(-1 * count_difference * epsilon)
self._sens_of_sens = 1.
self._noisy_sensitivity = 1.
self._noisy_majority = self.expo_mech(epsilon, self._sensitivity, self._class_counts)
if self._noisy_majority != int(
max(self._class_counts.keys(), key=(lambda key: self._class_counts[key]))):
# print('majority: '+str(self._noisy_majority)+' vs. max_count: '+str( max(self._class_counts.keys(), key=(lambda key: self._class_counts[key]))))
return 1 # we're summing the flipped majorities
else:
return 0
else:
return 0
def laplace(self, e, counts):
noisy_counts = {}
for label, count in counts.items():
noisy_counts[label] = max(0, int(count + np.random.laplace(scale=float(1. / e))))
return int(max(noisy_counts.keys(), key=(lambda key: noisy_counts[key])))
def expo_mech(self, e, s, counts):
        ''' For this implementation of the Exponential Mechanism, we use a piecewise linear scoring function,
where the element with the maximum count has a score of 1, and all other elements have a score of 0. '''
weighted = []
max_count = max([v for k, v in counts.items()])
for label, count in counts.items():
''' if the score is non-monotonic, s needs to be multiplied by 2 '''
if count == max_count:
if s < 1.0e-10:
power = 50 # e^50 is already astronomical. sizes beyond that dont matter
else:
power = min(50, (e * 1) / (2 * s)) # score = 1
else:
power = 0 # score = 0
weighted.append([label, math.exp(power)])
sum = 0.
for label, count in weighted:
sum += count
for i in range(len(weighted)):
weighted[i][1] /= sum
customDist = stats.rv_discrete(name='customDist',
values=([lab for lab, cou in weighted], [cou for lab, cou in weighted]))
best = customDist.rvs()
# print("best_att examples = "+str(customDist.rvs(size=20)))
        return int(best)
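
# Hypothetical usage sketch (not part of the original module): builds a leaf
# by hand, since the makeNode() helper lives elsewhere in this project, and
# draws its noisy majority label through the Exponential Mechanism above.
if __name__ == '__main__':
    leaf = node(parent_node=None, split_value_from_parent=None,
                splitting_attribute=None, tree_level=1, id=0, children=[])
    for label in [0] * 40 + [1] * 25:  # raw class counts observed at this leaf
        leaf.increment_class_count(label)
    flipped = leaf.set_noisy_majority(epsilon=1.0, class_values=[0, 1])
    print('noisy majority:', leaf._noisy_majority, '(flipped: %d)' % flipped)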
"[email protected]"
] | |
44ed7aab029125950b6f5f506929e89f4de0dcdf | 6968c7f9d2b20b5296663829f99a27d184a59fc1 | /autodisc/autodisc/gui/jupyter/imagelistwidget.py | b6152e78a221de869f622b5b6696836db4ed377e | [
"MIT",
"Apache-2.0"
] | permissive | flowersteam/automated_discovery_of_lenia_patterns | d42dff37323d51732571b33845c0562d844f498f | 97cc7cde2120fa95225d1e470e00b8aa8c034e97 | refs/heads/master | 2020-06-29T07:08:58.404541 | 2020-05-14T07:37:10 | 2020-05-14T07:37:10 | 200,470,902 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,855 | py | import autodisc as ad
import ipywidgets
import numpy as np
import IPython.display
class ImageListWidget(ipywidgets.VBox):
@staticmethod
def get_default_gui_config():
default_config = ad.Config()
default_config.elements_per_page = 100
default_config.output_layout = ad.Config()
# default_config.output_layout.border='3px solid black'
default_config.box_layout = ad.Config()
default_config.box_layout.overflow_y = 'scroll'
default_config.box_layout.width = '100%'
default_config.box_layout.height = '500px'
default_config.box_layout.flex_flow = 'row wrap'
default_config.box_layout.display = 'flex'
default_config.content_ouput = ad.Config()
default_config.page_label = ad.Config()
default_config.page_selection = ad.Config()
default_config.page_selection.description = 'Page: '
default_config.previous_page_button = ad.Config()
default_config.previous_page_button.description = '<'
default_config.previous_page_button.layout = ad.Config()
default_config.previous_page_button.layout.width = '20px'
default_config.next_page_button = ad.Config()
default_config.next_page_button.description = '>'
default_config.next_page_button.layout = ad.Config()
default_config.next_page_button.layout.width = '20px'
default_config.button_box = ad.Config()
default_config.button_box.layout = ad.Config()
default_config.button_box.layout.flex_flow = 'row'
default_config.button_box.layout.display = 'flex'
default_config.button_box.layout.align_items = 'center'
default_config.button_box.layout['justify-content'] = 'flex-end'
default_config.button_box.layout.width = '100%'
default_config.image_items = ad.Config()
default_config.image_items.layout = ad.Config()
default_config.image_items.layout.height = '200px'
default_config.image_items.layout.width = '200px'
default_config.image_items.layout.border = '2px solid white'
default_config.image_captions = ad.Config()
return default_config
def __init__(self, images=None, config=None, **kwargs):
self.config = ad.config.set_default_config(kwargs, config, ImageListWidget.get_default_gui_config())
self.images = None
self.main_box = None
self.content_ouput_widget = ipywidgets.Output(**self.config.content_ouput)
self.page_label_widget = ipywidgets.Label(**self.config.page_label, value='of 0')
self.previous_page_button_widget = ipywidgets.Button(**self.config.previous_page_button)
self.previous_page_button_widget.on_click(self.on_prev_page_button_click)
self.page_selection_widget = ipywidgets.Dropdown(**self.config.page_selection)
self.page_selection_widget.observe(self.on_page_selection_change)
self.next_page_button_widget = ipywidgets.Button(**self.config.next_page_button)
self.next_page_button_widget.on_click(self.on_next_page_button_click)
self.page_selection_widget_ignore_next_value_change = False
self.button_box_widget = ipywidgets.Box(
[self.page_selection_widget,
self.page_label_widget,
self.previous_page_button_widget,
self.next_page_button_widget],
**self.config.button_box
)
super().__init__([self.content_ouput_widget, self.button_box_widget], layout=self.config.output_layout)
self.cur_page_idx = 0
if images is not None:
self.update(images)
def update(self, images, captions=None):
self.images = images
self.captions = captions
if self.images is not None and self.images:
# update page selection widget
n_pages = int(np.ceil(len(self.images) / self.config.elements_per_page))
opts = [page_idx + 1 for page_idx in range(n_pages)]
self.page_selection_widget.options = opts
# update number of pages
self.page_label_widget.value = 'of {}'.format(n_pages)
self.update_page_items(0, force_update=True)
else:
self.page_selection_widget.options = []
self.page_label_widget.value = 'of 0'
self.content_ouput_widget.clear_output()
def update_page_items(self, page_idx, force_update=False):
if self.images is not None and self.images:
n_pages = int(np.ceil(len(self.images) / self.config.elements_per_page))
if n_pages == 0:
self.content_ouput_widget.clear_output()
elif page_idx >= 0 and page_idx < n_pages and (self.cur_page_idx != page_idx or force_update):
items = []
self.cur_page_idx = page_idx
start_idx = self.config.elements_per_page * self.cur_page_idx
end_idx = min(self.config.elements_per_page * (self.cur_page_idx + 1), len(self.images))
for image_idx in range(start_idx, end_idx):
image = self.images[image_idx]
item_elems = []
if self.captions is not None:
if image_idx < len(self.captions):
caption_text = self.captions[image_idx]
else:
caption_text = ''
caption_widget = ipywidgets.Label(
value=caption_text,
**self.config.image_captions
)
item_elems.append(caption_widget)
img_widget = ipywidgets.Image(
value=image,
format='png',
**self.config.image_items
)
item_elems.append(img_widget)
items.append(ipywidgets.VBox(item_elems))
self.main_box = ipywidgets.Box(items, layout=self.config.box_layout)
self.content_ouput_widget.clear_output(wait=True)
with self.content_ouput_widget:
IPython.display.display(self.main_box)
self.page_selection_widget.value = page_idx + 1
else:
self.content_ouput_widget.clear_output()
def on_prev_page_button_click(self, button):
self.update_page_items(self.cur_page_idx - 1)
def on_next_page_button_click(self, button):
self.update_page_items(self.cur_page_idx + 1)
def on_page_selection_change(self, change):
if change['type'] == 'change' and change['name'] == 'value':
if self.page_selection_widget.value is not None:
                self.update_page_items(self.page_selection_widget.value - 1)
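
# Hypothetical usage sketch (assumes a Jupyter session; the 1x1 PNG literal
# below only keeps the snippet self-contained):
if __name__ == '__main__':
    import base64
    tiny_png = base64.b64decode(
        'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJ'
        'AAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==')
    widget = ImageListWidget()
    widget.update([tiny_png] * 3, captions=['a', 'b', 'c'])
    # in a notebook cell, evaluating `widget` renders the paginated image list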
"[email protected]"
] | |
2ac952f31d08278c866ed2990a35fd7f970f3e15 | fdf3aff5344271ef69ac7441c5dbca9cbf832cd1 | /car_location/location/models/__init__.py | 1219e9aa74d0d07e37129adcf33bba5812ee7ee2 | [] | no_license | lffsantos/DesafioPython | 6069b3277780326611e34ae024f7506f3d56c5b4 | fbc451b77c0310630fd95cbd23c339e194af88d1 | refs/heads/master | 2021-01-17T07:42:12.181187 | 2016-01-19T03:39:20 | 2016-01-19T03:39:20 | 49,730,610 | 0 | 0 | null | 2016-01-19T03:39:22 | 2016-01-15T16:25:30 | JavaScript | UTF-8 | Python | false | false | 277 | py | __author__ = 'lucas'
from car_location.location.models import categoriaveiculo
from car_location.location.models import veiculo
from car_location.location.models import cliente
from car_location.location.models import locacao
from car_location.location.models import devolucao
"[email protected]"
] | |
9b86763b34bce30afdb20d256f2e76972cc7a3ed | 06919b9fd117fce042375fbd51d7de6bb9ae14fc | /py/tests/problems/hashtable/sparse_matrix.py | 6ffec84f318bf38c68b1a11b7e3818d670628f49 | [
"MIT"
] | permissive | bmoretz/Daily-Coding-Problem | 0caf2465579e81996869ee3d2c13c9ad5f87aa8f | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | refs/heads/master | 2022-12-07T15:41:06.498049 | 2021-11-18T19:45:19 | 2021-11-18T19:45:19 | 226,376,236 | 1 | 0 | MIT | 2022-11-22T09:20:23 | 2019-12-06T17:17:00 | C++ | UTF-8 | Python | false | false | 1,219 | py | import unittest
from dcp.problems.hashtable.sparse_matrix import SparseMatrix
class Test_SparseMatrix(unittest.TestCase):
def setUp(self):
pass
def test_case1(self):
mat = [[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], \
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], \
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], \
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], \
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1]]
n, m = 100, 100
sm = SparseMatrix(mat, n, m)
non_zero = [[5, 13, 19, 28], \
[5, 13, 19, 28], \
[5, 13, 19, 28], \
[5, 13, 19, 28], \
[5, 13, 19, 28]]
for n, row in enumerate(mat):
for m, _ in enumerate(row):
if m in non_zero[n]:
assert sm.get(n, m) != 0
else:
                    assert sm.get(n, m) == 0
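
# A minimal reference sketch (hypothetical, not the module under test): a
# coordinate-dictionary sparse matrix consistent with the interface the
# assertions above exercise (dense-matrix constructor plus get(row, col)).
class ReferenceSparseMatrix:
    def __init__(self, matrix, n, m):
        self.n, self.m = n, m  # logical size may exceed the dense input
        self.values = {(i, j): v
                       for i, row in enumerate(matrix)
                       for j, v in enumerate(row) if v != 0}

    def get(self, row, col):
        # missing coordinates are implicitly zero
        return self.values.get((row, col), 0)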
"[email protected]"
] | |
4404d9fc262775f54d590079477f8a1ba5b93179 | e65ae5bd9ae1c93e7117e630f7340bc73aa71212 | /lib/gevent/greentest/test__semaphore.py | 480ec0e930466916a152bfe75550bf85470a4e0e | [
"MIT"
] | permissive | nadirhamid/oneline | e98ff1ed81da0536f9602ecdde2fb2a4fe80d256 | 833ebef0e26ae8e0cc452756381227746d830b23 | refs/heads/master | 2021-01-21T04:27:41.715047 | 2016-05-30T03:50:34 | 2016-05-30T03:50:34 | 23,320,578 | 1 | 2 | NOASSERTION | 2020-03-12T17:22:24 | 2014-08-25T16:29:36 | Python | UTF-8 | Python | false | false | 640 | py | import greentest
import gevent
from gevent.lock import Semaphore
class TestTimeoutAcquire(greentest.TestCase):
# issue 39
def test_acquire_returns_false_after_timeout(self):
s = Semaphore(value=0)
result = s.acquire(timeout=0.01)
assert result is False, repr(result)
def test_release_twice(self):
s = Semaphore()
result = []
s.rawlink(lambda s: result.append('a'))
s.release()
s.rawlink(lambda s: result.append('b'))
s.release()
gevent.sleep(0.001)
self.assertEqual(result, ['a', 'b'])
if __name__ == '__main__':
greentest.main()
| [
"[email protected]"
] | |
58c7af9907e90657db990a4e460eb35ea902d102 | f3693916a8b118bf139364604dac3f51235ed613 | /functional/Components/Authorization_System/Authorization_System_generateToken_POST/test_TC_43372_Authorizationsystems_POST_Pastdate_For_Not_Before_Time.py | fbedd554265220c2b614fc0c146a20e9c5d9bc1c | [] | no_license | muktabehera/QE | e7d62284889d8241d22506f6ee20547f1cfe6db1 | 3fedde591568e35f7b80c5bf6cd6732f8eeab4f8 | refs/heads/master | 2021-03-31T02:19:15.369562 | 2018-03-13T02:45:10 | 2018-03-13T02:45:10 | 124,984,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,941 | py | # -*- coding: UTF-8 -*-
"""PFE Component Tests - Authorization_Systems.
* TC-43372 - Authorization_Systems POST:
Verify that user is unable to generate Token on providing time before expiration time for 'notBeforeTime' field using request POST /authorizationSystems/{id}/generateToken ".
Equivalent test CURL command:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/authorizationSystems/<data_ID1_under_test>/generateToken"
Same, with test data:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/authorizationSystems/ab.qumu.com/generateToken"
JSON data sent to PathFinder in this test:
{'audience': 'qed:a1',
'expirationTime': '2017-09-30T06:10:50.714Z',
'generatedToken': 'string',
'issueTime': '2016-01-29T06:10:50.714Z',
'macKey': '123456789012345678901234567890121',
'notBeforeTime': '2017-09-20T06:10:50.714Z',
'permissions': ['MANAGE_SYSTEM', 'MANAGE_CONFIGURATION'],
'qeda': {},
'qedp': {},
'subject': 'sub1',
'url': '',
'useCompactPermissions': True}
"""
import pytest
from qe_common import *
logger = init_logger()
@pytest.mark.draft # remove this after script passed unit tests successfuly
@pytest.mark.components
@pytest.allure.story('Authorization_Systems')
@pytest.allure.feature('POST')
class Test_PFE_Components(object):
"""PFE Authorization_Systems test cases."""
@pytest.allure.link('https://jira.qumu.com/browse/TC-43372')
@pytest.mark.Authorization_Systems
@pytest.mark.POST
def test_TC_43372_POST_Authorization_Systems_Pastdate_For_Not_Before_Time(self, context):
"""TC-43372 - Authorization_Systems-POST
Verify that user is unable to generate Token on providing time before expiration time for 'notBeforeTime' field using request POST /authorizationSystems/{id}/generateToken "."""
# Define a test step
with pytest.allure.step("""Verify that user is unable to generate Token on providing time before expiration time for 'notBeforeTime' field using request POST /authorizationSystems/{id}/generateToken "."""):
### Positive test example
# Test case configuration
tokenGenerationDetails = context.sc.TokenGenerationDetails(
audience='qed:a1',
expirationTime='2017-09-30T06:10:50.714Z',
generatedToken='string',
issueTime='2016-01-29T06:10:50.714Z',
jwtId=None,
macKey='123456789012345678901234567890121',
notBeforeTime='2017-09-20T06:10:50.714Z',
permissions=['MANAGE_SYSTEM', 'MANAGE_CONFIGURATION'],
qeda={},
qedp={},
referrer=None,
subject='sub1',
url='',
useCompactPermissions=True)
# generateToken the Authorization_Systems.
# The `check` call validates return code
# and some of the swagger schema.
# Most schema checks are disabled.
response = check(
context.cl.Authorization_Systems.generateToken(
id='generateToken',
body=tokenGenerationDetails
)
)
### Can add tests here to validate the response content
with pytest.allure.step("""Verify that user is unable to generate Token on providing time before expiration time for 'notBeforeTime' field using request POST /authorizationSystems/{id}/generateToken "."""):
### Negative test example
# Test case configuration
tokenGenerationDetails = context.sc.TokenGenerationDetails(
audience='qed:a1',
expirationTime='2017-09-30T06:10:50.714Z',
generatedToken='string',
issueTime='2016-01-29T06:10:50.714Z',
jwtId=None,
macKey='123456789012345678901234567890121',
notBeforeTime='2017-09-20T06:10:50.714Z',
permissions=['MANAGE_SYSTEM', 'MANAGE_CONFIGURATION'],
qeda={},
qedp={},
referrer=None,
subject='sub1',
url='',
useCompactPermissions=True)
# prepare the request, so we can modify it
request = context.cl.Authorization_Systems.generateToken(
id='generateToken',
body=tokenGenerationDetails
)
### Invalid JSON Error injection example
### Errors that result in valid JSON can be configured above.
### Otherwise, uncomment the code below (request.future....)
# Get the generated payload and corrupt the metric
# request.future.request.data = request.future.request.data.replace(
# '"metric": 1,', '"metric":,'
# )
# generateToken the Authorization_Systems, and check we got the error we expect
try:
client, response = check(
request,
quiet=True, returnResponse=True
)
except (HTTPBadRequest, HTTPForbidden) as e: # 400, 403 error
get_error_message(e) | expect.any(
should.start_with('may not be empty'),
should.start_with('Invalid page parameter specified'),
should.contain('Invalid Authorization Token')
)
else:
raise Exception(
"Expected error message, got {} status code instead.".format(
response.status_code))
| [
"[email protected]"
] | |
de4031edd500d91f3c5f79daceda0b6ddd0c105d | 53faa0ef3496997412eb5e697bc85eb09a28f8c9 | /pipeline/0x02-databases/34-log_stats.py | cd026394d8134a1af7cdf365a1a6c146de8897f9 | [] | no_license | oran2527/holbertonschool-machine_learning | aaec2ffe762b959573f98a5f4e002272a5d643a3 | 8761eb876046ad3c0c3f85d98dbdca4007d93cd1 | refs/heads/master | 2023-08-14T00:37:31.163130 | 2021-09-20T13:34:33 | 2021-09-20T13:34:33 | 330,999,053 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | #!/usr/bin/env python3
""" stats about Nginx logs stored in MongoDB """
from pymongo import MongoClient
if __name__ == "__main__":
""" stats about Nginx logs stored in MongoDB """
client = MongoClient('mongodb://127.0.0.1:27017')
collection_logs = client.logs.nginx
num_docs = collection_logs.count_documents({})
print("{} logs".format(num_docs))
print("Methods:")
methods = ["GET", "POST", "PUT", "PATCH", "DELETE"]
for method in methods:
num_method = collection_logs.count_documents({"method": method})
print("\tmethod {}: {}".format(method, num_method))
filter_path = {"method": "GET", "path": "/status"}
num_path = collection_logs.count_documents(filter_path)
print("{} status check".format(num_path))
| [
"[email protected]"
] | |
cbe4d8dfdab89e21fe288bd6986ab78a30943da9 | a1807bf5ca332fecc7e775c9bde25eeed318db9d | /disclosure_backend/tests/test_docgen.py | 63a82b798a7aed4f2eaba4f1d41ba995ffbe972e | [] | no_license | MrMaksimize/disclosure-backend | 2c6a8936c08cd4c3ff328ee114a8050e410989cf | 6d97305b4656bd630b9e12aef953daed51c84ed7 | refs/heads/master | 2020-12-26T00:46:07.104157 | 2016-01-06T17:04:38 | 2016-01-06T17:04:38 | 49,169,984 | 0 | 0 | null | 2016-02-12T14:05:13 | 2016-01-07T00:19:30 | Python | UTF-8 | Python | false | false | 479 | py | import os
from django.conf import settings
from django.core.management import call_command
from rest_framework.test import APITestCase
class DocGenerationTest(APITestCase):
def test_generate_docs(self):
""" Test createcalaccessrawmodeldocs"""
call_command('createcalaccessrawmodeldocs')
# Didn't throw; check some minimum level of output.
docs_dir = os.path.join(settings.REPO_DIR, 'docs')
self.assertTrue(os.path.exists(docs_dir))
| [
"[email protected]"
] | |
a94dbdf4fc6e774943ac77d02fc7c1c4ab4a4eff | 99767736ea5f34be4438ce689fc27454dffbf15c | /build/lib/sqlalchemy_nav/__init__.py | f5667ece8a87d320adf715b232e0f99d96ab7b47 | [
"MIT"
] | permissive | dsbowen/sqlalchemy-nav | 4600ff85c99878d98167fee000d5b9cd6a0a90bc | d60b28fe74cdde65de68a140d0c2845d92fb9b0f | refs/heads/master | 2020-08-02T09:35:27.233849 | 2020-06-10T16:50:22 | 2020-06-10T16:50:22 | 211,304,650 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | """Mixins for SQLAlchemy-Nav
SQLAlchemy-Nav Mixins can be used to create dynamic navigation bar models
compatible with Bootstrap 4. Navigation bars can contain navigation
items, dropdown items, and custom html.
All models store their HTML in a `MutableSoup` attribute, `body`. This is
essentially a `BeautifulSoup` object which you can use to insert custom
HTML.
`Navitem`s are nested in `Navbar`s, and `Dropdownitem`s are nested in
`Navitem`s.
"""
from sqlalchemy_nav.navbar import NavbarMixin
from sqlalchemy_nav.navitem import NavitemMixin
from sqlalchemy_nav.dropdownitem import DropdownitemMixin
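
# Hypothetical usage sketch (assumes a standard declarative SQLAlchemy setup;
# only the mixin names imported above are taken from this package):
#
#   from sqlalchemy.ext.declarative import declarative_base
#   Base = declarative_base()
#
#   class Navbar(NavbarMixin, Base):
#       __tablename__ = 'navbar'
#
#   class Navitem(NavitemMixin, Base):
#       __tablename__ = 'navitem'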
"[email protected]"
] | |
4239b59efd8be01546de57fd9436920f76c9aaf9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_089/ch14_2020_03_09_13_31_11_006129.py | 151ce51fd678c102515933e6bb62da962336d66d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def calcula_distancia_do_projetil(v,t,h):
    # Projectile range launched from height h (standard kinematics result):
    #   D = (v**2 / (2*g)) * (1 + sqrt(1 + 2*g*h / (v**2 * sin(t)**2))) * sin(2*t)
    # The original line multiplied by 9.8 instead of dividing by 2*9.8 and
    # dropped the square root.
    g = 9.8
    D = (v ** 2 / (2 * g)) * (1 + math.sqrt(1 + (2 * g * h) / (v ** 2 * math.sin(t) ** 2))) * math.sin(2 * t)
    return D
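
# Worked check (hypothetical values): v = 20 m/s at t = 0.7854 rad (~45 deg)
# from ground level h = 0 reduces to the flat range v**2 * sin(2t) / g ~= 40.8 m.
if __name__ == '__main__':
    print(calcula_distancia_do_projetil(20, 0.7854, 0))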
"[email protected]"
] | |
a3bd9f7287b261d6b7e3c747f1d10e15bca2a1c1 | 2855f26e603ec7bf5b18876b54b75ee4577bdf2c | /witdraw/forms.py | 65f12090c349714d0754149c7cec48b2f49658bc | [] | no_license | zkenstein/ppob_multipay_v2 | e8ea789c395c6fa5b83ba56fbaf5ea08a2a77a14 | 85296f925acf3e94cc371637805d454581391f6e | refs/heads/master | 2022-03-04T13:53:30.893380 | 2019-11-16T22:49:50 | 2019-11-16T22:49:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | from django import forms
from django.contrib.auth.models import User
from .models import Witdraw
MIN_COMMISION = 10000
class WitdrawForm(forms.ModelForm):
class Meta:
model = Witdraw
fields = [
'create_by', 'amount'
]
def __init__(self, *args, **kwargs):
super(WitdrawForm, self).__init__(*args, **kwargs)
self.fields['create_by'].queryset = User.objects.filter(
profile__user_type=2
)
def clean_amount(self):
amount = self.cleaned_data.get('amount')
if amount < MIN_COMMISION:
            raise forms.ValidationError('Minimum withdrawal is 10,000')
return amount
def clean_create_by(self):
usr = self.cleaned_data.get('create_by')
if usr.profile.user_type != 2:
            raise forms.ValidationError('User is not an agent')
if usr.profile.ponsel is None or usr.profile.ponsel == '':
            raise forms.ValidationError('Ponsel (phone number) cannot be empty')
if usr.profile.wallet.commision < MIN_COMMISION:
            raise forms.ValidationError('Commission not enough to withdraw')
        return usr
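
# Hypothetical usage sketch: how a view might drive this form's validation
# (the field values are illustrative):
#
#   form = WitdrawForm(data={'create_by': agen_user.pk, 'amount': 15000})
#   if form.is_valid():
#       withdraw = form.save()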
"[email protected]"
] | |
d4e3751b2d4796c72be497007fe4c7d8ca67e18e | 6db97ab761d59452c05611354637dfb2ce693c96 | /src/compas_fab/geometry/frame.py | 60723945ebff9a82556936c9e69f14253f61e9df | [
"MIT"
] | permissive | Mahdi-Soheyli/compas_fab | e885efbdd5531ae5f245bf02b2f1acce0a308680 | 0e7d426903a5d9a1bca947cd7a1251031c4c71b4 | refs/heads/master | 2020-05-02T16:53:13.265526 | 2019-03-20T13:37:37 | 2019-03-20T13:37:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34 | py | from compas.geometry import Frame
| [
"[email protected]"
] | |
ac33c346ad83106d32dfc516843c5835c52734e9 | 3ed70536d4d06b2ac43b64976ddc43a5d7025b31 | /uri1091.py | 4cb102241664ec3f00f7c77717e8df84b2c4c8f9 | [] | no_license | LuisHenrique01/Questoes_URI | 7f1d397e3cd055349939184603eb86cb4bf43d65 | 35c8e77eb7cd9da96df4268b5d71f3ad87446c89 | refs/heads/master | 2020-07-22T08:12:12.700484 | 2020-04-12T17:39:29 | 2020-04-12T17:39:29 | 207,126,339 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | def main():
while True:
rep = int(input())
if rep == 0: break
div_x, div_y = list(map(int, input().split()))
for _ in range(rep):
ponto_x, ponto_y = list(map(int, input().split()))
if ponto_x == div_x or ponto_y == div_y:
print('divisa')
elif ponto_x > div_x and ponto_y > div_y:
print('NE')
elif ponto_x > div_x and ponto_y < div_y:
print('SE')
elif ponto_x < div_x and ponto_y > div_y:
print('NO')
else:
print('SO')
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
bbb33c2583e79f2ebfcf80477d93aa479721526b | 648f742d6db2ea4e97b83c99b6fc49abd59e9667 | /common/vault/oas/models/contracts_smart_contract.py | d5d7062b8e4de5bd91efe115deb981877b802760 | [] | no_license | jmiller-tm/replit | c56ce63718f6eb2d9b53bd09d3f7b3ef3496cb86 | c8e6af3268c4ef8da66516154850919ea79055dc | refs/heads/main | 2023-08-30T00:49:35.738089 | 2021-11-16T23:09:08 | 2021-11-16T23:09:08 | 428,809,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,856 | py | # coding: utf-8
"""
vault/kernel/core_api/proto/v1/accounts/core_api_account_schedule_tags.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ContractsSmartContract(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'str',
'smart_contract_param_vals': 'dict(str, str)',
'smart_contract_version_id': 'str'
}
attribute_map = {
'code': 'code',
'smart_contract_param_vals': 'smart_contract_param_vals',
'smart_contract_version_id': 'smart_contract_version_id'
}
def __init__(self, code=None, smart_contract_param_vals=None, smart_contract_version_id=None): # noqa: E501
"""ContractsSmartContract - a model defined in Swagger""" # noqa: E501
self._code = None
self._smart_contract_param_vals = None
self._smart_contract_version_id = None
self.discriminator = None
if code is not None:
self.code = code
if smart_contract_param_vals is not None:
self.smart_contract_param_vals = smart_contract_param_vals
if smart_contract_version_id is not None:
self.smart_contract_version_id = smart_contract_version_id
@property
def code(self):
"""Gets the code of this ContractsSmartContract. # noqa: E501
Source code of the Smart Contract that is to be simulated. # noqa: E501
:return: The code of this ContractsSmartContract. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ContractsSmartContract.
Source code of the Smart Contract that is to be simulated. # noqa: E501
:param code: The code of this ContractsSmartContract. # noqa: E501
:type: str
"""
self._code = code
@property
def smart_contract_param_vals(self):
"""Gets the smart_contract_param_vals of this ContractsSmartContract. # noqa: E501
Values for the Smart Contract parameters. # noqa: E501
:return: The smart_contract_param_vals of this ContractsSmartContract. # noqa: E501
:rtype: dict(str, str)
"""
return self._smart_contract_param_vals
@smart_contract_param_vals.setter
def smart_contract_param_vals(self, smart_contract_param_vals):
"""Sets the smart_contract_param_vals of this ContractsSmartContract.
Values for the Smart Contract parameters. # noqa: E501
:param smart_contract_param_vals: The smart_contract_param_vals of this ContractsSmartContract. # noqa: E501
:type: dict(str, str)
"""
self._smart_contract_param_vals = smart_contract_param_vals
@property
def smart_contract_version_id(self):
"""Gets the smart_contract_version_id of this ContractsSmartContract. # noqa: E501
The ID that will be used as the Smart Contract ID in the simulation and can be referenced by the simulation instructions. # noqa: E501
:return: The smart_contract_version_id of this ContractsSmartContract. # noqa: E501
:rtype: str
"""
return self._smart_contract_version_id
@smart_contract_version_id.setter
def smart_contract_version_id(self, smart_contract_version_id):
"""Sets the smart_contract_version_id of this ContractsSmartContract.
The ID that will be used as the Smart Contract ID in the simulation and can be referenced by the simulation instructions. # noqa: E501
:param smart_contract_version_id: The smart_contract_version_id of this ContractsSmartContract. # noqa: E501
:type: str
"""
self._smart_contract_version_id = smart_contract_version_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ContractsSmartContract, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ContractsSmartContract):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
0db6856e41bbe6b3773d8320f95dd2e5edbcc1d6 | 451f158c20fd425bc9d14c8e27e1a8f415423276 | /novels_search/config/config.py | d38b0dcf8002cdb6327e83d0f826c02ffffffbc9 | [
"Apache-2.0"
] | permissive | TimeCharmer/novels-search | 3767a77c237426a66f25287abae3c0a44528cf52 | ab8152ff12d828dba0a8b52aa9c08675b21a1c5f | refs/heads/master | 2021-01-19T14:21:28.438011 | 2017-04-12T09:37:48 | 2017-04-12T09:37:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | #!/usr/bin/env python
import logging
from aiocache import RedisCache
# Search engine
URL_PHONE = 'https://m.baidu.com/s'
URL_PC = 'http://www.baidu.com/s'
BAIDU_RN = 15
SO_URL = "https://www.so.com/s"
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'
# logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
LOGGER = logging.getLogger('novels_search')
# aiocache
REDIS_DICT = dict(
IS_CACHE=True,
REDIS_ENDPOINT="",
REDIS_PORT=6379,
PASSWORD="",
CACHE_DB=0,
SESSION_DB=1,
POOLSIZE=4,
)
AIO_CACHE = RedisCache(endpoint=REDIS_DICT['REDIS_ENDPOINT'], port=REDIS_DICT['REDIS_PORT'], namespace="main")
# mongodb
MONGODB = dict(
HOST="",
PORT="",
USERNAME='',
PASSWORD='',
DATABASE='owllook',
)
# website
WEBSITE = dict(
IS_RUNNING=True,
TOKEN=''
)
AUTH = {
"Owllook-Api-Key": ""
}
HOST = ['owllook.net', 'www.owllook.net', '0.0.0.0:8000']
TIMEZONE = 'Asia/Shanghai'
| [
"[email protected]"
] | |
a71c39e3394fc5cc6525d2128a4f4548fe0a677b | 042bd40e554ac7fcd618c334ae98b4f43248a250 | /interfaces/python/lib/ocean_dummy.py | 41ebdac07d314bb378d87fb2fc951791b1c79acd | [
"Apache-2.0"
] | permissive | kant/ocean-tensor-package | 8a62df968335de2057ff095f0910e5ad5fcff8e1 | fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d | refs/heads/master | 2020-03-29T04:01:22.064480 | 2018-09-19T19:17:19 | 2018-09-19T19:17:19 | 149,511,923 | 0 | 0 | Apache-2.0 | 2018-09-19T21:03:14 | 2018-09-19T21:03:14 | null | UTF-8 | Python | false | false | 925 | py | # -------------------------------------------------------------------------
# Copyright 2018, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from pyOceanDummy_itf import *
from pyOceanDummy_cpu import *
# The GPU implementation is optional
try :
from pyOceanDummy_gpu import *
except ValueError :
# The module does not exist
pass
# =============================================================================
# /unreleased/azure-mgmt-eventhub/azure/mgmt/eventhub/models/
#   consumer_group_create_or_update_parameters.py
#   (repo: CharaD7/azure-sdk-for-python, MIT)
# =============================================================================
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConsumerGroupCreateOrUpdateParameters(Model):
"""Parameters supplied to the CreateOrUpdate Consumer Group operation.
:param location: Location of the resource.
:type location: str
:param type: ARM type of the namespace.
:type type: str
:param name: Name of the consumer group.
:type name: str
:param created_at: Exact time the message was created.
:type created_at: datetime
:param event_hub_path: The path of the event hub.
:type event_hub_path: str
:param updated_at: The exact time the message has been updated.
:type updated_at: datetime
:param user_metadata: The user metadata.
:type user_metadata: str
"""
_validation = {
'location': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
'event_hub_path': {'key': 'properties.eventHubPath', 'type': 'str'},
'updated_at': {'key': 'properties.updatedAt', 'type': 'iso-8601'},
'user_metadata': {'key': 'properties.userMetadata', 'type': 'str'},
}
def __init__(self, location, type=None, name=None, created_at=None, event_hub_path=None, updated_at=None, user_metadata=None):
self.location = location
self.type = type
self.name = name
self.created_at = created_at
self.event_hub_path = event_hub_path
self.updated_at = updated_at
self.user_metadata = user_metadata
| [
"[email protected]"
] | |
0db5b3deb80041a74fe00a76329d36249f0746ad | 42dd79c614b775e6e8e782ea7ab332aef44251b9 | /extra_apps/xadmin/views/website.py | 02012eff0b7d66b2d7a36ed53d7a74ac75de61ae | [] | no_license | Annihilater/imooc | 114575638f251a0050a0240d5a25fc69ef07d9ea | 547046cff32ce413b0a4e21714cb9ab9ce19bc49 | refs/heads/master | 2020-05-03T09:06:18.247371 | 2019-12-04T09:24:55 | 2019-12-04T09:24:55 | 178,545,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,949 | py | from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.views.decorators.cache import never_cache
from django.contrib.auth.views import LoginView as login
from django.contrib.auth.views import LogoutView as logout
from django.http import HttpResponse
from .base import BaseAdminView, filter_hook
from .dashboard import Dashboard
from xadmin.forms import AdminAuthenticationForm
from xadmin.models import UserSettings
from xadmin.layout import FormHelper
class IndexView(Dashboard):
title = _("Main Dashboard")
icon = "fa fa-dashboard"
def get_page_id(self):
return "home"
class UserSettingView(BaseAdminView):
@never_cache
def post(self, request):
key = request.POST["key"]
val = request.POST["value"]
us, created = UserSettings.objects.get_or_create(user=self.user, key=key)
us.value = val
us.save()
return HttpResponse("")
class LoginView(BaseAdminView):
title = _("Please Login")
login_form = None
login_template = None
@filter_hook
def update_params(self, defaults):
pass
@never_cache
def get(self, request, *args, **kwargs):
context = self.get_context()
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
context.update(
{
"title": self.title,
"helper": helper,
"app_path": request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
}
)
defaults = {
"extra_context": context,
# 'current_app': self.admin_site.name,
"authentication_form": self.login_form or AdminAuthenticationForm,
"template_name": self.login_template or "xadmin/views/login.html",
}
self.update_params(defaults)
# return login(request, **defaults)
return login.as_view(**defaults)(request)
@never_cache
def post(self, request, *args, **kwargs):
return self.get(request)
class LogoutView(BaseAdminView):
logout_template = None
need_site_permission = False
@filter_hook
def update_params(self, defaults):
pass
@never_cache
def get(self, request, *args, **kwargs):
context = self.get_context()
defaults = {
"extra_context": context,
# 'current_app': self.admin_site.name,
"template_name": self.logout_template or "xadmin/views/logged_out.html",
}
if self.logout_template is not None:
defaults["template_name"] = self.logout_template
self.update_params(defaults)
# return logout(request, **defaults)
return logout.as_view(**defaults)(request)
@never_cache
def post(self, request, *args, **kwargs):
return self.get(request)
| [
"[email protected]"
] | |
592f65c3845cec1b556e21772988fe41c2d61145 | aca2258cf58e0d2c7e4939e73bcb82b6c135282c | /libs/Mailman/mailman/commands/tests/test_membership.py | 6cf4802c6c8546a83b4d135e007b28482e0492be | [] | no_license | masomel/py-import-analysis | cfe6749a1d7430b179559b9e0911b8c8df507be7 | 7edf8148e34b9f73ca6433ceb43a1770f4fa32c1 | refs/heads/master | 2021-03-16T10:00:24.205301 | 2019-08-01T20:32:34 | 2019-08-01T20:32:34 | 112,668,748 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | # Copyright (C) 2016-2017 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Test the Leave command."""
import unittest
from mailman.app.lifecycle import create_list
from mailman.commands.eml_membership import Leave
from mailman.email.message import Message
from mailman.interfaces.mailinglist import SubscriptionPolicy
from mailman.interfaces.usermanager import IUserManager
from mailman.runners.command import Results
from mailman.testing.helpers import set_preferred
from mailman.testing.layers import ConfigLayer
from zope.component import getUtility
class TestLeave(unittest.TestCase):
layer = ConfigLayer
def setUp(self):
self._mlist = create_list('[email protected]')
self._command = Leave()
def test_confirm_leave_not_a_member(self):
self._mlist.unsubscription_policy = SubscriptionPolicy.confirm
# Try to unsubscribe someone who is not a member. Anne is a real
# user, with a validated address, but she is not a member of the
# mailing list.
anne = getUtility(IUserManager).create_user('[email protected]')
set_preferred(anne)
# Initiate an unsubscription.
msg = Message()
msg['From'] = '[email protected]'
results = Results()
self._command.process(self._mlist, msg, {}, (), results)
self.assertEqual(
str(results).splitlines()[-1],
'leave: [email protected] is not a member of [email protected]')
| [
"[email protected]"
] | |
4b0e7d05d72b190fc3957af9c61e79e11a21b644 | abccdbf9b0849b47960c3c352870793405debfed | /0x07-python-test_driven_development/4-print_square.py | fab6205b3d5afa34eec331a57f3ea50045fc96f1 | [] | no_license | hunterxx0/holbertonschool-higher_level_programming | 88b1b0f31b536c6940f2e64a6924a06ba9cbf193 | 44064cf0722cd20d93f58b64ab185d2898770d73 | refs/heads/master | 2022-12-20T12:14:15.877147 | 2020-09-24T21:25:54 | 2020-09-24T21:25:54 | 259,276,369 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!/usr/bin/python3
"""
a square printing function: print_square()::
>>> def print_square(1):
#
"""
def print_square(size):
"""
Prints a square of size 'size'
"""
if type(size) is not int:
raise TypeError('size must be an integer')
elif size < 0:
raise ValueError('size must be >= 0')
else:
for x in range(size):
for y in range(size):
print("#", end="")
            print()
| [
"[email protected]"
] | |
a8c4360159626be4980ee48d7a6491db264ceafc | 162e2588156cb2c0039c926c5c442363d9f77b00 | /tests/integration_tests/data_steward/analytics/cdr_ops/report_runner_test.py | c00229bcb0b01b9d9828c4aa35f5c20ef5eb9760 | [
"MIT"
] | permissive | nishanthpp93/curation | 38be687240b52decc25ffb7b655f25e9faa40e47 | ac9f38b2f4580ae806121dd929293159132c7d2a | refs/heads/develop | 2022-08-08T20:33:53.125216 | 2021-12-03T21:38:48 | 2021-12-03T21:38:48 | 155,608,471 | 1 | 0 | MIT | 2020-10-09T01:14:39 | 2018-10-31T18:54:34 | Python | UTF-8 | Python | false | false | 3,081 | py | import os
import unittest
from tempfile import NamedTemporaryFile
from pathlib import PurePath
from bs4 import BeautifulSoup as bs
from analytics.cdr_ops.report_runner import IPYNB_SUFFIX, HTML_SUFFIX, main
TEST_NOTEBOOK = """
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["parameters"]
project_id = ''
dataset_id = ''
table_name = ''
# -
print(
f'project_id={project_id}, dataset_id={dataset_id}, table_name={table_name}'
)
"""
class ReportRunnerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.temp_notebook_py_file = NamedTemporaryFile('w',
suffix='.py',
delete=True)
self.temp_notebook_py_file.write(TEST_NOTEBOOK.strip())
self.temp_notebook_py_file.flush()
self.notebook_py_path = self.temp_notebook_py_file.name
self.notebook_ipynb_path = PurePath(
self.notebook_py_path).with_suffix(IPYNB_SUFFIX)
self.notebook_html_path = PurePath(
self.notebook_py_path).with_suffix(HTML_SUFFIX)
self.parameters = {
'project_id': 'project_id',
'dataset_id': 'dataset_id',
'table_name': 'condition'
}
def tearDown(self):
# This removes the python file automatically
self.temp_notebook_py_file.close()
# Remove the ipynb and html files
os.remove(self.notebook_ipynb_path)
os.remove(self.notebook_html_path)
def test_main(self):
# Running the notebook and saving to the HTML page
main(self.notebook_py_path, self.parameters, self.notebook_py_path)
# Testing the content of the HTML page
with open(self.notebook_html_path, 'r') as f:
soup = bs(f, parser="lxml", features="lxml")
output_divs = soup.findAll('div', {"class": "jp-RenderedText"})
output_div_count = len(output_divs)
self.assertEqual(
output_div_count, 1,
f'Expected exactly 1 <div class="jp-RenderedText"> element, saw {output_div_count}'
)
output_pres = output_divs[0].findAll('pre')
output_pres_count = len(output_pres)
self.assertEqual(
output_pres_count, 1,
f'Expected exactly one <pre> element under <div class="jp-RenderedText">, saw {output_pres_count}'
)
actual = output_pres[0].get_text().strip()
expected = ', '.join(
[f'{k}={v}' for k, v in self.parameters.items()])
self.assertEqual(actual, expected)
| [
"[email protected]"
] | |
a4ca9efbd49fe56401b0c9f2e47ed03de5d2e30e | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-3865.py | b81595c3efd5b844c317e6d5fbd8ebfbb6201a00 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,291 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:$Type) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
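# k walks a fixed pseudo-random key sequence (multiplicative steps mod 37831),
# so every run inserts the same shuffled keys without an RNG dependency.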
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
5846bc204c7e1842e8b5ea77991c70bcba7181e3 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/python_all/52_14.py | ac7dd979fdab752a1073fb88aae9f43db82f325a | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | Python – Test for Even values dictionary values lists
Given a dictionary with lists as values, map each key to a Boolean that says
whether all values in its list are even.
> **Input** : {“Gfg” : [6, 8, 10], “is” : [8, 10, 12, 16], “Best” : [10, 16,
> 14, 6]}
> **Output** : {‘Gfg’: True, ‘is’: True, ‘Best’: True}
> **Explanation** : All lists have even numbers.
>
> **Input** : {“Gfg” : [6, 5, 10], “is” : [8, 10, 11, 16], “Best” : [10, 16,
> 14, 6]}
> **Output** : {‘Gfg’: False, ‘is’: False, ‘Best’: True}
> **Explanation** : Only “Best” has even numbers.
**Method #1 : Using loop**
This is the brute-force way to perform this task. We iterate over all the
values and check whether every element of a list is even; if so, we map the
key to True, otherwise to False.
## Python3
# Python3 code to demonstrate working of
# Test for Even values dictionary values lists
# Using loop
# initializing dictionary
test_dict = {"Gfg" : [6, 7, 3],
"is" : [8, 10, 12, 16],
"Best" : [10, 16, 14, 6]}
# printing original dictionary
print("The original dictionary is : " + str(test_dict))
res = dict()
for sub in test_dict:
flag = 1
# checking for even elements
for ele in test_dict[sub]:
if ele % 2 != 0:
flag = 0
break
# adding True if all Even elements
res[sub] = True if flag else False
# printing result
print("The computed dictionary : " + str(res))
**Output**
The original dictionary is : {'Gfg': [6, 7, 3], 'is': [8, 10, 12, 16], 'Best': [10, 16, 14, 6]}
The computed dictionary : {'Gfg': False, 'is': True, 'Best': True}
**Method #2 : Using all() + dictionary comprehension**
This is another way to perform the task. Here all() checks every element of
each list, and a dictionary comprehension rebuilds the result.
## Python3
# Python3 code to demonstrate working of
# Test for Even values dictionary values lists
# Using all() + dictionary comprehension
# initializing dictionary
test_dict = {"Gfg" : [6, 7, 3],
"is" : [8, 10, 12, 16],
"Best" : [10, 16, 14, 6]}
# printing original dictionary
print("The original dictionary is : " + str(test_dict))
# using all to check for all even elements
res = {sub : all(ele % 2 == 0 for ele in
test_dict[sub]) for sub in test_dict}
# printing result
print("The computed dictionary : " + str(res))
**Output**
The original dictionary is : {'Gfg': [6, 7, 3], 'is': [8, 10, 12, 16], 'Best': [10, 16, 14, 6]}
The computed dictionary : {'Gfg': False, 'is': True, 'Best': True}
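As an illustrative aside, all(ele % 2 == 0 for ele in [6, 8, 10]) evaluates to
True, and all() short-circuits on the first odd element it meets, so Method #2
stops scanning a list as soon as it can no longer be all-even.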
| [
"[email protected]"
] | |
8eef75cab1181157c9944e567533b91f03ae8168 | 7f0c02b3eef636cc382484dd8015207c35cc83a8 | /lib/python/treadmill/runtime/linux/image/_docker.py | fa24bbf1455842f1673f31d8a4867769d207bc30 | [
"Apache-2.0"
] | permissive | ceache/treadmill | 4efa69482dafb990978bfdcb54b24c16ca5d1147 | 26a1f667fe272ff1762a558acfd66963494020ca | refs/heads/master | 2021-01-12T12:44:13.474640 | 2019-08-20T23:22:37 | 2019-08-20T23:22:37 | 151,146,942 | 0 | 0 | Apache-2.0 | 2018-10-01T19:31:51 | 2018-10-01T19:31:51 | null | UTF-8 | Python | false | false | 4,405 | py | """Docker funtion in linux runtime
"""
import grp # pylint: disable=import-error
import io
import logging
import os
from treadmill import exc
from treadmill import fs
from treadmill import subproc
from treadmill import supervisor
from treadmill import utils
from treadmill import dockerutils
from treadmill.appcfg import abort as app_abort
from treadmill.fs import linux as fs_linux
from .. import _manifest
_LOGGER = logging.getLogger(__name__)
_CONTAINER_DOCKER_ENV_DIR = os.path.join('docker', 'env')
_CONTAINER_DOCKER_ETC_DIR = os.path.join('docker', 'etc')
_PASSWD_PATTERN = '{NAME}:x:{UID}:{GID}:{INFO}:{HOME}:{SHELL}'
_GROUP_PATTERN = '{NAME}:x:{GID}'
def _has_docker(app):
return hasattr(app, 'docker') and app.docker
def create_docker_environ_dir(container_dir, root_dir, app):
"""Creates environ dir for docker"""
if not _has_docker(app):
return
env_dir = os.path.join(container_dir, _CONTAINER_DOCKER_ENV_DIR)
env = {}
treadmill_bind_preload_so = os.path.basename(
subproc.resolve('treadmill_bind_preload.so')
)
if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:
env['LD_PRELOAD'] = os.path.join(
_manifest.TREADMILL_BIND_PATH,
'$LIB',
treadmill_bind_preload_so
)
supervisor.create_environ_dir(env_dir, env)
# Bind the environ directory in the container volume
fs.mkdir_safe(os.path.join(root_dir, _CONTAINER_DOCKER_ENV_DIR))
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, _CONTAINER_DOCKER_ENV_DIR),
source=os.path.join(container_dir, _CONTAINER_DOCKER_ENV_DIR),
recursive=False, read_only=True
)
def prepare_docker_daemon_path(newroot_norm, app, data):
"""Mount tmpfs for docker
"""
if not _has_docker(app):
return
# /etc/docker as temp fs as dockerd create /etc/docker/key.json
try:
fs_linux.mount_tmpfs(newroot_norm, '/etc/docker')
except FileNotFoundError as err:
_LOGGER.error('Failed to mount docker tmpfs: %s', err)
# this exception is caught by sproc run to generate abort event
raise exc.ContainerSetupError(
msg=str(err),
reason=app_abort.AbortedReason.UNSUPPORTED,
)
# Setup the dockerd confdir
dockerutils.prepare_docker_confdir(
os.path.join(newroot_norm, 'etc', 'docker'),
app,
data
)
def overlay_docker(container_dir, root_dir, app):
"""Mount etc/hosts for docker container
"""
# FIXME: This path is mounted as RW because ro volume in treadmill
# container can not be mounted in docker 'Error response from
# daemon: chown /etc/hosts: read-only file system.'
if not _has_docker(app):
return
overlay_dir = os.path.join(container_dir, 'overlay')
fs_linux.mount_bind(
root_dir, os.path.join(os.sep, _CONTAINER_DOCKER_ETC_DIR, 'hosts'),
source=os.path.join(overlay_dir, 'etc/hosts'),
recursive=False, read_only=False
)
_create_overlay_passwd(root_dir, app.proid)
_create_overlay_group(root_dir, app.proid)
def _create_overlay_group(root_dir, proid):
"""create a overlay /etc/group in oder to mount into container
"""
path = os.path.join(root_dir, _CONTAINER_DOCKER_ETC_DIR, 'group')
(_uid, gid) = utils.get_uid_gid(proid)
with io.open(path, 'w') as f:
root = _GROUP_PATTERN.format(
NAME='root',
GID=0
)
f.write('{}\n'.format(root))
group = _GROUP_PATTERN.format(
NAME=grp.getgrgid(gid).gr_name,
GID=gid
)
f.write('{}\n'.format(group))
def _create_overlay_passwd(root_dir, proid):
"""create a overlay /etc/passwd in order to mount into container
"""
path = os.path.join(root_dir, _CONTAINER_DOCKER_ETC_DIR, 'passwd')
(uid, gid) = utils.get_uid_gid(proid)
with io.open(path, 'w') as f:
root = _PASSWD_PATTERN.format(
NAME='root',
UID=0,
GID=0,
INFO='root',
HOME='/root',
SHELL='/bin/sh'
)
f.write('{}\n'.format(root))
user = _PASSWD_PATTERN.format(
NAME=proid,
UID=uid,
GID=gid,
INFO='',
HOME='/',
SHELL='/sbin/nologin'
)
f.write('{}\n'.format(user))
| [
"[email protected]"
] | |
34698f6c132ed077c67d3a15f869d8d78bcefe61 | 3e3863e9eced23d646cd039a395b08ed6d1f3929 | /training/medium/robbery-optimisation.py | ca3d8f40995da67cf5301d321958008d3857282b | [] | no_license | Coni63/CG_repo | dd608bdbd2560598a72339d150ec003e6b688cac | d30e01dfe2a12e26c85799c82cf38e606ffdbc16 | refs/heads/master | 2020-06-25T04:03:54.260340 | 2019-10-20T16:16:40 | 2019-10-20T16:16:40 | 199,195,242 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
n = int(input())
housevalue = [int(input()) for i in range(n)]
n = len(housevalue)
pn = housevalue[0]
qn = 0
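# Classic house-robber DP over two running states:
#   pn: best loot so far when house i is robbed (house i-1 must be skipped)
#   qn: best loot so far when house i is skipped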
for i in range(1, n):
pn1 = pn
qn1 = qn
pn = qn1 + housevalue[i]
qn = max(pn1, qn1)
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
print(max(pn,qn))
| [
"="
] | = |
fc9f01887c4a6b276e93e9c6fd48ae39dd9e98b0 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit_Class66.py | de1dd2f58ce2a9d58b5a7c4f8933d6310d93f36a | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,442 | py | # qubit number=3
# total number=11
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.x(input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.cx(input_qubit[2],input_qubit[1]) # number=4
prog.cx(input_qubit[2],input_qubit[1]) # number=10
prog.z(input_qubit[2]) # number=3
prog.y(input_qubit[2]) # number=5
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_Class66.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"[email protected]"
] | |
463cdc82d5cd7cd1180bc5eaf2219bb87377ff45 | c9ad6ad969de505b3c8471c6f46dfd782a0fb498 | /0x07-python-test_driven_development/2-matrix_divided.py | e9a56cfcebadaa9b6d3ac60f351c118538368d68 | [] | no_license | enterpreneur369/holbertonschool-higher_level_programming | 002fd5a19b40c8b1db06b34c4344e307f24c17ac | dd7d3f14bf3bacb41e2116d732ced78998a4afcc | refs/heads/master | 2022-06-20T00:57:27.736122 | 2020-05-06T14:26:10 | 2020-05-06T14:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,261 | py | #!/usr/bin/python3
""" Module 2-matrix_mul
Module that contains the function matrix_mul
"""
def matrix_mul(m_a, m_b):
""" Returns a product matrix
Arguments:
m_a (list:int, list:float): First matrix
m_b (list:int, list:float): Second matrix
"""
res = []
row_err = "Each row of the matrix must have the\
same size"
tp_err = "div must be a number"
lt_erra = "m_a must be a list"
lt_errb = "m_b must be a list"
lt2d_erra = "m_a must be a list of lists"
lt2d_errb = "m_b must be a list of lists"
lt_emptya = "m_a can't be empty"
lt_emptyb = "m_a can't be empty"
lte_erra = "m_a should contain only integers or floats"
lte_errb = "m_b should contain only integers or floats"
lte_sizera = "each row of m_a must be of the same size"
lte_sizerb = "each row of m_b must be of the same size"
mul_err = "m_a and m_b can't be multiplied"
if not isinstance(m_a, list):
raise TypeError(lt_erra)
if not isinstance(m_b, list):
raise TypeError(lt_errb)
if m_a[0] is None or not isinstance(m_a[0], list):
raise TypeError(lt2d_erra)
if m_b[0] is None or not isinstance(m_b[0], list):
raise TypeError(lt2d_errb)
if m_a == [] or m_a == [[]]:
raise ValueError(lt_emptya)
if m_b == [] or m_b == [[]]:
raise ValueError(lt_emptyb)
lenr0, lenc0 = len(m_a), len(m_a[0])
i, j = 0, 0
typ = None
for i in range(lenr0):
for j in range(lenc0):
if i == 0 and j == 0:
if isinstance(m_a[i][j], int):
typ = int
elif isinstance(m_a[i][j], float):
typ = float
else:
raise TypeError(lte_erra)
else:
if isinstance(m_a[i][j], typ):
continue
else:
raise TypeError(lte_erra)
lenr0, lenc0 = len(m_b), len(m_b[0])
i, j = 0, 0
typ = None
for i in range(lenr0):
for j in range(lenc0):
if i == 0 and j == 0:
if isinstance(m_b[i][j], int):
typ = int
elif isinstance(m_b[i][j], float):
typ = float
else:
                    raise TypeError(lte_errb)
else:
if isinstance(m_b[i][j], typ):
continue
else:
raise TypeError(lte_errb)
lenr0, lenc0 = len(m_a), len(m_a[0])
n = lenr0
i, j, cs = 0, 0, 0
for i in range(lenr0):
for j in range(lenc0):
if len(m_a[i]) != lenc0:
raise TypeError(lte_sizera)
lenr0, lenc0 = len(m_b), len(m_b[0])
p = lenc0
i, j, cs = 0, 0, 0
for i in range(lenr0):
for j in range(lenc0):
if len(m_b[i]) != lenc0:
raise TypeError(lte_sizerb)
    # The actual multiplication: res[i][j] = sum over k of m_a[i][k] * m_b[k][j]
    if len(m_a[0]) != len(m_b):
        raise ValueError(mul_err)
    for i in range(n):
        row = []
        for j in range(p):
            cs = 0
            for k in range(len(m_b)):
                cs += m_a[i][k] * m_b[k][j]
            row.append(cs)
        res.append(row)
    return (res)
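# Illustrative check (not part of the original checker):
#   matrix_mul([[1, 2], [3, 4]], [[1, 0], [0, 1]]) returns [[1, 2], [3, 4]]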
| [
"[email protected]"
] | |
b150f199a4268e8ab72d5c9a9ce49b2d6abe73d4 | 698cb8d24879fe75669af6f2667c3f88660a0a1e | /deepModel/s11b_ALS_CONCAT.py | 29627c713b760a2dcfce82233dba73e25b24c24f | [] | no_license | HuichuanLI/Recommand-Algorithme | c83c5d34d75eebd127e2aef7abc8b7152fc54f96 | 302e14a3f7e5d72ded73b72a538596b6dc1233ff | refs/heads/master | 2023-05-11T03:01:30.940242 | 2023-04-30T08:03:19 | 2023-04-30T08:03:19 | 187,097,782 | 71 | 19 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | import numpy as np
from data_set import filepaths as fp
from torch.utils.data import DataLoader
from torch import nn
import torch.nn.functional as F
import torch
from sklearn.metrics import precision_score, recall_score, accuracy_score
from basic_sim import dataloader
class ALS_MLP(nn.Module):
def __init__(self, n_users, n_items, dim):
super(ALS_MLP, self).__init__()
        '''
        :param n_users: number of users
        :param n_items: number of items
        :param dim: embedding dimension
        '''
        # Randomly initialize the user embedding vectors
        self.users = nn.Embedding(n_users, dim, max_norm=1)
        # Randomly initialize the item embedding vectors
        self.items = nn.Embedding(n_items, dim, max_norm=1)
        # The first layer's input dimension is dim * 2, because concatenating
        # the user and item vectors doubles the width.
        self.denseLayer1 = self.dense_layer(dim * 2, dim)
        self.denseLayer2 = self.dense_layer(dim, dim // 2)
        # The last layer outputs a single value, which becomes the model
        # output after the Sigmoid activation.
        self.denseLayer3 = self.dense_layer(dim // 2, 1)
        self.sigmoid = nn.Sigmoid()
def dense_layer(self, in_features, out_features):
        # Each MLP unit is a linear layer plus an activation; here the
        # activation is the Tanh (hyperbolic tangent) function.
return nn.Sequential(
nn.Linear(in_features, out_features),
nn.Tanh()
)
def forward(self, u, v, isTrain=True):
        '''
        :param u: user index ids, shape: [batch_size]
        :param v: item index ids, shape: [batch_size]
        :return: predicted score for each (user, item) pair, shape: [batch_size]
        '''
# [batch_size, dim]
u = self.users(u)
v = self.items(v)
# [batch_size, dim*2]
uv = torch.cat([u, v], dim=1)
# [batch_size, dim]
uv = self.denseLayer1(uv)
# [batch_size, dim//2]
uv = self.denseLayer2(uv)
        # Apply dropout during training to curb overfitting
if isTrain: uv = F.dropout(uv)
# [batch_size,1]
uv = self.denseLayer3(uv)
# [batch_size]
uv = torch.squeeze(uv)
logit = self.sigmoid(uv)
return logit
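# Note: despite the "ALS" in the name, the score above comes from an MLP over
# the concatenated user/item embeddings rather than a plain dot product.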
def doEva(net, d):
d = torch.LongTensor(d)
u, i, r = d[:, 0], d[:, 1], d[:, 2]
with torch.no_grad():
out = net(u, i, False)
y_pred = np.array([1 if i >= 0.5 else 0 for i in out])
y_true = r.detach().numpy()
p = precision_score(y_true, y_pred)
r = recall_score(y_true, y_pred)
acc = accuracy_score(y_true, y_pred)
return p, r, acc
def train(epochs=10, batchSize=1024, lr=0.001, dim=128, eva_per_epochs=1):
    '''
    :param epochs: number of training epochs
    :param batchSize: number of samples per batch
    :param lr: learning rate
    :param dim: dimension of the user/item vectors
    :param eva_per_epochs: run an evaluation every this many epochs
    '''
    # Load the data
    user_set, item_set, train_set, test_set = \
        dataloader.readRecData(fp.Ml_100K.RATING, test_ratio=0.1)
    # Initialize the ALS_MLP model
    net = ALS_MLP(len(user_set), len(item_set), dim)
    # Define the optimizer
    optimizer = torch.optim.AdamW(net.parameters(), lr=lr, weight_decay=0.2)
    # Define the loss function
    criterion = torch.nn.BCELoss()
    # Start iterating
for e in range(epochs):
all_lose = 0
        # Read the data batch by batch
for u, i, r in DataLoader(train_set, batch_size=batchSize, shuffle=True):
optimizer.zero_grad()
r = torch.FloatTensor(r.detach().numpy())
result = net(u, i)
loss = criterion(result, r)
all_lose += loss
loss.backward()
optimizer.step()
print('epoch {}, avg_loss = {:.4f}'.format(e, all_lose / (len(train_set) // batchSize)))
        # Evaluate the model
if e % eva_per_epochs == 0:
p, r, acc = doEva(net, train_set)
print('train: Precision {:.4f} | Recall {:.4f} | accuracy {:.4f}'.format(p, r, acc))
p, r, acc = doEva(net, test_set)
print('test: Precision {:.4f} | Recall {:.4f} | accuracy {:.4f}'.format(p, r, acc))
if __name__ == '__main__':
train()
| [
"[email protected]"
] | |
78b7438d65e518367530ce1ce4adeed283a97e9a | 002ee33a04a6a74c10be79a2d667871de90fe728 | /faq/views.py | 4c8191dd9394c62569a71e75a3d988cd4a34e227 | [] | no_license | Code-Institute-Submissions/final-milestone-eCommerce | dc5866c61acd31bbf59ed31168e3e8110262a737 | d1547f90dc26ca20be299b98966865ef88df0027 | refs/heads/master | 2022-11-26T00:27:32.014852 | 2020-08-07T14:44:55 | 2020-08-07T14:44:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from django.shortcuts import render
from .models import FAQ
def show_faqs(request):
"""Renders all of the current frequently asked questions
to the faq.html page
"""
faqs = FAQ.objects.all()
return render(request, 'faq/faq.html', {'faqs': faqs})
| [
"[email protected]"
] | |
41ae5d87df532d2ee47b6a435c5e70a35ee8c637 | f8295c4b18d76d2c4467de8351802c2c741f06d9 | /example_project/example_project/urls.py | f8969c615b427f40f3029ac2d2ff64d5671751db | [] | no_license | stefanw/django-wikidata | 993b95ea7060c22a1c7ba4cdb46f3fbeb9338aca | 41b41650a1f5b893a7aa7855864a9a05e8e5d372 | refs/heads/master | 2020-04-24T20:25:31.897429 | 2019-02-24T09:38:52 | 2019-02-24T09:38:52 | 172,243,029 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | """example_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
0b026b7588cfd52cc92d6fd76b2985618ef2f533 | 60ca69e2a4c6b05e6df44007fd9e4a4ed4425f14 | /beginner_contest/175/C.py | 430190fbe6f342eff4aae54d28abe6bb704ad2fd | [
"MIT"
] | permissive | FGtatsuro/myatcoder | 12a9daafc88efbb60fc0cd8840e594500fc3ee55 | 25a3123be6a6311e7d1c25394987de3e35575ff4 | refs/heads/master | 2021-06-13T15:24:07.906742 | 2021-05-16T11:47:09 | 2021-05-16T11:47:09 | 195,441,531 | 0 | 0 | MIT | 2021-05-16T11:47:10 | 2019-07-05T16:47:58 | Python | UTF-8 | Python | false | false | 331 | py | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
x, k, d = map(int, input().split())
x = abs(x)
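# If k steps of size d cannot reach or pass the origin, just walk toward it.
# Otherwise the leftover steps bounce around the origin, so the parity of the
# remaining count decides between landing at x % d or at |x % d - d|.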
if x - (k * d) >= 0:
print(x - (k * d))
sys.exit(0)
else:
remain = k - (x // d)
p_min = x % d
n_min = abs(p_min -d)
if remain % 2 == 0:
print(p_min)
else:
print(n_min)
| [
"[email protected]"
] | |
84c2d1b9d4d9b14db0bd5e93aeac841a4e9ea9b0 | ddd3b6663fbcc5b64fe9a96a3da87dd1460e1ab4 | /src/routes/user.py | 8908f3e57f79e7b83ad52961a8451953a51f62fe | [] | no_license | ranihorev/scihive-backend | 3d72e35829d97368a331bc85c362c7af29b63eb9 | d246a8ed07b0fd793a1a9c3497c976cbd4957b3d | refs/heads/master | 2022-06-17T17:32:35.834425 | 2021-04-02T14:40:07 | 2021-04-02T14:40:07 | 184,781,038 | 13 | 4 | null | 2022-05-25T03:51:56 | 2019-05-03T15:41:16 | Python | UTF-8 | Python | false | false | 5,862 | py | import os
from flask import Blueprint, jsonify
import logging
from flask_jwt_extended.view_decorators import jwt_optional
from flask_restful import Api, Resource, abort, reqparse, marshal_with, fields
from flask_jwt_extended import (create_access_token, jwt_required, jwt_refresh_token_required,
get_jwt_identity, get_raw_jwt, set_access_cookies, unset_access_cookies)
from google.oauth2 import id_token
from google.auth.transport import requests
from ..models import User, db, RevokedToken, Paper
from .user_utils import generate_hash, get_jwt_email, get_user_optional, verify_hash, get_user_by_email
from .notifications.index import deserialize_token
app = Blueprint('user', __name__)
api = Api(app)
logger = logging.getLogger(__name__)
parser = reqparse.RequestParser()
parser.add_argument('email', help='This field cannot be blank', required=True)
parser.add_argument('password', help='This field cannot be blank', required=True)
parser.add_argument('username', required=False)
# Based on https://github.com/oleg-agapov/flask-jwt-auth/
def make_error(status_code, message):
response = jsonify()
response.status_code = status_code
return response
class UserRegistration(Resource):
def post(self):
abort(404, message='Password registration has been removed')
def get_user_profile(user: User):
return {'username': user.username, 'firstName': user.first_name,
'lastName': user.last_name, 'email': user.email, 'provider': user.provider}
class UserLogin(Resource):
def post(self):
data = parser.parse_args()
current_user = get_user_by_email(data['email'])
if not current_user:
abort(401, message='User {} doesn\'t exist'.format(data['email']))
elif current_user.pending:
abort(403, message='User is pending. Please log in via Google')
elif current_user.provider:
abort(403, message='For security reasons, please log in via Google')
if verify_hash(data['password'], current_user.password):
access_token = create_access_token(identity=dict(email=data['email']))
resp = jsonify(get_user_profile(current_user))
set_access_cookies(resp, access_token)
return resp
else:
return abort(401, message="Wrong credentials")
class UserLogoutAccess(Resource):
@jwt_required
def post(self):
jti = get_raw_jwt()['jti']
try:
db.session.add(RevokedToken(token=jti))
db.session.commit()
resp = jsonify({'message': 'Access token has been revoked'})
unset_access_cookies(resp)
return resp
except:
return {'message': 'Something went wrong'}, 500
class TokenRefresh(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
access_token = create_access_token(identity=current_user)
return {'access_token': access_token}
class ValidateUser(Resource):
@jwt_optional
def get(self):
user = get_user_optional()
if user:
return get_user_profile(user)
return None
class Unsubscribe(Resource):
@marshal_with({'title': fields.String})
def post(self, token):
try:
email, paper_id = deserialize_token(token)
user = get_user_by_email(email)
# Verify paper exists
paper = Paper.query.get_or_404(paper_id)
except Exception as e:
abort(404, message='invalid token')
return
user.unsubscribed_papers.append(paper)
db.session.commit()
return paper
class GoogleLogin(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('token', help='This field cannot be blank', required=True, location='json')
data = parser.parse_args()
try:
info = id_token.verify_oauth2_token(data['token'], requests.Request(), os.environ.get('GOOGLE_CLIENT_ID'))
except ValueError as e:
print(e)
abort(403, message='invalid token')
email = info['email']
current_user_email = get_jwt_email()
if current_user_email and current_user_email != email:
# TODO: Allow linking non-matching email addresses
abort(403, message='Your Google email address does not match your existing user')
# create user if not missing
user = User.query.filter_by(email=email).first()
first_name: str = info.get('given_name')
last_name: str = info.get('family_name')
if not user:
username = '_'.join(filter(None, [first_name, last_name])) or email.split('@')[0]
username.replace(' ', '_')
new_user = User(username=username,
email=email, password='', first_name=first_name, last_name=last_name, provider='Google')
db.session.add(new_user)
db.session.commit()
elif not user.provider:
user.first_name = first_name
user.last_name = last_name
user.provider = 'Google'
user.pending = False
db.session.commit()
access_token = create_access_token(
identity={'email': email, 'provider': 'Google', 'first_name': first_name, 'last_name': last_name})
resp = jsonify({'message': 'User was created/merged'})
set_access_cookies(resp, access_token)
return resp
api.add_resource(GoogleLogin, '/google_login')
api.add_resource(UserRegistration, '/register')
api.add_resource(UserLogin, '/login')
api.add_resource(UserLogoutAccess, '/logout')
api.add_resource(TokenRefresh, '/token/refresh')
api.add_resource(ValidateUser, '/validate')
api.add_resource(Unsubscribe, '/unsubscribe/<token>')
| [
"[email protected]"
] | |
73797439d36e04dea271e61b61aa8620a1227750 | f3b233e5053e28fa95c549017bd75a30456eb50c | /CDK2_input/L26/26-1S_wat_20Abox/set_1.py | 36b3bd309b7e3c60deb656a873098ec88d7a6bb5 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | import os
dir = '/mnt/scratch/songlin3/run/CDK2/L26/wat_20Abox/ti_one-step/26_1S/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_1.in'
temp_pbs = filesdir + 'temp_1.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
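# lambda windows for the one-step TI transformation; each value matches the
# name of the per-window working directory entered below.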
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_1.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
5af6b7fe044a6e854da42d9b35f8adab2005fab2 | 8e138461e9ed8e36245965e215685ce978742535 | /qiskit/transpiler/passes/utils/gate_direction.py | 90e66821a268e9c5a5fce4ec630584ef532d26a3 | [
"Apache-2.0"
] | permissive | faraimazh/qiskit-terra | 15d8c378114ee109f7b757a7d3795b4c9079c0a8 | 11c2e3ed89452cb6487db784c17c68a8a6284a57 | refs/heads/master | 2023-03-16T11:31:27.071954 | 2022-09-27T00:33:02 | 2022-09-27T00:33:02 | 220,650,207 | 0 | 0 | Apache-2.0 | 2023-03-06T18:13:26 | 2019-11-09T13:59:40 | Python | UTF-8 | Python | false | false | 8,988 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Rearrange the direction of the cx nodes to match the directed coupling map."""
from math import pi
from qiskit.transpiler.layout import Layout
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.circuit import QuantumRegister
from qiskit.dagcircuit import DAGCircuit
from qiskit.circuit.library.standard_gates import RYGate, HGate, CXGate, ECRGate, RZXGate
class GateDirection(TransformationPass):
"""Modify asymmetric gates to match the hardware coupling direction.
This pass makes use of the following identities::
┌───┐┌───┐┌───┐
q_0: ──■── q_0: ┤ H ├┤ X ├┤ H ├
┌─┴─┐ = ├───┤└─┬─┘├───┤
q_1: ┤ X ├ q_1: ┤ H ├──■──┤ H ├
└───┘ └───┘ └───┘
┌──────┐ ┌───────────┐┌──────┐┌───┐
q_0: ┤0 ├ q_0: ┤ RY(-pi/2) ├┤1 ├┤ H ├
│ ECR │ = └┬──────────┤│ ECR │├───┤
q_1: ┤1 ├ q_1: ─┤ RY(pi/2) ├┤0 ├┤ H ├
└──────┘ └──────────┘└──────┘└───┘
┌──────┐ ┌───┐┌──────┐┌───┐
q_0: ┤0 ├ q_0: ┤ H ├┤1 ├┤ H ├
│ RZX │ = ├───┤│ RZX │├───┤
q_1: ┤1 ├ q_1: ┤ H ├┤0 ├┤ H ├
└──────┘ └───┘└──────┘└───┘
"""
def __init__(self, coupling_map, target=None):
"""GateDirection pass.
Args:
coupling_map (CouplingMap): Directed graph represented a coupling map.
target (Target): The backend target to use for this pass. If this is specified
it will be used instead of the coupling map
"""
super().__init__()
self.coupling_map = coupling_map
self.target = target
# Create the replacement dag and associated register.
self._cx_dag = DAGCircuit()
qr = QuantumRegister(2)
self._cx_dag.add_qreg(qr)
self._cx_dag.apply_operation_back(HGate(), [qr[0]], [])
self._cx_dag.apply_operation_back(HGate(), [qr[1]], [])
self._cx_dag.apply_operation_back(CXGate(), [qr[1], qr[0]], [])
self._cx_dag.apply_operation_back(HGate(), [qr[0]], [])
self._cx_dag.apply_operation_back(HGate(), [qr[1]], [])
self._ecr_dag = DAGCircuit()
qr = QuantumRegister(2)
self._ecr_dag.add_qreg(qr)
self._ecr_dag.apply_operation_back(RYGate(-pi / 2), [qr[0]], [])
self._ecr_dag.apply_operation_back(RYGate(pi / 2), [qr[1]], [])
self._ecr_dag.apply_operation_back(ECRGate(), [qr[1], qr[0]], [])
self._ecr_dag.apply_operation_back(HGate(), [qr[0]], [])
self._ecr_dag.apply_operation_back(HGate(), [qr[1]], [])
@staticmethod
def _rzx_dag(parameter):
_rzx_dag = DAGCircuit()
qr = QuantumRegister(2)
_rzx_dag.add_qreg(qr)
_rzx_dag.apply_operation_back(HGate(), [qr[0]], [])
_rzx_dag.apply_operation_back(HGate(), [qr[1]], [])
_rzx_dag.apply_operation_back(RZXGate(parameter), [qr[1], qr[0]], [])
_rzx_dag.apply_operation_back(HGate(), [qr[0]], [])
_rzx_dag.apply_operation_back(HGate(), [qr[1]], [])
return _rzx_dag
def run(self, dag):
"""Run the GateDirection pass on `dag`.
Flips the cx nodes to match the directed coupling map. Modifies the
input dag.
Args:
dag (DAGCircuit): DAG to map.
Returns:
DAGCircuit: The rearranged dag for the coupling map
Raises:
TranspilerError: If the circuit cannot be mapped just by flipping the
cx nodes.
"""
trivial_layout = Layout.generate_trivial_layout(*dag.qregs.values())
layout_map = trivial_layout.get_virtual_bits()
if len(dag.qregs) > 1:
raise TranspilerError(
"GateDirection expects a single qreg input DAG,"
"but input DAG had qregs: {}.".format(dag.qregs)
)
if self.target is None:
cmap_edges = set(self.coupling_map.get_edges())
if not cmap_edges:
return dag
self.coupling_map.compute_distance_matrix()
dist_matrix = self.coupling_map.distance_matrix
for node in dag.two_qubit_ops():
control = node.qargs[0]
target = node.qargs[1]
physical_q0 = layout_map[control]
physical_q1 = layout_map[target]
if dist_matrix[physical_q0, physical_q1] != 1:
raise TranspilerError(
"The circuit requires a connection between physical "
"qubits %s and %s" % (physical_q0, physical_q1)
)
if (physical_q0, physical_q1) not in cmap_edges:
if node.name == "cx":
dag.substitute_node_with_dag(node, self._cx_dag)
elif node.name == "ecr":
dag.substitute_node_with_dag(node, self._ecr_dag)
elif node.name == "rzx":
dag.substitute_node_with_dag(node, self._rzx_dag(*node.op.params))
else:
raise TranspilerError(
f"Flipping of gate direction is only supported "
f"for CX, ECR, and RZX at this time, not {node.name}."
)
else:
# TODO: Work with the gate instances and only use names as look up keys.
# This will require iterating over the target names to build a mapping
# of names to gates that implement CXGate, ECRGate, RZXGate (including
# fixed angle variants)
for node in dag.two_qubit_ops():
control = node.qargs[0]
target = node.qargs[1]
physical_q0 = layout_map[control]
physical_q1 = layout_map[target]
if node.name == "cx":
if (physical_q0, physical_q1) in self.target["cx"]:
continue
if (physical_q1, physical_q0) in self.target["cx"]:
dag.substitute_node_with_dag(node, self._cx_dag)
else:
raise TranspilerError(
"The circuit requires a connection between physical "
"qubits %s and %s for cx" % (physical_q0, physical_q1)
)
elif node.name == "ecr":
if (physical_q0, physical_q1) in self.target["ecr"]:
continue
if (physical_q1, physical_q0) in self.target["ecr"]:
dag.substitute_node_with_dag(node, self._ecr_dag)
else:
raise TranspilerError(
"The circuit requires a connection between physical "
"qubits %s and %s for ecr" % (physical_q0, physical_q1)
)
elif node.name == "rzx":
if (physical_q0, physical_q1) in self.target["rzx"]:
continue
if (physical_q1, physical_q0) in self.target["rzx"]:
dag.substitute_node_with_dag(node, self._rzx_dag(*node.op.params))
else:
raise TranspilerError(
"The circuit requires a connection between physical "
"qubits %s and %s for rzx" % (physical_q0, physical_q1)
)
else:
raise TranspilerError(
f"Flipping of gate direction is only supported "
f"for CX, ECR, and RZX at this time, not {node.name}."
)
return dag
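# Minimal usage sketch (illustrative; the two-qubit coupling map below is made
# up, and only the pass defined above is assumed):
#
#     from qiskit import QuantumCircuit
#     from qiskit.transpiler import CouplingMap, PassManager
#
#     cmap = CouplingMap([(0, 1)])   # hardware only supports cx from 0 to 1
#     qc = QuantumCircuit(2)
#     qc.cx(1, 0)                    # deliberately the wrong direction
#     flipped = PassManager(GateDirection(cmap)).run(qc)  # rewritten as H-CX-H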
| [
"[email protected]"
] | |
49b4e0b91d57155a69ce4080265a0ee06dd8bf3c | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/webdriver_manager/archive.py | f827dc3151deda496a84de6fc9aa5809d377ab0e | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 1,045 | py | import tarfile
import zipfile
class Archive(object):
def __init__(self, path: str):
self.file_path = path
def unpack(self, directory):
if self.file_path.endswith(".zip"):
return self.__extract_zip(directory)
elif self.file_path.endswith(".tar.gz"):
return self.__extract_tar_file(directory)
def __extract_zip(self, to_directory):
archive = zipfile.ZipFile(self.file_path)
try:
archive.extractall(to_directory)
except Exception as e:
if e.args[0] not in [26, 13] and e.args[1] not in ['Text file busy', 'Permission denied']:
raise e
return archive.namelist()
def __extract_tar_file(self, to_directory):
try:
tar = tarfile.open(self.file_path, mode="r:gz")
except tarfile.ReadError:
tar = tarfile.open(self.file_path, mode="r:bz2")
members = tar.getmembers()
tar.extractall(to_directory)
tar.close()
return [x.name for x in members]
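# Usage sketch (paths are hypothetical):
#     extracted = Archive("/tmp/chromedriver_linux64.zip").unpack("/tmp/drivers")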
| [
"[email protected]"
] | |
3fc799fe13345e1eae8b48fa05b126090829b332 | 5a96112e11834d400a59b76caee33fd63831e273 | /python3_API_framework_V2/TestCases/test_api_v2.py | 60c0bfcfe591a2359e2ea5d8e3fd20024415a63f | [] | no_license | zhaozongzhao/interface_test | d3f93c8220cb5fab5f063ce7e315e54b2f623ce6 | f63f7e188639b34a8b80c9ce57591d9cabe3f4f8 | refs/heads/master | 2020-05-02T03:21:51.633352 | 2019-04-10T15:32:12 | 2019-04-10T15:32:12 | 177,726,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,456 | py | import unittest
from Common.DoExcel import DoExcel
import os
from Common import myRequest
import ddt
from Common import dir_config
from Common import myLogger2
import logging
import re
#Instantiate the logger object
#logger = MyLogger()
# Load all of the test case data
excelfile = dir_config.testcase_dir + "/api_info_1.xlsx"
de = DoExcel(excelfile)
all_case_datas = de.get_caseDatas_all()
print("所有的测试数据", all_case_datas)
global_vars = {}
@ddt.ddt
class Test_Api(unittest.TestCase):
@classmethod
def setUpClass(self):
de.update_init_data()
de.save_excelFile(excelfile)
@ddt.data(*all_case_datas)
def test_api(self,case_data):
global global_vars
        # Read each row of test data, send the HTTP request, and capture the response.
        logging.info("============== Starting an API test case; request data follows ===============")
        logging.info("Request URL: %s" % case_data["url"])
        logging.info("Request method: {0}".format(case_data["method"]))
        logging.info("Request data: {0}".format(case_data["request_data"]))
        # Dynamic substitution - check whether the request data references any
        # global variables and whether those globals exist yet.
if len(global_vars)>0 and case_data["request_data"] is not None:
for key,value in global_vars.items():
if case_data["request_data"].find(key) != -1:
case_data["request_data"] = case_data["request_data"].replace(key,value)
logging.info("动态更新之后的请求数据为:\n{0}".format(case_data["request_data"]))
res = myRequest.myRequest(case_data["url"], case_data["method"], case_data["request_data"])
logging.info("本次接口请求的状态码为:%d" % res.status_code)
logging.info("接口请求的返回数据为:")
logging.info(res.text)
#先要判断测试数据当中,是否有关联字段。。如果有,则需要提取出来。按表达式提取,并且赋给指定变量。
if "related_exp" in case_data.keys():
logging.info("需要从响应结果中提取数据:")
#related_data = parse_response.get_relatedData_from_response(res.text,case_data["related_exp"])
temp = case_data["related_exp"].split("=")
res_id = re.findall(temp[1],res.text)
            # Captured dynamically; stored as a global variable.
global_vars[temp[0]] = res_id[0]
logging.info("接口请求的期望数据为:")
logging.info(case_data["expected_data"])
logging.info("期望结果与实际结果的比对方式为:")
if int(case_data["compare_type"]) == 0:
logging.info("全值匹配模式。")
try:
self.assertEqual(res.text,case_data["expected_data"])
logging.info("结果比对成功,测试用例通过")
except AssertionError:
logging.exception("结果比对失败:")
raise AssertionError
else:
logging.info("正则表达式匹配模式。")
re_obj = re.match(case_data["expected_data"],res.text)
self.assertIsNotNone(re_obj, "正则表达式匹配失败!")
logging.info("========================结束一个接口测试用例==========================")
| [
"[email protected]"
] | |
4b9394fcf22a80069c7b0fa99773765f13fc7a07 | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/mpl_toolkits/mplot3d/art3d.py | 2d28a808af66a5141266c6950f5966ee40a2a899 | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,020 | py | # art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
"""
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
"""
import math
import numpy as np
from matplotlib import (
artist, cbook, colors as mcolors, lines, text as mtext, path as mpath)
from matplotlib.collections import (
LineCollection, PolyCollection, PatchCollection, PathCollection)
from matplotlib.colors import Normalize
from matplotlib.patches import Patch
from . import proj3d
def _norm_angle(a):
"""Return the given angle normalized to -180 < *a* <= 180 degrees."""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
@cbook.deprecated("3.1")
def norm_angle(a):
"""Return the given angle normalized to -180 < *a* <= 180 degrees."""
return _norm_angle(a)
def _norm_text_angle(a):
"""Return the given angle normalized to -90 < *a* <= 90 degrees."""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
@cbook.deprecated("3.1")
def norm_text_angle(a):
"""Return the given angle normalized to -90 < *a* <= 90 degrees."""
return _norm_text_angle(a)
def get_dir_vector(zdir):
"""
Return a direction vector.
Parameters
----------
zdir : {'x', 'y', 'z', None, 3-tuple}
The direction. Possible values are:
- 'x': equivalent to (1, 0, 0)
- 'y': equivalent to (0, 1, 0)
- 'z': equivalent to (0, 0, 1)
- *None*: equivalent to (0, 0, 0)
- an iterable (x, y, z) is returned unchanged.
Returns
-------
x, y, z : array-like
The direction vector. This is either a numpy.array or *zdir* itself if
*zdir* is already a length-3 iterable.
"""
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif np.iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
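# Example (sketch): get_dir_vector('x') returns array([1, 0, 0]), while an
# explicit 3-tuple such as get_dir_vector((0, 1, 1)) is returned unchanged.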
class Text3D(mtext.Text):
"""
Text object with 3D position and direction.
Parameters
----------
x, y, z
The position of the text.
text : str
The text string to display.
zdir : {'x', 'y', 'z', None, 3-tuple}
The direction of the text. See `.get_dir_vector` for a description of
the values.
Other Parameters
----------------
**kwargs
All other parameters are passed on to `~matplotlib.text.Text`.
"""
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
self.stale = True
@artist.allow_rasterization
def draw(self, renderer):
proj = proj3d.proj_trans_points(
[self._position3d, self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(_norm_text_angle(angle))
mtext.Text.draw(self, renderer)
self.stale = False
def get_tightbbox(self, renderer):
# Overwriting the 2d Text behavior which is not valid for 3d.
# For now, just return None to exclude from layout calculation.
return None
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
"""
3D line object.
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
"""
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
"""
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
# If *zs* is a list or array, then this will fail and
# just proceed to juggle_axes().
zs = np.full_like(xs, fill_value=float(zs))
except TypeError:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
self.stale = True
def set_data_3d(self, *args):
"""
Set the x, y and z data
Parameters
----------
x : array_like
The x-data to be plotted
y : array_like
The y-data to be plotted
z : array_like
The z-data to be plotted
Notes
-----
Accepts x, y, z arguments or a single array_like (x, y, z)
"""
if len(args) == 1:
self._verts3d = args[0]
else:
self._verts3d = args
self.stale = True
def get_data_3d(self):
"""
Get the current data
Returns
-------
verts3d : length-3 tuple or array_likes
The current data as a tuple or array_likes
"""
return self._verts3d
@artist.allow_rasterization
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
self.stale = False
def line_2d_to_3d(line, zs=0, zdir='z'):
"""Convert a 2D line to 3D."""
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
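# Usage sketch (assumes a figure with an Axes3D `ax`; names are illustrative):
#   line = lines.Line2D([0, 1], [0, 1])       # ordinary 2D line
#   line_2d_to_3d(line, zs=0.5, zdir='y')     # now a Line3D in the y=0.5 plane
#   ax.add_line(line)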
def _path_to_3d_segment(path, zs=0, zdir='z'):
"""Convert a path to a 3D segment."""
zs = np.broadcast_to(zs, len(path))
pathsegs = path.iter_segments(simplify=False, curves=False)
seg = [(x, y, z) for (((x, y), code), z) in zip(pathsegs, zs)]
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
@cbook.deprecated("3.1")
def path_to_3d_segment(path, zs=0, zdir='z'):
"""Convert a path to a 3D segment."""
return _path_to_3d_segment(path, zs=zs, zdir=zdir)
def _paths_to_3d_segments(paths, zs=0, zdir='z'):
"""Convert paths from a collection object to 3D segments."""
zs = np.broadcast_to(zs, len(paths))
segs = [_path_to_3d_segment(path, pathz, zdir)
for path, pathz in zip(paths, zs)]
return segs
@cbook.deprecated("3.1")
def paths_to_3d_segments(paths, zs=0, zdir='z'):
"""Convert paths from a collection object to 3D segments."""
return _paths_to_3d_segments(paths, zs=zs, zdir=zdir)
def _path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
"""Convert a path to a 3D segment with path codes."""
zs = np.broadcast_to(zs, len(path))
pathsegs = path.iter_segments(simplify=False, curves=False)
seg_codes = [((x, y, z), code) for ((x, y), code), z in zip(pathsegs, zs)]
if seg_codes:
seg, codes = zip(*seg_codes)
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
else:
seg3d = []
codes = []
return seg3d, list(codes)
@cbook.deprecated("3.1")
def path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
"""Convert a path to a 3D segment with path codes."""
return _path_to_3d_segment_with_codes(path, zs=zs, zdir=zdir)
def _paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
"""
Convert paths from a collection object to 3D segments with path codes.
"""
zs = np.broadcast_to(zs, len(paths))
segments_codes = [_path_to_3d_segment_with_codes(path, pathz, zdir)
for path, pathz in zip(paths, zs)]
if segments_codes:
segments, codes = zip(*segments_codes)
else:
segments, codes = [], []
return list(segments), list(codes)
@cbook.deprecated("3.1")
def paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
"""
Convert paths from a collection object to 3D segments with path codes.
"""
return _paths_to_3d_segments_with_codes(paths, zs=zs, zdir=zdir)
class Line3DCollection(LineCollection):
"""
A collection of 3D lines.
"""
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def set_segments(self, segments):
"""
Set 3D segments.
"""
self._segments3d = np.asanyarray(segments)
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
"""
Project the points according to renderer matrix.
"""
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [np.column_stack([xs, ys]) for xs, ys, zs in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for xs, ys, zs in xyslist:
minz = min(minz, min(zs))
return minz
@artist.allow_rasterization
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
"""
3D patch object.
"""
def __init__(self, *args, zs=(), zdir='z', **kwargs):
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
zs = np.broadcast_to(zs, len(verts))
self._segment3d = [juggle_axes(x, y, z, zdir)
for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = zip(*s)
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(np.column_stack([vxs, vys]))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
class PathPatch3D(Patch3D):
"""
3D PathPatch object.
"""
def __init__(self, path, *, zs=(), zdir='z', **kwargs):
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = zip(*s)
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(np.column_stack([vxs, vys]), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def _get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
@cbook.deprecated("3.1")
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
return _get_patch_verts(patch)
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = _get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
class Patch3DCollection(PatchCollection):
"""
A collection of 3D patches.
"""
def __init__(self, *args, zs=0, zdir='z', depthshade=True, **kwargs):
"""
Create a collection of flat 3D patches with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of patches in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PatchCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
self._depthshade = depthshade
super().__init__(*args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = offsets.T
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (_zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (_zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PatchCollection.set_offsets(self, np.column_stack([vxs, vys]))
if vzs.size > 0:
return min(vzs)
else:
return np.nan
class Path3DCollection(PathCollection):
"""
A collection of 3D paths.
"""
def __init__(self, *args, zs=0, zdir='z', depthshade=True, **kwargs):
"""
Create a collection of flat 3D paths with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
self._depthshade = depthshade
super().__init__(*args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = offsets.T
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (_zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (_zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PathCollection.set_offsets(self, np.column_stack([vxs, vys]))
return np.min(vzs) if vzs.size else np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
Convert a :class:`~matplotlib.collections.PatchCollection` into a
:class:`Patch3DCollection` object
(or a :class:`~matplotlib.collections.PathCollection` into a
:class:`Path3DCollection` object).
Parameters
----------
    zs
The location or locations to place the patches in the collection along
the *zdir* axis. Default: 0.
zdir
The axis in which to place the patches. Default: "z".
depthshade
Whether to shade the patches to give a sense of depth. Default: *True*.
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
col._depthshade = depthshade
col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
"""
A collection of 3D polygons.
"""
def __init__(self, verts, *args, zsort='average', **kwargs):
"""
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
"""
super().__init__(verts, *args, **kwargs)
self.set_zsort(zsort)
self._codes3d = None
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
"""
Sets the calculation method for the z-order.
Parameters
----------
zsort : {'average', 'min', 'max'}
The function applied on the z-coordinates of the vertices in the
viewer's coordinate system, to determine the z-order. *True* is
deprecated and equivalent to 'average'.
"""
if zsort is True:
cbook.warn_deprecated(
"3.1", message="Passing True to mean 'average' for set_zsort "
"is deprecated and support will be removed in Matplotlib 3.3; "
"pass 'average' instead.")
zsort = 'average'
self._zsortfunc = self._zsort_functions[zsort]
self._sort_zpos = None
self.stale = True
def get_vector(self, segments3d):
"""Optimize points for projection."""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si + len(p)
segis.append((si, ei))
si = ei
if len(segments3d):
xs, ys, zs = zip(*points)
else:
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
def set_verts(self, verts, closed=True):
"""Set 3D vertices."""
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], False)
self._closed = closed
def set_verts_and_codes(self, verts, codes):
"""Sets 3D vertices with path codes."""
# set vertices with closed=False to prevent PolyCollection from
# setting path codes
self.set_verts(verts, closed=False)
# and set our own codes instead.
self._codes3d = codes
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort('average')
self._facecolors3d = PolyCollection.get_facecolor(self)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
self._alpha3d = PolyCollection.get_alpha(self)
self.stale = True
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def do_3d_projection(self, renderer):
"""
Perform the 3D projection for this object.
"""
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d._proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei])
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
else:
cedge = cedge.repeat(len(xyzlist), axis=0)
# sort by depth (furthest drawn first)
z_segments_2d = sorted(
((self._zsortfunc(zs), np.column_stack([xs, ys]), fc, ec, idx)
for idx, ((xs, ys, zs), fc, ec)
in enumerate(zip(xyzlist, cface, cedge))),
key=lambda x: x[0], reverse=True)
segments_2d = [s for z, s, fc, ec, idx in z_segments_2d]
if self._codes3d is not None:
codes = [self._codes3d[idx] for z, s, fc, ec, idx in z_segments_2d]
PolyCollection.set_verts_and_codes(self, segments_2d, codes)
else:
PolyCollection.set_verts(self, segments_2d, self._closed)
self._facecolors2d = [fc for z, s, fc, ec, idx in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec, idx in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d._proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
elif tzs.size > 0:
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
else:
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
def set_alpha(self, alpha):
"""
Set the alpha transparencies of the collection.
Parameters
----------
alpha : float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors3d = mcolors.to_rgba_array(
self._facecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.to_rgba_array(
self._edgecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
self.stale = True
def get_facecolor(self):
return self._facecolors2d
def get_edgecolor(self):
return self._edgecolors2d
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d, codes = _paths_to_3d_segments_with_codes(
col.get_paths(), zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts_and_codes(segments_3d, codes)
col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
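# Example (sketch): juggle_axes([1], [2], [3], 'x') returns ([3], [1], [2]):
# the original (xs, ys) data ends up in the plane orthogonal to the x axis.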
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
def _get_colors(c, num):
"""Stretch the color argument to provide the required number *num*."""
return np.broadcast_to(
mcolors.to_rgba_array(c) if len(c) else [0, 0, 0, 0],
(num, 4))
@cbook.deprecated("3.1")
def get_colors(c, num):
"""Stretch the color argument to provide the required number *num*."""
return _get_colors(c, num)
def _zalpha(colors, zs):
"""Modify the alphas of the color list according to depth."""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
if len(zs) == 0:
return np.zeros((0, 4))
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
rgba = np.broadcast_to(mcolors.to_rgba_array(colors), (len(zs), 4))
return np.column_stack([rgba[:, :3], rgba[:, 3] * sats])
@cbook.deprecated("3.1")
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth."""
return _zalpha(colors, zs)
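# Overall usage sketch (assumes a standard matplotlib install; not part of the
# original module):
#   import matplotlib.pyplot as plt
#   from mpl_toolkits.mplot3d import Axes3D  # noqa: registers the '3d' projection
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='3d')
#   ax.plot([0, 1], [0, 1], [0, 1])  # internally builds the Line3D defined above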
| [
"[email protected]"
] | |
d3929dc97598aab6a7af1debfbe632157c441bb5 | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/request/AlipayOpenMiniVersionListQueryRequest.py | ad88c60fc343c2034182b816202dcf7724a44190 | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 3,184 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniVersionListQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.mini.version.list.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
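# Usage sketch (illustrative values; in practice the request is executed via
# this SDK's client, which signs and posts the params):
#   req = AlipayOpenMiniVersionListQueryRequest()
#   req.notify_url = "https://example.com/notify"   # hypothetical callback URL
#   params = req.get_params()   # {'method': 'alipay.open.mini.version.list.query', ...}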
| [
"[email protected]"
] | |
d9aecb93dc9206914cef8b2032e80586cb4021f3 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/eqptcapacity/l3totalusagecap1w.py | 8a9e840a82e552ac433c1581e5975f775e7ff967 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,132 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class L3TotalUsageCap1w(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.L3TotalUsageCap1w", "Layer3 total entries max capacity")
counter = CounterMeta("v6TotalEpCap", CounterCategory.GAUGE, "count", "Total v6 Endpoints capacity")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "v6TotalEpCapLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v6TotalEpCapMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v6TotalEpCapMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v6TotalEpCapAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v6TotalEpCapSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "v6TotalEpCapTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v6TotalEpCapThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "v6TotalEpCapTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v6TotalEpCapTr"
meta._counters.append(counter)
counter = CounterMeta("v4TotalEpCap", CounterCategory.GAUGE, "count", "Total v4 Endpoints capacity")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "v4TotalEpCapLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v4TotalEpCapMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v4TotalEpCapMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v4TotalEpCapAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v4TotalEpCapSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "v4TotalEpCapTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v4TotalEpCapThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "v4TotalEpCapTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v4TotalEpCapTr"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityL3TotalUsageCap1w"
meta.rnFormat = "CDeqptcapacityL3TotalUsageCap1w"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Layer3 total entries max capacity stats in 1 week"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.eqptcapacity.L3TotalUsageCap")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.rnPrefixes = [
('CDeqptcapacityL3TotalUsageCap1w', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "v4TotalEpCapAvg", "v4TotalEpCapAvg", 36710, PropCategory.IMPLICIT_AVG)
prop.label = "Total v4 Endpoints capacity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapAvg", prop)
prop = PropMeta("str", "v4TotalEpCapLast", "v4TotalEpCapLast", 36704, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total v4 Endpoints capacity current value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapLast", prop)
prop = PropMeta("str", "v4TotalEpCapMax", "v4TotalEpCapMax", 36709, PropCategory.IMPLICIT_MAX)
prop.label = "Total v4 Endpoints capacity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapMax", prop)
prop = PropMeta("str", "v4TotalEpCapMin", "v4TotalEpCapMin", 36708, PropCategory.IMPLICIT_MIN)
prop.label = "Total v4 Endpoints capacity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapMin", prop)
prop = PropMeta("str", "v4TotalEpCapSpct", "v4TotalEpCapSpct", 36711, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v4 Endpoints capacity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapSpct", prop)
prop = PropMeta("str", "v4TotalEpCapThr", "v4TotalEpCapThr", 36712, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v4 Endpoints capacity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v4TotalEpCapThr", prop)
prop = PropMeta("str", "v4TotalEpCapTr", "v4TotalEpCapTr", 36714, PropCategory.IMPLICIT_TREND)
prop.label = "Total v4 Endpoints capacity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapTr", prop)
prop = PropMeta("str", "v4TotalEpCapTrBase", "v4TotalEpCapTrBase", 36713, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total v4 Endpoints capacity trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapTrBase", prop)
prop = PropMeta("str", "v4TotalEpCapTtl", "v4TotalEpCapTtl", 45299, PropCategory.IMPLICIT_TOTAL)
prop.label = "Total v4 Endpoints capacity total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapTtl", prop)
prop = PropMeta("str", "v6TotalEpCapAvg", "v6TotalEpCapAvg", 36731, PropCategory.IMPLICIT_AVG)
prop.label = "Total v6 Endpoints capacity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapAvg", prop)
prop = PropMeta("str", "v6TotalEpCapLast", "v6TotalEpCapLast", 36725, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total v6 Endpoints capacity current value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapLast", prop)
prop = PropMeta("str", "v6TotalEpCapMax", "v6TotalEpCapMax", 36730, PropCategory.IMPLICIT_MAX)
prop.label = "Total v6 Endpoints capacity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapMax", prop)
prop = PropMeta("str", "v6TotalEpCapMin", "v6TotalEpCapMin", 36729, PropCategory.IMPLICIT_MIN)
prop.label = "Total v6 Endpoints capacity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapMin", prop)
prop = PropMeta("str", "v6TotalEpCapSpct", "v6TotalEpCapSpct", 36732, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v6 Endpoints capacity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapSpct", prop)
prop = PropMeta("str", "v6TotalEpCapThr", "v6TotalEpCapThr", 36733, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v6 Endpoints capacity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v6TotalEpCapThr", prop)
prop = PropMeta("str", "v6TotalEpCapTr", "v6TotalEpCapTr", 36735, PropCategory.IMPLICIT_TREND)
prop.label = "Total v6 Endpoints capacity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapTr", prop)
prop = PropMeta("str", "v6TotalEpCapTrBase", "v6TotalEpCapTrBase", 36734, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total v6 Endpoints capacity trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapTrBase", prop)
prop = PropMeta("str", "v6TotalEpCapTtl", "v6TotalEpCapTtl", 45300, PropCategory.IMPLICIT_TOTAL)
prop.label = "Total v6 Endpoints capacity total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapTtl", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
e1bbce8655b1596bb2a77c6db900e7a854d70cf5 | 2c16e24486ac92bbd37f5c6d0d00ec4ba4d48e56 | /ex/ex1.py | 0d5193b36e4107bb3f5edf45a87b64307424927a | [] | no_license | alagram/lpthw | 386b6cf7534e2f7dba2e5832d6975107f27ceb9b | 656e7526006de80354917da881cbcbb3dbe8523a | refs/heads/master | 2021-01-10T20:55:35.461722 | 2014-09-16T18:33:50 | 2014-09-16T18:33:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | # print "Hello World!"
# print "Hello Again"
# print "I like tying this."
# print "This is fun."
# print "Yay! Printing."
# print "I'd much rather you 'not'."
# print 'I "said" do not tocuh this.'
print "I am still printing..."
| [
"[email protected]"
] | |
46efd06e7181e3095d182fdcacca6baea3973712 | 8d375652e44b67d73102fee7abc1abaab4cb4329 | /mcompiler/kernel/makeref.py | 9dfdeb93aea606402be14eef4fbc0d4790b57a87 | [
"MIT"
] | permissive | paulscottrobson/old-m-versions | 6d2061e36f2a5aaef388a4786406f876f0a06e0b | c2edb4200d32e066223ace4fd05837a485302645 | refs/heads/master | 2020-04-04T03:09:25.399283 | 2018-11-01T12:14:57 | 2018-11-01T12:14:57 | 155,709,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | # ***********************************************************************************************
# ***********************************************************************************************
#
# Name : makeref.py
# Purpose : make reference file.
# Author : Paul Robson ([email protected])
# Created : 17th September 2018
#
# ***********************************************************************************************
# ***********************************************************************************************
import re
references = {}
#
# Read in the listing file, and extract lines with label values on it.
# (first bit is snasm-only)
#
src = [x.strip().lower() for x in open("boot.img.vice").readlines()]
#
# For each line, see if it fits the <label> = $<address>
#
for l in src:
if l.find(" _definition_") >= 0:
#print(l)
m = re.match("^al\s+c\:([0-9a-f]+)\s+_definition_([_0-9a-fmro]+)$",l)
assert m is not None,l
#
# If so, extract name and address
#
name = m.group(2)
address = int(m.group(1),16)
#
# If it is definition, get name, checking if it is a macro and
# convert back to standard ASCII
#
isMacro = False
if name[-6:] == "_macro":
name = name[:-6]
isMacro = True
name = "".join([chr(int(x,16)) for x in name.split("_")])
name = name.lower()
if isMacro:
name = "&&"+name
references[name.lower()] = address
#
# Write the file out.
#
keys = [x for x in references]
keys.sort(key = lambda x:references[x])
ref = "\n".join(["{0}:=${1:06x}".format(x,references[x]) for x in keys])
h = open("boot.dict","w").write(ref+"\n")
| [
"[email protected]"
] | |
2a976186b04e2414a02608208b9b889bdc4db0de | 1b8fba01309da37f8d0ff408765c1d545fc588d6 | /tests/data/test_d2go_datasets.py | 8f5c1514bfeff7ecb756ed09dba2c86dcd1c1ecd | [
"Apache-2.0"
] | permissive | supriyar/d2go | 9bd54bcb2704c91d7bf0d5fceab2ac4f23d59346 | 9dc1600b05ecf60fab556599b4c0bc6c32837449 | refs/heads/main | 2023-08-11T16:19:50.578547 | 2021-10-01T17:43:32 | 2021-10-01T17:44:49 | 413,646,825 | 0 | 0 | Apache-2.0 | 2021-10-05T02:20:59 | 2021-10-05T02:20:58 | null | UTF-8 | Python | false | false | 10,262 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import os
import unittest
import d2go.data.extended_coco as extended_coco
from d2go.data.keypoint_metadata_registry import (
KEYPOINT_METADATA_REGISTRY,
KeypointMetadata,
get_keypoint_metadata,
)
from d2go.data.utils import (
maybe_subsample_n_images,
AdhocDatasetManager,
COCOWithClassesToUse,
)
from d2go.runner import Detectron2GoRunner
from d2go.utils.testing.data_loader_helper import (
LocalImageGenerator,
create_toy_dataset,
)
from d2go.utils.testing.helper import tempdir
from detectron2.data import DatasetCatalog, MetadataCatalog
from mobile_cv.common.misc.file_utils import make_temp_directory
def create_test_images_and_dataset_json(data_dir, num_images=10, num_classes=-1):
# create image and json
image_dir = os.path.join(data_dir, "images")
os.makedirs(image_dir)
json_dataset, meta_data = create_toy_dataset(
LocalImageGenerator(image_dir, width=80, height=60),
num_images=num_images,
num_classes=num_classes,
)
json_file = os.path.join(data_dir, "{}.json".format("inj_ds1"))
with open(json_file, "w") as f:
json.dump(json_dataset, f)
return image_dir, json_file
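# Example (sketch): image_dir, json_file = create_test_images_and_dataset_json(tmp)
# writes 10 toy 80x60 images under <tmp>/images and a COCO-style <tmp>/inj_ds1.json.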
class TestD2GoDatasets(unittest.TestCase):
def test_coco_conversions(self):
test_data_0 = {
"info": {},
"imgs": {
"img_1": {
"file_name": "0.jpg",
"width": 600,
"height": 600,
"id": "img_1",
}
},
"anns": {0: {"id": 0, "image_id": "img_1", "bbox": [30, 30, 60, 20]}},
"imgToAnns": {"img_1": [0]},
"cats": {},
}
test_data_1 = copy.deepcopy(test_data_0)
test_data_1["imgs"][123] = test_data_1["imgs"].pop("img_1")
test_data_1["imgs"][123]["id"] = 123
test_data_1["anns"][0]["image_id"] = 123
test_data_1["imgToAnns"][123] = test_data_1["imgToAnns"].pop("img_1")
for test_data, exp_output in [(test_data_0, [0, 0]), (test_data_1, [123, 123])]:
with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
src_json = os.path.join(tmp_dir, "source.json")
out_json = os.path.join(tmp_dir, "output.json")
with open(src_json, "w") as h_in:
json.dump(test_data, h_in)
out_json = extended_coco.convert_coco_text_to_coco_detection_json(
src_json, out_json
)
self.assertEqual(out_json["images"][0]["id"], exp_output[0])
self.assertEqual(out_json["annotations"][0]["image_id"], exp_output[1])
def test_annotation_rejection(self):
img_list = [
{"id": 0, "width": 50, "height": 50, "file_name": "a.png"},
{"id": 1, "width": 50, "height": 50, "file_name": "b.png"},
]
ann_list = [
[
{
"id": 0,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [0, 0, 10, 10],
},
{
"id": 1,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [45, 45, 10, 10],
},
{
"id": 2,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 100,
"bbox": [-5, -5, 10, 10],
},
{
"id": 3,
"image_id": 0,
"category_id": 0,
"segmentation": [[0, 0, 10, 0, 10, 10, 0, 10]],
"area": 0,
"bbox": [5, 5, 0, 0],
},
{
"id": 4,
"image_id": 0,
"category_id": 0,
"segmentation": [[]],
"area": 25,
"bbox": [5, 5, 5, 5],
},
],
[
{
"id": 5,
"image_id": 1,
"category_id": 0,
"segmentation": [[]],
"area": 100,
"bbox": [0, 0, 0, 0],
},
],
]
out_dict_list = extended_coco.convert_to_dict_list(
"",
[0],
img_list,
ann_list,
)
self.assertEqual(len(out_dict_list), 1)
@tempdir
def test_coco_injection(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds1", "inj_ds2"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir, "/mnt/fair"],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file, "inj_ds2"],
]
]
)
runner.register(cfg)
inj_ds1 = DatasetCatalog.get("inj_ds1")
self.assertEqual(len(inj_ds1), 10)
for dic in inj_ds1:
self.assertEqual(dic["width"], 80)
self.assertEqual(dic["height"], 60)
@tempdir
def test_sub_dataset(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds3"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"DATASETS.TEST",
("inj_ds3",),
"D2GO_DATA.TEST.MAX_IMAGES",
1,
]
]
)
runner.register(cfg)
with maybe_subsample_n_images(cfg) as new_cfg:
test_loader = runner.build_detection_test_loader(
new_cfg, new_cfg.DATASETS.TEST[0]
)
self.assertEqual(len(test_loader), 1)
def test_coco_metadata_registry(self):
@KEYPOINT_METADATA_REGISTRY.register()
def TriangleMetadata():
return KeypointMetadata(
names=("A", "B", "C"),
flip_map=(
("A", "B"),
("B", "C"),
),
connection_rules=[
("A", "B", (102, 204, 255)),
("B", "C", (51, 153, 255)),
],
)
tri_md = get_keypoint_metadata("TriangleMetadata")
self.assertEqual(tri_md["keypoint_names"][0], "A")
self.assertEqual(tri_md["keypoint_flip_map"][0][0], "A")
self.assertEqual(tri_md["keypoint_connection_rules"][0][0], "A")
@tempdir
def test_coco_metadata_register(self, tmp_dir):
@KEYPOINT_METADATA_REGISTRY.register()
def LineMetadata():
return KeypointMetadata(
names=("A", "B"),
flip_map=(("A", "B"),),
connection_rules=[
("A", "B", (102, 204, 255)),
],
)
image_dir, json_file = create_test_images_and_dataset_json(tmp_dir)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["inj_ds"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file],
"D2GO_DATA.DATASETS.COCO_INJECTION.KEYPOINT_METADATA",
["LineMetadata"],
]
]
)
runner.register(cfg)
inj_md = MetadataCatalog.get("inj_ds")
self.assertEqual(inj_md.keypoint_names[0], "A")
self.assertEqual(inj_md.keypoint_flip_map[0][0], "A")
self.assertEqual(inj_md.keypoint_connection_rules[0][0], "A")
@tempdir
def test_coco_create_adhoc_class_to_use_dataset(self, tmp_dir):
image_dir, json_file = create_test_images_and_dataset_json(
tmp_dir, num_classes=2
)
runner = Detectron2GoRunner()
cfg = runner.get_default_cfg()
cfg.merge_from_list(
[
str(x)
for x in [
"D2GO_DATA.DATASETS.COCO_INJECTION.NAMES",
["test_adhoc_ds", "test_adhoc_ds2"],
"D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS",
[image_dir, image_dir],
"D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES",
[json_file, json_file],
]
]
)
runner.register(cfg)
# Test adhoc classes to use
AdhocDatasetManager.add(COCOWithClassesToUse("test_adhoc_ds", ["class_0"]))
ds_list = DatasetCatalog.get("test_adhoc_ds@1classes")
self.assertEqual(len(ds_list), 5)
# Test adhoc classes to use with suffix removal
AdhocDatasetManager.add(
COCOWithClassesToUse("test_adhoc_ds2@1classes", ["class_0"])
)
ds_list = DatasetCatalog.get("test_adhoc_ds2@1classes")
self.assertEqual(len(ds_list), 5)
| [
"[email protected]"
] | |
0d2ea1c5f31a044d68ce7bb06f65aaa2ee8a1422 | 327981aeef801fec08305d70270deab6f08bc122 | /13.tkinter与银行系统实战/thinker/18.Combobox下拉控件.py | 0dc0692ce37fde2328c063bb484b27127a142176 | [] | no_license | AWangHe/Python-basis | 2872db82187b169226271c509778c0798b151f50 | 2e3e9eb6da268f765c7ba04f1aefc644d50c0a29 | refs/heads/master | 2020-03-20T12:15:44.491323 | 2018-06-15T08:24:19 | 2018-06-15T08:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # -*- coding: utf-8 -*-
import tkinter
from tkinter import ttk
# Create the main window
win = tkinter.Tk()
# Set the window title
win.title("魔兽世界")
# Set size and position: 400x400, offset 400 from the left edge and 100 from the top
win.geometry("400x400+400+100")
# Bind a variable to the combobox
cv = tkinter.StringVar()
com = ttk.Combobox(win, textvariable = cv)
com.pack()
# Set the dropdown values
com["value"] = ("济南", "青岛", "济宁")
# Set the default selection
com.current(0)
# Bind the selection event
def func(event):
print(com.get())
print(cv.get())
com.bind("<<ComboboxSelected>>", func)
win.mainloop()
| [
"[email protected]"
] | |
c6e6e1ef088631e80462884b26b6f3bdfea593fb | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/client/messenger/proto/bw_chat2/find_criteria.py | ea24304c044114f780246fdbce4279659cbd77a3 | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,701 | py | # 2016.02.14 12:42:53 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/messenger/proto/bw_chat2/find_criteria.py
from constants import PREBATTLE_TYPE
from messenger.ext import channel_num_gen
from messenger.m_constants import BATTLE_CHANNEL, PROTO_TYPE
from messenger.proto.interfaces import IEntityFindCriteria
class BWBattleChannelFindCriteria(IEntityFindCriteria):
def __init__(self):
super(BWBattleChannelFindCriteria, self).__init__()
self.__ids = []
for item in BATTLE_CHANNEL.ALL:
clientID = channel_num_gen.getClientID4BattleChannel(item.name)
if clientID:
self.__ids.append(clientID)
clientID = channel_num_gen.getClientID4Prebattle(PREBATTLE_TYPE.SQUAD)
if clientID:
self.__ids.append(clientID)
def filter(self, channel):
return channel.getProtoType() is PROTO_TYPE.BW_CHAT2 and channel.getClientID() in self.__ids
class BWPrebattleChannelFindCriteria(IEntityFindCriteria):
def filter(self, channel):
return channel.getProtoType() is PROTO_TYPE.BW_CHAT2 and channel.getPrebattleType()
class BWChatTypeFindCriteria(IEntityFindCriteria):
def __init__(self, chatType):
super(BWChatTypeFindCriteria, self).__init__()
self.__chatType = chatType
def filter(self, channel):
return channel.getProtoType() is PROTO_TYPE.BW_CHAT2 and channel.getProtoData().chatType == self.__chatType
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\proto\bw_chat2\find_criteria.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:42:53 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
8edf548db029dd530fa8bddd6f142a6ecd491f48 | 3dfb4ee39555b30e6e0c6fcdbef371864e69f694 | /google-cloud-sdk/lib/googlecloudsdk/api_lib/dns/transaction_util.py | e0444226d947ade648184a8d0f468d647f579eed | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | MD-Anderson-Bioinformatics/NG-CHM_Galaxy | 41d1566d5e60416e13e023182ca4351304381a51 | dcf4886d4ec06b13282143ef795c5f0ff20ffee3 | refs/heads/master | 2021-06-02T21:04:12.194964 | 2021-04-29T14:45:32 | 2021-04-29T14:45:32 | 130,249,632 | 0 | 1 | null | 2020-07-24T18:35:21 | 2018-04-19T17:25:33 | Python | UTF-8 | Python | false | false | 5,070 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for record-set transactions."""
import os
from dns import rdatatype
from googlecloudsdk.api_lib.dns import import_util
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import resource_printer
from googlecloudsdk.third_party.apis.dns.v1 import dns_v1_messages as messages
import yaml
DEFAULT_PATH = 'transaction.yaml'
class CorruptedTransactionFileError(core_exceptions.Error):
def __init__(self):
super(CorruptedTransactionFileError, self).__init__(
'Corrupted transaction file.\n\n'
'Please abort and start a new transaction.')
def WriteToYamlFile(yaml_file, change):
"""Writes the given change in yaml format to the given file.
Args:
yaml_file: file, File into which the change should be written.
change: Change, Change to be written out.
"""
printer = resource_printer.YamlPrinter(yaml_file)
printer.AddRecord(change)
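# Illustrative pairing with TransactionFile below (an assumed caller pattern,
# not shown in this module); note that TransactionFile requires the file to
# already exist, even when opened for writing:
#   with TransactionFile(DEFAULT_PATH, 'w') as trans_file:
#     WriteToYamlFile(trans_file, change)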
def _RecordSetsFromDictionaries(record_set_dictionaries):
"""Converts list of record-set dictionaries into list of ResourceRecordSets.
Args:
record_set_dictionaries: [{str:str}], list of record-sets as dictionaries.
Returns:
list of ResourceRecordSets equivalent to given list of yaml record-sets
"""
record_sets = []
for record_set_dict in record_set_dictionaries:
record_set = messages.ResourceRecordSet()
# Need to assign kind to default value for useful equals comparisons.
record_set.kind = record_set.kind
record_set.name = record_set_dict['name']
record_set.ttl = record_set_dict['ttl']
record_set.type = record_set_dict['type']
record_set.rrdatas = record_set_dict['rrdatas']
record_sets.append(record_set)
return record_sets
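# For reference, each input dictionary is expected to carry exactly the keys
# read above (values are illustrative):
#   {'name': 'www.example.com.', 'ttl': 300, 'type': 'A',
#    'rrdatas': ['192.0.2.1']}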
def ChangeFromYamlFile(yaml_file):
"""Returns the change contained in the given yaml file.
Args:
yaml_file: file, A yaml file with change.
Returns:
Change, the change contained in the given yaml file.
Raises:
CorruptedTransactionFileError: if the record_set_dictionaries are invalid
"""
try:
change_dict = yaml.safe_load(yaml_file) or {}
except yaml.error.YAMLError:
raise CorruptedTransactionFileError()
if (change_dict.get('additions') is None or
change_dict.get('deletions') is None):
raise CorruptedTransactionFileError()
change = messages.Change()
change.additions = _RecordSetsFromDictionaries(change_dict['additions'])
change.deletions = _RecordSetsFromDictionaries(change_dict['deletions'])
return change
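# A transaction file that parses successfully has this shape (illustrative
# values; both top-level keys must be present, even if empty):
#   additions:
#   - name: www.example.com.
#     ttl: 300
#     type: A
#     rrdatas:
#     - 192.0.2.1
#   deletions: []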
def CreateRecordSetFromArgs(args):
"""Creates and returns a record-set from the given args.
Args:
args: The arguments to use to create the record-set.
Raises:
ToolException: If given record-set type is not supported
Returns:
ResourceRecordSet, the record-set created from the given args.
"""
rd_type = rdatatype.from_text(args.type)
if rd_type not in import_util.RDATA_TRANSLATIONS:
raise exceptions.ToolException(
'unsupported record-set type [{0}]'.format(args.type))
record_set = messages.ResourceRecordSet()
# Need to assign kind to default value for useful equals comparisons.
record_set.kind = record_set.kind
record_set.name = util.AppendTrailingDot(args.name)
record_set.ttl = args.ttl
record_set.type = args.type
record_set.rrdatas = args.data
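  # TXT and SPF rdata must be stored as quoted strings, so each datum is
  # wrapped via import_util.QuotedText before being assigned.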
if rd_type is rdatatype.TXT or rd_type is rdatatype.SPF:
record_set.rrdatas = [import_util.QuotedText(datum) for datum in args.data]
return record_set
class TransactionFile(object):
"""Context for reading/writing from/to a transaction file."""
def __init__(self, trans_file_path, mode='r'):
if not os.path.isfile(trans_file_path):
raise exceptions.ToolException(
'transaction not found at [{0}]'.format(trans_file_path))
self.__trans_file_path = trans_file_path
try:
self.__trans_file = open(trans_file_path, mode)
except IOError as exp:
msg = 'unable to open transaction [{0}] because [{1}]'
msg = msg.format(trans_file_path, exp)
raise exceptions.ToolException(msg)
def __enter__(self):
return self.__trans_file
def __exit__(self, typ, value, traceback):
self.__trans_file.close()
if typ is IOError or typ is yaml.YAMLError:
msg = 'unable to read/write transaction [{0}] because [{1}]'
msg = msg.format(self.__trans_file_path, value)
raise exceptions.ToolException(msg)
| [
"[email protected]"
] | |
58a5ffe0456fe028034da211b3db8c3daf7f4530 | 7642f70954b73aca0d56f03b3e3577ee5648c752 | /ppm/settings.py | 5bfa923c2b43f7b3d5b3ae1d8a2264a866af4505 | [] | no_license | alviandk/ppm | 8e5dfb2ca9a98b460c9b0a71be68b5310ed56d87 | eea4d37904f86b4ec9cded6091b89d18244b85a9 | refs/heads/master | 2021-01-10T21:05:22.931101 | 2014-11-13T09:24:36 | 2014-11-13T09:24:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | """
Django settings for ppm project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_v)b*pi3yhflh(bvrrk+rq9*fm5=b+@yh03bdgb94h95+1=#w-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'inventory',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ppm.urls'
WSGI_APPLICATION = 'ppm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'inventory',
'USER' : 'root',
'PASSWORD' : '',
'HOST': '127.0.0.1',
}
}
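# 'PORT' is omitted above, so the MySQL client library falls back to its
# default port (typically 3306).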
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] |