| column | type | values / range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3–10.2M |
| authors | list | length 1 |
| author_id | string | length 1–132 |
7eada46e184e56ffe1faad35d8e3ba614acbbfa3 | bb31c0062354bbb0df70692e904c949a00973503 | /21_list_pop.py | 0426fabad056ce4b67e15136066a60f3c05bd401 | [] | no_license | millanmilu/Learn-Python-with-Milu- | c42df5aa7832fba75015b7af29d6009489e00ec5 | 3b4714b849dff0a0ef3cc91fd102840fbcf00e43 | refs/heads/master | 2022-04-26T03:29:38.990189 | 2020-04-28T13:17:28 | 2020-04-28T13:17:28 | 259,634,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py |
names = ['milu', 'Misan', 'millan', 'Babloo', 'anil']
print("It delete items from end of the list ")
names.pop()
print(names)
print("pop(0) returns the head (start), pop() or pop(-1) returns the tail(end)")
print(names.pop(0))
print(" LIFO -LAST IN ,FIRST OUT ;FIFO - FIRST IN ,FIRST OUT")
"""It’s computing jargon time! Don’t worry, these
won’t be on the final exam. If you use append() to
add new items to the end and pop() to remove
them from the same end, you’ve implemented a
data structure known as a LIFO (last in, first out)
queue. This is more commonly known as a stack.
pop(0) would create a FIFO (first in, first out)
queue. These are useful when you want to collect
data as they arrive and work with either the oldest
first (FIFO) or the newest first (LIFO)."""
| ["[email protected]"] | |
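
A minimal sketch (not part of the original file) of the stack/queue distinction the docstring describes; `collections.deque` is added here because `list.pop(0)` shifts every remaining element (O(n)), while `deque.popleft()` is O(1):

from collections import deque

# LIFO (stack): append() pushes, pop() removes the newest item.
stack = []
stack.append('a')
stack.append('b')
assert stack.pop() == 'b'   # last in, first out

# FIFO (queue): append() enqueues, pop(0) removes the oldest item.
queue = ['a', 'b']
assert queue.pop(0) == 'a'  # first in, first out

# deque does the FIFO removal in constant time.
dq = deque(['a', 'b'])
assert dq.popleft() == 'a'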
5d02854a055be280f4890d03f09c82e468df1e6f | f25d477be296a63aac156c8dd907397dc156024c | /vse/handlers/test.py | 421833f5a7e1c7a36c32d33ccd95e6f2e490ca40 | [] | no_license | cbaxter1988/validation_scripting_engine | f4e0a834632b5499c4d7a13b18d9208b27296325 | 86a8bd5061016f838747ea045bf3e32c0dd98e94 | refs/heads/master | 2022-11-23T05:23:05.343104 | 2020-07-22T06:55:47 | 2020-07-22T06:55:47 | 275,946,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py |
from vse.handlers import Handler, HandlerResult
class TestHandler(Handler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def execute(self, **kwargs) -> HandlerResult:
poked = self.params.get("poked", False)
if poked:
self.result.status = True
self.result.msg = "Hey, You Poked Me"
else:
# print(self.params)
self.result.status = False
self.result.msg = "What do you want?"
self.check_expectation()
return self.result
| ["[email protected]"] | |
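
A hypothetical usage sketch for the handler above. It assumes `Handler(**kwargs)` stores a `params` keyword as `self.params` and initialises `self.result` as a `HandlerResult`; neither is shown in this file, so treat the constructor call as illustrative only:

# Assumption: Handler(**kwargs) populates self.params and self.result.
handler = TestHandler(params={"poked": True})
result = handler.execute()
print(result.status, result.msg)  # expected: True "Hey, You Poked Me"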
2a6703236f533adc6ad37da1e13ce782b2c31505 | cc0da95420131620ab5d49c48b38d038f1b67833 | /scripts/update_reddit_cache.py | 2a4e92028b31bb97d2afa82e4882bff9491f28f0 | ["MIT"] | permissive | madeyoga/Nano-Bot | 4daee9a74351ca64329ec33ee7b565bba4cf3616 | 3966957d229aa2e3ea9945b2d9a96fb3353b910c | refs/heads/master | 2022-06-25T17:36:41.610957 | 2022-06-13T04:03:05 | 2022-06-13T04:03:05 | 140,231,221 | 13 | 10 | MIT | 2021-09-27T11:11:19 | 2018-07-09T04:28:35 | Python | UTF-8 | Python | false | false | 1,053 | py |
import os
import praw
class Subreddits:
MEMES = "memes"
DANKMEMES = "dankmemes"
WTF = "wtf"
GRANDORDER = "grandorder"
WAIFU = "Waifu"
SCATHACH = "scathach"
FGOFANART = "FGOfanart"
ANIME = "anime"
ANIMEMES = "Animemes"
AWWNIME = "awwnime"
AZURELANE = "AzureLane"
TSUNDERES = "Tsunderes"
ANIMEWALLPAPER = "Animewallpaper" # ANIME WALLPAPER ARTS
MOESCAPE = "Moescape" # ANIME WALLPAPER ARTS
MAMARAIKOU = "MamaRaikou"
SABER = "Saber"
FGOCOMICS = "FGOcomics"
FATEPRISMAILLYA = "FatePrismaIllya"
ILLYASVIEL = "Illyasviel"
reddit = praw.Reddit(
client_id=os.environ['REDDIT_CLIENT_ID'],
client_secret=os.environ['REDDIT_CLIENT_SECRET'],
user_agent=os.environ['REDDIT_USER_AGENT']
)
submissions = list(reddit.subreddit(Subreddits.TSUNDERES).hot())
for submission in submissions:
# Post hint & url
print(submission.__dict__)
break
## print(submission.url,
## submission.is_self,
## submission.over_18,
## submission.stickied)
| ["[email protected]"] | |
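
The commented-out attributes at the end (`url`, `is_self`, `over_18`, `stickied`) are the usual fields for filtering link posts; a short sketch of such a filter, reusing the `reddit` client and `Subreddits` class defined above:

# Keep only link posts that are safe-for-work and not pinned.
image_urls = [
    s.url
    for s in reddit.subreddit(Subreddits.MEMES).hot(limit=25)
    if not s.is_self and not s.over_18 and not s.stickied
]
print(image_urls[:5])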
0c03f297ce168a7e9f0f9498333600c01a37b205 | 2c78b0b78f57dda018fe382a4ddda964eb3e68fd | /基础练习/jisuanji.py | f3732c06954f05def4711a8b00ef2dbfd787d19d | [] | no_license | Cola1995/s3 | 3a4621035928dcaa42c3117d25e88cf46a5f0721 | d5612d63dac415f861d379b20ba6a165faf213ae | refs/heads/master | 2020-04-13T14:14:38.753238 | 2019-03-22T01:36:46 | 2019-03-22T01:36:46 | 163,255,923 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py |
import re
suan='1+(9+3*1-4+(1+4+(2+1*2+4/2)+(1+1))+(2*4))'
f = re.search(r'\([^()]*\)', suan).group()
print(f)
s='(2+1*2+1*3+3*3+9/3)'
s_t=''
if '*' in s or '/' in s:
for i in s:
if i=='*':
jie=int(s[s.index(i)-1])*int(s[s.index(i)+1])
s=s.replace(s[s.index(i)-1:s.index(i)+2],str(jie))
print(s)
elif i=='/':
jie1=int(s[s.index(i)-1])/int(s[s.index(i)+1])
s=s.replace(s[s.index(i)-1:s.index(i)+2],str(jie1))
            print('The string after replacement is %s' % s)
else:
print("ssssss%s"%s)
index=s.count('+')+s.count('-')
for jj in range(index+1):
for i in s:
if i=='+':
sd=int(s[s.index(i)-1])+int(s[s.index(i)+1])
s=s.replace(s[s.index(i)-1:s.index(i)+2],str(sd))
print(s)
# else:
# for i in s_t:
# if i== '+':
# int_jg=int(s_t[s_t.index(i)-1])+int(s_t[s_t.index(i)+1])
# t_jia=s_t.replace(s_t[s_t.index(i)-1:s_t.index(i)+2],str(int_jg))
# print(t_jia)
| ["[email protected]"] | |
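
The script above reduces the innermost parenthesised group by hand, one operator class at a time. A compact sketch of the same idea, done with a proper token list so multi-digit numbers and repeated operators work (illustrative, not part of the original file; unary minus is not handled):

import re

def calc_flat(expr):
    # Evaluate a parenthesis-free expression: first * and /, then + and -.
    tokens = re.findall(r'\d+\.?\d*|[+\-*/]', expr)
    i = 1
    while i < len(tokens) - 1:
        if tokens[i] in '*/':
            a, b = float(tokens[i - 1]), float(tokens[i + 1])
            tokens[i - 1:i + 2] = [str(a * b if tokens[i] == '*' else a / b)]
        else:
            i += 2
    total = float(tokens[0])
    for op, num in zip(tokens[1::2], tokens[2::2]):
        total = total + float(num) if op == '+' else total - float(num)
    return total

def calc(expr):
    # Reduce innermost (...) groups until no parentheses remain.
    while '(' in expr:
        inner = re.search(r'\(([^()]*)\)', expr)
        expr = expr[:inner.start()] + str(calc_flat(inner.group(1))) + expr[inner.end():]
    return calc_flat(expr)

print(calc('1+(9+3*1-4+(1+4+(2+1*2+4/2)+(1+1))+(2*4))'))  # 30.0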
40f03b4a47d7607f7ead87b54fbca5de90f106d2 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/study/others_program.py | eaf139b4920f4137f30074773b3a1a6c7017c4b9 | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py |
const request = require('request');
const uuidv4 = require('uuid/v4');
/* Checks to see if the subscription key is available
as an environment variable. If you are setting your subscription key as a
string, then comment these lines out.
If you want to set your subscription key as a string, replace the value for
the Ocp-Apim-Subscription-Key header as a string. */
const subscriptionKey="9115e0519101697df35c8c7256d54256";
if (!subscriptionKey) {
throw new Error('Environment variable for your subscription key is not set.')
};
/* If you encounter any issues with the base_url or path, make sure that you are
using the latest endpoint: https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate */
function translateText(){
let options = {
method: 'POST',
baseUrl: 'https://api.cognitive.microsofttranslator.com/',
url: 'translate',
qs: {
'api-version': '3.0',
'to': ['']
},
headers: {
        'Ocp-Apim-Subscription-Key': subscriptionKey,
'Content-type': 'application/json',
'X-ClientTraceId': uuidv4().toString()
},
body: [{
'text': 'Hello World!'
}],
json: true,
};
request(options, function(err, res, body){
console.log(JSON.stringify(body, null, 4));
});
};
// Call the function to translate text.
translateText();
| ["[email protected]"] | |
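
Despite the `.py` extension, the content above is Node.js. For reference, a hedged Python sketch of the same Translator v3 call using `requests`; the endpoint, `api-version` parameter, and `Ocp-Apim-Subscription-Key` header follow the Microsoft docs URL cited in the file, and the key is a placeholder:

import json
import uuid

import requests

SUBSCRIPTION_KEY = "<your-translator-key>"  # placeholder; never hard-code real keys

response = requests.post(
    "https://api.cognitive.microsofttranslator.com/translate",
    params={"api-version": "3.0", "to": ["de"]},
    headers={
        "Ocp-Apim-Subscription-Key": SUBSCRIPTION_KEY,
        "Content-Type": "application/json",
        "X-ClientTraceId": str(uuid.uuid4()),
    },
    json=[{"text": "Hello World!"}],
)
print(json.dumps(response.json(), indent=4))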
19338b9bbddbf1b84311569fecdfa9bf70ae8287 | d5552cda58e251e6a5983876681be8f641dea86f | /src/transformers/models/resnet/configuration_resnet.py | 2d0dbc3b0fdb409f4150d985484321766f1fcd44 | ["Apache-2.0"] | permissive | patrickvonplaten/transformers | feb121e1ee82c317ac7561836b8f95a7de25fc1f | f738502979f6787609dcf0180e6606f464692e27 | refs/heads/master | 2022-12-08T10:15:34.743198 | 2022-11-22T11:00:20 | 2022-11-22T11:00:20 | 226,201,271 | 6 | 1 | Apache-2.0 | 2019-12-05T22:39:46 | 2019-12-05T22:39:45 | null | UTF-8 | Python | false | false | 5,262 | py |
# coding=utf-8
# Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ResNet model configuration"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ResNetModel`]. It is used to instantiate an
ResNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the ResNet
[microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embedding_size (`int`, *optional*, defaults to 64):
Dimensionality (hidden size) for the embedding layer.
hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
Dimensionality (hidden size) at each stage.
depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
Depth (number of layers) for each stage.
layer_type (`str`, *optional*, defaults to `"bottleneck"`):
The layer to use, it can be either `"basic"` (used for smaller models, like resnet-18 or resnet-34) or
`"bottleneck"` (used for larger models like resnet-50 and above).
hidden_act (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
are supported.
downsample_in_first_stage (`bool`, *optional*, defaults to `False`):
If `True`, the first stage will downsample the inputs using a `stride` of 2.
out_features (`List[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`,
`"stage3"`, `"stage4"`.
Example:
```python
>>> from transformers import ResNetConfig, ResNetModel
>>> # Initializing a ResNet resnet-50 style configuration
>>> configuration = ResNetConfig()
>>> # Initializing a model (with random weights) from the resnet-50 style configuration
>>> model = ResNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "resnet"
layer_types = ["basic", "bottleneck"]
def __init__(
self,
num_channels=3,
embedding_size=64,
hidden_sizes=[256, 512, 1024, 2048],
depths=[3, 4, 6, 3],
layer_type="bottleneck",
hidden_act="relu",
downsample_in_first_stage=False,
out_features=None,
**kwargs
):
super().__init__(**kwargs)
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.layer_type = layer_type
self.hidden_act = hidden_act
self.downsample_in_first_stage = downsample_in_first_stage
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
if out_features is not None:
if not isinstance(out_features, list):
raise ValueError("out_features should be a list")
for feature in out_features:
if feature not in self.stage_names:
raise ValueError(
f"Feature {feature} is not a valid feature name. Valid names are {self.stage_names}"
)
self.out_features = out_features
class ResNetOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse("1.11")
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
]
)
@property
def atol_for_validation(self) -> float:
return 1e-3
| ["[email protected]"] | |
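
The docstring already shows how `ResNetConfig` is instantiated; a short companion sketch for the ONNX side, assuming the `transformers` package layout this file belongs to:

from transformers import ResNetConfig
from transformers.models.resnet.configuration_resnet import ResNetOnnxConfig

# A resnet-18-style configuration (basic blocks, shallower stages).
config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2],
                      hidden_sizes=[64, 128, 256, 512])
onnx_config = ResNetOnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict with the 'pixel_values' axes
print(onnx_config.atol_for_validation)  # 1e-3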
d64428aa8ab4edc892e4df136959e62393a5edc2 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/datashare/v20201001preview/get_blob_data_set.py | 79ae160c76b7217e3429731faca4a1ab166ad90b | ["Apache-2.0", "BSD-3-Clause"] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 7,209 | py |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetBlobDataSetResult',
'AwaitableGetBlobDataSetResult',
'get_blob_data_set',
]
@pulumi.output_type
class GetBlobDataSetResult:
"""
An Azure storage blob data set.
"""
def __init__(__self__, container_name=None, data_set_id=None, file_path=None, id=None, kind=None, name=None, resource_group=None, storage_account_name=None, subscription_id=None, system_data=None, type=None):
if container_name and not isinstance(container_name, str):
raise TypeError("Expected argument 'container_name' to be a str")
pulumi.set(__self__, "container_name", container_name)
if data_set_id and not isinstance(data_set_id, str):
raise TypeError("Expected argument 'data_set_id' to be a str")
pulumi.set(__self__, "data_set_id", data_set_id)
if file_path and not isinstance(file_path, str):
raise TypeError("Expected argument 'file_path' to be a str")
pulumi.set(__self__, "file_path", file_path)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if storage_account_name and not isinstance(storage_account_name, str):
raise TypeError("Expected argument 'storage_account_name' to be a str")
pulumi.set(__self__, "storage_account_name", storage_account_name)
if subscription_id and not isinstance(subscription_id, str):
raise TypeError("Expected argument 'subscription_id' to be a str")
pulumi.set(__self__, "subscription_id", subscription_id)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> str:
"""
Container that has the file path.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="dataSetId")
def data_set_id(self) -> str:
"""
Unique id for identifying a data set resource
"""
return pulumi.get(self, "data_set_id")
@property
@pulumi.getter(name="filePath")
def file_path(self) -> str:
"""
File path within the source data set
"""
return pulumi.get(self, "file_path")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource id of the azure resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Kind of data set.
Expected value is 'Blob'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> str:
"""
Resource group of storage account
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="storageAccountName")
def storage_account_name(self) -> str:
"""
Storage account name of the source data set
"""
return pulumi.get(self, "storage_account_name")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> str:
"""
Subscription id of storage account
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
System Data of the Azure resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
class AwaitableGetBlobDataSetResult(GetBlobDataSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBlobDataSetResult(
container_name=self.container_name,
data_set_id=self.data_set_id,
file_path=self.file_path,
id=self.id,
kind=self.kind,
name=self.name,
resource_group=self.resource_group,
storage_account_name=self.storage_account_name,
subscription_id=self.subscription_id,
system_data=self.system_data,
type=self.type)
def get_blob_data_set(account_name: Optional[str] = None,
data_set_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBlobDataSetResult:
"""
An Azure storage blob data set.
:param str account_name: The name of the share account.
:param str data_set_name: The name of the dataSet.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['dataSetName'] = data_set_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareName'] = share_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:datashare/v20201001preview:getBlobDataSet', __args__, opts=opts, typ=GetBlobDataSetResult).value
return AwaitableGetBlobDataSetResult(
container_name=__ret__.container_name,
data_set_id=__ret__.data_set_id,
file_path=__ret__.file_path,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
resource_group=__ret__.resource_group,
storage_account_name=__ret__.storage_account_name,
subscription_id=__ret__.subscription_id,
system_data=__ret__.system_data,
type=__ret__.type)
| ["[email protected]"] | |
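
A minimal usage sketch for the generated function above, meant to run inside a Pulumi program; the account, share, and resource-group names are placeholders:

import pulumi

result = get_blob_data_set(
    account_name="myShareAccount",
    data_set_name="myDataSet",
    resource_group_name="myResourceGroup",
    share_name="myShare",
)
pulumi.export("blobContainer", result.container_name)
pulumi.export("blobFilePath", result.file_path)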
567fe246643d90abf4f7c5c8def3e5303b8e0179 | 76aa894988b3123306030240512c1e5039b2bc75 | /scripts/0123456789abcdef/scripts/test.py | a00e67ecd316ddd7fcd81238dbdad019b22636b8 | [] | no_license | puppycodes/powny | 4c2a554ed63c2f3a80a77ccefb491d00f951d877 | a5f20ff667c2f93b72b63865f70e25b26e6d4b30 | refs/heads/master | 2021-01-23T06:29:19.506593 | 2016-06-07T16:26:48 | 2016-06-07T16:26:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py |
import urllib.request
from powny.core import (
expose,
save_job_state,
get_cas_storage,
)
# =====
@expose
def empty_method(**event):
pass
@expose
def do_urlopen(url, **_):
for _ in range(3):
urllib.request.build_opener().open(url)
save_job_state()
@expose
def failed_once(url):
save_job_state()
do_fail = get_cas_storage().replace_value(
path="failed_once_value",
value=False,
default=True,
)[0].value
if do_fail:
raise RuntimeError("A-HA-HA ANARCHY!!!111")
save_job_state()
urllib.request.build_opener().open(url)
return "OK"
| ["[email protected]"] | |
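
`failed_once` above uses powny's CAS storage so the job raises exactly once and succeeds on the retry. A library-agnostic sketch of the same fail-once pattern, with a file-backed flag standing in for the CAS value (illustrative only):

import os

FLAG = "/tmp/failed_once_flag"  # stand-in for the CAS-stored "failed_once_value"

def failed_once_generic():
    # First call: no flag yet, so leave one behind and fail.
    if not os.path.exists(FLAG):
        open(FLAG, "w").close()
        raise RuntimeError("failing exactly once")
    # Any retry sees the flag and succeeds.
    return "OK"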
32d3b8ebb43363cbb91b951fbc4078445b9f7fb5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03284/s003568973.py | 45c2a69f388a3afddf0d39105674a680865408b9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py |
a,b=map(int,input().split())
N = a / b
if N > int(N):
print(1)
else:
print(0)
| ["[email protected]"] | |
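
The snippet prints 1 exactly when `a` is not divisible by `b`; the same check is usually written with the modulo operator:

a, b = map(int, input().split())
print(0 if a % b == 0 else 1)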
e25ef155d81ca596891f43af8a1402d173ee151a | 3d705ec48c94373817e5f61d3f839988910431e3 | /lib/platform/dataprocess/spark_compute/makedata/peer_info.py | 3f6afbe05e41f38830ab26399afc5b95bae3bef5 | [] | no_license | namesuqi/zeus | 937d3a6849523ae931162cd02c5a09b7e37ebdd8 | 3445b59b29854b70f25da2950016f135aa2a5204 | refs/heads/master | 2022-07-24T14:42:28.600288 | 2018-03-29T08:03:09 | 2018-03-29T08:03:09 | 127,256,973 | 0 | 0 | null | 2022-07-07T22:57:57 | 2018-03-29T07:53:16 | Python | UTF-8 | Python | false | false | 3,470 | py |
from lib.platform.dataprocess.spark_compute.test_data import *
from lib.platform.dataprocess.spark_compute.commontool.timestamp_conversion import *
import random
import os
class PeerInfo(object):
def make_data(self, hour=''):
data_format = '%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\x1f%s\n'
with open(os.path.abspath(os.path.dirname(__file__)) + '/peer_info.txt', 'w') as writer:
if hour == '':
for hour in range(24):
for minute in range(60):
topic = 'topic=' + 'peer_info'
id = "id=" + test_file_id[random.randint(0, len(test_file_id) - 1)] + ":" + str(
random.randint(1000, 1000000))
timestamp = 'timestamp=' + str(
TimestampConversion.get_timestamp(test_day + '%02d' % hour + '%02d' % minute))
                        input_time = 'input_time=' + str(int(timestamp.split('=')[1]) + 300)
                        output_time = 'output_time=' + str(int(timestamp.split('=')[1]) + 600)
peer_id = 'peer_id=' + test_peer_id[random.randint(0, len(test_peer_id) - 1)]
sdk_version = 'sdk_version=' + test_sdk_version[random.randint(0, len(test_sdk_version) - 1)]
nat_type = 'nat_type=' + str(random.randint(1, 4))
public_ip = 'public_ip=' + '10.5.100.1'
public_port = 'public_port=' + '8888'
private_ip = 'private_ip=' + '192.168.1.110'
private_port = 'private_port=' + '8080'
macs = 'macs=' + '9C-5C-8E-87-6A-25'
writer.write(data_format % (topic, id, timestamp, input_time, output_time, peer_id, sdk_version,
nat_type, public_ip, public_port, private_ip, private_port, macs))
else:
for minute in range(60):
topic = 'topic=' + 'peer_info'
id = "id=" + test_file_id[random.randint(0, len(test_file_id) - 1)] + ":" + str(
random.randint(1000, 1000000))
timestamp = 'timestamp=' + str(
TimestampConversion.get_timestamp("20160823" + '{:0>2}'.format(hour) + '%02d' % minute))
                    input_time = 'input_time=' + str(int(timestamp.split('=')[1]) + 300)
                    output_time = 'output_time=' + str(int(timestamp.split('=')[1]) + 600)
peer_id = 'peer_id=' + test_peer_id[random.randint(0, len(test_peer_id) - 1)]
sdk_version = 'sdk_version=' + test_sdk_version[random.randint(0, len(test_sdk_version) - 1)]
nat_type = 'nat_type=' + str(random.randint(1, 4))
public_ip = 'public_ip=' + '10.5.100.1'
public_port = 'public_port=' + '8888'
private_ip = 'private_ip=' + '192.168.1.110'
private_port = 'private_port=' + '8080'
macs = 'macs=' + '9C-5C-8E-87-6A-25'
writer.write(data_format % (topic, id, timestamp, input_time, output_time, peer_id, sdk_version,
nat_type, public_ip, public_port, private_ip, private_port, macs))
if __name__ == '__main__':
pi = PeerInfo()
pi.make_data('6')
| ["[email protected]"] | |
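
Each generated line is a `\x1f`-separated (ASCII unit separator) list of `key=value` fields; a small sketch of reading one line back into a dict, assuming the script above has written `peer_info.txt` into the current directory:

def parse_peer_info_line(line):
    # Split on the unit separator, then on the first '=' of each field.
    fields = line.rstrip('\n').split('\x1f')
    return dict(field.split('=', 1) for field in fields)

with open('peer_info.txt') as reader:
    record = parse_peer_info_line(next(reader))
    print(record['peer_id'], record['nat_type'])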
a402dc5d1b9114362a3f2e00c3684052fdeaf84f | 814176f8e051575b7ab7a98ae5c72b7885273fcd | /APS/NXsas/create_nexus_data.py | 4798c7751bf1b18a5545572ffbda896908fedda8 | [] | no_license | nexusformat/exampledata | af21f0beb17b60c6375a14e982ac06e274639ada | eae516807ef7e27d1c45aab3af3a64a679154677 | refs/heads/master | 2022-12-23T18:06:46.916518 | 2022-12-16T15:50:38 | 2022-12-16T15:50:38 | 20,968,421 | 7 | 6 | null | 2022-06-15T08:04:58 | 2014-06-18T15:59:54 | Python | UTF-8 | Python | false | false | 20,635 | py |
#!/usr/bin/env python
'''
create NeXus data file with the image data from these area detector data files
stores data according to the **NXsas** application definition
:deviation: **NXsas** describes a single frame raw 2-D SAS data image.
We store a set of 2-D SAS images as a 3-D array.
:deviation: **NXsas** stores the counting time as a scalar: /entry/instrument/control/integral
We store an array of count sample_times, one for each image frame.
:see: http://download.nexusformat.org/doc/html/classes/applications/NXsas.html
'''
import h5py
import os
import numpy
from spec2nexus import eznx
TARGET_FILE = 'nexus-example.hdf5'
FILE_SET = '''
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_3min_0378.hdf
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_30min_0383.hdf
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_58min_0388.hdf
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_86min_0393.hdf
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_112min_0398.hdf
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_140min_0403.hdf
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_168min_0408.hdf
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_196min_0413.hdf
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_223min_0418.hdf
/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_240min_0421.hdf
'''.strip().split()
BLANK_FILE = '/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/HeaterBlank_30C_560min_0376.hdf'
frame_set, names, h5_files, monitor, sample_times = [], [], [], [], []
for fname in FILE_SET:
h5 = h5py.File(fname, 'r')
frame_set.append(h5['/entry/data/data'])
monitor.append(h5['/entry/EPICS_PV_metadata/I0_cts_gated'][0])
names.append(os.path.split(fname)[-1])
sample_times.append(int(str(h5['/entry/sample/name'].value[0]).split('_')[-1][:-3]))
# do not close the HDF5 files yet
h5_files.append(h5)
h5_blank = h5py.File(BLANK_FILE, 'r')
# create file and group structure
root = eznx.makeFile(TARGET_FILE, default='entry')
nxentry = eznx.makeGroup(root, 'entry', 'NXentry', default='data')
nxdata = eznx.makeGroup(nxentry, 'data', 'NXdata', signal='frames', )
nxinstrument = eznx.makeGroup(nxentry, 'instrument', 'NXinstrument')
nxdetector = eznx.makeGroup(nxinstrument, 'detector', 'NXdetector')
nxsource = eznx.makeGroup(nxinstrument, 'source', 'NXsource')
nxmonochromator = eznx.makeGroup(nxinstrument, 'monochromator', 'NXmonochromator')
nxcollimator = eznx.makeGroup(nxinstrument, 'collimator', 'NXcollimator')
nxgeometry_slit = eznx.makeGroup(nxcollimator, 'geometry', 'NXgeometry')
nxshape_slit = eznx.makeGroup(nxgeometry_slit, 'shape', 'NXshape')
nxsample = eznx.makeGroup(nxinstrument, 'sample', 'NXsample')
nxmonitor = eznx.makeGroup(nxinstrument, 'control', 'NXmonitor')
# various metadata
eznx.addAttributes(root, creator=h5.attrs['creator'] + ' and spec2nexus.eznx')
eznx.write_dataset(nxentry, 'title', 'NeXus NXsas example')
eznx.write_dataset(nxentry, 'definition', 'NXsas', URL='http://download.nexusformat.org/doc/html/classes/applications/NXsas.html')
eznx.write_dataset(nxentry, 'start_time', h5_files[0].attrs['file_time'])
eznx.write_dataset(nxentry, 'end_time', h5_files[-1].attrs['file_time'])
eznx.write_dataset(nxdetector, 'frame_files', '\n'.join(names))
eznx.write_dataset(nxinstrument, 'name', 'APS 9-ID-C USAXS pinSAXS')
eznx.write_dataset(nxsource, 'type', 'Synchrotron X-ray Source')
eznx.write_dataset(nxsource, 'name', 'Advanced Photon Source Undulator A, sector 9ID-C')
eznx.write_dataset(nxsource, 'probe', 'x-ray')
eznx.write_dataset(nxsource, 'current', h5['/entry/EPICS_PV_metadata/SRcurrent'], units='mA')
eznx.write_dataset(nxsource, 'energy', float(7), units='GeV')
eznx.write_dataset(nxmonochromator, 'energy', h5['/entry/instrument/monochromator/energy'], units='keV')
eznx.write_dataset(nxmonochromator, 'wavelength', h5['/entry/EPICS_PV_metadata/wavelength'], units='Angstroms')
eznx.write_dataset(nxmonochromator, 'wavelength_spread', h5['/entry/EPICS_PV_metadata/wavelength_spread'], units='Angstroms/Angstroms')
eznx.write_dataset(nxshape_slit, 'shape', 'nxbox')
# next four are not defined in the NXsas specification
eznx.write_dataset(nxshape_slit, 'size_x', h5['/entry/EPICS_PV_metadata/USAXSslitHap'], units='mm')
eznx.write_dataset(nxshape_slit, 'size_y', h5['/entry/EPICS_PV_metadata/USAXSslitVap'], units='mm')
eznx.write_dataset(nxshape_slit, 'center_x', h5['/entry/EPICS_PV_metadata/USAXSslitHpos'], units='mm')
eznx.write_dataset(nxshape_slit, 'center_y', h5['/entry/EPICS_PV_metadata/USAXSslitVpos'], units='mm')
eznx.write_dataset(nxdetector, 'distance', h5['/entry/EPICS_PV_metadata/SDD'], units='mm')
eznx.write_dataset(nxdetector, 'x_pixel_size', h5['/entry/EPICS_PV_metadata/pin_ccd_pixel_size_x'], units='mm')
eznx.write_dataset(nxdetector, 'y_pixel_size', h5['/entry/EPICS_PV_metadata/pin_ccd_pixel_size_y'], units='mm')
eznx.write_dataset(nxdetector, 'beam_center_x', h5['/entry/EPICS_PV_metadata/pin_ccd_center_x'], units='mm')
eznx.write_dataset(nxdetector, 'beam_center_y', h5['/entry/EPICS_PV_metadata/pin_ccd_center_y'], units='mm')
eznx.write_dataset(nxdetector, 'beam_center_x_pixel', h5['/entry/EPICS_PV_metadata/pin_ccd_center_x_pixel'])
eznx.write_dataset(nxdetector, 'beam_center_y_pixel', h5['/entry/EPICS_PV_metadata/pin_ccd_center_y_pixel'])
eznx.write_dataset(nxsample, 'name', h5['/entry/EPICS_PV_metadata/SampleTitle'])
eznx.write_dataset(nxmonitor, 'mode', 'time')
eznx.write_dataset(nxmonitor, 'preset', h5['/entry/EPICS_PV_metadata/PresetTime'], units='s')
# NXsas specifies a scaler, we have a frame set so we record a 1-D array here
eznx.write_dataset(nxmonitor, 'integral', monitor, units='clock', clock_per_s='100000')
# the image data: a set of frames
# NXsas describes a 2-D single frame image, we are writing a 3-D multi-image frame set
# see: http://docs.h5py.org/en/latest/high/dataset.html#lossless-compression-filters
# full gzip compression (9) reduces THIS file size by ~50%
#ds = eznx.write_dataset(nxdetector, 'data', frame_set, units='counts')
ds = nxdetector.create_dataset('data', data=numpy.array(frame_set), compression='gzip', compression_opts=9)
eznx.addAttributes(ds, units='counts', compression='gzip', source_files='\n'.join(names))
eznx.makeLink(nxdata, ds, 'frames')
ds = eznx.write_dataset(nxsample, 'image_times', sample_times, units='minutes')
eznx.makeLink(nxdata, ds, 'sample_times')
ds = nxdetector.create_dataset('blank', data=h5_blank['/entry/data/data'], compression='gzip', compression_opts=9)
eznx.addAttributes(ds, units='counts')
eznx.addAttributes(ds, source_file=os.path.split(BLANK_FILE)[-1])
eznx.makeLink(nxdata, ds, 'blank')
eznx.write_dataset(nxdata, 'vertical', range(ds.shape[0]), units='pixels')
eznx.write_dataset(nxdata, 'horizontal', range(ds.shape[1]), units='pixels')
eznx.addAttributes(nxdata, axes='sample_times vertical horizontal'.split())
eznx.addAttributes(nxdata, sample_times_indices=[0,])
eznx.addAttributes(nxdata, vertical_indices=[1,])
eznx.addAttributes(nxdata, horizontal_indices=[2,])
# close the HDF5 output data file
root.close()
# Now, close the HDF5 source data files
[h5.close() for h5 in h5_files]
h5_blank.close()
##########################################################################
# structure of one of these files:
'''
IN625AB_775C_3min_0378.hdf
@NeXus_version = 4.3.0
@file_name = /mnt/share1/USAXS_data/2016-06/06_23_IN625_775C_saxs/IN625AB_775C_3min_0378.hdf
@HDF5_Version = 1.8.13
@file_time = 2016-06-23T12:02:31-06:00
@creator = areaDetector NDFileNexus plugin v0.2
@default = entry
entry:NXentry
@NX_class = NXentry
@default = data
@ADCoreVersion = 2.2.0
AD_template_ID:char[66] = $Id: nexus_template_pilatus.xml 8473 2015-06-09 19:39:50Z jemian $
definition:char[5] = NXsas
@version = 1.0b
@URL = http://download.nexusformat.org/doc/html/ClassDefinitions-Application.html#NXsas
end_time:char[25] = Thu Jun 23 11:50:42 2016
program_name:char[18] = NeXus areaDetector
run_cycle:char[8] = 2016-02
start_time:char[25] = Thu Jun 23 12:02:01 2016
title:char[30] = pinSAXS for IN625AB_775C_3min
EPICS_PV_metadata:NXcollection
@NX_class = NXcollection
@modified = 2015-06-09T14:31:00.450452
APS_run_cycle:char[8] = 2016-02
@pv = 9idcLAX:RunCycle
@description = APS operating cycle
EmptyFileName:char[13] = Unknown.hdf5
@pv = 9idcLAX:USAXS_Pin:EmptyFileName
@description = Name of file to use as empty scan
EndTime:char[25] = Thu Jun 23 11:50:42 2016
@pv = 9idcLAX:USAXS_Pin:EndExposureTime
@description = image ending time
GUPNumber:char[4] = PUP
@pv = 9idcLAX:GUPNumber
@description = GUP proposal number
GuardslitHap:float64 = 0.9
@pv = 9idcLAX:GSlit1H:size.VAL
@description = Guard slit, horizontal aperture, mm
GuardslitHpos:float64 = -1.06581410364e-14
@pv = 9idcLAX:GSlit1H:center.VAL
@description = Guard slit, horizontal position, mm
GuardslitVap:float64 = 0.3
@pv = 9idcLAX:GSlit1V:size.VAL
@description = Guard slit, vertical aperture, mm
GuardslitVpos:float64 = 2.22044604925e-16
@pv = 9idcLAX:GSlit1V:center.VAL
@description = Guard slit, vertical position, mm
I000_cts:float64 = 0.0
@pv = 9idb:scaler1_calc1.VAL
@description = I000 counts
I00_V:float64 = -0.0689697265625
@pv = 9idcUSX:ath01:ana01:ai06
@description = I00 voltage, V
I00_cts:float64 = 1.0
@pv = 9idcLAX:vsc:c0.S3
@description = I00 counts
I00_gain:float64 = 1000000000.0
@pv = 9idcUSX:fem03:seq01:gain
@description = I00 V/A gain
I0_V:float64 = 0.946655273438
@pv = 9idcUSX:ath01:ana01:ai05
@description = I0 voltage, V
I0_cts:float64 = 768876.0
@pv = 9idcLAX:vsc:c0.S2
@description = I0 counts
I0_cts_gated:float64 = 2934680.0
@pv = 9idcLAX:vsc:c1.S2
@description = I0 counts gated
I0_gain:float64 = 10000000.0
@pv = 9idcUSX:fem02:seq01:gain
@description = I0 V/A gain
Linkam_ci94_temp:float64 = 775.0
@pv = 9idcLAX:ci94:temp
@description = Linkam_ci94_temp
Linkam_ci94_temp2:float64 = 775.0
@pv = 9idcLAX:ci94:temp2
@description = Linkam_ci94_temp2
PIN_Y:float64 = 12.8
@pv = 9idcLAX:mxv:c0:m8.RBV
@description = pinhole y stage position, mm
PIN_Z:float64 = 12.998338
@pv = 9idcLAX:mxv:c0:m2.RBV
@description = pinhole z stage position, mm
Pin_TrI0:float64 = 300072.0
@pv = 9idcLAX:USAXS_Pin:Pin_TrI0
@description = Pin_TrI0
Pin_TrI0gain:float64 = 10000000.0
@pv = 9idcLAX:USAXS_Pin:Pin_TrI0gain
@description = Pin_TrI0gain
Pin_TrPD:float64 = 2147763.0
@pv = 9idcLAX:USAXS_Pin:Pin_TrPD
@description = Pin_TrPD
Pin_TrPDgain:float64 = 10000000.0
@pv = 9idcLAX:USAXS_Pin:Pin_TrPDgain
@description = Pin_TrPDgain
PresetTime:float64 = 30.0
@pv = usaxs_pilatus1:cam1:AcquireTime
@description = specified time for this exposure, s
SDD:float64 = 540.8
@pv = 9idcLAX:USAXS_Pin:Distance
@description = SDD: distance between sample and detector, mm
SRcurrent:float64 = 101.91112448
@pv = S:SRcurrentAI
@description = Storage Ring Current, mA
SampleTitle:char[30] = pinSAXS for IN625AB_775C_3min
@pv = 9idcLAX:USAXS:sampleTitle
@description = sample name
ScanMacro:char[21] = 06_23_IN625_775C.dat
@pv = 9idcLAX:SpecMacroFileName
@description = name of SPEC macro file
StartTime:char[25] = Thu Jun 23 12:02:01 2016
@pv = 9idcLAX:USAXS_Pin:StartExposureTime
@description = image starting time
USAXS_Q:float64 = 9.28709817959e-06
@pv = 9idcLAX:USAXS:Q
@description = Q
USAXSslitHap:float64 = 0.7998265
@pv = 9idcLAX:m58:c2:m8
@description = USAXS slit, horizontal aperture, mm
USAXSslitHpos:float64 = 8.00000000112e-06
@pv = 9idcLAX:m58:c2:m6
@description = USAXS slit, horizontal position, mm
USAXSslitVap:float64 = 0.2001505
@pv = 9idcLAX:m58:c2:m7
@description = USAXS slit, vertical aperture, mm
USAXSslitVpos:float64 = 0.500036
@pv = 9idcLAX:m58:c2:m5
@description = USAXS slit, vertical position, mm
UserName:char[19] = NIST, ANdrew Allen
@pv = 9idcLAX:UserName
@description = user name listed as GUP PI
WAXS_X:float64 = 0.0
@pv = 9idcLAX:m58:c0:m4.RBV
@description = waxs x stage position, mm
ccdProtection:int16 = 0
@pv = 9idcLAX:ccdProtection
@description = CCD protection bit
filterAl:float64 = 0.0
@pv = 9idcUSX:pf4:filterAl
@description = Al filter, mm
filterGlass:float64 = 0.0
@pv = 9idcUSX:pf42:filterGlass
@description = Glass filter, mm
filterTi:float64 = 0.0
@pv = 9idcUSX:pf4:filterTi
@description = Ti filter, mm
filterTrans:float64 = 1.0
@pv = 9idcUSX:pf4:trans
@description = filter transmission
idE_ds:float64 = 21.1887254715
@pv = ID09ds:Energy
@description = ID energy, downstream, keV
idE_us:float64 = 21.216753006
@pv = ID09us:Energy
@description = ID energy, upstream, keV
is_2D_USAXS_scan:float64 = 0.0
@pv = 9idcLAX:USAXS:is2DUSAXSscan
@description = does this scan use 2D collimated geometry
m2rp:float64 = 1.68
@pv = 9idcUSX:pzt:m2
@description = m2rp voltage, V
monoE:float64 = 20.9999615757
@pv = 9ida:BraggERdbkAO
@description = monochromator energy, keV
monoE_EGU:char[4] = keV
@pv = 9ida:BraggERdbkAO.EGU
@description = monochromator energy units
mr_enc:float64 = 8.84545561142
@pv = 9idcLAX:mr:encoder
@description = mr readback, deg
msrp:float64 = 5.005
@pv = 9idcUSX:pzt:m4
@description = msrp voltage, V
mx:float64 = 23.0
@pv = 9idcLAX:m58:c0:m2.RBV
@description = mx stage position, mm
my:float64 = 5.68434188608e-14
@pv = 9idcLAX:m58:c0:m3.RBV
@description = my stage position, mm
pin_ccd_center_x:float64 = 17.1914
@pv = 9idcLAX:USAXS_Pin:BeamCenterX:NXdetector
@description = horizontal position of beam center on CCD, mm
pin_ccd_center_x_pixel:float64 = 99.95
@pv = 9idcLAX:USAXS_Pin:BeamCenterX
@description = horizontal position of beam center on CCD, pixels
pin_ccd_center_y:float64 = -0.9718
@pv = 9idcLAX:USAXS_Pin:BeamCenterY:NXdetector
@description = vertical position of beam center on CCD, mm
pin_ccd_center_y_pixel:float64 = -5.65
@pv = 9idcLAX:USAXS_Pin:BeamCenterY
@description = vertical position of beam center on CCD, pixels
pin_ccd_pixel_size_x:float64 = 0.172
@pv = 9idcLAX:USAXS_Pin:PinPixSizeX
@description = CCD pixel size, horizontal, mm
pin_ccd_pixel_size_y:float64 = 0.172
@pv = 9idcLAX:USAXS_Pin:PinPixSizeY
@description = CCD pixel size, vertical, mm
pin_ccd_tilt_x:float64 = 2.408
@pv = 9idcLAX:USAXS_Pin:DetectorTiltX
@description = CCD tilt, x direction, degrees
pin_ccd_tilt_y:float64 = -7.879
@pv = 9idcLAX:USAXS_Pin:DetectorTiltY
@description = CCD tilt, y direction, degrees
sa:float64 = -8.67896
@pv = 9idcLAX:xps:c0:m7.RBV
@description = sample azimuthal rotation, degrees
scaler_freq:float64 = 10000000.0
@pv = 9idcLAX:vsc:c0.FREQ
@description = scaler frequency, Hz
sthick:float64 = 0.033
@pv = 9idcLAX:sampleThickness
@description = sample thickness
sx:float64 = -2.6
@pv = 9idcLAX:m58:c2:m1.RBV
@description = sample x stage position, mm
sy:float64 = 0.1
@pv = 9idcLAX:m58:c2:m2.RBV
@description = sample y stage position, mm
wavelength:float64 = 0.59040224218
@pv = 9ida:BraggLambdaRdbkAO
@description = monochromator wavelength, A
wavelength_EGU:char[10] = Angstroms
@pv = 9ida:BraggLambdaRdbkAO.EGU
@description = monochromator wavelength units
wavelength_spread:float64 = 8e-05
@pv = 9idcLAX:WavelengthSpread
@description = delta-lambda / lambda
areaDetector_reduced_250:NXdata
@NX_class = NXdata
@timestamp = 2016-06-23 12:02:52
@signal = R
@axes = Q
@Q_indices = 0
Q:float64[249] = [0.021918980150849705, 0.028707726365588677, 0.035489034129379531, '...', 1.6798613941377709]
@units = 1/A
R:float64[249] = [1.8623465183045094e-05, 2.2082338884805095e-05, 0.0001484437904133914, '...', 0.0032147384021863532]
@units = none
dR:float64[249] = [1.7542873121017162e-06, 2.401995716943954e-06, 0.00042207664124416792, '...', 4.5081624461636254e-05]
@units = none
areaDetector_reduced_full:NXdata
@NX_class = NXdata
@timestamp = 2016-06-23 12:02:52
@signal = R
@axes = Q
@Q_indices = 0
Q:float64[993] = [0.019462155321490378, 0.021154511839111413, 0.022846867153066421, '...', 1.6823303208831488]
@units = 1/A
R:float64[993] = [2.0359971104174904e-05, 1.5901790541614984e-05, 2.0007048712052326e-05, '...', 0.0032690105905925008]
@units = none
x:float64[993] = [0.98899999999999988, 1.075, 1.1609999999999998, '...', 86.300999999999988]
@units = mm
control:NXmonitor
@NX_class = NXmonitor
integral:float64 = 768876.0
mode:char[5] = timer
preset:float64 = 30.0
data:NXdata
@NX_class = NXdata
@signal = data
data:int32[195,487] = __array
__array = [
[41, 38, 57, '...', 9820]
[33, 43, 55, '...', 9367]
[46, 34, 44, '...', 9159]
...
[59, 43, 67, '...', 10831]
]
@ImageCounter = 365
@make = Dectris
@model = Pilatus
@maxSizeX = 487
@maxSizeY = 195
@USAXSmode = 06_23_IN625_775C.dat
description:char[8] = Pilatus
@make = Dectris
local_name:char[12] = Pilatus 100K
make:char[8] = Dectris
model:char[8] = Pilatus
instrument:NXinstrument
@NX_class = NXinstrument
name:char[5] = USAXS
aperture:NXaperture
@NX_class = NXaperture
description:char[9] = USAXSslit
hcenter:float64 = 8.00000000112e-06
hsize:float64 = 0.7998265
vcenter:float64 = 0.500036
vsize:float64 = 0.2001505
collimator:NXcollimator
@NX_class = NXcollimator
absorbing_material:char[8] = Tungsten
geometry:NXgeometry
@NX_class = NXgeometry
shape:NXshape
@NX_class = NXshape
shape:char[5] = nxbox
size:char[19] = see xsize and ysize
xcenter:float64 = 8.00000000112e-06
xsize:float64 = 0.7998265
ycenter:float64 = 0.500036
ysize:float64 = 0.2001505
detector:NXdetector
@NX_class = NXdetector
beam_center_x:float64 = 17.1914
beam_center_y:float64 = -0.9718
distance:float64 = 540.8
x_pixel_size:float64 = 0.172
y_pixel_size:float64 = 0.172
monochromator:NXmonochromator
@NX_class = NXmonochromator
energy:float64 = 20.9999615757
@units = keV
wavelength:float64 = 0.59040224218
@units = Angstroms
wavelength_spread:float64 = 8e-05
source:NXsource
@NX_class = NXsource
facility_beamline:char[3] = 9ID
facility_name:char[3] = APS
facility_sector:char[7] = XSD/9ID
facility_station:char[1] = C
name:char[48] = Advanced Photon Source Undulator A, sector 9ID-C
probe:char[5] = x-ray
type:char[24] = Synchrotron X-ray Source
link_rules:link_rules
@NX_class = link_rules
link --> /entry/instrument/detector/data
sample:NXsample
@NX_class = NXsample
aequatorial_angle:float64 = -8.67896
@units = degrees
name:char[30] = pinSAXS for IN625AB_775C_3min
thickness:float64 = 0.033
user1:NXuser
@NX_class = NXuser
name:char[19] = NIST, ANdrew Allen
proposal_number:char[4] = PUP
'''
| ["[email protected]"] | |
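
A short sketch of reading the frame set back out of the file the script writes; the dataset paths follow the links the script creates (`/entry/data/frames`, `/entry/data/sample_times`):

import h5py

with h5py.File('nexus-example.hdf5', 'r') as nx:
    frames = nx['/entry/data/frames']           # 3-D array: (frame, vertical, horizontal)
    times = nx['/entry/data/sample_times'][:]   # minutes, one entry per image frame
    print(frames.shape, times)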
073851a943ecfbba860f359ce7670d7e63e858a9 | 73bba8f2c52f53739d899c7580affc4ec926c65e | /trax/tf_numpy/jax_tests/lax_numpy_test.py | 3923655f7e7687552ce7f9645c7be7791a908511 | ["Apache-2.0"] | permissive | weiddeng/trax | 2a46e603a2e8b7fd1d31d80d87ca28b755b94b6f | 084c5159235da4d32a0116bf3fa13f71a09e10d3 | refs/heads/master | 2022-12-26T01:15:18.556592 | 2020-10-09T02:17:45 | 2020-10-09T02:17:45 | 298,895,884 | 0 | 0 | Apache-2.0 | 2020-09-26T20:38:29 | 2020-09-26T20:38:29 | null | UTF-8 | Python | false | false | 135,944 | py |
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from functools import partial
import itertools
import operator
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import six
import numpy as onp
import tensorflow.compat.v2 as tf
import trax.tf_numpy.numpy as lnp
import trax.tf_numpy.extensions as npe
from trax.tf_numpy.jax_tests.config import config, FLAGS
import trax.tf_numpy.jax_tests.test_util as jtu
config.parse_flags_with_absl()
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
# TODO(wangpeng): float_dtypes = [lnp.bfloat16, onp.float16, onp.float32,
# onp.float64]
float_dtypes = [onp.float16, onp.float32, onp.float64]
complex_dtypes = [onp.complex64, onp.complex128]
int_dtypes = [onp.int32, onp.int64]
unsigned_dtypes = [onp.uint32, onp.uint64]
bool_dtypes = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
python_scalar_dtypes = [lnp.bool_, lnp.int_, lnp.float_, lnp.complex_]
def _valid_dtypes_for_shape(shape, dtypes):
# Not all (shape, dtype) pairs are valid. In particular, Python scalars only
# have one type in each category (float, bool, etc.)
if shape is jtu.PYTHON_SCALAR_SHAPE:
return [t for t in dtypes if t in python_scalar_dtypes]
return dtypes
def _shape_and_dtypes(shapes, dtypes):
for shape in shapes:
for dtype in _valid_dtypes_for_shape(shape, dtypes):
yield (shape, dtype)
OpRecord = collections.namedtuple(
"OpRecord",
["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
"test_name", "check_dtypes", "tolerance", "inexact",
"check_incomplete_shape"])
def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name=None, check_dtypes=True, tolerance=None, inexact=False,
check_incomplete_shape=True):
test_name = test_name or name
return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name, check_dtypes, tolerance, inexact,
check_incomplete_shape)
def minus(a, b):
return [x for x in a if x not in b]
JAX_ONE_TO_ONE_OP_RECORDS = [
op_record("abs", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("float_power", 2, inexact_dtypes, all_shapes,
partial(jtu.rand_default, scale=1), ["rev"],
tolerance={
# TODO(wangpeng): lnp.bfloat16: 1e-2,
onp.float32: 1e-3,
onp.float64: 1e-12, onp.complex64: 2e-4,
onp.complex128: 1e-12}, check_dtypes=False),
op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("greater", 2, minus(all_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_equal, []),
op_record("greater_equal", 2, minus(all_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_equal, []),
op_record("less", 2, minus(all_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_equal, []),
op_record("less_equal", 2, minus(all_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_equal, []),
op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("maximum", 2, minus(all_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_inf, []),
op_record("minimum", 2, minus(all_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_inf, []),
op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("nextafter", 2, [f for f in float_dtypes
if f not in (lnp.bfloat16, onp.float16)],
all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"]),
op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("tan", 1, number_dtypes, all_shapes,
partial(jtu.rand_uniform, -1.5, 1.5), ["rev"], inexact=True),
# TODO(wangpeng): Add float16 support
op_record("sinh", 1, minus(number_dtypes, [onp.float16]), all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cosh", 1, minus(number_dtypes, [onp.float16]), all_shapes, jtu.rand_default, ["rev"],
inexact=True),
# TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
# ~float32 precision.
# TODO(b/143135720): on GPU, tanh has only ~float32 precision.
op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.float64: 1e-7, onp.complex128: 1e-7},
inexact=True),
op_record("arcsin", 1, minus(float_dtypes, [onp.float16]), all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arccos", 1, minus(float_dtypes, [onp.float16]), all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan", 1, minus(float_dtypes, [onp.float16]), all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan2", 2, minus(float_dtypes, [onp.float16]), all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arcsinh", 1, minus(number_dtypes, [onp.float16]), all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arccosh", 1, minus(number_dtypes, [onp.float16]), all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arctanh", 1, minus(number_dtypes, [onp.float16]), all_shapes, jtu.rand_small, ["rev"],
inexact=True),
]
JAX_COMPOUND_OP_RECORDS = [
# angle has inconsistent 32/64-bit return types across numpy versions.
op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
check_dtypes=False, inexact=True),
op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
inexact=six.PY3),
op_record("divmod", 2, minus(int_dtypes + float_dtypes, [onp.float16]),
all_shapes, jtu.rand_nonzero, []),
op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={
# TODO(wangpeng): lnp.bfloat16: 2e-2,
onp.float16: 1e-2}, inexact=True),
# TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
# precision.
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="expm1_large", tolerance={onp.float64: 1e-8}, inexact=True),
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
[], tolerance={onp.float64: 1e-8}, inexact=True),
op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("floor_divide", 2, minus(number_dtypes, complex_dtypes),
all_shapes, jtu.rand_nonzero, ["rev"]),
op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, [],
check_incomplete_shape=False),
op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isfinite", 1, minus(inexact_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_inf_and_nan, []),
op_record("isinf", 1, minus(inexact_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_inf_and_nan, []),
op_record("isnan", 1, minus(inexact_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_inf_and_nan, []),
op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="log1p_large", tolerance={onp.float64: 1e-12},
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp2", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float16: 1e-2}, inexact=True),
op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
jtu.rand_default, [], check_dtypes=False,
tolerance={onp.float16: 1e-2, onp.float64: 1e-12},
check_incomplete_shape=False),
op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
tolerance={onp.complex128: 1e-14}),
op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, [],
tolerance={onp.float64: 5e-6}),
op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("remainder", 2, minus(default_dtypes, [onp.float16]), all_shapes,
jtu.rand_nonzero, [], tolerance={onp.float16: 1e-2}),
op_record("mod", 2, minus(default_dtypes, [onp.float16]), all_shapes,
jtu.rand_nonzero, []),
op_record("sinc", 1, [t for t in number_dtypes if t != lnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.complex64: 1e-5}, inexact=True,
check_dtypes=False),
op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
check_dtypes=False),
op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
["rev"], inexact=True),
op_record("diff", 1, number_dtypes, nonzerodim_shapes, jtu.rand_default,
["rev"], check_incomplete_shape=False),
]
JAX_BITWISE_OP_RECORDS = [
op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_default, []),
op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_default, []),
op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_default, []),
op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_default, []),
]
JAX_REDUCER_RECORDS = [
op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
op_record("nanmean", 1, minus(inexact_dtypes, complex_dtypes),
nonempty_shapes, jtu.rand_some_nan, [], inexact=True),
op_record("nanprod", 1, minus(inexact_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_nan, []),
op_record("nansum", 1, minus(number_dtypes, complex_dtypes), all_shapes,
jtu.rand_some_nan, []),
]
JAX_REDUCER_NO_DTYPE_RECORDS = [
op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("max", 1, minus(all_dtypes, complex_dtypes), nonempty_shapes,
jtu.rand_default, []),
op_record("min", 1, minus(all_dtypes, complex_dtypes), nonempty_shapes,
jtu.rand_default, []),
op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
]
JAX_ARGMINMAX_RECORDS = [
op_record("argmin", 1, minus(all_dtypes, complex_dtypes), nonempty_shapes,
jtu.rand_some_equal, []),
op_record("argmax", 1, minus(all_dtypes, complex_dtypes), nonempty_shapes,
jtu.rand_some_equal, []),
]
JAX_OPERATOR_OVERLOADS = [
op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 2e-4, onp.complex128: 1e-14}),
op_record("__mod__", 2, minus(default_dtypes, [onp.float16]), all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__floordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): investigate these failures
# op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
# TODO(mattjj): lshift, rshift
]
JAX_RIGHT_OPERATOR_OVERLOADS = [
op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 1e-3}),
op_record("__rmod__", 2, minus(default_dtypes, [onp.float16]), all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__rfloordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
# op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
]
numpy_version = tuple(map(int, onp.version.version.split('.')))
if numpy_version >= (1, 15):
JAX_COMPOUND_OP_RECORDS += [
op_record("isclose", 2, [t for t in all_dtypes if t != lnp.bfloat16],
all_shapes, jtu.rand_small_positive, []),
op_record("gcd", 2, int_dtypes, all_shapes, jtu.rand_default, []),
op_record("lcm", 2, int_dtypes, all_shapes, jtu.rand_default, []),
]
JAX_REDUCER_NO_DTYPE_RECORDS += [
op_record("ptp", 1, minus(number_dtypes, complex_dtypes), nonempty_shapes,
jtu.rand_default, []),
]
if six.PY2:
JAX_OPERATOR_OVERLOADS += [
op_record("__div__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
]
JAX_RIGHT_OPERATOR_OVERLOADS += [
op_record("__rdiv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
]
CombosWithReplacement = itertools.combinations_with_replacement
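# For example, CombosWithReplacement([a, b], 2) yields (a, a), (a, b), (b, b).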
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: lnp.issubdtype(dtype, onp.signedinteger)
width = lambda dtype: lnp.iinfo(dtype).bits
x, y = args
# `lnp.iinfo(dtype).bits` can't be called on bools, so we convert bools to
# ints.
if x == lnp.bool_:
x = lnp.int32
if y == lnp.bool_:
y = lnp.int32
if width(x) > width(y):
x, y = y, x
if x == lnp.uint32 and y == lnp.uint64:
return False
# The following condition seems a little ad hoc, but seems to capture what
# numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
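# A few illustrative evaluations of the predicate above, derived from the
# rules it implements (shown as comments rather than executed code):
#   _dtypes_are_compatible_for_bitwise_ops([onp.int32, onp.int64])    # True
#   _dtypes_are_compatible_for_bitwise_ops([onp.uint32, onp.int64])   # True
#   _dtypes_are_compatible_for_bitwise_ops([onp.uint32, onp.uint64])  # False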
def _shapes_are_broadcast_compatible(shapes):
accumulator = onp.zeros([])
for shape in shapes:
try:
accumulator = accumulator + onp.zeros(shape)
except ValueError:
return False
return True
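# For example, (3, 1) and (1, 4) broadcast together, while (3,) and (4,) make
# the accumulating add raise a ValueError:
#   _shapes_are_broadcast_compatible([(3, 1), (1, 4)])  # True
#   _shapes_are_broadcast_compatible([(3,), (4,)])      # False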
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _promote_like_lnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `lnp.result_type(*args)`.
  lnp and onp have different type promotion semantics; this decorator allows
  tests to make an onp reference implementation act more like an lnp
  implementation.
"""
def wrapper(*args, **kw):
flat_args = tf.nest.flatten(args)
if inexact and not any(lnp.issubdtype(lnp.result_type(x), lnp.inexact)
for x in flat_args):
dtype = lnp.result_type(lnp.float_, *flat_args)
else:
dtype = lnp.result_type(*flat_args)
args = tf.nest.map_structure(lambda a: onp.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
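# Sketch of typical use (the concrete promoted dtype depends on lnp's rules):
#   onp_add = _promote_like_lnp(onp.add)
#   # Both arguments are cast to lnp.result_type(...) before onp.add runs,
#   # so the onp reference promotes the way lnp would.
#   onp_add(onp.int32(1), onp.float32(2))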
def new_test(f):
def wrapper(self, *args, **kwargs):
if not FLAGS.tf_numpy_additional_tests:
self.skipTest("Newly added test is disabled, since flag is False.")
else:
f(self, *args, **kwargs)
return wrapper
def named_parameters(ls):
"""A version that allows an empty param list."""
def noop(_):
def wrapper(self, *args, **kwargs):
self.skipTest("Empty parameter list")
return wrapper
if isinstance(ls, (list, tuple)) and not ls:
return noop
if isinstance(ls, itertools.chain):
try:
first = next(ls)
except StopIteration:
return noop
else:
ls = itertools.chain([first], ls)
return parameterized.named_parameters(ls)
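# Typical usage matches `parameterized.named_parameters`, e.g.:
#   @named_parameters(jtu.cases_from_list(...))
#   def testFoo(self, ...):
#     ...
# except that an empty list (or an exhausted chain) yields a stub that skips
# the test instead of failing at decoration time.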
# TODO(wangpeng): Enable all disabled tests in this class
class LaxBackedNumpyTests(jtu.TestCase):
"""Tests for LAX-backed Numpy implementation."""
def _GetArgsMaker(self, rng, shapes, dtypes, onp_arrays=True):
def f():
out = [rng(shape, dtype or lnp.float_)
for shape, dtype in zip(shapes, dtypes)]
return out if onp_arrays else [lnp.asarray(a) for a in out]
return f
@named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
"inexact": rec.inexact,
"check_incomplete_shape": rec.check_incomplete_shape}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
JAX_COMPOUND_OP_RECORDS)))
def testOp(self, onp_op, lnp_op, rng_factory, shapes, dtypes, check_dtypes,
tolerance, inexact, check_incomplete_shape):
# TODO(b/147769803): Remove this skipping
if lnp_op.__name__ == "kron" and shapes == ((2, 3, 4), (2, 3, 4)):
self.skipTest("Case disabled because of b/147769803")
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
tol = functools.reduce(jtu.join_tolerance,
[tolerance, tol, jtu.default_tolerance()])
self._CheckAgainstNumpy(_promote_like_lnp(onp_op, inexact), lnp_op,
args_maker, check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol,
check_incomplete_shape=check_incomplete_shape)
@named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"tol": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_OPERATOR_OVERLOADS))
def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
rng = rng_factory()
# onp and lnp arrays have different type promotion rules; force the use of
# lnp arrays.
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
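    # Strip the dunder underscores: "__add__" -> operator.add,
    # "__eq__" -> operator.eq, etc.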
fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
        fun, args_maker, check_dtypes=True,  # not scalar_arg and not empty_shape,
atol=tol, rtol=tol)
@named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"op_tolerance": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
op_tolerance):
if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
raise SkipTest() # TODO(mattjj): clean up
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
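    # Right overloads have no `operator` helper, so call the reflected method
    # (e.g. `__radd__`) directly on the second (lnp) operand.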
fun = lambda fst, snd: getattr(snd, name)(fst)
tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
fun, args_maker, check_dtypes=True, # not scalar_arg and not empty_shape,
atol=tol, rtol=tol)
@named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.test_name, shapes, dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in filter(
_dtypes_are_compatible_for_bitwise_ops,
CombosWithReplacement(rec.dtypes, rec.nargs)))
for rec in JAX_BITWISE_OP_RECORDS))
def testBitwiseOp(self, onp_op, lnp_op, rng_factory, shapes, dtypes):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
has_python_scalar = jtu.PYTHON_SCALAR_SHAPE in shapes
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
if onp_op == onp.bitwise_not and has_python_scalar:
# For bitwise_not with a Python `int`, npe.jit may choose a different
# dtype for the `int` from onp's choice, which may result in a different
# result value, so we skip _CompileAndCheck.
return
# Numpy does value-dependent dtype promotion on Python/numpy/array scalars
# which `jit` can't do (when np.result_type is called inside `jit`, tensor
# values are not available), so we skip dtype check in this case.
    check_dtypes = not (set(shapes) & set([jtu.NUMPY_SCALAR_SHAPE,
                                           jtu.PYTHON_SCALAR_SHAPE, ()]))
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=check_dtypes)
@named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis,
"None" if out_dtype is None else onp.dtype(out_dtype).name, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for out_dtype in [None] + rec.dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
for keepdims in [False, True])
for rec in JAX_REDUCER_RECORDS))
def testReducer(self, onp_op, lnp_op, rng_factory, shape, dtype, out_dtype,
axis, keepdims, inexact):
rng = rng_factory()
def onp_fun(x):
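      # onp can't reduce in bfloat16, so compute the reference in float32
      # whenever the input or requested output dtype is bfloat16.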
x_cast = x if dtype != lnp.bfloat16 else x.astype(onp.float32)
t = out_dtype if out_dtype != lnp.bfloat16 else onp.float32
return onp_op(x_cast, axis, dtype=t, keepdims=keepdims)
onp_fun = _promote_like_lnp(onp_fun, inexact)
lnp_fun = lambda x: lnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-3, onp.complex64: 1e-3,
onp.float64: 1e-5, onp.complex128: 1e-5}
tol = jtu.tolerance(dtype, tol_spec)
tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=lnp.bfloat16 not in (dtype, out_dtype),
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for rec in JAX_REDUCER_NO_DTYPE_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
for keepdims in [False, True]))
def testReducerNoDtype(self, onp_op, lnp_op, rng_factory, shape, dtype, axis,
keepdims, inexact):
rng = rng_factory()
onp_fun = lambda x: onp_op(x, axis, keepdims=keepdims)
onp_fun = _promote_like_lnp(onp_fun, inexact)
lnp_fun = lambda x: lnp_op(x, axis, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.count_nonzero(x, axis)
lnp_fun = lambda x: lnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.nonzero(x)
lnp_fun = lambda x: lnp.nonzero(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)
# The shapes of `nonzero`'s results are value-dependent, so `eval_on_shapes`
# won't return concrete shapes.
# Also, `nonzero` requires a known rank.
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_eval_on_shapes=False,
check_incomplete_shape=True, check_unknown_rank=False)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis}
for rec in JAX_ARGMINMAX_RECORDS
for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
for axis in range(-len(shape), len(shape))))
def testArgMinMax(self, onp_op, lnp_op, rng_factory, shape, dtype, axis):
rng = rng_factory()
if dtype == onp.complex128 and jtu.device_under_test() == "gpu":
raise unittest.SkipTest("complex128 reductions not supported on GPU")
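    # Cast onp's index result to lnp.int_ so the dtype comparison against
    # lnp_op below is meaningful.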
def onp_fun(array_to_reduce):
return onp_op(array_to_reduce, axis).astype(lnp.int_)
def lnp_fun(array_to_reduce):
return lnp_op(array_to_reduce, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(2,), (2,), (-1, -1, -1, None)], # scalar output
[(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
[(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
[(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
[(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
[(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
[(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
[(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
[(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
[(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(
minus(number_dtypes, complex_dtypes), 2)))
def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
axisa, axisb, axisc, axis = axes
lnp_fun = lambda a, b: lnp.cross(a, b, axisa, axisb, axisc, axis)
def onp_fun(a, b):
a = a.astype(onp.float32) if lhs_dtype == lnp.bfloat16 else a
b = b.astype(onp.float32) if rhs_dtype == lnp.bfloat16 else b
out = onp.cross(a, b, axisa, axisb, axisc, axis)
return out.astype(lnp.promote_types(lhs_dtype, rhs_dtype))
tol_spec = {
# TODO(wangpeng): dtypes.bfloat16: 3e-1,
onp.float16: 0.15}
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 1e-14,
onp.complex128: 1e-14}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
def onp_dot(x, y):
x = x.astype(onp.float32) if lhs_dtype == lnp.bfloat16 else x
y = y.astype(onp.float32) if rhs_dtype == lnp.bfloat16 else y
# `onp.dot(x, y).dtype` sometimes differs from `onp.result_type(x, y)`
# (e.g. when x is float64[] and y is complex64[3,3], or when x is
# float16[3,3] and y is int64[]). We ignore this corner case and pretend
# that they agree.
return onp.dot(x, y).astype(onp.result_type(x, y))
self._CheckAgainstNumpy(onp_dot, lnp.dot, args_maker,
check_dtypes=True, tol=tol)
    # We disable the dtype check in the following cases because `np.dot`
    # does value-dependent type promotion there.
check_dtypes = () not in (lhs_shape, rhs_shape)
self._CompileAndCheck(lnp.dot, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
def onp_fun(x, y):
dtype = lnp.promote_types(lhs_dtype, rhs_dtype)
return (onp.matmul(x, y).astype(dtype),
onp.array(x).__matmul__(y).astype(dtype),
onp.array(y).__rmatmul__(x).astype(dtype))
def lnp_fun(x, y):
return (lnp.matmul(x, y),
lnp.array(x).__matmul__(y),
lnp.array(y).__rmatmul__(x))
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 2e-2, onp.float64: 1e-12,
onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 4e-2
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=True, tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("vector-matrix", (9,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("tensor-vector", (5, 3, 2), (30,))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
@new_test
def testVDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 2e-2, onp.float64: 1e-12,
onp.complex128: 1e-12}
self._CheckAgainstNumpy(onp.vdot, lnp.vdot, args_maker,
check_dtypes=True, tol=tol)
self._CompileAndCheck(lnp.vdot, args_maker, check_dtypes=True, atol=tol,
rtol=tol, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(2, 3, 4), (5, 6, 7), 0], # from issue #740
[(2, 3, 4), (3, 4, 5, 6), 2],
[(2, 3, 4), (5, 4, 3, 6), [1, 2]],
[(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
[(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
lnp_fun = lambda a, b: lnp.tensordot(a, b, axes)
def onp_fun(a, b):
a = a if lhs_dtype != lnp.bfloat16 else a.astype(onp.float32)
b = b if rhs_dtype != lnp.bfloat16 else b.astype(onp.float32)
dtype = lnp.promote_types(lhs_dtype, rhs_dtype)
return onp.tensordot(a, b, axes).astype(dtype)
tol = {onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 1e-12,
onp.complex64: 1e-3, onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True,
check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": jtu.rand_default}
# TODO(phawkins): support integer dtypes too.
for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
if len(jtu._dims_of_shape(lhs_shape)) == 0
or len(jtu._dims_of_shape(rhs_shape)) == 0
or lhs_shape[-1] == rhs_shape[-1]))
def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
def onp_fun(lhs, rhs):
lhs = lhs if lhs_dtype != lnp.bfloat16 else lhs.astype(onp.float32)
rhs = rhs if rhs_dtype != lnp.bfloat16 else rhs.astype(onp.float32)
dtype = lnp.promote_types(lhs_dtype, rhs_dtype)
return onp.inner(lhs, rhs).astype(dtype)
lnp_fun = lambda lhs, rhs: lnp.inner(lhs, rhs)
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 2e-6}
if jtu.device_under_test() == "tpu":
tol_spec[onp.float32] = tol_spec[onp.complex64] = 2e-1
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
# TODO(phawkins): there are float32/float64 disagreements for some inputs.
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False, atol=tol,
rtol=tol, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max,
"rng_factory": jtu.rand_default}
for shape in all_shapes for dtype in minus(number_dtypes, complex_dtypes)
for a_min, a_max in [(-1, None), (None, 1), (-1, 1),
(-onp.ones(1), None),
(None, onp.ones(1)),
(-onp.ones(1), onp.ones(1))]))
def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)
lnp_fun = lambda x: lnp.clip(x, a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
tol_spec = {onp.float64: 2e-7}
tol = jtu.tolerance(dtype, tol_spec)
is_x32_scalar = (dtype in [onp.int32, onp.float32] and
shape in [jtu.NUMPY_SCALAR_SHAPE, ()])
    # Turn check_dtypes off when is_x32_scalar is True because of a weird
    # promotion inconsistency in numpy:
# ```
# print(np.result_type(np.ones([], np.int32), 1))
# print(np.result_type(np.ones([1], np.int32), 1))
# print(np.result_type(np.int32(1), 1))
# print(np.result_type(np.int32, 1))
# print(np.result_type(np.ones([], np.float32), 1))
# print(np.result_type(np.ones([1], np.float32), 1))
# print(np.result_type(np.float32(1), 1))
# print(np.result_type(np.float32, 1))
# ```
# >>>
# int64
# int32
# int64
# int32
# float64
# float32
# float64
# float32
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=not is_x32_scalar, tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=not is_x32_scalar,
atol=tol, rtol=tol, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max,
"rng_factory": jtu.rand_default}
for shape in array_shapes + [jtu.NUMPY_SCALAR_SHAPE]
for dtype in minus(number_dtypes, complex_dtypes)
for a_min, a_max in [(-1, None), (None, 1), (-1, 1),
(-onp.ones(1), None),
(None, onp.ones(1)),
(-onp.ones(1), onp.ones(1))]))
@new_test
def testClipAsMethodStaticBounds(
self, shape, dtype, a_min, a_max, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)
lnp_fun = lambda x: lnp.asarray(x).clip(a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
tol_spec = {onp.float64: 2e-7}
tol = jtu.tolerance(dtype, tol_spec)
is_x32_scalar = (dtype in [onp.int32, onp.float32] and
shape in [jtu.NUMPY_SCALAR_SHAPE, ()])
    # Turn check_dtypes off when is_x32_scalar is True because of a weird
    # promotion inconsistency in numpy:
# ```
# print(np.result_type(np.ones([], np.int32), 1))
# print(np.result_type(np.ones([1], np.int32), 1))
# print(np.result_type(np.int32(1), 1))
# print(np.result_type(np.int32, 1))
# print(np.result_type(np.ones([], np.float32), 1))
# print(np.result_type(np.ones([1], np.float32), 1))
# print(np.result_type(np.float32(1), 1))
# print(np.result_type(np.float32, 1))
# ```
# >>>
# int64
# int32
# int64
# int32
# float64
# float32
# float64
# float32
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=not is_x32_scalar, tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=not is_x32_scalar,
atol=tol, rtol=tol, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_decimals={}".format(
jtu.format_shape_dtype_string(shape, dtype), decimals),
"shape": shape, "dtype": dtype, "decimals": decimals,
"rng_factory": jtu.rand_default}
for shape, dtype in _shape_and_dtypes(
all_shapes, minus(number_dtypes, complex_dtypes))
for decimals in [0, 1, -2]))
def testRoundStaticDecimals(self, shape, dtype, decimals, rng_factory):
rng = rng_factory()
if lnp.issubdtype(dtype, onp.integer) and decimals < 0:
self.skipTest("Integer rounding with decimals < 0 not implemented")
onp_fun = lambda x: onp.round(x, decimals=decimals)
lnp_fun = lambda x: lnp.round(x, decimals=decimals)
args_maker = lambda: [rng(shape, dtype)]
tol = {
# TODO(b/154768983): lnp.bfloat16: 5e-2,
onp.float16: 1e-2}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol, check_incomplete_shape=True)
def testOperatorRound(self):
self.assertAllClose(round(onp.float32(7.532), 1),
round(lnp.float32(7.5), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(lnp.float32(1.234), 2), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234)),
round(lnp.float32(1.234)), check_dtypes=False)
self.assertAllClose(round(onp.float32(7.532), 1),
round(lnp.array(7.5, lnp.float32), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(lnp.array(1.234, lnp.float32), 2), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234)),
round(lnp.array(1.234, lnp.float32)),
check_dtypes=False)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_mode={}_rpadwidth={}_rconstantvalues={}".format(
jtu.format_shape_dtype_string(shape, dtype), mode, pad_width_rank,
constant_values_rank),
"shape": shape, "dtype": dtype, "mode": mode,
"pad_width_rank": pad_width_rank,
"constant_values_rank": constant_values_rank,
"rng_factory": jtu.rand_default,
"irng_factory": partial(jtu.rand_int, 3)}
for mode, constant_values_rank, shapes in [
('constant', 0, all_shapes),
('constant', 1, all_shapes),
('constant', 2, all_shapes),
('symmetric', None, nonempty_shapes),
('reflect', None, nonempty_shapes),
('wrap', None, nonempty_shapes),
]
for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
for pad_width_rank in range(3)))
@jtu.disable
def testPad(self, shape, dtype, mode, pad_width_rank, constant_values_rank,
rng_factory, irng_factory):
rng = rng_factory()
irng = irng_factory()
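    # [len(shape), 2][2 - pad_width_rank:] picks the shape of `pad_width`:
    # rank 0 -> (), rank 1 -> (2,), rank 2 -> (len(shape), 2).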
pad_width = irng([len(shape), 2][2 - pad_width_rank:], onp.int32)
def onp_fun(x, kwargs):
if pad_width.size == 0:
return x
return onp.pad(x, pad_width, mode=mode, **kwargs)
def lnp_fun(x, kwargs):
return lnp.pad(x, pad_width, mode=mode, **kwargs)
def args_maker():
kwargs = {}
if constant_values_rank:
kwargs["constant_values"] = rng(
[len(shape), 2][2 - constant_values_rank:], dtype)
return rng(shape, dtype), kwargs
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_reps={}".format(
jtu.format_shape_dtype_string(shape, dtype), reps),
"shape": shape, "dtype": dtype, "reps": reps,
"rng_factory": jtu.rand_default}
for reps in [(), (2,), (3, 4), (2, 3, 4)]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
))
def testTile(self, shape, dtype, reps, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.tile(arg, reps)
lnp_fun = lambda arg: lnp.tile(arg, reps)
args_maker = lambda: [rng(shape, dtype)]
tol_spec = {onp.float64: 2e-7}
tol = jtu.tolerance(dtype, tol_spec)
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for num_arrs in [3]
for arg_dtypes in CombosWithReplacement(default_dtypes, num_arrs)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
wrapped_axis = axis % len(base_shape)
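    # Vary only the size along the concatenation axis (cycling through 3, 1,
    # 4) so all other dimensions stay compatible.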
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(*args):
# TODO(nareshmodi): enable once bfloat16 has better support
# args = [x if x.dtype != bfloat16 else x.astype(onp.float32)
# for x in args]
dtype = functools.reduce(lnp.promote_types, arg_dtypes)
return onp.concatenate(args, axis=axis).astype(dtype)
lnp_fun = lambda *args: lnp.concatenate(args, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for arg_dtypes in CombosWithReplacement(default_dtypes, 2)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(arr, values):
arr = arr.astype(onp.float32) if lnp.bfloat16 == arr.dtype else arr
values = (
values.astype(onp.float32)
if lnp.bfloat16 == values.dtype else values)
out = onp.append(arr, values, axis=axis)
return out.astype(lnp.promote_types(*arg_dtypes))
lnp_fun = lambda arr, values: lnp.append(arr, values, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_axis={}_repeats={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, repeats),
"axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
"rng_factory": jtu.rand_default}
for repeats in [0, 1, 2]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
for axis in [None] + list(range(-len(shape), len(shape)))))
def testRepeat(self, axis, shape, dtype, repeats, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.repeat(arg, repeats=repeats, axis=axis)
onp_fun = _promote_like_lnp(onp_fun)
lnp_fun = lambda arg: lnp.repeat(arg, repeats=repeats, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=False)
def testIssue1233(self):
    """Follows the numpy test suite's `test_repeat` at
    https://github.com/numpy/numpy/blob/master/numpy/core/tests/test_multiarray.py
    """
tol = 1e-5
def test_single(m, args_maker, repeats, axis):
lax_ans = lnp.repeat(m, repeats, axis)
numpy_ans = onp.repeat(m, repeats, axis)
      self.assertAllClose(
          lax_ans, numpy_ans, check_dtypes=True, rtol=tol, atol=tol)
      lnp_fun = lambda arg: lnp.repeat(arg, repeats=repeats, axis=axis)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=False)
    m = lnp.array([1, 2, 3, 4, 5, 6])
    args_maker = lambda: [m]
    for repeats in [2, [1, 3, 2, 1, 1, 2], [1, 3, 0, 1, 1, 2], [2],
                    lnp.array([1, 3, 2, 1, 1, 2]), lnp.array([2])]:
      test_single(m, args_maker, repeats, None)
    m_rect = m.reshape((2, 3))
    args_maker = lambda: [m_rect]
    for repeats in [2, [2, 1], [2], lnp.array([2, 1]), lnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=0)
    for repeats in [2, [1, 3, 2], [2], lnp.array([1, 3, 2]), lnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=1)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis, out_dtype),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default, "lnp_op": getattr(lnp, op),
"onp_op": getattr(onp, op)}
for op in ["cumsum", "cumprod"]
for dtype in default_dtypes
for out_dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, lnp_op, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype)
lnp_fun = lambda arg: lnp_op(arg, axis=axis, dtype=out_dtype)
args_maker = lambda: [rng(shape, dtype)]
tol = max(jtu.tolerance(dtype), jtu.tolerance(out_dtype))
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
onp.dtype(dtype).name, m, n, k),
"m": m, "n": n, "k": k, "dtype": dtype, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for n in [0, 4]
for m in [None, 0, 1, 3, 4]
for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda: onp.tri(n, M=m, k=k, dtype=dtype)
lnp_fun = lambda: lnp.tri(n, M=m, k=k, dtype=dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_shape={}_k={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "op": op, "k": k,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for op in ["tril", "triu"]
for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: getattr(onp, op)(arg, k=k)
lnp_fun = lambda arg: getattr(lnp, op)(arg, k=k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
# Incomplete shape support is not implemented at the moment.
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=False)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
onp.testing.assert_equal(onp.diag_indices(n, ndim),
lnp.diag_indices(n, ndim))
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diag(arg, k)
lnp_fun = lambda arg: lnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diagonal(arg, offset, axis1, axis2)
lnp_fun = lambda arg: lnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(onp.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
onp_fun = lambda: onp.identity(n, dtype)
lnp_fun = lambda: lnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype),
out_dtype, offset, axis1, axis2),
"dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
"axis1": axis1, "axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for out_dtype in [None] + number_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in range(-len(shape), len(shape))
if (axis1 % len(shape)) != (axis2 % len(shape))
for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
def onp_fun(arg):
if out_dtype == lnp.bfloat16:
return onp.trace(arg, offset, axis1, axis2, onp.float32).astype(lnp.bfloat16)
else:
return onp.trace(arg, offset, axis1, axis2, out_dtype)
lnp_fun = lambda arg: lnp.trace(arg, offset, axis1, axis2, out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis),
"shape": shape, "axis": axis, "dtypes": dtypes, "rng_factory": rng_factory}
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for rng_factory in [jtu.rand_default]))
def testStack(self, shape, axis, dtypes, rng_factory):
rng = rng_factory()
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_lnp(partial(onp.stack, axis=axis))
lnp_fun = partial(lnp.stack, axis=axis)
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(
        lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
"shape": shape, "op": op, "dtypes": dtypes, "rng_factory": rng_factory}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for rng_factory in [jtu.rand_default]))
def testHVDStack(self, shape, op, dtypes, rng_factory):
rng = rng_factory()
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_lnp(getattr(onp, op))
lnp_fun = getattr(lnp, op)
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
    self._CompileAndCheck(
        lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, fill_value_dtype),
onp.dtype(out_dtype).name if out_dtype else "None"),
"shape": shape, "fill_value_dtype": fill_value_dtype,
"out_dtype": out_dtype, "rng_factory": jtu.rand_default}
for shape in array_shapes + [3, onp.array(7, dtype=onp.int32)]
for fill_value_dtype in default_dtypes
for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda fill_value: onp.full(shape, fill_value, dtype=out_dtype)
lnp_fun = lambda fill_value: lnp.full(shape, fill_value, dtype=out_dtype)
args_maker = lambda: [rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
"onp_op": getattr(onp, op), "lnp_op": getattr(lnp, op),
"shape": shape, "dtype": dtype}
for op in ["zeros", "ones"]
for shape in [2, (), (2,), (3, 0), onp.array((4, 5, 6), dtype=onp.int32),
onp.array(4, dtype=onp.int32)]
for dtype in all_dtypes))
def testZerosOnes(self, onp_op, lnp_op, shape, dtype):
rng = jtu.rand_default()
    args_maker = lambda: []
onp_op = partial(onp_op, shape, dtype)
lnp_op = partial(lnp_op, shape, dtype)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_op, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_filldtype={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
onp.dtype(fill_value_dtype).name,
onp.dtype(out_dtype).name),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default}
for shape in array_shapes
for in_dtype in default_dtypes
for fill_value_dtype in default_dtypes
for out_dtype in default_dtypes))
def testFullLike(self, shape, in_dtype, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x, fill_value: onp.full_like(x, fill_value, dtype=out_dtype)
lnp_fun = lambda x, fill_value: lnp.full_like(x, fill_value, dtype=out_dtype)
args_maker = lambda: [rng(shape, in_dtype), rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.split(x, num_sections, axis=axis)
lnp_fun = lambda x: lnp.split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
def fn(module, axis):
if axis == 0:
return module.vsplit
elif axis == 1:
return module.hsplit
else:
assert axis == 2
return module.dsplit
onp_fun = lambda x: fn(onp, axis)(x, num_sections)
lnp_fun = lambda x: fn(lnp, axis)(x, num_sections)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_order={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
order),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"order": order, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for order in ["C", "F"]
for arg_shape, out_shape in [
(jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
((), (1, 1, 1)),
((7, 0), (0, 42, 101)),
((3, 4), 12),
((3, 4), (12,)),
((3, 4), -1),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshape(self, arg_shape, out_shape, dtype, order, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.reshape(x, out_shape, order=order)
lnp_fun = lambda x: lnp.reshape(x, out_shape, order=order)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for arg_shape, out_shape in [
((7, 0), (0, 42, 101)),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshapeMethod(self, arg_shape, out_shape, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.reshape(x, out_shape)
lnp_fun = lambda x: x.reshape(*out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_expanddim={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), dim),
"arg_shape": arg_shape, "dtype": dtype, "dim": dim,
"rng_factory": jtu.rand_default}
for arg_shape in [(), (3,), (3, 4)]
for dtype in default_dtypes
for dim in range(-len(arg_shape)+1, len(arg_shape))))
def testExpandDimsStaticDim(self, arg_shape, dtype, dim, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.expand_dims(x, dim)
lnp_fun = lambda x: lnp.expand_dims(x, dim)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2,
"rng_factory": jtu.rand_default}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.swapaxes(x, ax1, ax2)
lnp_fun = lambda x: lnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), source, destination),
"arg_shape": arg_shape, "dtype": dtype, "source": source,
"destination": destination, "rng_factory": jtu.rand_default}
for arg_shape, source, destination in [
(tuple(range(6)), (0, 2), (3, 5)),
(tuple(range(6)), (0, 2), (-1, -3)),
          (tuple(range(6)), (-6, -4), (3, 5)),
(tuple(range(6)), (-6, -4), (-1, -3)),
(tuple(range(6)), 0, 4),
(tuple(range(6)), -6, -2),
(tuple(range(6)), tuple(range(6)), tuple(range(6))),
(tuple(range(6)), tuple(range(6)), tuple(reversed(range(6)))),
(tuple(range(6)), (), ()),
] for dtype in default_dtypes))
@new_test
def testMoveaxisStaticAxes(self, arg_shape, dtype, source, destination,
rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.moveaxis(x, source, destination)
lnp_fun = lambda x: lnp.moveaxis(x, source, destination)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax,
"rng_factory": jtu.rand_default}
for arg_shape, ax in [
((3, 1), None),
((3, 1), 1),
((1, 3, 1), (0, 2)),
((1, 4, 1), (0,))]
for dtype in default_dtypes))
def testSqueeze(self, arg_shape, dtype, ax, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.squeeze(x, ax)
lnp_fun = lambda x: lnp.squeeze(x, ax)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis,
(None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
returned),
"rng_factory": jtu.rand_default, "shape": shape, "dtype": dtype, "axis": axis,
"weights_shape": weights_shape, "returned": returned}
for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
for axis in set(range(-len(shape), len(shape))) | set([None])
# `weights_shape` is either `None`, same as the averaged axis, or same as
# that of the input
for weights_shape in ([None, shape] if axis is None or len(shape) == 1
else [None, (shape[axis],), shape])
for returned in [False, True]))
def testAverage(self, shape, dtype, axis, weights_shape, returned, rng_factory):
rng = rng_factory()
if weights_shape is None:
onp_fun = lambda x: onp.average(x, axis, returned=returned)
lnp_fun = lambda x: lnp.average(x, axis, returned=returned)
args_maker = lambda: [rng(shape, dtype)]
else:
onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)
lnp_fun = lambda x, weights: lnp.average(x, axis, weights, returned)
args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
onp_fun = _promote_like_lnp(onp_fun, inexact=True)
tol = {
# TODO(b/154768983): lnp.bfloat16: 1e-1,
onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 2e-7,
onp.complex64: 1e-3, onp.complex128: 1e-10,
}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
try:
self._CheckAgainstNumpy(
onp_fun, lnp_fun, args_maker, check_dtypes=check_dtypes, tol=tol)
except ZeroDivisionError:
self.skipTest("don't support checking for ZeroDivisionError")
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=check_dtypes,
rtol=tol, atol=tol, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_arg{}_ndmin={}".format(i, ndmin),
"arg": arg, "ndmin": ndmin, "dtype": dtype}
for i, (arg, dtype) in enumerate([
([True, False, True], lnp.bool_),
(3., lnp.float_),
([1, 2, 3], lnp.int_),
([1., 2., 3.], lnp.float_),
([[1, 2], [3, 4], [5, 6]], lnp.int_),
([[1, 2.], [3, 4], [5, 6]], lnp.float_),
([[1., 2j], [3., 4.], [5., 6.]], lnp.complex_),
([[3, onp.array(2, dtype=lnp.float_), 1],
onp.arange(3., dtype=lnp.float_)], lnp.float_),
])
for ndmin in [None, onp.ndim(arg), onp.ndim(arg) + 1, onp.ndim(arg) + 2]))
def testArray(self, arg, ndmin, dtype):
args_maker = lambda: [arg]
dtype = lnp.canonicalize_dtype(dtype)
if ndmin is not None:
onp_fun = partial(onp.array, ndmin=ndmin, dtype=dtype)
lnp_fun = partial(lnp.array, ndmin=ndmin)
else:
onp_fun = partial(onp.array, dtype=dtype)
lnp_fun = lnp.array
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True,
check_incomplete_shape=True, static_argnums=[0])
def testIssue121(self):
assert not onp.isscalar(lnp.array(3))
@jtu.disable
def testArrayMethod(self):
class arraylike(object):
dtype = onp.float32
def __array__(self, dtype=None):
return 3.
a = arraylike()
ans = lnp.array(a)
assert ans == 3.
@jtu.skip_on_devices("tpu") # TODO(b/32368900): TPUs don't support uint8 yet.
@jtu.disable
def testMemoryView(self):
ans = lnp.array(bytearray(b'\x2a'))
self.assertAllClose(
ans,
onp.array([0x2a], dtype=onp.uint8),
check_dtypes=True)
def testAllClose(self):
rng = onp.random.RandomState(0)
x = rng.randn(2, 2)
y = rng.randn(2)
def same(list1, list2):
allclose = functools.partial(lnp.allclose, atol=1e-3, rtol=1e-3)
elements_close = list(map(allclose, list1, list2))
return lnp.all(lnp.array(elements_close))
csame = npe.jit(same)
a1 = same((x, y), (x, y))
a2 = csame((x, y), (x, y))
a3 = csame((x, y), (x, 2 * y))
self.assertTrue(a1)
self.assertTrue(a2)
self.assertFalse(a3)
@jtu.skip_on_devices("tpu") # TODO(mattjj): investigate this failure
@jtu.disable
def testOnesBroadcastingConstantHandler(self):
# TODO(mattjj): update this test for jax3
self.skipTest("test needs jax3 update")
def fun(x):
ones = lnp.ones((3, 4))
assert isinstance(ones, onp.ndarray) and ones.strides == (0, 0)
# To check that the constant handler generates a Broadcast for stride-zero
# arrays, we monkey-patch the client instance.
# TODO(mattjj): once we have better HLO dumping and inspecting facilities,
# we can check the HLO more directly.
c = x._node.c
Broadcast = c.Broadcast # pylint: disable=invalid-name
was_called = []
c.Broadcast = lambda *args: was_called.append(True) or Broadcast(*args)
out = x + ones # the ndarray constant handler should call Broadcast here
assert was_called, "Broadcast was not called."
return out
fun = api.jit(fun)
out_val = fun(lnp.ones(4))
self.assertAllClose(out_val, onp.full((3, 4), 2.), check_dtypes=False)
def testZeroStridesConstantHandler(self):
raw_const = onp.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = onp.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = npe.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
arr = onp.ones(3)
@npe.jit
def f(x):
self.assertIsInstance(x, lnp.ndarray)
return lnp.sum(x)
f(arr)
@jtu.disable
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = onp.array([3., 4.])
def g(x, y):
return lnp.add(x, y)
def f(x, y):
return lnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
@jtu.disable
def testAbstractionErrorMessage(self):
@api.jit
def f(x, n):
for _ in range(n):
x = x * x
return x
self.assertRaises(TypeError, lambda: f(3., 3))
@api.jit
def g(x):
if x > 0.:
return x * 2
else:
return x + 2
self.assertRaises(TypeError, lambda: g(3.))
@jtu.disable
def testTracingPrimitiveWithNoTranslationErrorMessage(self):
# TODO(mattjj): update this for jax3
self.skipTest("test needs jax3 update")
foo = lnp._not_implemented(lambda x: x)
# No error if there's no tracing.
foo(onp.arange(3))
cfoo = api.jit(foo)
self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3)))
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (2, 3)]
for dtype in default_dtypes
for axis in list(range(-len(shape), len(shape))) + [None] # Test negative axes
for rng_factory in [jtu.rand_default]))
def testFlip(self, shape, dtype, axis, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.flip(x, axis)
onp_op = lambda x: onp.flip(x, axis)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_op, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFlipud(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.flipud(x)
onp_op = lambda x: onp.flipud(x)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_op, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3, 2), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFliplr(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.fliplr(x)
onp_op = lambda x: onp.fliplr(x)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_op, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_k={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), k, axes),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "k": k, "axes": axes}
for shape, axes in [
[(2, 3), (0, 1)],
[(2, 3), (1, 0)],
[(4, 3, 2), (0, 2)],
[(4, 3, 2), (2, 1)],
]
for k in range(-3, 4)
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testRot90(self, shape, dtype, k, axes, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.rot90(x, k, axes)
onp_op = lambda x: onp.rot90(x, k, axes)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_op, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_k={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), k, axes),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "k": k,
"axes": axes}
for shape, axes in [
[(2, 3), (-2, -1)],
[(2, 3), (-2, 1)],
[(4, 3, 2), (-1, -2)],
[(4, 3, 2), (2, -2)],
]
for k in range(-3, 4)
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
@new_test
  # This test is kept separate from testRot90 so that we can measure coverage
  # directly against the existing baseline. Once we stop measuring that, we
  # can merge it into testRot90.
def testRot90Additional(self, shape, dtype, k, axes, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.rot90(x, k, axes)
onp_op = lambda x: onp.rot90(x, k, axes)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_op, args_maker, check_dtypes=True, check_incomplete_shape=True)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True,
check_incomplete_shape=True)
def testAstype(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
op = lambda x: x.astype(lnp.int32)
self._CheckAgainstNumpy(op, op, args_maker, check_dtypes=True)
self._CompileAndCheck(
op, args_maker, check_dtypes=True, check_incomplete_shape=True)
# TODO(mattjj): test other ndarray-like method overrides
def testOnpMean(self):
# from https://github.com/google/jax/issues/125
x = lnp.add(lnp.eye(3, dtype=lnp.float_), 0.)
ans = onp.mean(x)
self.assertAllClose(ans, onp.array(1./3), check_dtypes=False)
@jtu.disable
def testArangeOnFloats(self):
# from https://github.com/google/jax/issues/145
expected = onp.arange(0.0, 1.0, 0.1, dtype=lnp.float_)
ans = lnp.arange(0.0, 1.0, 0.1)
self.assertAllClose(expected, ans, check_dtypes=True)
def testSortManually(self):
def _test(*args, **kwargs):
raw_ans = lnp.sort(*args, **kwargs)
fn_ans = npe.jit(lnp.sort, static_argnums=(1,))(*args, **kwargs)
expected = onp.sort(*args, **kwargs)
self.assertAllClose(expected, raw_ans, check_dtypes=True)
self.assertAllClose(expected, fn_ans, check_dtypes=True)
    # Manual tests for sort are nice because we don't have to worry about
    # ties; lax.sort is tested combinatorially.
_test(onp.array([16, 15, 23, 42, 8, 4]))
_test(onp.array([[1, 4], [3, 1]]), None)
_test(onp.array([[1, 4], [3, 1]]))
_test(onp.array([[1, 4], [3, 1]]), 0)
def testArgsortManually(self):
def _test(*args, **kwargs):
raw_ans = lnp.argsort(*args, **kwargs)
fn_ans = npe.jit(lnp.argsort, static_argnums=(1,))(*args, **kwargs)
expected = onp.argsort(*args, **kwargs)
self.assertAllClose(expected, raw_ans, check_dtypes=True)
self.assertAllClose(expected, fn_ans, check_dtypes=True)
_test(onp.array([16, 15, 23, 42, 8, 4]))
_test(onp.array([[16, 15, 23], [42, 8, 4]]), 0)
_test(onp.array([[16, 15, 23], [42, 8, 4]]), 1)
_test(onp.array([[16, 15, 23], [42, 8, 4]]), None)
_test(onp.array([[16, 15, 23], [42, 8, 4]]))
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_shifts={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype),
shifts, axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "shifts": shifts,
"axis": axis}
for dtype in all_dtypes
for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
for shifts, axis in [
(3, None),
(1, 1),
((3,), (0,)),
((-2,), (-2,)),
((1, 2), (0, -1))
]
for rng_factory in [jtu.rand_default]))
def testRoll(self, shape, dtype, shifts, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), onp.array(shifts)]
lnp_op = partial(lnp.roll, axis=axis)
onp_op = partial(onp.roll, axis=axis)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_op, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}_mode={}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(index_shape, index_dtype),
axis, mode),
"rng_factory": rng_factory, "rng_indices_factory": rng_indices_factory,
"shape": shape, "index_shape": index_shape, "dtype": dtype,
"index_dtype": index_dtype, "axis": axis, "mode": mode}
for shape in [(3,), (3, 4), (3, 4, 5)]
for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
for axis in itertools.chain(range(-len(shape), len(shape)), [None])
for dtype in all_dtypes
for index_dtype in int_dtypes
for mode in ['wrap', 'clip']
for rng_factory in [jtu.rand_default]
for rng_indices_factory in [partial(jtu.rand_int, -5, 5)]))
  def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode,
               rng_factory, rng_indices_factory):
    rng = rng_factory()
    rng_indices = rng_indices_factory()

    def args_maker():
      x = rng(shape, dtype)
      i = rng_indices(index_shape, index_dtype)
      return x, i
lnp_op = lambda x, i: lnp.take(x, i, axis=axis, mode=mode)
onp_op = lambda x, i: onp.take(x, i, axis=axis, mode=mode)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_op, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_ishape={}_axis={}".format(
jtu.format_shape_dtype_string(x_shape, dtype), i_shape, axis),
"rng_factory": rng_factory, "x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
"axis": axis}
for x_shape, i_shape in filter(
_shapes_are_equal_length,
filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(nonempty_nonscalar_array_shapes, 2)))
for axis in itertools.chain(range(len(x_shape)), [-1], [None])
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testTakeAlongAxis(self, x_shape, i_shape, dtype, axis, rng_factory):
rng = rng_factory()
i_shape = onp.array(i_shape)
if axis is None:
i_shape = [onp.prod(i_shape, dtype=onp.int64)]
else:
# Test the case where the size of the axis doesn't necessarily broadcast.
i_shape[axis] *= 3
i_shape = list(i_shape)
def args_maker():
x = rng(x_shape, dtype)
n = onp.prod(x_shape, dtype=onp.int32) if axis is None else x_shape[axis]
i = rng(i_shape, onp.int32) % (2 * n - 1) - (n - 1)
return x, i
lnp_op = lambda x, i: lnp.take_along_axis(x, i, axis=axis)
if hasattr(onp, "take_along_axis"):
onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True,
check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}_increasing={}".format(
jtu.format_shape_dtype_string([shape], dtype),
n, increasing),
"dtype": dtype, "shape": shape, "n": n, "increasing": increasing,
"rng_factory": jtu.rand_default}
for dtype in inexact_dtypes
for shape in [0, 5]
for n in [2, 4]
for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing, rng_factory):
rng = rng_factory()
def onp_fun(arg):
arg = arg.astype(onp.float32) if dtype == lnp.bfloat16 else arg
return onp.vander(arg, N=n, increasing=increasing)
lnp_fun = lambda arg: lnp.vander(arg, N=n, increasing=increasing)
args_maker = lambda: [rng([shape], dtype)]
# np.vander seems to return float64 for all floating types. We could obey
# those semantics, but they seem like a bug.
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False,
tol={onp.float32: 1e-3})
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=False, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("nan_to_num", [shape],
[dtype]),
"rng_factory": jtu.rand_some_inf_and_nan, "shape": shape,
"dtype": dtype}
for shape in all_shapes
for dtype in inexact_dtypes))
@jtu.disable
def testNanToNum(self, rng_factory, shape, dtype):
rng = rng_factory()
dtype = onp.dtype(dtypes.canonicalize_dtype(dtype)).type
def onp_fun(x):
if dtype == lnp.bfloat16:
x = onp.where(onp.isnan(x), dtype(0), x)
x = onp.where(onp.isposinf(x), lnp.finfo(dtype).max, x)
x = onp.where(onp.isneginf(x), lnp.finfo(dtype).min, x)
return x
else:
return onp.nan_to_num(x).astype(dtype)
args_maker = lambda: [rng(shape, dtype)]
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, lnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
self._CompileAndCheck(lnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
@named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes, dtypes in (
((), ()),
(((7,),), (onp.int32,)),
(((3,), (4,)), (onp.int32, onp.int32)),
(((3,), (1,), (4,)), (onp.int32, onp.int32, onp.int32)),
)))
def testIx_(self, rng_factory, shapes, dtypes):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)
for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(onp.ix_, lnp.ix_, args_maker,
check_dtypes=True)
self._CompileAndCheck(
lnp.ix_, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name":
"_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}".format(
op,
jtu.format_shape_dtype_string(a_shape, a_dtype),
jtu.format_shape_dtype_string(q_shape, q_dtype),
axis, keepdims),
"a_rng": jtu.rand_default(), "q_rng": q_rng, "op": op,
"a_shape": a_shape, "a_dtype": a_dtype,
"q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
"keepdims": keepdims}
for (op, q_rng) in (
("percentile", jtu.rand_uniform(low=0., high=100.)),
("quantile", jtu.rand_uniform(low=0., high=1.)),
("median", jtu.rand_uniform(low=0., high=1.)),
)
for a_dtype in float_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for q_dtype in [onp.float32]
for q_shape in scalar_shapes + [(4,)]
for keepdims in [False, True]))
@jtu.disable
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
axis, keepdims):
if op == "quantile" and numpy_version < (1, 15):
raise SkipTest("Numpy < 1.15 does not have np.quantile")
if op == "median":
args_maker = lambda: [a_rng(a_shape, a_dtype)]
else:
args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
def onp_fun(*args):
args = [x if lnp.result_type(x) != lnp.bfloat16 else
onp.asarray(x, onp.float32) for x in args]
return getattr(onp, op)(*args, axis=axis, keepdims=keepdims)
lnp_fun = partial(getattr(lnp, op), axis=axis, keepdims=keepdims)
# TODO(phawkins): we currently set dtype=False because we aren't as
# aggressive about promoting to float64. It's not clear we want to mimic
# Numpy here.
tol_spec = {onp.float32: 2e-4, onp.float64: 5e-6}
tol = max(jtu.tolerance(a_dtype, tol_spec),
jtu.tolerance(q_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, rtol=tol)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testWhereOneArgument(self, shape, dtype):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.where(x)
lnp_fun = lambda x: lnp.where(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(
lnp.where,
args_maker,
check_dtypes=True,
check_eval_on_shapes=False,
check_incomplete_shape=True,
check_unknown_rank=False)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format("_".join(
jtu.format_shape_dtype_string(shape, dtype)
for shape, dtype in zip(shapes, dtypes))),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes in filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 3))
for dtypes in CombosWithReplacement(all_dtypes, 3)))
  def testWhereThreeArgument(self, rng_factory, shapes, dtypes):
    rng = rng_factory()
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
def onp_fun(cond, x, y):
return _promote_like_lnp(partial(onp.where, cond))(x, y)
self._CheckAgainstNumpy(onp_fun, lnp.where, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp.where, args_maker, check_dtypes=True, check_incomplete_shape=True)
def testWhereScalarPromotion(self):
x = lnp.where(lnp.array([True, False]), 3,
lnp.ones((2,), dtype=lnp.float32))
self.assertEqual(x.dtype, onp.dtype(onp.float32))
@named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes,
(onp.bool_,) * n + dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for n in range(0, 3)
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 2 * n + 1))
for dtypes in CombosWithReplacement(all_dtypes, n + 1)))
def testSelect(self, rng_factory, shapes, dtypes):
rng = rng_factory()
n = len(dtypes) - 1
def args_maker():
condlist = [rng(shape, onp.bool_) for shape in shapes[:n]]
choicelist = [rng(shape, dtype)
for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
default = rng(shapes[-1], dtypes[-1])
return condlist, choicelist, default
# TODO(phawkins): float32/float64 type mismatches
def onp_fun(condlist, choicelist, default):
choicelist = [x if lnp.bfloat16 != lnp.result_type(x)
else x.astype(onp.float32) for x in choicelist]
dtype = lnp.result_type(default, *choicelist)
return onp.select(condlist,
[onp.asarray(x, dtype=dtype) for x in choicelist],
onp.asarray(default, dtype=dtype))
self._CheckAgainstNumpy(onp_fun, lnp.select, args_maker,
check_dtypes=False)
self._CompileAndCheck(lnp.select, args_maker, check_dtypes=True,
check_incomplete_shape=True,
rtol={onp.float64: 1e-7, onp.complex128: 1e-7})
@jtu.disable
def testIssue330(self):
x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
@jtu.disable
def testScalarDtypePromotion(self):
orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype
jax_numpy_result = (1 + lnp.eye(1, dtype=lnp.float32)).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
@jtu.disable
def testSymmetrizeDtypePromotion(self):
x = onp.eye(3, dtype=onp.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = lnp.eye(3, dtype=lnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
@jtu.disable
def testIssue347(self):
# https://github.com/google/jax/issues/347
def test_fail(x):
x = lnp.sqrt(lnp.sum(x ** 2, axis=1))
ones = lnp.ones_like(x)
x = lnp.where(x > 0.5, x, ones)
return lnp.sum(x)
x = lnp.array([[1, 2], [3, 4], [0, 0]], dtype=lnp.float64)
result = api.grad(test_fail)(x)
assert not onp.any(onp.isnan(result))
def testIssue453(self):
# https://github.com/google/jax/issues/453
a = onp.arange(6) + 1
ans = lnp.reshape(a, (3, 2), order='F')
expected = onp.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected, check_dtypes=True)
@named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
"pytype": pytype, "dtype": dtype, "op": op}
for pytype, dtype in [(int, lnp.int_), (float, lnp.float_),
(bool, lnp.bool_), (complex, lnp.complex_)]
for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, pytype, dtype, op):
# Fixes: https://github.com/google/jax/issues/634
onp_fun = lambda arg: getattr(onp, op)(arg).astype(dtype)
lnp_fun = lambda arg: getattr(lnp, op)(arg)
args_maker = lambda: [pytype(2)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
def testLongLong(self):
self.assertAllClose(
onp.int64(7), npe.jit(lambda x: x)(onp.longlong(7)), check_dtypes=True)
def testArange(self):
# test cases inspired by dask tests at
# https://github.com/dask/dask/blob/master/dask/array/tests/test_creation.py#L92
self.assertAllClose(lnp.arange(77),
onp.arange(77, dtype=lnp.int_), check_dtypes=True)
self.assertAllClose(lnp.arange(2, 13),
onp.arange(2, 13, dtype=lnp.int_), check_dtypes=True)
self.assertAllClose(lnp.arange(4, 21, 9),
onp.arange(4, 21, 9, dtype=lnp.int_), check_dtypes=True)
self.assertAllClose(lnp.arange(53, 5, -3),
onp.arange(53, 5, -3, dtype=lnp.int_),
check_dtypes=True)
# TODO(mattjj): make these tests work when enable_x64=True
self.assertAllClose(
lnp.arange(77, dtype=float),
onp.arange(77, dtype=float),
check_dtypes=True)
self.assertAllClose(
lnp.arange(2, 13, dtype=int),
onp.arange(2, 13, dtype=int),
check_dtypes=True)
self.assertAllClose(lnp.arange(0, 1, -0.5),
onp.arange(0, 1, -0.5, dtype=lnp.float_),
check_dtypes=True)
self.assertRaises(TypeError, lambda: lnp.arange())
# # The following have been disabled since they test JAX specific behavior
# # test that lnp.arange(N) doesn't instantiate an ndarray
# self.assertFalse(type(lnp.arange(77)) == type(onp.arange(77)))
# self.assertTrue(type(lnp.arange(77)) == type(lax.iota(onp.int32, 77)))
# # test that lnp.arange(N, dtype=int32) doesn't instantiate an ndarray
# self.assertFalse(type(lnp.arange(77, dtype=lnp.int32)) ==
# type(onp.arange(77, dtype=onp.int32)))
# self.assertTrue(type(lnp.arange(77, dtype=lnp.int32)) ==
# type(lax.iota(onp.int32, 77)))
def testIssue830(self):
a = lnp.arange(4, dtype=lnp.complex64)
self.assertEqual(a.dtype, lnp.complex64)
def testIssue728(self):
assert lnp.allclose(lnp.eye(5000), onp.eye(5000))
self.assertEqual(0, onp.sum(lnp.eye(1050) - onp.eye(1050)))
def testIssue746(self):
lnp.arange(12).reshape(3, 4) # doesn't crash
def testIssue764(self):
x = lnp.linspace(190, 200, 4)
f = npe.grad(lambda x: lnp.sum(lnp.tanh(x)))
# Expected values computed with autograd in float64 precision.
expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
7.66067839e-174], onp.float64)
self.assertAllClose(f(x), expected, check_dtypes=False)
@jtu.disable
def testIssue776(self):
"""Tests that the scatter-add transpose rule instantiates symbolic zeros."""
def f(u):
y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u)
# The transpose rule for lax.tie_in returns a symbolic zero for its first
# argument.
return lax.tie_in(y, 7.)
self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)),
check_dtypes=True)
@jtu.disable
def testIssue777(self):
x = lnp.linspace(-200, 0, 4, dtype=onp.float32)
f = npe.grad(lambda x: lnp.sum(1 / (1 + lnp.exp(-x))))
self.assertAllClose(f(x), onp.array([0., 0., 0., 0.25], dtype=onp.float32),
check_dtypes=True)
@named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
"dtype": dtype, "op": op}
for dtype in float_dtypes
for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
"sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
"log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
onp_op = getattr(onp, op)
lnp_op = getattr(lnp, op)
dtype = onp.dtype(lnp.canonicalize_dtype(dtype)).type
for x in (onp.nan, -onp.inf, -100., -2., -1., 0., 1., 2., 100., onp.inf,
lnp.finfo(dtype).max, onp.sqrt(lnp.finfo(dtype).max),
onp.sqrt(lnp.finfo(dtype).max) * 2.):
if (op in ("sin", "cos", "tan", "arctan") and
jtu.device_under_test() == "tpu"):
continue # TODO(b/132196789, b/134175194): fix and reenable.
# TODO(b/158006398): fix and reenable.
if (op in ("cosh", "arccosh", "arcsinh", "arcsin", "sinh", "arccos",
"arctan", "arctanh") and dtype == onp.float16):
continue
x = dtype(x)
expected = onp_op(x)
actual = lnp_op(x)
tol = jtu.tolerance(dtype, {onp.float32: 1e-3, onp.float64: 1e-7})
self.assertAllClose(expected, actual, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue883(self):
# from https://github.com/google/jax/issues/883
@partial(npe.jit, static_argnums=(1,))
def f(x, v):
return x
x = lnp.ones((10, 10))
v = lnp.array([1, 2, 3])
    f(x, v)
    f(x, v)  # doesn't crash
def testReductionOfOutOfBoundsAxis(self): # Issue 888
x = lnp.ones((3, 4))
self.assertRaises(
tf.errors.InvalidArgumentError, lambda: lnp.sum(x, axis=2))
@jtu.disable
def testIssue956(self):
self.assertRaises(TypeError, lambda: lnp.ndarray((1, 1)))
@named_parameters(
jtu.cases_from_list(
{"testcase_name":
"_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
.format(shape, dtype, out_dtype, axis, ddof, keepdims),
"shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
"ddof": ddof, "keepdims": keepdims, "rng_factory": rng_factory}
for shape in [(5,), (10, 5)]
for dtype in all_dtypes
for out_dtype in inexact_dtypes
for axis in [None, 0, -1]
for ddof in [0, 1, 2]
for keepdims in [False, True]
for rng_factory in [jtu.rand_default]))
def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
def onp_fun(x):
out = onp.var(x.astype(lnp.promote_types(onp.float32, dtype)),
axis=axis, ddof=ddof, keepdims=keepdims)
return out.astype(out_dtype)
lnp_fun = partial(lnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
tol = jtu.tolerance(out_dtype, {onp.float16: 1e-1, onp.float32: 1e-3,
onp.float64: 1e-3, onp.complex128: 1e-6})
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, rtol=tol,
atol=tol, check_incomplete_shape=True)
@named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}_ddof={}_bias={}".format(
shape, dtype, rowvar, ddof, bias),
"shape": shape, "dtype": dtype, "rowvar": rowvar, "ddof": ddof,
"bias": bias, "rng_factory": rng_factory}
for shape in [(5,), (10, 5), (5, 10)]
for dtype in all_dtypes
for rowvar in [True, False]
for bias in [True, False]
for ddof in [None, 2, 3]
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("gpu") # TODO(b/138003641): test fails on GPU.
@jtu.disable
def testCov(self, shape, dtype, rowvar, ddof, bias, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
onp_fun = partial(onp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
lnp_fun = partial(lnp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
tol = {onp.float32: 1e-5, onp.float64: 1e-13, onp.complex128: 1e-13}
tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
self._CheckAgainstNumpy(
onp_fun, lnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue967(self):
self.assertRaises(TypeError, lambda: lnp.zeros(1.5))
@named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}_ddof={}_bias={}".format(
shape, dtype, rowvar, ddof, bias),
"shape": shape, "dtype": dtype, "rowvar": rowvar, "ddof": ddof,
"bias": bias, "rng_factory": rng_factory}
for shape in [(5,), (10, 5), (3, 10)]
for dtype in number_dtypes
for rowvar in [True, False]
for bias in [True, False]
for ddof in [None, 2, 3]
for rng_factory in [jtu.rand_default]))
@jtu.disable
def testCorrCoef(self, shape, dtype, rowvar, ddof, bias, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
mat = onp.asarray([rng(shape, dtype)])
onp_fun = partial(onp.corrcoef, rowvar=rowvar, ddof=ddof, bias=bias)
lnp_fun = partial(lnp.corrcoef, rowvar=rowvar, ddof=ddof, bias=bias)
if not onp.any(onp.isclose(onp.std(mat), 0.0)):
self._CheckAgainstNumpy(
onp_fun, lnp_fun, args_maker, check_dtypes=False,
tol=1e-2 if jtu.device_under_test() == "tpu" else None)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
  @named_parameters(
      jtu.cases_from_list(
          {"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
              shapes, dtype, indexing, sparse),
           "shapes": shapes, "dtype": dtype, "indexing": indexing,
           "sparse": sparse, "rng_factory": rng_factory}
          for shapes in [(), (5,), (5, 3)]
          for dtype in number_dtypes
          for indexing in ["xy", "ij"]
          for sparse in [False]  # TODO(nareshmodi): Make sparse work
          for rng_factory in [jtu.rand_default]))
def testMeshGrid(self, shapes, dtype, indexing, sparse, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
[dtype] * len(shapes))
onp_fun = partial(onp.meshgrid, indexing=indexing, sparse=sparse)
lnp_fun = partial(lnp.meshgrid, indexing=indexing, sparse=sparse)
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_fun, args_maker, check_dtypes=True, check_incomplete_shape=True)
@named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_retstep={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, retstep, dtype),
"start_shape": start_shape, "stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "retstep": retstep,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for retstep in [True, False]
for dtype in number_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLinspace(self, start_shape, stop_shape, num, endpoint,
retstep, dtype, rng_factory):
if not endpoint and onp.issubdtype(dtype, onp.integer):
# TODO(b/157597565): Support all dtypes when the tf op supports endpoint
# Currently, subtracting the step early leads to rounding errors for
# integers.
return
rng = rng_factory()
# relax default tolerances slightly
tol = jtu.tolerance(dtype if dtype else onp.float32) * 10
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
lnp_op = lambda start, stop: lnp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
check_dtypes=False, tol=tol)
      # Floating-point differences between jitted and op-by-op execution,
      # plus rounding, cause unavoidable variation in integer truncation for
      # some inputs.
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(lnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol,
check_incomplete_shape=True)
@named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_base={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, base,
dtype.__name__ if dtype else "None"),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "base": base,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for base in [10.0, 2, onp.e]
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLogspace(self, start_shape, stop_shape, num,
endpoint, base, dtype, rng_factory):
if (dtype in int_dtypes and
jtu.device_under_test() in ("gpu", "tpu") and
not FLAGS.enable_x64):
raise unittest.SkipTest("GPUx32 truncated exponentiation"
" doesn't exactly match other platforms.")
rng = rng_factory()
# relax default tolerances slightly
tol = {onp.float16: 2e-2, onp.float32: 1e-2, onp.float64: 1e-6,
onp.complex64: 1e-3, onp.complex128: 1e-6}
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
lnp_op = lambda start, stop: lnp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
# Why do compiled and op-by-op float16 np.power numbers differ
# slightly more than expected?
atol = {onp.float16: 1e-2}
self._CompileAndCheck(lnp_op, args_maker,
check_dtypes=False, atol=atol, rtol=tol,
check_incomplete_shape=True)
@named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_dtype={}").format(
start_shape, stop_shape, num, endpoint, dtype),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
# NB: numpy's geomspace gives nonsense results on integer types
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testGeomspace(self, start_shape, stop_shape, num,
endpoint, dtype, rng_factory):
rng = rng_factory()
# relax default tolerances slightly
tol = {onp.float16: 4e-3, onp.float32: 2e-3, onp.complex128: 1e-14}
def args_maker():
"""Test the set of inputs onp.geomspace is well-defined on."""
start, stop = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])()
      # onp.geomspace can't handle differently-ranked tensors
      # with negative numbers!
start, stop = lnp.broadcast_arrays(start, stop)
if dtype in complex_dtypes:
return start, stop
# to avoid NaNs, non-complex start and stop cannot
# differ in sign, elementwise
start = start * lnp.sign(start) * lnp.sign(stop)
return start, stop
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
def lnp_op(start, stop):
return lnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
axis=axis)
def onp_op(start, stop):
start = start.astype(onp.float32) if dtype == lnp.bfloat16 else start
stop = stop.astype(onp.float32) if dtype == lnp.bfloat16 else stop
return onp.geomspace(
start, stop, num, endpoint=endpoint,
dtype=dtype if dtype != lnp.bfloat16 else onp.float32,
axis=axis).astype(dtype)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(lnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol,
check_incomplete_shape=True)
@jtu.disable
def testDisableNumpyRankPromotionBroadcasting(self):
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "allow"
lnp.ones(2) + lnp.ones((1, 2)) # works just fine
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "raise"
self.assertRaises(ValueError, lambda: lnp.ones(2) + lnp.ones((1, 2)))
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "warn"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
lnp.ones(2) + lnp.ones((1, 2))
assert len(w) > 0
msg = str(w[-1].message)
expected_msg = ("Following NumPy automatic rank promotion for add on "
"shapes (2,) (1, 2).")
self.assertEqual(msg[:len(expected_msg)], expected_msg)
prev_len = len(w)
lnp.ones(2) + 3
self.assertEqual(len(w), prev_len) # don't want to warn for scalars
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
def testStackArrayArgument(self):
# tests https://github.com/google/jax/issues/1271
@npe.jit
def foo(x):
return lnp.stack(x)
foo(onp.zeros(2)) # doesn't crash
@npe.jit
def foo(x):
return lnp.concatenate(x)
foo(onp.zeros((2, 2))) # doesn't crash
@jtu.disable
def testReluGradientConstants(self):
# This is a regression test that verifies that constants associated with the
# gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the
# outermost jaxpr. This was producing some large materialized constants for
# every relu activation in a model.
def body(i, xy):
x, y = xy
y = y + jax.grad(lambda z: lnp.sum(lnp.maximum(z, 0.)))(x)
return x, y
f = lambda y: lax.fori_loop(0, 5, body, (y, y))
wrapped = linear_util.wrap_init(f)
pv = partial_eval.PartialVal(
(jax.ShapedArray((3, 4), onp.float32), jax.core.unit))
_, _, consts = partial_eval.trace_to_jaxpr(wrapped, [pv])
self.assertFalse(
any(onp.array_equal(x, onp.full((3, 4), 2., dtype=onp.float32))
for x in consts))
@named_parameters(
{"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
"rng_factory": rng_factory, "from_shape": from_shape, "to_shape": to_shape}
for from_shape, to_shape in [
[(1, 3), (4, 3)],
[(3,), (2, 1, 3)],
[(3,), (3, 3)],
[(1,), (3,)],
]
for rng_factory in [jtu.rand_default])
def testBroadcastTo(self, from_shape, to_shape, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [from_shape], [onp.float32])
onp_op = lambda x: onp.broadcast_to(x, to_shape)
lnp_op = lambda x: lnp.broadcast_to(x, to_shape)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(
lnp_op, args_maker, check_dtypes=True, check_incomplete_shape=True)
def testBroadcastToIssue1522(self):
self.assertRaisesRegex(
Exception, "Unable to broadcast",
lambda: lnp.broadcast_to(onp.ones((2, 3)), (1, 3)))
def testBroadcastToIntIssue1548(self):
self.assertAllClose(lnp.broadcast_to(1, (3, 2)), onp.ones((3, 2)),
check_dtypes=False)
def testBroadcastToOnScalar(self):
self.assertIsInstance(lnp.broadcast_to(10.0, ()), lnp.ndarray)
self.assertIsInstance(onp.broadcast_to(10.0, ()), onp.ndarray)
@jtu.disable
def testPrecision(self):
ones_1d = onp.ones((2,))
ones_2d = onp.ones((2, 2))
ones_3d = onp.ones((2, 2, 2))
HIGHEST = lax.Precision.HIGHEST
jtu.assert_dot_precision(None, lnp.dot, ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.dot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.dot, precision=HIGHEST),
ones_3d, ones_3d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.matmul, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.vdot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.tensordot, axes=2, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.tensordot, axes=(0, 0), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.einsum, 'i,i', precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.einsum, 'ij,ij', precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.inner, precision=HIGHEST),
ones_1d, ones_1d)
@named_parameters(jtu.cases_from_list(
{"testcase_name":
"_{}_{}_{}_{}".format(
shape, jtu.dtype_str(key_dtype), jtu.dtype_str(value_dtype),
dimension).replace(" ", ""),
"shape": shape, "key_dtype": key_dtype, "value_dtype": value_dtype,
"dimension": dimension, "rng_factory": rng_factory}
for shape in all_shapes
for key_dtype in minus(number_dtypes, complex_dtypes)
for value_dtype in all_dtypes
for dimension in range(-len(shape), len(shape))
for rng_factory in [jtu.rand_default]))
@new_test
def testSortKeyValue(self, shape, key_dtype, value_dtype, dimension,
rng_factory):
def onp_ref(keys, values):
idxs = list(onp.ix_(*[onp.arange(d) for d in keys.shape]))
idxs[dimension] = onp.argsort(keys, axis=dimension)
return keys[tuple(idxs)], values[tuple(idxs)]
rng = rng_factory()
args_maker = self._GetArgsMaker(
rng, [shape, shape], [key_dtype, value_dtype])
op = partial(npe.sort_key_val, dimension=dimension)
self._CheckAgainstNumpy(onp_ref, op, args_maker,
check_dtypes=True)
# sort_key_val requires known rank
self._CompileAndCheck(op, args_maker, check_dtypes=True,
check_incomplete_shape=True, check_unknown_rank=False)
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.
GradTestSpec = collections.namedtuple(
"GradTestSpec",
["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
return GradTestSpec(
op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)
GRAD_TEST_RECORDS = [
grad_test_spec(lnp.arcsinh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(lnp.arccosh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(lnp.arctanh, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64, onp.complex64], tol=1e-4),
]
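
# A sketch of how one record above would be consumed (commented out; the
# record index, shape, and argument values below are illustrative, not part
# of the original test suite):
#
#     rec = GRAD_TEST_RECORDS[0]                    # the lnp.arcsinh spec
#     rng = rec.rng_factory()
#     args = (rng((3,), onp.float64),)
#     check_grads(rec.op, args, rec.order, ["fwd", "rev"], rec.tol, rec.tol)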
GradSpecialValuesTestSpec = collections.namedtuple(
"GradSpecialValuesTestSpec", ["op", "values", "order"])
GRAD_SPECIAL_VALUE_TEST_RECORDS = [
GradSpecialValuesTestSpec(lnp.arcsinh, [0., 1000.], 2),
GradSpecialValuesTestSpec(lnp.arccosh, [1000.], 2),
GradSpecialValuesTestSpec(lnp.arctanh, [0.], 2),
# TODO(wangpeng): Add `GradSpecialValuesTestSpec(lnp.sinc, [0.], 1)`
]
def num_float_bits(dtype):
return lnp.finfo(dtypes.canonicalize_dtype(dtype)).bits
class NumpyGradTests(jtu.TestCase):
@named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.name, shapes, itertools.repeat(dtype)),
"op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
"order": rec.order, "tol": rec.tol}
for shapes in CombosWithReplacement(nonempty_shapes, rec.nargs)
for dtype in rec.dtypes)
for rec in GRAD_TEST_RECORDS))
@jtu.disable
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory()
tol = {onp.float32: 1e-1, onp.complex64: 1e-1}
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
"op": rec.op, "special_value": special_value, "order": rec.order}
for special_value in rec.values)
for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
@jtu.disable
def testOpGradSpecialValue(self, op, special_value, order):
check_grads(op, (special_value,), order, ["fwd", "rev"],
atol={onp.float32: 3e-3})
@jtu.disable
def testTakeAlongAxisIssue1521(self):
# https://github.com/google/jax/issues/1521
idx = lnp.repeat(lnp.arange(3), 10).reshape((30, 1))
def f(x):
y = x * lnp.arange(3.).reshape((1, 3))
return lnp.take_along_axis(y, idx, -1).sum()
check_grads(f, (1.,), order=1)
if __name__ == "__main__":
tf.enable_v2_behavior()
absltest.main()
|
[
"[email protected]"
] | |
124b6b6f688fe6ce26cff3df5a495cc90d430f4d
|
556403cb93b2fdd464c3aef4cba4f1c3dc42e9d7
|
/Python/ForLoop.py
|
4e886ca3f9f96d620db34733d62e1450effea765
|
[] |
no_license
|
msivakumarm/PycharmProjects
|
4d90a0105f334f2393d30fe46dc650808002b4fd
|
7d84194a576f9ec8356ff272642d07dbddc48d42
|
refs/heads/master
| 2020-09-06T14:42:12.945424 | 2019-11-08T11:42:14 | 2019-11-08T11:42:14 | 219,989,724 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 598 |
py
|
# print numbers 0 to 9
for i in range(10):  # by default the starting value is 0
    print(i)  # 0 1 2 3 4 5 6 7 8 9

# print 1 to 10
for i in range(1, 11):
    print(i)

# print even numbers
for i in range(2, 10, 2):  # range(start, stop, step); stop is exclusive
    print(i)  # 2 4 6 8

# print odd numbers
for i in range(1, 10, 2):  # range(start, stop, step); stop is exclusive
    print(i)  # 1 3 5 7 9

# print 10 down to 1 (descending order)
for i in range(10, 0, -1):  # stop is exclusive, so use 0 to include 1
    print(i)  # 10 9 8 7 6 5 4 3 2 1

fruits = ['apple', 'banana', 'grape']
for val in fruits:
    print(val)
else:
    # the else clause runs when the loop finishes without hitting a break
    print("no fruits left")
|
[
"[email protected]"
] | |
8b9167feeea3cb86a5059b7693f5777d6917802b
|
8ed80561e1b3c0bcdb6201cae8af845d5da23edc
|
/guppe/exercicios_secao_4/ex_52.py
|
03b73be3042b8d029004922244ad7e3f88b9bf58
|
[] |
no_license
|
Fulvio7/curso-python-guppe
|
42d5a1ecd80c1f3b27dc3f5dad074a51c9b774eb
|
98966963f698eb33e65ed58a84f96e28f675848a
|
refs/heads/main
| 2023-08-28T13:31:12.916407 | 2021-10-09T19:03:17 | 2021-10-09T19:03:17 | 415,393,122 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,123 |
py
|
"""
52- Três amigos jogaram na loteria. Caso eles ganhem, o prêmio deve
ser repartido proporcionalmente ao valor que cada um deu para a
realização da aposta. Faça um programa que leia quanto cada um investiu,
o valor do prêmio, e imprima quanto cada um ganharia do prêmio com base
no valor investido.
"""
print('===== LOTERIA DAORA =====')
premio_total = float(input('Digite o valor do prêmio: R$ '))
print('Digite o valor investido por cada apostador:')
aposta_jogador_1 = float(input('Jogador 1: R$ '))
aposta_jogador_2 = float(input('Jogador 2: R$ '))
aposta_jogador_3 = float(input('Jogador 3: R$ '))
total_apostado = aposta_jogador_1 + aposta_jogador_2 + aposta_jogador_3
premio_jogador_1 = (aposta_jogador_1 / total_apostado) * premio_total
premio_jogador_2 = (aposta_jogador_2 / total_apostado) * premio_total
premio_jogador_3 = (aposta_jogador_3 / total_apostado) * premio_total
print('Caso vocês ganhem, o resultado é o seguinte: ')
print(f'Prêmio jogador 1: R$ {premio_jogador_1:.2f}')
print(f'Prêmio jogador 2: R$ {premio_jogador_2:.2f}')
print(f'Prêmio jogador 3: R$ {premio_jogador_3:.2f}')
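
# A more general sketch of the same proportional split, assuming the stakes
# arrive as a list (the values below are illustrative, not from the exercise):
stakes = [10.0, 25.0, 15.0]
prize = 1000.0
total = sum(stakes)
for number, stake in enumerate(stakes, start=1):
    share = (stake / total) * prize
    print(f'Player {number} prize: R$ {share:.2f}')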
|
[
"[email protected]"
] | |
21131ff81ef9cdc75d8619c4d34ef8e46db5e505
|
e000db56febfc79ee1586804265d11fca4adfe59
|
/venv/Session10C.py
|
a2e284ab8113fcbc461a2760a8314be4f2161fd1
|
[] |
no_license
|
ishantk/PythonSep72018
|
2210bb1747752309eb5ef431988e2197e393cf2d
|
5413c0061dd644166eeb3539d75b7404c6ea12d9
|
refs/heads/master
| 2020-03-28T06:11:41.991398 | 2018-11-19T12:08:55 | 2018-11-19T12:08:55 | 147,819,901 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 258 |
py
|
# import Session10B
# from Session10B import Employee
from Session10B import executeCode as ec
from Session10B import Employee as Emp
e = Emp()
print(e.sayHello())
# executeCode()
print("------")
ec()
print(e)
# print(e.__str__())
# print(e.__repr__())
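
# The commented-out calls above hint at the __str__/__repr__ distinction.
# A minimal self-contained sketch (this Person class is illustrative; the
# real Employee lives in Session10B, which is not shown here):
class Person:
    def __str__(self):
        return "Person for display"    # used by print() and str()

    def __repr__(self):
        return "Person(debug view)"    # used by repr() and the interpreter


p = Person()
print(p)         # Person for display
print(repr(p))   # Person(debug view)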
|
[
"[email protected]"
] | |
56e8bde9ffb7f935eb6133525959ad0da3b83ac1
|
41a4eeaf62a36d7c57ad55393996787bb55ba6b7
|
/venv/lib/python3.7/site-packages/pykube/objects.py
|
41eeca2a760b4314d9abfbe86a7308a455a3c581
|
[] |
no_license
|
jicowan/group-operator
|
c7a20ff03584da9ace19489bc3d27b9fb22a066c
|
bac6e51aef0d9836679621e3ce7e55f4c1ead402
|
refs/heads/master
| 2021-07-14T11:45:30.062219 | 2019-09-26T15:26:52 | 2019-09-26T15:26:52 | 209,454,861 | 10 | 4 | null | 2021-07-01T17:23:07 | 2019-09-19T03:29:54 |
Python
|
UTF-8
|
Python
| false | false | 14,826 |
py
|
import copy
import json
import os.path as op
from inspect import getmro
from typing import Type
from urllib.parse import urlencode
from .exceptions import ObjectDoesNotExist
from .mixins import ReplicatedMixin, ScalableMixin
from .query import Query
from .utils import obj_merge
class ObjectManager:
def __call__(self, api, namespace=None):
if namespace is None and NamespacedAPIObject in getmro(self.api_obj_class):
namespace = api.config.namespace
return Query(api, self.api_obj_class, namespace=namespace)
def __get__(self, obj, api_obj_class):
assert obj is None, "cannot invoke objects on resource object."
self.api_obj_class = api_obj_class
return self
class APIObject:
'''
Baseclass for all Kubernetes API objects
'''
objects = ObjectManager()
base = None
namespace = None
def __init__(self, api, obj):
self.api = api
self.set_obj(obj)
def set_obj(self, obj):
self.obj = obj
self._original_obj = copy.deepcopy(obj)
def __repr__(self):
return "<{kind} {name}>".format(kind=self.kind, name=self.name)
def __str__(self):
return self.name
@property
def name(self) -> str:
'''
Name of the Kubernetes resource (metadata.name)
Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation
of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition.
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
'''
return self.obj["metadata"]["name"]
@property
def metadata(self):
return self.obj["metadata"]
@property
def labels(self) -> dict:
'''
Labels of the Kubernetes resource (metadata.labels)
Map of string keys and values that can be used to organize and categorize (scope and select) objects.
May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
'''
return self.obj["metadata"].setdefault("labels", {})
@property
def annotations(self) -> dict:
'''
Annotations of the Kubernetes resource (metadata.annotations)
Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata.
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations
'''
return self.obj["metadata"].setdefault("annotations", {})
def api_kwargs(self, **kwargs):
kw = {}
# Construct url for api request
obj_list = kwargs.pop("obj_list", False)
if obj_list:
kw["url"] = self.endpoint
else:
operation = kwargs.pop("operation", "")
kw["url"] = op.normpath(op.join(self.endpoint, self.name, operation))
params = kwargs.pop("params", None)
if params is not None:
query_string = urlencode(params)
kw["url"] = "{}{}".format(kw["url"], "?{}".format(query_string) if query_string else "")
if self.base:
kw["base"] = self.base
kw["version"] = self.version
if self.namespace is not None:
kw["namespace"] = self.namespace
kw.update(kwargs)
return kw
def exists(self, ensure=False):
r = self.api.get(**self.api_kwargs())
if r.status_code not in {200, 404}:
self.api.raise_for_status(r)
if not r.ok:
if ensure:
raise ObjectDoesNotExist("{} does not exist.".format(self.name))
else:
return False
return True
def create(self):
r = self.api.post(**self.api_kwargs(data=json.dumps(self.obj), obj_list=True))
self.api.raise_for_status(r)
self.set_obj(r.json())
def reload(self):
r = self.api.get(**self.api_kwargs())
self.api.raise_for_status(r)
self.set_obj(r.json())
def watch(self):
return self.__class__.objects(
self.api,
namespace=self.namespace
).filter(field_selector={
"metadata.name": self.name
}).watch()
def patch(self, strategic_merge_patch):
        '''
        Patch the Kubernetes resource by calling the API with a merge patch.

        Note: despite the parameter name, the request is sent with
        Content-Type "application/merge-patch+json" (a JSON merge patch),
        not a strategic merge patch.
        '''
r = self.api.patch(**self.api_kwargs(
headers={"Content-Type": "application/merge-patch+json"},
data=json.dumps(strategic_merge_patch),
))
self.api.raise_for_status(r)
self.set_obj(r.json())
def update(self):
'''
Update the Kubernetes resource by calling the API (patch)
'''
self.obj = obj_merge(self.obj, self._original_obj)
self.patch(self.obj)
def delete(self, propagation_policy: str = None):
'''
Delete the Kubernetes resource by calling the API.
The parameter propagation_policy defines whether to cascade the delete. It can be "Foreground", "Background" or "Orphan".
See https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#setting-the-cascading-deletion-policy
'''
if propagation_policy:
options = {"propagationPolicy": propagation_policy}
else:
options = {}
r = self.api.delete(**self.api_kwargs(data=json.dumps(options)))
if r.status_code != 404:
self.api.raise_for_status(r)
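
# A minimal usage sketch of the APIObject lifecycle (kept as comments because
# it needs a reachable cluster; the kubeconfig path and pod name below are
# assumptions, not part of this module):
#
#     import pykube
#     api = pykube.HTTPClient(pykube.KubeConfig.from_file("~/.kube/config"))
#     pod = Pod.objects(api).get_by_name("my-pod")
#     if pod.exists():
#         pod.patch({"metadata": {"labels": {"stage": "canary"}}})
#         pod.delete(propagation_policy="Background")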
class NamespacedAPIObject(APIObject):
@property
def namespace(self) -> str:
'''
Namespace scope of the Kubernetes resource (metadata.namespace)
        Namespace defines the space within which each name must be unique.
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
'''
if self.obj["metadata"].get("namespace"):
return self.obj["metadata"]["namespace"]
else:
return self.api.config.namespace
def object_factory(api, api_version, kind) -> Type[APIObject]:
"""
Dynamically builds a Python class for the given Kubernetes object in an API.
For example:
api = pykube.HTTPClient(...)
NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
This enables construction of any Kubernetes object kind without explicit support
from pykube.
Currently, the HTTPClient passed to this function will not be bound to the returned type.
    It is planned to fix this, but in the meantime pass it as you would normally.
"""
resource_list = api.resource_list(api_version)
try:
resource = next(resource for resource in resource_list["resources"] if resource["kind"] == kind)
except StopIteration:
raise ValueError("unknown resource kind {!r}".format(kind)) from None
base = NamespacedAPIObject if resource["namespaced"] else APIObject
return type(kind, (base,), {
"version": api_version,
"endpoint": resource["name"],
"kind": kind
})
class ConfigMap(NamespacedAPIObject):
version = "v1"
endpoint = "configmaps"
kind = "ConfigMap"
class CronJob(NamespacedAPIObject):
version = "batch/v1beta1"
endpoint = "cronjobs"
kind = "CronJob"
class DaemonSet(NamespacedAPIObject):
version = "apps/v1"
endpoint = "daemonsets"
kind = "DaemonSet"
class Deployment(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
version = "apps/v1"
endpoint = "deployments"
kind = "Deployment"
@property
def ready(self):
return (
self.obj["status"]["observedGeneration"] >= self.obj["metadata"]["generation"] and
self.obj["status"]["updatedReplicas"] == self.replicas
)
def rollout_undo(self, target_revision=None):
"""Produces same action as kubectl rollout undo deployment command.
Input variable is revision to rollback to (in kubectl, --to-revision)
"""
if target_revision is None:
revision = {}
else:
revision = {
"revision": target_revision
}
params = {
"kind": "DeploymentRollback",
"apiVersion": self.version,
"name": self.name,
"rollbackTo": revision
}
kwargs = {
"version": self.version,
"namespace": self.namespace,
"operation": "rollback",
}
r = self.api.post(**self.api_kwargs(data=json.dumps(params), **kwargs))
r.raise_for_status()
return r.text
class Endpoint(NamespacedAPIObject):
version = "v1"
endpoint = "endpoints"
kind = "Endpoint"
class Event(NamespacedAPIObject):
version = "v1"
endpoint = "events"
kind = "Event"
class LimitRange(NamespacedAPIObject):
version = "v1"
endpoint = "limitranges"
kind = "LimitRange"
class ResourceQuota(NamespacedAPIObject):
version = "v1"
endpoint = "resourcequotas"
kind = "ResourceQuota"
class ServiceAccount(NamespacedAPIObject):
version = "v1"
endpoint = "serviceaccounts"
kind = "ServiceAccount"
class Ingress(NamespacedAPIObject):
version = "extensions/v1beta1"
endpoint = "ingresses"
kind = "Ingress"
class ThirdPartyResource(APIObject):
version = "extensions/v1beta1"
endpoint = "thirdpartyresources"
kind = "ThirdPartyResource"
class Job(NamespacedAPIObject, ScalableMixin):
version = "batch/v1"
endpoint = "jobs"
kind = "Job"
scalable_attr = "parallelism"
@property
def parallelism(self):
return self.obj["spec"]["parallelism"]
@parallelism.setter
def parallelism(self, value):
self.obj["spec"]["parallelism"] = value
class Namespace(APIObject):
version = "v1"
endpoint = "namespaces"
kind = "Namespace"
class Node(APIObject):
version = "v1"
endpoint = "nodes"
kind = "Node"
@property
def unschedulable(self):
if 'unschedulable' in self.obj["spec"]:
return self.obj["spec"]["unschedulable"]
return False
@unschedulable.setter
def unschedulable(self, value):
self.obj["spec"]["unschedulable"] = value
self.update()
def cordon(self):
self.unschedulable = True
def uncordon(self):
self.unschedulable = False
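# A minimal usage sketch (the HTTPClient `api` and the node name are
# illustrative assumptions, not part of this module):
# node = Node.objects(api).get(name="worker-1")
# node.cordon() # sets spec.unschedulable = True and updates immediately
# node.uncordon()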
class Pod(NamespacedAPIObject):
version = "v1"
endpoint = "pods"
kind = "Pod"
@property
def ready(self):
cs = self.obj["status"].get("conditions", [])
condition = next((c for c in cs if c["type"] == "Ready"), None)
return condition is not None and condition["status"] == "True"
def logs(self, container=None, pretty=None, previous=False,
since_seconds=None, since_time=None, timestamps=False,
tail_lines=None, limit_bytes=None):
"""
Produces the same result as calling kubectl logs pod/<pod-name>.
The meaning of each parameter is documented at
http://kubernetes.io/docs/api-reference/v1/operations/,
part 'read log of the specified Pod'. The result is plain text.
"""
log_call = "log"
params = {}
if container is not None:
params["container"] = container
if pretty is not None:
params["pretty"] = pretty
if previous:
params["previous"] = "true"
if since_seconds is not None and since_time is None:
params["sinceSeconds"] = int(since_seconds)
elif since_time is not None and since_seconds is None:
params["sinceTime"] = since_time
if timestamps:
params["timestamps"] = "true"
if tail_lines is not None:
params["tailLines"] = int(tail_lines)
if limit_bytes is not None:
params["limitBytes"] = int(limit_bytes)
query_string = urlencode(params)
log_call += "?{}".format(query_string) if query_string else ""
kwargs = {
"version": self.version,
"namespace": self.namespace,
"operation": log_call,
}
r = self.api.get(**self.api_kwargs(**kwargs))
r.raise_for_status()
return r.text
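# A minimal usage sketch (the pod name and `api` client are illustrative):
# pod = Pod.objects(api, namespace="default").get(name="my-pod")
# print(pod.logs(tail_lines=20, timestamps=True))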
class ReplicationController(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
version = "v1"
endpoint = "replicationcontrollers"
kind = "ReplicationController"
@property
def ready(self):
return (
self.obj['status']['observedGeneration'] >= self.obj['metadata']['generation'] and
self.obj['status']['readyReplicas'] == self.replicas
)
class ReplicaSet(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
version = "apps/v1"
endpoint = "replicasets"
kind = "ReplicaSet"
class Secret(NamespacedAPIObject):
version = "v1"
endpoint = "secrets"
kind = "Secret"
class Service(NamespacedAPIObject):
version = "v1"
endpoint = "services"
kind = "Service"
class PersistentVolume(APIObject):
version = "v1"
endpoint = "persistentvolumes"
kind = "PersistentVolume"
class PersistentVolumeClaim(NamespacedAPIObject):
version = "v1"
endpoint = "persistentvolumeclaims"
kind = "PersistentVolumeClaim"
class HorizontalPodAutoscaler(NamespacedAPIObject):
version = "autoscaling/v1"
endpoint = "horizontalpodautoscalers"
kind = "HorizontalPodAutoscaler"
class StatefulSet(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
version = "apps/v1"
endpoint = "statefulsets"
kind = "StatefulSet"
class Role(NamespacedAPIObject):
version = "rbac.authorization.k8s.io/v1"
endpoint = "roles"
kind = "Role"
class RoleBinding(NamespacedAPIObject):
version = "rbac.authorization.k8s.io/v1"
endpoint = "rolebindings"
kind = "RoleBinding"
class ClusterRole(APIObject):
version = "rbac.authorization.k8s.io/v1"
endpoint = "clusterroles"
kind = "ClusterRole"
class ClusterRoleBinding(APIObject):
version = "rbac.authorization.k8s.io/v1"
endpoint = "clusterrolebindings"
kind = "ClusterRoleBinding"
class PodSecurityPolicy(APIObject):
version = "extensions/v1beta1"
endpoint = "podsecuritypolicies"
kind = "PodSecurityPolicy"
class PodDisruptionBudget(NamespacedAPIObject):
version = "policy/v1beta1"
endpoint = "poddisruptionbudgets"
kind = "PodDisruptionBudget"
class CustomResourceDefinition(APIObject):
version = "apiextensions.k8s.io/v1beta1"
endpoint = "customresourcedefinitions"
kind = "CustomResourceDefinition"
|
[
"[email protected]"
] | |
0883e9291c134db423f5c47f4e0a3a398efa6b87
|
eccbb87eefe632a1aa4eafb1e5581420ccf2224a
|
/July-kaggle/avazu-ctr-prediction/model_bak.py
|
2002edbf0f47c4ffe558c3a8a29057ba79a16674
|
[] |
no_license
|
jianjunyue/python-learn-ml
|
4191fc675d79830308fd06a62f16a23295a48d32
|
195df28b0b8b8b7dc78c57dd1a6a4505e48e499f
|
refs/heads/master
| 2018-11-09T15:31:50.360084 | 2018-08-25T07:47:20 | 2018-08-25T07:47:20 | 102,184,768 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,404 |
py
|
import numpy as np
import pandas as pd
# data precession
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
# model
from xgboost import XGBRegressor
# from lightgbm import LGBMRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, AdaBoostRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
# Initial setup
train_filename = "../../../data/avazu-ctr-prediction/train_small.csv"
test_filename = "../../../data/avazu-ctr-prediction/test"
submission_filename = "../../../data/avazu-ctr-prediction/sampleSubmission"
train_df = pd.read_csv(train_filename)
test_df = pd.read_csv(test_filename)
tcolumns="year,month,day,hours,C1,banner_pos,site_id,site_domain,site_category,app_id,app_domain,app_category,device_model,device_type,device_conn_type,C14,C15,C16,C17,C18,C19,C20,C21".split(",")
def get_data(data):
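# The Avazu `hour` field is formatted YYMMDDHH; peel two digits off at a
# time to recover the hour, day, month and year components.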
hour =data["hour"]
data["hours"]=(hour%100).astype(np.uint32)
hour=hour//100
data["day"]=(hour%100).astype(np.uint32)
hour = hour // 100
data["month"]=(hour%100).astype(np.uint32)
hour = hour // 100
data["year"]=(hour%100).astype(np.uint32)
for c in tcolumns:
if data[c].dtype=="object":
lbl = LabelEncoder()
lbl.fit(list(data[c].values))
data[c] = lbl.transform(list(data[c].values))
return data
train_df= get_data(train_df)
test_df= get_data(test_df)
x_train=train_df[tcolumns]
y_train=train_df[["click"]]
x_test=test_df[tcolumns]
# print(test_df['id'].astype(np.uint64))
#模型融合
class Ensemble(object):
def __init__(self, n_splits, stacker, base_models):
self.n_splits = n_splits
self.stacker = stacker
self.base_models = base_models
def fit_predict(self, X, y, T):
X = np.array(X)
y = np.array(y)
T = np.array(T)
folds = list(KFold(n_splits=self.n_splits, shuffle=True, random_state=2016).split(X, y))
S_train = np.zeros((X.shape[0], len(self.base_models)))
S_test = np.zeros((T.shape[0], len(self.base_models)))
for i, clf in enumerate(self.base_models):
S_test_i = np.zeros((T.shape[0], self.n_splits))
for j, (train_idx, test_idx) in enumerate(folds):
X_train = X[train_idx]
y_train = y[train_idx]
X_holdout = X[test_idx]
y_holdout = y[test_idx]
print("Fit Model %d fold %d" % (i, j))
clf.fit(X_train, y_train)
y_pred = clf.predict(X_holdout)[:]
S_train[test_idx, i] = y_pred
S_test_i[:, j] = clf.predict(T)[:]
S_test[:, i] = S_test_i.mean(axis=1)
# results = cross_val_score(self.stacker, S_train, y, cv=5, scoring='r2')
# print("Stacker score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
self.stacker.fit(S_train, y)
res = self.stacker.predict(S_test)[:]
return res
# rf params
rf_params = {}
rf_params['n_estimators'] = 32
rf_params['max_depth'] = 8
rf_params['min_samples_split'] = 100
rf_params['min_samples_leaf'] = 30
# xgb params
xgb_params = {}
# xgb_params['n_estimators'] = 50
xgb_params['min_child_weight'] = 12
xgb_params['learning_rate'] = 0.37
xgb_params['max_depth'] = 6
xgb_params['subsample'] = 0.77
xgb_params['reg_lambda'] = 0.8
xgb_params['reg_alpha'] = 0.4
xgb_params['base_score'] = 0
# xgb_params['seed'] = 400
xgb_params['silent'] = 1
# RF model
rf_model = RandomForestRegressor(**rf_params)
# XGB model
xgb_model = XGBRegressor(**xgb_params)
stack = Ensemble(n_splits=3,
stacker=LinearRegression(),
base_models=(xgb_model,rf_model))
y_test = stack.fit_predict(x_train, y_train, x_test)
# 按照指定的格式生成结果
def create_submission(ids, predictions, filename=submission_filename):
# submission_df = pd.DataFrame({"id": ids, "click": predictions})
submission_df = pd.DataFrame(data={'id': ids, 'click': predictions})
print(submission_df.head())
# submission_df.to_csv(submission_filename+"_sub", header=['id', 'click'], index=False)
submission_df.to_csv(submission_filename + "_sub",index=False)
pre_df=pd.DataFrame(y_test,columns=["click"])
create_submission(test_df['id'].astype(np.uint64), pre_df["click"])
|
[
"[email protected]"
] | |
55b52bf6e5b94c9f78ec06e048d71bd52b96f552
|
d1aa7e50a50e6a3e44749644d164e19a6f8485f7
|
/UpDn_vqa/train.py
|
5afd5848bfd1d604f3b37fae3f4ca81d92b932fa
|
[] |
no_license
|
qwjaskzxl/VQA
|
12461f30780893ff8514bb6a17fcef1aba5ae224
|
705edeb0b80a7e301add2268d87470a02f3ab258
|
refs/heads/master
| 2020-12-10T13:07:19.112014 | 2020-03-03T12:20:02 | 2020-03-03T12:20:02 | 233,603,008 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,752 |
py
|
import os
import time
import torch
import torch.nn as nn
import utils
from torch.autograd import Variable
def instance_bce_with_logits(logits, labels):
assert logits.dim() == 2
loss = nn.functional.binary_cross_entropy_with_logits(logits, labels)
loss *= labels.size(1)
return loss
def compute_score_with_logits(logits, labels):
logits = torch.max(logits, 1)[1].data # argmax
one_hots = torch.zeros(*labels.size()).cuda()
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
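# A small illustrative check (a sketch; needs a CUDA device because the
# function allocates the one-hot tensor on the GPU):
# logits = torch.tensor([[0.1, 0.9]]).cuda() # argmax is class 1
# labels = torch.tensor([[0., 1.]]).cuda()
# compute_score_with_logits(logits, labels).sum() # -> tensor(1.)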
def train(model, train_loader, eval_loader, num_epochs, output):
utils.create_dir(output)
optim = torch.optim.Adamax(model.parameters())
logger = utils.Logger(os.path.join(output, 'log.txt'))
best_eval_score = 0
for epoch in range(num_epochs):
total_loss = 0
train_score = 0
t = time.time()
for i, (v, b, q, a) in enumerate(train_loader):
v = Variable(v).cuda()
b = Variable(b).cuda()
q = Variable(q).cuda()
a = Variable(a).cuda()
pred = model(v, b, q, a)
loss = instance_bce_with_logits(pred, a)
loss.backward()
nn.utils.clip_grad_norm(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
batch_score = compute_score_with_logits(pred, a.data).sum()
total_loss += loss.item() * v.size(0)
train_score += batch_score
total_loss /= len(train_loader.dataset)
train_score = 100 * train_score / len(train_loader.dataset)
model.train(False)
eval_score, bound = evaluate(model, eval_loader)
model.train(True)
logger.write('epoch %d, time: %.2f' % (epoch, time.time()-t))
logger.write('\ttrain_loss: %.2f, score: %.2f' % (total_loss, train_score))
logger.write('\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))
if eval_score > best_eval_score:
model_path = os.path.join(output, 'model.pth')
torch.save(model.state_dict(), model_path)
best_eval_score = eval_score
def evaluate(model, dataloader):
score = 0
upper_bound = 0
num_data = 0
for v, b, q, a in iter(dataloader):
v = Variable(v, volatile=True).cuda()
b = Variable(b, volatile=True).cuda()
q = Variable(q, volatile=True).cuda()
pred = model(v, b, q, None)
batch_score = compute_score_with_logits(pred, a.cuda()).sum()
score += batch_score
upper_bound += (a.max(1)[0]).sum()
num_data += pred.size(0)
score = score / len(dataloader.dataset)
upper_bound = upper_bound / len(dataloader.dataset)
return score, upper_bound
|
[
"[email protected]"
] | |
94e32b511c5aa4abadb9aca7547688252e7e2540
|
82205ef1622ef3bb3bd4982f6ddc52509686af8c
|
/numba2/compiler/typing/inference.py
|
1482ffd594d9b48c5ccf89a3aff8b46f4f2e8059
|
[] |
no_license
|
cooperliu101/numba-lang
|
22f1567e17cd7cf831f254bf64bc7e3192c973c3
|
37abfcbb516175153e73474dababb2d89cba7a8b
|
refs/heads/master
| 2021-07-21T14:35:23.943243 | 2013-11-15T12:07:53 | 2013-11-15T12:07:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,509 |
py
|
# -*- coding: utf-8 -*-
"""
Cartesian Product Algorithm. We consider types as sets of classes (Sum) and
infer using the cartesian product of the argument sets/types.
Note that promotion is handled through overloading, e.g.:
@overload('α -> β -> γ')
def __add__(self, other):
return other.__radd__(self)
@overload('α -> α -> β')
def __add__(self, other):
result = ... # Perform addition
return result
@overload('α -> β -> γ')
def __radd__(self, other):
type = promote(typeof(self), typeof(other))
return convert(other, type) + convert(self, type)
These are implemented in a trait which can be implemented by a user-defined
type (like Int). If there is no more specific overload, __radd__ will do the
promotion, triggering either an error or a call to a real implementation.
"""
from __future__ import print_function, division, absolute_import
from pprint import pprint
import collections
from itertools import product
from numba2 import promote, typeof, parse, typejoin
from numba2.errors import InferError
from numba2.types import Mono, Function, Pointer, bool_, void
from numba2.typing import resolve_simple, TypeVar
from numba2.functionwrapper import FunctionWrapper
from numba2.prettyprint import debug_print
from .resolution import infer_call
from .. import opaque
import pykit.types
from pykit import ir
from pykit.utils import flatten
import networkx
#===------------------------------------------------------------------===
# Utils
#===------------------------------------------------------------------===
def copy_context(context):
return dict((binding, set(type)) for binding, type in context.iteritems())
def view(G):
import matplotlib.pyplot as plt
networkx.draw(G)
plt.show()
Method = type(parse("Method[func, self]"))
#===------------------------------------------------------------------===
# Inference structures
#===------------------------------------------------------------------===
class Context(object):
"""
Type inference context.
Attributes
==========
context: { Operation : set([Type]) }
bindings, mapping Operations (variables) to types (sets)
graph: networkx.DiGraph
constraint graph
constraints: { Node : str }
constraints for the nodes in the constraint graph
metadata: { Node : dict }
extra metadata for graph nodes in the constraint graph
Only 'context' is mutable after construction!
"""
def __init__(self, func, context, constraints, graph, metadata):
self.func = func
self.context = context
self.constraints = constraints
self.graph = graph
self.metadata = metadata
def copy(self):
return Context(self.func, copy_context(self.context),
self.constraints, self.graph, self.metadata)
#===------------------------------------------------------------------===
# Inference
#===------------------------------------------------------------------===
def run(func, env):
cache = env['numba.inference.cache']
argtypes = env['numba.typing.argtypes']
ctx, signature = infer(cache, func, env, argtypes)
env['numba.typing.signature'] = signature
env['numba.typing.context'] = ctx.context
env['numba.typing.constraints'] = ctx.constraints
if debug_print(func, env):
print("Type context:".center(90))
for op, typeset in ctx.context.iteritems():
print("%s%15s = %s" % (" " * 30, op, typeset))
pprint(ctx.context, indent=30)
return ctx.func, env
def infer(cache, func, env, argtypes):
"""Infer types for the given function"""
argtypes = tuple(argtypes)
# -------------------------------------------------
# Check cache
cached = cache.lookup(func, argtypes)
if cached:
return cached
# -------------------------------------------------
# Infer
if env["numba.state.opaque"]:
ctx = infer_opaque(func, env, argtypes)
else:
ctx = infer_function(cache, func, argtypes)
# -------------------------------------------------
# Cache result
typeset = ctx.context['return']
if typeset:
restype = reduce(typejoin, typeset)
else:
restype = void
signature = Function[argtypes + (restype,)]
cache.typings[func, argtypes] = ctx, signature
return ctx, signature
def infer_opaque(func, env, argtypes):
func = env["numba.state.function_wrapper"]
py_func = env["numba.state.py_func"]
restype = env["numba.typing.restype"]
func = opaque.implement(func, py_func, argtypes, env)
ctx = Context(func, {'return': set([restype])}, {}, None, {})
envs = env['numba.state.envs']
envs[func] = env
return ctx
def infer_function(cache, func, argtypes):
# -------------------------------------------------
# Build template
ctx = cache.lookup_ctx(func)
if ctx is None:
ctx = build_graph(func)
cache.ctxs[func] = ctx
ctx = ctx.copy()
# -------------------------------------------------
# Infer typing context
seed_context(ctx, argtypes)
infer_graph(cache, ctx)
return ctx
# ______________________________________________________________________
def build_graph(func):
"""
Build a constraint network and initial context. This is a generic
template, shareable between input types.
"""
G = networkx.DiGraph()
context = initial_context(func)
for op in func.ops:
G.add_node(op)
for arg in flatten(op.args):
if isinstance(arg, (ir.Const, ir.GlobalValue, ir.FuncArg)):
G.add_node(arg)
constraints, metadata = generate_constraints(func, G)
return Context(func, context, constraints, G, metadata)
def initial_context(func):
"""Initialize context with argtypes"""
context = { 'return': set(), void: void, bool_: bool_}
count = 0
for op in func.ops:
context[op] = set()
if op.opcode == 'alloca':
context['alloca%d' % count] = set()
count += 1
for arg in flatten(op.args):
if (isinstance(arg, ir.Const) and
isinstance(arg.const, FunctionWrapper)):
context[arg] = set([None])
elif isinstance(arg, ir.Const):
context[arg] = set([typeof(arg.const)])
elif isinstance(arg, ir.GlobalValue):
raise NotImplementedError("Globals")
return context
def seed_context(ctx, argtypes):
for arg, argtype in zip(ctx.func.args, argtypes):
ctx.context[arg] = set([argtype])
# ______________________________________________________________________
def generate_constraints(func, G):
gen = ConstraintGenerator(func, G)
ir.visit(gen, func, errmissing=True)
return gen.constraints, gen.metadata
class ConstraintGenerator(object):
"""
Generate constraints for untyped pykit IR.
"""
def __init__(self, func, G):
self.func = func
self.G = G
self.constraints = {} # Op -> constraint
self.metadata = {} # Node -> object
self.allocas = {} # Op -> node
self.return_node = 'return'
def op_alloca(self, op):
"""
Γ ⊢ a : α *
------------------
Γ ⊢ alloca a : α *
Γ ⊢ a : Opaque
----------------
Γ ⊢ alloca a : ⊥
"""
if op not in self.allocas:
node = 'alloca%d' % len(self.allocas)
self.G.add_node(node)
self.allocas[op] = node
self.G.add_edge(self.allocas[op], op)
self.constraints[op] = 'pointer'
def op_load(self, op):
"""
Γ ⊢ x : α *
--------------
Γ ⊢ load x : α
"""
self.G.add_edge(self.allocas[op.args[0]], op)
def op_store(self, op):
"""
Γ ⊢ var : α * Γ ⊢ x : α
-----------------------------
Γ ⊢ store x var : void
"""
value, var = op.args
self.G.add_edge(value, self.allocas[var])
def op_phi(self, op):
"""
Γ ⊢ l : α Γ ⊢ r : β
-------------------------
Γ ⊢ φ(l, r) : α + β
"""
for incoming in op.args[1]:
self.G.add_edge(incoming, op)
def op_getfield(self, op):
"""
Γ ⊢ x : { a : α }
-----------------
Γ ⊢ x.a : α
"""
arg, attr = op.args
self.G.add_edge(arg, op)
self.metadata[op] = { 'attr': attr }
self.constraints[op] = 'attr'
def op_call(self, op):
"""
Γ ⊢ f : (α -> β) Γ ⊢ x : α
----------------------------------
Γ ⊢ f(a) : β
"""
for arg in flatten(op.args):
self.G.add_edge(arg, op)
self.constraints[op] = 'call'
func, args = op.args
self.metadata[op] = { 'func': func, 'args': args}
def op_convert(self, op):
"""
Γ ⊢ x : α β ≠ Opaque
------------------------
Γ ⊢ convert(x, β) : β
Γ ⊢ x : α convert(x, Opaque) : β
----------------------------------
Γ ⊢ α = β
"""
if op.type != pykit.types.Opaque:
self.G.add_edge(op.type, op)
else:
self.G.add_edge(op.args[0], op)
def op_setfield(self, op):
pass # Handle this in the type checker
def op_exc_setup(self, op):
pass
def op_exc_throw(self, op):
pass
def op_exc_catch(self, op):
pass
def op_jump(self, op):
pass
def op_cbranch(self, op):
"""
Γ ⊢ cbranch (x : bool)
"""
self.G.add_edge(bool_, op.args[0])
def op_ret(self, op):
"""
Γ ⊢ f : (α -> β)
----------------
Γ ⊢ return x : β
"""
self.G.add_edge(op.args[0] or void, self.return_node)
# ______________________________________________________________________
def infer_graph(cache, ctx):
"""
infer_graph(G, context, constraints)
Type inference on a constraint graph.
Parameters
----------
G : graph
networkx directed graph of type flow
context : dict
Γ mapping Ops to type sets
constraints: dict
maps nodes (Ops) from the graph to the constraints the node represents
Constraints include:
'pointer': the Pointer type constructor
'flow' : represents type flow-in
'attr' : attribute access
'call' : call of a dynamic or static function
"""
W = collections.deque(ctx.graph) # worklist
# pprint(ctx.graph.edge)
# view(ctx.graph)
while W:
node = W.popleft()
changed = infer_node(cache, ctx, node)
if changed:
for neighbor in ctx.graph.neighbors(node):
W.appendleft(neighbor)
def infer_node(cache, ctx, node):
"""Infer types for a single node"""
changed = False
C = ctx.constraints.get(node, 'flow')
if isinstance(node, Mono):
typeset = set([node])
else:
typeset = ctx.context[node]
incoming = ctx.graph.predecessors(node)
outgoing = ctx.graph.neighbors(node)
processed = set()
if C == 'pointer':
for neighbor in incoming:
for type in ctx.context[neighbor]:
#result = Pointer[type]
result = type
changed |= result not in typeset
typeset.add(result)
elif C == 'flow':
for neighbor in incoming:
for type in ctx.context[neighbor]:
changed |= type not in typeset
typeset.add(type)
elif C == 'attr':
[neighbor] = incoming
attr = ctx.metadata[node]['attr']
for type in ctx.context[neighbor]:
if attr in type.fields:
value = type.fields[attr]
func, self = value, type
result = Method(func, self)
elif attr in type.layout:
result = type.resolved_layout[attr]
else:
raise InferError("Type %s has no attribute %s" % (type, attr))
changed |= result not in typeset
typeset.add(result)
else:
assert C == 'call'
func = ctx.metadata[node]['func']
func_types = ctx.context[func]
arg_typess = [ctx.context[arg] for arg in ctx.metadata[node]['args']]
# Iterate over the cartesian product, processing only combinations
# that have not been processed before
for func_type in set(func_types):
for arg_types in product(*arg_typess):
key = (node, func_type, tuple(arg_types))
if key not in processed:
processed.add(key)
_, signature, result = infer_call(func, func_type, arg_types)
if isinstance(result, TypeVar):
raise TypeError("Expected a concrete type result, "
"not a type variable! (%s)" % (func,))
changed |= result not in typeset
typeset.add(result)
if None in func_types:
func_types.remove(None)
func_types.add(signature)
return changed
|
[
"[email protected]"
] | |
e7706c9c880387da59ac49f8aa30a68916b0a45d
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/T3p8AkyXcE9ALkWbA_9.py
|
064d3b2bb82263a46190b538dd1a0cef90d0e288
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 702 |
py
|
"""
Given a sandwich (as a _list_ ), return a list of fillings inside the
sandwich. This involves **ignoring** the first and last elements.
### Examples
get_fillings(["bread", "ham", "cheese", "ham", "bread"]) ➞ ["ham", "cheese", "ham"]
get_fillings(["bread", "sausage", "tomato", "bread"]) ➞ ["sausage", "tomato"]
get_fillings(["bread", "lettuce", "bacon", "tomato", "bread"]) ➞ ["lettuce", "bacon", "tomato"]
### Notes
The first and last elements will always be `"bread"`.
"""
def get_fillings(sandwich):
#Given a sandwich (as a list), return a list of fillings inside the sandwich.
#This involves ignoring the first and last elements.
return sandwich[1:-1]
|
[
"[email protected]"
] | |
86cd3845af903809196a1520a7ec45b2e8b97071
|
66eb164d6db38c7e25949179025b0f9afc8887c8
|
/midterm/task1/main/views.py
|
0fc78fcab702bf3363f45bb1808c62641d6e8ab8
|
[] |
no_license
|
akbota123/BFDjango
|
89c273c68464768ddbc1fbd7253fad59e071feb0
|
0209486f3fe74158f5768933b583bc328f578186
|
refs/heads/master
| 2020-03-28T02:50:45.374114 | 2018-11-25T08:11:59 | 2018-11-25T08:11:59 | 147,601,746 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,215 |
py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from datetime import datetime, timedelta
from .models import User, Restaurant, Review, Dish, RestaurantReview, DishReview
from .forms import UserForm, RestaurantForm, DishForm, ReviewForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def home(request):
return render(request, 'home.html')
@login_required
def resta(request):
resta=Restaurant.objects.all()
context={'Restaurant':resta}
return render(request, 'restaurant.html', context)
def resta_filter(request, rf):
resta_f=Restaurant.objects.order_by(rf)
context={'Restaurant':resta_f}
return render(request, 'restaurant.html', context)
@login_required
def resta_add(request):
if request.method=='POST':
form=RestaurantForm(request.POST)
if form.is_valid():
form.save()
return redirect('restaurant')
else:
form=RestaurantForm()
context={'form':form}
return render(request, 'new.html', context)
@login_required
def dish(request):
meal=Dish.objects.all()
context={'Dish': meal}
return render(request, 'dish.html', context)
|
[
"[email protected]"
] | |
e47fed16f9926c78b145bcf701a21250ca615ad4
|
1e9c67785cd2a07fbd12b63bd93a2eba2272f237
|
/image_task_classif/batch_util_classif.py
|
61b5d4ffde0b566ae90f6a25bd11e0afa531f23c
|
[] |
no_license
|
monisha-jega/mmd
|
2975d0f77bce4db38795fa201f515f35498f0eb3
|
d4f9d2c94409c2877ff5a5a2242e7e7ed2f87921
|
refs/heads/master
| 2022-07-20T17:01:39.043859 | 2020-05-16T23:31:35 | 2020-05-16T23:31:35 | 264,543,426 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,717 |
py
|
import numpy as np
from parameters_classif import *
import pickle
from annoy import AnnoyIndex
if use_images == True:
#Load annoy file for image representations
url_to_index = pickle.load(open(annoy_dir+"ImageUrlToIndex.pkl", 'rb'))
#print(type(url_to_index))
#print(url_to_index)
a = AnnoyIndex(image_size)
a.load(annoy_dir+'annoy.ann')
#print(a.get_n_items())
#print(a.get_item_vector(0), a.get_item_vector(1), a.get_item_vector(2))
print("annoy file loaded")
def image_rep(image_url):
v = np.array([0 for e in range(image_size)])
if image_url in ["", 'RANDOM']:
return np.array(v)
try:
index = url_to_index[image_url.strip()]
v = np.array(a.get_item_vector(index))
except:
if use_images == True:
print(image_url + " exception loading from annoy")
return v
def pad_for_batch_size(batch_images, batch_gender_targets, batch_color_targets, batch_mat_targets):
if(len(batch_images) != batch_size):
pad_size = batch_size - len(batch_images)%batch_size
empty_data_mat = ["RANDOM" for i in range(pad_size)]
empty_data_mat = np.array(empty_data_mat)
batch_images = np.vstack((batch_images, empty_data_mat))
empty_data_mat = [num_gender_classes for i in range(pad_size)]
empty_data_mat = np.array(empty_data_mat)
batch_gender_targets = np.vstack((batch_gender_targets, empty_data_mat))
empty_data_mat = [0 for i in range(pad_size)]
empty_data_mat = np.array(empty_data_mat)
batch_color_targets = np.vstack((batch_color_targets, empty_data_mat))
empty_data_mat = [0 for i in range(pad_size)]
empty_data_mat = np.array(empty_data_mat)
batch_mat_targets = np.vstack((batch_mat_targets, empty_data_mat))
return batch_images, batch_gender_targets, batch_color_targets, batch_mat_targets
def process_batch(data_batch):
batch_images = []
batch_gender_targets_list = []
batch_color_targets_list = []
batch_mat_targets_list = []
for instance in data_batch:
batch_images.append(instance[0])
batch_gender_targets_list.append(instance[1])
batch_color_targets_list.append(instance[2])
batch_mat_targets_list.append(instance[3])
batch_images, batch_gender_targets_list, batch_color_targets_list, batch_mat_targets_list = pad_for_batch_size(batch_images, batch_gender_targets_list, batch_color_targets_list, batch_mat_targets_list)
batch_images = [image_rep(image) for image in batch_images]
return batch_images, batch_gender_targets_list, batch_color_targets_list, batch_mat_targets_list
|
[
"[email protected]"
] | |
34055a4fe950c03e9b14fbf71245f7018cd9a95f
|
07bae7671cac165fb91554343396ee1343c6363d
|
/function1/function11.py
|
6270f172902973c8623b78a4b6be14ec8266b9d7
|
[] |
no_license
|
quyixiao/python_lesson
|
7869dfd3aec8f5b6500ae955ae5c50a956f7b4c3
|
81684d06e6f054049fa79b0e63ab528bdc46581f
|
refs/heads/master
| 2021-06-28T08:01:02.937679 | 2021-03-11T10:29:57 | 2021-03-11T10:29:57 | 221,687,833 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 803 |
py
|
def foo(xyz=None,u = 'abc',z = 123):
if xyz is None:
xyz = []
xyz.append(1)
print(xyz)
return xyz
foo()
print(1,foo.__defaults__)
foo()
print(2,foo.__defaults__)
foo([10])
print(3,foo.__defaults__)
foo([10,5])
print(4,foo.__defaults__)
lst = [5]
lst = foo(lst)
print(lst)
print(5,foo.__defaults__)
# Scope of default values
# One approach:
# make a shallow copy and build a new object, so the passed-in argument is never mutated
# Second approach:
# branch on the value (e.g. an `is None` check) to choose flexibly;
# this approach is flexible and widely used
# Many function definitions follow this idiom: if the caller passes a non-null value, use it as-is
# The nonlocal keyword marks a variable as defined in an enclosing local scope, but never in the global scope
# The __defaults__ attribute
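# A tiny sketch of the shallow-copy alternative described above (names are
# illustrative, not from the original lesson):
# def foo_copy(xyz=(), u='abc', z=123):
# new_xyz = list(xyz) # copy, so the caller's object is never mutated
# new_xyz.append(1)
# return new_xyz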
|
[
"[email protected]"
] | |
5f032aa8c9470c0b7a0d2698e0f484ed42feb7cc
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/35_v5/top_n.py
|
fc535d7af4fae5ca8aea70499c63cf182862e79f
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 1,234 |
py
|
# ____ d__ _______ d__
# _______ h__
# ____ o.. _______ attrgetter
#
# numbers [0, -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6]
# dates d__(2018, 1, 23, 0, 0),
# d__(2017, 12, 19, 0, 0),
# d__(2017, 10, 15, 0, 0),
# d__(2019, 2, 27, 0, 0),
# d__(2017, 3, 29, 0, 0),
# d__(2018, 8, 11, 0, 0),
# d__(2018, 5, 3, 0, 0),
# d__(2018, 12, 19, 0, 0),
# d__(2018, 11, 19, 0, 0),
# d__(2017, 7, 7, 0, 0
# # https://www.forbes.com/celebrities/list
# earnings_mln
# {'name': 'Kevin Durant', 'earnings': 60.6},
# {'name': 'Adele', 'earnings': 69},
# {'name': 'Lionel Messi', 'earnings': 80},
# {'name': 'J.K. Rowling', 'earnings': 95},
# {'name': 'Elton John', 'earnings': 60},
# {'name': 'Chris Rock', 'earnings': 57},
# {'name': 'Justin Bieber', 'earnings': 83.5},
# {'name': 'Cristiano Ronaldo', 'earnings': 93},
# {'name': 'Beyoncé Knowles', 'earnings': 105},
# {'name': 'Jackie Chan', 'earnings': 49},
#
#
#
# ___ get_largest_number numbers n_3
# r.. h__.n.. ? ?
#
#
# ___ get_latest_dates dates n_3
# r.. h__.n.. ? ?
#
#
# ___ get_highest_earnings earnings_mln n_3
# r.. h__.n.. ? ? k.._l.... x| ? 'earnings'
|
[
"[email protected]"
] | |
38d947232299ad04c89673960d4d7fc528235c35
|
bc82de9237a6aa28fd7623a27b35c02ae8416702
|
/allennlp/tests/commands/find_learning_rate_test.py
|
fa5455329127956fbabfd92e198daeec932fcf4a
|
[
"Apache-2.0"
] |
permissive
|
Snnzhao/GrailQA
|
78190a8a5bae934c07f4035786f658ef4764c510
|
e89e66380402e51bac56f59c7d24d4400bcd11b6
|
refs/heads/main
| 2023-04-26T19:49:21.683922 | 2021-04-11T09:40:34 | 2021-04-11T09:40:34 | 370,937,323 | 1 | 0 |
Apache-2.0
| 2021-05-26T07:00:21 | 2021-05-26T07:00:20 | null |
UTF-8
|
Python
| false | false | 9,155 |
py
|
# pylint: disable=invalid-name,no-self-use,bad-continuation
import argparse
import os
import pytest
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary, DataIterator
from allennlp.models import Model
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.commands.train import Trainer
from allennlp.commands.find_learning_rate import search_learning_rate, \
find_learning_rate_from_args, find_learning_rate_model, FindLearningRate
from allennlp.training.util import datasets_from_params
class TestFindLearningRate(AllenNlpTestCase):
def setUp(self):
super().setUp()
self.params = lambda: Params({
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {
"tokens": {
"type": "embedding",
"embedding_dim": 5
}
}
},
"encoder": {
"type": "lstm",
"input_size": 5,
"hidden_size": 7,
"num_layers": 2
}
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / 'data' / 'sequence_tagging.tsv'),
"validation_data_path": str(self.FIXTURES_ROOT / 'data' / 'sequence_tagging.tsv'),
"iterator": {"type": "basic", "batch_size": 2},
"trainer": {
"cuda_device": -1,
"num_epochs": 2,
"optimizer": "adam"
}
})
def test_find_learning_rate(self):
find_learning_rate_model(self.params(),
os.path.join(self.TEST_DIR, 'test_find_learning_rate'),
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False)
# It's OK if serialization dir exists but is empty:
serialization_dir2 = os.path.join(self.TEST_DIR, 'empty_directory')
assert not os.path.exists(serialization_dir2)
os.makedirs(serialization_dir2)
find_learning_rate_model(self.params(), serialization_dir2,
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False)
# It's not OK if the serialization dir exists and is non-empty:
serialization_dir3 = os.path.join(self.TEST_DIR, 'non_empty_directory')
assert not os.path.exists(serialization_dir3)
os.makedirs(serialization_dir3)
with open(os.path.join(serialization_dir3, 'README.md'), 'w') as f:
f.write("TEST")
with pytest.raises(ConfigurationError):
find_learning_rate_model(self.params(), serialization_dir3,
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False)
# ... unless you use the --force flag.
find_learning_rate_model(self.params(), serialization_dir3,
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=True)
def test_find_learning_rate_args(self):
parser = argparse.ArgumentParser(description="Testing")
subparsers = parser.add_subparsers(title='Commands', metavar='')
FindLearningRate().add_subparser('find_lr', subparsers)
for serialization_arg in ["-s", "--serialization-dir"]:
raw_args = ["find_lr", "path/to/params", serialization_arg, "serialization_dir"]
args = parser.parse_args(raw_args)
assert args.func == find_learning_rate_from_args # pylint: disable=comparison-with-callable
assert args.param_path == "path/to/params"
assert args.serialization_dir == "serialization_dir"
# config is required
with self.assertRaises(SystemExit) as cm: # pylint: disable=invalid-name
args = parser.parse_args(["find_lr", "-s", "serialization_dir"])
assert cm.exception.code == 2 # argparse code for incorrect usage
# serialization dir is required
with self.assertRaises(SystemExit) as cm: # pylint: disable=invalid-name
args = parser.parse_args(["find_lr", "path/to/params"])
assert cm.exception.code == 2 # argparse code for incorrect usage
@pytest.mark.skipif(torch.cuda.device_count() < 2,
reason="Need multiple GPUs.")
def test_find_learning_rate_multi_gpu(self):
params = self.params()
params["trainer"]["cuda_device"] = [0, 1]
find_learning_rate_model(params,
os.path.join(self.TEST_DIR, 'test_find_learning_rate_multi_gpu'),
start_lr=1e-5,
end_lr=1,
num_batches=100,
linear_steps=True,
stopping_factor=None,
force=False)
class TestSearchLearningRate(AllenNlpTestCase):
def setUp(self):
super().setUp()
params = Params({
"model": {
"type": "simple_tagger",
"text_field_embedder": {
"token_embedders": {
"tokens": {
"type": "embedding",
"embedding_dim": 5
}
}
},
"encoder": {
"type": "lstm",
"input_size": 5,
"hidden_size": 7,
"num_layers": 2
}
},
"dataset_reader": {"type": "sequence_tagging"},
"train_data_path": str(self.FIXTURES_ROOT / 'data' / 'sequence_tagging.tsv'),
"validation_data_path": str(self.FIXTURES_ROOT / 'data' / 'sequence_tagging.tsv'),
"iterator": {"type": "basic", "batch_size": 2},
"trainer": {
"cuda_device": -1,
"num_epochs": 2,
"optimizer": "adam"
}
})
all_datasets = datasets_from_params(params)
vocab = Vocabulary.from_params(
params.pop("vocabulary", {}),
(instance for dataset in all_datasets.values()
for instance in dataset)
)
model = Model.from_params(vocab=vocab, params=params.pop('model'))
iterator = DataIterator.from_params(params.pop("iterator"))
iterator.index_with(vocab)
train_data = all_datasets['train']
trainer_params = params.pop("trainer")
serialization_dir = os.path.join(self.TEST_DIR, 'test_search_learning_rate')
self.trainer = Trainer.from_params(model,
serialization_dir,
iterator,
train_data,
params=trainer_params,
validation_data=None,
validation_iterator=None)
def test_search_learning_rate_with_num_batches_less_than_ten(self):
with pytest.raises(ConfigurationError):
search_learning_rate(self.trainer, num_batches=9)
def test_search_learning_rate_linear_steps(self):
learning_rates_losses = search_learning_rate(self.trainer, linear_steps=True)
assert len(learning_rates_losses) > 1
def test_search_learning_rate_without_stopping_factor(self):
learning_rates, losses = search_learning_rate(self.trainer, num_batches=100,
stopping_factor=None)
assert len(learning_rates) == 101
assert len(losses) == 101
|
[
"[email protected]"
] | |
559cc23ee0088ba255f51cac038b41d9882f11cc
|
2eab4a6fa0b525dc49fe06fd3c8f4e56dabe2ad2
|
/python/Calculate Fibonacci return count of digit occurrences.py
|
f7180023010e239d0de95d1cb3b45a34f5c8b4ea
|
[] |
no_license
|
bthowe/codewars
|
6563aa2c49bb876d3945620a27f95940f75130c6
|
fea2593c24b9e7f89ee33d1afb31581364e6f567
|
refs/heads/master
| 2020-07-01T02:59:35.113100 | 2017-02-06T19:57:34 | 2017-02-06T19:57:34 | 74,102,013 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 495 |
py
|
from collections import Counter
def fib(n):
n0 = 0
n1 = 1
count = 1
while count<n:
n2 = n0 + n1
n0 = n1
n1 = n2
count+=1
if n==0:
return 0
elif n==1:
return 1
else:
return n2
def fib_digits(n):
fib_num = fib(n)
return sorted([(v, int(k)) for k, v in dict(Counter(str(fib_num))).iteritems()])[::-1]
# print Counter(str(fib_num)).items()
if __name__=="__main__":
print fib_digits(100000)
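# A quick sanity check (a sketch, not in the original script; under this
# indexing fib(10) == 55, so the digit 5 occurs twice):
# print fib_digits(10) # -> [(2, 5)]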
|
[
"[email protected]"
] | |
c288c47b0ee58b847eafd53a54b97c0dbe7b513b
|
52cb25dca22292fce4d3907cc370098d7a57fcc2
|
/SWEA/5202_화물 도크.py
|
b568642b10f3da83d05c58573e217c3b11c3bb77
|
[] |
no_license
|
shjang1013/Algorithm
|
c4fc4c52cbbd3b7ecf063c716f600d1dbfc40d1a
|
33f2caa6339afc6fc53ea872691145effbce0309
|
refs/heads/master
| 2022-09-16T12:02:53.146884 | 2022-08-31T16:29:04 | 2022-08-31T16:29:04 | 227,843,135 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 514 |
py
|
# Problem
# SWEA 5202 - [Python S/W Problem Solving: Implementation, Day 3] Greedy algorithm - Cargo dock
# My solution
T = int(input())
for tc in range(T):
N = int(input())
time = []
Lorry = []
for i in range(N):
time.append(list(map(int, input().split())))
time.sort(key=lambda job: job[1]) # greedy: consider jobs by earliest finishing time
Lorry.append(time[0])
for k in range(N):
if Lorry[-1][1] <= time[k][0]: # keep a job only if it starts after the last chosen one ends
Lorry.append(time[k])
print("#%d %d" %(tc+1, len(Lorry)))
|
[
"[email protected]"
] | |
b37de288b6b30b48e14d7fb169d4d8fe6c4bbccd
|
dac12c9178b13d60f401c4febff5569af8aa2719
|
/cvat/apps/engine/migrations/0050_auto_20220211_1425.py
|
67322fca1254fb9f2986368a567e10d4f4876a05
|
[
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
opencv/cvat
|
39dc66ca20f972ba40b79c44d7ce43590dc0b0b5
|
899c9fd75146744def061efd7ab1b1c6c9f6942f
|
refs/heads/develop
| 2023-08-19T04:27:56.974498 | 2023-08-18T09:58:25 | 2023-08-18T09:58:25 | 139,156,354 | 6,558 | 1,887 |
MIT
| 2023-09-14T12:44:39 | 2018-06-29T14:02:45 |
TypeScript
|
UTF-8
|
Python
| false | false | 822 |
py
|
# Generated by Django 3.2.12 on 2022-02-11 14:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('engine', '0049_auto_20220202_0710'),
]
operations = [
migrations.RemoveField(
model_name='trainingprojectimage',
name='task',
),
migrations.RemoveField(
model_name='trainingprojectlabel',
name='cvat_label',
),
migrations.RemoveField(
model_name='project',
name='training_project',
),
migrations.DeleteModel(
name='TrainingProject',
),
migrations.DeleteModel(
name='TrainingProjectImage',
),
migrations.DeleteModel(
name='TrainingProjectLabel',
),
]
|
[
"[email protected]"
] | |
ff48941968979ef0668d6935a7cf5d692a04351b
|
dfeeb6f8a691c104898eee7b9ecefe8015d40f7c
|
/Pyhton tutorial /132Advanced_Python_Iterators_for_Dictionaries.py
|
022a96ddd6c7fba038120681284d26abfe48c699
|
[] |
no_license
|
narendra-ism/Python_tutorial_basic_
|
9277926dbfc707a761abe2ddebafb0855249fb68
|
29c2ebd5e7095bfda02d8c03d0afb65a85efe05d
|
refs/heads/master
| 2021-03-30T20:46:17.444715 | 2018-03-12T05:29:16 | 2018-03-12T05:29:16 | 124,831,659 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 208 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 11 18:02:08 2018
@author: narendra
"""
my_dict = {
'name': 'Nick',
'age': 31,
'occupation': 'Dentist',
}
for key in my_dict:
print(key, my_dict[key])
|
[
"[email protected]"
] | |
233968b08a8860b9777fb772c495dd55648ccf25
|
3ccd609f68016aad24829b8dd3cdbb535fb0ff6d
|
/python/bpy/types/PackedFile.py
|
d99de4c183bf5223a17fa706010d5076230aea26
|
[] |
no_license
|
katharostech/blender_externs
|
79b2eed064fd927e3555aced3e2eb8a45840508e
|
fdf7f019a460de0fe7e62375c1c94f7ab0e9f68d
|
refs/heads/master
| 2020-04-11T14:00:29.393478 | 2018-10-01T00:40:51 | 2018-10-01T00:40:51 | 161,838,212 | 1 | 1 | null | 2018-12-14T20:41:32 | 2018-12-14T20:41:32 | null |
UTF-8
|
Python
| false | false | 25 |
py
|
PackedFile.size = None
|
[
"[email protected]"
] | |
3f52a9fa1febbb8892a9673e2e7cb36cd16cbc1f
|
6ad41d9b76360c8007b494616374e9e0474f4da8
|
/mitogen/debug.py
|
8cb1a3675469ddfbcbaf2b70875a73d0235d6d62
|
[
"BSD-3-Clause"
] |
permissive
|
danielcompton/mitogen
|
a1f46aec5766a1309a4a0fb89aac6fcb72d1ee89
|
2813d1a968d6f694514a0053d094c0da9ea4863b
|
refs/heads/master
| 2021-04-17T20:48:30.103447 | 2018-03-25T09:13:20 | 2018-03-25T09:13:20 | 126,739,845 | 0 | 0 |
BSD-3-Clause
| 2018-03-25T21:00:54 | 2018-03-25T21:00:54 | null |
UTF-8
|
Python
| false | false | 3,279 |
py
|
# Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Basic signal handler for dumping thread stacks.
"""
import difflib
import os
import signal
import sys
import threading
import time
import traceback
_last = None
def format_stacks():
name_by_id = {
t.ident: t.name
for t in threading.enumerate()
}
l = ['', '']
for threadId, stack in sys._current_frames().items():
l += ["# PID %d ThreadID: (%s) %s; %r" % (
os.getpid(),
name_by_id.get(threadId, '<no name>'),
threadId,
stack,
)]
stack = stack.f_back.f_back
for filename, lineno, name, line in traceback.extract_stack(stack):
l += [
'File: "%s", line %d, in %s' % (
filename,
lineno,
name
)
]
if line:
l += [' ' + line.strip()]
l += ['']
l += ['', '']
return '\n'.join(l)
def _handler(*_):
global _last
s = format_stacks()
fp = open('/dev/tty', 'w', 1)
fp.write(s)
if _last:
fp.write('\n')
diff = list(difflib.unified_diff(
a=_last.splitlines(),
b=s.splitlines(),
fromfile='then',
tofile='now'
))
if diff:
fp.write('\n'.join(diff) + '\n')
else:
fp.write('(no change since last time)\n')
_last = s
def install_handler():
signal.signal(signal.SIGUSR2, _handler)
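# Once installed, a stack dump can be triggered from a shell with e.g.:
# kill -USR2 <pid-of-the-python-process>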
def _thread_main():
while True:
time.sleep(7)
l = format_stacks()
open('/tmp/stack.%s.log' % (os.getpid(),), 'wb', 65535).write(l)
break
def dump_periodically():
th = threading.Thread(target=_thread_main)
th.setDaemon(True)
th.start()
|
[
"[email protected]"
] | |
b6c563a3591bfe9763c33179bd4e387dea5f53bf
|
d73409535734a788af83a9b2b2e32dd1b979d5d2
|
/proxySTAR_V3/certbot/acme/acme/jws.py
|
f9b81749ab1db42de1e3654eef04f98432698ff3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
mami-project/lurk
|
adff1fb86cb3e478fe1ded4cbafa6a1e0b93bfdd
|
98c293251e9b1e9c9a4b02789486c5ddaf46ba3c
|
refs/heads/master
| 2022-11-02T07:28:22.708152 | 2019-08-24T19:28:58 | 2019-08-24T19:28:58 | 88,050,138 | 2 | 2 |
NOASSERTION
| 2022-10-22T15:46:11 | 2017-04-12T12:38:33 |
Python
|
UTF-8
|
Python
| false | false | 2,145 |
py
|
"""ACME-specific JWS.
The JWS implementation in acme.jose only implements the base JOSE standard. In
order to support the new header fields defined in ACME, this module defines some
ACME-specific classes that layer on top of acme.jose.
"""
from acme import jose
class Header(jose.Header):
"""ACME-specific JOSE Header. Implements nonce, kid, and url.
"""
nonce = jose.Field('nonce', omitempty=True, encoder=jose.encode_b64jose)
kid = jose.Field('kid', omitempty=True)
url = jose.Field('url', omitempty=True)
@nonce.decoder
def nonce(value): # pylint: disable=missing-docstring,no-self-argument
try:
return jose.decode_b64jose(value)
except jose.DeserializationError as error:
# TODO: custom error
raise jose.DeserializationError("Invalid nonce: {0}".format(error))
class Signature(jose.Signature):
"""ACME-specific Signature. Uses ACME-specific Header for customer fields."""
__slots__ = jose.Signature._orig_slots # pylint: disable=no-member
# TODO: decoder/encoder should accept cls? Otherwise, subclassing
# JSONObjectWithFields is tricky...
header_cls = Header
header = jose.Field(
'header', omitempty=True, default=header_cls(),
decoder=header_cls.from_json)
# TODO: decoder should check that nonce is in the protected header
class JWS(jose.JWS):
"""ACME-specific JWS. Includes none, url, and kid in protected header."""
signature_cls = Signature
__slots__ = jose.JWS._orig_slots # pylint: disable=no-member
@classmethod
# pylint: disable=arguments-differ,too-many-arguments
def sign(cls, payload, key, alg, nonce, url=None, kid=None):
# Per ACME spec, jwk and kid are mutually exclusive, so only include a
# jwk field if kid is not provided.
include_jwk = kid is None
return super(JWS, cls).sign(payload, key=key, alg=alg,
protect=frozenset(['nonce', 'url', 'kid', 'jwk', 'alg']),
nonce=nonce, url=url, kid=kid,
include_jwk=include_jwk)
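# A minimal usage sketch (hedged: the key/alg objects, URL and kid are
# assumed placeholders; the nonce would come from the server's Replay-Nonce
# header):
# jws = JWS.sign(b'{}', key=account_key, alg=jose.RS256, nonce=server_nonce,
# url='https://example.com/acme/new-order', kid=account_url)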
|
[
"[email protected]"
] | |
fb9e77575fffbd38ca11b3a18550c06b11795d67
|
98d2f9be98e5720cce5cb8da1f21b82c08b2f0ec
|
/src/transformers/models/barthez/tokenization_barthez.py
|
428f6fec654661edb0a5d8b3bc73135477561e47
|
[
"Apache-2.0"
] |
permissive
|
MANISH007700/transformers-1
|
80dd47a47e481e26d7d3bd208f7efa4c6df4d10b
|
5bf5d50c8dae2e54327a754aa476f13a0308f844
|
refs/heads/master
| 2023-04-02T23:40:39.871802 | 2021-04-08T12:22:58 | 2021-04-08T12:22:58 | 355,976,918 | 1 | 0 |
Apache-2.0
| 2021-04-08T16:22:45 | 2021-04-08T16:22:44 | null |
UTF-8
|
Python
| false | false | 11,446 |
py
|
# coding=utf-8
# Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for the BARThez model."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
"""
Adapted from :class:`~transformers.CamembertTokenizer` and :class:`~transformers.BartTokenizer`. Construct a
BARThez tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
contains the vocabulary necessary to instantiate a tokenizer.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the :obj:`cls_token`.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
Attributes:
sp_model (:obj:`SentencePieceProcessor`):
The `SentencePiece` processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
**kwargs
):
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs,
)
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(str(vocab_file))
self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BARThez sequence has the following format:
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s></s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
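# For example (a sketch; with this vocabulary <s> is id 0 and </s> is id 2):
# build_inputs_with_special_tokens([5, 6]) -> [0, 5, 6, 2]
# build_inputs_with_special_tokens([5, 6], [7]) -> [0, 5, 6, 2, 2, 7, 2]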
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formated with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
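    # Illustrative sketch (editor's addition): for the pair example above, the
    # mask is [1, 0, 0, 1, 1, 0, 1] - a 1 at every <s>/</s> position.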
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def vocab_size(self):
return len(self.sp_model)
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text):
return self.sp_model.EncodeAsPieces(text)
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
return spm_id if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index)
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
|
[
"[email protected]"
] | |
e29d8f2fff001a00232b614fc5f9ef322f588e70
|
b9eb496c4551fd091954675a61382636fc68e715
|
/src/ABC1xx/ABC14x/ABC145/ABC145A.py
|
d67371adc8709b9e59bd885084578a459b075261
|
[] |
no_license
|
kttaroha/AtCoder
|
af4c5783d89a61bc6a40f59be5e0992980cc8467
|
dc65ce640954da8c2ad0d1b97580da50fba98a55
|
refs/heads/master
| 2021-04-17T16:52:09.508706 | 2020-11-22T05:45:08 | 2020-11-22T05:45:08 | 249,460,649 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 79 |
py
|
def main():
print(int(input())**2)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
b598f0eb2b805c60cbd9476ff493934ffdf9de4e
|
b129c9b11e9d2c06114f45ce03a94f4f2a177119
|
/hugin/haproxy/configuration.py
|
a0d3263efdc2cedcdfab838f248cfbc7c2003847
|
[] |
no_license
|
pyfidelity/hugin.haproxy
|
a9e48e345b03ed9d361c0d6c8617135378f5c311
|
444e30350936883e7749c2371f394fa82c1644fe
|
refs/heads/master
| 2016-09-01T17:29:48.210244 | 2014-11-24T12:34:51 | 2014-11-24T12:34:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 679 |
py
|
from ConfigParser import RawConfigParser
import re
from paste.util.multidict import MultiDict
class FilterConfig(RawConfigParser, object):
def __init__(self):
RawConfigParser.__init__(self, dict_type=MultiDict)
def urls(self):
output = self._dict()
for key, value in self._sections.items():
output[key] = value['method'], value['match']
return output
def _read(self, *args, **kwargs):
return_value = RawConfigParser._read(self, *args, **kwargs)
for key in self._sections.keys():
self._sections[key]['match'] = re.compile(self._sections[key]['match'])
return return_value
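# Usage sketch (editor's addition; the file name and section are hypothetical,
# but urls() expects 'method' and 'match' entries in each section):
#   config = FilterConfig()
#   config.read('filters.ini')   # e.g. [home] with method = GET, match = ^/$
#   config.urls()                # -> roughly {'home': ('GET', <compiled regex>)}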
|
[
"[email protected]"
] | |
a760406307209396651413a877d753828833f2df
|
b2ba88eb56e1f08b823a8865d69a69c395754011
|
/PycharmProjects/PythonSeleniumAutoAugSep/12Oct2019/dictWithselenium.py
|
6eb730986c1523b8f07b69242fef45e683ad0ad5
|
[] |
no_license
|
aravindanath/TeslaEV
|
90d4577f4e2e2d0df9d5799acf9263895eb4a98c
|
a5a4b071da1187fec65f80481bf05a9469d38202
|
refs/heads/master
| 2020-12-06T12:07:52.074500 | 2020-03-18T15:25:21 | 2020-03-18T15:25:21 | 223,544,053 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 450 |
py
|
from selenium import webdriver
import time
td = {"url":"https://www.google.com/",'browser':"ff"}
if td['browser']=="chrome":
driver = webdriver.Chrome("/Users/aravindanathdm/PycharmProjects/PythonSeleniumProject/driver/chromedriver")
elif td['browser']=="ff":
driver = webdriver.Firefox(executable_path="/Users/aravindanathdm/PycharmProjects/PythonSeleniumProject/driver/geckodriver")
driver.get(td['url'])
time.sleep(2)
# driver.quit()
|
[
"[email protected]"
] | |
58018bce47bab170230d6e3048ec82dde7848ead
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part005027.py
|
27f26130e6844d40cbd135f283b24ee2697dcba2
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,300 |
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher65494(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher65494._instance is None:
CommutativeMatcher65494._instance = CommutativeMatcher65494()
return CommutativeMatcher65494._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 65493
return
yield
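        # (editor's note) the unreachable `yield` after `return` turns this
        # function into a generator that yields no matches; matchpy's code
        # generator emits this pattern for empty match states.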
from collections import deque
|
[
"[email protected]"
] | |
ee8221f7212eb222c79b8dcfb88d513aad2ddf15
|
adf4f40bc899775e4f87b40036d8b9ed8be7e847
|
/chapter_03/exercises/greeting.py
|
5e43278a36b84cd0b47ab44fdc81c5b049187846
|
[] |
no_license
|
ltsuda/python-crash-course
|
7473ff150214fc7d7370fa7cebfd009d1a2746e7
|
d153929229c071ce4733a68410220f621719983f
|
refs/heads/master
| 2020-06-23T19:14:07.967109 | 2019-09-03T01:00:14 | 2019-09-03T01:12:53 | 198,728,349 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 139 |
py
|
names = ['chris', 'maka', 'leandro']
print(f'The first name in the list is {names[0]}')
print(f'The last name in the list is {names[-1]}')
|
[
"[email protected]"
] | |
4a77df5b8c2b09a7a235b60d8d7a36c558c4f1d0
|
1df82fa8ef888b74fb9095c9ade89e16895366b1
|
/14.Lambdas and Buit In Functions - Exercise/03. Multiplication.py
|
c04346d3cb290fa33e44cdde67bdb53e43bbb9cc
|
[] |
no_license
|
filipov73/python_advanced_january_2020
|
868eb4bc365f7774c373183760e7ac584e1bd20c
|
a5e24190ee08bd1a0534dc04f91a5ba1927d1b19
|
refs/heads/master
| 2020-11-26T14:07:12.423309 | 2020-02-23T15:20:13 | 2020-02-23T15:20:13 | 229,097,988 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
num = int(input())
list_ = [int(x) for x in input().split()]
result = map(lambda x: x * num, list_)
print(" ".join(map(str, result)))
|
[
"[email protected]"
] | |
86ff63b94e72ffb5a1207a33fb869f5c5fbe46f7
|
3bb8b4e9d9b3f38ec4ec8f049c2bb58fce5850ea
|
/setup.py
|
352fcb364ae9a12ec1549006b19b704d46994a12
|
[
"Apache-2.0"
] |
permissive
|
klmitch/vapi
|
8ae87d1c4032e1b5ae54b50b7bc09e18f3f4e8de
|
3b8607d15723a6258ede96f607b32bb1ecf885be
|
refs/heads/master
| 2021-01-21T23:03:45.998643 | 2014-09-24T23:02:34 | 2014-09-24T23:02:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 991 |
py
|
#!/usr/bin/env python
# Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr'],
pbr=True,
)
|
[
"[email protected]"
] | |
85492e471a02ef450052f0f42fe3d28ff42058fb
|
7e69c60c23fce92463c78774b5968d3320c715c9
|
/python_net/web_cgi/cgi-bin/botengine.py
|
76b1f2b939236d3afc24d011cef7790c81f06c16
|
[] |
no_license
|
hwet-j/Python
|
5128d114cf7257067f68cfb1db502e4f762ac8cc
|
3e6f36be665932588a576f44ebb0107a4f350613
|
refs/heads/master
| 2023-04-08T17:52:31.607225 | 2021-04-17T05:25:02 | 2021-04-17T05:25:02 | 353,336,473 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,685 |
py
|
import codecs
from bs4 import BeautifulSoup
import urllib.request
from konlpy.tag import Okt
import os, re, json, random
dict_file = "chatbot-data.json"
dic = {}
twitter = Okt()
# Register words into the dictionary ---
def register_dic(words):
global dic
if len(words) == 0: return
tmp = ["@"]
for i in words:
word = i[0]
if word == "" or word == "\r\n" or word == "\n": continue
tmp.append(word)
if len(tmp) < 3: continue
if len(tmp) > 3: tmp = tmp[1:]
set_word3(dic, tmp)
if word == "." or word == "?":
tmp = ["@"]
continue
    # Save the dictionary every time it changes
    json.dump(dic, open(dict_file,"w", encoding="utf-8"))
# Register one trigram in the dictionary
def set_word3(dic, s3):
w1, w2, w3 = s3
if not w1 in dic: dic[w1] = {}
if not w2 in dic[w1]: dic[w1][w2] = {}
if not w3 in dic[w1][w2]: dic[w1][w2][w3] = 0
dic[w1][w2][w3] += 1
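# (editor's note) dic is a word-trigram Markov model: dic[w1][w2][w3] counts
# how often w3 follows the pair (w1, w2); "@" marks a sentence start.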
# Build a sentence ---
def make_sentence(head):
if not head in dic: return ""
ret = []
if head != "@":
ret.append(head)
top = dic[head]
w1 = word_choice(top)
w2 = word_choice(top[w1])
ret.append(w1)
ret.append(w2)
while True:
if w1 in dic and w2 in dic[w1]:
w3 = word_choice(dic[w1][w2])
else:
w3 = ""
ret.append(w3)
if w3 == "." or w3 == "? " or w3 == "":
break
w1, w2 = w2, w3
ret = "".join(ret)
    # Fix the spacing
    params = urllib.parse.urlencode({
        "_callback": "",
        "q": ret
    })
    # Use Naver's spell-checker API
    data = urllib.request.urlopen("https://m.search.naver.com/p/csearch/ocontent/spellchecker.nhn?" + params)
    data = data.read().decode("utf-8")[1:-2]
    data = json.loads(data)
    data = data["message"]["result"]["html"]
    data = BeautifulSoup(data, "html.parser").getText()
return data
def word_choice(sel):
keys = sel.keys()
return random.choice(list(keys))
# Build the chatbot reply ---
def make_reply(text):
    # Learn the new words first
    if not text[-1] in [".", "?"]: text += "."
    words = twitter.pos(text)
    register_dic(words)
    # If a word is already in the dictionary, build the sentence from it
for word in words:
face = word[0]
if face in dic:
return make_sentence(face)
return make_sentence("@")
# Load the dictionary if it already exists
if os.path.exists(dict_file):
dic = json.load(open(dict_file, "r"))
|
[
"[email protected]"
] | |
b62879c41444d90c6c81d3e6f4d4455793c8acc1
|
57106b3c8aab1f8a635806c8c15ffdde3f5d6fc2
|
/22data-mining/frequent_patterns/main.py
|
939e3a6744ea47a42464c91572bb4ab60bbe4c8c
|
[
"Apache-2.0"
] |
permissive
|
CoryVegan/python-tutorial
|
85e6b824d1f6a39b54d1fa84cd57def192f34e20
|
a7c51593d779f0fc9751c2d6093f80878c4ba5c3
|
refs/heads/master
| 2020-03-22T03:22:43.774009 | 2018-06-28T02:52:14 | 2018-06-28T02:52:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,092 |
py
|
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief:
import time
from eclat import eclat_zc
from freq_utils import loadDblpData, loadData, loadUnixData
from apriori import apriori_zc
from fp_growth import fp_growth
def test_fp_growth(minSup, dataSetDict, dataSet):
freqItems = fp_growth(dataSetDict, minSup)
freqItems = sorted(freqItems.items(), key=lambda item: item[1])
return freqItems
def test_apriori(minSup, dataSetDict, dataSet):
freqItems = apriori_zc(dataSet, dataSetDict, minSup)
freqItems = sorted(freqItems.items(), key=lambda item: item[1])
return freqItems
def test_eclat(minSup, dataSetDict, dataSet):
freqItems = eclat_zc(dataSet, minSup)
freqItems = sorted(freqItems.items(), key=lambda item: item[1])
return freqItems
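# (editor's note) each test_* helper runs one mining algorithm over the same
# dataset and returns its frequent itemsets sorted by support count.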
def print_freqItems(logo, freqItems):
print("-------------------", logo, "---------------")
for i in range(len(freqItems)):
print(i, freqItems[i])
print(len(freqItems))
print("-------------------", logo, " end ---------------")
def do_experiment_data_size():
data_name = 'unixData8_pro.txt'
x_name = "Data_Size"
data_num = 980
    step = data_num // 5  # #################################################################
all_time = []
x_value = []
for k in range(5):
minSup = data_num * 0.010
dataSetDict, dataSet = loadDblpData(("dataSet/" + data_name), ' ', data_num)
x_value.append(data_num) # #################################################################
if data_num < 0: # #################################################################
break
time_fp = 0
time_et = 0
time_ap = 0
freqItems_fp = {}
freqItems_eclat = {}
freqItems_ap = {}
for i in range(2):
ticks0 = time.time()
freqItems_fp = test_fp_growth(minSup, dataSetDict, dataSet)
time_fp += time.time() - ticks0
ticks0 = time.time()
freqItems_eclat = test_eclat(minSup, dataSetDict, dataSet)
time_et += time.time() - ticks0
ticks0 = time.time()
freqItems_ap = test_apriori(minSup, dataSetDict, dataSet)
time_ap += time.time() - ticks0
print("minSup :", minSup, " data_num :", data_num, \
" freqItems_fp:", " freqItems_eclat:", len(freqItems_eclat), " freqItems_ap:", len(
freqItems_ap))
print("fp_growth:", time_fp / 10, " eclat:", time_et / 10, " apriori:", time_ap / 10)
# print_freqItems("show", freqItems_eclat)
data_num -= step # #################################################################
use_time = [time_fp / 10, time_et / 10, time_ap / 10]
all_time.append(use_time)
# print use_time
y_value = []
for i in range(len(all_time[0])):
tmp = []
for j in range(len(all_time)):
tmp.append(all_time[j][i])
y_value.append(tmp)
return x_value, y_value
def do_experiment_min_support():
data_name = 'unixData8_pro.txt'
x_name = "Min_Support"
data_num = 980
    minSup = data_num // 6
    dataSetDict, dataSet = loadDblpData(("dataSet/" + data_name), ',', data_num)
    step = minSup // 5  # #################################################################
all_time = []
x_value = []
for k in range(5):
x_value.append(minSup) # #################################################################
if minSup < 0: # #################################################################
break
time_fp = 0
time_et = 0
time_ap = 0
freqItems_fp = {}
freqItems_eclat = {}
freqItems_ap = {}
for i in range(10):
ticks0 = time.time()
freqItems_fp = test_fp_growth(minSup, dataSetDict, dataSet)
time_fp += time.time() - ticks0
ticks0 = time.time()
freqItems_eclat = test_eclat(minSup, dataSetDict, dataSet)
time_et += time.time() - ticks0
ticks0 = time.time()
freqItems_ap = test_apriori(minSup, dataSetDict, dataSet)
time_ap += time.time() - ticks0
print("minSup :", minSup, " data_num :", data_num, \
" freqItems_eclat:", len(freqItems_eclat))
print("[time spend] fp_growth:", time_fp / 10, " eclat:", time_et / 10, " apriori:", time_ap / 10)
# print_freqItems("show", freqItems_eclat)
minSup -= step # #################################################################
use_time = [time_fp / 10, time_et / 10, time_ap / 10]
all_time.append(use_time)
# print use_time
y_value = []
for i in range(len(all_time[0])):
tmp = []
for j in range(len(all_time)):
tmp.append(all_time[j][i])
y_value.append(tmp)
return x_value, y_value
def do_test():
dataSetDict, dataSet = loadDblpData(("dataSet/connectPro.txt"), ',', 100)
minSup = 101
# for item in freq_items:
# print item
# freqItems = test_fp_growth(minSup, dataSetDict, dataSet)
# print_freqItems("show", freqItems)
#
freqItems = test_eclat(minSup, dataSetDict, dataSet)
# print_freqItems("show", freqItems)
freqItems_eclat = test_eclat(minSup, dataSetDict, dataSet)
# freqItems_ap = test_apriori(minSup, dataSetDict, dataSet)
# print_freqItems("show", freqItems_ap)
print(len(freqItems_eclat))
def do_dblp_data():
data_name = 'dblpDataAll.txt'
x_name = "Min_Support"
data_num = 2715700
minSup = 100
dataSetDict, dataSet = loadDblpData(("dataSet/" + data_name), ',', data_num)
time_fp = 0
ticks0 = time.time()
freqItems_fp = test_eclat(minSup, dataSetDict, dataSet)
time_fp += time.time() - ticks0
print(time_fp)
for item in freqItems_fp:
print(item)
if __name__ == '__main__':
x_value, y_value = do_experiment_min_support()
x_value, y_value = do_experiment_data_size()
do_test()
do_dblp_data()
|
[
"[email protected]"
] | |
84666edef17c1b5cba6573aa7211aaf13565b74d
|
d930697cc16f69187c0918524e655ab8259b9806
|
/src/aux/parsing.py
|
811773ccfb4605251941ca7c528dd8eb8454f7fc
|
[] |
no_license
|
tkusmierczyk/badges2
|
7ff6c6edd8f21f90ec2981ede569c4a7c018a765
|
7738483c2a732a062007b14286ca2fce6684965a
|
refs/heads/master
| 2021-09-22T09:51:08.296245 | 2018-09-07T15:59:33 | 2018-09-07T15:59:33 | 111,401,358 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,278 |
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
def str2value(s):
original = s
s = s.lower()
if s=="t" or s=="true": return True
if s=="f" or s=="false": return False
try:
if int(s)==float(s): return int(s)
except: pass
try: return float(s)
except: pass
return original
def tcast(v):
try:
v2 = int(v)
if v2==v: return v2
except:
pass
try:
return float(v)
except:
pass
return str(v)
def parse_dictionary(options_str):
options_str = options_str.replace(";", ",").replace(":", "=")
options = [o.strip() for o in options_str.split(",") if len(o.strip())>0]
options_dict = {}
for option in options:
if "=" not in option:
raise ValueError("options must be given as option=value")
parts = option.split("=")
option, val = parts[0], parts[1]
options_dict[option] = str2value(val)
return options_dict
def format_dict(dct):
return str(dct).strip("{").strip("}").replace("\"", "").replace("'", "")
class objectview(object):
def __init__(self, d):
self.d = d.copy()
self.__dict__ = self.d
def __str__(self):
return(str(self.d))
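# Behaviour sketch (editor's addition):
#   str2value("true")                        # -> True
#   parse_dictionary("a=1, b:2.5; c=text")   # -> {'a': 1, 'b': 2.5, 'c': 'text'}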
|
[
"[email protected]"
] | |
c68ef51105c5285f6a7602dbe1e424ed80366edb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03061/s017722263.py
|
daf6b30e11b69ac96f03b0ad16f9930e404f4415
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 283 |
py
|
import math
n = int(input())
a = list(map(int,input().split()))
L = [0]*(n+1)
R = [0]*(n+1)
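# (editor's note) L[i] is the gcd of a[0..i-1] (prefix) and R[i] the gcd of
# a[i..n-1] (suffix); dropping a[i] leaves gcd(L[i], R[i+1]), maximized below.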
for i in range(n):
L[i+1] = math.gcd(L[i], a[i])
R[-2-i] = math.gcd(R[-1-i], a[-1-i])
ans = 0
for i in range(n):
G = math.gcd(L[i], R[i+1])
ans = max(ans, G)
print(ans)
|
[
"[email protected]"
] | |
51070f08059bac4f36859b19228d0f0ac977d60c
|
a80b8d4276140c5d702a651ef1fd4540201ae8eb
|
/homeworks/hw7/views.py
|
5b5e8c3e23ff6047fa17d906c1d693c89d25386f
|
[] |
no_license
|
laky55555/application_security
|
cc15c7abf8e472634e37ea56fe1b0eb01b6ee060
|
a81299f2dfbe93e5785720eb7ccb25b9c5c11b18
|
refs/heads/master
| 2021-01-23T02:22:51.536155 | 2017-08-19T16:12:02 | 2017-08-19T16:12:02 | 85,986,254 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,839 |
py
|
from django.shortcuts import render
from social_django.admin import UserSocialAuth
import requests
# Create your views here.
def index(request):
page_title = "Seventh homework"
content = "Using OAuth2 from popular social networks"# + '/home/ivan/Dropbox/Faks/5_godina/application_security/homeworks'
problem1 = [('login', 'Create a simple server-side web application that uses OAuth2 from popular sites for login.')]
problem2 = [('facebook', 'Using Facebook API'), ('twitter', 'Using Twitter API'), ('google', 'Using Google+ API'), ]
problems = [problem1, problem2]
return render(request, 'landing.html', {'page_title': page_title, 'content': content, 'problems': problems})
def login(request):
page_title = "Login via social networks"
providers = []
if request.user.is_authenticated():
providers = list(UserSocialAuth.objects.filter(user=request.user).values_list('provider', flat=True))
explanation = [('Account linked to:', providers),
('Facebook', ['<a href="/login/facebook?next=/hw7">Login via facebook</a>',
'Specificy what permissions you want to get in settings: SOCIAL_AUTH_FACEBOOK_SCOPE = ["email", "user_photos"]']),
('Google', ['<a href="/login/google-oauth2?next=/hw7">Login via google</a>']),
('Twitter', ['<a href="/login/twitter?next=/hw7">Login via twitter [NOT IN YET IN USE]</a>']),
('Slack', ['<a href="/login/slack?next=/hw7">Login via slack</a>',]),
('Note', ['Add backends of API-s you want to use in backend tuple (setting.py)',
'Add keys (secret and public) of backed',
'Add pipelanes; info and specification of new users you want to get and import into database)',
'Using pipeline associate_by_email just with services that check email. If they don\'t check it -> <a href="http://python-social-auth.readthedocs.io/en/latest/configuration/django.html">security risk</a>.'])]
return render(request, 'base.html', {'page_title': page_title, 'explanation': explanation})
def get_data_from_facebook(url, token):
response = requests.get(url, params={'access_token': token}).json()
data = response.get('data')
# next = response.get('paging').get('next')
next = data
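    # (editor's note) paging is stubbed out above, so `next` merely mirrors
    # `data`; the callers below unpack it and ignore it.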
return (data, next)
def facebook(request):
page_title = "Playing with Facebook API"
providers = []
friend_list = my_albums = latest_posts = False
if request.user.is_authenticated():
providers = list(UserSocialAuth.objects.filter(user=request.user).values_list('provider', flat=True))
user = UserSocialAuth.objects.filter(provider='facebook', user=request.user).first()
if request.method == 'POST' and user and not user.access_token_expired() and request.POST.get('usage') in {'posts', 'albums', 'taggable_friends'}:
usage = request.POST.get('usage')
url = 'https://graph.facebook.com/v2.9/' + user.uid
if usage == 'posts':
latest_posts, next = get_data_from_facebook(url +'/feed?fields=picture,message,permalink_url,created_time', user.access_token)
elif usage == 'albums':
my_albums, next = get_data_from_facebook(url +'/albums?fields=count,link,name,photo_count,picture', user.access_token)
elif usage == 'taggable_friends':
friend_list, next = get_data_from_facebook(url + '/taggable_friends?fields=picture.width(300),name', user.access_token)
return render(request, 'hw7/facebook.html', {'page_title': page_title, 'providers': providers, 'friend_list': friend_list, 'my_albums': my_albums, 'latest_posts': latest_posts})
def twitter(request):
a = 2
def google(request):
a = 2
|
[
"[email protected]"
] | |
3d9654063e7ca4dd1188f7023431c014a679a192
|
f3b023931812ca0f37bb9fcaf930b8fda4b8609c
|
/natsr/dataloader.py
|
cea9d73ac58ad2f2d8f51d3587cd416908036a94
|
[
"MIT"
] |
permissive
|
kozistr/NatSR-pytorch
|
91bbdc31f94f6a32886f922e9825f1a947509886
|
51b2b5ce9b1fdc0864a299243798a0f78eb7eedc
|
refs/heads/master
| 2022-12-07T23:41:59.485090 | 2022-11-22T09:54:49 | 2022-11-22T09:54:49 | 251,505,048 | 7 | 3 |
MIT
| 2022-11-22T09:54:08 | 2020-03-31T05:02:47 |
Python
|
UTF-8
|
Python
| false | false | 4,792 |
py
|
import os
import random
from glob import glob
from math import sqrt
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from PIL import Image
from torch import cat
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import (
Compose,
RandomCrop,
Resize,
ToPILImage,
ToTensor,
)
from torchvision.transforms.functional import rotate
from natsr import DataSets, DataType, Mode, ModelType
from natsr.utils import get_blurry, get_noisy, is_gpu_available, is_valid_key
def get_scale_factor(scale: int) -> int:
if scale & (scale - 1):
return int(sqrt(scale))
return scale
def get_valid_crop_size(crop_size: int, scale: int) -> int:
return crop_size - (crop_size % scale)
def hr_transform(crop_size: int):
return Compose([RandomCrop(crop_size), ToTensor()])
def lr_transform(crop_size: int, scale: int):
return Compose(
[
ToPILImage(),
Resize(crop_size // scale, interpolation=Image.BICUBIC),
ToTensor(),
]
)
def get_nmd_data(
img, scale: int, alpha: float, sigma: float, mode: str
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
batch_size: int = img.size(0)
if mode == Mode.TRAIN:
noisy_img = get_noisy(img[: batch_size // 4, :, :, :], sigma)
blurry_img = get_blurry(
img[batch_size // 4 : batch_size // 2, :, :, :], scale, alpha
)
clean_img = img[batch_size // 2 :, :, :, :]
return cat([noisy_img, blurry_img, clean_img], dim=0)
elif mode == Mode.VALID:
noisy_img = get_noisy(img, sigma)
blurry_img = get_blurry(img, scale, alpha)
clean_img = img
return (
cat([blurry_img, clean_img], dim=0),
cat([noisy_img, clean_img], dim=0),
)
raise NotImplementedError(f'[-] not supported mode : {mode}')
class DIV2KDataSet(Dataset):
def __init__(self, config, data_type: str):
self.config = config
self.scale_factor: int = get_scale_factor(
config['data'][DataSets.DIV2K]['scale']
)
self.crop_size: int = get_valid_crop_size(
config['model'][ModelType.FRSR]['height'], self.scale_factor
)
self.hr_image_paths: List[str] = []
self.hr_images: np.ndarray = np.array([], dtype=np.uint8)
self.hr_transform = hr_transform(self.crop_size)
self.lr_transform = lr_transform(self.crop_size, self.scale_factor)
self._get_image_paths(data_type=data_type)
def _get_image_paths(self, data_type: str) -> None:
dataset_path: str = self.config['data'][DataSets.DIV2K]['dataset_path']
if os.path.exists(dataset_path):
self.hr_image_paths = sorted(
glob(
os.path.join(
dataset_path, f'DIV2K_{data_type}_HR', '*.png'
)
)
)
else:
raise FileNotFoundError(
f'[-] there\'s no dataset at {dataset_path}'
)
def __getitem__(self, index: int):
hr_image = Image.open(self.hr_image_paths[index])
hr_image = rotate(hr_image, random.choice([0, 90, 180, 270]))
hr_image = self.hr_transform(hr_image)
lr_image = self.lr_transform(hr_image)
return lr_image, hr_image
def __len__(self):
return len(self.hr_image_paths)
def build_data_loader(
config, data_type: str, override_batch_size: Optional[int] = None
) -> DataLoader:
dataset_type: str = config['data']['dataset_type']
model_type: str = config['model']['model_type']
if not is_valid_key(config['model'], model_type):
raise NotImplementedError(
f'[-] not supported model_type : {model_type}'
)
if dataset_type == DataSets.DIV2K:
dataset = DIV2KDataSet(config, data_type)
else:
raise NotImplementedError(
f'[-] not supported dataset_type : {dataset_type}'
)
data_loader = DataLoader(
dataset=dataset,
batch_size=config['model'][model_type]['batch_size']
if override_batch_size is None
else override_batch_size,
shuffle=True,
pin_memory=is_gpu_available(),
drop_last=False,
num_workers=config['aux']['n_threads'],
)
return data_loader
def build_loader(
config, override_batch_size: Optional[int] = None
) -> Tuple[DataLoader, DataLoader]:
train_data_loader = build_data_loader(
config, data_type=DataType.TRAIN.value
)
valid_data_loader = build_data_loader(
config,
data_type=DataType.VALID.value,
override_batch_size=override_batch_size,
)
return train_data_loader, valid_data_loader
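# Usage sketch (editor's addition; `config` must follow the config schema read
# above, e.g. config['data']['dataset_type'] == DataSets.DIV2K):
#   train_loader, valid_loader = build_loader(config)
#   lr_batch, hr_batch = next(iter(train_loader))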
|
[
"[email protected]"
] | |
54fa847db262c80c74df746853501b408ac95069
|
bbf9b9a382a427dbf90980f609f7ab14dd0511bc
|
/day9/ByLinkText.py
|
2e03bc2356c78fc357be528bc44d57c0c424b807
|
[] |
no_license
|
aravindanath/MarvelAutomation
|
b916a73467ec479ecad67be8c268743feea98816
|
91ae916de90cf0f407eb83ff08ddfb477d8cbea2
|
refs/heads/master
| 2021-05-18T07:14:15.533717 | 2020-06-07T05:52:06 | 2020-06-07T05:52:06 | 251,174,725 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 335 |
py
|
from day8 import LaunchBrowser as lp
import time
from selenium.webdriver.common.keys import Keys
data = "iphone SE2"
lp.driver.get("https://www.google.com")
lp.driver.find_element_by_link_text("తెలుగు").click()
time.sleep(4)
lp.driver.find_element_by_name('q').send_keys("news",Keys.ENTER)
time.sleep(4)
lp.driver.quit()
|
[
"[email protected]"
] | |
1ed435bb36804248b862257ae25e0672980fa2c3
|
8f836e3c4add1af6311abd8c71d517847d29e8f9
|
/python_learning/python_book_projectsPractice/web_application/homeworks/pizzeria_18_4/pizzas/migrations/0001_initial.py
|
ff016843b41af2ca9ca7f2800dc4638d5a34dd86
|
[] |
no_license
|
DanilWH/Python
|
f6282d5aff5d4fa79c1fd0f0108e6c0c3777a485
|
b87319409a94e26faf084c22b1eb6a1d55458282
|
refs/heads/master
| 2021-01-03T21:23:02.305101 | 2020-03-11T16:20:27 | 2020-03-11T16:20:27 | 240,238,725 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 506 |
py
|
# Generated by Django 2.2.3 on 2019-07-23 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Pizza',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
]
|
[
"[email protected]"
] | |
a98c17ccd06a2ab20d73dde4cff5f5119ea749a8
|
43cdb5fc4202346394272926b37a0671b0862d42
|
/winremote/modules/devices.py
|
27d7e15aff7e6c2ca24ce6a1c3e3ab48d3706dcc
|
[] |
no_license
|
machacekondra/winremote
|
2d2b9f9bf94cb8340dbcc49a744d74d37a770a2c
|
3e79f0f4b37e1a358b45eb22602f44da9068bfee
|
refs/heads/master
| 2021-01-10T07:45:16.907796 | 2016-08-16T10:48:50 | 2016-08-16T10:48:50 | 43,779,439 | 6 | 3 | null | 2016-12-02T11:52:45 | 2015-10-06T21:20:24 |
Python
|
UTF-8
|
Python
| false | false | 1,767 |
py
|
"""
This module implements work with devices via WMI.
"""
def list(session, attributes='Name,ConfigManagerErrorCode'):
"""
Description: return list of all devices on windows machine
:param session: instance of Windows, which hold session to win machine
:type session: winremote.Windows
:param attributes: comma delimited name of attributes to be returned
:type attributes: str
:returns: list of devices info
:rtype: list of dict
"""
return session._wmi.query('select %s from Win32_PnPEntity' % attributes)
def status(session, name):
"""
Description: check status of device
:param session: instance of Windows, which hold session to win machine
:type session: winremote.Windows
:param name: name of the device to fetch info
:type name: str
:returns: True or False, True if device is OK, False otherwise
:rtype: bool
"""
dev = session._wmi.query_first(
"select * from Win32_PnPEntity where Name = '%s'" % name
)
if dev and 'ConfigManagerErrorCode' in dev:
return dev['ConfigManagerErrorCode'] == '0'
return False
def get(session, name, attributes='Name'):
"""
Description: get basic info about windows device @name
:param session: instance of Windows, which hold session to win machine
:type session: winremote.Windows
:param attributes: comma delimited name of attributes to be returned
:type attributes: str
:param name: name of the device to fetch info
:type name: str
    :returns: info about the device, or None if the device is not found
:rtype: dict
"""
return session._wmi.query_first(
"select * from Win32_PnPEntity where Name = '%s'" % name
)
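# Usage sketch (editor's addition; the device name is hypothetical and
# `session` must be an established winremote.Windows session):
#   if not status(session, 'Some PnP Device'):
#       print(get(session, 'Some PnP Device', attributes='Name,Status'))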
|
[
"[email protected]"
] | |
233cf17b20db9e8da29c2d67bd65024db0765681
|
d0533b0574494b13606a557620f38f5a2c74ce16
|
/venv/lib/python3.7/site-packages/sympy/matrices/tests/test_normalforms.py
|
24475e4b219f83f5338425d08dd3529f0f06a589
|
[
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
CatTiger/vnpy
|
af889666464ab661fb30fdb0e8f71f94ba2d1e41
|
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
|
refs/heads/master
| 2020-09-26T00:37:54.123877 | 2020-07-13T10:15:46 | 2020-07-13T10:15:46 | 226,124,078 | 0 | 0 |
MIT
| 2020-04-21T03:02:20 | 2019-12-05T14:44:55 |
C++
|
UTF-8
|
Python
| false | false | 843 |
py
|
from sympy import Symbol, Poly
from sympy.polys.solvers import RawMatrix as Matrix
from sympy.matrices.normalforms import invariant_factors, smith_normal_form
from sympy.polys.domains import ZZ, QQ
def test_smith_normal():
m = Matrix([[12, 6, 4,8],[3,9,6,12],[2,16,14,28],[20,10,10,20]])
setattr(m, 'ring', ZZ)
smf = Matrix([[1, 0, 0, 0], [0, 10, 0, 0], [0, 0, -30, 0], [0, 0, 0, 0]])
assert smith_normal_form(m) == smf
x = Symbol('x')
m = Matrix([[Poly(x-1), Poly(1, x),Poly(-1,x)],
[0, Poly(x), Poly(-1,x)],
[Poly(0,x),Poly(-1,x),Poly(x)]])
setattr(m, 'ring', QQ[x])
invs = (Poly(1, x), Poly(x - 1), Poly(x**2 - 1))
assert invariant_factors(m) == invs
m = Matrix([[2, 4]])
setattr(m, 'ring', ZZ)
smf = Matrix([[2, 0]])
assert smith_normal_form(m) == smf
|
[
"[email protected]"
] | |
64ec78804a0e924b0fa62b824725c884cdeffb29
|
eae3d77ac72c168cee7701462f1fc45d7d4dcd91
|
/start1/1240_단순2진암호코드.py
|
30d4af020c267c1134e39d99f98595abe3ab4d21
|
[] |
no_license
|
ByeongjunCho/Algorithm-TIL
|
ed2f018d50bd2483bd1175ff9bf7e91913c14766
|
ad79125a1498915fe97c1d57ee6860b06c410958
|
refs/heads/master
| 2022-07-19T15:12:23.689319 | 2020-05-18T08:37:09 | 2020-05-18T08:37:09 | 256,399,493 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,506 |
py
|
# def check(N, M):
# code = ['0001101', '0011001', '0010011', '0111101', '0100011', '0110001', '0101111', '0111011', '0110111', '0001011']
# code = {code[x]: x for x in range(len(code))}
# passwords = [0] * 8
# # record where the code block sits
# ys, ye = 0, 0 # first and last row of the code
# for i in range(N):
# if ye:
# break
# elif not ys and int(arr[i]):
# ys = i
# elif ys and not int(arr[i]):
# ye = i - 1
#
# xs, xe = 0, 0 # last column of the code
# for j in range(M-1, -1, -1):
# if arr[ys][j] == '1':
# xe = j
# break
#
# xs = xe - 55
# start = xs
# for i in range(8):
# tmp = arr[ys][start:start+7]
# k = code.get(tmp)
# if k == None:
# return 0
# passwords[i] = k
# start += 7
# test = 0
# for i in range(0, 7, 2):
# test += passwords[i]
# test *= 3
# for i in range(1, 7, 2):
# test += passwords[i]
# test += passwords[-1]
#
# if test % 10:
# return 0
#
# for j in range(xs, xe + 1):
# for i in range(ys, ye):
# if arr[i][j] != arr[i+1][j]:
# return 0
#
# return sum(passwords)
#
#
# T = int(input())
# for tc in range(1, T+1):
# N, M = map(int, input().split()) # 세로, 가로
# arr = [input() for _ in range(N)]
# print('#{} {}'.format(tc, check(N, M)))
# A simpler version
# code = ['0001101', '0011001', '0010011', '0111101', '0100011', '0110001', '0101111', '0111011', '0110111', '0001011']
# code = {code[x]: x for x in range(len(code))}
#
# T = int(input())
# for tc in range(1, T+1):
# N, M = map(int, input().split()) # rows, cols
# arr = [input() for _ in range(N)]
#
# def find():
# # find the end position of the code
# for i in range(N):
# for j in range(M-1, 0, -1):
# if arr[i][j] == '0': continue
# pwd = []
# for s in range(j-56+1, j, 7):
# pwd.append(code[arr[i][s: s+7]])
#
# a = pwd[0] + pwd[2] + pwd[4] + pwd[6]
# b = pwd[1] + pwd[3] + pwd[5]
# if (a*3 + b) % 10 == 0:
# return a+b
# else:
# return 0
#
#
# print('#{} {}'.format(tc, find()))
T = int(input())
for tc in range(1, T+1):
    N, M = map(int, input().split())  # rows, cols
    arr = [input() for _ in range(N)]

    # Digit table: run-length ratios (c2, c3, c4) of each 7-bit code read
    # right to left, reduced by their minimum (derived from the code list above)
    P = {(2, 1, 1): 0, (2, 2, 1): 1, (1, 2, 2): 2, (4, 1, 1): 3, (1, 3, 2): 4,
         (2, 3, 1): 5, (1, 1, 4): 6, (3, 1, 2): 7, (2, 1, 3): 8, (1, 1, 2): 9}

    def find():
        # Find the right end of the code (a '1' with a '0' in the row above)
        for i in range(N):
            j = M - 1
            while j >= 0:
                if arr[i][j] == '1' and arr[i-1][j] == '0':
                    pwd = []
                    for _ in range(8):
                        c2 = c3 = c4 = 0
                        while arr[i][j] == '0': j -= 1
                        while arr[i][j] == '1': c4, j = c4+1, j-1
                        while arr[i][j] == '0': c3, j = c3 + 1, j - 1
                        while arr[i][j] == '1': c2, j = c2 + 1, j - 1
                        MIN = min(c2, c3, c4)
                        pwd.append(P[(c2//MIN, c3//MIN, c4//MIN)])
                    # pwd holds the digits right to left, so pwd[0] is digit 8
                    b = pwd[0] + pwd[2] + pwd[4] + pwd[6]
                    a = pwd[1] + pwd[3] + pwd[5] + pwd[7]
                    if (a*3 + b) % 10 == 0:
                        return a+b
                    else:
                        return 0
                j -= 1
        return 0

    print('#{} {}'.format(tc, find()))
|
[
"[email protected]"
] | |
464fd54ef836816d3e9af6de1a8449fd7e305d75
|
c6053ad14e9a9161128ab43ced5604d801ba616d
|
/Lemon/Python_Base/Lesson10_object_20181117/homework_04.py
|
376628267c6c40688877c51fce0c9b6091931be0
|
[] |
no_license
|
HesterXu/Home
|
0f6bdace39f15e8be26031f88248f2febf33954d
|
ef8fa0becb687b7b6f73a7167bdde562b8c539be
|
refs/heads/master
| 2020-04-04T00:56:35.183580 | 2018-12-25T02:48:51 | 2018-12-25T02:49:05 | 155,662,403 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,269 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2018/11/17/13:35
# @Author : Hester Xu
# Email : [email protected]
# @File : homework_04.py
# @Software : PyCharm
'''
4: Define an amusement-park ticket class to the spec below, then create an
instance and call its method to total the ticket price for adults and children
(the head counts are whatever you enter):
1) A weekday ticket costs 100 yuan
2) A weekend ticket costs 120% of the weekday price
3) Children pay half price
'''
class Ticket:
def __init__(self,time,adult_number,child_number):
self.time = time
self.adult_number = adult_number
self.child_number = child_number
def get_price(self):
if self.time == "weekend":
adult_price = 120
child_price = 60
else:
adult_price = 100
child_price = 50
        total_price = adult_price * self.adult_number + child_price * self.child_number
        print("Total price for {} adults and {} children: {} yuan".format(self.adult_number, self.child_number, total_price))
time = input("请输入去公园的时间(weekend or weekday):")
adult_number = eval(input("请输入去公园的成人数量:"))
child_number = eval(input("请输入去公园的儿童数量:"))
p = Ticket(time,adult_number,child_number)
p.get_price()
|
[
"[email protected]"
] | |
a1afa2fac8e405059e85a2618a7878c4182aab03
|
45b644af6d0204ff337bf348c007fd036b0fd113
|
/0x0B-python-input_output/11-student.py
|
925d2a83ec4d8aa2b22e33509ed50e7ff56a1261
|
[] |
no_license
|
jaycer95/holbertonschool-higher_level_programming
|
b5e7f2e72a9da8242befa0945b2935ceea3a086e
|
47882b8a4d8b78e09cb372a8b2b85440de2b2d5b
|
refs/heads/master
| 2022-12-20T22:31:35.675364 | 2020-09-24T18:33:21 | 2020-09-24T18:33:21 | 259,335,318 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 297 |
py
|
#!/usr/bin/python3
""" create a student class"""
class Student:
""" defines a student """
def __init__(self, first_name, last_name, age):
self.first_name = first_name
self.last_name = last_name
self.age = age
def to_json(self):
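        """Return the dictionary representation of the Student instance."""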
return self.__dict__
|
[
"[email protected]"
] | |
1052569c6dac29bb7f48040506d0dd1b7b089514
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/benison.py
|
c62b8efc78821e7a30cbaf33bb71d01fad388ac4
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 83 |
py
|
ii = [('RoscTTI3.py', 1), ('AinsWRR3.py', 1), ('BailJD1.py', 1), ('TalfTIT.py', 1)]
|
[
"[email protected]"
] | |
399a3d37af431b0d4b7205c68bef93e1a1222c45
|
edccc564bf3699d7bab9a6b26c369ac85cd32555
|
/misc/add_func.py
|
bba74174e82b95aac6210ab30759e4be88be0ecd
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
NAL-i5K/genomics-workspace
|
7bb609b651b6118d3ce8aa49868d7372f5562956
|
013f44dd02980d34a00e4e9b667fa8fea6e824c5
|
refs/heads/master
| 2023-01-27T20:36:09.871710 | 2021-02-10T13:48:25 | 2021-02-10T13:48:25 | 72,758,632 | 14 | 6 |
NOASSERTION
| 2023-01-13T22:38:11 | 2016-11-03T15:20:11 |
JavaScript
|
UTF-8
|
Python
| false | false | 4,539 |
py
|
from blast.models import SequenceType
#from blast.models import BlastDb
from app.models import Organism
import os
import sys
import requests
#from hmmer.models import HmmerDB
def display_name(options):
try:
base_organism = options['Genus_Species'][0].lower().capitalize() + ' ' + options['Genus_Species'][1].lower()
except TypeError:
return 0
if len(options['Genus_Species']) == 3:
display_name = base_organism + ' '+ options['Genus_Species'][2].lower()
return display_name
else:
display_name = base_organism
return display_name
def get_organism(display_name):
organism_database = Organism.objects.get(display_name = display_name)
if organism_database :
return organism_database
else:
print("check your organism name again if it still fails then check your organism database")
sys.exit(0)
def get_path(app_name,title):
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if app_name == 'blast':
path = os.path.join('blast/db',title)
else:
path = os.path.join('hmmer/db',title)
a=os.path.join(base_dir,'media',path)
check = os.path.isfile(a)
if check:
return path
else:
print("No fasta file in media/blast/db or media/hmmer/db")
sys.exit(0)
def short_name(name):
short_name = name.split(' ')
short_name1 = short_name[0][0:3]
short_name2 = short_name[1][0:3]
short_name = short_name1 + short_name2
return short_name
def get_molecule(options):
try:
molecule = options['type'][0].lower() #get molecule_type from command line
if molecule == 'peptide': #change the name tp prot or nucl
molecule2 = 'prot'
elif molecule == 'nucleotide':
molecule2 = 'nucl'
else:
print("please enter the correct molecule_type, must be nucleotide or peptide")
sys.exit(0)
except Exception :
print("enter the argument complete '-t' '-f' ")
sys.exit(0)
molecule_type = SequenceType.objects.filter(molecule_type = molecule2) #get the data from molecule_type field
a = molecule_type[0]
molecule_str = a.molecule_type
return molecule2,molecule_str
def get_dataset(options):
dataset = options['type'][1].lower().capitalize()
if dataset =='Genome':
dataset = dataset + ' ' + options['type'][2].lower().capitalize()
elif dataset == 'Transcript':
pass
elif dataset == 'Protein':
pass
else:
print('enter the correct dataset type')
sys.exit(0)
dataset_type = SequenceType.objects.filter(dataset_type = dataset)
b = dataset_type[0]
dataset_str = str(b.dataset_type)
return dataset,dataset_str
def get_type(dataset,molecule2,molecule_str,dataset_str): #get the sequence type from SequencType Table
if molecule2 != molecule_str :
print("something wrong in molecule")
elif dataset != dataset_str :
print("something wrong with dataset")
else:
try:
dataset_type = SequenceType.objects.filter(molecule_type = molecule2, dataset_type = dataset)
return dataset_type[0]
except IndexError:
print("there are no {molecule} - {dataset} combination in the database".format(molecule=molecule2.capitalize(),dataset=dataset_str))
sys.exit(0)
def get_description(url1,wiki_url2):
try:
re1 = requests.get(url1)
data1 = re1.json()
try:
title = data1['query']['search'][0]['title']
url2 = wiki_url2 + title
re2 = requests.get(url2)
data2 = re2.json()
key = data1['query']['search'][0]['pageid']
key = str(key)
description = data2['query']['pages'][key]['extract']
return description
except IndexError:
print("check your organism name again")
sys.exit(0)
except requests.exceptions.ConnectionError:
print("check your internet connection")
sys.exit(0)
def get_taxid(id_baseurl,name):
try:
url = id_baseurl+ name
re = requests.get(url)
data = re.json()
tax_id = data['esearchresult']['idlist'][0]
tax_id = int(tax_id)
return tax_id
except IndexError:
print("make sure your name is completed and correct")
sys.exit(0)
def delete_org(name):
Organism.objects.filter(display_name = name).delete()
return ("remove %s in database"%name)
|
[
"[email protected]"
] | |
4f0f69ccaea1e69b949c44d78167b885c304c83b
|
f125a883dbcc1912dacb3bf13e0f9263a42e57fe
|
/tsis5/part1/3.py
|
db2049f558022d1fc5fdf786334b167dba610d5e
|
[] |
no_license
|
AruzhanBazarbai/pp2
|
1f28b9439d1b55499dec4158e8906954b507f04a
|
9d7f1203b6735b27bb54dfda73b3d2c6b90524c3
|
refs/heads/master
| 2023-07-13T05:26:02.154105 | 2021-08-27T10:20:34 | 2021-08-27T10:20:34 | 335,332,307 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 247 |
py
|
# Write a Python program to append text to a file and display the text
def f_write(fname):
with open(fname,"w") as f:
f.write("11111111\n")
f.write("22222222\n")
txt=open(fname,"r")
print(txt.read())
f_write("abs.txt")
|
[
"[email protected]"
] | |
06e40101df06c9ccf95a7737360d1f5dd8b2a557
|
229e1e103bc24dda4d8fef54b762009e19045a45
|
/configs/nowd/abl/convbn/res101_nl_gc_nowd_innostd_ws1e0.py
|
0da33195f3bb6184f370e936c48482241eb57950
|
[
"MIT"
] |
permissive
|
yinmh17/CCNet
|
c0be71919877c0d44c51cd8fd8ad8f644ef618a6
|
d5e90fe5ccfa16389fd25bdd3e2160ffe2dfbd22
|
refs/heads/master
| 2020-06-18T13:03:46.781284 | 2019-11-12T06:26:59 | 2019-11-12T06:26:59 | 196,311,075 | 1 | 1 |
MIT
| 2019-07-21T19:48:39 | 2019-07-11T03:10:01 |
Python
|
UTF-8
|
Python
| false | false | 1,135 |
py
|
model = dict(
type='basenet',
pretrained='',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
block_num=[3, 4, 23, 3],
),
att=dict(
with_att=False,
type='glore',
att_stage=[False,False,True,False],
att_pos='after_add',
att_location=[[],[],[5,11,17],[]],
),
module=dict(
type='nl_nowd',
downsample=False,
whiten_type=['in_nostd'],
weight_init_scale=1.0,
with_unary=False,
with_gc=True,
with_nl=True,
nowd=['nl'],
use_out=True,
out_bn=True,
)
)
train_cfg = dict(
batch_size=8,
learning_rate=1e-2,
momentum=0.9,
num_steps=60000,
power=0.9,
random_seed=1234,
restore_from='./dataset/resnet101-imagenet.pth',
save_num_images=2,
start_iters=0,
save_from=59500,
save_pred_every=100,
snapshot_dir='snapshots/',
weight_decay=0.0005
)
data_cfg = dict(
data_dir='cityscapes',
data_list='./dataset/list/cityscapes/train.lst',
ignore_label=255,
input_size='769,769',
num_classes=19,
)
|
[
"[email protected]"
] | |
2c2b9eaa06d37224c0965868d3a8b2f6902e69ab
|
ce32e0e1b9568c710a3168abc3c638d6f9f6c31b
|
/prod/jobs/refill_binance_spot_bars.py
|
066b8b16d8c33677259b33a48a960e20cf5c9842
|
[
"MIT"
] |
permissive
|
msincenselee/vnpy
|
55ae76ca32cae47369a66bd2d6589c13d7a0bdd4
|
7f4fd3cd202712b083ed7dc2f346ba4bb1bda6d7
|
refs/heads/vnpy2
| 2022-05-19T10:06:55.504408 | 2022-03-19T15:26:01 | 2022-03-19T15:26:01 | 38,525,806 | 359 | 158 |
MIT
| 2020-09-09T00:09:12 | 2015-07-04T07:27:46 |
C++
|
UTF-8
|
Python
| false | false | 4,703 |
py
|
# flake8: noqa
import os
import sys
import csv
import pandas as pd
# Add the repository directory to sys.path as the project root.
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if ROOT_PATH not in sys.path:
sys.path.append(ROOT_PATH)
print(f'append {ROOT_PATH} into sys.path')
from datetime import datetime, timedelta
from vnpy.data.binance.binance_spot_data import BinanceSpotData, HistoryRequest, Exchange, Interval
from vnpy.trader.utility import get_csv_last_dt, append_data
# Fetch all contracts for Binance spot trading
spot_data = BinanceSpotData()
contracts = BinanceSpotData.load_contracts()
if len(contracts) == 0:
spot_data.save_contracts()
contracts = BinanceSpotData.load_contracts()
# Start date for the download
start_date = '20170101'
if __name__ == "__main__":
if len(sys.argv) >= 2:
interval = str(sys.argv[1]).lower()
if interval.isdecimal():
interval_num = int(sys.argv[1])
interval_type = Interval.MINUTE
else:
if 'm' in interval:
interval_type = Interval.MINUTE
interval_num = int(interval.replace('m', ''))
elif 'h' in interval:
interval_type = Interval.HOUR
interval_num = int(interval.replace('h', ''))
elif 'd' in interval:
interval_type = Interval.DAILY
interval_num = int(interval.replace('d', ''))
else:
interval = '1m'
interval_num = 1
interval_type = Interval.MINUTE
def download_symbol(symbol, start_dt, bar_file_path, interval_type, interval_num):
req = HistoryRequest(
symbol=symbol,
exchange=Exchange(contract_info.get('exchange')),
interval=interval_type,
interval_num=interval_num,
start=start_dt
)
bars = spot_data.get_bars(req=req, return_dict=True)
spot_data.export_to(bars, file_name=bar_file_path)
# Download the contracts one by one
for vt_symbol, contract_info in contracts.items():
symbol = contract_info.get('symbol')
if symbol not in ['BTCUSDT', 'ETHUSDT']:
continue
bar_file_path = os.path.abspath(os.path.join(
ROOT_PATH,
'bar_data',
'binance_spot',
f'{symbol}_{start_date}_{interval}.csv'))
    # The file does not exist: download everything and save it
    if not os.path.exists(bar_file_path):
        print(f'File {bar_file_path} does not exist, start date: {start_date}')
start_dt = datetime.strptime(start_date, '%Y%m%d')
download_symbol(symbol, start_dt, bar_file_path, interval_type, interval_num)
continue
    # The file exists: read the time of its last bar
last_dt = get_csv_last_dt(bar_file_path)
    # Could not read a last-bar time: download again from the start date
    if last_dt is None:
        print(f'Failed to read the last bar time from {bar_file_path}, start date: {start_date}')
start_dt = datetime.strptime(start_date, '%Y%m%d')
download_symbol(symbol, start_dt, bar_file_path, interval_type, interval_num)
continue
    # Got the last time: rewind to the start of that day and fetch from there
    start_dt = last_dt.replace(hour=0, minute=0, second=0, microsecond=0)
    print(f'File {bar_file_path} exists, last bar: {last_dt}, adjusted fetch start: {start_dt}')
req = HistoryRequest(
symbol=symbol,
exchange=Exchange(contract_info.get('exchange')),
interval=interval_type,
interval_num=interval_num,
start=start_dt
)
bars = spot_data.get_bars(req=req, return_dict=True)
if len(bars) <= 0:
        print(f'No {symbol} {interval_num} {interval_type.value} data was downloaded')
continue
bar_count = 0
    # Read the CSV header row
headers = []
with open(bar_file_path, "r", encoding='utf8') as f:
reader = csv.reader(f)
for header in reader:
headers = header
break
    # Append every bar newer than the last recorded bar time
with open(bar_file_path, 'a', encoding='utf8', newline='\n') as csvWriteFile:
writer = csv.DictWriter(f=csvWriteFile, fieldnames=headers, dialect='excel',
extrasaction='ignore')
for bar in bars:
if bar['datetime'] <= last_dt:
continue
bar_count += 1
writer.writerow(bar)
    print(f'Updated {symbol} data => file {bar_file_path}, last record: {bars[-1]}')
|
[
"[email protected]"
] | |
1c81443f8040a981a2cd339ee5c2b2fc3b1d83e5
|
d5fe9d0c7c93c3250b9e212435b02d8373dec091
|
/code/4.py
|
c0fa8bcde47e7a15e9b48e556ecb2f2379c6f9e1
|
[] |
no_license
|
HarshaaArunachalam/GUV
|
6937adb84f0928f08c9fbc519310abc06ef3541a
|
c047887bf6c19a4950c5f634111e1c02966367e5
|
refs/heads/master
| 2020-05-31T10:52:23.280052 | 2019-08-10T20:23:11 | 2019-08-10T20:23:11 | 190,249,464 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 27 |
py
|
ids=input()
print(ids+".")
|
[
"[email protected]"
] | |
20c7fddfa77a6b8ef7a7fb0847dedd60f878899d
|
c4a57dced2f1ed5fd5bac6de620e993a6250ca97
|
/huaxin/huaxin_ui/ui_android_xjb_2_0/credit_card_reserved_pay_page.py
|
d67aea16c10dd49cdf86b6503a14c6c650015002
|
[] |
no_license
|
wanglili1703/firewill
|
f1b287b90afddfe4f31ec063ff0bd5802068be4f
|
1996f4c01b22b9aec3ae1e243d683af626eb76b8
|
refs/heads/master
| 2020-05-24T07:51:12.612678 | 2019-05-17T07:38:08 | 2019-05-17T07:38:08 | 187,169,391 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,039 |
py
|
# coding=utf-8
import time
from _common.page_object import PageObject
from _common.xjb_decorator import robot_log
import huaxin_ui.ui_android_xjb_2_0.credit_card_repay_page
RESERVED_PAY="xpath_//android.widget.TextView[@text='信用卡还款']"
TRADE_PASSWORD = "xpath_//android.widget.EditText[@resource-id='com.shhxzq.xjb:id/trade_pop_password_et']"
CREDIT_CARD_SELECTED="xpath_//android.widget.RelativeLayout[@resource-id='com.shhxzq.xjb:id/rl_credit_item']"
RESERVED_PAY_AMOUNT="xpath_//android.widget.EditText[@text='请输入预约还款金额']"
RESERVED_PAY_DATE="xpath_//android.widget.TextView[@text='请选择信用卡还款日']"
DEDUCTION_DATE="xpath_//android.widget.TextView[@resource-id='com.shhxzq.xjb:id/tv_cr_deduction_date']"
RESERVED_PAY_DATE_MONTH="xpath_//android.view.View[@resource-id='com.shhxzq.xjb:id/month']"
RESERVED_PAY_DATE_DAY="xpath_//android.view.View[@resource-id='com.shhxzq.xjb:id/day']"
RESERVED_PAY_DATE_COMPELETED="xpath_//android.widget.TextView[@text='完成']"
RESERVED_PAY_COMFIRM="xpath_//android.widget.Button[@text='确认还款']"
RESERVED_PAY_DONE="xpath_//android.widget.Button[@text='确认']"
current_page=[]
class ReservedPayPage(PageObject):
def __init__(self, web_driver):
super(ReservedPayPage, self).__init__(web_driver)
self.elements_exist(*current_page)
    # Credit-card scheduled (reserved) repayment
@robot_log
def reserved_pay(self,reserved_pay_amount,trade_password):
self.perform_actions(RESERVED_PAY_AMOUNT,reserved_pay_amount,
RESERVED_PAY_DATE,
RESERVED_PAY_DATE_MONTH,
RESERVED_PAY_DATE_DAY,
RESERVED_PAY_DATE_COMPELETED,
RESERVED_PAY_COMFIRM,
TRADE_PASSWORD,trade_password,
RESERVED_PAY_DONE
)
page=huaxin_ui.ui_android_xjb_2_0.credit_card_repay_page.CreditCardRepayPage(self.web_driver)
return page
|
[
"[email protected]"
] | |
3c3c3a52869314e48ca1ff01f62c307cf14d182f
|
0f5cccdf84bb02eafd7e18fbea2f1342bfd48185
|
/arch/config/cmsis.py
|
9f2345e3b38598ea5959fb283d3d0cccf0b8bba7
|
[
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"LicenseRef-scancode-public-domain"
] |
permissive
|
fb321/csp
|
c56632611a041d391c241d0ed5f0dc32c7387bed
|
4963c6933e873073ac4db1837896f5ca087bcd94
|
refs/heads/master
| 2020-06-27T13:21:00.675587 | 2019-06-29T06:04:40 | 2019-07-02T13:31:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,879 |
py
|
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
def instantiateComponent(cmsisComponent):
cmsisInformation = cmsisComponent.createCommentSymbol("cmsisInformation", None)
import xml.etree.ElementTree as ET
cmsisDescriptionFile = open(Variables.get("__CMSIS_PACK_DIR") + "/ARM.CMSIS.pdsc", "r")
cmsisDescription = ET.fromstring(cmsisDescriptionFile.read())
cmsisInformation.setLabel("Release Information: " + str(cmsisDescription.iter("release").next().attrib))
#check if it is a cortex M device
archNode = ATDF.getNode('/avr-tools-device-file/devices')
if ("m" in archNode.getChildren()[0].getAttribute("architecture").split("CORTEX-")[1].lower()):
coreFile = "core_c" + str(archNode.getChildren()[0].getAttribute("architecture").split("CORTEX-")[1].lower()) + ".h"
# add core header files
headerFileNames = ["cmsis_compiler.h", "cmsis_iccarm.h", "cmsis_gcc.h", "tz_context.h", str(eval('coreFile')), "mpu_armv7.h", "cmsis_version.h"]
#Cortex M23 has MPU v8
if (archNode.getChildren()[0].getAttribute("architecture") == "CORTEX-M23"):
headerFileNames.remove("mpu_armv7.h")
headerFileNames.append("mpu_armv8.h")
for headerFileName in headerFileNames:
szSymbol = "{}_H".format(headerFileName[:-2].upper())
headerFile = cmsisComponent.createFileSymbol(szSymbol, None)
headerFile.setRelative(False)
headerFile.setSourcePath(Variables.get("__CMSIS_PACK_DIR") + "/CMSIS/Core/Include/" + headerFileName)
headerFile.setOutputName(headerFileName)
headerFile.setMarkup(False)
headerFile.setOverwrite(True)
headerFile.setDestPath("../../packs/CMSIS/CMSIS/Core/Include/")
headerFile.setProjectPath("packs/CMSIS/CMSIS/Core/Include/")
headerFile.setType("HEADER")
#assume this is a cortex A device
else:
headerFileNames = ["cmsis_compiler.h", "cmsis_gcc.h", "cmsis_iccarm.h", "cmsis_cp15.h", "core_ca.h"]
# add core header files for cortex a devices
for headerFileName in headerFileNames:
szSymbol = "CORE_A_{}_H".format(headerFileName[:-2].upper())
headerFile = cmsisComponent.createFileSymbol(szSymbol, None)
headerFile.setRelative(False)
headerFile.setSourcePath(Variables.get("__CMSIS_PACK_DIR") + "/CMSIS/Core_A/Include/" + headerFileName)
headerFile.setOutputName(headerFileName)
headerFile.setMarkup(False)
headerFile.setOverwrite(True)
headerFile.setDestPath("../../packs/CMSIS/CMSIS/Core_A/Include/")
headerFile.setProjectPath("packs/CMSIS/CMSIS/Core_A/Include/")
headerFile.setType("HEADER")
# add dsp header files
headerFileNames = ["arm_common_tables.h", "arm_const_structs.h", "arm_math.h"]
for headerFileName in headerFileNames:
szSymbol = "{}_H".format(headerFileName[:-2].upper())
headerFile = cmsisComponent.createFileSymbol(szSymbol, None)
headerFile.setRelative(False)
headerFile.setSourcePath(Variables.get("__CMSIS_PACK_DIR") + "/CMSIS/DSP/Include/" + headerFileName)
headerFile.setOutputName(headerFileName)
headerFile.setMarkup(False)
headerFile.setOverwrite(True)
headerFile.setDestPath("../../packs/CMSIS/CMSIS/DSP/Include/")
headerFile.setProjectPath("packs/CMSIS/CMSIS/DSP/Include/")
headerFile.setType("HEADER")
|
[
"http://support.microchip.com"
] |
http://support.microchip.com
|
94b209e024d003ffb3534976b6821966ef68c231
|
3eee3ee3b0dd5b5f50b0c40390fc0dfda36ccf90
|
/examples/textrnn_classification_demo.py
|
539ea657a33c75470aa2410ed235370a400b7393
|
[
"Apache-2.0",
"Python-2.0"
] |
permissive
|
shibing624/pytextclassifier
|
d36f514dee0a01c64a2e57d069344d8505cf2140
|
daebd31cfbe92606da92f007ffba390475e73b16
|
refs/heads/master
| 2023-09-01T05:54:11.775314 | 2023-08-22T11:23:37 | 2023-08-22T11:23:37 | 89,688,656 | 263 | 31 |
Apache-2.0
| 2023-09-11T12:46:43 | 2017-04-28T09:04:14 |
Python
|
UTF-8
|
Python
| false | false | 1,502 |
py
|
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
"""
import sys
sys.path.append('..')
from pytextclassifier import TextRNNClassifier
if __name__ == '__main__':
m = TextRNNClassifier(output_dir='models/textrnn-toy')
data = [
('education', '名师指导托福语法技巧:名词的复数形式'),
('education', '中国高考成绩海外认可 是“狼来了”吗?'),
('education', '公务员考虑越来越吃香,这是怎么回事?'),
('sports', '图文:法网孟菲尔斯苦战进16强 孟菲尔斯怒吼'),
('sports', '四川丹棱举行全国长距登山挑战赛 近万人参与'),
('sports', '米兰客场8战不败国米10年连胜')
]
# train and save best model
m.train(data, num_epochs=3, evaluate_during_training_steps=1)
print(m)
# load best model from model_dir
m.load_model()
predict_label, predict_proba = m.predict(['福建春季公务员考试报名18日截止 2月6日考试',
'意甲首轮补赛交战记录:米兰客场8战不败国米10年连胜'])
print(f'predict_label: {predict_label}, predict_proba: {predict_proba}')
test_data = [
('education', '福建春季公务员考试报名18日截止 2月6日考试'),
('sports', '意甲首轮补赛交战记录:米兰客场8战不败国米10年连胜'),
]
acc_score = m.evaluate_model(test_data)
print(f'acc_score: {acc_score}')
|
[
"[email protected]"
] | |
9b1471a8abc70f359d9fa154e922f6b368d53732
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2904/60641/301110.py
|
1c0cdc02566ddb95f235e917e0595acde8b346c7
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 169 |
py
|
def main():
num = input()
if num[0] == "-":
print(int("-" + num[:0:-1]))
else:
print(int(num[::-1]))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
fbdedf4bbdf5e1fff6af26c44e1308075f620afb
|
1989c958e197c782f025e45554d39a3e302b1523
|
/contact/forms.py
|
c55d183e7d08116ce3e976f071a435ac4529798d
|
[] |
no_license
|
Levalife/django_lessons
|
27f400ddc515102c62de39456b58b364c3ebfb80
|
0b313089741eb5ba8e6dead105240447585749e3
|
refs/heads/master
| 2021-01-13T01:40:05.965881 | 2013-07-02T10:08:07 | 2013-07-02T10:08:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 414 |
py
|
from django import forms
class ContactForm(forms.Form):
subject = forms.CharField(max_length=100)
email = forms.EmailField(required = False, label='Your e-mail address')
message = forms.CharField(widget=forms.Textarea)
def clean_message(self):
message = self.cleaned_data['message']
num_words = len(message.split())
if num_words < 4:
raise forms.ValidationError('Not enough words!')
return message
|
[
"[email protected]"
] | |
7f5cdeff83e1e295b6b20393714452880925c6b7
|
a65e5dc54092a318fc469543c3b96f6699d0c60b
|
/Personel/Siddhesh/Python/Mar19/Array4.py
|
826bf25ff6300f430b57b88d40ae55e6c0e19e92
|
[] |
no_license
|
shankar7791/MI-10-DevOps
|
e15bfda460ffd0afce63274f2f430445d04261fe
|
f0b9e8c5be7b28298eb6d3fb6badf11cd033881d
|
refs/heads/main
| 2023-07-04T15:25:08.673757 | 2021-08-12T09:12:37 | 2021-08-12T09:12:37 | 339,016,230 | 1 | 0 | null | 2021-08-12T09:12:37 | 2021-02-15T08:50:08 |
JavaScript
|
UTF-8
|
Python
| false | false | 577 |
py
|
#Changing and Adding Elements
#Arrays are mutable; their elements can be changed in a similar way to lists.
import array as arr
numbers = arr.array('i',[1, 2, 3, 4, 5, 6 ,10])
#changing first element
numbers[0]=0
print (numbers)
#changing 3rd to 5th element
numbers[2:5] = arr.array('i',[4 ,8 ,9])
print(numbers)
#we can add an element using the append() method
numbers.append(12)
print(numbers)
#we can add several elements at once using the extend() method
numbers.extend([7,9,5])
print(numbers)
#we can also insert an element at a given index using the insert() method
numbers.insert(7, 13)
print(numbers)
|
[
"[email protected]"
] | |
42ddc05f74f30db5709aaa0150e361b4652d8df3
|
51363872687318ac54e815b51d16d44d214974a2
|
/catkin/build/turtlebot_simulator/turtlebot_gazebo/catkin_generated/pkg.installspace.context.pc.py
|
974cad3cda2a1d7ba2eac148b2cf27ccfa3acbef
|
[] |
no_license
|
pirmou/catkin_ws
|
2acee80a43f17841326d1b917931866d561648c3
|
abaac27209016a944bd3520d84e4dc3aab1abf2e
|
refs/heads/main
| 2023-02-17T04:44:03.927127 | 2021-01-10T17:40:02 | 2021-01-10T17:40:02 | 328,440,401 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 366 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_gazebo"
PROJECT_SPACE_DIR = "/opt/ros/melodic"
PROJECT_VERSION = "2.2.3"
|
[
"[email protected]"
] | |
4cef778c67f1af2bbf43a834f84a4ad272c1d7c0
|
3d90c79a7337bff78eb663ef8120e8279498155b
|
/30 Days of Code/Day 28 RegEx, Patterns, and Intro to Databases.py
|
9b4f935dcfaa5d170c590032372d9ee8ae19e8f3
|
[] |
no_license
|
ikaushikpal/Hacker_Rank_Problems
|
b460f7c1d4bf331102239d13a9096ee5cd479d21
|
72e2f2168e1bcfdd267c9daec6da71d5aa44de52
|
refs/heads/master
| 2022-11-18T00:24:28.529594 | 2020-07-22T11:50:22 | 2020-07-22T11:50:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 390 |
py
|
import math
import os
import random
import re
import sys
if __name__ == '__main__':
N = int(input())
id = []
for N_itr in range(N):
firstNameEmailID = input().split()
firstName = firstNameEmailID[0]
emailID = firstNameEmailID[1]
if '@gmail' in emailID:
id.append(firstName)
id.sort()
for i in id:
print(i)
|
[
"[email protected]"
] | |
5bafd0e38072c72e33f2894a208b8ac1c46f7594
|
cb4e07b2a5dd30804ce428ec84d9e9f77709fcd5
|
/swea/D3/SWEA_5201_컨테이너운반_구진범.py
|
0c1b095c642a35ad82382f12e82dd9555f1aa8cc
|
[] |
no_license
|
jbsam2/algo_problem
|
141c17003e88a69afdeea93a723e7f27c4626fdc
|
18f2cab5a9af2dec57b7fd6f8218badd7de822e4
|
refs/heads/master
| 2023-05-18T10:03:00.408300 | 2021-06-02T10:36:50 | 2021-06-02T10:36:50 | 282,104,637 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 284 |
py
|
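# Greedy strategy: pair each truck (largest capacity first) with the heaviest
# remaining container it can still carry.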
for T in range(int(input())):
n,m=map(int,input().split());ret=0
w=sorted([*map(int,input().split())],reverse=True)
t=sorted([*map(int,input().split())],reverse=True)
for i in t:
for j in w:
if i>=j:ret+=j;w.remove(j);break
print(f'#{T+1}',ret)
|
[
"[email protected]"
] | |
53882e755a639044d51f4ef49f066bb78922a0b9
|
7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14
|
/airbyte-integrations/connector-templates/destination-python/integration_tests/integration_test.py
|
836df2c8d66ef056068217bc8f8e89be3882e0fb
|
[
"MIT",
"Elastic-2.0"
] |
permissive
|
Velocity-Engineering/airbyte
|
b6e1fcead5b9fd7c74d50b9f27118654604dc8e0
|
802a8184cdd11c1eb905a54ed07c8732b0c0b807
|
refs/heads/master
| 2023-07-31T15:16:27.644737 | 2021-09-28T08:43:51 | 2021-09-28T08:43:51 | 370,730,633 | 0 | 1 |
MIT
| 2021-06-08T05:58:44 | 2021-05-25T14:55:43 |
Java
|
UTF-8
|
Python
| false | false | 1,171 |
py
|
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def integration_test():
# TODO write integration tests
pass
|
[
"[email protected]"
] | |
d0e9940d5b58451bb8468f501adf11de55dca199
|
8dbb2a3e2286c97b1baa3ee54210189f8470eb4d
|
/kubernetes-stubs/client/models/v2beta1_resource_metric_status.pyi
|
37f745aeb5fbb9ef0d7d17bc2065e18e6af5f79a
|
[] |
no_license
|
foodpairing/kubernetes-stubs
|
e4b0f687254316e6f2954bacaa69ff898a88bde4
|
f510dc3d350ec998787f543a280dd619449b5445
|
refs/heads/master
| 2023-08-21T21:00:54.485923 | 2021-08-25T03:53:07 | 2021-08-25T04:45:17 | 414,555,568 | 0 | 0 | null | 2021-10-07T10:26:08 | 2021-10-07T10:26:08 | null |
UTF-8
|
Python
| false | false | 600 |
pyi
|
import datetime
import typing
import kubernetes.client
class V2beta1ResourceMetricStatus:
current_average_utilization: typing.Optional[int]
current_average_value: str
name: str
def __init__(
self,
*,
current_average_utilization: typing.Optional[int] = ...,
current_average_value: str,
name: str
) -> None: ...
def to_dict(self) -> V2beta1ResourceMetricStatusDict: ...
class V2beta1ResourceMetricStatusDict(typing.TypedDict, total=False):
currentAverageUtilization: typing.Optional[int]
currentAverageValue: str
name: str
|
[
"[email protected]"
] | |
933f509f45c0fd0a83dfdb92b9c39cf33d4e37f7
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_QC2595.py
|
5a64c80e92f317a9578fed16757ba858da82bd83
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,185 |
py
|
# qubit number=4
# total number=30
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
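    # For every input bit-string rep with f(rep) == "1", sandwich a
    # multi-controlled Toffoli between X gates so the controls fire exactly
    # on that pattern, flipping the target qubit.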
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=27
prog.cz(input_qubit[0],input_qubit[3]) # number=28
prog.h(input_qubit[3]) # number=29
prog.x(input_qubit[3]) # number=15
prog.rx(1.8001325905069514,input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=16
prog.h(input_qubit[1]) # number=22
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
prog.x(input_qubit[3]) # number=24
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.x(input_qubit[1]) # number=25
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.z(input_qubit[1]) # number=21
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.x(input_qubit[1]) # number=17
prog.cx(input_qubit[2],input_qubit[0]) # number=11
prog.y(input_qubit[0]) # number=12
prog.y(input_qubit[0]) # number=13
prog.z(input_qubit[2]) # number=26
prog.cx(input_qubit[2],input_qubit[1]) # number=23
prog.x(input_qubit[0]) # number=19
prog.x(input_qubit[0]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2595.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
a636a08716e18c4d09c625704f0d0e10db999f25
|
ed4910769a51691f222a3e311215b014dc64ae3a
|
/wagtail/api/v2/filters.py
|
42411f3c32464a76b5947023760b645c5fef406d
|
[
"BSD-3-Clause"
] |
permissive
|
mr-const/wagtail
|
cba2db26a5b370aef2fc5dd41ca0f0ba95bf6536
|
091e26adfb9e4dc9bdf70be3572c104c356c664d
|
refs/heads/master
| 2021-01-13T06:28:51.819517 | 2016-03-10T15:29:30 | 2016-03-10T15:29:30 | 53,587,500 | 0 | 0 | null | 2016-03-10T13:43:50 | 2016-03-10T13:43:50 | null |
UTF-8
|
Python
| false | false | 7,094 |
py
|
from django.conf import settings
from rest_framework.filters import BaseFilterBackend
from taggit.managers import _TaggableManager
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.backends import get_search_backend
from .utils import BadRequestError, pages_for_site
class FieldsFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
This performs field level filtering on the result set
Eg: ?title=James Joyce
"""
fields = set(view.get_available_fields(queryset.model)).union({'id'})
for field_name, value in request.GET.items():
if field_name in fields:
field = getattr(queryset.model, field_name, None)
if isinstance(field, _TaggableManager):
for tag in value.split(','):
queryset = queryset.filter(**{field_name + '__name': tag})
# Stick a message on the queryset to indicate that tag filtering has been performed
# This will let the do_search method know that it must raise an error as searching
# and tag filtering at the same time is not supported
queryset._filtered_by_tag = True
else:
queryset = queryset.filter(**{field_name: value})
return queryset
class OrderingFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
This applies ordering to the result set
Eg: ?order=title
It also supports reverse ordering
Eg: ?order=-title
And random ordering
Eg: ?order=random
"""
if 'order' in request.GET:
# Prevent ordering while searching
if 'search' in request.GET:
raise BadRequestError("ordering with a search query is not supported")
order_by = request.GET['order']
# Random ordering
if order_by == 'random':
# Prevent ordering by random with offset
if 'offset' in request.GET:
raise BadRequestError("random ordering with offset is not supported")
return queryset.order_by('?')
# Check if reverse ordering is set
if order_by.startswith('-'):
reverse_order = True
order_by = order_by[1:]
else:
reverse_order = False
# Add ordering
if order_by == 'id' or order_by in view.get_available_fields(queryset.model):
queryset = queryset.order_by(order_by)
else:
# Unknown field
raise BadRequestError("cannot order by '%s' (unknown field)" % order_by)
# Reverse order
if reverse_order:
queryset = queryset.reverse()
return queryset
class SearchFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
This performs a full-text search on the result set
Eg: ?search=James Joyce
"""
search_enabled = getattr(settings, 'WAGTAILAPI_SEARCH_ENABLED', True)
if 'search' in request.GET:
if not search_enabled:
raise BadRequestError("search is disabled")
# Searching and filtering by tag at the same time is not supported
if getattr(queryset, '_filtered_by_tag', False):
raise BadRequestError("filtering by tag with a search query is not supported")
search_query = request.GET['search']
search_operator = request.GET.get('search_operator', None)
sb = get_search_backend()
queryset = sb.search(search_query, queryset, operator=search_operator)
return queryset
class ChildOfFilter(BaseFilterBackend):
"""
Implements the ?child_of filter used to filter the results to only contain
pages that are direct children of the specified page.
"""
def get_root_page(self, request):
return Page.get_first_root_node()
def get_page_by_id(self, request, page_id):
return Page.objects.get(id=page_id)
def filter_queryset(self, request, queryset, view):
if 'child_of' in request.GET:
try:
parent_page_id = int(request.GET['child_of'])
assert parent_page_id >= 0
parent_page = self.get_page_by_id(request, parent_page_id)
except (ValueError, AssertionError):
if request.GET['child_of'] == 'root':
parent_page = self.get_root_page(request)
else:
raise BadRequestError("child_of must be a positive integer")
except Page.DoesNotExist:
raise BadRequestError("parent page doesn't exist")
queryset = queryset.child_of(parent_page)
queryset._filtered_by_child_of = True
return queryset
class RestrictedChildOfFilter(ChildOfFilter):
"""
A restricted version of ChildOfFilter that only allows pages in the current
site to be specified.
"""
def get_root_page(self, request):
return request.site.root_page
def get_page_by_id(self, request, page_id):
site_pages = pages_for_site(request.site)
return site_pages.get(id=page_id)
class DescendantOfFilter(BaseFilterBackend):
"""
    Implements the ?descendant_of filter which limits the set of pages to a
particular branch of the page tree.
"""
def get_root_page(self, request):
return Page.get_first_root_node()
def get_page_by_id(self, request, page_id):
return Page.objects.get(id=page_id)
def filter_queryset(self, request, queryset, view):
if 'descendant_of' in request.GET:
if getattr(queryset, '_filtered_by_child_of', False):
raise BadRequestError("filtering by descendant_of with child_of is not supported")
try:
parent_page_id = int(request.GET['descendant_of'])
assert parent_page_id >= 0
parent_page = self.get_page_by_id(request, parent_page_id)
except (ValueError, AssertionError):
if request.GET['descendant_of'] == 'root':
parent_page = self.get_root_page(request)
else:
raise BadRequestError("descendant_of must be a positive integer")
except Page.DoesNotExist:
raise BadRequestError("ancestor page doesn't exist")
queryset = queryset.descendant_of(parent_page)
return queryset
class RestrictedDescendantOfFilter(DescendantOfFilter):
"""
    A restricted version of DescendantOfFilter that only allows pages in the current
site to be specified.
"""
def get_root_page(self, request):
return request.site.root_page
def get_page_by_id(self, request, page_id):
site_pages = pages_for_site(request.site)
return site_pages.get(id=page_id)
|
[
"[email protected]"
] | |
a9f7c3d75ffd672e277c27e70adee3e33c3e9510
|
8076de02ad53ea7b6328f819ae23e212f3a7d47c
|
/DXCTraining/Examples/4OOP/Inheritence/first.py
|
ada7ce1d8a65e48bcc5d991006dd997c7906d4cc
|
[] |
no_license
|
rhitik26/python
|
f6013d978cbfc83c211b0e4e9aa92ee43a1b488f
|
b667c6502c6a1cb58b79ddd9d30a752f92da1f94
|
refs/heads/master
| 2020-09-21T12:34:03.869549 | 2019-11-29T06:28:33 | 2019-11-29T06:28:33 | 224,790,343 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 521 |
py
|
def y():
pass
class A:
sam='test'
class Person:
count=0
def __init__(self,name):
Person.count+=1
self.name=name
def sayHi(self):
print('Hi '+self.name)
class Emp(Person,A):
def __init__(self,name,id):
super().__init__(name)
self.id=id
def sayHi(self):
super().sayHi()
print('Hello '+self.name)
e1 = Emp('Saravan', '007')
e1.sayHi()
#e1.sayHi()
#print(e1.__dict__)
#print(Emp.__dict__)
#print(Person.__dict__)
#print(Emp.__bases__)
#z=type('Foo', (), {'attrib': 'value'}) #meta class
|
[
"[email protected]"
] | |
c2ac269526081ba4c09e510388a319650a8b9b24
|
8a25ada37271acd5ea96d4a4e4e57f81bec221ac
|
/home/pi/GrovePi/Software/Python/others/temboo/Library/Basecamp/CompleteItem.py
|
14b5086953de1a5f7782bc7413760851d2d6ada6
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
lupyuen/RaspberryPiImage
|
65cebead6a480c772ed7f0c4d0d4e08572860f08
|
664e8a74b4628d710feab5582ef59b344b9ffddd
|
refs/heads/master
| 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,797 |
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# CompleteItem
# Marks a single, specified item in a To-do list as complete.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CompleteItem(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the CompleteItem Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(CompleteItem, self).__init__(temboo_session, '/Library/Basecamp/CompleteItem')
def new_input_set(self):
return CompleteItemInputSet()
def _make_result_set(self, result, path):
return CompleteItemResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CompleteItemChoreographyExecution(session, exec_id, path)
class CompleteItemInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the CompleteItem
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountName(self, value):
"""
Set the value of the AccountName input for this Choreo. ((required, string) A valid Basecamp account name. This is the first part of the account's URL.)
"""
super(CompleteItemInputSet, self)._set_input('AccountName', value)
def set_ItemID(self, value):
"""
Set the value of the ItemID input for this Choreo. ((required, integer) The ID of the item to mark as complete.)
"""
super(CompleteItemInputSet, self)._set_input('ItemID', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) The Basecamp account password. Use the value 'X' when specifying an API Key for the Username input.)
"""
super(CompleteItemInputSet, self)._set_input('Password', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((required, string) A Basecamp account username or API Key.)
"""
super(CompleteItemInputSet, self)._set_input('Username', value)
class CompleteItemResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the CompleteItem Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (There is no structrued response from complete item requests.)
"""
return self._output.get('Response', None)
class CompleteItemChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CompleteItemResultSet(response, path)
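
# Hypothetical usage sketch (added for illustration; the TembooSession setup is
# outside this file, and the account values below are placeholders):
#
#   choreo = CompleteItem(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccountName('myaccount')
#   inputs.set_Username('myuser')      # or an API key, with Password set to 'X'
#   inputs.set_Password('mypassword')
#   inputs.set_ItemID(12345)
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())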
|
[
"[email protected]"
] | |
f8feb419c39656afeff8e906cb8a45211147ee2b
|
f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb
|
/weapp/webapp/modules/cms/models.py
|
4e7ad86001019d94d76bc9f0a091acc07944b931
|
[] |
no_license
|
chengdg/weizoom
|
97740c121724fae582b10cdbe0ce227a1f065ece
|
8b2f7befe92841bcc35e0e60cac5958ef3f3af54
|
refs/heads/master
| 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,740 |
py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from hashlib import md5
from django.db import models
from django.contrib.auth.models import Group, User
from django.db.models import signals
from django.conf import settings
from django.db.models import F
from core import dateutil
#########################################################################
# Category: article category
#########################################################################
class Category(models.Model):
owner = models.ForeignKey(User, related_name='owned_cms_categories')
    name = models.CharField(max_length=256) # category name
    pic_url = models.CharField(max_length=1024, default='') # category image
    display_index = models.IntegerField(default=1, db_index=True) # display order
    created_at = models.DateTimeField(auto_now_add=True) # time added
class Meta(object):
db_table = 'cms_category'
verbose_name = '文章分类'
verbose_name_plural = '文章分类'
#########################################################################
# Article: article
#########################################################################
class Article(models.Model):
owner = models.ForeignKey(User, related_name='owned_cms_articles')
    title = models.CharField(max_length=256) # title
    summary = models.CharField(max_length=256, default='') # summary
    content = models.TextField(default='') # body content
    display_index = models.IntegerField(default=1, db_index=True) # display order
    created_at = models.DateTimeField(auto_now_add=True) # time added
class Meta(object):
db_table = 'cms_article'
verbose_name = '文章'
verbose_name_plural = '文章'
#########################################################################
# SpecialArticle: special article
#########################################################################
class SpecialArticle(models.Model):
owner = models.ForeignKey(User, related_name='owned_cms_special_articles')
    name = models.CharField(max_length=256) # internal name
    title = models.CharField(max_length=256) # title
    content = models.TextField(default='') # body content
    display_index = models.IntegerField(default=1, db_index=True) # display order
    created_at = models.DateTimeField(auto_now_add=True) # time added
class Meta(object):
db_table = 'cms_special_article'
verbose_name = '特殊文章'
verbose_name_plural = '特殊文章'
#########################################################################
# CategoryHasArticle: <category, article> relation
#########################################################################
class CategoryHasArticle(models.Model):
article = models.ForeignKey(Article)
category = models.ForeignKey(Category)
class Meta(object):
db_table = 'cms_category_has_article'
|
[
"[email protected]"
] | |
5955abba13969d2e6dbf080aa32f43a83df0882d
|
cd257631f442d24d2e4902cfb60d05095e7c49ad
|
/week-03/day-03/centered_square.py
|
718aa19f4966f147193973ccae71489f2fde4ccd
|
[] |
no_license
|
green-fox-academy/Chiflado
|
62e6fc1244f4b4f2169555af625b6bfdda41a975
|
008893c63a97f4c28ff63cab269b4895ed9b8cf1
|
refs/heads/master
| 2021-09-04T03:25:25.656921 | 2018-01-15T09:02:47 | 2018-01-15T09:02:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 522 |
py
|
from tkinter import *
root = Tk()
canvas = Canvas(root, width='300', height='300')
canvas.pack()
# draw a green 10x10 square at the center of the canvas.
canvas_width = 300
canvas_height = 300
edge_length = 10
starting_x = canvas_width / 2 - edge_length / 2
starting_y = canvas_height / 2 - edge_length / 2
ending_x = canvas_width / 2 + edge_length / 2
ending_y = canvas_height / 2 + edge_length / 2
green_square = canvas.create_rectangle(starting_x, starting_y, ending_x, ending_y, fill='green')
root.mainloop()
|
[
"[email protected]"
] | |
705a6c711907b2fa6d7884a850e39de847ea32db
|
d750fb953abda6a965c4f307266b2405ad8c11b1
|
/programers algorithm/LEVEL2/주식가격.py
|
3c0fee793a8d39bb540a3b9695a7381a3d96d493
|
[] |
no_license
|
heaven324/Python
|
dbe8e57fa7741ab963af239474d108ff9dbdc0c7
|
065663fe1e5f86c9d08ec645e24b5fde2045fee1
|
refs/heads/master
| 2023-05-25T02:06:01.728138 | 2023-05-17T15:12:08 | 2023-05-17T15:12:08 | 188,010,310 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,343 |
py
|
# solution
prices = [1, 2, 3, 2, 3]
def solution(prices):
result = []
for i in range(len(prices)-1):
cnt = 1
for j in range(i+1, len(prices)-1):
if prices[i] <= prices[j]:
cnt += 1
else:
break
result.append(cnt)
result.append(0)
return result
print(solution(prices))
'''
Problem description
Given an array prices of stock prices recorded once per second, complete the
solution function so that it returns, for each second, how many seconds the
price went without falling.

Constraints
Each price in prices is a natural number between 1 and 10,000.
The length of prices is between 2 and 100,000.

Example input/output
prices               return
[1, 2, 3, 2, 3]      [4, 3, 1, 1, 0]

Example explanation
The price of 1 at second 1 never fell through to the end.
The price of 2 at second 2 never fell through to the end.
The price of 3 at second 3 fell one second later, so it held for 1 second.
The price of 2 at second 4 held for 1 second.
The price of 3 at second 5 held for 0 seconds.

Notice: the problem statement was revised on 28 February 2019.
'''
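
# A hedged O(n) alternative sketch (not part of the original file): the same
# answer can be computed with an index stack instead of the O(n^2) double loop
# above.
def solution_stack(prices):
    answer = [0] * len(prices)
    stack = []  # indices whose "time until the price drops" is still unknown
    for i, price in enumerate(prices):
        while stack and prices[stack[-1]] > price:
            j = stack.pop()
            answer[j] = i - j  # price at second j dropped i - j seconds later
        stack.append(i)
    while stack:  # prices that never dropped hold until the last second
        j = stack.pop()
        answer[j] = len(prices) - 1 - j
    return answer

print(solution_stack(prices))  # [4, 3, 1, 1, 0]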
|
[
"[email protected]"
] | |
b2b77cda68bc763a8f080a5688d500e6503eeee5
|
58c8838461101f2252d17824e924ece7e93212d7
|
/tests/cloudcli/test_server_history.py
|
6b50c457b3b890abce3ed7070e178294fb14528b
|
[
"MIT"
] |
permissive
|
imcvampire/kamateratoolbox
|
9b03ac703c1dd996de3faad5520220d4e7db91f2
|
372853059c584bb6b80c59efca125e08352def0e
|
refs/heads/master
| 2023-02-03T17:23:00.052904 | 2020-12-22T07:19:17 | 2020-12-22T07:19:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,421 |
py
|
import datetime
from ..common import assert_only_one_server_cloudcli, assert_no_matching_servers_cloudcli, get_server_id
def test_server_history_only_one_server(cloudcli, session_server_powered_on, session_server_powered_off):
assert_only_one_server_cloudcli([session_server_powered_on, session_server_powered_off], cloudcli, ["server", "history"])
def test_server_history_no_matching_servers(cloudcli):
assert_no_matching_servers_cloudcli(cloudcli, ["server", "history"])
def test_server_history(cloudcli, temp_server):
print("Reboot server to have some history")
cloudcli("server", "reboot", "--name", temp_server["name"], "--wait")
res = cloudcli("server", "history", "--name", temp_server["name"], "--format", "json")
assert len(res) == 2
assert set(res[0].keys()) == {"date", "user", "action"}
assert datetime.datetime.strptime(res[0]["date"], "%d/%m/%Y %H:%M:%S").date() == datetime.datetime.now().date()
assert len(res[0]["user"]) > 3
assert len(res[0]["action"]) > 3
print("Get history by id")
res = cloudcli("server", "history", "--id", get_server_id(temp_server), "--format", "json")
assert len(res) == 2
assert set(res[0].keys()) == {"date", "user", "action"}
assert datetime.datetime.strptime(res[0]["date"], "%d/%m/%Y %H:%M:%S").date() == datetime.datetime.now().date()
assert len(res[0]["user"]) > 3
assert len(res[0]["action"]) > 3
|
[
"[email protected]"
] | |
bbf8b5718568d7b9ef2974b393b8ce361eeefe1f
|
898f547bbeb7d1da27bc40e2d594a363c0d1a75a
|
/Leetcode Problems/lc1389e.py
|
bd96f9c3e848960611d528be93d5b379427f98f2
|
[] |
no_license
|
TerryLun/Code-Playground
|
4e069e28c457309329f003ea249be83d7578a4a3
|
708ad69594cf5b9edc9ff1189716cad70916574c
|
refs/heads/master
| 2023-06-20T14:03:43.924472 | 2021-07-23T05:27:48 | 2021-07-23T05:27:48 | 237,375,279 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 680 |
py
|
"""
1389. Create Target Array in the Given Order
Given two arrays of integers nums and index. Your task is to create target array under the following rules:
Initially target array is empty.
From left to right read nums[i] and index[i], insert at index index[i] the value nums[i] in target array.
Repeat the previous step until there are no elements to read in nums and index.
Return the target array.
It is guaranteed that the insertion operations will be valid.
"""
def createTargetArray(nums, index):
target = []
for i, j in zip(index, nums):
if i >= len(target):
target.append(j)
else:
target.insert(i, j)
return target
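
# Quick check with the LeetCode sample (added for illustration, not part of
# the original submission): nums = [0,1,2,3,4] and index = [0,1,2,2,1]
# should produce [0,4,1,3,2].
print(createTargetArray([0, 1, 2, 3, 4], [0, 1, 2, 2, 1]))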
|
[
"[email protected]"
] | |
1936160e12db29ad137d1f6effb6db365bd0ad5f
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5634697451274240_0/Python/MReni/RevengeOfThePancakes.py
|
f0bd9154a41a9e82d73ac0ce8564e51703f16a39
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,209 |
py
|
import math
import collections
HAPPY_PANCAKE = '+'
UNHAPPY_PANCAKE = '-'
# pancake_min_flips = None
# pancake_map = {} # {pancake: {value: value, parents}}
pancake_value_map = {} #{pancake: steps to final}
pancake_parent_map = {} #{pancake: [parent1, parent2]}
super_parent = ""
def all_happy(pancakes):
return (UNHAPPY_PANCAKE not in pancakes)
def flip(pancakes, count):
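    # Flip the top `count` pancakes: invert each face, then reverse their
    # order before re-attaching the untouched bottom of the stack.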
flipped_pancakes = ''
for i in range(0, count):
this_pancake = HAPPY_PANCAKE if pancakes[i] == UNHAPPY_PANCAKE else UNHAPPY_PANCAKE
flipped_pancakes = flipped_pancakes + this_pancake
return flipped_pancakes[::-1] + pancakes[count:]
# def get_min_flips(pancakes, current_flips):
# if all_happy(pancakes):
# if not pancake_min_flips or pancake_min_flips > current_flips:
# pancake_min_flips = current_flips
# return current_flips
# min_flips = None
# while not all_happy(pancakes):
# for i in range(0, len(pancakes)):
# flipped_pancakes = flip(pancakes, i)
# current_flips = current_flips + 1
# if pancake_min_flips and current_flips > pancake_min_flips:
# continue
# # If we have seen this map and with smaller flips, use smaller flips
# if flipped_pancakes in pancake_map:
# if pancake_map[flipped_pancakes] > current_flips:
# final_flips = current_flips + pancake_map[flipped_pancakes]
# else:
# final_flips = get_min_flips(flipped_pancakes, current_flips)
# if current_min_flips and current_min_flips < final_flips:
# return current_flips
# if not min_flips or current_flips < min_flips :
# min_flips = current_flips
# return min_flips
# def get_min_flips(pancakes, depth):
# if all_happy(pancakes):
# # print "all happy"
# return 0
# if pancakes in pancake_map:
# return pancake_map[pancakes]
# current_min_flips = None
# for i in range(1, len(pancakes)+1):
# flipped_pancakes = flip(pancakes, i)
# # print "This is my local " + str(i) + " flip from " + pancakes + " to " + flipped_pancakes
# # print pancake_map
# if flipped_pancakes not in attempts or attempts[flipped_pancakes] > depth:
# attempts[flipped_pancakes] = depth
# future_min_flips = get_min_flips(flipped_pancakes, depth + 1)
# # print "Not in attempts and have a future min flip of " + str(future_min_flips)
# # print "Futures: " + str(i) + " :" + str(future_min_flips)
# # print "count: " + str(i) + " flipped_pancakes: " + flipped_pancakes + ". future_min_flips: " + str(future_min_flips)
# if future_min_flips != None:
# my_flip = 1 + future_min_flips
# # print "MYFlips: " + str(i) + " :" + str(my_flip)
# if pancakes not in pancake_map or pancake_map[pancakes] > my_flip:
# pancake_map[pancakes] = my_flip
# if current_min_flips == None or current_min_flips > my_flip:
# current_min_flips = my_flip
# # if current_min_flips != None and (pancakes not in pancake_map or pancake_map[pancakes] > current_min_flips):
# # pancake_map[pancakes] = current_min_flips
# return current_min_flips
def update_all_parents(pancakes, value, previous_value):
# old_parent_value + (value - previous_value + 1)
# Previous value either > current value or does not exist or = current value
if not pancakes:
return
for parent in pancake_parent_map[pancakes]:
old_parent_value = pancake_value_map[parent]
expected_new_value = (old_parent_value or 0) + (value - (previous_value or 0) + 1)
if old_parent_value == None or old_parent_value > expected_new_value:
pancake_value_map[parent] = expected_new_value
update_all_parents(parent, expected_new_value, old_parent_value)
def find_consecutive_chars(pancakes):
count = 1
first_char = pancakes[0]
for i in range(1, len(pancakes)):
if pancakes[i] == first_char:
count = count + 1
else:
return count
return count
def get_min_flips(pancakes, depth):
if all_happy(pancakes):
previous_value = pancake_value_map.get(pancakes, None)
if previous_value == None or previous_value > 0:
pancake_value_map[pancakes] = 0
update_all_parents(pancakes, 0, previous_value)
# return 0
if pancake_value_map.get(pancakes) != None:
update_all_parents(pancakes, pancake_value_map[pancakes], pancake_value_map[pancakes])
#find consecutive marks at the beginning:
min_count = find_consecutive_chars(pancakes)
for i in range(max(min_count, depth), len(pancakes) + 1):
flipped_pancakes = flip(pancakes, i)
if pancakes not in pancake_parent_map[flipped_pancakes] and flipped_pancakes not in pancake_value_map:# and flipped_pancakes != super_parent:
pancake_value_map[flipped_pancakes] = None
pancake_parent_map[flipped_pancakes].add(pancakes)
get_min_flips(flipped_pancakes, i )
count = 1
with open('dataB', 'rb') as data:
for pancakes in data:
# pancake_min_flips = None
# pancake_map[pancakes] = 0
pancakes = pancakes.replace('\n', '')
pancake_value_map = {pancakes: None}
pancake_parent_map = collections.defaultdict(set)
super_parent = pancakes
# print "Case #" + str(count) + ": " + str(get_min_flips(pancakes, None))
get_min_flips(pancakes, 1)
print "Case #" + str(count) + ": " + str(pancake_value_map[pancakes])
# print pancake_value_map
# print pancake_parent_map
# find_shortest(pancakes)
count = count + 1
|
[
"[email protected]"
] | |
aba1814fbd4b650bf347f3f9336a1f8bc4df1fea
|
a7146e71459408498cc6b735935ba508a2e43c90
|
/examples/long_running_with_tm/long_running_with_tm/models.py
|
262843b6b4672b04747ee8cce37579b9fca74d95
|
[
"MIT"
] |
permissive
|
timgates42/pyramid_celery
|
8ae5ed583696a35c35ddb1589a77444bec6362f6
|
cf8aa80980e42f7235ad361874d3c35e19963b60
|
refs/heads/master
| 2023-03-15T23:17:01.816146 | 2021-02-24T02:40:04 | 2021-02-24T02:40:04 | 251,593,921 | 0 | 0 |
NOASSERTION
| 2020-03-31T12:18:55 | 2020-03-31T12:18:55 | null |
UTF-8
|
Python
| false | false | 390 |
py
|
from sqlalchemy import (
Column,
Integer,
Text,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
DBSession = scoped_session(sessionmaker())
Base = declarative_base()
class TaskItem(Base):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True)
task = Column(Text, unique=True)
|
[
"[email protected]"
] | |
81ba2c6e03bcba6e513148f7db39f61d030b176b
|
5e0ea2e0fff8a9e38606da6c73a51df3f92604ea
|
/assortment.py
|
8a3ddb0a8c9ed8a4955654465286a3cf83e1c01c
|
[] |
no_license
|
Trafire/f2_auto_complete
|
c6419a96b5324bb697681adc9e0a72c91d1681e5
|
3c6da1977b139889f0af7d60a431fef8580cd720
|
refs/heads/master
| 2022-11-26T09:40:18.860721 | 2020-03-12T20:37:16 | 2020-03-12T20:37:16 | 224,901,668 | 0 | 0 | null | 2022-11-22T04:52:05 | 2019-11-29T18:06:28 |
Python
|
UTF-8
|
Python
| false | false | 36 |
py
|
from navigation import traverse
|
[
"[email protected]"
] | |
4801e77162a0826fde15f393eaf0aec9babf0d6f
|
ac45b55915e634815922329195c203b1e810458c
|
/astro169_5.py
|
fcdb267807d1eb2ec556c9b469293b02a33b81e8
|
[] |
no_license
|
mj1e16lsst/iridisPeriodicNew
|
96a8bfef0d09f13e18adb81b89e25ae885e30bd9
|
dc0214b1e702b454e0cca67d4208b2113e1fbcea
|
refs/heads/master
| 2020-03-23T15:01:23.583944 | 2018-07-23T18:58:59 | 2018-07-23T18:58:59 | 141,715,292 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,053 |
py
|
from operator import add
#from astropy import units as u
#from astropy.coordinates import SkyCoord
#from astropy.stats import LombScargle
#from gatspy.periodic import LombScargleFast
from functools import partial
#from gatspy import periodic
#import matplotlib.pyplot as plt
#from matplotlib.font_manager import FontProperties
import lomb_scargle_multiband as periodic
from multiprocessing import Pool
import numpy as np
import os
#from sqlite3 import *
import random
from random import shuffle
from random import randint
import Observations
import Magnitudes
# In[13]:
#conn = connect('minion_1016_sqlite.db')
#conn = connect('astro_lsst_01_1004_sqlite.db')
#conn = connect('minion_1020_sqlite.db')
# In[14]:
# LSST zero points u,g,r,i,z,y
zeroPoints = [0,26.5,28.3,28.13,27.79,27.4,26.58]
FWHMeff = [0.8,0.92,0.87,0.83,0.80,0.78,0.76] # arcmins?
pixelScale = 0.2
readOut = 12.7
sigSys = 0.005
flareperiod = 4096
flarecycles = 10
dayinsec=86400
background = 40
# sat mag u,g,r,i,z,y=14.7,15.7,15.8,15.8,15.3 and 13.9
# start date 59580.033829 end date + 10 years
#maglist=[20]*7
lim = [0, 23.5, 24.8, 24.4, 23.9, 23.3, 22.1] # limiting magnitude ugry
sat = [0, 14.7, 15.7, 15.8, 15.8, 15.3, 13.9] # sat mag as above
# In[15]:
looooops = 10000
maglength = 20
freqlength = 20
processors = 20
startnumber = 0 + 5
endnumber = startnumber + 1
#observingStrategy = 'minion'
observingStrategy = 'astroD'
#observingStrategy = 'panstars'
inFile = '/home/mj1e16/periodic/in'+str(startnumber)+'.txt'
outFile = '/home/mj1e16/periodic/outastro169'+str(startnumber)+'.txt'
#inFile = '/home/ubuntu/vagrant/'+observingStrategy+'/in'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
#outFile = '/home/ubuntu/vagrant/'+observingStrategy+'/out'+observingStrategy+'KtypefullresultsFile'+str(startnumber)+'.txt'
obs = Observations.obsastro169
# In[19]:
def magUncertainy(Filter, objectmag, exposuretime, background, FWHM): # background is in counts per pixel
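    # Photometric error from a signal-to-noise estimate: the filter zero point
    # and object magnitude give the expected source counts; sky background and
    # read noise are scaled over the effective PSF footprint in pixels.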
countsPS = 10**((Filter-objectmag)/2.5)
counts = countsPS * exposuretime
uncertainty = 1/(counts/((counts/2.3)+(((background/2.3)+(12.7**2))*2.266*((FWHM/0.2)**2)))**0.5) # gain assumed to be 1
return uncertainty
#formula from LSST; see https://smtn-002.lsst.io/
# In[20]:
def averageFlux(observations, Frequency, exptime):
b = [0]*len(observations)
for seconds in range(0, exptime):
a = [np.sin((2*np.pi*(Frequency))*(x+(seconds/(3600*24)))) for x in observations] # optical modulation
b = map(add, a, b)
c = [z/exptime for z in b]
return c
def Flux(observations,Frequency,exptime):
a = [np.sin((2*np.pi*(Frequency)*x)) for x in observations]
return a
# In[21]:
def ellipsoidalFlux(observations, Frequency,exptime):
period = 1/(Frequency)
phase = [(x % (2*period)) for x in observations]
b = [0]*len(observations)
for seconds in range(0, exptime):
a = [np.sin((2*np.pi*(Frequency))*(x+(seconds/(3600*24)))) for x in observations] # optical modulation
b = map(add, a, b)
c = [z/exptime for z in b]
for x in range(0,len(phase)):
if (phase[x]+(1.5*period)) < (3*period):
c[x] = c[x]*(1./3.)
else:
c[x] = c[x]*(2./3.)
return c
## this is doing something but not the right something, come back to it
# In[22]:
def flaring(B, length, dayinsec=86400,amplitude=1):
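    # Build a 1/f**B power-law ("red noise") flare curve: draw Gaussian Fourier
    # amplitudes scaled by (1/f)**(B/2), inverse-FFT them, then shift the curve
    # positive and normalise it to the requested amplitude.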
global flareMag, minutes
fouriers = np.linspace(0.00001,0.05,(dayinsec/30))
logF = [np.log(x) for x in fouriers] # start at 30 go to a day in 30 sec increments
real = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers] #random.gauss(mu,sigma) to change for values from zurita
# imaginary = [random.gauss(0,1)*((1/x)**(B/2)) for x in fouriers]
IFT = np.fft.ifft(real)
seconds = np.linspace(0,dayinsec, (dayinsec/30)) # the day in 30 sec increments
minutes = [x for x in seconds]
minimum = (np.max(-IFT))
positive = [x + minimum for x in IFT] # what did this even achieve? it helped with normalisation!
normalised = [x/(np.mean(positive)) for x in positive] # find normalisation
normalisedmin = minimum/(np.mean(positive))
normalised = [x - normalisedmin for x in normalised]
flareMag = [amplitude * x for x in normalised] # normalise to amplitude
logmins = [np.log(d) for d in minutes] # for plotting?
# plt.plot(minutes,flareMag)
# plt.title('lightcurve')
# plt.show()
return flareMag
# In[55]:
def lombScargle(frequencyRange,objectmag=20,loopNo=looooops,df=0.001,fmin=0.001,numsteps=100000,modulationAmplitude=0.1,Nquist=200): # frequency range and object mag in list
#global totperiod, totmperiod, totpower, date, amplitude, frequency, periods, LSperiod, power, mag, error, SigLevel
results = {}
totperiod = []
totmperiod = []
totpower = [] # reset
SigLevel = []
filterletter = ['o','u','g','r','i','z','y']
period = 1/(frequencyRange)
if period > 0.5:
numsteps = 10000
elif period > 0.01:
numsteps = 100000
else:
numsteps = 200000
freqs = fmin + df * np.arange(numsteps) # for manuel
allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy = [], [], [], [], [], [], [] #reset
measuredpower = [] # reset
y = [allobsy, uobsy, gobsy, robsy, iobsy, zobsy, yobsy] # for looping only
for z in range(1, len(y)):
        #y[z] = averageFlux(obs[z], frequencyRange[frange], 30) # amplitude calculation for the observations and frequency range
y[z] = ellipsoidalFlux(obs[z], frequencyRange,30)
y[z] = [modulationAmplitude * t for t in y[z]] # scaling
for G in range(0, len(y[z])):
flareMinute = int(round((obs[z][G]*24*60*2)%((dayinsec/(30*2))*flarecycles)))
            y[z][G] = y[z][G] + longflare[flareMinute] # add flares; switched to seconds, but keeping the name introduces fewer bugs
date = []
amplitude = []
mag = []
error = []
filts = []
for z in range(1, len(y)):
if objectmag[z] > sat[z] and objectmag[z] < lim[z]:
#date.extend([x for x in obs[z]])
date.extend(obs[z])
amplitude = [t + random.gauss(0,magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])) for t in y[z]] # scale amplitude and add poisson noise
mag.extend([objectmag[z] - t for t in amplitude]) # add actual mag
error.extend([sigSys + magUncertainy(zeroPoints[z],objectmag[z],30,background,FWHMeff[z])+0.2]*len(amplitude))
filts.extend([filterletter[z]]*len(amplitude))
phase = [(day % (period*2))/(period*2) for day in obs[z]]
pmag = [objectmag[z] - t for t in amplitude]
# plt.plot(phase, pmag, 'o', markersize=4)
# plt.xlabel('Phase')
# plt.ylabel('Magnitude')
# plt.gca().invert_yaxis()
# plt.title('filter'+str(z)+', Period = '+str(period))#+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
# plt.show()
# plt.plot(date, mag, 'o')
# plt.xlim(lower,higher)
# plt.xlabel('time (days)')
# plt.ylabel('mag')
# plt.gca().invert_yaxis()
# plt.show()
model = periodic.LombScargleMultibandFast(fit_period=False)
model.fit(date, mag, error, filts)
power = model.score_frequency_grid(fmin, df, numsteps)
if period > 10.:
model.optimizer.period_range=(10, 110)
elif period > 0.51:
model.optimizer.period_range=(0.5, 10)
elif period > 0.011:
model.optimizer.period_range=(0.01, 0.52)
else:
model.optimizer.period_range=(0.0029, 0.012)
LSperiod = model.best_period
if period < 10:
higher = 10
else:
higher = 100
# fig, ax = plt.subplots()
# ax.plot(1./freqs, power)
# ax.set(xlim=(0, higher), ylim=(0, 1.2),
# xlabel='period (days)',
# ylabel='Lomb-Scargle Power',
# title='Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)));
# plt.show()
phase = [(day % (period*2))/(period*2) for day in date]
#idealphase = [(day % (period*2))/(period*2) for day in dayZ]
#print(len(phase),len(idealphase))
#plt.plot(idealphase,Zmag,'ko',)
# plt.plot(phase, mag, 'o', markersize=4)
# plt.xlabel('Phase')
# plt.ylabel('Magnitude')
# plt.gca().invert_yaxis()
# plt.title('Period = '+str(period)+', MeasuredPeriod = '+str(LSperiod)+', Periodx20 = '+(str(period*20)))
# plt.show()
#print(period, LSperiod, period*20)
# print('actualperiod', period, 'measured period', np.mean(LSperiod),power.max())# 'power',np.mean(power[maxpos]))
# print(frequencyRange[frange], 'z', z)
# totperiod.append(period)
# totmperiod.append(np.mean(LSperiod))
# totpower.append(power.max())
mpower = power.max()
measuredpower.append(power.max()) # should this correspond to period power and not max power?
maxpower = []
counter = 0.
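    # Empirical false-alarm test: shuffling the dates destroys any real
    # periodicity, so the fraction of shuffled trials whose peak power beats
    # the measured peak estimates the significance level.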
for loop in range(0,loopNo):
random.shuffle(date)
model = periodic.LombScargleMultibandFast(fit_period=False)
model.fit(date, mag, error, filts)
power = model.score_frequency_grid(fmin, df, numsteps)
maxpower.append(power.max())
for X in range(0, len(maxpower)):
if maxpower[X] > measuredpower[-1]:
counter = counter + 1.
Significance = (1.-(counter/len(maxpower)))
#print('sig', Significance, 'counter', counter)
SigLevel.append(Significance)
#freqnumber = FrangeLoop.index(frequencyRange)
#magnumber = MagRange.index(objectmag)
#print(fullmaglist)
#listnumber = (magnumber*maglength)+freqnumber
# print(listnumber)
# measuredperiodlist[listnumber] = LSperiod
# periodlist[listnumber] = period
# powerlist[listnumber] = mpower
# siglist[listnumber] = Significance
# fullmaglist[listnumber] = objectmag
# results order, 0=mag,1=period,2=measuredperiod,3=siglevel,4=power,5=listnumber
results[0] = objectmag[3]
results[1] = period
results[2] = LSperiod
results[3] = Significance
results[4] = mpower
results[5] = 0#listnumber
return results
# In[24]:
#findObservations([(630,)])
#remove25(obs)
#averageFlux(obs[0], 1, 30)
longflare = []
for floop in range(0,flarecycles):
flareone = flaring(-1, flareperiod, amplitude=0.3)
flareone = flareone[0:1440]
positiveflare = [abs(x) for x in flareone]
longflare.extend(positiveflare)
# In[25]:
PrangeLoop = np.logspace(-2.5,2,freqlength)
FrangeLoop = [(1/x) for x in PrangeLoop]
# In[26]:
# reset results file
with open(inFile,'w') as f:
f.write('fullmaglist \n\n periodlist \n\n measuredperiodlist \n\n siglist \n\n powerlist \n\n listnumberlist \n\n end of file')
# In[57]:
results = []
fullmeasuredPeriod = []
fullPeriod = []
fullPower = []
fullSigLevel = []
fullMag = []
MagRangearray = np.linspace(17,24,maglength)
MagRange = [x for x in MagRangearray]
maglist = []
for x in range(len(MagRange)):
maglist.append([MagRange[x]]*7)
newlist = Magnitudes.mag169
pool = Pool(processors)
for h in range(startnumber,endnumber):
print(newlist[h])
results.append(pool.map(partial(lombScargle, objectmag=newlist[h]),FrangeLoop))
twoDlist = [[],[],[],[],[],[]]
for X in range(len(results)):
for Y in range(len(results[X])):
twoDlist[0].append(results[X][Y][0])
twoDlist[1].append(results[X][Y][1])
twoDlist[2].append(results[X][Y][2])
twoDlist[3].append(results[X][Y][3])
twoDlist[4].append(results[X][Y][4])
twoDlist[5].append(results[X][Y][5])
with open(inFile, 'r') as istr:
with open(outFile,'w') as ostr:
for i, line in enumerate(istr):
# Get rid of the trailing newline (if any).
line = line.rstrip('\n')
if i % 2 != 0:
line += str(twoDlist[int((i-1)/2)])+','
ostr.write(line+'\n')
|
[
"[email protected]"
] | |
6661c970ed8204123d5f74af8add6e78011ed805
|
75275e1cd5ef1a5dddd5fdcb82db03fdf1b609d3
|
/lib/ansible/modules/cloud/alicloud/alicloud_slb_vsg_facts.py
|
f191838ff20a57f9e843348e165ea5a4860f141c
|
[
"Apache-2.0"
] |
permissive
|
jumping/ansible-provider
|
bc8b2bc51aa422de89d255ba1208ba8e8ae8f0be
|
067ce1aa4277720bc481c2ba08e3d1b408b8f13c
|
refs/heads/master
| 2020-03-13T21:30:50.287049 | 2018-04-27T13:12:23 | 2018-04-27T13:12:23 | 131,297,789 | 0 | 0 |
Apache-2.0
| 2018-04-27T13:12:24 | 2018-04-27T13:07:37 |
Python
|
UTF-8
|
Python
| false | false | 7,586 |
py
|
#!/usr/bin/python
# Copyright (c) 2017 Alibaba Group Holding Limited. He Guimin <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: alicloud_slb_vsg_facts
version_added: "2.4"
short_description: Gather facts on vserver groups of Alibaba Cloud SLB.
description:
    - This module fetches data from the Open API in Alicloud.
options:
load_balancer_id:
description:
- ID of server load balancer.
required: true
aliases: [ "lb_id"]
vserver_group_ids:
description:
- A list of SLB vserver group ids.
required: false
aliases: [ "group_ids" ]
author:
- "He Guimin (@xiaozhu36)"
requirements:
- "python >= 2.6"
- "footmark"
extends_documentation_fragment:
- alicloud
'''
EXAMPLES = '''
# Fetch slb server group according to setting different filters
- name: Fetch slb vserver group example
hosts: localhost
vars:
alicloud_access_key: <your-alicloud-access-key>
alicloud_secret_key: <your-alicloud-secret-key>
alicloud_region: cn-beijing
load_balancer_id: lb-dj1hv3n9oemvk34evb466
vserver_group_ids:
- rsp-dj1lrpsgr8d5v
- rsp-dj10xmgq31vl0
tasks:
    - name: Find all vserver groups in the specified slb
alicloud_slb_vsg_facts:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
load_balancer_id: '{{ load_balancer_id }}'
register: all_vserver_group
- debug: var=all_vserver_group
- name: Find all vserver group by ids
alicloud_slb_vsg_facts:
alicloud_access_key: '{{ alicloud_access_key }}'
alicloud_secret_key: '{{ alicloud_secret_key }}'
alicloud_region: '{{ alicloud_region }}'
load_balancer_id: '{{ load_balancer_id }}'
vserver_group_ids: '{{ vserver_group_ids }}'
register: vserver_group_by_ids
- debug: var=vserver_group_by_ids
'''
RETURN = '''
vserver_group_ids:
description: List all vserver group's id after operating slb vserver group.
returned: when success
type: list
sample: [ "rsp-dj1lrpsgr8d5v", "rsp-dj10xmgq31vl0" ]
vserver_groups:
description: Details about the slb vserver group that were created.
returned: when success
type: list
sample: [
{
"backend_servers": {
"backend_server": [
{
"port": 8282,
"server_id": "i-2ze35dldjc05dcvezgwk",
"weight": 100
},
{
"port": 8283,
"server_id": "i-2zehjm3jvtbkp175c2bt",
"weight": 100
}
]
},
"vserver_group_id": "rsp-dj1lrpsgr8d5v",
"vserver_group_name": "group_1"
},
{
"backend_servers": {
"backend_server": [
{
"port": 8085,
"server_id": "i-2zehjm3jvtbkp175c2bt",
"weight": 100
},
{
"port": 8086,
"server_id": "i-2ze35dldjc05dcvezgwk",
"weight": 100
}
]
},
"vserver_group_id": "rsp-dj10xmgq31vl0",
"vserver_group_name": "group_2"
}
]
total:
description: The number of all vserver group after operating slb.
returned: when success
type: int
sample: 2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import get_acs_connection_info, ecs_argument_spec, slb_connect
HAS_FOOTMARK = False
try:
from footmark.exception import SLBResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
def get_info(obj):
"""
get info from vsg object
:param obj: vsg obj
:return: res: info of vsg
"""
res = {'vserver_group_id': obj.vserver_group_id}
if hasattr(obj, 'backend_servers'):
res['backend_servers'] = obj.backend_servers
if hasattr(obj, 'vserver_group_name'):
res['vserver_group_name'] = obj.vserver_group_name
return res
def main():
argument_spec = ecs_argument_spec()
argument_spec.update(dict(
load_balancer_id=dict(type='str', aliases=['lb_id'], required=True),
vserver_group_ids=dict(type='list', aliases=['group_ids'])
))
module = AnsibleModule(argument_spec=argument_spec)
if HAS_FOOTMARK is False:
module.fail_json(msg="Package 'footmark' required for this module.")
load_balancer_id = module.params['load_balancer_id']
vserver_group_ids = module.params['vserver_group_ids']
ids = []
result = []
all_vserver_group_ids = []
    if vserver_group_ids and (not isinstance(vserver_group_ids, list) or len(vserver_group_ids) < 1):
        module.fail_json(msg='vserver_group_ids should be a list of vserver group ids, aborting')
try:
slb = slb_connect(module)
        load_balancer = slb.describe_load_balancers(load_balancer_id=load_balancer_id)
        if load_balancer and len(load_balancer) == 1:
# list all vserver groups in selected load balancer
for vserver_group_obj in slb.describe_vserver_groups(load_balancer_id=load_balancer_id):
all_vserver_group_ids.append(vserver_group_obj.vserver_group_id)
# if list of vserver group id provided
if vserver_group_ids:
for vserver_group_id in vserver_group_ids:
                    # check whether the provided vserver group id is valid or not
if vserver_group_id in all_vserver_group_ids:
vserver_group = slb.describe_vserver_group_attribute(vserver_group_id)
result.append(get_info(vserver_group))
ids.append(vserver_group_id)
# list all vserver group in specified slb
else:
for vserver_group_id in all_vserver_group_ids:
vserver_group = slb.describe_vserver_group_attribute(vserver_group_id)
result.append(get_info(vserver_group))
ids.append(vserver_group.vserver_group_id)
module.exit_json(changed=False, vserver_group_ids=ids,
vserver_groups=result, total=len(result))
else:
module.fail_json(msg="Unable to describe slb vserver groups, invalid load balancer id")
except Exception as e:
        module.fail_json(msg="Unable to describe slb vserver group, error: {0}".format(e))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
800eb0325b5ed7eb9f9f40151ce5297efa61fbbd
|
bab4f301ff7b7cf0143d82d1052f49e8632a210e
|
/98. Validate Binary Search Tree.py
|
bc811609d234bd06fd090df9f688e7930a212199
|
[] |
no_license
|
ashish-c-naik/leetcode_submission
|
7da91e720b14fde660450674d6ce94c78b1150fb
|
9f5dcd8e04920d07beaf6aa234b9804339f58770
|
refs/heads/master
| 2020-04-05T05:12:03.656621 | 2019-06-08T17:30:22 | 2019-06-08T17:30:22 | 156,585,497 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 626 |
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isValidBST(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
        def valid(node, mini, maxi):
            # A node is valid if its value lies strictly between the bounds
            # inherited from its ancestors; recurse with tightened bounds.
            if node:
                if mini < node.val < maxi:
                    return (valid(node.right, node.val, maxi) and
                            valid(node.left, mini, node.val))
                return False
            return True
        return valid(root, float('-inf'), float('inf'))
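
# A minimal usage sketch (hypothetical): with the TreeNode definition above
# uncommented, build the tree [2, 1, 3] by hand and validate it.
#
#   root = TreeNode(2)
#   root.left, root.right = TreeNode(1), TreeNode(3)
#   print(Solution().isValidBST(root))  # True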
|
[
"[email protected]"
] | |
113c34036bdd75f8bbe1c612e9eac312ecd2ef53
|
9751b5d99cf2bd4d969c9168e449b8bb1314d500
|
/admin/dashboard/.venv/lib/python2.7/site-packages/oslo_concurrency/tests/unit/test_processutils.py
|
0fd8045b6ac7b0ef5ccccf42a7cfa05ea87ebf10
|
[
"Apache-2.0"
] |
permissive
|
rajagopalx/product
|
79ff8cd6ed52f13613b0ffb11ce14587e101eb40
|
18ff22bc0c5440ebb37809ee7b9140083dec5cb4
|
refs/heads/master
| 2021-01-13T04:58:50.733190 | 2016-04-28T04:15:55 | 2016-04-28T04:15:55 | 57,262,088 | 0 | 0 | null | 2016-04-28T01:55:44 | 2016-04-28T01:55:44 | null |
UTF-8
|
Python
| false | false | 33,129 |
py
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import logging
import multiprocessing
import os
import pickle
import resource
import stat
import subprocess
import sys
import tempfile
import fixtures
import mock
from oslotest import base as test_base
import six
from oslo_concurrency import processutils
from oslotest import mockpatch
PROCESS_EXECUTION_ERROR_LOGGING_TEST = """#!/bin/bash
exit 41"""
TEST_EXCEPTION_AND_MASKING_SCRIPT = """#!/bin/bash
# This is to test stdout and stderr
# and the command returned in an exception
# when a non-zero exit code is returned
echo onstdout --password='"secret"'
echo onstderr --password='"secret"' 1>&2
exit 38"""
# This byte sequence is undecodable from most encodings
UNDECODABLE_BYTES = b'[a\x80\xe9\xff]'
TRUE_UTILITY = (sys.platform.startswith('darwin') and
'/usr/bin/true' or '/bin/true')
class UtilsTest(test_base.BaseTestCase):
# NOTE(jkoelker) Moar tests from nova need to be ported. But they
# need to be mock'd out. Currently they require actually
# running code.
def test_execute_unknown_kwargs(self):
self.assertRaises(processutils.UnknownArgumentError,
processutils.execute,
hozer=True)
@mock.patch.object(multiprocessing, 'cpu_count', return_value=8)
def test_get_worker_count(self, mock_cpu_count):
self.assertEqual(8, processutils.get_worker_count())
@mock.patch.object(multiprocessing, 'cpu_count',
side_effect=NotImplementedError())
def test_get_worker_count_cpu_count_not_implemented(self,
mock_cpu_count):
self.assertEqual(1, processutils.get_worker_count())
def test_execute_with_callback(self):
on_execute_callback = mock.Mock()
on_completion_callback = mock.Mock()
processutils.execute(TRUE_UTILITY)
self.assertEqual(0, on_execute_callback.call_count)
self.assertEqual(0, on_completion_callback.call_count)
processutils.execute(TRUE_UTILITY, on_execute=on_execute_callback,
on_completion=on_completion_callback)
self.assertEqual(1, on_execute_callback.call_count)
self.assertEqual(1, on_completion_callback.call_count)
@mock.patch.object(subprocess.Popen, "communicate")
def test_execute_with_callback_and_errors(self, mock_comm):
on_execute_callback = mock.Mock()
on_completion_callback = mock.Mock()
def fake_communicate(*args):
raise IOError("Broken pipe")
mock_comm.side_effect = fake_communicate
self.assertRaises(IOError,
processutils.execute,
TRUE_UTILITY,
on_execute=on_execute_callback,
on_completion=on_completion_callback)
self.assertEqual(1, on_execute_callback.call_count)
self.assertEqual(1, on_completion_callback.call_count)
def test_execute_with_preexec_fn(self):
        # NOTE(dims): preexec_fn is set to a callable object; this object
        # will be called in the child process just before the child is
        # executed. So we cannot share variables etc. The simplest check is
        # whether a specific exception is thrown, which can be caught here.
def preexec_fn():
raise processutils.InvalidArgumentError()
processutils.execute(TRUE_UTILITY)
expected_exception = (processutils.InvalidArgumentError if six.PY2
else subprocess.SubprocessError)
self.assertRaises(expected_exception,
processutils.execute,
TRUE_UTILITY,
preexec_fn=preexec_fn)
class ProcessExecutionErrorTest(test_base.BaseTestCase):
def test_defaults(self):
err = processutils.ProcessExecutionError()
self.assertTrue('None\n' in six.text_type(err))
self.assertTrue('code: -\n' in six.text_type(err))
def test_with_description(self):
description = 'The Narwhal Bacons at Midnight'
err = processutils.ProcessExecutionError(description=description)
self.assertTrue(description in six.text_type(err))
def test_with_exit_code(self):
exit_code = 0
err = processutils.ProcessExecutionError(exit_code=exit_code)
self.assertTrue(str(exit_code) in six.text_type(err))
def test_with_cmd(self):
cmd = 'telinit'
err = processutils.ProcessExecutionError(cmd=cmd)
self.assertTrue(cmd in six.text_type(err))
def test_with_stdout(self):
stdout = """
Lo, praise of the prowess of people-kings
of spear-armed Danes, in days long sped,
            we have heard, and what honor the athelings won!
Oft Scyld the Scefing from squadroned foes,
from many a tribe, the mead-bench tore,
            awing the earls. Since erst he lay
friendless, a foundling, fate repaid him:
            for he waxed under welkin, in wealth he throve,
till before him the folk, both far and near,
who house by the whale-path, heard his mandate,
            gave him gifts: a good king he!
To him an heir was afterward born,
a son in his halls, whom heaven sent
            to favor the folk, feeling their woe
that erst they had lacked an earl for leader
so long a while; the Lord endowed him,
the Wielder of Wonder, with world's renown.
""".strip()
err = processutils.ProcessExecutionError(stdout=stdout)
print(six.text_type(err))
self.assertTrue('people-kings' in six.text_type(err))
def test_with_stderr(self):
stderr = 'Cottonian library'
err = processutils.ProcessExecutionError(stderr=stderr)
self.assertTrue(stderr in six.text_type(err))
def test_retry_on_failure(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
fp.close()
os.chmod(tmpfilename, 0o755)
self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
tmpfilename, tmpfilename2, attempts=10,
process_input=b'foo',
delay_on_retry=False)
fp = open(tmpfilename2, 'r')
runs = fp.read()
fp.close()
self.assertNotEqual(runs.strip(), 'failure', 'stdin did not '
'always get passed '
'correctly')
runs = int(runs.strip())
self.assertEqual(runs, 10, 'Ran %d times instead of 10.' % (runs,))
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
def test_unknown_kwargs_raises_error(self):
self.assertRaises(processutils.UnknownArgumentError,
processutils.execute,
'/usr/bin/env', 'true',
this_is_not_a_valid_kwarg=True)
def test_check_exit_code_boolean(self):
processutils.execute('/usr/bin/env', 'false', check_exit_code=False)
self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
'/usr/bin/env', 'false', check_exit_code=True)
def test_check_cwd(self):
tmpdir = tempfile.mkdtemp()
out, err = processutils.execute('/usr/bin/env',
'sh', '-c', 'pwd',
cwd=tmpdir)
self.assertIn(tmpdir, out)
def test_check_exit_code_list(self):
processutils.execute('/usr/bin/env', 'sh', '-c', 'exit 101',
check_exit_code=(101, 102))
processutils.execute('/usr/bin/env', 'sh', '-c', 'exit 102',
check_exit_code=(101, 102))
self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
'/usr/bin/env', 'sh', '-c', 'exit 103',
check_exit_code=(101, 102))
self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
'/usr/bin/env', 'sh', '-c', 'exit 0',
check_exit_code=(101, 102))
def test_no_retry_on_success(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write("""#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
""")
fp.close()
os.chmod(tmpfilename, 0o755)
processutils.execute(tmpfilename,
tmpfilename2,
process_input=b'foo',
attempts=2)
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
    # This test and the one below ensure that when communicate raises
    # an OSError, we do the right thing(s)
def test_exception_on_communicate_error(self):
mock = self.useFixture(mockpatch.Patch(
'subprocess.Popen.communicate',
side_effect=OSError(errno.EAGAIN, 'fake-test')))
self.assertRaises(OSError,
processutils.execute,
'/usr/bin/env',
'false',
check_exit_code=False)
self.assertEqual(1, mock.mock.call_count)
def test_retry_on_communicate_error(self):
mock = self.useFixture(mockpatch.Patch(
'subprocess.Popen.communicate',
side_effect=OSError(errno.EAGAIN, 'fake-test')))
self.assertRaises(OSError,
processutils.execute,
'/usr/bin/env',
'false',
check_exit_code=False,
attempts=5)
self.assertEqual(5, mock.mock.call_count)
def _test_and_check_logging_communicate_errors(self, log_errors=None,
attempts=None):
mock = self.useFixture(mockpatch.Patch(
'subprocess.Popen.communicate',
side_effect=OSError(errno.EAGAIN, 'fake-test')))
fixture = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
kwargs = {}
if log_errors:
kwargs.update({"log_errors": log_errors})
if attempts:
kwargs.update({"attempts": attempts})
self.assertRaises(OSError,
processutils.execute,
'/usr/bin/env',
'false',
**kwargs)
self.assertEqual(attempts if attempts else 1, mock.mock.call_count)
self.assertIn('Got an OSError', fixture.output)
self.assertIn('errno: %d' % errno.EAGAIN, fixture.output)
self.assertIn("'/usr/bin/env false'", fixture.output)
def test_logging_on_communicate_error_1(self):
self._test_and_check_logging_communicate_errors(
log_errors=processutils.LOG_FINAL_ERROR,
attempts=None)
def test_logging_on_communicate_error_2(self):
self._test_and_check_logging_communicate_errors(
log_errors=processutils.LOG_FINAL_ERROR,
attempts=1)
def test_logging_on_communicate_error_3(self):
self._test_and_check_logging_communicate_errors(
log_errors=processutils.LOG_FINAL_ERROR,
attempts=5)
def test_logging_on_communicate_error_4(self):
self._test_and_check_logging_communicate_errors(
log_errors=processutils.LOG_ALL_ERRORS,
attempts=None)
def test_logging_on_communicate_error_5(self):
self._test_and_check_logging_communicate_errors(
log_errors=processutils.LOG_ALL_ERRORS,
attempts=1)
def test_logging_on_communicate_error_6(self):
self._test_and_check_logging_communicate_errors(
log_errors=processutils.LOG_ALL_ERRORS,
attempts=5)
def test_with_env_variables(self):
env_vars = {'SUPER_UNIQUE_VAR': 'The answer is 42'}
out, err = processutils.execute('/usr/bin/env', env_variables=env_vars)
self.assertIsInstance(out, str)
self.assertIsInstance(err, str)
self.assertIn('SUPER_UNIQUE_VAR=The answer is 42', out)
def test_binary(self):
env_vars = {'SUPER_UNIQUE_VAR': 'The answer is 42'}
out, err = processutils.execute('/usr/bin/env',
env_variables=env_vars,
binary=True)
self.assertIsInstance(out, bytes)
self.assertIsInstance(err, bytes)
self.assertIn(b'SUPER_UNIQUE_VAR=The answer is 42', out)
def test_exception_and_masking(self):
tmpfilename = self.create_tempfiles(
[["test_exceptions_and_masking",
TEST_EXCEPTION_AND_MASKING_SCRIPT]], ext='bash')[0]
os.chmod(tmpfilename, (stat.S_IRWXU |
stat.S_IRGRP |
stat.S_IXGRP |
stat.S_IROTH |
stat.S_IXOTH))
err = self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
tmpfilename, 'password="secret"',
'something')
self.assertEqual(38, err.exit_code)
self.assertIsInstance(err.stdout, six.text_type)
self.assertIsInstance(err.stderr, six.text_type)
self.assertIn('onstdout --password="***"', err.stdout)
self.assertIn('onstderr --password="***"', err.stderr)
self.assertEqual(err.cmd, ' '.join([tmpfilename,
'password="***"',
'something']))
self.assertNotIn('secret', str(err))
def execute_undecodable_bytes(self, out_bytes, err_bytes,
exitcode=0, binary=False):
if six.PY3:
code = ';'.join(('import sys',
'sys.stdout.buffer.write(%a)' % out_bytes,
'sys.stdout.flush()',
'sys.stderr.buffer.write(%a)' % err_bytes,
'sys.stderr.flush()',
'sys.exit(%s)' % exitcode))
else:
code = ';'.join(('import sys',
'sys.stdout.write(%r)' % out_bytes,
'sys.stdout.flush()',
'sys.stderr.write(%r)' % err_bytes,
'sys.stderr.flush()',
'sys.exit(%s)' % exitcode))
return processutils.execute(sys.executable, '-c', code, binary=binary)
def check_undecodable_bytes(self, binary):
out_bytes = b'out: ' + UNDECODABLE_BYTES
err_bytes = b'err: ' + UNDECODABLE_BYTES
out, err = self.execute_undecodable_bytes(out_bytes, err_bytes,
binary=binary)
if six.PY3 and not binary:
self.assertEqual(out, os.fsdecode(out_bytes))
self.assertEqual(err, os.fsdecode(err_bytes))
else:
self.assertEqual(out, out_bytes)
self.assertEqual(err, err_bytes)
def test_undecodable_bytes(self):
self.check_undecodable_bytes(False)
def test_binary_undecodable_bytes(self):
self.check_undecodable_bytes(True)
def check_undecodable_bytes_error(self, binary):
out_bytes = b'out: password="secret1" ' + UNDECODABLE_BYTES
err_bytes = b'err: password="secret2" ' + UNDECODABLE_BYTES
exc = self.assertRaises(processutils.ProcessExecutionError,
self.execute_undecodable_bytes,
out_bytes, err_bytes, exitcode=1,
binary=binary)
out = exc.stdout
err = exc.stderr
out_bytes = b'out: password="***" ' + UNDECODABLE_BYTES
err_bytes = b'err: password="***" ' + UNDECODABLE_BYTES
if six.PY3:
# On Python 3, stdout and stderr attributes of
# ProcessExecutionError must always be Unicode
self.assertEqual(out, os.fsdecode(out_bytes))
self.assertEqual(err, os.fsdecode(err_bytes))
else:
# On Python 2, stdout and stderr attributes of
# ProcessExecutionError must always be bytes
self.assertEqual(out, out_bytes)
self.assertEqual(err, err_bytes)
def test_undecodable_bytes_error(self):
self.check_undecodable_bytes_error(False)
def test_binary_undecodable_bytes_error(self):
self.check_undecodable_bytes_error(True)
def test_picklable(self):
exc = processutils.ProcessExecutionError(
stdout='my stdout', stderr='my stderr',
exit_code=42, cmd='my cmd',
description='my description')
exc_message = str(exc)
exc = pickle.loads(pickle.dumps(exc))
self.assertEqual('my stdout', exc.stdout)
self.assertEqual('my stderr', exc.stderr)
self.assertEqual(42, exc.exit_code)
self.assertEqual('my cmd', exc.cmd)
self.assertEqual('my description', exc.description)
self.assertEqual(exc_message, str(exc))
class ProcessExecutionErrorLoggingTest(test_base.BaseTestCase):
def setUp(self):
super(ProcessExecutionErrorLoggingTest, self).setUp()
self.tmpfilename = self.create_tempfiles(
[["process_execution_error_logging_test",
PROCESS_EXECUTION_ERROR_LOGGING_TEST]],
ext='bash')[0]
os.chmod(self.tmpfilename, (stat.S_IRWXU | stat.S_IRGRP |
stat.S_IXGRP | stat.S_IROTH |
stat.S_IXOTH))
def _test_and_check(self, log_errors=None, attempts=None):
fixture = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
kwargs = {}
if log_errors:
kwargs.update({"log_errors": log_errors})
if attempts:
kwargs.update({"attempts": attempts})
err = self.assertRaises(processutils.ProcessExecutionError,
processutils.execute,
self.tmpfilename,
**kwargs)
self.assertEqual(41, err.exit_code)
self.assertIn(self.tmpfilename, fixture.output)
def test_with_invalid_log_errors(self):
self.assertRaises(processutils.InvalidArgumentError,
processutils.execute,
self.tmpfilename,
log_errors='invalid')
def test_with_log_errors_NONE(self):
self._test_and_check(log_errors=None, attempts=None)
def test_with_log_errors_final(self):
self._test_and_check(log_errors=processutils.LOG_FINAL_ERROR,
attempts=None)
def test_with_log_errors_all(self):
self._test_and_check(log_errors=processutils.LOG_ALL_ERRORS,
attempts=None)
def test_multiattempt_with_log_errors_NONE(self):
self._test_and_check(log_errors=None, attempts=3)
def test_multiattempt_with_log_errors_final(self):
self._test_and_check(log_errors=processutils.LOG_FINAL_ERROR,
attempts=3)
def test_multiattempt_with_log_errors_all(self):
self._test_and_check(log_errors=processutils.LOG_ALL_ERRORS,
attempts=3)
def fake_execute(*cmd, **kwargs):
return 'stdout', 'stderr'
def fake_execute_raises(*cmd, **kwargs):
raise processutils.ProcessExecutionError(exit_code=42,
stdout='stdout',
stderr='stderr',
cmd=['this', 'is', 'a',
'command'])
class TryCmdTestCase(test_base.BaseTestCase):
def test_keep_warnings(self):
self.useFixture(fixtures.MonkeyPatch(
'oslo_concurrency.processutils.execute', fake_execute))
o, e = processutils.trycmd('this is a command'.split(' '))
self.assertNotEqual('', o)
self.assertNotEqual('', e)
def test_keep_warnings_from_raise(self):
self.useFixture(fixtures.MonkeyPatch(
'oslo_concurrency.processutils.execute', fake_execute_raises))
o, e = processutils.trycmd('this is a command'.split(' '),
discard_warnings=True)
self.assertIsNotNone(o)
self.assertNotEqual('', e)
def test_discard_warnings(self):
self.useFixture(fixtures.MonkeyPatch(
'oslo_concurrency.processutils.execute', fake_execute))
o, e = processutils.trycmd('this is a command'.split(' '),
discard_warnings=True)
self.assertIsNotNone(o)
self.assertEqual('', e)
class FakeSshChannel(object):
def __init__(self, rc):
self.rc = rc
def recv_exit_status(self):
return self.rc
class FakeSshStream(six.BytesIO):
def setup_channel(self, rc):
self.channel = FakeSshChannel(rc)
class FakeSshConnection(object):
def __init__(self, rc, out=b'stdout', err=b'stderr'):
self.rc = rc
self.out = out
self.err = err
def exec_command(self, cmd):
stdout = FakeSshStream(self.out)
stdout.setup_channel(self.rc)
return (six.BytesIO(),
stdout,
six.BytesIO(self.err))
class SshExecuteTestCase(test_base.BaseTestCase):
def test_invalid_addl_env(self):
self.assertRaises(processutils.InvalidArgumentError,
processutils.ssh_execute,
None, 'ls', addl_env='important')
def test_invalid_process_input(self):
self.assertRaises(processutils.InvalidArgumentError,
processutils.ssh_execute,
None, 'ls', process_input='important')
def test_works(self):
out, err = processutils.ssh_execute(FakeSshConnection(0), 'ls')
self.assertEqual('stdout', out)
self.assertEqual('stderr', err)
self.assertIsInstance(out, six.text_type)
self.assertIsInstance(err, six.text_type)
def test_binary(self):
o, e = processutils.ssh_execute(FakeSshConnection(0), 'ls',
binary=True)
self.assertEqual(b'stdout', o)
self.assertEqual(b'stderr', e)
self.assertIsInstance(o, bytes)
self.assertIsInstance(e, bytes)
def check_undecodable_bytes(self, binary):
out_bytes = b'out: ' + UNDECODABLE_BYTES
err_bytes = b'err: ' + UNDECODABLE_BYTES
conn = FakeSshConnection(0, out=out_bytes, err=err_bytes)
out, err = processutils.ssh_execute(conn, 'ls', binary=binary)
if six.PY3 and not binary:
self.assertEqual(out, os.fsdecode(out_bytes))
self.assertEqual(err, os.fsdecode(err_bytes))
else:
self.assertEqual(out, out_bytes)
self.assertEqual(err, err_bytes)
def test_undecodable_bytes(self):
self.check_undecodable_bytes(False)
def test_binary_undecodable_bytes(self):
self.check_undecodable_bytes(True)
def check_undecodable_bytes_error(self, binary):
out_bytes = b'out: password="secret1" ' + UNDECODABLE_BYTES
err_bytes = b'err: password="secret2" ' + UNDECODABLE_BYTES
conn = FakeSshConnection(1, out=out_bytes, err=err_bytes)
out_bytes = b'out: password="***" ' + UNDECODABLE_BYTES
err_bytes = b'err: password="***" ' + UNDECODABLE_BYTES
exc = self.assertRaises(processutils.ProcessExecutionError,
processutils.ssh_execute,
conn, 'ls',
binary=binary, check_exit_code=True)
out = exc.stdout
err = exc.stderr
if six.PY3:
# On Python 3, stdout and stderr attributes of
# ProcessExecutionError must always be Unicode
self.assertEqual(out, os.fsdecode(out_bytes))
self.assertEqual(err, os.fsdecode(err_bytes))
else:
# On Python 2, stdout and stderr attributes of
# ProcessExecutionError must always be bytes
self.assertEqual(out, out_bytes)
self.assertEqual(err, err_bytes)
def test_undecodable_bytes_error(self):
self.check_undecodable_bytes_error(False)
def test_binary_undecodable_bytes_error(self):
self.check_undecodable_bytes_error(True)
def test_fails(self):
self.assertRaises(processutils.ProcessExecutionError,
processutils.ssh_execute, FakeSshConnection(1), 'ls')
def _test_compromising_ssh(self, rc, check):
fixture = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
fake_stdin = six.BytesIO()
fake_stdout = mock.Mock()
fake_stdout.channel.recv_exit_status.return_value = rc
fake_stdout.read.return_value = b'password="secret"'
fake_stderr = six.BytesIO(b'password="foobar"')
command = 'ls --password="bar"'
connection = mock.Mock()
connection.exec_command.return_value = (fake_stdin, fake_stdout,
fake_stderr)
if check and rc != -1 and rc != 0:
err = self.assertRaises(processutils.ProcessExecutionError,
processutils.ssh_execute,
connection, command,
check_exit_code=check)
self.assertEqual(rc, err.exit_code)
self.assertEqual(err.stdout, 'password="***"')
self.assertEqual(err.stderr, 'password="***"')
self.assertEqual(err.cmd, 'ls --password="***"')
self.assertNotIn('secret', str(err))
self.assertNotIn('foobar', str(err))
else:
o, e = processutils.ssh_execute(connection, command,
check_exit_code=check)
self.assertEqual('password="***"', o)
self.assertEqual('password="***"', e)
self.assertIn('password="***"', fixture.output)
self.assertNotIn('bar', fixture.output)
def test_compromising_ssh1(self):
self._test_compromising_ssh(rc=-1, check=True)
def test_compromising_ssh2(self):
self._test_compromising_ssh(rc=0, check=True)
def test_compromising_ssh3(self):
self._test_compromising_ssh(rc=1, check=True)
def test_compromising_ssh4(self):
self._test_compromising_ssh(rc=1, check=False)
def test_compromising_ssh5(self):
self._test_compromising_ssh(rc=0, check=False)
def test_compromising_ssh6(self):
self._test_compromising_ssh(rc=-1, check=False)
class PrlimitTestCase(test_base.BaseTestCase):
    # Simple program that does nothing and returns exit code 0.
# Use Python to be portable.
SIMPLE_PROGRAM = [sys.executable, '-c', 'pass']
    def soft_limit(self, res, subtract, default_limit):
# Create a new soft limit for a resource, lower than the current
# soft limit.
soft_limit, hard_limit = resource.getrlimit(res)
if soft_limit < 0:
soft_limit = default_limit
else:
            soft_limit -= subtract
return soft_limit
def memory_limit(self, res):
        # Subtract 1 kB just to get a different limit. Don't subtract too
        # much to avoid memory allocation issues.
        #
        # Use 1 GB by default. Limit high enough to be able to load shared
        # libraries. Limit low enough to work on 32-bit platforms.
return self.soft_limit(res, 1024, 1024 ** 3)
def limit_address_space(self):
max_memory = self.memory_limit(resource.RLIMIT_AS)
return processutils.ProcessLimits(address_space=max_memory)
def test_simple(self):
        # Simple test running a no-op program with no parameters
prlimit = self.limit_address_space()
stdout, stderr = processutils.execute(*self.SIMPLE_PROGRAM,
prlimit=prlimit)
self.assertEqual(stdout.rstrip(), '')
self.assertEqual(stderr.rstrip(), '')
def check_limit(self, prlimit, resource, value):
code = ';'.join(('import resource',
'print(resource.getrlimit(resource.%s))' % resource))
args = [sys.executable, '-c', code]
stdout, stderr = processutils.execute(*args, prlimit=prlimit)
expected = (value, value)
self.assertEqual(stdout.rstrip(), str(expected))
def test_address_space(self):
prlimit = self.limit_address_space()
self.check_limit(prlimit, 'RLIMIT_AS', prlimit.address_space)
def test_resident_set_size(self):
max_memory = self.memory_limit(resource.RLIMIT_RSS)
prlimit = processutils.ProcessLimits(resident_set_size=max_memory)
self.check_limit(prlimit, 'RLIMIT_RSS', max_memory)
def test_number_files(self):
nfiles = self.soft_limit(resource.RLIMIT_NOFILE, 1, 1024)
prlimit = processutils.ProcessLimits(number_files=nfiles)
self.check_limit(prlimit, 'RLIMIT_NOFILE', nfiles)
def test_unsupported_prlimit(self):
self.assertRaises(ValueError, processutils.ProcessLimits, xxx=33)
def test_relative_path(self):
prlimit = self.limit_address_space()
program = sys.executable
env = dict(os.environ)
env['PATH'] = os.path.dirname(program)
args = [os.path.basename(program), '-c', 'pass']
processutils.execute(*args, prlimit=prlimit, env_variables=env)
def test_execv_error(self):
prlimit = self.limit_address_space()
args = ['/missing_path/dont_exist/program']
try:
processutils.execute(*args, prlimit=prlimit)
except processutils.ProcessExecutionError as exc:
self.assertEqual(exc.exit_code, 1)
self.assertEqual(exc.stdout, '')
expected = ('%s -m oslo_concurrency.prlimit: '
'failed to execute /missing_path/dont_exist/program: '
% os.path.basename(sys.executable))
self.assertIn(expected, exc.stderr)
else:
self.fail("ProcessExecutionError not raised")
def test_setrlimit_error(self):
prlimit = self.limit_address_space()
# trying to set a limit higher than the current hard limit
# with setrlimit() should fail.
higher_limit = prlimit.address_space + 1024
args = [sys.executable, '-m', 'oslo_concurrency.prlimit',
'--as=%s' % higher_limit,
'--']
args.extend(self.SIMPLE_PROGRAM)
try:
processutils.execute(*args, prlimit=prlimit)
except processutils.ProcessExecutionError as exc:
self.assertEqual(exc.exit_code, 1)
self.assertEqual(exc.stdout, '')
expected = ('%s -m oslo_concurrency.prlimit: '
'failed to set the AS resource limit: '
% os.path.basename(sys.executable))
self.assertIn(expected, exc.stderr)
else:
self.fail("ProcessExecutionError not raised")
|
[
"[email protected]"
] | |
23d4e5aa40696d6550bcbaa74f288227bb8f13b0
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/ir/inference/test_groupnorm_act_pass_fuse_pass.py
|
c9f821b21d4e93fb3e366f54c555278279cf2643
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 |
Apache-2.0
| 2023-09-14T19:20:51 | 2016-08-15T06:59:08 |
C++
|
UTF-8
|
Python
| false | false | 4,554 |
py
|
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import partial
import hypothesis.strategies as st
import numpy as np
from auto_scan_test import PassAutoScanTest
from program_config import OpConfig, ProgramConfig, TensorConfig
import paddle.inference as paddle_infer
class TestElementGNActPass(PassAutoScanTest):
    # Pattern fused by the pass:
    #
    #     |                          |
    # groupnorm    -fuse->   groupnorm(with_silu)
    #     |                          |
    #    silu
    #     |
    #
def sample_predictor_configs(self, program_config):
# trt dynamic_shape
config = self.create_trt_inference_config()
config.enable_tensorrt_engine(
max_batch_size=1,
workspace_size=102400,
min_subgraph_size=0,
precision_mode=paddle_infer.PrecisionType.Half,
use_static=False,
use_calib_mode=False,
)
config.set_trt_dynamic_shape_info(
{
"input_data": [1, 160, 1, 1],
},
{
"input_data": [4, 1280, 64, 64],
},
{
"input_data": [1, 320, 32, 32],
},
)
yield config, ['group_norm'], (3e-3, 1e-3)
def sample_program_config(self, draw):
axis = draw(st.sampled_from([0, -1]))
epsilon = draw(st.floats(min_value=0.0000001, max_value=0.001))
batch_size = draw(st.integers(min_value=1, max_value=4))
groups = draw(st.sampled_from([4, 8, 16, 32]))
hw = draw(st.sampled_from([1, 8, 16, 32]))
channel = draw(st.sampled_from([320, 1280]))
def generate_input(attrs):
return np.random.random(
[attrs[1]["batch_size"], *attrs[1]["input_dim"]]
).astype(np.float32)
def generate_weight(attrs):
return np.random.random(attrs[1]['input_dim'][0]).astype(np.float32)
attrs = [
{
'epsilon': epsilon,
'groups': groups,
},
{
'batch_size': batch_size,
'input_dim': [channel, hw, hw],
},
]
group_norm_op = OpConfig(
type="group_norm",
inputs={
"X": ["input_data"],
"Bias": ["group_norm_bias"],
"Scale": ["group_norm_scale"],
},
outputs={
"Y": ["group_norm_output1"],
"Mean": ["group_norm_output2"],
"Variance": ["group_norm_output3"],
},
attrs={
"data_layout": "NCHW",
"groups": attrs[0]["groups"],
"epsilon": attrs[0]["epsilon"],
},
)
silu_op = OpConfig(
type="silu",
inputs={
"X": ["group_norm_output1"],
},
outputs={
"Out": ["silu_output"],
},
)
program_config = ProgramConfig(
ops=[
group_norm_op,
silu_op,
],
weights={
"group_norm_bias": TensorConfig(
data_gen=partial(generate_weight, attrs)
),
"group_norm_scale": TensorConfig(
data_gen=partial(generate_weight, attrs)
),
},
inputs={
"input_data": TensorConfig(
data_gen=partial(generate_input, attrs)
),
},
outputs=["silu_output"],
)
return program_config
def test(self):
self.run_and_statis(
quant=False,
max_examples=50,
passes=["groupnorm_act_pass"],
max_duration=250,
min_success_num=50,
)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
24938ce3102a1751f11d20fb9b6e5597d6e0b1d8
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2627/60593/271403.py
|
5ab202a296dfd60d401bede606a6b59da9d7601c
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 101 |
py
|
# For each test case, read p and s and print min(p/12, (s/6)**0.5)**3:
# the volume of the largest cube whose total edge length fits in p and
# whose surface area fits in s.
t = int(input())
for _ in range(t):
    p, s = map(int, input().split())
    print(min(p / 12, (s / 6) ** 0.5) ** 3)
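
# Worked example (hypothetical input): for p = 20 and s = 14 the candidate
# sides are 20/12 ~ 1.667 and (14/6)**0.5 ~ 1.528; the smaller one wins, so
# the printed volume is 1.528**3 ~ 3.56.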
|
[
"[email protected]"
] | |
5d2f65939d336b320d6390d414834f0d5e24b0be
|
2ce3ef971a6d3e14db6615aa4da747474d87cc5d
|
/练习/CMB/02_03 string_test/string_test_lyh.py
|
153d9a8d0141a302596cb45801712394292ac494
|
[] |
no_license
|
JarvanIV4/pytest_hogwarts
|
40604245807a4da5dbec2cb189b57d5f76f5ede3
|
37d4bae23c030480620897583f9f5dd69463a60c
|
refs/heads/master
| 2023-01-07T09:56:33.472233 | 2020-11-10T15:06:13 | 2020-11-10T15:06:13 | 304,325,109 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 772 |
py
|
string1 = "LiuYongHua"
def reverse_string(s):
    # Reverse the string and swap the case of each letter.
    return s[::-1].swapcase()
#print(reverse_string(string1))
string2 = "agf23bss43dsfds6fd4"

def new_num_str(s):
    # Keep only the decimal digit characters of the string.
    return "".join(c for c in s if "0" <= c <= "9")
#print(new_num_str(string2))
string3 = "DSabABaassBA"
string4 = "ab"

def count_str(str1, str2):
    # Case-insensitive count of occurrences of str2 in str1.
    return str1.upper().count(str2.upper())

print(count_str(string3, string4))
|
[
"[email protected]"
] | |
a64e8f763cd6929ea0b1e9186137a9d92946b7df
|
a2ac73af04a07bb070cd85c88778608b561dd3e4
|
/addons/event/res_partner.py
|
0b7ae0a759890d669d141d4bc67c1dd621df039f
|
[] |
no_license
|
sannareddy/openerp-heimai
|
c849586d6099cc7548dec8b3f1cc7ba8be49594a
|
58255ecbcea7bf9780948287cf4551ed6494832a
|
refs/heads/master
| 2021-01-15T21:34:46.162550 | 2014-05-13T09:20:37 | 2014-05-13T09:20:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 55 |
py
|
/usr/share/pyshared/openerp/addons/event/res_partner.py
|
[
"[email protected]"
] | |
d7940c263c8c7fce2ebf9d1c8ed5e39bc97d882e
|
931f818c937df10d0b601b559d5a340dde4b04d1
|
/flexx/event/_js.py
|
8411919bdafdcbb1722e5d711290ed1f5f16ab66
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
vault-the/flexx
|
7f06a6c7f9c4bfa35d65b523f376be0c5873a862
|
15fe205f4239e44c2911f9fe72c86077893b6209
|
refs/heads/master
| 2021-09-06T05:06:57.326378 | 2018-02-02T16:02:17 | 2018-02-02T16:02:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 25,137 |
py
|
"""
Implementation of flexx.event in JS via PyScript.
In this module we compile the flexx event system to JavaScript. Most
code is generated by transpiling methods from the Python classes. This
module implements a JS variant of some classes to overload certain
behavior in JS. E.g. the JS implementation of the Component class has
some boilerplate code to create actions, reactions, emitters and
properties.
By reusing as much code as possible, we reduce maintenance costs, and
make it easier to ensure that the Python and JS implementations of this
event system have the same API and behavior.
"""
import re
import sys
import json
import inspect
from flexx.pyscript import JSString, RawJS, py2js
from flexx.pyscript.parser2 import get_class_definition
from flexx.event import _property
from flexx.event._loop import Loop
from flexx.event._action import ActionDescriptor
from flexx.event._reaction import ReactionDescriptor, Reaction
from flexx.event._attribute import Attribute
from flexx.event._property import Property
from flexx.event._emitter import EmitterDescriptor
from flexx.event._component import Component, _mutate_array_js
Object = console = setTimeout = loop = logger = arguments = None  # fool pyflakes
undefined = 'UNDEFINED'
reprs = json.dumps
class MetaCollector:
def __init__(self, cls=None):
filename = None
linenr = 1e9
if cls is not None:
filename = getattr(sys.modules[cls.__module__], '__file__', None)
linenr = (cls.__linenr__ if hasattr(cls, '__linenr__') else
inspect.findsource(cls)[1])
self.meta = {'vars_unknown': set(), 'vars_global': set(),
'std_functions': set(), 'std_methods': set(),
'filename': filename, 'linenr': linenr}
def py2js(self, *args, **kwargs):
kwargs['inline_stdlib'] = False
kwargs['docstrings'] = False
code = py2js(*args, **kwargs)
return self.update(code)
def update(self, code):
for key in self.meta:
if key in ('filename', 'linenr'):
continue
self.meta[key].update(code.meta[key])
return code
def attach_meta(self, s):
s = JSString(s)
s.meta = self.meta
return s
## The JS class variants
# Easiest to implement this directly in JS
JS_LOGGER = """
var Logger = function () {
this.level = 25;
}
var $Logger = Logger.prototype;
$Logger.debug = function (msg) {
if (this.level <= 10) { console.info(msg); }
};
$Logger.info = function (msg) {
if (this.level <= 20) { console.info(msg); }
};
$Logger.warn = function (msg) {
if (this.level <= 30) { console.warn(msg); }
};
$Logger.exception = function (msg) {
console.error(msg);
};
$Logger.error = function (msg) {
console.error(msg);
};
var logger = new Logger();
"""
class LoopJS: # pragma: no cover
""" JS variant of the Loop class.
"""
# Hide a couple of methods
integrate = undefined
integrate_tornado = undefined
integrate_pyqt4 = undefined
integrate_pyside = undefined
_integrate_qt = undefined
_thread_match = undefined
def __init__(self):
self._active_components = []
self.reset()
def _call_soon_func(self, func):
setTimeout(func, 0)
def _iter_callback(self):
self._scheduled_call_to_iter = False
return self.iter()
class ComponentJS: # pragma: no cover
""" JS variant of the Component class.
"""
_IS_COMPONENT = True
_COUNT = 0
_REACTION_COUNT = 0
def __init__(self, *init_args, **property_values):
RawJS('Component.prototype._COUNT += 1')
self._id = RawJS("this.__name__ + Component.prototype._COUNT")
self._disposed = False
# Init some internal variables
self.__handlers = {} # reactions connecting to this component
self.__pending_events = {}
self.__anonymous_reactions = []
# Create actions
for i in range(len(self.__actions__)):
name = self.__actions__[i]
self.__create_action(self[name], name)
# Create emitters
for i in range(len(self.__emitters__)):
name = self.__emitters__[i]
self.__handlers[name] = []
self.__create_emitter(self[name], name)
# Create properties
for i in range(len(self.__properties__)):
name = self.__properties__[i]
self.__handlers[name] = []
self.__create_property(name)
# Create attributes
for i in range(len(self.__attributes__)):
name = self.__attributes__[i]
self.__create_attribute(name)
# Init the values of all properties.
prop_events = self._comp_init_property_values(property_values)
# Apply user-defined initialization
with self:
self.init(*init_args)
# Connect reactions and fire initial events
self._comp_init_reactions()
self._comp_init_events(prop_events)
def _comp_init_property_values(self, property_values):
events = []
# First process default property values
for i in range(len(self.__properties__)):
name = self.__properties__[i]
value_name = '_' + name + '_value'
value = self[value_name]
value = self['_' + name + '_validate'](value)
self[value_name] = value
if name not in property_values:
ev = dict(type=name, new_value=value, old_value=value, mutation='set')
events.append(ev)
# Then process property values given at init time
        for name, value in property_values.items():  # is sorted by occurrence in py36
if name not in self.__properties__:
if name in self.__attributes__:
raise AttributeError('%s.%s is an attribute, not a property' %
(self._id, name))
else:
raise AttributeError('%s does not have property %s.' %
(self._id, name))
if callable(value):
self._comp_make_implicit_setter(name, value)
continue
value = self['_' + name + '_validate'](value)
self['_' + name + '_value'] = value
ev = dict(type=name, new_value=value, old_value=value, mutation='set')
events.append(ev)
return events
def _comp_make_implicit_setter(self, prop_name, func):
setter_func = getattr(self, 'set_' + prop_name, None)
if setter_func is None:
t = '%s does not have a set_%s() action for property %s.'
raise TypeError(t % (self._id, prop_name, prop_name))
setter_reaction = lambda: setter_func(func())
reaction = self.__create_reaction(setter_reaction, 'auto-' + prop_name, [])
self.__anonymous_reactions.append(reaction)
def _comp_init_reactions(self):
# Create (and connect) reactions.
# Implicit reactions need to be invoked to initialize connections.
for i in range(len(self.__reactions__)):
name = self.__reactions__[i]
func = self[name]
r = self.__create_reaction(func, name, func._connection_strings or ())
if r.is_explicit() is False:
ev = dict(source=self, type='', label='')
loop.add_reaction_event(r, ev)
        # Also invoke the anonymous implicit reactions
for i in range(len(self.__anonymous_reactions)):
r = self.__anonymous_reactions[i]
if r.is_explicit() is False:
ev = dict(source=self, type='', label='')
loop.add_reaction_event(r, ev)
def reaction(self, *connection_strings):
# The JS version (no decorator functionality)
if len(connection_strings) < 2:
raise RuntimeError('Component.reaction() (js) needs a function and '
'one or more connection strings.')
# Get callable
if callable(connection_strings[0]):
func = connection_strings[0]
connection_strings = connection_strings[1:]
elif callable(connection_strings[-1]):
func = connection_strings[-1]
connection_strings = connection_strings[:-1]
else:
raise TypeError('Component.reaction() requires a callable.')
# Verify connection strings
for i in range(len(connection_strings)):
s = connection_strings[i]
if not (isinstance(s, str) and len(s)):
raise ValueError('Connection string must be nonempty strings.')
# Get function name (Flexx sets __name__ on methods)
name = RawJS("func.__name__ || func.name || 'anonymous'")
# name = name.split(' ')[-1].split('flx_')[-1]
nameparts = RawJS("name.split(' ')")
nameparts = RawJS("nameparts[nameparts.length-1].split('flx_')")
name = nameparts[-1]
return self.__create_reaction_ob(func, name, connection_strings)
def __create_action(self, action_func, name):
# Keep a ref to the action func, which is a class attribute. The object
# attribute with the same name will be overwritten with the property below.
# Because the class attribute is the underlying function, super() works.
        def action():  # this func should return None, so super() works correctly
if loop.is_processing_actions() is True:
res = action_func.apply(self, arguments)
if res is not None:
logger.warn('Action (%s) is not supposed to return a value' % name)
else:
loop.add_action_invokation(action, arguments)
def getter():
return action
def setter(x):
raise AttributeError('Action %s is not settable' % name)
opts = {'enumerable': True, 'configurable': True, # i.e. overloadable
'get': getter, 'set': setter}
Object.defineProperty(self, name, opts)
def __create_attribute(self, name):
def getter():
return self['_' + name]
def setter(x):
raise AttributeError('Cannot set attribute %r' % name)
opts = {'enumerable': False, 'configurable': False,
'get': getter, 'set': setter}
Object.defineProperty(self, name, opts)
def __create_property(self, name):
private_name = '_' + name + '_value'
def getter():
loop.register_prop_access(self, name)
return self[private_name]
def setter(x):
raise AttributeError('Cannot set property %r; properties can only '
'be mutated by actions.' % name)
opts = {'enumerable': True, 'configurable': True, # i.e. overloadable
'get': getter, 'set': setter}
Object.defineProperty(self, name, opts)
def __create_emitter(self, emitter_func, name):
# Keep a ref to the emitter func, see comment in __create_action()
        def func():  # this func should return None, so super() works correctly
ev = emitter_func.apply(self, arguments)
if ev is not None:
self.emit(name, ev)
def getter():
return func
def setter(x):
raise AttributeError('Emitter %s is not settable' % name)
opts = {'enumerable': True, 'configurable': True, # i.e. overloadable
'get': getter, 'set': setter}
Object.defineProperty(self, name, opts)
def __create_reaction(self, reaction_func, name, connection_strings):
reaction = self.__create_reaction_ob(reaction_func, name, connection_strings)
def getter():
return reaction
def setter(x):
raise AttributeError('Reaction %s is not settable' % name)
opts = {'enumerable': True, 'configurable': True, # i.e. overloadable
'get': getter, 'set': setter}
Object.defineProperty(self, name, opts)
return reaction
def __create_reaction_ob(self, reaction_func, name, connection_strings):
# Keep ref to the reaction function, see comment in create_action().
# Create function that becomes our "reaction object"
def reaction():
return reaction_func.apply(self, arguments) # arguments == events
# Attach methods to the function object (this gets replaced)
REACTION_METHODS_HOOK # noqa
# Init reaction
that = self
RawJS("Component.prototype._REACTION_COUNT += 1")
reaction._id = RawJS("'r' + Component.prototype._REACTION_COUNT")
reaction._name = name
reaction._ob1 = lambda : that # no weakref in JS
reaction._init(connection_strings, self)
return reaction
## Compile functions
OK_MAGICS = (
# Specific to Flexx
'__attributes__', '__properties__', '__actions__',
'__emitters__', '__reactions__', '__jsmodule__',
# Functions that make sense
'__init__', '__enter__', '__exit__',
# For flexx.ui
'__proxy_properties__',
)
def _create_js_class(PyClass, JSClass):
""" Create the JS code for Loop, Reaction and Component based on their
Python and JS variants.
"""
mc = MetaCollector(PyClass)
cname = PyClass.__name__
# Start with our special JS version
jscode = [mc.py2js(JSClass, cname)]
jscode[0] = jscode[0].replace('}\n',
'}\nvar $%s = %s.prototype;\n' % (cname, cname),
1
).replace('%s.prototype.' % cname,
'$%s.' % cname)
# Add the Python class methods
for name, val in sorted(PyClass.__dict__.items()):
nameok = name in OK_MAGICS or not name.startswith('__')
if nameok and not hasattr(JSClass, name):
if callable(val):
jscode.append(mc.py2js(val, '$' + cname + '.' + name))
elif name in OK_MAGICS:
jscode.append('$' + cname + '.' + name + ' = ' + json.dumps(val))
# Compose
jscode = '\n'.join(jscode)
# Add the reaction methods to component
if PyClass is Component:
code = '\n'
for name, val in sorted(Reaction.__dict__.items()):
if not name.startswith('__') and callable(val):
code += mc.py2js(val, 'reaction.' + name, indent=1)[4:] + '\n'
jscode = jscode.replace('REACTION_METHODS_HOOK', code)
# Optimizations, e.g. remove threading lock context in Loop
if PyClass is Loop:
p = r"this\._lock\.__enter.+?try {(.+?)} catch.+?else.+?exit__.+?}"
        jscode = re.sub(p, r'{/* with lock */\1}', jscode, 0,
                        re.MULTILINE | re.DOTALL)
        jscode = re.sub(r'\$Loop\..+? = undefined;\n', r'', jscode, 0,
                        re.MULTILINE | re.DOTALL)
jscode = jscode.replace('this._ensure_thread_', '//this._ensure_thread_')
jscode = jscode.replace('threading.get_ident()', '0')
jscode = jscode.replace('._local.', '.')
jscode = jscode.replace('this._thread_match(true);\n', '')
jscode = jscode.replace('if (_pyfunc_truthy(this._thread_match(false)))', '')
# Almost done
jscode = jscode.replace('new Dict()', '{}').replace('new Dict(', '_pyfunc_dict(')
mc.meta['std_functions'].add('dict')
return mc.attach_meta(jscode)
def create_js_component_class(cls, cls_name, base_class='Component.prototype'):
""" Create the JS equivalent of a subclass of the Component class.
Given a Python class with actions, properties, emitters and reactions,
this creates the code for the JS version of the class. It also supports
class constants that are int/float/str, or a tuple/list thereof.
The given class does not have to be a subclass of Component.
This more or less does what ComponentMeta does, but for JS.
"""
assert cls_name != 'Component' # we need this special class above instead
# Collect meta information of all code pieces that we collect
mc = MetaCollector(cls)
mc.meta['std_functions'].add('op_instantiate') # b/c we use get_class_definition
total_code = []
funcs_code = [] # functions and emitters go below class constants
const_code = []
err = ('Objects on JS Component classes can only be int, float, str, '
'or a list/tuple thereof. Not %s -> %r.')
total_code.append('\n'.join(get_class_definition(cls_name, base_class)).rstrip())
prefix = '' if cls_name.count('.') else 'var '
total_code[0] = prefix + total_code[0]
prototype_prefix = '$' + cls_name.split('.')[-1] + '.'
total_code.append('var %s = %s.prototype;' % (prototype_prefix[:-1], cls_name))
    # Process class items in original order, or sorted by name if we can't
class_items = cls.__dict__.items()
if sys.version_info < (3, 6): # pragma: no cover
class_items = sorted(class_items)
for name, val in class_items:
if isinstance(val, ActionDescriptor):
# Set underlying function as class attribute. This is overwritten
# by the instance, but this way super() works.
funcname = name
# Add function def
code = mc.py2js(val._func, prototype_prefix + funcname)
code = code.replace('super()', base_class) # fix super
# Tweak if this was an autogenerated action
# we use flx_ prefixes to indicate autogenerated functions
if val._func.__name__.startswith('flx_'):
subname = name
if name.startswith('set_') or name.startswith('_set_'):
subname = name[4:]
code = code.replace("flx_name", "'%s'" % subname)
funcs_code.append(code.rstrip())
# Mark to not bind the func
funcs_code.append(prototype_prefix + funcname + '.nobind = true;')
funcs_code.append('')
elif isinstance(val, ReactionDescriptor):
funcname = name # funcname is simply name, so that super() works
# Add function def
code = mc.py2js(val._func, prototype_prefix + funcname)
code = code.replace('super()', base_class) # fix super
funcs_code.append(code.rstrip())
# Mark to not bind the func
funcs_code.append(prototype_prefix + funcname + '.nobind = true;')
# Add connection strings, but not for implicit reactions
if val._connection_strings:
funcs_code.append(prototype_prefix + funcname +
'._connection_strings = ' +
reprs(val._connection_strings))
funcs_code.append('')
elif isinstance(val, EmitterDescriptor):
funcname = name
# Add function def
code = mc.py2js(val._func, prototype_prefix + funcname)
code = code.replace('super()', base_class) # fix super
funcs_code.append(code.rstrip())
# Mark to not bind the func
funcs_code.append(prototype_prefix + funcname + '.nobind = true;')
funcs_code.append('')
elif isinstance(val, Attribute):
pass
elif isinstance(val, Property):
# Mutator and validator functions are picked up as normal functions.
# Set default value on class.
default_val = json.dumps(val._default)
t = '%s_%s_value = %s;'
const_code.append(t % (prototype_prefix, name, default_val))
elif isinstance(val, classmethod):
pass # ignore, like magics
elif (name.startswith('__') and name not in OK_MAGICS and
not name.endswith('_validate')):
# These are only magics, since class attributes with double-underscores
# have already been mangled. Note that we need to exclude validator
# funcs of private properties though.
pass
elif (name.endswith('_validate') and hasattr(val, '__self__') and
isinstance(val.__self__, Property)):
# Proxy the validator functions (not inline).
prop_class = val.__self__.__class__
mod_name_parts = prop_class.__module__.split('.')
module_ns = sys.modules[cls.__module__].__dict__
# Get canonical class name, included part of the module name, as
# needed, depending on what names exist in the component module.
prop_class_name = prop_class.__name__
if prop_class_name not in module_ns:
for ip in reversed(range(0, len(mod_name_parts))):
if mod_name_parts[ip] in module_ns:
prop_class_name = mod_name_parts[ip] + '.' + prop_class_name
break
# Create function that calls the _validate function
t = ' = function (value) { return %s(value, %s, %s); }\n'
code = prototype_prefix + name + t % (
prop_class_name + '.prototype._validate',
json.dumps(name[1:-9]),
json.dumps(val.__self__._data))
funcs_code.append(code)
mc.meta['vars_unknown'].add(prop_class_name)
elif callable(val):
# Functions, including methods attached by the meta class
code = mc.py2js(val, prototype_prefix + name)
code = code.replace('super()', base_class) # fix super
if val.__name__.startswith('flx_'):
subname = name[8:] if name.startswith('_mutate_') else name
code = code.replace("flx_name", "'%s'" % subname)
funcs_code.append(code.rstrip())
funcs_code.append('')
else:
# Static simple (json serializable) attributes, e.g. __actions__ etc.
try:
serialized = json.dumps(val)
except Exception as err: # pragma: no cover
raise ValueError('Attributes on JS Component class must be '
'JSON compatible.\n%s' % str(err))
const_code.append(prototype_prefix + name + ' = ' + serialized)
if const_code:
total_code.append('')
total_code.extend(const_code)
if funcs_code:
total_code.append('')
total_code.extend(funcs_code)
total_code.append('')
# Return string with meta info (similar to what py2js returns)
mc.meta['vars_unknown'].discard('flx_name')
return mc.attach_meta('\n'.join(total_code))
def gen_prop_classes(mc):
""" Generate stub Property classes with _validate() methods.
"""
total_code = []
# Add JS-specific base Property class
total_code.append('var Property = function () {};')
total_code.append('Property.prototype._validate = '
'function(value, name, data) {return value;};')
# Add code for all other props
names = ['Property']
for name in dir(_property):
val = getattr(_property, name)
if isinstance(val, type) and issubclass(val, Property) and val is not Property:
names.append(name)
total_code.append(mc.py2js(val))
# Add event.xx shortcuts for use without flexx.app's binding mechanics.
total_code.append('var event = {}; // convenience "module emulator"')
for name in names:
total_code.append('event.%s = %s;' % (name, name))
return '\n'.join(total_code)
# Generate the code
mc = MetaCollector()
JS_FUNCS = mc.py2js(_mutate_array_js) + '\nvar mutate_array = _mutate_array_js;\n'
JS_LOOP = mc.update(_create_js_class(Loop, LoopJS)) + '\nvar loop = new Loop();\n'
JS_COMPONENT = mc.update(_create_js_class(Component, ComponentJS))
JS_PROP = gen_prop_classes(mc)
JS_EVENT = JS_FUNCS + JS_LOGGER + JS_LOOP + JS_COMPONENT + JS_PROP
JS_EVENT = mc.attach_meta(JS_EVENT.replace(' ', '\t'))
del mc
assert JS_LOOP.count('._scheduled_call_to_iter') > 2 # prevent error after refactor
if __name__ == '__main__':
# Testing ...
from flexx import event
class Foo(Component):
__x = 3
foo = event.StringProp('asd', settable=True)
@event.action
def do_bar(self, v=0):
print(v)
@event.reaction
def react2foo(self):
print(self.foo)
def __xx(self):
pass
toprint = JS_EVENT # or JS_LOOP JS_COMPONENT JS_EVENT
print('-' * 80)
print(toprint)
print('-' * 80)
print(len(toprint), 'of', len(JS_EVENT), 'bytes in total') # 29546 before refactor
print('-' * 80)
print(create_js_component_class(Foo, 'Foo'))
|
[
"[email protected]"
] | |
42b65f1d1f9cc2d93d1e76c4b9d4e9c9f6f48bba
|
22956a21b0b3ffe69c5618a7ef53683e4f73b483
|
/busstopped-gae/lib/wtforms/__init__.py
|
954ff22aba94d1172ff9785c67c417f1fb682cc4
|
[] |
no_license
|
humitos/bus-stopped
|
b397c3c47d8bd4b0b713389b3a0f47b7aa573762
|
e49e6ce0b20ebc5f19fb7374216c082b0b12a962
|
refs/heads/master
| 2021-01-17T05:53:51.795324 | 2011-03-28T15:11:27 | 2011-03-28T15:11:27 | 1,435,952 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 408 |
py
|
"""
WTForms
=======
WTForms is a flexible forms validation and rendering library for python web
development.
:copyright: Copyright (c) 2010 by Thomas Johansson, James Crasta and others.
:license: BSD, see LICENSE.txt for details.
"""
from wtforms import validators, widgets
from wtforms.fields import *
from wtforms.form import Form
from wtforms.validators import ValidationError
__version__ = '0.6.3dev'
|
[
"[email protected]"
] | |
380f5cd110c93d9c0100e713830642b815bad83d
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/51/usersdata/82/21229/submittedfiles/listas.py
|
9397d4c26e6dfec440597fe791ca4bf0c5754c4b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 427 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
def maiorDegrau(a):
    # Largest absolute step between adjacent elements of a.
    maior = 0
    for i in range(0, len(a)-1, 1):
        degrau = math.fabs(a[i]-a[i+1])
        if degrau > maior:  # was comparing against the function object itself
            maior = degrau
    return maior  # was returning None, so the caller's truth test never worked
a = []
n = input('Digite o valor de n:')
for i in range(0, n, 1):
    a.append(input('Digite o valor de a:'))
if maiorDegrau(a):
    print('S')
else:
    print('N')
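Quick illustrative checks of the fixed helper (values chosen arbitrarily):
# Run separately from the interactive part above:
assert maiorDegrau([1, 4, 2]) == 3.0  # steps are |1-4| = 3.0 and |4-2| = 2.0
assert maiorDegrau([5, 5, 5]) == 0    # flat list -> falsy -> the script prints 'N'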
|
[
"[email protected]"
] | |
d0087db78a7bc3a955916ca6d7840764df89d2e4
|
27acd9eeb0d2b9b6326cc0477e7dbb84341e265c
|
/test/vraag4/src/yahtzee/43.py
|
d64a07e55e549fa6d710248f28ee87831f6d9bf7
|
[] |
no_license
|
VerstraeteBert/algos-ds
|
e0fe35bc3c5b7d8276c07250f56d3719ecc617de
|
d9215f11cdfa1a12a3b19ade3b95fa73848a636c
|
refs/heads/master
| 2021-07-15T13:46:58.790446 | 2021-02-28T23:28:36 | 2021-02-28T23:28:36 | 240,883,220 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 628 |
py
|
def histogram(stenen):
    # Map each die value to how often it occurs. No need to sort, and the
    # original in-place sort mutated the caller's list as a side effect.
    histogram = {}
    for element in stenen:
        histogram[element] = stenen.count(element)
    return histogram

def max_gelijk(stenen):
    # Highest count of any single value; 'hoogste' avoids shadowing built-in max().
    hoogste = 0
    for element in stenen:
        aantal = stenen.count(element)
        if aantal > hoogste:
            hoogste = aantal
    return hoogste

def is_FullHouse(stenen):
    # Full house: one value occurs exactly three times and another exactly twice.
    trio = False
    duo = False
    for element in stenen:
        aantal = stenen.count(element)
        if aantal == 3:
            trio = True
        if aantal == 2:
            duo = True
    return trio and duo
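Illustrative checks (dice rolls are arbitrary examples):
assert histogram([2, 2, 5]) == {2: 2, 5: 1}
assert max_gelijk([2, 2, 5, 5, 5]) == 3
assert is_FullHouse([2, 2, 5, 5, 5])      # three fives and two twos
assert not is_FullHouse([2, 2, 5, 5, 6])  # only a pair of each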
|
[
"[email protected]"
] | |
5c3c7c56558b2a063516442c59fdfda684584660
|
f8eefef177c4794392ddbad008a67b10e14cb357
|
/common/python/ax/kubernetes/swagger_client/models/v1_config_map_volume_source.py
|
fd67a4624e8f6be40321995d40eca7c9cc399a89
|
[
"Apache-2.0"
] |
permissive
|
durgeshsanagaram/argo
|
8c667c7e64721f149194950f0d75b27efe091f50
|
8601d652476cd30457961aaf9feac143fd437606
|
refs/heads/master
| 2021-07-10T19:44:22.939557 | 2017-10-05T18:02:56 | 2017-10-05T18:02:56 | 105,924,908 | 1 | 0 | null | 2017-10-05T18:22:21 | 2017-10-05T18:22:20 | null |
UTF-8
|
Python
| false | false | 6,776 |
py
|
# coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ConfigMapVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, items=None, default_mode=None, optional=None):
"""
V1ConfigMapVolumeSource - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'items': 'list[V1KeyToPath]',
'default_mode': 'int',
'optional': 'bool'
}
self.attribute_map = {
'name': 'name',
'items': 'items',
'default_mode': 'defaultMode',
'optional': 'optional'
}
self._name = name
self._items = items
self._default_mode = default_mode
self._optional = optional
@property
def name(self):
"""
Gets the name of this V1ConfigMapVolumeSource.
Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
:return: The name of this V1ConfigMapVolumeSource.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ConfigMapVolumeSource.
Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
:param name: The name of this V1ConfigMapVolumeSource.
:type: str
"""
self._name = name
@property
def items(self):
"""
Gets the items of this V1ConfigMapVolumeSource.
If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
:return: The items of this V1ConfigMapVolumeSource.
:rtype: list[V1KeyToPath]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1ConfigMapVolumeSource.
If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
:param items: The items of this V1ConfigMapVolumeSource.
:type: list[V1KeyToPath]
"""
self._items = items
@property
def default_mode(self):
"""
Gets the default_mode of this V1ConfigMapVolumeSource.
Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
:return: The default_mode of this V1ConfigMapVolumeSource.
:rtype: int
"""
return self._default_mode
@default_mode.setter
def default_mode(self, default_mode):
"""
Sets the default_mode of this V1ConfigMapVolumeSource.
Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
:param default_mode: The default_mode of this V1ConfigMapVolumeSource.
:type: int
"""
self._default_mode = default_mode
@property
def optional(self):
"""
Gets the optional of this V1ConfigMapVolumeSource.
        Specify whether the ConfigMap or its keys must be defined
:return: The optional of this V1ConfigMapVolumeSource.
:rtype: bool
"""
return self._optional
@optional.setter
def optional(self, optional):
"""
Sets the optional of this V1ConfigMapVolumeSource.
        Specify whether the ConfigMap or its keys must be defined
:param optional: The optional of this V1ConfigMapVolumeSource.
:type: bool
"""
self._optional = optional
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ConfigMapVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
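A hedged sketch of round-tripping the model; the field values are illustrative assumptions, not data from any real cluster.
# Illustrative round-trip of the generated model:
src = V1ConfigMapVolumeSource(name='app-config', default_mode=0o644, optional=True)
assert src.to_dict()['default_mode'] == 420  # 0o644 in decimal
assert src == V1ConfigMapVolumeSource(name='app-config', default_mode=0o644,
                                      optional=True)
print(src)  # __repr__ delegates to pformat(self.to_dict())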
|
[
"[email protected]"
] | |
72ce1228b9b03247bee7a76ad11a95be51de9ce3
|
3e358fa6f824e3923878c2200ffa685948281824
|
/FlightPlannerTask/FlightPlanner/RnavTolerance/RnavGnssTolerance.py
|
9d4a56cc587a9fb8cf48b4676c53f287e6a420a0
|
[] |
no_license
|
developer124320/FlightPO
|
28825a4c2c0b2c4d9095296e785f0123eb5d7560
|
a5f4c583d01104d7c379e7cf677b898f407ab565
|
refs/heads/master
| 2021-06-16T16:55:27.203361 | 2017-04-10T13:14:39 | 2017-04-10T13:14:39 | 87,812,061 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,520 |
py
|
# -*- coding: UTF-8 -*-
'''
Created on 30 Jun 2015
@author: Administrator
'''
from FlightPlanner.types import DistanceUnits, RnavSpecification, RnavDmeDmeFlightPhase, \
RnavDmeDmeCriteria, RnavGnssFlightPhase, RnavFlightPhase, AircraftSpeedCategory
from FlightPlanner.helpers import Distance, Unit
import math
class RnavGnssTolerance:
def __init__(self, rnavSpecification_0, rnavGnssFlightPhase_0, rnavFlightPhase_0, aircraftSpeedCategory_0, distance_0 = None, distance_1 = None, distance_2 = None):
self.asw = None
self.att = None
self.xtt = None
if distance_1 != None:
self.asw = distance_2.NauticalMiles;
self.att = distance_1.NauticalMiles;
self.xtt = distance_0.NauticalMiles;
if rnavGnssFlightPhase_0 != None:
self.method_0(rnavSpecification_0, rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
return
if rnavFlightPhase_0 != None:
rnavGnssFlightPhase = RnavGnssFlightPhase.Enroute;
if (rnavFlightPhase_0 == RnavFlightPhase.SID):
if (distance_0.NauticalMiles >= 30):
rnavGnssFlightPhase = RnavGnssFlightPhase.StarSid;
elif (distance_0.NauticalMiles < 15):
rnavGnssFlightPhase = RnavGnssFlightPhase.Sid15;
else:
rnavGnssFlightPhase = RnavGnssFlightPhase.Star30Sid30IfIafMa30;
elif rnavFlightPhase_0 == RnavFlightPhase.STAR:
if (distance_0.NauticalMiles < 30):
rnavGnssFlightPhase = RnavGnssFlightPhase.Star30Sid30IfIafMa30;
else:
rnavGnssFlightPhase = RnavGnssFlightPhase.StarSid;
elif rnavFlightPhase_0 == RnavFlightPhase.IafIf:
rnavGnssFlightPhase = RnavGnssFlightPhase.Star30Sid30IfIafMa30;
elif rnavFlightPhase_0 == RnavFlightPhase.Faf:
rnavGnssFlightPhase = RnavGnssFlightPhase.Faf;
elif rnavFlightPhase_0 == RnavFlightPhase.MissedApproach:
if (distance_0.NauticalMiles >= 30):
rnavGnssFlightPhase = RnavGnssFlightPhase.StarSid;
elif (distance_0.NauticalMiles < 15):
rnavGnssFlightPhase = RnavGnssFlightPhase.Ma15;
else:
rnavGnssFlightPhase = RnavGnssFlightPhase.Star30Sid30IfIafMa30;
self.method_0(rnavSpecification_0, rnavGnssFlightPhase, aircraftSpeedCategory_0);
pass
def method_0(self, rnavSpecification_0, rnavGnssFlightPhase_0, aircraftSpeedCategory_0):
num = 0.0;
num1 = 0.0;
RnavGnssTolerance.smethod_2(rnavSpecification_0, rnavGnssFlightPhase_0);
if rnavSpecification_0 == RnavSpecification.Rnav5:
num1 = 2.51;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.Rnav2:
if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Enroute):
if (rnavGnssFlightPhase_0 == RnavGnssFlightPhase.StarSid):
num1 = 2;
else:
num1 = 1;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.Rnav1:
if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Enroute):
if (rnavGnssFlightPhase_0 == RnavGnssFlightPhase.StarSid):
num1 = 2;
else:
num1 = 1;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.Rnp4:
num1 = 4;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.Rnp2:
num1 = 2;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.Rnp1:
num1 = 1;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.ARnp2:
num1 = 2;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.ARnp1:
if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Faf):
if (rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Mapt):
num1 = 0.3;
else:
num1 = 1;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.ARnp09:
if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Faf):
if (rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Mapt):
num1 = 0.3;
else:
num1 = 0.9;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.ARnp08:
if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Faf):
if (rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Mapt):
num1 = 0.3;
else:
num1 = 0.8;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.ARnp07:
if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Faf):
if (rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Mapt):
num1 = 0.3;
else:
num1 = 0.7;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.ARnp06:
if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Faf):
if (rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Mapt):
num1 = 0.3;
else:
num1 = 0.6;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.ARnp05:
if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Faf):
if (rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Mapt):
num1 = 0.3;
else:
num1 = 0.5;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.ARnp04:
if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Faf):
if (rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Mapt):
num1 = 0.3;
else:
num1 = 0.4;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
elif rnavSpecification_0 == RnavSpecification.ARnp03:
num1 = 0.3;
num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
        elif rnavSpecification_0 == RnavSpecification.RnpApch:
            # XTT is 1 NM for the 30/15 NM arrival/missed-approach phases and
            # 0.3 NM from the FAF onwards; the original fall-through always
            # overwrote num1 with 1.
            if (rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Star30Sid30IfIafMa30 and
                    rnavGnssFlightPhase_0 != RnavGnssFlightPhase.Ma15):
                num1 = 0.3;
            else:
                num1 = 1;
            num = self.method_1(rnavGnssFlightPhase_0, aircraftSpeedCategory_0);
        else:
            # Realizes the commented-out C# ArgumentException from the decompiled source.
            raise ValueError('RNAV specification not supported: %s' % rnavSpecification_0)
self.xtt = num1;
self.att = 0.8 * self.xtt;
self.asw = 1.5 * self.xtt + num;
def method_1(self, rnavGnssFlightPhase_0, aircraftSpeedCategory_0):
if rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Enroute or rnavGnssFlightPhase_0 == RnavGnssFlightPhase.StarSid:
if (aircraftSpeedCategory_0 == AircraftSpeedCategory.H):
return 1;
return 2;
elif rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Star30Sid30IfIafMa30 or rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Faf:
if (aircraftSpeedCategory_0 == AircraftSpeedCategory.H):
return 0.7;
return 1;
elif rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Sid15 or rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Ma15 or rnavGnssFlightPhase_0 == RnavGnssFlightPhase.Mapt:
if (aircraftSpeedCategory_0 == AircraftSpeedCategory.H):
return 0.35;
return 0.5;
        else:
            # Realizes the commented-out C# ArgumentException from the decompiled source.
            raise ValueError('RNAV flight phase not supported: %s' % rnavGnssFlightPhase_0)
@staticmethod
def smethod_0(rnavSpecification_0):
rnavGnssFlightPhases = [];
if rnavSpecification_0 == RnavSpecification.Rnav5 or rnavSpecification_0 == RnavSpecification.Rnav2 or rnavSpecification_0 == RnavSpecification.Rnp4 or rnavSpecification_0 == RnavSpecification.Rnp2:
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Enroute);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.StarSid);
elif rnavSpecification_0 == RnavSpecification.Rnav1 or rnavSpecification_0 == RnavSpecification.Rnp1 or rnavSpecification_0 == RnavSpecification.ARnp1:
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Enroute);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.StarSid);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Star30Sid30IfIafMa30);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Sid15);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Ma15);
elif rnavSpecification_0 == RnavSpecification.ARnp2:
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Enroute);
elif rnavSpecification_0 == RnavSpecification.ARnp09 or rnavSpecification_0 == RnavSpecification.ARnp08 or rnavSpecification_0 == RnavSpecification.ARnp07 or rnavSpecification_0 == RnavSpecification.ARnp06 or rnavSpecification_0 == RnavSpecification.ARnp05 or rnavSpecification_0 == RnavSpecification.ARnp04 or rnavSpecification_0 == RnavSpecification.ARnp03:
rnavGnssFlightPhases.append(RnavGnssFlightPhase.StarSid);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Star30Sid30IfIafMa30);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Sid15);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Ma15);
elif rnavSpecification_0 == RnavSpecification.RnpApch:
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Star30Sid30IfIafMa30);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Faf);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Mapt);
rnavGnssFlightPhases.append(RnavGnssFlightPhase.Ma15);
        else:
            raise ValueError('RNAV specification not supported: %s' % rnavSpecification_0)
return rnavGnssFlightPhases;
@staticmethod
def smethod_1(rnavSpecification_0):
rnavFlightPhases = [];
if rnavSpecification_0 == RnavSpecification.Rnav5 or rnavSpecification_0 == RnavSpecification.Rnav2 or rnavSpecification_0 == RnavSpecification.Rnp4 or rnavSpecification_0 == RnavSpecification.Rnp2:
rnavFlightPhases.append(RnavFlightPhase.Enroute);
rnavFlightPhases.append(RnavFlightPhase.STAR);
rnavFlightPhases.append(RnavFlightPhase.SID);
elif rnavSpecification_0 == RnavSpecification.Rnav1 or rnavSpecification_0 == RnavSpecification.Rnp1 or rnavSpecification_0 == RnavSpecification.ARnp1:
rnavFlightPhases.append(RnavFlightPhase.Enroute);
rnavFlightPhases.append(RnavFlightPhase.STAR);
rnavFlightPhases.append(RnavFlightPhase.SID);
rnavFlightPhases.append(RnavFlightPhase.IafIf);
rnavFlightPhases.append(RnavFlightPhase.MissedApproach);
elif rnavSpecification_0 == RnavSpecification.ARnp2:
rnavFlightPhases.append(RnavFlightPhase.Enroute);
elif rnavSpecification_0 == RnavSpecification.ARnp09 or rnavSpecification_0 == RnavSpecification.ARnp08 or rnavSpecification_0 == RnavSpecification.ARnp07 or rnavSpecification_0 == RnavSpecification.ARnp06 or rnavSpecification_0 == RnavSpecification.ARnp05 or rnavSpecification_0 == RnavSpecification.ARnp04 or rnavSpecification_0 == RnavSpecification.ARnp03:
rnavFlightPhases.append(RnavFlightPhase.STAR);
rnavFlightPhases.append(RnavFlightPhase.SID);
rnavFlightPhases.append(RnavFlightPhase.IafIf);
rnavFlightPhases.append(RnavFlightPhase.MissedApproach);
elif rnavSpecification_0 == RnavSpecification.RnpApch:
rnavFlightPhases.append(RnavFlightPhase.STAR);
rnavFlightPhases.append(RnavFlightPhase.SID);
rnavFlightPhases.append(RnavFlightPhase.IafIf);
rnavFlightPhases.append(RnavFlightPhase.Faf);
rnavFlightPhases.append(RnavFlightPhase.MissedApproach);
        else:
            raise ValueError('RNAV specification not supported: %s' % rnavSpecification_0)
return rnavFlightPhases;
@staticmethod
def smethod_2(rnavSpecification_0, rnavGnssFlightPhase_0):
enumerator = RnavGnssTolerance.smethod_0(rnavSpecification_0);
for current in enumerator:
if (current != rnavGnssFlightPhase_0):
continue;
return;
        # Realizes the commented-out C# throw/finally residue from the decompiled source.
        raise ValueError('RNAV flight phase not supported: %s' % rnavGnssFlightPhase_0)
def get_asw(self):
return Distance(self.asw, DistanceUnits.NM)
ASW = property(get_asw, None, None, None)
def get_xtt(self):
return Distance(self.xtt, DistanceUnits.NM)
XTT = property(get_xtt, None, None, None)
def get_att(self):
return Distance(self.att, DistanceUnits.NM)
ATT = property(get_att, None, None, None)
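For orientation, a hedged sketch of driving this class; the enum members come from the FlightPlanner imports above, the numbers follow method_0's RNP APCH branch as fixed above, and it assumes Distance round-trips NM values.
# Illustrative use (enum/Distance behavior assumed from the imports above):
tol = RnavGnssTolerance(RnavSpecification.RnpApch, RnavGnssFlightPhase.Faf,
                        None, AircraftSpeedCategory.C)
print(tol.XTT.NauticalMiles)  # 0.3   cross-track tolerance at the FAF
print(tol.ATT.NauticalMiles)  # ~0.24 (0.8 * XTT)
print(tol.ASW.NauticalMiles)  # ~1.45 (1.5 * XTT + the 1 NM buffer from method_1)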
|
[
"[email protected]"
] | |
60c9c060f101e760e8b9acf844231a85212ed025
|
4ad94b71e30883d6df07a3277265bd6fb7457ba7
|
/python/examples/working_with_datasets/polygons2.py
|
5f0987ea17d5e5bedab0c3c064af72fa075101de
|
[
"MIT"
] |
permissive
|
Tecplot/handyscripts
|
7cb1d4c80f323c785d06b0c8d37aeb0acb67f58c
|
84a89bfecff5479a0319f08eb8aa9df465283830
|
refs/heads/master
| 2023-08-22T15:29:22.629644 | 2023-08-12T01:19:59 | 2023-08-12T01:19:59 | 149,826,165 | 89 | 64 |
MIT
| 2022-01-13T01:11:02 | 2018-09-21T22:47:23 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 4,514 |
py
|
"""Polygonal Finite-element Data Creation (Part 2)
This script creates a quad of two triangles just as in Part 1, however they are
placed into two different zones. Boundary connections are then made to stitch
the two triangles together.
The data created looks like this::
Node positions (x,y,z):
(1,1,1)
*
/ \
/ \
(0,1,.5) *-----* (1,0,.5)
\ /
\ /
*
(0,0,0)
The two triangles will have separate nodes at the shared locations::
Nodes:
1
Zone 1: / \
/ \
2-----0
2-----1
\ /
Zone 0: \ /
0
The left/right element indices are zero-based. A value of :math:`-1` indicates
no neighboring element while values :math:`(-2, -3, -4 ...)` indicate indices
into the boundary elements array :math:`(0, 1, 2 ...)`.
"""
import itertools as it
import tecplot as tp
from tecplot.constant import *
# Run this script with "-c" to connect to Tecplot 360 on port 7600
# To enable connections in Tecplot 360, click on:
# "Scripting..." -> "PyTecplot Connections..." -> "Accept connections"
import sys
if '-c' in sys.argv:
tp.session.connect()
# First Triangle
# Nodes are in (x,y,z)
nodes0 = ((0, 0, 0), (1, 0, 0.5), (0, 1, 0.5))
scalar_data0 = (0, 1, 2)
# Triangle faces (lines)
faces0 = ((0, 1), (1, 2), (2, 0))
# The (left elements, right elements) adjacent to each face
elements0 = ((0, 0, 0), (-1, -2, -1))
# Number of elements in this zone (a single triangle)
num_elements0 = 1
# One boundary element neighboring the
# first element (index 0)
# of the second zone (index 1)
boundary_elems0 = ((0,),)
boundary_zones0 = ((1,),)
# Second Triangle
nodes1 = ((1, 0, 0.5), (1, 1, 1), (0, 1, 0.5))
scalar_data1 = (1, 3, 2)
faces1 = ((0, 1), (1, 2), (2, 0))
elements1 = ((0, 0, 0), (-1, -1, -2))
num_elements1 = 1
# One boundary element neighboring the
# first element (index 0)
# of the first zone (index 0)
boundary_elems1 = ((0,),)
boundary_zones1 = ((0,),)
# Create the dataset and zones
# Make sure to set the connectivity before any plot or style change.
ds = tp.active_frame().create_dataset('Data', ['x','y','z','s'])
z0 = ds.add_poly_zone(ZoneType.FEPolygon,
name='0: FE Polygon Float (3,1,3) Nodal',
num_points=len(nodes0),
num_elements=num_elements0,
num_faces=len(faces0))
z1 = ds.add_poly_zone(ZoneType.FEPolygon,
name='1: FE Polygon Float (3,1,3) Nodal',
num_points=len(nodes1),
num_elements=num_elements1,
num_faces=len(faces1))
# Fill in and connect first triangle
z0.values('x')[:] = [n[0] for n in nodes0]
z0.values('y')[:] = [n[1] for n in nodes0]
z0.values('z')[:] = [n[2] for n in nodes0]
z0.values('s')[:] = scalar_data0
# Fill in and connect second triangle
z1.values('x')[:] = [n[0] for n in nodes1]
z1.values('y')[:] = [n[1] for n in nodes1]
z1.values('z')[:] = [n[2] for n in nodes1]
z1.values('s')[:] = scalar_data1
# Set face neighbors
z0.facemap.set_mapping(faces0, elements0, boundary_elems0, boundary_zones0)
z1.facemap.set_mapping(faces1, elements1, boundary_elems1, boundary_zones1)
# Write data out in tecplot text format
tp.data.save_tecplot_ascii('polygons2.dat')
### Now we setup a nice view of the data
plot = tp.active_frame().plot(PlotType.Cartesian3D)
plot.activate()
plot.contour(0).colormap_name = 'Sequential - Yellow/Green/Blue'
plot.contour(0).colormap_filter.distribution = ColorMapDistribution.Continuous
for ax in plot.axes:
ax.show = True
plot.show_mesh = False
plot.show_contour = True
plot.show_edge = True
plot.use_translucency = True
fmaps = plot.fieldmaps()
fmaps.surfaces.surfaces_to_plot = SurfacesToPlot.All
fmaps.effects.surface_translucency = 40
# View parameters obtained interactively from Tecplot 360
plot.view.distance = 10
plot.view.width = 2
plot.view.psi = 80
plot.view.theta = 30
plot.view.alpha = 0
plot.view.position = (-4.2, -8.0, 2.3)
# Showing mesh, we can see all the individual triangles
plot.show_mesh = True
fmaps.mesh.line_pattern = LinePattern.Dashed
# ensure consistent output between interactive (connected) and batch
plot.contour(0).levels.reset_to_nice()
tp.export.save_png('polygons2.png', 600, supersample=3)
|
[
"[email protected]"
] | |
be9b797e8c84eb118ee2d5bd847543beea765636
|
548f9594d6634b4f814d8ee3fa9ea6fb8c612bda
|
/examples/simple/config/config_local.py
|
51dcfb603619572c6ab622b9334849a25034ad28
|
[] |
no_license
|
wp-fei/algorithm-base
|
994a10a94c11a2ccc0e076fc76d5d12612832349
|
a54d8a192e364b02514cf1119761d0cb41790d9b
|
refs/heads/master
| 2023-08-19T06:36:59.142576 | 2021-10-14T03:48:31 | 2021-10-14T03:48:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 989 |
py
|
"""
本地环境配置
"""
enable_calllimit = False
PORT = 8000
workers = 2
# max_requests = 2
# max_requests_jitter = 1
# preload_app = False
# keepalive = 2
timeout = 100
# Fixed log location; do not change the log directory
# accesslog='logs/access.log'
# errorlog='logs/error.log'
# accesslog='logs/faccess.log'
# errorlog='logs/ferror.log'
APP_NAME = 'simple'
# REGISTER_AT_EUREKA = True
# EUREKA_SERVER = "http://127.0.0.1:7001/eureka/"
preload_app = True
# Whether to enable the liveness probe
ENABLE_LIVENESS_PROB = False
LIVENESS_PROB = {
    # Seconds to wait after container start before the liveness and
    # readiness probes are initialized. Minimum is 0.
    "initialDelaySeconds": 2,
    # Interval between probes, in seconds. Minimum is 1.
    "periodSeconds": 5,
    # Seconds after which a probe times out. Minimum is 1.
    "timeoutSeconds": 1,
    # Restart the container after this many consecutive failures.
    "failureThreshold": 3,
}
# REDIS = {
# 'host': 'localhost',
# 'port': 6379,
# }
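Nothing here shows how these settings are consumed; one plausible sketch follows, in which the module path, health endpoint, and Kubernetes wiring are all assumptions.
# Hypothetical consumer of this config module (names assumed from above):
import config_local as cfg

if cfg.ENABLE_LIVENESS_PROB:
    liveness_probe = dict(cfg.LIVENESS_PROB)
    liveness_probe['httpGet'] = {'path': '/health', 'port': cfg.PORT}  # endpoint assumed
print('%s on port %d with %d workers' % (cfg.APP_NAME, cfg.PORT, cfg.workers))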
|
[
"[email protected]"
] | |
335ef4cdde8d85825c1c527334f631a489ffa8db
|
00b762e37ecef30ed04698033f719f04be9c5545
|
/scripts/test_results/pipenv_test_results/conflicts/1_test_project_expected.py
|
d299260e39f9888bddc99b106636343d37d0a55c
|
[] |
no_license
|
kenji-nicholson/smerge
|
4f9af17e2e516333b041727b77b8330e3255b7c2
|
3da9ebfdee02f9b4c882af1f26fe2e15d037271b
|
refs/heads/master
| 2020-07-22T02:32:03.579003 | 2018-06-08T00:40:53 | 2018-06-08T00:40:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,834 |
py
|
# -*- coding=utf-8 -*-
import io
import pytest
import os
from pipenv.project import Project
from pipenv.utils import temp_environ
from pipenv.patched import pipfile
@pytest.mark.project
@pytest.mark.sources
@pytest.mark.environ
def test_pipfile_envvar_expansion(PipenvInstance):
with PipenvInstance(chdir=True) as p:
with temp_environ():
with open(p.pipfile_path, 'w') as f:
f.write("""
[[source]]
url = 'https://${TEST_HOST}/simple'
verify_ssl = false
name = "pypi"
[packages]
pytz = "*"
""".strip())
os.environ['TEST_HOST'] = 'localhost:5000'
project = Project()
assert project.sources[0]['url'] == 'https://localhost:5000/simple'
assert 'localhost:5000' not in str(pipfile.load(p.pipfile_path))
@pytest.mark.project
@pytest.mark.sources
@pytest.mark.parametrize('lock_first', [True, False])
def test_get_source(PipenvInstance, pypi, lock_first):
with PipenvInstance(pypi=pypi, chdir=True) as p:
with open(p.pipfile_path, 'w') as f:
contents = """
[[source]]
url = "{0}"
verify_ssl = false
name = "testindex"
[[source]]
url = "https://pypi.python.org/simple"
verify_ssl = "true"
name = "pypi"
[packages]
pytz = "*"
six = {{version = "*", index = "pypi"}}
[dev-packages]
""".format(os.environ['PIPENV_TEST_INDEX']).strip()
f.write(contents)
if lock_first:
# force source to be cached
c = p.pipenv('lock')
assert c.return_code == 0
project = Project()
sources = [
['pypi', 'https://pypi.python.org/simple'],
['testindex', os.environ.get('PIPENV_TEST_INDEX')]
]
for src in sources:
name, url = src
source = [s for s in project.pipfile_sources if s.get('name') == name]
assert source
source = source[0]
assert source['name'] == name
assert source['url'] == url
assert sorted(source.items()) == sorted(project.get_source(name=name).items())
assert sorted(source.items()) == sorted(project.get_source(url=url).items())
assert sorted(source.items()) == sorted(project.find_source(name).items())
assert sorted(source.items()) == sorted(project.find_source(url).items())
@pytest.mark.install
@pytest.mark.project
@pytest.mark.parametrize('newlines', [u'\n', u'\r\n'])
def test_maintain_file_line_endings(PipenvInstance, pypi, newlines):
with PipenvInstance(pypi=pypi, chdir=True) as p:
# Initial pipfile + lockfile generation
c = p.pipenv('install pytz')
assert c.return_code == 0
# Rewrite each file with parameterized newlines
for fn in [p.pipfile_path, p.lockfile_path]:
with io.open(fn) as f:
contents = f.read()
written_newlines = f.newlines
assert written_newlines == u'\n', '{0!r} != {1!r} for {2}'.format(
written_newlines, u'\n', fn,
)
            # The explicit assert message works around https://github.com/pytest-dev/pytest/issues/3443
with io.open(fn, 'w', newline=newlines) as f:
f.write(contents)
        # Run pipenv install to programmatically rewrite
c = p.pipenv('install chardet')
assert c.return_code == 0
# Make sure we kept the right newlines
for fn in [p.pipfile_path, p.lockfile_path]:
with io.open(fn) as f:
f.read() # Consumes the content to detect newlines.
actual_newlines = f.newlines
assert actual_newlines == newlines, '{0!r} != {1!r} for {2}'.format(
actual_newlines, newlines, fn,
)
            # The explicit assert message works around https://github.com/pytest-dev/pytest/issues/3443
|
[
"[email protected]"
] | |
bb4982fbff64c4005c1bd748e6ca93e826ddc357
|
c04acaa6ee9c6a7c365e217bc78039fa9c77833e
|
/my_apps/web/migrations/0002_auto_20160913_0025.py
|
d61f89d3bc2746ac0e88efdd2c9d5907f5c0832d
|
[] |
no_license
|
danielhuamani/django-la-cuzquena
|
0386800d640b224d94b0fac2d83f999b60d7da85
|
a6f4aaf44775b27328d073a65f1d0f50eff51fad
|
refs/heads/master
| 2020-12-05T04:51:01.077860 | 2016-09-17T13:56:58 | 2016-09-17T13:56:58 | 67,900,351 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,232 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-13 05:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import filebrowser.fields
class Migration(migrations.Migration):
dependencies = [
('web', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Contacto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=120, verbose_name='Nombre')),
('telefono', models.CharField(max_length=20, verbose_name='Telefono')),
('Correo', models.EmailField(max_length=254, verbose_name='Correo')),
('mensaje', models.TextField(verbose_name='Mensaje')),
],
options={
'verbose_name': 'Contacto',
'verbose_name_plural': 'Contacto',
},
),
migrations.CreateModel(
name='MovilizarEmpresa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=120, verbose_name='Nombre')),
('Correo', models.EmailField(max_length=254, verbose_name='Correo')),
('mensaje', models.TextField(verbose_name='Mensaje')),
],
options={
'verbose_name': 'Movilizar a tu Empresa',
'verbose_name_plural': 'Movilizar a tu Empresas',
},
),
migrations.CreateModel(
name='Nosotros',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nosotros_amarillo', models.TextField(verbose_name='Nosotros texto amarillo')),
('nosotros_plomo', models.TextField(verbose_name='Nosotros texto plomo')),
('banner', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Banner')),
('mision', models.TextField(verbose_name='Misi\xf3n')),
('vision', models.TextField(verbose_name='Visi\xf3n')),
],
options={
'verbose_name': 'Nosotros',
'verbose_name_plural': 'Nosotross',
},
),
migrations.CreateModel(
name='NuestrosServicios',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('posicion', models.IntegerField(default=0, verbose_name='Posicion')),
('imagen', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Servicios')),
('titulo', models.CharField(max_length=120, verbose_name='Titulo')),
('descripcion', models.TextField(verbose_name='Descripci\xf3n')),
],
options={
'verbose_name': 'Valores',
'verbose_name_plural': 'Valoress',
},
),
migrations.CreateModel(
name='Servicios',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('banner', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Servicios Banner')),
],
options={
'verbose_name': 'Nosotros',
'verbose_name_plural': 'Nosotross',
},
),
migrations.CreateModel(
name='Valores',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('posicion', models.IntegerField(default=0, verbose_name='Posicion')),
('imagen', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Valores')),
('titulo', models.CharField(max_length=120, verbose_name='Titulo')),
('descripcion', models.TextField(verbose_name='Descripci\xf3n')),
('nosotros', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nosotros_valores', to='web.Nosotros')),
],
options={
'verbose_name': 'Valores',
'verbose_name_plural': 'Valoress',
},
),
migrations.CreateModel(
name='Vehiculos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('posicion', models.IntegerField(default=0, verbose_name='Posicion')),
('vehiculo', filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Vehiculos Imagen')),
('descripcion', models.TextField(verbose_name='Descripci\xf3n')),
],
options={
'verbose_name': 'Vehiculos',
'verbose_name_plural': 'Vehiculoss',
},
),
migrations.AlterField(
model_name='home',
name='nosotros_image',
field=filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Nosostros Imagen'),
),
migrations.AlterField(
model_name='home',
name='servicios_image',
field=filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Servicios Imagen'),
),
migrations.AlterField(
model_name='home',
name='vehiculos_image',
field=filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Vehiculos Imagen'),
),
migrations.AlterField(
model_name='homebanner',
name='banner',
field=filebrowser.fields.FileBrowseField(max_length=200, verbose_name='Home Banner'),
),
migrations.AddField(
model_name='nuestrosservicios',
name='servicio',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='servicios_valores', to='web.Servicios'),
),
]
|
[
"[email protected]"
] |