Dataset schema (column: type, observed range or number of distinct values):

blob_id: string, length 40
directory_id: string, length 40
path: string, length 3 to 616
content_id: string, length 40
detected_licenses: list, length 0 to 112
license_type: string, 2 classes
repo_name: string, length 5 to 115
snapshot_id: string, length 40
revision_id: string, length 40
branch_name: string, 777 classes
visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
github_id: int64, 4.92k to 681M, nullable
star_events_count: int64, 0 to 209k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 22 classes
gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
gha_language: string, 149 classes
src_encoding: string, 26 classes
language: string, 1 class
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 3 to 10.2M
extension: string, 188 classes
content: string, length 3 to 10.2M
authors: list, length 1
author_id: string, length 1 to 132

Rows follow in this column order, with fields separated by "|".
35264d72969241f81b66b9d5a4b9c691c83f4953
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03147/s721564434.py
|
2d4d71c12e237e3c8db09fbb5ea107d30f69bc5b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
# Read n flower heights and accumulate every rise relative to the previous
# height; the accumulated total is printed as the answer.
n = int(input())
h = list(map(int, input().split()))
ans = 0
p = 0
for i in range(n):
    if h[i] >= p:
        ans += h[i] - p
    p = h[i]
print(ans)
|
[
"[email protected]"
] | |
acc77520c22c42f333191aa95a4b6817dbac255d
|
29d1e5d1190ddd6cdf1e1b97b91f442765905454
|
/Chapter 2/demo_str/demo_split.py
|
056d3e3c61bab0e7d590ab3dc4357f41524963ed
|
[] |
no_license
|
SkewwG/SCIP_Python_Learn
|
abe199e1701022c1491c9e5d6de98d653c267ab9
|
11216e958f5a77c90c0583ca5cfdb1ec9fb2896c
|
refs/heads/master
| 2021-09-13T18:40:42.426924 | 2018-05-03T07:33:52 | 2018-05-03T07:33:52 | 116,967,610 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 784 |
py
|
'''
split(...)
Splits the string on the given delimiter; if the maxsplit argument is given,
at most maxsplit splits are performed.
S.split(sep=None, maxsplit=-1) -> list of strings
Return a list of the words in S, using sep as the
delimiter string. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified or is None, any
whitespace string is a separator and empty strings are
removed from the result.
'''
help(str.split)  # help() prints the documentation itself; wrapping it in print() only adds a stray "None"
a = "abcdeabcdeabcdeabcdeabcdeabcde"
print(a.split('c')) # ['ab', 'deab', 'deab', 'deab', 'deab', 'deab', 'de']
print(a.split('c', 1)) # ['ab', 'deabcdeabcdeabcdeabcdeabcde']
print(a.split('c', 3)) # ['ab', 'deab', 'deab', 'deabcdeabcdeabcde']
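# The examples above only use an explicit separator. A small assumed addition
# (not in the original file) illustrating the default behaviour described in the
# docstring, where runs of whitespace act as a single separator and empty
# strings are dropped from the result:
b = "  one \t two\n three  "
print(b.split())      # ['one', 'two', 'three']
print(b.split(' '))   # ['', '', 'one', '\t', 'two\n', 'three', '', '']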
|
[
"[email protected]"
] | |
810083db87880c4c4b1795f932349768dc679df6
|
84ecc3f416647b4c6e40faa6d5392421bc13a4ec
|
/exercise3.py
|
372b35f405eb76c6295a87135502cea961c1395d
|
[] |
no_license
|
Deer5000/DebuggingExercise1_4
|
b358670708c1b74125b3badea256ee980aef6672
|
35caeeeb8fce0480aa99ea3f7ee1de05624cf9df
|
refs/heads/master
| 2023-02-28T15:41:41.475716 | 2021-01-24T23:55:45 | 2021-01-24T23:55:45 | 332,583,383 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,561 |
py
|
"""
Exercise 3
"""
# PART 1: Gather Information
#
# TODO: Gather information about the source of the error and paste your findings here. E.g.:
# - What is the expected vs. the actual output?
# - expected a sorted list but got an error instead
# - What error message (if any) is there?
# - Error message is "IndexError: list index out of range"
# - What line number is causing the error?
# - while key < arr[j]
# - What can you deduce about the cause of the error?
# - The developer did not handle where the backward scan should stop, so arr[j] is read before checking that j is still a valid index
# PART 2: State Assumptions
#
# TODO: State your assumptions here or say them out loud to your partner ... Worked with donny Vallejo
# Make sure to be SPECIFIC about what each of your assumptions is!
# HINT: It may help to draw a picture to clarify what your assumptions are.
# - The developer didn't foresee that once j drops below 0 there is no element left to move, so the bounds check on j must come before the comparison with arr[j]
# What is insertion sort: https://www.youtube.com/watch?v=JU767SDMDvA
def insertion_sort(arr):
    """Performs an insertion sort on the list arr."""
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        # Check the bound before indexing: move elements of arr[0..i-1] that are
        # greater than key one position ahead of their current position.
        while j >= 0 and key < arr[j]:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key
    return arr


if __name__ == '__main__':
    print('### Problem 3 ###')
    answer = insertion_sort([5, 2, 3, 1, 6])
    print(answer)
|
[
"[email protected]"
] | |
bfbbe25dbfa3f0b2ae468d54e782e2f14c642e75
|
ecdf9256853e11d6105e2b9ad92ba912602d97d7
|
/hackerrank/implementation/utopian_tree.py
|
3925093946f80b538058215341cddd8a3778c7ea
|
[] |
no_license
|
rgsriram/Algorithms
|
364fda568356834e32ec247438d21202bebc838d
|
d4f9acb1a60bd098a601d8173dfdad447a02fd74
|
refs/heads/master
| 2021-01-10T05:11:05.688731 | 2019-03-20T04:59:10 | 2019-03-20T04:59:10 | 49,176,180 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 490 |
py
|
__author__ = 'sriram'
"""
Problem from: HackerRank
Domain: Algorithms
Name: Utopian Tree
"""
# Python 2 solution (xrange, raw_input and the print statement): the tree
# doubles in height on every odd (spring) cycle and grows by 1 on every even
# (summer) cycle.
def get_height(seasons, initial_height=1):
    for i in xrange(1, (seasons + 1), 1):
        if i % 2 == 0:
            initial_height += 1
        else:
            initial_height *= 2
    return initial_height


def main():
    t = int(raw_input().strip())
    for a0 in xrange(t):
        n = int(raw_input().strip())
        print get_height(seasons=n)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
20ea3f691afaf1a27832eb25dcf13739aaf16da2
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/cdn/get_afd_origin_group.py
|
58b27e3e39791dc9915dc08b3fec02a847d3968e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,379 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetAFDOriginGroupResult',
'AwaitableGetAFDOriginGroupResult',
'get_afd_origin_group',
'get_afd_origin_group_output',
]
@pulumi.output_type
class GetAFDOriginGroupResult:
"""
AFDOrigin group comprising of origins is used for load balancing to origins when the content cannot be served from CDN.
"""
def __init__(__self__, deployment_status=None, health_probe_settings=None, id=None, load_balancing_settings=None, name=None, provisioning_state=None, response_based_afd_origin_error_detection_settings=None, session_affinity_state=None, system_data=None, traffic_restoration_time_to_healed_or_new_endpoints_in_minutes=None, type=None):
if deployment_status and not isinstance(deployment_status, str):
raise TypeError("Expected argument 'deployment_status' to be a str")
pulumi.set(__self__, "deployment_status", deployment_status)
if health_probe_settings and not isinstance(health_probe_settings, dict):
raise TypeError("Expected argument 'health_probe_settings' to be a dict")
pulumi.set(__self__, "health_probe_settings", health_probe_settings)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if load_balancing_settings and not isinstance(load_balancing_settings, dict):
raise TypeError("Expected argument 'load_balancing_settings' to be a dict")
pulumi.set(__self__, "load_balancing_settings", load_balancing_settings)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if response_based_afd_origin_error_detection_settings and not isinstance(response_based_afd_origin_error_detection_settings, dict):
raise TypeError("Expected argument 'response_based_afd_origin_error_detection_settings' to be a dict")
pulumi.set(__self__, "response_based_afd_origin_error_detection_settings", response_based_afd_origin_error_detection_settings)
if session_affinity_state and not isinstance(session_affinity_state, str):
raise TypeError("Expected argument 'session_affinity_state' to be a str")
pulumi.set(__self__, "session_affinity_state", session_affinity_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if traffic_restoration_time_to_healed_or_new_endpoints_in_minutes and not isinstance(traffic_restoration_time_to_healed_or_new_endpoints_in_minutes, int):
raise TypeError("Expected argument 'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes' to be a int")
pulumi.set(__self__, "traffic_restoration_time_to_healed_or_new_endpoints_in_minutes", traffic_restoration_time_to_healed_or_new_endpoints_in_minutes)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="deploymentStatus")
def deployment_status(self) -> str:
return pulumi.get(self, "deployment_status")
@property
@pulumi.getter(name="healthProbeSettings")
def health_probe_settings(self) -> Optional['outputs.HealthProbeParametersResponse']:
"""
Health probe settings to the origin that is used to determine the health of the origin.
"""
return pulumi.get(self, "health_probe_settings")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="loadBalancingSettings")
def load_balancing_settings(self) -> Optional['outputs.LoadBalancingSettingsParametersResponse']:
"""
Load balancing settings for a backend pool
"""
return pulumi.get(self, "load_balancing_settings")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning status
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="responseBasedAfdOriginErrorDetectionSettings")
def response_based_afd_origin_error_detection_settings(self) -> Optional['outputs.ResponseBasedOriginErrorDetectionParametersResponse']:
"""
The JSON object that contains the properties to determine origin health using real requests/responses. This property is currently not supported.
"""
return pulumi.get(self, "response_based_afd_origin_error_detection_settings")
@property
@pulumi.getter(name="sessionAffinityState")
def session_affinity_state(self) -> Optional[str]:
"""
Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'
"""
return pulumi.get(self, "session_affinity_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="trafficRestorationTimeToHealedOrNewEndpointsInMinutes")
def traffic_restoration_time_to_healed_or_new_endpoints_in_minutes(self) -> Optional[int]:
"""
Time in minutes to shift the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new endpoint is added. Default is 10 mins. This property is currently not supported.
"""
return pulumi.get(self, "traffic_restoration_time_to_healed_or_new_endpoints_in_minutes")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetAFDOriginGroupResult(GetAFDOriginGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAFDOriginGroupResult(
deployment_status=self.deployment_status,
health_probe_settings=self.health_probe_settings,
id=self.id,
load_balancing_settings=self.load_balancing_settings,
name=self.name,
provisioning_state=self.provisioning_state,
response_based_afd_origin_error_detection_settings=self.response_based_afd_origin_error_detection_settings,
session_affinity_state=self.session_affinity_state,
system_data=self.system_data,
traffic_restoration_time_to_healed_or_new_endpoints_in_minutes=self.traffic_restoration_time_to_healed_or_new_endpoints_in_minutes,
type=self.type)
def get_afd_origin_group(origin_group_name: Optional[str] = None,
profile_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAFDOriginGroupResult:
"""
AFDOrigin group comprising of origins is used for load balancing to origins when the content cannot be served from CDN.
API Version: 2020-09-01.
:param str origin_group_name: Name of the origin group which is unique within the endpoint.
:param str profile_name: Name of the CDN profile which is unique within the resource group.
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['originGroupName'] = origin_group_name
__args__['profileName'] = profile_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:cdn:getAFDOriginGroup', __args__, opts=opts, typ=GetAFDOriginGroupResult).value
return AwaitableGetAFDOriginGroupResult(
deployment_status=__ret__.deployment_status,
health_probe_settings=__ret__.health_probe_settings,
id=__ret__.id,
load_balancing_settings=__ret__.load_balancing_settings,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
response_based_afd_origin_error_detection_settings=__ret__.response_based_afd_origin_error_detection_settings,
session_affinity_state=__ret__.session_affinity_state,
system_data=__ret__.system_data,
traffic_restoration_time_to_healed_or_new_endpoints_in_minutes=__ret__.traffic_restoration_time_to_healed_or_new_endpoints_in_minutes,
type=__ret__.type)
@_utilities.lift_output_func(get_afd_origin_group)
def get_afd_origin_group_output(origin_group_name: Optional[pulumi.Input[str]] = None,
profile_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAFDOriginGroupResult]:
"""
AFDOrigin group comprising of origins is used for load balancing to origins when the content cannot be served from CDN.
API Version: 2020-09-01.
:param str origin_group_name: Name of the origin group which is unique within the endpoint.
:param str profile_name: Name of the CDN profile which is unique within the resource group.
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
...
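# A minimal usage sketch (not part of the generated SDK module); the resource
# names below are hypothetical placeholders for an existing AFD origin group:
#
#     import pulumi
#     from pulumi_azure_native import cdn
#
#     origin_group = cdn.get_afd_origin_group(
#         origin_group_name="my-origin-group",
#         profile_name="my-afd-profile",
#         resource_group_name="my-resource-group")
#     pulumi.export("originGroupProvisioningState", origin_group.provisioning_state)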
|
[
"[email protected]"
] | |
92d61573f163cc264a1c3357554348a137fda00e
|
cfc9bb332f6c18c52f941aa4919e80a736b33453
|
/code/set_1_array/287_find_the_duplicate_number.py
|
897e247c171be6235bb10e77d7132c6e689772ae
|
[] |
no_license
|
JagritiG/interview-questions-answers-python
|
8992c64b754d81c76f4d2d29f92fbd9abe522a15
|
411536a94d4a2f9a64e4f06a41dc8aef4111e80f
|
refs/heads/master
| 2022-11-23T21:38:53.986360 | 2020-08-02T22:17:13 | 2020-08-02T22:17:13 | 267,738,880 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,532 |
py
|
# Find the Duplicate Number
# Given an array nums containing n + 1 integers where each integer is between 1 and n (inclusive),
# prove that at least one duplicate number must exist. Assume that there is only one duplicate number,
# find the duplicate one.
# Example 1:
# Input: [1,3,4,2,2]
# Output: 2
# Example 2:
# Input: [3,1,3,4,2]
# Output: 3
# Note:
# You must not modify the array (assume the array is read only).
# You must use only constant, O(1) extra space.
# Your runtime complexity should be less than O(n2).
# There is only one duplicate number in the array, but it could be repeated more than once.
# =======================================================================================================
# Algorithm:
# Traverse the array --> for each index i, perform the following steps:
# 1. if nums[abs(nums[i])] >= 0, make that element negative (mark the value abs(nums[i]) as seen)
# 2. if nums[abs(nums[i])] < 0, the value abs(nums[i]) has already been encountered,
#    so return abs(nums[i]) <-- result
# TC: O(n)
# SC: O(1)
# =======================================================================================================
def find_duplicate(nums):
    for i in range(len(nums)):
        if nums[abs(nums[i])] < 0:
            return abs(nums[i])
        else:
            nums[abs(nums[i])] *= -1


if __name__ == "__main__":
    # inputs = [1, 3, 4, 2, 2]  # output: 2
    inputs = [3, 1, 3, 4, 2]  # output: 3
    print(find_duplicate(inputs))
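# Note: the marking approach above negates entries of nums, although the problem
# statement says the array must not be modified. A sketch (not in the original
# file) of the Floyd cycle-detection variant, which keeps the input read-only
# and still uses O(1) extra space:
def find_duplicate_floyd(nums):
    # Treat nums as a linked list in which index i points to nums[i];
    # the duplicate value is the entry point of the resulting cycle.
    slow = fast = nums[0]
    while True:
        slow = nums[slow]
        fast = nums[nums[fast]]
        if slow == fast:
            break
    slow = nums[0]
    while slow != fast:
        slow = nums[slow]
        fast = nums[fast]
    return slow


# print(find_duplicate_floyd([3, 1, 3, 4, 2]))  # 3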
|
[
"[email protected]"
] | |
20c60c4ab3ffcaef59f386754bf8ec8172462fd8
|
404fafd24140a474b868a3f19681ffae80f3cef6
|
/oregoninvasiveshotline/reports/search_indexes.py
|
787f067cc9ac921ecf001df3ed123fe526b6738d
|
[] |
no_license
|
wylee/oregoninvasiveshotline
|
50590c2684c4445c58574e773d47936cbccb2d47
|
221f4c5f0307d0e5ffd3f46b8048b5a826388f98
|
refs/heads/develop
| 2020-12-03T09:11:14.606417 | 2017-04-14T17:22:46 | 2017-04-14T17:56:02 | 43,166,993 | 0 | 0 | null | 2015-09-25T18:06:50 | 2015-09-25T18:06:49 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,198 |
py
|
from haystack import indexes
from .models import Report
class ReportIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
report_id = indexes.IntegerField(model_attr='report_id', boost=1.125)
title = indexes.CharField(model_attr='title', boost=1.125)
description = indexes.CharField(model_attr='description')
location = indexes.CharField(model_attr='location')
edrr_status = indexes.CharField(model_attr='get_edrr_status_display')
ofpd = indexes.BooleanField(model_attr='created_by__has_completed_ofpd')
category = indexes.CharField(model_attr='category__name', boost=1.0625)
category_id = indexes.IntegerField(model_attr='category__pk')
claimed_by = indexes.CharField(model_attr='claimed_by', null=True)
claimed_by_id = indexes.IntegerField(model_attr='claimed_by__user_id', null=True)
county = indexes.CharField(model_attr='county__name', null=True, boost=1.0625)
county_id = indexes.IntegerField(model_attr='county__pk', null=True)
created_by = indexes.CharField(model_attr='created_by')
created_by_id = indexes.IntegerField(model_attr='created_by__user_id')
created_on = indexes.DateTimeField(model_attr='created_on')
is_archived = indexes.BooleanField(model_attr='is_archived', default=False)
is_public = indexes.BooleanField(model_attr='is_public', default=False)
species_id = indexes.CharField(model_attr='species__pk', null=True)
species = indexes.CharField(model_attr='species__title', null=True, boost=1.0625)
reported_species = indexes.CharField(model_attr='reported_species__name', null=True)
actual_species = indexes.CharField(model_attr='actual_species__name', null=True)
# Non-indexed fields (i.e., fields that we don't search on but that
# we want available in search results).
icon_url = indexes.CharField(model_attr='icon_url', indexed=False)
image_url = indexes.CharField(model_attr='image_url', default=None, indexed=False)
lat = indexes.FloatField(model_attr='point__y', indexed=False)
lng = indexes.FloatField(model_attr='point__x', indexed=False)
def get_model(self):
return Report
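# A brief query sketch (not part of this module; assumes Haystack is configured
# for the project and the index has been built). The search term is a
# hypothetical example:
#
#     from haystack.query import SearchQuerySet
#     public_reports = SearchQuerySet().models(Report).filter(is_public=True)
#     matches = public_reports.filter(content='knotweed')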
|
[
"[email protected]"
] | |
b5bd8cc19f966c69bb896f02648f30ff4beea112
|
fbfc0e4d72e2d42b079804775f717833b946fab5
|
/conda_build/main_index.py
|
e64aefc6bba616b7a323c37c83953a0de17cb27e
|
[] |
no_license
|
minrk/conda-build
|
a13eb0a6ebc3a66e276b1ab20ffa97b035434c71
|
71b561831665ca7b8c906f87789ea8ffc38c6ce5
|
refs/heads/master
| 2023-06-08T09:51:37.787222 | 2014-02-13T23:48:42 | 2014-02-13T23:48:42 | 16,822,417 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 830 |
py
|
from __future__ import print_function, division, absolute_import
import os
from os.path import abspath
from optparse import OptionParser
from conda_build.index import update_index
def main():
p = OptionParser(
usage="usage: %prog [options] DIR [DIR ...]",
description="display useful information about tar files")
p.add_option('-f', "--force",
action = "store_true",
help = "force reading all files")
p.add_option('-q', "--quiet",
action = "store_true")
opts, args = p.parse_args()
if len(args) == 0:
dir_paths = [os.getcwd()]
else:
dir_paths = [abspath(path) for path in args]
for path in dir_paths:
update_index(path, verbose=not opts.quiet, force=opts.force)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
51b9750566b082570f8361843c139be599d70dc7
|
32ac0ae3eea0d8d8fd60ddee956c6ef864f7a8ae
|
/oop_examples.py
|
51c58bda09af0e691e691f2275baa0428605164a
|
[] |
no_license
|
EricSchles/oop_py
|
1867778c70597a7d91da256be0cf93017e4627df
|
52e30b0e804e4fc925935f95357cedb58da7d06c
|
refs/heads/master
| 2016-09-06T16:37:35.052549 | 2015-07-28T21:30:54 | 2015-07-28T21:30:54 | 39,858,771 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,554 |
py
|
# Python 2 example (uses print statements).
class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

    def __str__(self):
        return repr(self.data)


class Vertex(Node):
    # Note: the mutable default argument ([]) is shared by every Vertex
    # created without an explicit edges list.
    def __init__(self, data, edges=[]):
        self.data = data
        self.edges = edges

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.data == other.data
        elif type(self.data) == type(other):
            return self.data == other
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)


class Graph:
    def __init__(self):
        self.vertices = []
        self.edge_list = []  # a list of dictionaries

    def print_nodes(self):
        for v in self.vertices:
            print v

    def print_edges(self):
        for pair in self.edge_list:
            print pair

    def add_node(self, vertex):
        v = Vertex(vertex)
        self.vertices.append(v)
        self.vertices.sort()

    def add_edge(self, vertex1, vertex2):
        if not vertex1 in self.vertices:
            self.add_node(vertex1)
        if not vertex2 in self.vertices:
            self.add_node(vertex2)
        # These are fresh Vertex objects, not the ones stored in self.vertices.
        v1 = Vertex(vertex1)
        v2 = Vertex(vertex2)
        v1.edges.append(v2)
        v2.edges.append(v1)
        self.edge_list.append({vertex1: vertex2})
        self.edge_list.append({vertex2: vertex1})


if __name__ == '__main__':
    g = Graph()
    g.add_node(5)
    g.add_node(7)
    g.add_edge(5, 7)
    g.print_nodes()
    g.print_edges()
|
[
"[email protected]"
] | |
11f8d9bd50de3aebf2e3913dfc21c0fa4d160fe5
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/databricks/outputs.py
|
38d7ae4b7b6fd0d09214308325c1c546fb631b0e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 21,199 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'AddressSpaceResponse',
'CreatedByResponse',
'EncryptionResponse',
'ManagedIdentityConfigurationResponse',
'SkuResponse',
'VirtualNetworkPeeringPropertiesFormatResponseDatabricksVirtualNetwork',
'VirtualNetworkPeeringPropertiesFormatResponseRemoteVirtualNetwork',
'WorkspaceCustomBooleanParameterResponse',
'WorkspaceCustomParametersResponse',
'WorkspaceCustomStringParameterResponse',
'WorkspaceEncryptionParameterResponse',
'WorkspaceProviderAuthorizationResponse',
]
@pulumi.output_type
class AddressSpaceResponse(dict):
"""
AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network.
"""
def __init__(__self__, *,
address_prefixes: Optional[Sequence[str]] = None):
"""
AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network.
:param Sequence[str] address_prefixes: A list of address blocks reserved for this virtual network in CIDR notation.
"""
if address_prefixes is not None:
pulumi.set(__self__, "address_prefixes", address_prefixes)
@property
@pulumi.getter(name="addressPrefixes")
def address_prefixes(self) -> Optional[Sequence[str]]:
"""
A list of address blocks reserved for this virtual network in CIDR notation.
"""
return pulumi.get(self, "address_prefixes")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CreatedByResponse(dict):
"""
Provides details of the entity that created/updated the workspace.
"""
def __init__(__self__, *,
application_id: str,
oid: str,
puid: str):
"""
Provides details of the entity that created/updated the workspace.
:param str application_id: The application ID of the application that initiated the creation of the workspace. For example, Azure Portal.
:param str oid: The Object ID that created the workspace.
:param str puid: The Personal Object ID corresponding to the object ID above
"""
pulumi.set(__self__, "application_id", application_id)
pulumi.set(__self__, "oid", oid)
pulumi.set(__self__, "puid", puid)
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> str:
"""
The application ID of the application that initiated the creation of the workspace. For example, Azure Portal.
"""
return pulumi.get(self, "application_id")
@property
@pulumi.getter
def oid(self) -> str:
"""
The Object ID that created the workspace.
"""
return pulumi.get(self, "oid")
@property
@pulumi.getter
def puid(self) -> str:
"""
The Personal Object ID corresponding to the object ID above
"""
return pulumi.get(self, "puid")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EncryptionResponse(dict):
"""
The object that contains details of encryption used on the workspace.
"""
def __init__(__self__, *,
key_name: Optional[str] = None,
key_source: Optional[str] = None,
key_vault_uri: Optional[str] = None,
key_version: Optional[str] = None):
"""
The object that contains details of encryption used on the workspace.
:param str key_name: The name of KeyVault key.
:param str key_source: The encryption keySource (provider). Possible values (case-insensitive): Default, Microsoft.Keyvault
:param str key_vault_uri: The Uri of KeyVault.
:param str key_version: The version of KeyVault key.
"""
if key_name is not None:
pulumi.set(__self__, "key_name", key_name)
if key_source is None:
key_source = 'Default'
if key_source is not None:
pulumi.set(__self__, "key_source", key_source)
if key_vault_uri is not None:
pulumi.set(__self__, "key_vault_uri", key_vault_uri)
if key_version is not None:
pulumi.set(__self__, "key_version", key_version)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[str]:
"""
The name of KeyVault key.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="keySource")
def key_source(self) -> Optional[str]:
"""
The encryption keySource (provider). Possible values (case-insensitive): Default, Microsoft.Keyvault
"""
return pulumi.get(self, "key_source")
@property
@pulumi.getter(name="keyVaultUri")
def key_vault_uri(self) -> Optional[str]:
"""
The Uri of KeyVault.
"""
return pulumi.get(self, "key_vault_uri")
@property
@pulumi.getter(name="keyVersion")
def key_version(self) -> Optional[str]:
"""
The version of KeyVault key.
"""
return pulumi.get(self, "key_version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedIdentityConfigurationResponse(dict):
"""
The Managed Identity details for storage account.
"""
def __init__(__self__, *,
principal_id: str,
tenant_id: str,
type: str):
"""
The Managed Identity details for storage account.
:param str principal_id: The objectId of the Managed Identity that is linked to the Managed Storage account.
:param str tenant_id: The tenant Id where the Managed Identity is created.
:param str type: The type of Identity created. It can be either SystemAssigned or UserAssigned.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "tenant_id", tenant_id)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The objectId of the Managed Identity that is linked to the Managed Storage account.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant Id where the Managed Identity is created.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Identity created. It can be either SystemAssigned or UserAssigned.
"""
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SkuResponse(dict):
"""
SKU for the resource.
"""
def __init__(__self__, *,
name: str,
tier: Optional[str] = None):
"""
SKU for the resource.
:param str name: The SKU name.
:param str tier: The SKU tier.
"""
pulumi.set(__self__, "name", name)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def name(self) -> str:
"""
The SKU name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tier(self) -> Optional[str]:
"""
The SKU tier.
"""
return pulumi.get(self, "tier")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VirtualNetworkPeeringPropertiesFormatResponseDatabricksVirtualNetwork(dict):
"""
The remote virtual network should be in the same region. See here to learn more (https://docs.microsoft.com/en-us/azure/databricks/administration-guide/cloud-configurations/azure/vnet-peering).
"""
def __init__(__self__, *,
id: Optional[str] = None):
"""
The remote virtual network should be in the same region. See here to learn more (https://docs.microsoft.com/en-us/azure/databricks/administration-guide/cloud-configurations/azure/vnet-peering).
:param str id: The Id of the databricks virtual network.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The Id of the databricks virtual network.
"""
return pulumi.get(self, "id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VirtualNetworkPeeringPropertiesFormatResponseRemoteVirtualNetwork(dict):
"""
The remote virtual network should be in the same region. See here to learn more (https://docs.microsoft.com/en-us/azure/databricks/administration-guide/cloud-configurations/azure/vnet-peering).
"""
def __init__(__self__, *,
id: Optional[str] = None):
"""
The remote virtual network should be in the same region. See here to learn more (https://docs.microsoft.com/en-us/azure/databricks/administration-guide/cloud-configurations/azure/vnet-peering).
:param str id: The Id of the remote virtual network.
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The Id of the remote virtual network.
"""
return pulumi.get(self, "id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WorkspaceCustomBooleanParameterResponse(dict):
"""
The value which should be used for this field.
"""
def __init__(__self__, *,
type: str,
value: bool):
"""
The value which should be used for this field.
:param str type: The type of variable that this is
:param bool value: The value which should be used for this field.
"""
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> str:
"""
The type of variable that this is
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> bool:
"""
The value which should be used for this field.
"""
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WorkspaceCustomParametersResponse(dict):
"""
Custom Parameters used for Cluster Creation.
"""
def __init__(__self__, *,
aml_workspace_id: Optional['outputs.WorkspaceCustomStringParameterResponse'] = None,
custom_private_subnet_name: Optional['outputs.WorkspaceCustomStringParameterResponse'] = None,
custom_public_subnet_name: Optional['outputs.WorkspaceCustomStringParameterResponse'] = None,
custom_virtual_network_id: Optional['outputs.WorkspaceCustomStringParameterResponse'] = None,
enable_no_public_ip: Optional['outputs.WorkspaceCustomBooleanParameterResponse'] = None,
encryption: Optional['outputs.WorkspaceEncryptionParameterResponse'] = None,
prepare_encryption: Optional['outputs.WorkspaceCustomBooleanParameterResponse'] = None,
require_infrastructure_encryption: Optional['outputs.WorkspaceCustomBooleanParameterResponse'] = None):
"""
Custom Parameters used for Cluster Creation.
:param 'WorkspaceCustomStringParameterResponseArgs' aml_workspace_id: The ID of a Azure Machine Learning workspace to link with Databricks workspace
:param 'WorkspaceCustomStringParameterResponseArgs' custom_private_subnet_name: The name of the Private Subnet within the Virtual Network
:param 'WorkspaceCustomStringParameterResponseArgs' custom_public_subnet_name: The name of a Public Subnet within the Virtual Network
:param 'WorkspaceCustomStringParameterResponseArgs' custom_virtual_network_id: The ID of a Virtual Network where this Databricks Cluster should be created
:param 'WorkspaceCustomBooleanParameterResponseArgs' enable_no_public_ip: Should the Public IP be Disabled?
:param 'WorkspaceEncryptionParameterResponseArgs' encryption: Contains the encryption details for Customer-Managed Key (CMK) enabled workspace.
:param 'WorkspaceCustomBooleanParameterResponseArgs' prepare_encryption: Prepare the workspace for encryption. Enables the Managed Identity for managed storage account.
:param 'WorkspaceCustomBooleanParameterResponseArgs' require_infrastructure_encryption: A boolean indicating whether or not the DBFS root file system will be enabled with secondary layer of encryption with platform managed keys for data at rest.
"""
if aml_workspace_id is not None:
pulumi.set(__self__, "aml_workspace_id", aml_workspace_id)
if custom_private_subnet_name is not None:
pulumi.set(__self__, "custom_private_subnet_name", custom_private_subnet_name)
if custom_public_subnet_name is not None:
pulumi.set(__self__, "custom_public_subnet_name", custom_public_subnet_name)
if custom_virtual_network_id is not None:
pulumi.set(__self__, "custom_virtual_network_id", custom_virtual_network_id)
if enable_no_public_ip is not None:
pulumi.set(__self__, "enable_no_public_ip", enable_no_public_ip)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if prepare_encryption is not None:
pulumi.set(__self__, "prepare_encryption", prepare_encryption)
if require_infrastructure_encryption is not None:
pulumi.set(__self__, "require_infrastructure_encryption", require_infrastructure_encryption)
@property
@pulumi.getter(name="amlWorkspaceId")
def aml_workspace_id(self) -> Optional['outputs.WorkspaceCustomStringParameterResponse']:
"""
The ID of a Azure Machine Learning workspace to link with Databricks workspace
"""
return pulumi.get(self, "aml_workspace_id")
@property
@pulumi.getter(name="customPrivateSubnetName")
def custom_private_subnet_name(self) -> Optional['outputs.WorkspaceCustomStringParameterResponse']:
"""
The name of the Private Subnet within the Virtual Network
"""
return pulumi.get(self, "custom_private_subnet_name")
@property
@pulumi.getter(name="customPublicSubnetName")
def custom_public_subnet_name(self) -> Optional['outputs.WorkspaceCustomStringParameterResponse']:
"""
The name of a Public Subnet within the Virtual Network
"""
return pulumi.get(self, "custom_public_subnet_name")
@property
@pulumi.getter(name="customVirtualNetworkId")
def custom_virtual_network_id(self) -> Optional['outputs.WorkspaceCustomStringParameterResponse']:
"""
The ID of a Virtual Network where this Databricks Cluster should be created
"""
return pulumi.get(self, "custom_virtual_network_id")
@property
@pulumi.getter(name="enableNoPublicIp")
def enable_no_public_ip(self) -> Optional['outputs.WorkspaceCustomBooleanParameterResponse']:
"""
Should the Public IP be Disabled?
"""
return pulumi.get(self, "enable_no_public_ip")
@property
@pulumi.getter
def encryption(self) -> Optional['outputs.WorkspaceEncryptionParameterResponse']:
"""
Contains the encryption details for Customer-Managed Key (CMK) enabled workspace.
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter(name="prepareEncryption")
def prepare_encryption(self) -> Optional['outputs.WorkspaceCustomBooleanParameterResponse']:
"""
Prepare the workspace for encryption. Enables the Managed Identity for managed storage account.
"""
return pulumi.get(self, "prepare_encryption")
@property
@pulumi.getter(name="requireInfrastructureEncryption")
def require_infrastructure_encryption(self) -> Optional['outputs.WorkspaceCustomBooleanParameterResponse']:
"""
A boolean indicating whether or not the DBFS root file system will be enabled with secondary layer of encryption with platform managed keys for data at rest.
"""
return pulumi.get(self, "require_infrastructure_encryption")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WorkspaceCustomStringParameterResponse(dict):
"""
The Value.
"""
def __init__(__self__, *,
type: str,
value: str):
"""
The Value.
:param str type: The type of variable that this is
:param str value: The value which should be used for this field.
"""
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> str:
"""
The type of variable that this is
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> str:
"""
The value which should be used for this field.
"""
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WorkspaceEncryptionParameterResponse(dict):
"""
The object that contains details of encryption used on the workspace.
"""
def __init__(__self__, *,
type: str,
value: Optional['outputs.EncryptionResponse'] = None):
"""
The object that contains details of encryption used on the workspace.
:param str type: The type of variable that this is
:param 'EncryptionResponseArgs' value: The value which should be used for this field.
"""
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> str:
"""
The type of variable that this is
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> Optional['outputs.EncryptionResponse']:
"""
The value which should be used for this field.
"""
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WorkspaceProviderAuthorizationResponse(dict):
"""
The workspace provider authorization.
"""
def __init__(__self__, *,
principal_id: str,
role_definition_id: str):
"""
The workspace provider authorization.
:param str principal_id: The provider's principal identifier. This is the identity that the provider will use to call ARM to manage the workspace resources.
:param str role_definition_id: The provider's role definition identifier. This role will define all the permissions that the provider must have on the workspace's container resource group. This role definition cannot have permission to delete the resource group.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "role_definition_id", role_definition_id)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The provider's principal identifier. This is the identity that the provider will use to call ARM to manage the workspace resources.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="roleDefinitionId")
def role_definition_id(self) -> str:
"""
The provider's role definition identifier. This role will define all the permissions that the provider must have on the workspace's container resource group. This role definition cannot have permission to delete the resource group.
"""
return pulumi.get(self, "role_definition_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
[
"[email protected]"
] | |
d7a11c0110515f3c629a354c016735846c02be79
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nndemocrat.py
|
e29865bb1ed861ed5bd1e71e81987879494e61ba
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 712 |
py
|
ii = [('CookGHP3.py', 29), ('MarrFDI.py', 7), ('SadlMLP.py', 1), ('WilbRLW.py', 1), ('WilbRLW4.py', 6), ('MartHSI2.py', 12), ('LeakWTI2.py', 1), ('WilbRLW5.py', 2), ('MarrFDI3.py', 4), ('WilbRLW2.py', 20), ('WilkJMC2.py', 1), ('CarlTFR.py', 12), ('CookGHP2.py', 3), ('RoscTTI2.py', 1), ('MarrFDI2.py', 40), ('ClarGE.py', 1), ('LandWPA2.py', 1), ('WadeJEB.py', 7), ('CoopJBT.py', 1), ('MartHRW.py', 13), ('FitzRNS4.py', 1), ('HallFAC.py', 1), ('ThomGLG.py', 2), ('StorJCC.py', 1), ('MackCNH2.py', 1), ('HaliTBC.py', 5), ('WilbRLW3.py', 1), ('JacoWHI.py', 1), ('ClarGE3.py', 3), ('MartHRW2.py', 4), ('FitzRNS2.py', 6), ('MartHSI.py', 34), ('DwigTHH.py', 2), ('WordWYR.py', 2), ('ThomWEC.py', 8), ('BentJDO.py', 7)]
|
[
"[email protected]"
] | |
45580a16f6a18dad43e707e5d56a5ccb7cd5c775
|
ced1068f3cbab76399490b5e1b2e7c496555639c
|
/pslist2.py
|
9f6a763d1d733c047656acd196021f722920f3de
|
[] |
no_license
|
ravijaya/july23
|
744d5e88ed5ab414a1097a107ef9577664da8b73
|
e29ca509ac2bb38a3ddc93e2185daf54832722c3
|
refs/heads/master
| 2020-06-23T01:04:50.985147 | 2019-07-24T11:43:52 | 2019-07-24T11:43:52 | 198,453,808 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 286 |
py
|
# Delete items from a list with pop()
items = [2.2, 'pam', .98, 'tim', 'pat', 1.2, 'kim', .67]
print(items)
print()
value = items.pop()     # pop() with no argument removes and returns the last item (.67)
print(value)
print(items)
print()
items = [2.2, 'pam', .98, 'tim', 'pat', 1.2, 'kim', .67]
print(items)
print()
value = items.pop(5)    # pop(5) removes and returns the item at index 5 (1.2)
print(value)
print(items)
print()
|
[
"[email protected]"
] | |
e6946326b32ac4520b6a43c8ce3fbe9617677612
|
f3b5c4a5ce869dee94c3dfa8d110bab1b4be698b
|
/tools/sandesh/library/common/test/SConscript
|
96a9520dcf0fe554cf04fa620990b5020f8b8421
|
[
"Apache-2.0"
] |
permissive
|
pan2za/ctrl
|
8f808fb4da117fce346ff3d54f80b4e3d6b86b52
|
1d49df03ec4577b014b7d7ef2557d76e795f6a1c
|
refs/heads/master
| 2021-01-22T23:16:48.002959 | 2015-06-17T06:13:36 | 2015-06-17T06:13:36 | 37,454,161 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 866 |
# -*- mode: python; -*-
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
Import('SandeshEnv')
env = SandeshEnv.Clone()
env.Append(CPPPATH = [
Dir('#build/include').abspath,
Dir('#controller/src').abspath,
Dir('#tools').abspath,
])
SandeshBufferTestGenCppFiles = env.SandeshGenCpp('sandesh_buffer_test.sandesh')
SandeshBufferTestGenCppSrcs = env.ExtractCpp(SandeshBufferTestGenCppFiles)
SandeshBufferTestGenCppObjs = env.Object(SandeshBufferTestGenCppSrcs)
SandeshBufferTestGenCFiles = env.SandeshGenC('sandesh_buffer_test.sandesh')
SandeshBufferTestGenCSrcs = env.ExtractC(SandeshBufferTestGenCFiles)
SandeshBufferTestGenCObjs = env.Object(SandeshBufferTestGenCSrcs)
SandeshEnv['SandeshBufferTestGenCppObjs'] = SandeshBufferTestGenCppObjs
SandeshEnv['SandeshBufferTestGenCObjs'] = SandeshBufferTestGenCObjs
|
[
"[email protected]"
] | ||
887e2b6ff01a0510044a6bf19ef7078447cafaab
|
d063684dd03293eb0f980568af088d26ab087dbe
|
/debadmin/migrations/0093_user_addon_cart_item_cart_id.py
|
e3e4b69f8eb6eac7dc10b7c7fc76391b4bee6345
|
[] |
no_license
|
abhaysantra/debscientific
|
ce88e5ef44da8d6771c3652ed0ad02900ccd8ed2
|
88ec65616fd24052bbdbba8b00beba85493f5aea
|
refs/heads/master
| 2020-11-26T22:09:33.820247 | 2019-12-20T07:58:43 | 2019-12-20T07:58:43 | 229,213,810 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 419 |
py
|
# Generated by Django 2.2.6 on 2019-11-27 08:51
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('debadmin', '0092_user_addon_cart_item'),
    ]

    operations = [
        migrations.AddField(
            model_name='user_addon_cart_item',
            name='cart_id',
            field=models.IntegerField(null=True),
        ),
    ]
|
[
"[email protected]"
] | |
87a879348653c2883060d095fafc694a1aa3b3ff
|
e6c65e2e354336a4bea5b6a4ccbccd3682915fe2
|
/out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/rnn/python/ops/__init__.py
|
c760019ae94c14bdf37bf38b2431b81e7cd9315d
|
[
"Apache-2.0"
] |
permissive
|
rasalt/fhir-datalab
|
c30ab773d84983dd04a37e9d0ddec8bf2824b8a4
|
3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de
|
refs/heads/master
| 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 186 |
py
|
/home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/rnn/python/ops/__init__.py
|
[
"[email protected]"
] | |
77fcce991ed41ab5f767912e8b9655c8c7323165
|
a91948971d4ade6fa3f315ced47f6867398ba34b
|
/PracticeProblems/dict_of_squares.py
|
f4ae7ead4f8b8163a092f965c87cdab1eabe01b3
|
[] |
no_license
|
randolchance/PythonProjects
|
10cb4c457ad475e227394d5b4ce7939d51af7f86
|
d484ec4fc2c9089b9544c01d3af51fbfc84b7340
|
refs/heads/master
| 2018-12-02T19:49:19.478640 | 2018-09-06T18:30:31 | 2018-09-06T18:30:31 | 119,735,576 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 104 |
py
|
square_dict = {}
x = int(input())
for y in range(1, x + 1):
    square_dict[y] = y * y
print(square_dict)
|
[
"[email protected]"
] | |
35a67b282264036588f8a8fee11b90cbf8a73faf
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/OTLModel/Classes/Installatie/Ecovallei.py
|
2b15e33f943ef57001d14f6a606039fb858c4be9
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 |
MIT
| 2022-06-20T20:36:00 | 2021-11-28T10:28:24 |
Python
|
UTF-8
|
Python
| false | false | 642 |
py
|
# coding=utf-8
from OTLMOW.OTLModel.Classes.ImplementatieElement.AIMObject import AIMObject
from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Ecovallei(AIMObject, VlakGeometrie):
    """A valley under the road bridge where the landscape simply continues underneath and is disturbed as little as possible."""

    typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/installatie#Ecovallei'
    """The URI of the object according to https://www.w3.org/2001/XMLSchema#anyURI."""

    def __init__(self):
        AIMObject.__init__(self)
        VlakGeometrie.__init__(self)
|
[
"[email protected]"
] | |
b70d805e1f5ae369892c503ffe7cb69f881a3ed5
|
c78b20665068e712917558dbdd512641b0b90c80
|
/rebecca/fanstatic/apis.py
|
1cf4b8101d5b6c332c1f6db3af54c37e02c1f3c1
|
[
"MIT"
] |
permissive
|
rebeccaframework/rebecca.fanstatic
|
9d60cb29f4bee5e5a1dbc8cfcc43e3ac4c723c43
|
9f71d5f7d0a605b0c9ad165b20958d88cfdbcf69
|
refs/heads/master
| 2016-09-06T14:08:34.278688 | 2012-03-20T08:23:42 | 2012-03-20T08:23:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,464 |
py
|
from fanstatic import get_library_registry
from zope.interface import implementer
from .interfaces import IFanstaticSet
def get_fanstatic_set(request):
reg = request.registry
fanstatic_set = reg.queryUtility(IFanstaticSet)
return fanstatic_set
def need_fanstatic(request, renderer_name):
fanstatic_set = get_fanstatic_set(request)
if fanstatic_set is None:
return
fanstatic_set(renderer_name)
class FanstaticSet(object):
def __init__(self):
self.fanstatics = []
def add_fanstatic(self, resources, renderer_name_pattern):
self.fanstatics.append(Fanstatic(resources, renderer_name_pattern))
def __call__(self, renderer_name):
for f in self.fanstatics:
f(renderer_name)
def iter_resources(self):
printed = set()
for f in self.fanstatics:
for r in f.resources:
lib = r.library
if lib not in printed:
yield lib
printed.add(lib)
class Fanstatic(object):
def __init__(self, resources, renderer_name_regex):
self.resources = resources
self.regex = renderer_name_regex
def match(self, renderer_name):
return self.regex.match(renderer_name)
def __call__(self, renderer_name):
if self.match(renderer_name):
for resource in self.resources:
from fanstatic.core import get_needed
resource.need()
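# A minimal wiring sketch (not part of this module; the resource package and
# renderer-name pattern are hypothetical, and the registry call assumes a
# Pyramid-style configurator):
#
#     import re
#     from js.jquery import jquery  # hypothetical fanstatic resource package
#
#     fanstatic_set = FanstaticSet()
#     fanstatic_set.add_fanstatic([jquery], re.compile(r'.*\.mako$'))
#     config.registry.registerUtility(fanstatic_set, IFanstaticSet)
#
#     # later, for a request being rendered with a matching template:
#     need_fanstatic(request, 'templates/index.mako')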
|
[
"[email protected]"
] | |
49c1a6ffb77eb42628a14c31a91f6da58b557a6d
|
10e8fa6e43a54b3bbb89326a7d5786d50a625551
|
/04. Inheritance/venv/Scripts/pip3.8-script.py
|
49afacc31ed499f32cbf17a0824ad4d92a2dd157
|
[] |
no_license
|
ramona-2020/Python-OOP
|
cbc7e5fadfdc907e51c83313e0ffb1f4f5f83f70
|
7404908f50d30c533f0fca2fd08d0290526686a5
|
refs/heads/master
| 2023-03-20T18:43:18.389720 | 2020-06-07T15:20:00 | 2020-06-07T15:20:00 | 523,400,905 | 1 | 0 | null | 2022-08-10T15:38:09 | 2022-08-10T15:38:08 | null |
UTF-8
|
Python
| false | false | 447 |
py
|
#!"D:\User\Desktop\Python Projects_SoftUni\Python-OOP\04. Inheritance\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"[email protected]"
] | |
6f2cafd43a0d793c14a963dee3e2c6ede3bc62b3
|
a88207cf67ad0d65afdd57d5f5144cbc09995f60
|
/test/pPb/step2_RAW2DIGI_L1Reco_RECO.py
|
bd20aa632197a2c164cdf5066199385358db4aa7
|
[] |
no_license
|
pfs/TopFromHeavyIons
|
211184bad34e4ae11e6216689e5141a132e14542
|
a75ed1fc68d24682dad3badacf2726dc2b7ff464
|
refs/heads/master
| 2020-12-29T02:44:30.348481 | 2017-06-16T14:53:35 | 2017-06-16T14:53:35 | 37,029,781 | 1 | 1 | null | 2015-10-11T08:28:35 | 2015-06-07T19:47:07 |
Python
|
UTF-8
|
Python
| false | false | 3,477 |
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step2 --filein file:step1.root --fileout file:step2.root --mc --eventcontent AODSIM --datatier AODSIM --conditions 80X_mcRun2_pA_v4 --customise_commands process.bunchSpacingProducer.bunchSpacingOverride=cms.uint32(25)\n process.bunchSpacingProducer.overrideBunchSpacing=cms.bool(True) -n -1 --step RAW2DIGI,L1Reco,RECO --era Run2_2016_pA
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('RECO',eras.Run2_2016_pA)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:step1.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step2 nevts:-1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.AODSIMoutput = cms.OutputModule("PoolOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(4),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('AODSIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(15728640),
fileName = cms.untracked.string('file:step2.root'),
outputCommands = process.AODSIMEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '80X_mcRun2_pA_v4', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstruction)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.AODSIMoutput_step = cms.EndPath(process.AODSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.AODSIMoutput_step)
# Customisation from command line
process.bunchSpacingProducer.bunchSpacingOverride=cms.uint32(25)
process.bunchSpacingProducer.overrideBunchSpacing=cms.bool(True)
|
[
"[email protected]"
] | |
457d08945aaab8688e20d9b67d9f662e622f45c7
|
05083d24088bbb3bfb7cdd162c101c72e18bc3a6
|
/containers/failures/router/failure1/myapp.py
|
91a0fcf8f0bc567cc5d9e9642a2f9910e038d684
|
[
"Apache-2.0"
] |
permissive
|
crossbario/crossbar-examples
|
f5e14b62db0f14e20ab54346cd4e8c3276aa6449
|
aa31d9fe3abcb4b797931356b5a2ceeac64229c3
|
refs/heads/master
| 2023-01-11T02:36:00.883034 | 2023-01-03T11:12:06 | 2023-01-03T11:12:06 | 28,035,551 | 100 | 122 |
Apache-2.0
| 2023-01-03T11:12:07 | 2014-12-15T12:23:02 |
HTML
|
UTF-8
|
Python
| false | false | 371 |
py
|
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession


class MySession(ApplicationSession):

    log = Logger()

    def __init__(self, config):
        self.log.info("MySession.__init__()")
        ApplicationSession.__init__(self, config)

    # Note: inlineCallbacks is used below without being imported
    # (from twisted.internet.defer import inlineCallbacks); given the
    # containers/failures/router/failure1 path, the missing import appears
    # to be the intentional failure this example demonstrates.
    @inlineCallbacks
    def onJoin(self, details):
        self.log.info("MySession.onJoin()")
|
[
"[email protected]"
] | |
45aeeec4bced02a189d55b9a5a3b75962906bfd3
|
457e2f5b2a26877df739e314ec1560e8a3ecfb97
|
/rebind/baseRig/util/nodePVpos.py
|
c0fe805be2194bfad4641759e25560502c3f02dd
|
[] |
no_license
|
mappp7/tools
|
f6685d9a682bd540d59c1bff0cebb60f79fd6556
|
c537e7648112c51ba4f44225418e773ee6b8be6c
|
refs/heads/master
| 2021-01-14T16:40:44.450790 | 2020-10-30T05:30:27 | 2020-10-30T05:30:27 | 242,682,763 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,772 |
py
|
import maya.cmds as cmds
import util.homeNul as HN
# Node base PV position !
# 3 joint list
#jnt = cmds.ls( sl=True, type='joint' )
def nodePVconnect( jnt ):
#decomposeMatrix
rootJdecompose = cmds.createNode( 'decomposeMatrix', n=jnt[0].replace( 'JNT', 'DCM' ) )
middleJdecompose = cmds.createNode( 'decomposeMatrix', n=jnt[1].replace( 'JNT', 'DCM' ) )
tipJdecompose = cmds.createNode( 'decomposeMatrix', n=jnt[2].replace( 'JNT', 'DCM' ) )
#connections
cmds.connectAttr( '%s.worldMatrix[0]' % jnt[0], '%s.inputMatrix' % rootJdecompose )
cmds.connectAttr( '%s.worldMatrix[0]' % jnt[1], '%s.inputMatrix' % middleJdecompose )
cmds.connectAttr( '%s.worldMatrix[0]' % jnt[2], '%s.inputMatrix' % tipJdecompose )
#plusMinusAverage
sumPMA = cmds.createNode( 'plusMinusAverage', n='%sPos_sum_%sPos_PMA' % ( jnt[0].replace( '_JNT', '' ), jnt[2].replace( '_JNT', '' ) ) )
cmds.setAttr( '%s.operation' % sumPMA, 1 )
#connections
cmds.connectAttr( '%s.outputTranslate' % rootJdecompose, '%s.input3D[0]' % sumPMA )
cmds.connectAttr( '%s.outputTranslate' % tipJdecompose, '%s.input3D[1]' % sumPMA )
#multiplyDivide
divideSumPMA = cmds.createNode( 'multiplyDivide', n=sumPMA.replace( 'PMA', 'halfDvide_MPD' ) )
cmds.setAttr( '%s.operation' % divideSumPMA, 2 )
cmds.setAttr( '%s.input2X' % divideSumPMA, 2 )
cmds.setAttr( '%s.input2Y' % divideSumPMA, 2 )
cmds.setAttr( '%s.input2Z' % divideSumPMA, 2 )
#connections
cmds.connectAttr( '%s.output3D' % sumPMA, '%s.input1' % divideSumPMA )
#plusMinusAverage( calculate vector )
VT = cmds.createNode( 'plusMinusAverage', n='to_%s_vector_PMA' % jnt[1].replace( 'JNT', 'joint' ) )
cmds.setAttr( '%s.operation' % VT, 2 )
#connections
cmds.connectAttr( '%s.outputTranslate' % middleJdecompose, '%s.input3D[0]' % VT )
cmds.connectAttr( '%s.output' % divideSumPMA, '%s.input3D[1]' % VT )
# offset
offsetVector = cmds.createNode( 'multiplyDivide', n='%s_MPD' % VT.replace( 'PMA', 'offset' ) )
cmds.connectAttr( '%s.output3D' % VT, '%s.input1' % offsetVector )
#plusMinusAverage( middleJ + offset + vector )
PVposition = cmds.createNode( 'plusMinusAverage', n='%s_vector_PMA' % divideSumPMA.replace( 'MPD', 'sum' ) )
cmds.setAttr( '%s.operation' % PVposition, 1 )
#connections
cmds.connectAttr( '%s.output' % divideSumPMA, '%s.input3D[0]' % PVposition )
cmds.connectAttr( '%s.output' % offsetVector, '%s.input3D[1]' % PVposition )
# finish
loc = cmds.spaceLocator( n=jnt[1].replace( 'JNT', 'pv_LOC' ) )
cmds.connectAttr( '%s.output3D' % PVposition, '%s.translate' % loc[0] )
homeN = HN.homeNul( loc[0] )
return homeN
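# Illustrative usage sketch: assumes a Maya scene where the three joints of an
# IK chain are selected; the joint names below are hypothetical examples that
# follow the *_JNT naming convention expected by nodePVconnect above.
#   jnt = cmds.ls(sl=True, type='joint')   # e.g. ['arm_upper_JNT', 'arm_lower_JNT', 'arm_wrist_JNT']
#   pvHome = nodePVconnect(jnt)            # builds the node network and returns the locator's home null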
|
[
"[email protected]"
] | |
5f536b3a0467f3d3de997f12b88faf81f8d924e4
|
1cde7c8612aadfcebe4fab4191caafc0618351d2
|
/examples/withRaycing/12_Multilayer/BalderDMM.py
|
b64b106008474af054ba2c9d5e9d84da4c7a36a7
|
[
"MIT"
] |
permissive
|
mrakitin/xrt
|
9927599998fc4add57201eca903ecab67a9c6461
|
aef2f8e15a6639bc54ce79e8c717a75fd74dce29
|
refs/heads/master
| 2023-07-25T22:21:29.531887 | 2022-09-08T14:15:44 | 2022-09-08T14:15:44 | 210,162,051 | 0 | 0 |
MIT
| 2019-09-22T14:37:11 | 2019-09-22T14:37:10 | null |
UTF-8
|
Python
| false | false | 13,622 |
py
|
# -*- coding: utf-8 -*-
"""This module describes the beamline CLÆSS to be imported by
``traceBalderBL.py`` script."""
__author__ = "Konstantin Klementiev", "Roman Chernikov"
__date__ = "07 Jan 2016"
import math
import numpy as np
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import xrt.backends.raycing as raycing
import xrt.backends.raycing.sources as rs
import xrt.backends.raycing.apertures as ra
import xrt.backends.raycing.oes as roe
import xrt.backends.raycing.run as rr
import xrt.backends.raycing.materials as rm
import xrt.backends.raycing.screens as rsc
showIn3D = False
stripeSi = rm.Material('Si', rho=2.33)
stripeSiO2 = rm.Material(('Si', 'O'), quantities=(1, 2), rho=2.2)
stripeIr = rm.Material('Ir', rho=22.42)
mSi = rm.Material('Si', rho=2.33)
mW = rm.Material('W', rho=19.3)
mL = rm.Multilayer(mSi, 27, mW, 18, 40, mSi)
def build_beamline(nrays=1e4, hkl=(1, 1, 1), stripe='Si',
eMinRays=2400, eMaxRays=45000):
filterDiamond = rm.Material('C', rho=3.52, kind='plate')
if stripe.startswith('S'):
materialVCM = stripeSi
materialVFM = stripeSiO2
elif stripe.startswith('I'):
materialVCM = stripeIr
materialVFM = stripeIr
else:
        raise ValueError("Don't know the mirror material")
height = 0
beamLine = raycing.BeamLine(azimuth=0, height=height)
wigglerToStraightSection = 0
xWiggler = wigglerToStraightSection * beamLine.sinAzimuth
yWiggler = wigglerToStraightSection * beamLine.cosAzimuth
# rs.WigglerWS(
# beamLine, name='SoleilW50', center=(xWiggler, yWiggler, height),
# nrays=nrays, period=50., K=8.446, n=39, eE=3., eI=0.5,
# eSigmaX=48.66, eSigmaZ=6.197, eEpsilonX=0.263, eEpsilonZ=0.008,
# eMin=50, eMax=60050, eMinRays=eMinRays, eMaxRays=eMaxRays, eN=2000,
# xPrimeMax=0.22, zPrimeMax=0.06, nx=40, nz=10)
rs.Wiggler(
beamLine, name='SoleilW50', center=(xWiggler, yWiggler, height),
nrays=nrays, period=50., K=8.446, n=39, eE=3., eI=0.5,
eSigmaX=48.66, eSigmaZ=6.197, eEpsilonX=0.263, eEpsilonZ=0.008,
eMin=eMinRays, eMax=eMaxRays, xPrimeMax=0.22, zPrimeMax=0.06)
beamLine.fsm0 = rsc.Screen(beamLine, 'FSM0', (0, 15000, height))
beamLine.feFixedMask = ra.RectangularAperture(
beamLine, 'FEFixedMask', (0, 15750, height),
('left', 'right', 'bottom', 'top'), [-3.15, 3.15, -0.7875, 0.7875])
beamLine.fsmFE = rsc.Screen(beamLine, 'FSM-FE', (0, 16000, height))
beamLine.filter1 = roe.Plate(
beamLine, 'Filter1', (0, 23620, height),
pitch=math.pi/2, limPhysX=(-9., 9.), limPhysY=(-4., 4.),
surface='diamond 60 $\mu$m', material=filterDiamond, t=0.06,
alarmLevel=0.)
if stripe.startswith('I'):
beamLine.filter2 = roe.Plate(
beamLine, 'Filter2', (0, 23720, height),
pitch=math.pi/2, limPhysX=(-9., 9.), limPhysY=(-4., 4.),
surface='diamond 0.4 mm', material=filterDiamond, t=0.4,
alarmLevel=0.)
beamLine.vcm = roe.SimpleVCM(
beamLine, 'VCM', [0, 25290, height],
surface=('Si',), material=(materialVCM,),
limPhysX=(-15., 15.), limPhysY=(-680., 680.), limOptX=(-6, 6),
limOptY=(-670., 670.), R=5.0e6, pitch=2e-3, alarmLevel=0.)
beamLine.fsmVCM = rsc.Screen(beamLine, 'FSM-VCM', (0, 26300, height))
beamLine.dmm = roe.DCM(
beamLine, 'DMM', [0, 27060, height], surface=('mL1',),
material=(mL,), material2=(mL,),
limPhysX=(-12, 12), limPhysY=(-150, 150),
cryst2perpTransl=20, cryst2longTransl=100,
limPhysX2=(-12, 12), limPhysY2=(-200, 200),
# targetOpenCL='auto',
targetOpenCL='CPU',
alarmLevel=0.05)
beamLine.BSBlock = ra.RectangularAperture(
beamLine, 'BSBlock', (0, 29100, height), ('bottom',),
(22,), alarmLevel=0.)
beamLine.slitAfterDCM = ra.RectangularAperture(
beamLine, 'SlitAfterDCM', (0, 29200, height),
('left', 'right', 'bottom', 'top'), [-7, 7, -2, 2], alarmLevel=0.5)
beamLine.fsmDCM = rsc.Screen(beamLine, 'FSM-DCM', (0, 29400, height))
beamLine.vfm = roe.SimpleVFM(
beamLine, 'VFM', [0, 30575, height],
surface=('SiO2',), material=(materialVFM,),
limPhysX=(-20., 20.), limPhysY=(-700., 700.),
limOptX=(-10, 10), limOptY=(-700, 700),
positionRoll=math.pi, R=5.0e6, r=40.77, alarmLevel=0.2)
beamLine.slitAfterVFM = ra.RectangularAperture(
beamLine, 'SlitAfterVFM', (0, 31720, height),
('left', 'right', 'bottom', 'top'), [-7, 7, -2, 2], alarmLevel=0.5)
beamLine.fsmVFM = rsc.Screen(beamLine, 'FSM-VFM', (0, 32000, height))
beamLine.ohPS = ra.RectangularAperture(
beamLine, 'OH-PS', (0, 32070, height),
('left', 'right', 'bottom', 'top'), (-20, 20, 25, 55), alarmLevel=0.2)
beamLine.slitEH = ra.RectangularAperture(
beamLine, 'slitEH', (0, 43000, height),
('left', 'right', 'bottom', 'top'), [-20, 20, -7, 7], alarmLevel=0.5)
beamLine.fsmSample = rsc.Screen(
beamLine, 'FSM-Sample', (0, 45863, height))
return beamLine
def run_process(beamLine, shineOnly1stSource=False):
for i in range(len(beamLine.sources)):
curSource = beamLine.sources[i].shine()
if i == 0:
beamSource = curSource
else:
beamSource.concatenate(curSource)
if shineOnly1stSource:
break
beamFSM0 = beamLine.fsm0.expose(beamSource)
beamLine.feFixedMask.propagate(beamSource)
beamFSMFE = beamLine.fsmFE.expose(beamSource)
beamFilter1global, beamFilter1local1, beamFilter1local2 = \
beamLine.filter1.double_refract(beamSource)
if hasattr(beamLine, 'filter2'):
beamFilter2global, beamFilter2local1, beamFilter2local2 = \
beamLine.filter2.double_refract(beamFilter1global)
beamFurtherDown = beamFilter2global
else:
beamFurtherDown = beamFilter1global
# beamFurtherDown = beamSource
beamVCMglobal, beamVCMlocal = beamLine.vcm.reflect(beamFurtherDown)
beamFSMVCM = beamLine.fsmVCM.expose(beamVCMglobal)
beamDCMglobal, beamDCMlocal1, beamDCMlocal2 = \
beamLine.dmm.double_reflect(beamVCMglobal)
# beamBSBlocklocal = beamLine.BSBlock.propagate(beamDCMglobal)
# beamSlitAfterDCMlocal = beamLine.slitAfterDCM.propagate(beamDCMglobal)
beamFSMDCM = beamLine.fsmDCM.expose(beamDCMglobal)
#
# beamVFMglobal, beamVFMlocal = beamLine.vfm.reflect(beamDCMglobal)
# beamSlitAfterVFMlocal = beamLine.slitAfterVFM.propagate(beamVFMglobal)
# beamFSMVFM = beamLine.fsmVFM.expose(beamVFMglobal)
# beamPSLocal = beamLine.ohPS.propagate(beamVFMglobal)
#
# beamSlitEHLocal = beamLine.slitEH.propagate(beamVFMglobal)
# beamFSMSample = beamLine.fsmSample.expose(beamVFMglobal)
# 'beamFilter2global': beamFilter2global,
# 'beamFilter2local1': beamFilter2local1,
# 'beamFilter2local2': beamFilter2local2,
outDict = {'beamSource': beamSource,
'beamFSM0': beamFSM0,
'beamFSMFE': beamFSMFE,
'beamFilter1global': beamFilter1global,
'beamFilter1local1': beamFilter1local1,
'beamFilter1local2': beamFilter1local2,
'beamVCMglobal': beamVCMglobal, 'beamVCMlocal': beamVCMlocal,
'beamFSMVCM': beamFSMVCM,
'beamDCMglobal': beamDCMglobal,
'beamDCMlocal1': beamDCMlocal1, 'beamDCMlocal2': beamDCMlocal2,
'beamFSMDCM': beamFSMDCM}
# 'beamBSBlocklocal': beamBSBlocklocal,
# 'beamSlitAfterDCMlocal': beamSlitAfterDCMlocal,
# 'beamFSMDCM': beamFSMDCM,
# 'beamVFMglobal': beamVFMglobal, 'beamVFMlocal': beamVFMlocal,
# 'beamSlitAfterVFMlocal': beamSlitAfterVFMlocal,
# 'beamFSMVFM': beamFSMVFM,
# 'beamPSLocal': beamPSLocal,
# 'beamSlitEHLocal': beamSlitEHLocal,
# 'beamFSMSample': beamFSMSample
# }
if hasattr(beamLine, 'filter2'):
outDict['beamFilter2global'] = beamFilter2global
outDict['beamFilter2local1'] = beamFilter2local1
outDict['beamFilter2local2'] = beamFilter2local2
beamLine.beams = outDict
if showIn3D:
beamLine.prepare_flow()
return outDict
rr.run_process = run_process
aceptanceH = 4e-4
aceptanceV = 1e-4
def align_beamline(beamLine, pitch=None, bragg=None, energy=9000.,
fixedExit=None, heightVFM=25, vfmR='auto'):
p = raycing.distance_xy(beamLine.vfm.center, beamLine.sources[0].center)
if pitch is None:
sinPitch = beamLine.vfm.r * 1.5 / p
pitch = math.asin(sinPitch)
else:
sinPitch = math.sin(pitch)
if vfmR == 'auto':
beamLine.vfm.R = p / sinPitch
else:
beamLine.vfm.R = vfmR
fefm = beamLine.feFixedMask
op = fefm.center[1] * aceptanceV / 2 * min(1, pitch/2e-3)
fefm.opening[2] = -op
fefm.opening[3] = op
beamLine.vcm.pitch = pitch
p = raycing.distance_xy(beamLine.vcm.center, beamLine.sources[0].center)
beamLine.vcm.R = 2. * p / sinPitch
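    # Illustrative check (assumed numbers): the VCM sits at y ~ 25290 mm and the
    # source at y ~ 0, so p ~ 25290 mm; with pitch = 2e-3 rad this gives
    # R = 2*25290/sin(0.002) ~ 2.5e7 mm.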
print('VCM.p = {0:.1f}'.format(p))
print('VCM.pitch = {0:.6f} mrad'.format(beamLine.vcm.pitch*1e3))
print('VCM.roll = {0:.6f} mrad'.format(beamLine.vcm.roll*1e3))
print('VCM.yaw = {0:.6f} mrad'.format(beamLine.vcm.yaw*1e3))
print('VCM.z = {0:.3f}'.format(beamLine.vcm.center[2]))
print('VCM.R = {0:.0f}'.format(beamLine.vcm.R))
p = raycing.distance_xy(beamLine.dmm.center, beamLine.vcm.center)
beamLine.dmm.center[2] = beamLine.height + p*math.tan(2*pitch)
aML = beamLine.dmm.material[0]
dSpacing = aML.d
print(u'DMM.dSpacing = {0:.6f} angstrom'.format(dSpacing))
if bragg is None:
theta = aML.get_Bragg_angle(energy) -\
aML.get_dtheta_symmetric_Bragg(energy)
# theta = np.radians(1.05)
bragg = theta + 2*pitch
else:
theta = bragg - 2*pitch
energy = rm.ch / (2*dSpacing*math.sin(theta))
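        # Bragg's law solved for energy, E = h*c / (2*d*sin(theta)); rm.ch is
        # presumably h*c in eV*angstrom (~12398.4), so with d in angstrom the
        # result is in eV.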
print('DMM.energy = {0:.3f} eV'.format(energy))
print('DMM.bragg = {0:.6f} deg'.format(math.degrees(bragg)))
print('DMM.realThetaAngle = DMM.bragg - 2*VCM.pitch = {0:.6f} deg'.format(
math.degrees(theta)))
beamLine.dmm.energy = energy
beamLine.dmm.bragg = bragg
p = raycing.distance_xy(beamLine.vfm.center, beamLine.vcm.center)
if heightVFM is not None:
fixedExit = (heightVFM - beamLine.height - p * math.tan(2 * pitch)) * \
math.cos(2 * pitch)
else:
heightVFM = fixedExit / math.cos(2 * pitch) + \
beamLine.height + p * math.tan(2 * pitch) + 0.2
beamLine.heightVFM = heightVFM
beamLine.dmm.fixedExit = fixedExit
beamLine.dmm.cryst2perpTransl =\
beamLine.dmm.fixedExit/2./math.cos(beamLine.dmm.bragg)
print('DMM.pitch = {0:.6f} mrad'.format(beamLine.dmm.pitch*1e3))
print('DMM.roll = {0:.6f} mrad'.format(beamLine.dmm.roll*1e3))
print('DMM.yaw = {0:.6f} mrad'.format(beamLine.dmm.yaw*1e3))
print('DMM.z = {0:.3f}'.format(beamLine.dmm.center[2]))
print('DMM.fixedExit = {0:.3f}'.format(fixedExit))
print('DMM.cryst2perpTransl = {0:.3f}'.format(
beamLine.dmm.cryst2perpTransl))
p = raycing.distance_xy(
beamLine.vfm.center,
(beamLine.slitAfterDCM.center[0], beamLine.slitAfterDCM.center[1]))
slitHeight = heightVFM - p * math.tan(2 * pitch)
dz = beamLine.slitAfterDCM.opening[3] - beamLine.slitAfterDCM.opening[2]
beamLine.slitAfterDCM.opening[2] = slitHeight - beamLine.height - dz/2
beamLine.slitAfterDCM.opening[3] = slitHeight - beamLine.height + dz/2
beamLine.slitAfterDCM.set_optical_limits()
p = raycing.distance_xy(beamLine.vfm.center, beamLine.fsmDCM.center)
fsmHeight = heightVFM - p * math.tan(2 * pitch)
print('fsmDCM.z = {0:.3f}'.format(fsmHeight))
beamLine.vfm.pitch = -pitch
beamLine.vfm.center[2] = heightVFM # - beamLine.vfm.hCylinder
print('VFM.pitch = {0:.6f} mrad'.format(beamLine.vfm.pitch*1e3))
print('VFM.roll = {0:.6f} mrad'.format(beamLine.vfm.roll*1e3))
print('VFM.yaw = {0:.6f} mrad'.format(beamLine.vfm.yaw*1e3))
print('VFM.z = {0:.3f}'.format(beamLine.vfm.center[2]))
print('VFM.R = {0:.0f}'.format(beamLine.vfm.R))
print('VFM.r = {0:.3f}'.format(beamLine.vfm.r))
dz = beamLine.slitAfterVFM.opening[3] - beamLine.slitAfterVFM.opening[2]
beamLine.slitAfterVFM.opening[2] = heightVFM - beamLine.height - dz/2
beamLine.slitAfterVFM.opening[3] = heightVFM - beamLine.height + dz/2
beamLine.slitAfterVFM.set_optical_limits()
p = raycing.distance_xy(beamLine.vfm.center, beamLine.sources[0].center)
q = 1./(2 * np.sin(pitch)/beamLine.vfm.r - 1./p)
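    # Sagittal focusing mirror equation, 1/p + 1/q = 2*sin(theta)/r, solved for
    # the image distance q (r being the sagittal radius of the mirror).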
qr = raycing.distance_xy(beamLine.fsmSample.center, beamLine.vfm.center)
beamLine.spotSizeH = abs(1. - qr / q) * p * aceptanceH
qr = raycing.distance_xy(
(beamLine.slitEH.center[0], beamLine.slitEH.center[1]),
beamLine.vfm.center)
s = abs(1. - qr / q) * p * aceptanceH / 2
beamLine.slitEH.opening[0] = -s * 1.2
beamLine.slitEH.opening[1] = s * 1.2
dz = beamLine.slitEH.opening[3] - beamLine.slitEH.opening[2]
beamLine.slitEH.opening[2] = heightVFM - beamLine.height - dz/2
beamLine.slitEH.opening[3] = heightVFM - beamLine.height + dz/2
beamLine.slitEH.set_optical_limits()
if __name__ == '__main__':
beamLine = build_beamline(nrays=25000)
align_beamline(beamLine, pitch=2e-3, energy=9000., fixedExit=20.86)
print('finished')
|
[
"[email protected]"
] | |
ccd3779c71d37763c623e43b835d92aefa84dc55
|
e6dab5aa1754ff13755a1f74a28a201681ab7e1c
|
/.parts/lib/django-1.4/tests/regressiontests/localflavor/ca/tests.py
|
a6a3680b6ae007f3eb304949651f0ae93c78ee84
|
[] |
no_license
|
ronkagan/Euler_1
|
67679203a9510147320f7c6513eefd391630703e
|
022633cc298475c4f3fd0c6e2bde4f4728713995
|
refs/heads/master
| 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 111 |
py
|
/home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/tests/regressiontests/localflavor/ca/tests.py
|
[
"[email protected]"
] | |
254ed8cda97709dce065bb81769ae14f0c9b9b70
|
b2b79cc61101ddf54959b15cf7d0887d114fb4e5
|
/web/pgadmin/tools/schema_diff/tests/test_schema_diff_comp.py
|
4231bcad3453af4b2874d3891849c0bfd8b1d472
|
[
"PostgreSQL"
] |
permissive
|
99Percent/pgadmin4
|
8afe737eb2ec1400ab034ad1d8a4f7c4ba4c35c8
|
5e0c113c7bc4ffefbec569e7ca5416d9acf9dd8a
|
refs/heads/master
| 2021-10-10T20:08:48.321551 | 2021-09-30T12:51:43 | 2021-09-30T12:51:43 | 165,702,958 | 0 | 0 |
NOASSERTION
| 2019-01-14T17:18:40 | 2019-01-14T17:18:39 | null |
UTF-8
|
Python
| false | false | 8,735 |
py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2021, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
import json
import os
import random
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from .utils import restore_schema
from pgadmin.utils.versioned_template_loader import \
get_version_mapping_directories
class SchemaDiffTestCase(BaseTestGenerator):
""" This class will test the schema diff. """
scenarios = [
# Fetching default URL for database node.
('Schema diff comparison', dict(
url='schema_diff/compare_database/{0}/{1}/{2}/{3}/{4}'))
]
def setUp(self):
self.src_database = "db_schema_diff_src_%s" % str(uuid.uuid4())[1:8]
self.tar_database = "db_schema_diff_tar_%s" % str(uuid.uuid4())[1:8]
self.src_db_id = utils.create_database(self.server, self.src_database)
self.tar_db_id = utils.create_database(self.server, self.tar_database)
self.server = parent_node_dict["server"][-1]["server"]
self.server_id = parent_node_dict["server"][-1]["server_id"]
self.schema_name = 'test_schema_diff'
self.restored_backup = True
status = self.restore_backup()
if not status:
self.restored_backup = False
def restore_backup(self):
self.sql_folder = self.get_sql_folder()
if self.sql_folder is None:
            raise FileNotFoundError('Schema diff folder does not exist')
src_sql_path = os.path.join(self.sql_folder, 'source.sql')
tar_sql_path = os.path.join(self.sql_folder, 'target.sql')
if not os.path.exists(src_sql_path):
raise FileNotFoundError(
                '{} file does not exist'.format(src_sql_path))
if not os.path.exists(tar_sql_path):
raise FileNotFoundError(
                '{} file does not exist'.format(tar_sql_path))
status, self.src_schema_id = restore_schema(
self.server, self.src_database, self.schema_name, src_sql_path)
if not status:
print("Failed to restore schema on source database.")
return False
status, self.tar_schema_id = restore_schema(
self.server, self.tar_database, self.schema_name, tar_sql_path)
if not status:
print("Failed to restore schema on target database.")
return False
return True
def get_sql_folder(self):
"""
        This function will get the appropriate SQL test folder based on the
        server version and whether a matching folder exists.
        :return: path of the matching folder, or None if no folder matches.
"""
# Join the application path, module path and tests folder
tests_folder_path = os.path.dirname(os.path.abspath(__file__))
# A folder name matching the Server Type (pg, ppas) takes priority so
# check whether that exists or not. If so, than check the version
# folder in it, else look directly in the 'tests' folder.
absolute_path = os.path.join(tests_folder_path, self.server['type'])
if not os.path.exists(absolute_path):
absolute_path = tests_folder_path
# Iterate the version mapping directories.
for version_mapping in get_version_mapping_directories(
self.server['type']):
if version_mapping['number'] > \
self.server_information['server_version']:
continue
complete_path = os.path.join(absolute_path,
version_mapping['name'])
if os.path.exists(complete_path):
return complete_path
return None
def compare(self):
comp_url = self.url.format(self.trans_id, self.server_id,
self.src_db_id,
self.server_id,
self.tar_db_id
)
response = self.tester.get(comp_url)
self.assertEqual(response.status_code, 200)
return json.loads(response.data.decode('utf-8'))
def runTest(self):
""" This function will test the schema diff."""
self.assertEqual(True, self.restored_backup)
response = self.tester.get("schema_diff/initialize")
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.data.decode('utf-8'))
self.trans_id = response_data['data']['schemaDiffTransId']
url = 'schema_diff/server/connect/{}'.format(self.server_id)
data = {'password': self.server['db_password']}
response = self.tester.post(url,
data=json.dumps(data),
content_type='html/json'
)
response = self.tester.post(
'schema_diff/database/connect/{0}/{1}'.format(
self.server_id,
self.src_db_id))
response = self.tester.post(
'schema_diff/database/connect/{0}/{1}'.format(
self.server_id,
self.tar_db_id))
response_data = self.compare()
diff_file = os.path.join(self.sql_folder, 'diff_{0}.sql'.format(
str(random.randint(1, 99999))))
file_obj = open(diff_file, 'a')
for diff in response_data['data']:
if diff['status'] == 'Identical':
src_obj_oid = diff['source_oid']
tar_obj_oid = diff['target_oid']
src_schema_id = diff['source_scid']
tar_schema_id = diff['target_scid']
if src_obj_oid is not None and tar_obj_oid is not None:
url = 'schema_diff/ddl_compare/{0}/{1}/{2}/{3}/{4}/{5}/' \
'{6}/{7}/{8}/{9}/{10}/'.format(self.trans_id,
self.server_id,
self.src_db_id,
src_schema_id,
self.server_id,
self.tar_db_id,
tar_schema_id,
src_obj_oid,
tar_obj_oid,
diff['type'],
diff['status']
)
response = self.tester.get(url)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.data.decode('utf-8'))
file_obj.write(response_data['diff_ddl'])
elif 'diff_ddl' in diff:
file_obj.write(diff['diff_ddl'])
file_obj.close()
try:
restore_schema(self.server, self.tar_database, self.schema_name,
diff_file)
os.remove(diff_file)
response_data = self.compare()
for diff in response_data['data']:
self.assertEqual(diff['status'], 'Identical')
except Exception as e:
if os.path.exists(diff_file):
os.remove(diff_file)
def tearDown(self):
"""This function drop the added database"""
connection = utils.get_db_connection(self.server['db'],
self.server['username'],
self.server['db_password'],
self.server['host'],
self.server['port'],
self.server['sslmode'])
utils.drop_database(connection, self.src_database)
connection = utils.get_db_connection(self.server['db'],
self.server['username'],
self.server['db_password'],
self.server['host'],
self.server['port'],
self.server['sslmode'])
utils.drop_database(connection, self.tar_database)
|
[
"[email protected]"
] | |
8a351a0a17dd9c9e820ad0a3ce4ed47c32bbad79
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/CMDy4pvnTZkFwJmmx_17.py
|
eb2c62d15f7a683e86ff3af378adbe04730786b4
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,319 |
py
|
"""
Create a class `Sudoku` that takes a **string** as an argument. The string
will contain the numbers of a regular 9x9 sudoku board **left to right and top
to bottom** , with zeros filling up the empty cells.
### Attributes
An instance of the class `Sudoku` will have one attribute:
* `board`: a list representing the board, with sublists for each **row**, with the numbers as **integers**. Empty cells are represented with `0`.
### Methods
An instance of the class `Sudoku` will have three methods:
* `get_row(n)`: will return the row in position `n`.
* `get_col(n)`: will return the column in position `n`.
* `get_sqr([n, m])`: will return the square in position `n` if only one argument is given, and the square to which the cell in position `(n, m)` belongs to if two arguments are given.
### Example

game = Sudoku("417950030000000700060007000050009106800600000000003400900005000000430000200701580")
game.board ➞ [
[4, 1, 7, 9, 5, 0, 0, 3, 0],
[0, 0, 0, 0, 0, 0, 7, 0, 0],
[0, 6, 0, 0, 0, 7, 0, 0, 0],
[0, 5, 0, 0, 0, 9, 1, 0, 6],
[8, 0, 0, 6, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 3, 4, 0, 0],
[9, 0, 0, 0, 0, 5, 0, 0, 0],
[0, 0, 0, 4, 3, 0, 0, 0, 0],
[2, 0, 0, 7, 0, 1, 5, 8, 0]
]
game.get_row(0) ➞ [4, 1, 7, 9, 5, 0, 0, 3, 0]
game.get_col(8) ➞ [0, 0, 0, 6, 0, 0, 0, 0, 0]
game.get_sqr(1) ➞ [9, 5, 0, 0, 0, 0, 0, 0, 7]
game.get_sqr(1, 8) ➞ [0, 3, 0, 7, 0, 0, 0, 0, 0]
game.get_sqr(8, 3) ➞ [0, 0, 5, 4, 3, 0, 7, 0, 1]
### Notes
* All positions are indexed to 0.
* All orders are assigned left to right and top to bottom.
"""
class Sudoku:
def __init__(self, board):
self.board = [[int(board[r*9+c]) for c in range(9)] for r in range(9)]
def get_row(self, n):
return self.board[n]
def get_col(self, n):
return [row[n] for row in self.board]
def get_sqr(self, n, m=None):
        if m is None:
r, c = (n//3)*3, (n%3)*3
else:
r, c = (n//3)*3, (m//3)*3
return [self.board[r][c], self.board[r][c+1], self.board[r][c+2],
self.board[r+1][c], self.board[r+1][c+1], self.board[r+1][c+2],
self.board[r+2][c], self.board[r+2][c+1], self.board[r+2][c+2]]
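# A further worked example (using the example board from the docstring above):
# game.get_sqr(4) picks the centre 3x3 square, rows 3-5 and columns 3-5 of the
# board, and would return [0, 0, 9, 6, 0, 0, 0, 0, 3].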
|
[
"[email protected]"
] | |
2dc0161a4729cda94a80a95977ad4a8515d70974
|
938a496fe78d5538af94017c78a11615a8498682
|
/algorithms/901-/1104.path-in-zigzag-labelled-binary-tree.py
|
2b2367b8413873d7a0cf319fc2d88320522f4c2d
|
[] |
no_license
|
huilizhou/Leetcode-pyhton
|
261280044d15d0baeb227248ade675177efdb297
|
6ae85bf79c5a21735e3c245c0c256f29c1c60926
|
refs/heads/master
| 2020-03-28T15:57:52.762162 | 2019-11-26T06:14:13 | 2019-11-26T06:14:13 | 148,644,059 | 8 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 901 |
py
|
# Path In Zigzag Labelled Binary Tree
class Solution(object):
def pathInZigZagTree(self, label):
"""
:type label: int
:rtype: List[int]
"""
"""
我们会发现一个规律,在偶数行,原索引和逆序后的索引值加在一起,
等于该行最小索引和最大索引的值(因为每一行都是一个等差数列),
而这个值也恰好等于该行最小索引值的3倍减去1(因为下一行开始的索引是前一行开始索引的2倍)。
"""
if label == 1:
return [label]
res = [label]
while label > 1:
res.append(label // 2)
label //= 2
res.reverse()
for i in range(1, len(res) - 1):
if (i + 1) % 2 != len(res) % 2:
res[i] = (3 * (2**i)) - 1 - res[i]
return res
print(Solution().pathInZigZagTree(14))
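# Worked example for the call above: the plain parent chain of label 14 is
# [1, 3, 7, 14]; only the level with index 2 needs mirroring here, so 7 is
# replaced by 3*2**2 - 1 - 7 = 4 and the printed result is [1, 3, 4, 14].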
|
[
"[email protected]"
] | |
a1edc42591bba80bbc072e3e8e1a466e37df2340
|
8adaec7baeb4b6ef02e162f5f8d437e19c6feee5
|
/tests/mappers/test_java_mapper.py
|
fb7bc1672ebafc7a83985ab4e5e5364615effc9a
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/oozie-to-airflow
|
3722e6a344e9592f213d901609f7dac51acd4314
|
5a329965d24c543853e081c16d19daf35c7f6dc0
|
refs/heads/master
| 2023-08-16T21:30:34.758058 | 2023-08-10T11:00:04 | 2023-08-10T11:00:04 | 162,367,272 | 69 | 44 |
Apache-2.0
| 2023-09-12T18:59:53 | 2018-12-19T01:43:38 |
Python
|
UTF-8
|
Python
| false | false | 11,211 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests Java mapper"""
import ast
import unittest
from typing import List
from xml.etree import ElementTree as ET
from o2a.converter.task import Task
from o2a.mappers import java_mapper
from o2a.o2a_libs.property_utils import PropertySet
class TestJavaMapper(unittest.TestCase):
def setUp(self) -> None:
# language=XML
java_node_with_multiple_opt_str = """
<java>
<resource-manager>${resourceManager}</resource-manager>
<name-node>${nameNode}</name-node>
<configuration>
<property>
<name>mapred.job.queue.name</name>
<value>${queueName}</value>
</property>
</configuration>
<main-class>org.apache.oozie.example.DemoJavaMain</main-class>
<java-opt>-Dtest1=val1_mult</java-opt>
<java-opt>-Dtest2=val2_mult</java-opt>
<arg>Hello</arg>
<arg>Oozie!</arg>
</java>
"""
self.java_node_with_multiple_opt = ET.fromstring(java_node_with_multiple_opt_str)
# language=XML
java_node_with_single_opts = """
<java>
<resource-manager>${resourceManager}</resource-manager>
<name-node>${nameNode}</name-node>
<configuration>
<property>
<name>mapred.job.queue.name</name>
<value>${queueName}</value>
</property>
</configuration>
<main-class>org.apache.oozie.example.DemoJavaMain</main-class>
<java-opts>-Dtest1=val1 -Dtest2=val2</java-opts>
<arg>Hello</arg>
<arg>Oozie!</arg>
</java>
"""
self.java_node_with_single_opts = ET.fromstring(java_node_with_single_opts)
def test_arguments_are_parsed_correctly_without_jar_files(self):
mapper = self._get_java_mapper(
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
},
config={},
)
mapper.on_parse_node()
self.assertEqual("test_id", mapper.name)
self.assertEqual("org.apache.oozie.example.DemoJavaMain", mapper.main_class)
self.assertEqual(["-Dtest1=val1", "-Dtest2=val2"], mapper.java_opts)
self.assertEqual(
PropertySet(
config={},
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
},
action_node_properties={"mapred.job.queue.name": "{{queueName}}"},
),
mapper.props,
)
self.assertEqual([], mapper.jar_files_in_hdfs)
self.assertEqual([], mapper.jar_files)
def test_arguments_are_parsed_correctly_with_multiple_opts(self):
mapper = self._get_java_mapper(
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
},
single_opts=False,
config={},
)
mapper.on_parse_node()
self.assertEqual("test_id", mapper.name)
self.assertEqual("org.apache.oozie.example.DemoJavaMain", mapper.main_class)
self.assertEqual(["-Dtest1=val1_mult", "-Dtest2=val2_mult"], mapper.java_opts)
self.assertEqual(
PropertySet(
config={},
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
},
action_node_properties={"mapred.job.queue.name": "{{queueName}}"},
),
mapper.props,
)
self.assertEqual([], mapper.jar_files_in_hdfs)
self.assertEqual([], mapper.jar_files)
def test_arguments_are_parsed_correctly_with_jar_files(self):
mapper = self._get_java_mapper(
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
},
config={},
jar_files=["test.jar", "test2.jar"],
)
mapper.on_parse_node()
self.assertEqual("test_id", mapper.name)
self.assertEqual("org.apache.oozie.example.DemoJavaMain", mapper.main_class)
self.assertEqual(["-Dtest1=val1", "-Dtest2=val2"], mapper.java_opts)
self.assertEqual(
PropertySet(
config={},
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
},
action_node_properties={"mapred.job.queue.name": "{{queueName}}"},
),
mapper.props,
)
self.assertEqual(
[
"hdfs:///user/USER/examples/apps/java/lib/test.jar",
"hdfs:///user/USER/examples/apps/java/lib/test2.jar",
],
mapper.jar_files_in_hdfs,
)
self.assertEqual(["test.jar", "test2.jar"], mapper.jar_files)
def test_mapred_ops_append_list_mapred_child(self):
mapper = self._get_java_mapper(
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
"mapred.child.java.opts": "-Dmapred1=val1 -Dmapred2=val2",
},
config={},
jar_files=["test.jar", "test2.jar"],
)
mapper.on_parse_node()
self.assertEqual("test_id", mapper.name)
self.assertEqual("org.apache.oozie.example.DemoJavaMain", mapper.main_class)
self.assertEqual(
["-Dmapred1=val1", "-Dmapred2=val2", "-Dtest1=val1", "-Dtest2=val2"], mapper.java_opts
)
self.assertEqual(
PropertySet(
config={},
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
"mapred.child.java.opts": "-Dmapred1=val1 -Dmapred2=val2",
},
action_node_properties={"mapred.job.queue.name": "{{queueName}}"},
),
mapper.props,
)
self.assertEqual(
[
"hdfs:///user/USER/examples/apps/java/lib/test.jar",
"hdfs:///user/USER/examples/apps/java/lib/test2.jar",
],
mapper.jar_files_in_hdfs,
)
self.assertEqual(["test.jar", "test2.jar"], mapper.jar_files)
def test_mapred_ops_append_list_mapreduce_map(self):
mapper = self._get_java_mapper(
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
"mapreduce.map.java.opts": "-Dmapreduce1=val1 -Dmapreduce2=val2",
},
config={},
jar_files=["test.jar", "test2.jar"],
)
mapper.on_parse_node()
self.assertEqual("test_id", mapper.name)
self.assertEqual("org.apache.oozie.example.DemoJavaMain", mapper.main_class)
self.assertEqual(
["-Dmapreduce1=val1", "-Dmapreduce2=val2", "-Dtest1=val1", "-Dtest2=val2"], mapper.java_opts
)
self.assertEqual(
PropertySet(
config={},
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
"mapreduce.map.java.opts": "-Dmapreduce1=val1 -Dmapreduce2=val2",
},
action_node_properties={"mapred.job.queue.name": "{{queueName}}"},
),
mapper.props,
)
self.assertEqual(
[
"hdfs:///user/USER/examples/apps/java/lib/test.jar",
"hdfs:///user/USER/examples/apps/java/lib/test2.jar",
],
mapper.jar_files_in_hdfs,
)
self.assertEqual(["test.jar", "test2.jar"], mapper.jar_files)
def test_to_tasks_and_relations(self):
mapper = self._get_java_mapper(
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
},
config={},
)
mapper.on_parse_node()
tasks, relations = mapper.to_tasks_and_relations()
self.assertEqual(
[
Task(
task_id="test_id",
template_name="java.tpl",
trigger_rule="one_success",
template_params={
"props": PropertySet(
config={},
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
},
action_node_properties={"mapred.job.queue.name": "{{queueName}}"},
),
"hdfs_files": [],
"hdfs_archives": [],
"main_class": "org.apache.oozie.example.DemoJavaMain",
"jar_files_in_hdfs": [],
"args": ["Hello", "Oozie!"],
},
)
],
tasks,
)
self.assertEqual(relations, [])
def test_required_imports(self):
imps = self._get_java_mapper(
job_properties={
"userName": "user",
"oozie.wf.application.path": "hdfs:///user/USER/examples/apps/java",
},
config={},
).required_imports()
imp_str = "\n".join(imps)
self.assertIsNotNone(ast.parse(imp_str))
def _get_java_mapper(self, job_properties, config, single_opts: bool = True, jar_files: List[str] = None):
mapper = java_mapper.JavaMapper(
oozie_node=self.java_node_with_single_opts if single_opts else self.java_node_with_multiple_opt,
name="test_id",
dag_name="DAG_NAME_A",
props=PropertySet(job_properties=job_properties, config=config),
jar_files=jar_files if jar_files else [],
input_directory_path="/tmp/input-directory-path/",
)
return mapper
|
[
"[email protected]"
] | |
3b77497dd6907b028972597fae49f10765bf24f5
|
32b9ed968247fd0f5b2291307059f2de4288a951
|
/utils/FID/fid_score.py
|
5b9b252f9100fdfbae96a778a53c5f0ec824979e
|
[] |
no_license
|
jshi31/T2ONet
|
4aaf57636e2caf8f8d93ba742be8b4ebaaefe30d
|
928cdc3311e887f3676a55db5d544fee5ac71a3f
|
refs/heads/master
| 2023-05-25T22:14:54.441849 | 2023-05-10T06:45:02 | 2023-05-10T06:45:02 | 247,373,510 | 18 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,533 |
py
|
#!/usr/bin/env python3
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from PIL import Image
try:
from tqdm import tqdm
except ImportError:
    # If tqdm is not available, provide a mock version of it
def tqdm(x): return x
from utils.FID.inception import InceptionV3
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('path', type=str, nargs=2,
help=('Path to the generated images or '
'to .npz statistic files'))
parser.add_argument('--batch-size', type=int, default=50,
help='Batch size to use')
parser.add_argument('--dims', type=int, default=2048,
choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
help=('Dimensionality of Inception features to use. '
'By default, uses pool3 features'))
parser.add_argument('-c', '--gpu', default='', type=str,
help='GPU to use (leave blank for CPU only)')
def imread(filename):
"""
Loads an image file into a (height, width, 3) uint8 ndarray.
"""
return np.asarray(Image.open(filename), dtype=np.uint8)[..., :3]
def get_activations(files, model, batch_size=50, dims=2048,
cuda=False, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
if batch_size > len(files):
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = len(files)
pred_arr = np.empty((len(files), dims))
    n_batches = (len(files) + batch_size - 1) // batch_size
    for i in tqdm(range(0, len(files), batch_size)):
        if verbose:
            print('\rPropagating batch %d/%d' % (i // batch_size + 1, n_batches),
                  end='', flush=True)
start = i
end = i + batch_size
images = np.array([imread(str(f)).astype(np.float32)
for f in files[start:end]])
# Reshape to (n_images, 3, height, width)
images = images.transpose((0, 3, 1, 2))
images /= 255
batch = torch.from_numpy(images).type(torch.FloatTensor)
if cuda:
batch = batch.cuda()
pred = model(batch)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.size(2) != 1 or pred.size(3) != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr[start:end] = pred.cpu().data.numpy().reshape(pred.size(0), -1)
if verbose:
print(' done')
return pred_arr
def get_activation(img, model):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- img : (bs, 3, h, w) RGB \in [0, 1]
-- model : Instance of inception model
Returns:
-- A numpy array of dimension (bs, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
pred = model(img)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.size(2) != 1 or pred.size(3) != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr = pred.cpu().data.numpy().reshape(pred.size(0), -1)
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
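# Illustrative sanity check (hypothetical inputs): for two identical Gaussians,
# e.g. mu1 = mu2 = np.zeros(4) and sigma1 = sigma2 = np.eye(4), the expression
# reduces to 0 + 4 + 4 - 2*4 = 0, so the returned distance should be ~0.0.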
def calculate_activation_statistics(files, model, batch_size=50,
dims=2048, cuda=False, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : The images numpy array is split into batches with
batch size batch_size. A reasonable batch size
depends on the hardware.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the
number of calculated batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
act = get_activations(files, model, batch_size, dims, cuda, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
m, s = calculate_activation_statistics(files, model, batch_size,
dims, cuda)
return m, s
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
"""Calculates the FID of two paths"""
for p in paths:
if not os.path.exists(p):
raise RuntimeError('Invalid path: %s' % p)
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx])
if cuda:
model.cuda()
m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size,
dims, cuda)
m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size,
dims, cuda)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
if __name__ == '__main__':
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
fid_value = calculate_fid_given_paths(args.path,
args.batch_size,
args.gpu != '',
args.dims)
print('FID: ', fid_value)
|
[
"[email protected]"
] | |
456e7e165ba3a548ac23e58a170994066a37145e
|
42b30769e4c676014d3fd8753bc4b1bbcc2a3e3c
|
/database/compiled_templates/tagging_common.mako.py
|
d004b47abc9ed5a4ac6dd4028d233e554384040c
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
psnehal/MethylSig
|
4e510685349b1712c92667d40e795fa798ee1702
|
5efad71e71ff2515feff2e49579c856ef9a1bbd8
|
refs/heads/master
| 2020-05-18T21:03:51.240410 | 2015-09-03T20:23:30 | 2015-09-03T20:23:30 | 26,826,252 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 16,996 |
py
|
# -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1415293119.109046
_template_filename=u'templates/tagging_common.mako'
_template_uri=u'/tagging_common.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['render_tagging_element_html', 'render_tool_tagging_elements', 'render_individual_tagging_element', 'render_community_tagging_element']
# SOURCE LINE 1
from cgi import escape
from galaxy.web.framework.helpers import iff
from random import random
from sys import maxint
from math import floor
from galaxy.model import Tag, ItemTagAssociation
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
in_form = context.get('in_form', UNDEFINED)
def render_community_tagging_element(tagged_item=None,elt_context=None,use_toggle_link=False,tag_click_fn='default_tag_click_fn'):
return render_render_community_tagging_element(context.locals_(__M_locals),tagged_item,elt_context,use_toggle_link,tag_click_fn)
elt_context = context.get('elt_context', UNDEFINED)
use_toggle_link = context.get('use_toggle_link', UNDEFINED)
user = context.get('user', UNDEFINED)
def render_individual_tagging_element(user=None,tagged_item=None,elt_context=None,use_toggle_link=True,in_form=False,input_size='15',tag_click_fn='default_tag_click_fn',get_toggle_link_text_fn='default_get_toggle_link_text_fn',editable=True,render_add_tag_button=True):
return render_render_individual_tagging_element(context.locals_(__M_locals),user,tagged_item,elt_context,use_toggle_link,in_form,input_size,tag_click_fn,get_toggle_link_text_fn,editable,render_add_tag_button)
tag_click_fn = context.get('tag_click_fn', UNDEFINED)
input_size = context.get('input_size', UNDEFINED)
tagged_item = context.get('tagged_item', UNDEFINED)
tag_type = context.get('tag_type', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 8
__M_writer(u'\n\n')
# SOURCE LINE 11
if tagged_item is not None:
# SOURCE LINE 12
if tag_type == "individual":
# SOURCE LINE 13
__M_writer(u' ')
__M_writer(unicode(render_individual_tagging_element( user=user, tagged_item=tagged_item, elt_context=elt_context, in_form=in_form, input_size=input_size, tag_click_fn=tag_click_fn, use_toggle_link=use_toggle_link )))
__M_writer(u'\n')
# SOURCE LINE 14
elif tag_type == "community":
# SOURCE LINE 15
__M_writer(u' ')
__M_writer(unicode(render_community_tagging_element(tagged_item=tagged_item, elt_context=elt_context, tag_click_fn=tag_click_fn)))
__M_writer(u'\n')
pass
pass
# SOURCE LINE 18
__M_writer(u'\n')
# SOURCE LINE 85
__M_writer(u'\n\n')
# SOURCE LINE 100
__M_writer(u'\n\n')
# SOURCE LINE 118
__M_writer(u'\n\n\n')
# SOURCE LINE 231
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_render_tagging_element_html(context,elt_id=None,tags=None,editable=True,use_toggle_link=True,input_size='15',in_form=False,tag_type='individual',render_add_tag_button=True):
context.caller_stack._push_frame()
try:
h = context.get('h', UNDEFINED)
unicode = context.get('unicode', UNDEFINED)
isinstance = context.get('isinstance', UNDEFINED)
len = context.get('len', UNDEFINED)
str = context.get('str', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 20
__M_writer(u'\n')
# SOURCE LINE 22
__M_writer(u' ')
num_tags = len( tags )
# SOURCE LINE 24
__M_writer(u'\n <div class="tag-element"\n')
# SOURCE LINE 26
if elt_id:
# SOURCE LINE 27
__M_writer(u' id="')
__M_writer(unicode(elt_id))
__M_writer(u'"\n')
pass
# SOURCE LINE 30
if num_tags == 0 and not editable:
# SOURCE LINE 31
__M_writer(u' style="display: none"\n')
pass
# SOURCE LINE 33
__M_writer(u' >\n')
# SOURCE LINE 34
if use_toggle_link:
# SOURCE LINE 35
__M_writer(u' <a class="toggle-link" href="#">')
__M_writer(unicode(num_tags))
__M_writer(u' Tag')
__M_writer(unicode(iff( num_tags == 1, "", "s")))
__M_writer(u'</a>\n')
pass
# SOURCE LINE 37
__M_writer(u' <div class="tag-area \n')
# SOURCE LINE 38
if tag_type == 'individual':
# SOURCE LINE 39
__M_writer(u' individual-tag-area\n')
pass
# SOURCE LINE 41
__M_writer(u' ">\n\n')
# SOURCE LINE 44
for tag in tags:
# SOURCE LINE 45
__M_writer(u' ')
## Handle both Tag and ItemTagAssociation objects.
if isinstance( tag, Tag ):
tag_name = tag.name
tag_value = None
elif isinstance( tag, ItemTagAssociation ):
tag_name = tag.user_tname
tag_value = tag.user_value
## Convert tag name, value to unicode.
if isinstance( tag_name, str ):
tag_name = unicode( escape( tag_name ), 'utf-8' )
if tag_value:
tag_value = unicode( escape( tag_value ), 'utf-8' )
if tag_value:
tag_str = tag_name + ":" + tag_value
else:
tag_str = tag_name
# SOURCE LINE 62
__M_writer(u'\n <span class="tag-button">\n <span class="tag-name">')
# SOURCE LINE 64
__M_writer(unicode(tag_str))
__M_writer(u'</span>\n')
# SOURCE LINE 65
if editable:
# SOURCE LINE 66
__M_writer(u' <img class="delete-tag-img" src="')
__M_writer(unicode(h.url_for('/static/images/delete_tag_icon_gray.png')))
__M_writer(u'"/>\n')
pass
# SOURCE LINE 68
__M_writer(u' </span>\n')
pass
# SOURCE LINE 70
__M_writer(u' \n')
# SOURCE LINE 72
if editable:
# SOURCE LINE 73
if in_form:
# SOURCE LINE 74
__M_writer(u' <textarea class="tag-input" rows=\'1\' cols=\'')
__M_writer(unicode(input_size))
__M_writer(u"'></textarea>\n")
# SOURCE LINE 75
else:
# SOURCE LINE 76
__M_writer(u' <input class="tag-input" type=\'text\' size=\'')
__M_writer(unicode(input_size))
__M_writer(u"'/>\n")
pass
# SOURCE LINE 79
if render_add_tag_button:
# SOURCE LINE 80
__M_writer(u" <img src='")
__M_writer(unicode(h.url_for('/static/images/fugue/tag--plus.png')))
__M_writer(u'\' class="add-tag-button" title="Add tags"/>\n')
pass
pass
# SOURCE LINE 83
__M_writer(u' </div>\n </div>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_render_tool_tagging_elements(context):
context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
self = context.get('self', UNDEFINED)
trans = context.get('trans', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 88
__M_writer(u'\n ')
# SOURCE LINE 89
elt_id = int ( floor ( random()*maxint ) )
tags = trans.app.tag_handler.get_tool_tags( trans )
# SOURCE LINE 92
__M_writer(u'\n ')
# SOURCE LINE 93
__M_writer(unicode(self.render_tagging_element_html(elt_id=elt_id, \
tags=tags, \
editable=False, \
use_toggle_link=False )))
# SOURCE LINE 96
__M_writer(u'\n <script type="text/javascript">\n init_tag_click_function($(\'#')
# SOURCE LINE 98
__M_writer(unicode(elt_id))
__M_writer(u"'), tool_tag_click);\n </script>\n")
return ''
finally:
context.caller_stack._pop_frame()
def render_render_individual_tagging_element(context,user=None,tagged_item=None,elt_context=None,use_toggle_link=True,in_form=False,input_size='15',tag_click_fn='default_tag_click_fn',get_toggle_link_text_fn='default_get_toggle_link_text_fn',editable=True,render_add_tag_button=True):
context.caller_stack._push_frame()
try:
isinstance = context.get('isinstance', UNDEFINED)
unicode = context.get('unicode', UNDEFINED)
int = context.get('int', UNDEFINED)
h = context.get('h', UNDEFINED)
self = context.get('self', UNDEFINED)
dict = context.get('dict', UNDEFINED)
str = context.get('str', UNDEFINED)
trans = context.get('trans', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 122
__M_writer(u'\n')
# SOURCE LINE 124
__M_writer(u' ')
# Useful ids.
tagged_item_id = str( trans.security.encode_id ( tagged_item.id ) )
elt_id = int ( floor ( random()*maxint ) )
# Get list of user's item tags. TODO: implement owner_tags for all taggable objects and use here.
item_tags = [ tag for tag in tagged_item.tags if ( tag.user == user ) ]
# SOURCE LINE 131
__M_writer(u'\n \n')
# SOURCE LINE 134
__M_writer(u' ')
__M_writer(unicode(self.render_tagging_element_html(elt_id=elt_id, tags=item_tags, editable=editable, use_toggle_link=use_toggle_link, input_size=input_size, in_form=in_form, render_add_tag_button=render_add_tag_button)))
__M_writer(u'\n \n')
# SOURCE LINE 137
__M_writer(u' <script type="text/javascript">\n //\n // Set up autocomplete tagger.\n //\n\n //\n // Default function get text to display on the toggle link.\n //\n var default_get_toggle_link_text_fn = function(tags)\n {\n var text = "";\n var num_tags = obj_length(tags);\n if (num_tags != 0)\n {\n text = num_tags + (num_tags != 1 ? " Tags" : " Tag");\n /*\n // Show first N tags; hide the rest.\n var max_to_show = 1;\n \n // Build tag string.\n var tag_strs = new Array();\n var count = 0;\n for (tag_name in tags)\n {\n tag_value = tags[tag_name];\n tag_strs[tag_strs.length] = build_tag_str(tag_name, tag_value);\n if (++count == max_to_show)\n break;\n }\n tag_str = tag_strs.join(", ");\n \n // Finalize text.\n var num_tags_hiding = num_tags - max_to_show;\n text = "Tags: " + tag_str + \n (num_tags_hiding != 0 ? " and " + num_tags_hiding + " more" : "");\n */\n }\n else\n {\n // No tags.\n text = "Add tags";\n }\n return text;\n };\n \n // Default function to handle a tag click.\n var default_tag_click_fn = function(tag_name, tag_value) { };\n \n ')
# SOURCE LINE 185
## Build dict of tag name, values.
tag_names_and_values = dict()
for tag in item_tags:
tag_name = tag.user_tname
tag_value = ""
if tag.value is not None:
tag_value = tag.user_value
## Tag names and values may be string or unicode object.
if isinstance( tag_name, str ):
tag_names_and_values[unicode(tag_name, 'utf-8')] = unicode(tag_value, 'utf-8')
else: ## isInstance( tag_name, unicode ):
tag_names_and_values[tag_name] = tag_value
# SOURCE LINE 198
__M_writer(u'\n var options =\n {\n tags : ')
# SOURCE LINE 201
__M_writer(unicode(h.dumps(tag_names_and_values)))
__M_writer(u',\n editable : ')
# SOURCE LINE 202
__M_writer(unicode(iff( editable, 'true', 'false' )))
__M_writer(u',\n get_toggle_link_text_fn: ')
# SOURCE LINE 203
__M_writer(unicode(get_toggle_link_text_fn))
__M_writer(u',\n tag_click_fn: ')
# SOURCE LINE 204
__M_writer(unicode(tag_click_fn))
__M_writer(u',\n')
# SOURCE LINE 206
__M_writer(u' ajax_autocomplete_tag_url: "')
__M_writer(unicode(h.url_for( controller='/tag', action='tag_autocomplete_data', item_id=tagged_item_id, item_class=tagged_item.__class__.__name__ )))
__M_writer(u'",\n ajax_add_tag_url: "')
# SOURCE LINE 207
__M_writer(unicode(h.url_for( controller='/tag', action='add_tag_async', item_id=tagged_item_id, item_class=tagged_item.__class__.__name__, context=elt_context )))
__M_writer(u'",\n ajax_delete_tag_url: "')
# SOURCE LINE 208
__M_writer(unicode(h.url_for( controller='/tag', action='remove_tag_async', item_id=tagged_item_id, item_class=tagged_item.__class__.__name__, context=elt_context )))
__M_writer(u'",\n delete_tag_img: "')
# SOURCE LINE 209
__M_writer(unicode(h.url_for('/static/images/delete_tag_icon_gray.png')))
__M_writer(u'",\n delete_tag_img_rollover: "')
# SOURCE LINE 210
__M_writer(unicode(h.url_for('/static/images/delete_tag_icon_white.png')))
__M_writer(u'",\n use_toggle_link: ')
# SOURCE LINE 211
__M_writer(unicode(iff( use_toggle_link, 'true', 'false' )))
__M_writer(u"\n };\n \n $('#")
# SOURCE LINE 214
__M_writer(unicode(elt_id))
__M_writer(u"').autocomplete_tagging(options);\n </script>\n \n")
# SOURCE LINE 218
__M_writer(u' <style>\n .tag-area {\n display: ')
# SOURCE LINE 220
__M_writer(unicode(iff( use_toggle_link, "none", "block" )))
__M_writer(u';\n }\n </style>\n\n <noscript>\n <style>\n .tag-area {\n display: block;\n }\n </style>\n </noscript>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_render_community_tagging_element(context,tagged_item=None,elt_context=None,use_toggle_link=False,tag_click_fn='default_tag_click_fn'):
context.caller_stack._push_frame()
try:
int = context.get('int', UNDEFINED)
self = context.get('self', UNDEFINED)
trans = context.get('trans', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 103
__M_writer(u'\n')
# SOURCE LINE 105
__M_writer(u' ')
elt_id = int ( floor ( random()*maxint ) )
community_tags = trans.app.tag_handler.get_community_tags( trans, item=tagged_item, limit=5 )
# SOURCE LINE 108
__M_writer(u'\n ')
# SOURCE LINE 109
__M_writer(unicode(self.render_tagging_element_html(elt_id=elt_id, \
tags=community_tags, \
use_toggle_link=use_toggle_link, \
editable=False, tag_type="community")))
# SOURCE LINE 112
__M_writer(u'\n \n')
# SOURCE LINE 115
__M_writer(u' <script type="text/javascript">\n init_tag_click_function($(\'#')
# SOURCE LINE 116
__M_writer(unicode(elt_id))
__M_writer(u"'), ")
__M_writer(unicode(tag_click_fn))
__M_writer(u');\n </script>\n')
return ''
finally:
context.caller_stack._pop_frame()
|
[
"snehal@lnx-306501.(none)"
] |
snehal@lnx-306501.(none)
|
727b274021936dbec7e423339760a2a165e22cd7
|
a70e4ba37ff2267b23a4d70282577f03086ab98d
|
/setup.py
|
0cd40d03a9499c1619570decacc6446dafc521f1
|
[
"MIT"
] |
permissive
|
i5o/xo-retroscope
|
b731b8511054a2b8144e85a9b545dea8d02d494b
|
0e61b8eb41828356e6a49402f1bdb93c285486f4
|
refs/heads/master
| 2016-09-05T14:58:04.088218 | 2014-01-03T19:39:47 | 2014-01-03T19:39:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 169 |
py
|
#!/usr/bin/env python
try:
from sugar3.activity import bundlebuilder
bundlebuilder.start()
except ImportError:
print "Error: sugar.activity.Bundlebuilder not found."
|
[
"[email protected]"
] | |
01bf3ba6e3fdeb2cfdc75acfd7cae65d5ce05eba
|
15cb0ddd678abe1e1f7a905fab0305079bfc4007
|
/source/vsm-dashboard/vsm_dashboard/dashboards/vsm/monitor-status/tables.py
|
785a28bfaa382c1d8a76a769ccba4105a78ac32b
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
ramkrsna/virtual-storage-manager
|
3563baf9763a0925af77cc13245e0896c20a2ced
|
78125bfb4dd4d78ff96bc3274c8919003769c545
|
refs/heads/master
| 2023-02-18T08:52:56.769486 | 2016-07-01T06:46:53 | 2016-07-01T06:46:53 | 63,155,952 | 0 | 0 |
NOASSERTION
| 2023-02-07T06:07:38 | 2016-07-12T12:27:16 |
Python
|
UTF-8
|
Python
| false | false | 4,595 |
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Intel Corporation, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
from django.utils.datastructures import SortedDict
from django import forms
from django.utils.safestring import mark_safe
from horizon import tables
from horizon.utils import html
from horizon import exceptions
from vsm_dashboard.api import vsm as vsmapi
from .utils import checkbox_transform
STRING_SEPARATOR = "__"
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, id):
# todo update zone info in apiclient CEPH_LOG
try:
_zones = vsmapi.get_zone_list(request,)
except:
exceptions.handle(request,
                              _('Unable to retrieve server list. '))
zones = {}
for _zone in _zones:
zones.setdefault(_zone.id, _zone.name)
_server = vsmapi.get_server(request, id)
server = {"id": _server.id,
"name": _server.host,
"primary_public_ip": _server.primary_public_ip,
"secondary_public_ip": _server.secondary_public_ip,
"cluster_ip": _server.cluster_ip,
"zone_id": _server.zone_id,
"zone": "",
"osds": _server.osds,
"type": _server.type,
"status": _server.status}
if "monitor" in _server.type:
server['is_monitor'] = "yes"
else:
server['is_monitor'] = "no"
if _server.zone_id in zones:
server['zone'] = zones[_server.zone_id]
return server
STATUS_DISPLAY_CHOICES = (
("resize", "Resize/Migrate"),
("verify_resize", "Confirm or Revert Resize/Migrate"),
("revert_resize", "Revert Resize/Migrate"),
)
class ListMonitorStatusTable(tables.DataTable):
STATUS_CHOICES = (
("active", True),
("available", True),
("Active", True),
)
#server_id = tables.Column("id", verbose_name=_("ID"))
ordinal = tables.Column("id", verbose_name=_("ordinal"))
name = tables.Column("name", verbose_name=_("Name"))
address = tables.Column("address", verbose_name=_("Address"))
health = tables.Column("health", verbose_name=_("Health"))
details = tables.Column("details", verbose_name=_("Detail"))
skew = tables.Column("skew", verbose_name=_("Skew"))
latency = tables.Column("latency", verbose_name=_("Latency"))
kb_total = tables.Column("mb_total", verbose_name=_("MB Total (disk)"))
kb_used = tables.Column("mb_used", verbose_name=_("MB Used (disk)"))
kb_avail = tables.Column("mb_avail", verbose_name=_("MB Available (disk)"))
percent_avail = tables.Column("percent_avail", verbose_name=_("Percent Available"))
updated_at = tables.Column("updated_at", verbose_name=_("Updated at"), classes=("span2",))
class Meta:
name = "monitor_list"
verbose_name = _("Monitor List")
#status_columns = ['status']
row_class = UpdateRow
multi_select = False
def get_object_id(self, datum):
if hasattr(datum, "id"):
return datum.id
else:
return datum["id"]
def get_object_display(self, datum):
if hasattr(datum, "name"):
return datum.id
else:
return datum["name"]
def empty_value_maker(type, name, value, attrs=None):
def _empty_value_caller(datum):
if type == "text":
widget = forms.TextInput()
elif type == "choice":
widget = forms.ChoiceField().widget
elif type == "checkbox":
widget = forms.CheckboxInput()
data = dict(name=name, value=value)
if name in datum.keys():
data.update(datum[name])
if attrs:
data.update(dict(attrs=attrs))
data = widget.render(**data)
return data
return _empty_value_caller
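# Illustrative note (not part of the upstream VSM source): a table column could
# wrap the factory above to render an editable cell. The names below are made
# up for the sketch.
#
#     zone_input = empty_value_maker("text", "zone", "")
#     html_fragment = zone_input({"id": 1, "name": "ceph-node-1"})
#
# The returned callable renders a Django TextInput named "zone" with the given
# default value; if the datum itself carried a "zone" key holding widget kwargs,
# those would be merged into the render() call.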
|
[
"[email protected]"
] | |
75aa59ccac8e96f75f760fc5720ee19d5dbb3fc4
|
26771494974942f4ab18d2cd8247506c344e1d14
|
/133-v2-cloneGraph.py
|
da32838a0365c7696c37221dfa93c8d27f7d203e
|
[] |
no_license
|
wangyunpengbio/LeetCode
|
9f4c6076e067c5e847d662679483f737d40e8ca5
|
cec1fd11fe43177abb2d4236782c0f116e6e8bce
|
refs/heads/master
| 2020-04-29T22:28:25.899420 | 2020-04-03T07:37:26 | 2020-04-03T07:37:26 | 176,448,957 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,326 |
py
|
"""
# Definition for a Node.
class Node:
def __init__(self, val, neighbors):
self.val = val
self.neighbors = neighbors
"""
class Solution:
def cloneGraph(self, node: 'Node') -> 'Node':
        # Depth-first traversal, non-recursive (iterative with an explicit stack)
        if node is None: return None
resNode2CopyNode = {}
stack = [node]
copy = Node(node.val,None)
resNode2CopyNode[node] = copy
while stack:
current = stack.pop()
neighbors = current.neighbors
            if neighbors is None: continue  # the node had no neighbors in the original graph, so skip it
copyNode = resNode2CopyNode[current]
if copyNode.neighbors == None:
copyNode.neighbors = []
            # Walk every neighbor of the current node and fill in the neighbor list of the
            # current node's copy: for an unseen neighbor, create its copy and push the
            # original onto the stack; for an already-seen neighbor, fetch its copy from the dict.
for nei in neighbors:
if nei in resNode2CopyNode:
copyneighbor = resNode2CopyNode[nei]
else:
copyneighbor = Node(nei.val,None)
resNode2CopyNode[nei] = copyneighbor
stack.append(nei)
copyNode.neighbors.append(copyneighbor)
return copy
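# A minimal usage sketch (added for illustration; not part of the original
# submission). It assumes the Node definition from the docstring above and
# clones the two-node graph 1 <-> 2, checking the copy is a distinct object graph.
if __name__ == "__main__":
    class Node:
        def __init__(self, val, neighbors):
            self.val = val
            self.neighbors = neighbors

    a, b = Node(1, None), Node(2, None)
    a.neighbors, b.neighbors = [b], [a]
    cloned = Solution().cloneGraph(a)
    assert cloned is not a and cloned.val == 1
    assert cloned.neighbors[0] is not b and cloned.neighbors[0].val == 2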
|
[
"[email protected]"
] | |
0f61efa724ff6f8c229649cf3b50c92d8bd7b5b1
|
7ba22c9826a1574777a08fb634ff15c56de6cb98
|
/syntaxnet/dragnn/tools/evaluator.py
|
ae60e5d4f8beeb1996dd1633c94b9a5e2710e180
|
[] |
no_license
|
dhanya1/full_cyclist
|
02b85b8331f8ca9364169484ab97b32920cbbd14
|
dd12c8d8a3deaaea15041e54f2e459a5041f11c2
|
refs/heads/master
| 2022-10-17T13:36:51.886476 | 2018-07-30T15:46:02 | 2018-07-30T15:46:02 | 142,896,293 | 0 | 1 | null | 2022-10-05T10:11:01 | 2018-07-30T15:46:15 |
Python
|
UTF-8
|
Python
| false | false | 6,917 |
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs a DRAGNN model on a given set of CoNLL-formatted sentences.
Sample invocation:
bazel run -c opt <...>:evaluator -- \
--master_spec="/path/to/master-spec" \
--checkpoint_file="/path/to/model/name.checkpoint" \
--input_file="/path/to/input/documents/test.connlu"
"""
import os
import re
import time
from absl import flags
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.client import timeline
from tensorflow.python.platform import gfile
from dragnn.protos import spec_pb2
from dragnn.python import evaluation
from dragnn.python import graph_builder
from dragnn.python import sentence_io
from dragnn.python import spec_builder
from syntaxnet import sentence_pb2
FLAGS = flags.FLAGS
flags.DEFINE_string('master_spec', '',
'Path to text file containing a DRAGNN master spec to run.')
flags.DEFINE_string('resource_dir', '',
'Optional base directory for resources in the master spec.')
flags.DEFINE_bool('complete_master_spec', False, 'Whether the master_spec '
'needs the lexicon and other resources added to it.')
flags.DEFINE_string('checkpoint_file', '', 'Path to trained model checkpoint.')
flags.DEFINE_string('input_file', '',
'File of CoNLL-formatted sentences to read from.')
flags.DEFINE_string('output_file', '',
'File path to write annotated sentences to.')
flags.DEFINE_integer('max_batch_size', 2048, 'Maximum batch size to support.')
flags.DEFINE_string('inference_beam_size', '', 'Comma separated list of '
'component_name=beam_size pairs.')
flags.DEFINE_string('locally_normalize', '', 'Comma separated list of '
'component names to do local normalization on.')
flags.DEFINE_integer('threads', 10, 'Number of threads used for intra- and '
'inter-op parallelism.')
flags.DEFINE_string('timeline_output_file', '', 'Path to save timeline to. '
'If specified, the final iteration of the evaluation loop '
'will capture and save a TensorFlow timeline.')
flags.DEFINE_string('log_file', '', 'File path to write parser eval_bkp results.')
flags.DEFINE_string('language_name', '_', 'Name of language being parsed, '
'for logging.')
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
  # Parse the flags containing lists, using regular expressions.
# This matches and extracts key=value pairs.
component_beam_sizes = re.findall(r'([^=,]+)=(\d+)',
FLAGS.inference_beam_size)
# This matches strings separated by a comma. Does not return any empty
# strings.
components_to_locally_normalize = re.findall(r'[^,]+',
FLAGS.locally_normalize)
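  # For example (assumed flag values, not from any original invocation):
  #   --inference_beam_size="tagger=8,parser=16"
  #     -> component_beam_sizes == [('tagger', '8'), ('parser', '16')]
  #   --locally_normalize="tagger,parser"
  #     -> components_to_locally_normalize == ['tagger', 'parser']
  # The captured beam sizes are strings and are passed to the feed_dict below unchanged.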
# Reads master spec.
master_spec = spec_pb2.MasterSpec()
with gfile.FastGFile(FLAGS.master_spec) as fin:
text_format.Parse(fin.read(), master_spec)
# Rewrite resource locations.
if FLAGS.resource_dir:
for component in master_spec.component:
for resource in component.resource:
for part in resource.part:
part.file_pattern = os.path.join(FLAGS.resource_dir,
part.file_pattern)
if FLAGS.complete_master_spec:
spec_builder.complete_master_spec(master_spec, None, FLAGS.resource_dir)
# Graph building.
tf.logging.info('Building the graph')
g = tf.Graph()
with g.as_default(), tf.device('/device:CPU:0'):
hyperparam_config = spec_pb2.GridPoint()
hyperparam_config.use_moving_average = True
builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)
annotator = builder.add_annotation()
builder.add_saver()
tf.logging.info('Reading documents...')
input_corpus = sentence_io.ConllSentenceReader(FLAGS.input_file).corpus()
session_config = tf.ConfigProto(
log_device_placement=False,
intra_op_parallelism_threads=FLAGS.threads,
inter_op_parallelism_threads=FLAGS.threads)
with tf.Session(graph=g, config=session_config) as sess:
tf.logging.info('Initializing variables...')
sess.run(tf.global_variables_initializer())
tf.logging.info('Loading from checkpoint...')
sess.run('save/restore_all', {'save/Const:0': FLAGS.checkpoint_file})
tf.logging.info('Processing sentences...')
processed = []
start_time = time.time()
run_metadata = tf.RunMetadata()
for start in range(0, len(input_corpus), FLAGS.max_batch_size):
end = min(start + FLAGS.max_batch_size, len(input_corpus))
feed_dict = {annotator['input_batch']: input_corpus[start:end]}
for comp, beam_size in component_beam_sizes:
feed_dict['%s/InferenceBeamSize:0' % comp] = beam_size
for comp in components_to_locally_normalize:
feed_dict['%s/LocallyNormalize:0' % comp] = True
if FLAGS.timeline_output_file and end == len(input_corpus):
serialized_annotations = sess.run(
annotator['annotations'], feed_dict=feed_dict,
options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_metadata)
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
with open(FLAGS.timeline_output_file, 'w') as trace_file:
trace_file.write(trace.generate_chrome_trace_format())
else:
serialized_annotations = sess.run(
annotator['annotations'], feed_dict=feed_dict)
processed.extend(serialized_annotations)
tf.logging.info('Processed %d documents in %.2f seconds.',
len(input_corpus), time.time() - start_time)
pos, uas, las = evaluation.calculate_parse_metrics(input_corpus, processed)
if FLAGS.log_file:
with gfile.GFile(FLAGS.log_file, 'w') as f:
f.write('%s\t%f\t%f\t%f\n' % (FLAGS.language_name, pos, uas, las))
if FLAGS.output_file:
with gfile.GFile(FLAGS.output_file, 'w') as f:
for serialized_sentence in processed:
sentence = sentence_pb2.Sentence()
sentence.ParseFromString(serialized_sentence)
f.write(text_format.MessageToString(sentence) + '\n\n')
if __name__ == '__main__':
tf.app.run()
|
[
"[email protected]"
] | |
728b1bb43e0f48712ecc6c99f97521da32ede5ca
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/cloud/aiplatform/v1beta1/aiplatform-v1beta1-py/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py
|
af7d96df843df62f9e5ae00a3f86d2b6d0c6f798
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 170,703 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceAsyncClient
from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceClient
from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers
from google.cloud.aiplatform_v1beta1.services.vizier_service import transports
from google.cloud.aiplatform_v1beta1.services.vizier_service.transports.base import _API_CORE_VERSION
from google.cloud.aiplatform_v1beta1.services.vizier_service.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.aiplatform_v1beta1.types import study
from google.cloud.aiplatform_v1beta1.types import study as gca_study
from google.cloud.aiplatform_v1beta1.types import vizier_service
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-api-core >= 1.26.0 is required:
# - Delete all the api-core and auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
requires_api_core_lt_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"),
reason="This test requires google-api-core < 1.26.0",
)
requires_api_core_gte_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"),
reason="This test requires google-api-core >= 1.26.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert VizierServiceClient._get_default_mtls_endpoint(None) is None
assert VizierServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert VizierServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert VizierServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert VizierServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert VizierServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
VizierServiceClient,
VizierServiceAsyncClient,
])
def test_vizier_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'aiplatform.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
VizierServiceClient,
VizierServiceAsyncClient,
])
def test_vizier_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'aiplatform.googleapis.com:443'
def test_vizier_service_client_get_transport_class():
transport = VizierServiceClient.get_transport_class()
available_transports = [
transports.VizierServiceGrpcTransport,
]
assert transport in available_transports
transport = VizierServiceClient.get_transport_class("grpc")
assert transport == transports.VizierServiceGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"),
(VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient))
@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient))
def test_vizier_service_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "true"),
(VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "false"),
(VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient))
@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_vizier_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"),
(VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_vizier_service_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"),
(VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_vizier_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_vizier_service_client_client_options_from_dict():
with mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = VizierServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_create_study(transport: str = 'grpc', request_type=vizier_service.CreateStudyRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gca_study.Study(
name='name_value',
display_name='display_name_value',
state=gca_study.Study.State.ACTIVE,
inactive_reason='inactive_reason_value',
)
response = client.create_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CreateStudyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_study.Study)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == gca_study.Study.State.ACTIVE
assert response.inactive_reason == 'inactive_reason_value'
def test_create_study_from_dict():
test_create_study(request_type=dict)
def test_create_study_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_study),
'__call__') as call:
client.create_study()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CreateStudyRequest()
@pytest.mark.asyncio
async def test_create_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateStudyRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study(
name='name_value',
display_name='display_name_value',
state=gca_study.Study.State.ACTIVE,
inactive_reason='inactive_reason_value',
))
response = await client.create_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CreateStudyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_study.Study)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == gca_study.Study.State.ACTIVE
assert response.inactive_reason == 'inactive_reason_value'
@pytest.mark.asyncio
async def test_create_study_async_from_dict():
await test_create_study_async(request_type=dict)
def test_create_study_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.CreateStudyRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_study),
'__call__') as call:
call.return_value = gca_study.Study()
client.create_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_study_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.CreateStudyRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_study),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study())
await client.create_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_study_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gca_study.Study()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_study(
parent='parent_value',
study=gca_study.Study(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].study == gca_study.Study(name='name_value')
def test_create_study_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_study(
vizier_service.CreateStudyRequest(),
parent='parent_value',
study=gca_study.Study(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_study_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gca_study.Study()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_study(
parent='parent_value',
study=gca_study.Study(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].study == gca_study.Study(name='name_value')
@pytest.mark.asyncio
async def test_create_study_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_study(
vizier_service.CreateStudyRequest(),
parent='parent_value',
study=gca_study.Study(name='name_value'),
)
def test_get_study(transport: str = 'grpc', request_type=vizier_service.GetStudyRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Study(
name='name_value',
display_name='display_name_value',
state=study.Study.State.ACTIVE,
inactive_reason='inactive_reason_value',
)
response = client.get_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.GetStudyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Study)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == study.Study.State.ACTIVE
assert response.inactive_reason == 'inactive_reason_value'
def test_get_study_from_dict():
test_get_study(request_type=dict)
def test_get_study_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_study),
'__call__') as call:
client.get_study()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.GetStudyRequest()
@pytest.mark.asyncio
async def test_get_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetStudyRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Study(
name='name_value',
display_name='display_name_value',
state=study.Study.State.ACTIVE,
inactive_reason='inactive_reason_value',
))
response = await client.get_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.GetStudyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Study)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == study.Study.State.ACTIVE
assert response.inactive_reason == 'inactive_reason_value'
@pytest.mark.asyncio
async def test_get_study_async_from_dict():
await test_get_study_async(request_type=dict)
def test_get_study_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.GetStudyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_study),
'__call__') as call:
call.return_value = study.Study()
client.get_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_study_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.GetStudyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_study),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
await client.get_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_study_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Study()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_study(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_study_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_study(
vizier_service.GetStudyRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_study_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Study()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_study(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_study_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_study(
vizier_service.GetStudyRequest(),
name='name_value',
)
def test_list_studies(transport: str = 'grpc', request_type=vizier_service.ListStudiesRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = vizier_service.ListStudiesResponse(
next_page_token='next_page_token_value',
)
response = client.list_studies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.ListStudiesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListStudiesPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_studies_from_dict():
test_list_studies(request_type=dict)
def test_list_studies_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__') as call:
client.list_studies()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.ListStudiesRequest()
@pytest.mark.asyncio
async def test_list_studies_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListStudiesRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse(
next_page_token='next_page_token_value',
))
response = await client.list_studies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.ListStudiesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListStudiesAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_studies_async_from_dict():
await test_list_studies_async(request_type=dict)
def test_list_studies_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.ListStudiesRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__') as call:
call.return_value = vizier_service.ListStudiesResponse()
client.list_studies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_studies_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.ListStudiesRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse())
await client.list_studies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_studies_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = vizier_service.ListStudiesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_studies(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_studies_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_studies(
vizier_service.ListStudiesRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_studies_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = vizier_service.ListStudiesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_studies(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_studies_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_studies(
vizier_service.ListStudiesRequest(),
parent='parent_value',
)
def test_list_studies_pager():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
study.Study(),
study.Study(),
],
next_page_token='abc',
),
vizier_service.ListStudiesResponse(
studies=[],
next_page_token='def',
),
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
],
next_page_token='ghi',
),
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
study.Study(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_studies(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, study.Study)
for i in results)
def test_list_studies_pages():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
study.Study(),
study.Study(),
],
next_page_token='abc',
),
vizier_service.ListStudiesResponse(
studies=[],
next_page_token='def',
),
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
],
next_page_token='ghi',
),
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
study.Study(),
],
),
RuntimeError,
)
pages = list(client.list_studies(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_studies_async_pager():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
study.Study(),
study.Study(),
],
next_page_token='abc',
),
vizier_service.ListStudiesResponse(
studies=[],
next_page_token='def',
),
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
],
next_page_token='ghi',
),
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
study.Study(),
],
),
RuntimeError,
)
async_pager = await client.list_studies(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, study.Study)
for i in responses)
@pytest.mark.asyncio
async def test_list_studies_async_pages():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_studies),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
study.Study(),
study.Study(),
],
next_page_token='abc',
),
vizier_service.ListStudiesResponse(
studies=[],
next_page_token='def',
),
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
],
next_page_token='ghi',
),
vizier_service.ListStudiesResponse(
studies=[
study.Study(),
study.Study(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_studies(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_delete_study(transport: str = 'grpc', request_type=vizier_service.DeleteStudyRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.DeleteStudyRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_study_from_dict():
test_delete_study(request_type=dict)
def test_delete_study_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_study),
'__call__') as call:
client.delete_study()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.DeleteStudyRequest()
@pytest.mark.asyncio
async def test_delete_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteStudyRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.DeleteStudyRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_study_async_from_dict():
await test_delete_study_async(request_type=dict)
def test_delete_study_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.DeleteStudyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_study),
'__call__') as call:
call.return_value = None
client.delete_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
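# NOTE (editorial sketch, not generated code): request fields that form part
# of the resource URI (here `name`) are also sent as `x-goog-request-params`
# gRPC metadata so the backend can route the call; the assertion above checks
# exactly that key/value pair. A hypothetical helper showing how such
# metadata is built with the routing helper this module already uses
# elsewhere (defined for illustration only, never called by the tests):
def _example_routing_metadata():
    # Returns roughly ('x-goog-request-params', 'name=name/value').
    return gapic_v1.routing_header.to_grpc_metadata((('name', 'name/value'),))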
@pytest.mark.asyncio
async def test_delete_study_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.DeleteStudyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_study),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_study_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_study(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_study_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_study(
vizier_service.DeleteStudyRequest(),
name='name_value',
)
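# NOTE (editorial sketch, not generated code): the flattened keyword surface
# is a convenience layer over the request object; as the error tests above
# and below exercise, a call may pass either a request object or flattened
# fields, never both. Hypothetical illustration (not executed here):
#
#     client.delete_study(name='name_value')                    # OK
#     client.delete_study(vizier_service.DeleteStudyRequest())  # OK
#     client.delete_study(
#         vizier_service.DeleteStudyRequest(), name='name_value')  # ValueError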
@pytest.mark.asyncio
async def test_delete_study_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_study),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_study(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_study_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_study(
vizier_service.DeleteStudyRequest(),
name='name_value',
)
def test_lookup_study(transport: str = 'grpc', request_type=vizier_service.LookupStudyRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Study(
name='name_value',
display_name='display_name_value',
state=study.Study.State.ACTIVE,
inactive_reason='inactive_reason_value',
)
response = client.lookup_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.LookupStudyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Study)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == study.Study.State.ACTIVE
assert response.inactive_reason == 'inactive_reason_value'
def test_lookup_study_from_dict():
test_lookup_study(request_type=dict)
def test_lookup_study_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_study),
'__call__') as call:
client.lookup_study()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.LookupStudyRequest()
@pytest.mark.asyncio
async def test_lookup_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.LookupStudyRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_study),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study(
name='name_value',
display_name='display_name_value',
state=study.Study.State.ACTIVE,
inactive_reason='inactive_reason_value',
))
response = await client.lookup_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.LookupStudyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Study)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.state == study.Study.State.ACTIVE
assert response.inactive_reason == 'inactive_reason_value'
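# NOTE (editorial sketch, not generated code): in the async variants,
# `grpc_helpers_async.FakeUnaryUnaryCall(value)` stands in for the awaitable
# call object a real grpc.aio channel would return, so
# `await client.lookup_study(request)` resolves to `value`. A simplified
# illustration of the idea (not the real google.api_core implementation):
class _ExampleAwaitableCall:
    def __init__(self, value):
        self._value = value

    def __await__(self):
        async def _resolve():
            return self._value
        return _resolve().__await__()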
@pytest.mark.asyncio
async def test_lookup_study_async_from_dict():
await test_lookup_study_async(request_type=dict)
def test_lookup_study_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.LookupStudyRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_study),
'__call__') as call:
call.return_value = study.Study()
client.lookup_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_lookup_study_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.LookupStudyRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_study),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
await client.lookup_study(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_lookup_study_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_study),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Study()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.lookup_study(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_lookup_study_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.lookup_study(
vizier_service.LookupStudyRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_lookup_study_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_study),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.lookup_study(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_lookup_study_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.lookup_study(
vizier_service.LookupStudyRequest(),
parent='parent_value',
)
def test_suggest_trials(transport: str = 'grpc', request_type=vizier_service.SuggestTrialsRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.suggest_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.SuggestTrialsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
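# NOTE (editorial sketch, not generated code): SuggestTrials is a
# long-running operation; the raw stub returns an operations_pb2.Operation,
# which the client wraps in an operation future (hence the
# `isinstance(response, future.Future)` check above). Typical consumption,
# sketched with a hypothetical timeout and never executed by these tests:
def _example_wait_for_suggested_trials(client):
    operation = client.suggest_trials(request={})
    # Blocks until the backend resolves the operation, then returns the
    # SuggestTrialsResponse payload.
    return operation.result(timeout=300)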
def test_suggest_trials_from_dict():
test_suggest_trials(request_type=dict)
def test_suggest_trials_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_trials),
'__call__') as call:
client.suggest_trials()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.SuggestTrialsRequest()
@pytest.mark.asyncio
async def test_suggest_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.SuggestTrialsRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.suggest_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.SuggestTrialsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_suggest_trials_async_from_dict():
await test_suggest_trials_async(request_type=dict)
def test_suggest_trials_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.SuggestTrialsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_trials),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.suggest_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_suggest_trials_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.SuggestTrialsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.suggest_trials),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.suggest_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_trial(transport: str = 'grpc', request_type=vizier_service.CreateTrialRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
)
response = client.create_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CreateTrialRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
def test_create_trial_from_dict():
test_create_trial(request_type=dict)
def test_create_trial_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_trial),
'__call__') as call:
client.create_trial()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CreateTrialRequest()
@pytest.mark.asyncio
async def test_create_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateTrialRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
))
response = await client.create_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CreateTrialRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
@pytest.mark.asyncio
async def test_create_trial_async_from_dict():
await test_create_trial_async(request_type=dict)
def test_create_trial_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.CreateTrialRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_trial),
'__call__') as call:
call.return_value = study.Trial()
client.create_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_trial_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.CreateTrialRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_trial),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
await client.create_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_trial_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Trial()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_trial(
parent='parent_value',
trial=study.Trial(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].trial == study.Trial(name='name_value')
def test_create_trial_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_trial(
vizier_service.CreateTrialRequest(),
parent='parent_value',
trial=study.Trial(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_trial_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_trial(
parent='parent_value',
trial=study.Trial(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].trial == study.Trial(name='name_value')
@pytest.mark.asyncio
async def test_create_trial_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_trial(
vizier_service.CreateTrialRequest(),
parent='parent_value',
trial=study.Trial(name='name_value'),
)
def test_get_trial(transport: str = 'grpc', request_type=vizier_service.GetTrialRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
)
response = client.get_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.GetTrialRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
def test_get_trial_from_dict():
test_get_trial(request_type=dict)
def test_get_trial_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_trial),
'__call__') as call:
client.get_trial()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.GetTrialRequest()
@pytest.mark.asyncio
async def test_get_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetTrialRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
))
response = await client.get_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.GetTrialRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
@pytest.mark.asyncio
async def test_get_trial_async_from_dict():
await test_get_trial_async(request_type=dict)
def test_get_trial_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.GetTrialRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_trial),
'__call__') as call:
call.return_value = study.Trial()
client.get_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_trial_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.GetTrialRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_trial),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
await client.get_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_trial_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Trial()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_trial(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_trial_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_trial(
vizier_service.GetTrialRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_trial_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_trial(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_trial_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_trial(
vizier_service.GetTrialRequest(),
name='name_value',
)
def test_list_trials(transport: str = 'grpc', request_type=vizier_service.ListTrialsRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = vizier_service.ListTrialsResponse(
next_page_token='next_page_token_value',
)
response = client.list_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.ListTrialsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTrialsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_trials_from_dict():
test_list_trials(request_type=dict)
def test_list_trials_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__') as call:
client.list_trials()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.ListTrialsRequest()
@pytest.mark.asyncio
async def test_list_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListTrialsRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.ListTrialsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTrialsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_trials_async_from_dict():
await test_list_trials_async(request_type=dict)
def test_list_trials_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.ListTrialsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__') as call:
call.return_value = vizier_service.ListTrialsResponse()
client.list_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_trials_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.ListTrialsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse())
await client.list_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_trials_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = vizier_service.ListTrialsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_trials(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_trials_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_trials(
vizier_service.ListTrialsRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_trials_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_trials(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_trials_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_trials(
vizier_service.ListTrialsRequest(),
parent='parent_value',
)
def test_list_trials_pager():
client = VizierServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
study.Trial(),
study.Trial(),
],
next_page_token='abc',
),
vizier_service.ListTrialsResponse(
trials=[],
next_page_token='def',
),
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
],
next_page_token='ghi',
),
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
study.Trial(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_trials(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, study.Trial)
for i in results)
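# NOTE (editorial sketch, not generated code): the `pager._metadata` check
# above verifies that the pager retains the per-call metadata, including the
# routing header derived from the (empty) `parent` field, so it can resend
# the same metadata on every follow-up page request it issues.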
def test_list_trials_pages():
client = VizierServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
study.Trial(),
study.Trial(),
],
next_page_token='abc',
),
vizier_service.ListTrialsResponse(
trials=[],
next_page_token='def',
),
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
],
next_page_token='ghi',
),
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
study.Trial(),
],
),
RuntimeError,
)
pages = list(client.list_trials(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_trials_async_pager():
client = VizierServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
study.Trial(),
study.Trial(),
],
next_page_token='abc',
),
vizier_service.ListTrialsResponse(
trials=[],
next_page_token='def',
),
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
],
next_page_token='ghi',
),
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
study.Trial(),
],
),
RuntimeError,
)
async_pager = await client.list_trials(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, study.Trial)
for i in responses)
@pytest.mark.asyncio
async def test_list_trials_async_pages():
client = VizierServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_trials),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
study.Trial(),
study.Trial(),
],
next_page_token='abc',
),
vizier_service.ListTrialsResponse(
trials=[],
next_page_token='def',
),
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
],
next_page_token='ghi',
),
vizier_service.ListTrialsResponse(
trials=[
study.Trial(),
study.Trial(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_trials(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_add_trial_measurement(transport: str = 'grpc', request_type=vizier_service.AddTrialMeasurementRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_trial_measurement),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
)
response = client.add_trial_measurement(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.AddTrialMeasurementRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
def test_add_trial_measurement_from_dict():
test_add_trial_measurement(request_type=dict)
def test_add_trial_measurement_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_trial_measurement),
'__call__') as call:
client.add_trial_measurement()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.AddTrialMeasurementRequest()
@pytest.mark.asyncio
async def test_add_trial_measurement_async(transport: str = 'grpc_asyncio', request_type=vizier_service.AddTrialMeasurementRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_trial_measurement),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
))
response = await client.add_trial_measurement(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.AddTrialMeasurementRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
@pytest.mark.asyncio
async def test_add_trial_measurement_async_from_dict():
await test_add_trial_measurement_async(request_type=dict)
def test_add_trial_measurement_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.AddTrialMeasurementRequest()
request.trial_name = 'trial_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_trial_measurement),
'__call__') as call:
call.return_value = study.Trial()
client.add_trial_measurement(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'trial_name=trial_name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_add_trial_measurement_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.AddTrialMeasurementRequest()
request.trial_name = 'trial_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.add_trial_measurement),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
await client.add_trial_measurement(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'trial_name=trial_name/value',
) in kw['metadata']
def test_complete_trial(transport: str = 'grpc', request_type=vizier_service.CompleteTrialRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
)
response = client.complete_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CompleteTrialRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
def test_complete_trial_from_dict():
test_complete_trial(request_type=dict)
def test_complete_trial_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_trial),
'__call__') as call:
client.complete_trial()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CompleteTrialRequest()
@pytest.mark.asyncio
async def test_complete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CompleteTrialRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
))
response = await client.complete_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CompleteTrialRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
@pytest.mark.asyncio
async def test_complete_trial_async_from_dict():
await test_complete_trial_async(request_type=dict)
def test_complete_trial_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.CompleteTrialRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_trial),
'__call__') as call:
call.return_value = study.Trial()
client.complete_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_complete_trial_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.CompleteTrialRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.complete_trial),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
await client.complete_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_trial(transport: str = 'grpc', request_type=vizier_service.DeleteTrialRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.DeleteTrialRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_trial_from_dict():
test_delete_trial(request_type=dict)
def test_delete_trial_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_trial),
'__call__') as call:
client.delete_trial()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.DeleteTrialRequest()
@pytest.mark.asyncio
async def test_delete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteTrialRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.DeleteTrialRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_trial_async_from_dict():
await test_delete_trial_async(request_type=dict)
def test_delete_trial_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.DeleteTrialRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_trial),
'__call__') as call:
call.return_value = None
client.delete_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_trial_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.DeleteTrialRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_trial),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
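# Note added for clarity (not part of the generated tests): the field-header
# assertions above and below check gRPC request routing.  Resource fields that
# appear in the HTTP/1.1 URI (here, the trial ``name``) are expected to be
# echoed into the call metadata as an 'x-goog-request-params' entry, e.g.
# ('x-goog-request-params', 'name=name/value') for a request whose name is
# 'name/value'.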
def test_delete_trial_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_trial(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_trial_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_trial(
vizier_service.DeleteTrialRequest(),
name='name_value',
)
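# Illustrative sketch (added, not generated): each RPC can be invoked either
# with a fully-formed request proto or with flattened keyword arguments, but
# never both at once.  The trial name below is a placeholder value.
#
#   client.delete_trial(request=vizier_service.DeleteTrialRequest(name='name_value'))
#   client.delete_trial(name='name_value')
#   client.delete_trial(vizier_service.DeleteTrialRequest(), name='name_value')  # raises ValueError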
@pytest.mark.asyncio
async def test_delete_trial_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_trial(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_trial_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_trial(
vizier_service.DeleteTrialRequest(),
name='name_value',
)
def test_check_trial_early_stopping_state(transport: str = 'grpc', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.check_trial_early_stopping_state),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.check_trial_early_stopping_state(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_check_trial_early_stopping_state_from_dict():
test_check_trial_early_stopping_state(request_type=dict)
def test_check_trial_early_stopping_state_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.check_trial_early_stopping_state),
'__call__') as call:
client.check_trial_early_stopping_state()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest()
@pytest.mark.asyncio
async def test_check_trial_early_stopping_state_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.check_trial_early_stopping_state),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.check_trial_early_stopping_state(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_check_trial_early_stopping_state_async_from_dict():
await test_check_trial_early_stopping_state_async(request_type=dict)
def test_check_trial_early_stopping_state_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.CheckTrialEarlyStoppingStateRequest()
request.trial_name = 'trial_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.check_trial_early_stopping_state),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.check_trial_early_stopping_state(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'trial_name=trial_name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_check_trial_early_stopping_state_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.CheckTrialEarlyStoppingStateRequest()
request.trial_name = 'trial_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.check_trial_early_stopping_state),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.check_trial_early_stopping_state(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'trial_name=trial_name/value',
) in kw['metadata']
def test_stop_trial(transport: str = 'grpc', request_type=vizier_service.StopTrialRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
)
response = client.stop_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.StopTrialRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
def test_stop_trial_from_dict():
test_stop_trial(request_type=dict)
def test_stop_trial_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_trial),
'__call__') as call:
client.stop_trial()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.StopTrialRequest()
@pytest.mark.asyncio
async def test_stop_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.StopTrialRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_trial),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
name='name_value',
id='id_value',
state=study.Trial.State.REQUESTED,
client_id='client_id_value',
infeasible_reason='infeasible_reason_value',
custom_job='custom_job_value',
))
response = await client.stop_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.StopTrialRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, study.Trial)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.state == study.Trial.State.REQUESTED
assert response.client_id == 'client_id_value'
assert response.infeasible_reason == 'infeasible_reason_value'
assert response.custom_job == 'custom_job_value'
@pytest.mark.asyncio
async def test_stop_trial_async_from_dict():
await test_stop_trial_async(request_type=dict)
def test_stop_trial_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.StopTrialRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_trial),
'__call__') as call:
call.return_value = study.Trial()
client.stop_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_stop_trial_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.StopTrialRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_trial),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
await client.stop_trial(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_list_optimal_trials(transport: str = 'grpc', request_type=vizier_service.ListOptimalTrialsRequest):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_optimal_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = vizier_service.ListOptimalTrialsResponse(
)
response = client.list_optimal_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.ListOptimalTrialsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, vizier_service.ListOptimalTrialsResponse)
def test_list_optimal_trials_from_dict():
test_list_optimal_trials(request_type=dict)
def test_list_optimal_trials_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_optimal_trials),
'__call__') as call:
client.list_optimal_trials()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.ListOptimalTrialsRequest()
@pytest.mark.asyncio
async def test_list_optimal_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListOptimalTrialsRequest):
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_optimal_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse(
))
response = await client.list_optimal_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == vizier_service.ListOptimalTrialsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, vizier_service.ListOptimalTrialsResponse)
@pytest.mark.asyncio
async def test_list_optimal_trials_async_from_dict():
await test_list_optimal_trials_async(request_type=dict)
def test_list_optimal_trials_field_headers():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.ListOptimalTrialsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_optimal_trials),
'__call__') as call:
call.return_value = vizier_service.ListOptimalTrialsResponse()
client.list_optimal_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_optimal_trials_field_headers_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = vizier_service.ListOptimalTrialsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_optimal_trials),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse())
await client.list_optimal_trials(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_optimal_trials_flattened():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_optimal_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = vizier_service.ListOptimalTrialsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_optimal_trials(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_optimal_trials_flattened_error():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_optimal_trials(
vizier_service.ListOptimalTrialsRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_optimal_trials_flattened_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_optimal_trials),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_optimal_trials(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_optimal_trials_flattened_error_async():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_optimal_trials(
vizier_service.ListOptimalTrialsRequest(),
parent='parent_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.VizierServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.VizierServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VizierServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.VizierServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VizierServiceClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
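# Illustrative sketch (added for clarity): a transport instance already carries
# its own credentials, so it is passed on its own and the credential-related
# client options are omitted, as the next two tests exercise:
#
#   transport = transports.VizierServiceGrpcTransport(
#       credentials=ga_credentials.AnonymousCredentials())
#   client = VizierServiceClient(transport=transport)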
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.VizierServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = VizierServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.VizierServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.VizierServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.VizierServiceGrpcTransport,
transports.VizierServiceGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.VizierServiceGrpcTransport,
)
def test_vizier_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.VizierServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_vizier_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.VizierServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'create_study',
'get_study',
'list_studies',
'delete_study',
'lookup_study',
'suggest_trials',
'create_trial',
'get_trial',
'list_trials',
'add_trial_measurement',
'complete_trial',
'delete_trial',
'check_trial_early_stopping_state',
'stop_trial',
'list_optimal_trials',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
@requires_google_auth_gte_1_25_0
def test_vizier_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VizierServiceTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_vizier_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VizierServiceTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
def test_vizier_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VizierServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_vizier_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
VizierServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_vizier_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
VizierServiceClient()
adc.assert_called_once_with(
scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.VizierServiceGrpcTransport,
transports.VizierServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_vizier_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.VizierServiceGrpcTransport,
transports.VizierServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_vizier_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VizierServiceGrpcTransport, grpc_helpers),
(transports.VizierServiceGrpcAsyncIOTransport, grpc_helpers_async)
],
)
@requires_api_core_gte_1_26_0
def test_vizier_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VizierServiceGrpcTransport, grpc_helpers),
(transports.VizierServiceGrpcAsyncIOTransport, grpc_helpers_async)
],
)
@requires_api_core_lt_1_26_0
def test_vizier_service_transport_create_channel_old_api_core(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus")
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VizierServiceGrpcTransport, grpc_helpers),
(transports.VizierServiceGrpcAsyncIOTransport, grpc_helpers_async)
],
)
@requires_api_core_lt_1_26_0
def test_vizier_service_transport_create_channel_user_scopes(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=["1", "2"],
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport])
def test_vizier_service_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
    # Check that client_cert_source_for_mtls is used when ssl_channel_credentials
    # is not provided.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_vizier_service_host_no_port():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
)
assert client.transport._host == 'aiplatform.googleapis.com:443'
def test_vizier_service_host_with_port():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
)
assert client.transport._host == 'aiplatform.googleapis.com:8000'
def test_vizier_service_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VizierServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_vizier_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VizierServiceGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport])
def test_vizier_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport])
def test_vizier_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_vizier_service_grpc_lro_client():
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(
transport.operations_client,
operations_v1.OperationsClient,
)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_vizier_service_grpc_lro_async_client():
client = VizierServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc_asyncio',
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(
transport.operations_client,
operations_v1.OperationsAsyncClient,
)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_custom_job_path():
project = "squid"
location = "clam"
custom_job = "whelk"
expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
actual = VizierServiceClient.custom_job_path(project, location, custom_job)
assert expected == actual
def test_parse_custom_job_path():
expected = {
"project": "octopus",
"location": "oyster",
"custom_job": "nudibranch",
}
path = VizierServiceClient.custom_job_path(**expected)
# Check that the path construction is reversible.
actual = VizierServiceClient.parse_custom_job_path(path)
assert expected == actual
def test_study_path():
project = "cuttlefish"
location = "mussel"
study = "winkle"
expected = "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, )
actual = VizierServiceClient.study_path(project, location, study)
assert expected == actual
def test_parse_study_path():
expected = {
"project": "nautilus",
"location": "scallop",
"study": "abalone",
}
path = VizierServiceClient.study_path(**expected)
# Check that the path construction is reversible.
actual = VizierServiceClient.parse_study_path(path)
assert expected == actual
def test_trial_path():
project = "squid"
location = "clam"
study = "whelk"
trial = "octopus"
expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, )
actual = VizierServiceClient.trial_path(project, location, study, trial)
assert expected == actual
def test_parse_trial_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"study": "cuttlefish",
"trial": "mussel",
}
path = VizierServiceClient.trial_path(**expected)
# Check that the path construction is reversible.
actual = VizierServiceClient.parse_trial_path(path)
assert expected == actual
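# Example (added for clarity): the helpers above build and parse the fully
# qualified resource name, e.g.
#   VizierServiceClient.trial_path('squid', 'clam', 'whelk', 'octopus')
#   == 'projects/squid/locations/clam/studies/whelk/trials/octopus'
# and parse_trial_path() inverts that mapping.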
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = VizierServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = VizierServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = VizierServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder, )
actual = VizierServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = VizierServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = VizierServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization, )
actual = VizierServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = VizierServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = VizierServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project, )
actual = VizierServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = VizierServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = VizierServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = VizierServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = VizierServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = VizierServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep:
client = VizierServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = VizierServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
authors: ["bazel-bot-development[bot]@users.noreply.github.com"]
author_id: bazel-bot-development[bot]@users.noreply.github.com

blob_id: e6d3fec8c37847d142a76d9494d85199977bdfc9
directory_id: e2423781704811bf0a0ecc07f9cb29d0a044ac48
path: /tensorflow_datasets/core/tfrecords_reader_test.py
content_id: ca950a0f7a0654dfc139f11a1da23d7102447b4a
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: mbbessa/datasets
snapshot_id: af2506a8cf5c46c33143d6e0266ba50d8b4c3fcc
revision_id: 2a7e8e793197637948ea0e0be4aa02a6aa2f7f55
branch_name: refs/heads/master
visit_date: 2021-11-30T22:28:55.825453
revision_date: 2021-11-19T20:49:49
committer_date: 2021-11-19T20:52:42
github_id: 171,528,015
star_events_count: 0
fork_events_count: 0
gha_license_id: Apache-2.0
gha_event_created_at: 2019-02-19T18:34:26
gha_created_at: 2019-02-19T18:34:26
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 25,137
extension: py
content:
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.tfrecords_reader."""
import functools
import itertools
import os
from unittest import mock
import six
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets import testing
from tensorflow_datasets.core import example_parser
from tensorflow_datasets.core import splits
from tensorflow_datasets.core import tfrecords_reader
from tensorflow_datasets.core import tfrecords_writer
from tensorflow_datasets.core.utils import read_config as read_config_lib
from tensorflow_datasets.core.utils import shard_utils
# Skip the cardinality test for backward compatibility with TF <= 2.1.
_SKIP_CARDINALITY_TEST = not hasattr(tf.data.experimental, 'assert_cardinality')
_SHUFFLE_FILES_ERROR_MESSAGE = ('Dataset is an ordered dataset '
'(\'disable_shuffling=True\'), but examples '
'will not be read in order because '
'`shuffle_files=True`.')
_CYCLE_LENGTH_ERROR_MESSAGE = ('Dataset is an ordered dataset '
'(\'disable_shuffling=True\'), but examples will'
' not be read in order because '
'`ReadConfig.interleave_cycle_length != 1`.')
def _write_tfrecord_from_shard_spec(shard_spec, get):
"""Write tfrecord shard given shard_spec and buckets to read data from.
Args:
shard_spec: _ShardSpec, the spec for shard to write.
    get: callable taking a bucket index and returning an iterator over its
      elements.
"""
iterators = []
for instruction in shard_spec.file_instructions:
iterator = get(int(instruction.filename))
skip, take = instruction.skip, instruction.take
stop = skip + take if take > 0 else None
iterators.append(itertools.islice(iterator, skip, stop))
tfrecords_writer._write_examples(shard_spec.path, itertools.chain(*iterators))
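# Note added for clarity (not part of the original file): a
# shard_utils.FileInstruction describes how much of one shard file to read.
# ``skip`` examples are dropped from the start of the shard, then ``take``
# examples are read (take=-1 means "read until the end of the shard"), and
# ``num_examples`` records how many examples the instruction yields.  The
# expectations in GetDatasetFilesTest below follow directly from these rules.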
class GetDatasetFilesTest(testing.TestCase):
SPLIT_INFOS = {
'train':
splits.SplitInfo(
name='train',
shard_lengths=[3, 2, 3, 2, 3], # 13 examples.
num_bytes=0,
),
}
PATH_PATTERN = 'mnist-train.tfrecord-0000%d-of-00005'
def _get_files(self, instruction):
file_instructions = tfrecords_reader._make_file_instructions_from_absolutes(
name='mnist',
split_infos=self.SPLIT_INFOS,
absolute_instructions=[instruction],
)
return file_instructions
def test_no_skip_no_take(self):
instruction = tfrecords_reader._AbsoluteInstruction('train', None, None)
files = self._get_files(instruction)
self.assertEqual(files, [
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % i, skip=0, take=-1, num_examples=n)
for i, n in enumerate([3, 2, 3, 2, 3])
])
def test_skip(self):
# One file is not taken, one file is partially taken.
instruction = tfrecords_reader._AbsoluteInstruction('train', 4, None)
files = self._get_files(instruction)
self.assertEqual(files, [
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 1, skip=1, take=-1, num_examples=1),
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 2, skip=0, take=-1, num_examples=3),
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 3, skip=0, take=-1, num_examples=2),
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 4, skip=0, take=-1, num_examples=3),
])
def test_take(self):
# Two files are not taken, one file is partially taken.
instruction = tfrecords_reader._AbsoluteInstruction('train', None, 6)
files = self._get_files(instruction)
self.assertEqual(files, [
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 0, skip=0, take=-1, num_examples=3),
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 1, skip=0, take=-1, num_examples=2),
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 2, skip=0, take=1, num_examples=1),
])
def test_skip_take1(self):
# A single shard with both skip and take.
instruction = tfrecords_reader._AbsoluteInstruction('train', 1, 2)
files = self._get_files(instruction)
self.assertEqual(files, [
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 0, skip=1, take=1, num_examples=1),
])
def test_skip_take2(self):
    # 2 elements across two shards are taken in the middle.
instruction = tfrecords_reader._AbsoluteInstruction('train', 7, 9)
files = self._get_files(instruction)
self.assertEqual(files, [
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 2, skip=2, take=-1, num_examples=1),
shard_utils.FileInstruction(
filename=self.PATH_PATTERN % 3, skip=0, take=1, num_examples=1),
])
def test_touching_boundaries(self):
# Nothing to read.
instruction = tfrecords_reader._AbsoluteInstruction('train', 0, 0)
files = self._get_files(instruction)
self.assertEqual(files, [])
instruction = tfrecords_reader._AbsoluteInstruction('train', None, 0)
files = self._get_files(instruction)
self.assertEqual(files, [])
instruction = tfrecords_reader._AbsoluteInstruction('train', 3, 3)
files = self._get_files(instruction)
self.assertEqual(files, [])
instruction = tfrecords_reader._AbsoluteInstruction('train', 13, None)
files = self._get_files(instruction)
self.assertEqual(files, [])
def test_missing_shard_lengths(self):
with self.assertRaisesWithPredicateMatch(ValueError, 'Shard empty.'):
split_info = [
splits.SplitInfo(name='train', shard_lengths=[], num_bytes=0),
]
tfrecords_reader.make_file_instructions('mnist', split_info, 'train')
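# Illustrative note (added, not in the original file): the slicing specs parsed
# by ReadInstruction below use the same syntax accepted by the public API, for
# example (the dataset name is a placeholder):
#
#   ds = tfds.load('mnist', split='train[:10%]')   # roughly the first 10% of train
#   ds = tfds.load('mnist', split='train+test')    # concatenation of two splits
#   ds = tfds.load('mnist', split='train[42:99]')  # absolute example indices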
class ReadInstructionTest(testing.TestCase):
def setUp(self):
super(ReadInstructionTest, self).setUp()
self.splits = {
'train':
splits.SplitInfo(name='train', shard_lengths=[200], num_bytes=0),
        'test':
            splits.SplitInfo(name='test', shard_lengths=[101], num_bytes=0),
        'validation':
            splits.SplitInfo(name='validation', shard_lengths=[30], num_bytes=0),
        'dev-train':
            splits.SplitInfo(name='dev-train', shard_lengths=[5, 5], num_bytes=0),
}
def check_from_ri(self, ri, expected):
res = ri.to_absolute(self.splits)
expected_result = []
for split_name, from_, to_ in expected:
expected_result.append(
tfrecords_reader._AbsoluteInstruction(split_name, from_, to_))
self.assertEqual(res, expected_result)
return ri
def check_from_spec(self, spec, expected):
ri = tfrecords_reader.ReadInstruction.from_spec(spec)
return self.check_from_ri(ri, expected)
def assertRaises(self, spec, msg, exc_cls=ValueError):
with self.assertRaisesWithPredicateMatch(exc_cls, msg):
ri = tfrecords_reader.ReadInstruction.from_spec(spec)
ri.to_absolute(self.splits)
def test_valid(self):
# Simple split:
ri = self.check_from_spec('train', [('train', None, None)])
self.assertEqual(
str(ri),
"ReadInstruction('train')",
)
self.check_from_spec('test', [('test', None, None)])
# Addition of splits:
self.check_from_spec('train+test', [
('train', None, None),
('test', None, None),
])
# Absolute slicing:
self.check_from_spec('train[0:0]', [('train', None, 0)])
self.check_from_spec('train[:10]', [('train', None, 10)])
self.check_from_spec('train[0:10]', [('train', None, 10)])
self.check_from_spec('train[-10:]', [('train', 190, None)])
self.check_from_spec('train[-100:-50]', [('train', 100, 150)])
self.check_from_spec('train[-10:200]', [('train', 190, None)])
self.check_from_spec('train[10:-10]', [('train', 10, 190)])
self.check_from_spec('train[42:99]', [('train', 42, 99)])
# Percent slicing, closest rounding:
self.check_from_spec('train[:10%]', [('train', None, 20)])
self.check_from_spec('train[90%:]', [('train', 180, None)])
self.check_from_spec('train[-1%:]', [('train', 198, None)])
ri = self.check_from_spec('test[:99%]', [('test', None, 100)])
self.assertEqual(
str(ri), "ReadInstruction('test[:99%]', rounding='closest')")
# No overlap:
self.check_from_spec('test[100%:]', [('test', 101, None)])
# Percent slicing, pct1_dropremainder rounding:
ri = tfrecords_reader.ReadInstruction(
'train', to=20, unit='%', rounding='pct1_dropremainder')
self.check_from_ri(ri, [('train', None, 40)])
# test split has 101 examples.
ri = tfrecords_reader.ReadInstruction(
'test', to=100, unit='%', rounding='pct1_dropremainder')
self.check_from_ri(ri, [('test', None, 100)])
# No overlap using 'pct1_dropremainder' rounding:
ri1 = tfrecords_reader.ReadInstruction(
'test', to=99, unit='%', rounding='pct1_dropremainder')
ri2 = tfrecords_reader.ReadInstruction(
'test', from_=100, unit='%', rounding='pct1_dropremainder')
self.check_from_ri(ri1, [('test', None, 99)])
self.check_from_ri(ri2, [('test', 100, None)])
# Empty:
# Slices resulting in empty datasets are valid with 'closest' rounding:
self.check_from_spec('validation[:1%]', [('validation', None, 0)])
# New integer syntax
self.check_from_spec('train[4_2:9_9]', [('train', 42, 99)])
self.check_from_spec('train[:1_0%]', [('train', None, 20)])
# Supports splits with '-' in name.
ri = self.check_from_spec('dev-train', [('dev-train', None, None)])
def test_add(self):
ri1 = tfrecords_reader.ReadInstruction.from_spec('train[10:20]')
ri2 = tfrecords_reader.ReadInstruction.from_spec('test[10:20]')
ri3 = tfrecords_reader.ReadInstruction.from_spec('train[1:5]')
ri = ri1 + ri2 + ri3
self.assertEqual(
str(ri),
"ReadInstruction('train[10:20]')"
"+ReadInstruction('test[10:20]')"
"+ReadInstruction('train[1:5]')",
)
def test_invalid_rounding(self):
with self.assertRaisesWithPredicateMatch(ValueError, 'Rounding should be'):
tfrecords_reader.ReadInstruction('test', unit='%', rounding='unexisting')
def test_invalid_unit(self):
with self.assertRaisesWithPredicateMatch(ValueError, 'Unit should be'):
tfrecords_reader.ReadInstruction('test', unit='kg', rounding='closest')
def test_invalid_spec(self):
# Invalid format:
self.assertRaises('validation[:250%:2]',
'Unrecognized split format: \'validation[:250%:2]\'')
# Unexisting split:
self.assertRaises('imaginary', "Unknown split 'imaginary'")
# Invalid boundaries abs:
self.assertRaises('validation[:31]', 'incompatible with 30 examples')
# Invalid boundaries %:
self.assertRaises('validation[:250%]',
'percent slice boundaries should be in [-100, 100]')
self.assertRaises('validation[-101%:]',
'percent slice boundaries should be in [-100, 100]')
# pct1_dropremainder with < 100 examples
with self.assertRaisesWithPredicateMatch(
ValueError, 'with less than 100 elements is forbidden'):
ri = tfrecords_reader.ReadInstruction(
'validation', to=99, unit='%', rounding='pct1_dropremainder')
ri.to_absolute(self.splits)
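# Worked example (added for clarity): with the default 'closest' rounding used
# above, 'train[:10%]' of the 200-example train split resolves to
# round(200 * 0.10) = 20 examples.  With 'pct1_dropremainder', each 1% is
# floor(num_examples / 100) examples, so 20% of train is 2 * 20 = 40 examples
# and 100% of the 101-example test split is 100 examples, dropping the
# remainder.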
class ReaderTest(testing.TestCase):
def setUp(self):
super(ReaderTest, self).setUp()
with mock.patch.object(example_parser, 'ExampleParser',
testing.DummyParser):
self.reader = tfrecords_reader.Reader(self.tmp_dir, 'some_spec')
self.reader.read = functools.partial(
self.reader.read,
read_config=read_config_lib.ReadConfig(),
shuffle_files=False,
)
def _write_tfrecord(self, split_name, shards_number, records):
path = os.path.join(self.tmp_dir, 'mnist-%s.tfrecord' % split_name)
num_examples = len(records)
with mock.patch.object(
tfrecords_writer, '_get_number_shards', return_value=shards_number):
shard_specs = tfrecords_writer._get_shard_specs(num_examples, 0,
[num_examples], path)
serialized_records = [(key, six.b(rec)) for key, rec in enumerate(records)]
for shard_spec in shard_specs:
_write_tfrecord_from_shard_spec(shard_spec,
lambda unused_i: iter(serialized_records))
return splits.SplitInfo(
name=split_name,
shard_lengths=[int(s.examples_number) for s in shard_specs],
num_bytes=0,
)
def test_nodata_instruction(self):
# Given instruction corresponds to no data.
with self.assertRaisesWithPredicateMatch(ValueError,
'corresponds to no data!'):
train_info = splits.SplitInfo(
name='train',
shard_lengths=[2, 3, 2, 3, 2],
num_bytes=0,
)
self.reader.read(
name='mnist',
instructions='train[0:0]',
split_infos=[train_info],
)
def test_noskip_notake(self):
train_info = self._write_tfrecord('train', 5, 'abcdefghijkl')
ds = self.reader.read(
name='mnist',
instructions='train',
split_infos=[train_info],
)
read_data = list(tfds.as_numpy(ds))
self.assertEqual(read_data, [six.b(l) for l in 'abcdefghijkl'])
if not _SKIP_CARDINALITY_TEST:
# Check that the cardinality is correctly set.
self.assertEqual(
tf.data.experimental.cardinality(ds).numpy(), len(read_data))
def test_overlap(self):
train_info = self._write_tfrecord('train', 5, 'abcdefghijkl')
ds = self.reader.read(
name='mnist',
instructions='train+train[:2]',
split_infos=[train_info],
)
read_data = list(tfds.as_numpy(ds))
self.assertEqual(read_data, [six.b(l) for l in 'abcdefghijklab'])
if not _SKIP_CARDINALITY_TEST:
# Check that the cardinality is correctly set.
self.assertEqual(
tf.data.experimental.cardinality(ds).numpy(), len(read_data))
def test_complex(self):
train_info = self._write_tfrecord('train', 5, 'abcdefghijkl')
test_info = self._write_tfrecord('test', 3, 'mnopqrs')
self.assertEqual(train_info.name, 'train')
self.assertEqual(test_info.name, 'test')
self.assertEqual(train_info.shard_lengths, [2, 3, 2, 3, 2]) # 12 ex.
self.assertEqual(test_info.shard_lengths, [2, 3, 2]) # 7 ex.
split_info = [train_info, test_info]
ds = self.reader.read(
name='mnist',
instructions='train[1:-1]+test[:-50%]',
split_infos=split_info,
)
read_data = list(tfds.as_numpy(ds))
self.assertEqual(read_data, [six.b(l) for l in 'bcdefghijkmno'])
if not _SKIP_CARDINALITY_TEST:
# Check that the cardinality is correctly set.
self.assertEqual(
tf.data.experimental.cardinality(ds).numpy(), len(read_data))
def test_shuffle_files(self):
train_info = self._write_tfrecord('train', 5, 'abcdefghijkl')
ds = self.reader.read(
name='mnist',
instructions='train',
split_infos=[train_info],
shuffle_files=True,
)
shards = [ # The shards of the dataset:
[b'a', b'b'],
[b'c', b'd', b'e'],
[b'f', b'g'],
[b'h', b'i', b'j'],
[b'k', b'l'],
]
# The various orders in which the dataset can be read:
expected_permutations = [
tuple(sum(shard, [])) for shard in itertools.permutations(shards)
]
ds = ds.batch(12).repeat(100)
read_data = set(tuple(e) for e in tfds.as_numpy(ds))
for batch in read_data:
self.assertIn(batch, expected_permutations)
    # There are theoretically 5! (=120) different arrangements, but we would
# need too many repeats to be sure to get them.
self.assertGreater(len(set(read_data)), 10)
def test_shuffle_deterministic(self):
split_info = self._write_tfrecord('train', 5, 'abcdefghijkl')
read_config = read_config_lib.ReadConfig(shuffle_seed=123,)
ds = self.reader.read(
name='mnist',
instructions='train',
split_infos=[split_info],
read_config=read_config,
shuffle_files=True,
)
ds_values = list(tfds.as_numpy(ds))
# Check that shuffle=True with a seed provides deterministic results.
self.assertEqual(ds_values, [
b'a', b'b', b'k', b'l', b'h', b'i', b'j', b'c', b'd', b'e', b'f', b'g'
])
def test_4fold(self):
train_info = self._write_tfrecord('train', 5, 'abcdefghijkl')
instructions = [
tfrecords_reader.ReadInstruction('train', from_=k, to=k + 25, unit='%')
for k in range(0, 100, 25)
]
tests = self.reader.read(
name='mnist',
instructions=instructions,
split_infos=[train_info],
)
instructions = [
(tfrecords_reader.ReadInstruction('train', to=k, unit='%') +
tfrecords_reader.ReadInstruction('train', from_=k + 25, unit='%'))
for k in range(0, 100, 25)
]
trains = self.reader.read(
name='mnist',
instructions=instructions,
split_infos=[train_info],
)
read_tests = [list(r) for r in tfds.as_numpy(tests)]
read_trains = [list(r) for r in tfds.as_numpy(trains)]
self.assertEqual(read_tests, [[b'a', b'b', b'c'], [b'd', b'e', b'f'],
[b'g', b'h', b'i'], [b'j', b'k', b'l']])
self.assertEqual(read_trains,
[[b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l'],
[b'a', b'b', b'c', b'g', b'h', b'i', b'j', b'k', b'l'],
[b'a', b'b', b'c', b'd', b'e', b'f', b'j', b'k', b'l'],
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i']])
def test_read_files(self):
self._write_tfrecord('train', 4, 'abcdefghijkl')
fname_pattern = 'mnist-train.tfrecord-0000%d-of-00004'
ds = self.reader.read_files(
[
shard_utils.FileInstruction(
filename=fname_pattern % 1, skip=0, take=-1, num_examples=3),
shard_utils.FileInstruction(
filename=fname_pattern % 3, skip=1, take=1, num_examples=1),
],
read_config=read_config_lib.ReadConfig(),
shuffle_files=False,
)
read_data = list(tfds.as_numpy(ds))
self.assertEqual(read_data, [six.b(l) for l in 'defk'])
def test_input_context(self):
split_info = self._write_tfrecord('train', 5, 'abcdefghijkl')
self.assertEqual(split_info.shard_lengths, [2, 3, 2, 3, 2])
def read(num_workers, index):
return list(
tfds.as_numpy(
self.reader.read(
name='mnist',
instructions='train',
split_infos=[split_info],
read_config=read_config_lib.ReadConfig(
input_context=tf.distribute.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=index,
),),
# Workers should read a deterministic subset of the examples,
# even if examples within one worker may be shuffled.
shuffle_files=True,
)))
def _b(bytes_str):
if six.PY2:
return list(bytes_str)
# Convert to List[bytes] (rather than List[int])
return [bytes([b]) for b in bytes_str]
# Read all the data (single pipeline)
self.assertCountEqual(read(num_workers=1, index=0), _b(b'abcdefghijkl'))
    # Read part of the data (workers should not overlap)
self.assertCountEqual(read(num_workers=3, index=0), _b(b'abhij')) # 0, 3
self.assertCountEqual(read(num_workers=3, index=1), _b(b'cdekl')) # 1, 4
self.assertEqual(read(num_workers=3, index=2), _b(b'fg')) # Shards 2
# If num_workers == num_shards, then a single shard is read
self.assertEqual(read(num_workers=5, index=1), _b(b'cde')) # Shard 1
# If num_workers > num_shards, raise error
with self.assertRaisesRegexp(ValueError, 'Cannot shard the pipeline'):
read(num_workers=6, index=0)
def test_shuffle_files_should_be_disabled(self):
self._write_tfrecord('train', 4, 'abcdefghijkl')
fname_pattern = 'mnist-train.tfrecord-0000%d-of-00004'
with self.assertRaisesWithPredicateMatch(ValueError,
_SHUFFLE_FILES_ERROR_MESSAGE):
self.reader.read_files(
[
shard_utils.FileInstruction(
filename=fname_pattern % 1, skip=0, take=-1, num_examples=3),
],
read_config=read_config_lib.ReadConfig(),
shuffle_files=True,
disable_shuffling=True,
)
def test_cycle_length_must_be_one(self):
self._write_tfrecord('train', 4, 'abcdefghijkl')
fname_pattern = 'mnist-train.tfrecord-0000%d-of-00004'
instructions = [
shard_utils.FileInstruction(
filename=fname_pattern % 1, skip=0, take=-1, num_examples=3),
]
# In ordered dataset interleave_cycle_length is set to 1 by default
self.reader.read_files(
instructions,
read_config=read_config_lib.ReadConfig(),
shuffle_files=False,
disable_shuffling=True,
)
with self.assertRaisesWithPredicateMatch(ValueError,
_CYCLE_LENGTH_ERROR_MESSAGE):
self.reader.read_files(
instructions,
read_config=read_config_lib.ReadConfig(interleave_cycle_length=16),
shuffle_files=False,
disable_shuffling=True,
)
def test_ordering_guard(self):
self._write_tfrecord('train', 4, 'abcdefghijkl')
fname_pattern = 'mnist-train.tfrecord-0000%d-of-00004'
instructions = [
shard_utils.FileInstruction(
filename=fname_pattern % 1, skip=0, take=-1, num_examples=3),
]
reported_warnings = []
with mock.patch('absl.logging.warning', reported_warnings.append):
self.reader.read_files(
instructions,
read_config=read_config_lib.ReadConfig(
interleave_cycle_length=16, enable_ordering_guard=False),
shuffle_files=True,
disable_shuffling=True,
)
expected_warning = _SHUFFLE_FILES_ERROR_MESSAGE + '\n' + _CYCLE_LENGTH_ERROR_MESSAGE
self.assertIn(expected_warning, reported_warnings)
@mock.patch(
'tensorflow.data.experimental.assert_cardinality',
wraps=tf.data.experimental.assert_cardinality)
def test_assert_cardinality_is_on_by_default(self, assert_cardinality):
train_info = self._write_tfrecord('train', 5, 'abcdefghijkl')
self.reader.read(
name='mnist', instructions='train', split_infos=[train_info])
assert_cardinality.assert_called_with(12)
@mock.patch('tensorflow.data.experimental.assert_cardinality')
def test_assert_cardinality_can_be_disabled_through_readconfig(
self, assert_cardinality):
train_info = self._write_tfrecord('train', 5, 'abcdefghijkl')
self.reader.read(
name='mnist',
instructions='train',
split_infos=[train_info],
read_config=read_config_lib.ReadConfig(assert_cardinality=False))
assert not assert_cardinality.called
def test_shard_api():
si = tfds.core.SplitInfo(
name='train',
shard_lengths=[10, 20, 13],
num_bytes=0,
)
fi = [
shard_utils.FileInstruction(
filename='ds_name-train.tfrecord-00000-of-00003',
skip=0,
take=-1,
num_examples=10,
),
shard_utils.FileInstruction(
filename='ds_name-train.tfrecord-00001-of-00003',
skip=0,
take=-1,
num_examples=20,
),
shard_utils.FileInstruction(
filename='ds_name-train.tfrecord-00002-of-00003',
skip=0,
take=-1,
num_examples=13,
),
]
sd = splits.SplitDict([si], dataset_name='ds_name')
assert sd['train[0shard]'].file_instructions == [fi[0]]
assert sd['train[1shard]'].file_instructions == [fi[1]]
assert sd['train[-1shard]'].file_instructions == [fi[-1]]
assert sd['train[-2shard]'].file_instructions == [fi[-2]]
assert sd['train[:2shard]'].file_instructions == fi[:2]
assert sd['train[1shard:]'].file_instructions == fi[1:]
assert sd['train[-1shard:]'].file_instructions == fi[-1:]
assert sd['train[1:-1shard]'].file_instructions == fi[1:-1]
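# To summarize the shard-slicing grammar exercised above: 'train[0shard]'
# selects only the first shard, 'train[:2shard]' the first two shards, and
# 'train[1:-1shard]' everything except the first and last shard.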
if __name__ == '__main__':
testing.test_main()
|
[
"[email protected]"
] | |
787e0215434095aa0b3afec689844c5bea7ff1fc
|
82c73b70c2002f647bdc254125f0bdb18f0b79d2
|
/openstack_dashboard/dashboards/admin/volumes/urls.py
|
4a6a4c23b4007ad736bdeacbe29791aa6810287d
|
[
"Apache-2.0"
] |
permissive
|
xuweiliang/Codelibrary
|
cfb5755ced54c65cacdb3e35ab2b98385f8d5f8e
|
54e45b2daa205132c05b0ff5a2c3db7fca2853a7
|
refs/heads/master
| 2021-05-04T00:31:42.025238 | 2018-03-20T07:05:20 | 2018-03-20T07:05:20 | 71,852,078 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,999 |
py
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include # noqa
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.volumes.backups \
import urls as backups_urls
from openstack_dashboard.dashboards.admin.volumes.snapshots \
import urls as snapshot_urls
from openstack_dashboard.dashboards.admin.volumes import views
from openstack_dashboard.dashboards.admin.volumes.volumes \
import urls as volume_urls
from openstack_dashboard.dashboards.admin.volumes.volume_types \
import urls as volume_types_urls
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^\?tab=volumes_and_snapshots__snapshots_tab$',
views.IndexView.as_view(), name='snapshots_tab'),
url(r'^\?tab=volumes_and_snapshots__volumes_tab$',
views.IndexView.as_view(), name='volumes_tab'),
url(r'^\?tab=volumes_and_snapshots__backups_tab$',
views.IndexView.as_view(), name='backups_tab'),
url(r'', include(volume_urls, namespace='volumes')),
url(r'backups/', include(backups_urls, namespace='backups')),
url(r'snapshots/', include(snapshot_urls, namespace='snapshots')),
url(r'^\?tab=volumes_group_tabs__volume_types_tab$',
views.IndexView.as_view(),
name='volume_types_tab'),
url(r'volume_types/',
include(volume_types_urls, namespace='volume_types')),
)
|
[
"[email protected]"
] | |
a88e8898471f969bb19e173853e19c315c95f494
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/perm_20200622013142.py
|
721353b76d5da3353229c7518531f035e0fa2219
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
def sequence(n,k):
newArr = []
for i in range(1,n+1):
newArr.append(i)
print(newArr)
sequence(3,3)
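# For example, sequence(3, 3) above prints [1, 2, 3]; the second argument k is
# accepted but not used yet.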
|
[
"[email protected]"
] | |
26eb47fa8e30d61647f5fb7f6b7ecabb24d49630
|
fd463a0526c3a23916ce4fe32307e80472a0f25c
|
/mesa.py
|
337f0a4d9b34d2b38324a43bee8defa7f9804e87
|
[
"MIT"
] |
permissive
|
aiedward/mesa
|
5ae516380dcd04c5416227d2074ec1dd26c5cde6
|
ce685958a506fa8b877ecbf5c97d0d28a4f102c6
|
refs/heads/master
| 2022-11-08T01:17:14.379005 | 2020-06-26T17:31:45 | 2020-06-26T17:31:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,453 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 8 02:27:20 2020
@author: ZhiningLiu1998
mailto: [email protected] / [email protected]
"""
import os
import torch
import pandas as pd
import numpy as np
from gym import spaces
from sac_src.sac import SAC
from sac_src.replay_memory import ReplayMemory
from environment import EnsembleTrainingEnv
from utils import *
class Mesa(EnsembleTrainingEnv):
"""The ensemble imbalanced learning framework MESA.
Parameters
----------
args : arguments
See arguments.py for more information.
base_estimator : scikit-learn classifier object
The base estimator used to build ensemble classifiers.
NO need to support sample weighting.
Built-in `fit()`, `predict()`, `predict_proba()` methods are required.
n_estimators : int, optional (default=10)
The number of base estimators used to form an MESA ensemble.
Attributes
----------
args : arguments
rater : object (Rater)
Rater for evaluate classifiers performance on class imabalanced data.
See arguments.py for more information.
base_estimator_ : object (scikit-learn classifier)
The base estimator from which the ensemble is grown.
estimators_ : list of classifiers
The collection of fitted sub-estimators.
n_estimators : int
The number of base estimators used to form an MESA ensemble.
meta_sampler : object (SAC)
The meta-sampler in MESA.
env : object (EnsembleTrainingEnv)
The ensemble training environment in MESA.
memory : object (ReplayMemory)
The replay memory for Soft Actor-Critic training.
"""
def __init__(self, args, base_estimator, n_estimators=10):
super(Mesa, self).__init__(args, base_estimator)
# state-size = 2 x num_bins
state_size = int(args.num_bins*2)
action_space = spaces.Box(low=0.0, high=1.0, shape=[1], dtype=np.float32)
self.args = args
self.n_estimators = n_estimators
self.base_estimator_ = base_estimator
self.meta_sampler = SAC(state_size, action_space, self.args)
self.env = EnsembleTrainingEnv(args, base_estimator)
self.memory = ReplayMemory(self.args.replay_size)
def meta_fit(self, X_train, y_train, X_valid, y_valid, X_test=None, y_test=None):
"""Meta-training process of MESA.
Parameters
----------
X_train : array-like of shape = [n_training_samples, n_features]
The training data instances.
y_train : array-like of shape = [n_training_samples]
Labels for X_train.
X_valid : array-like of shape = [n_validation_samples, n_features]
The validation data instances.
y_valid : array-like of shape = [n_validation_samples]
Labels for X_valid.
X_test : array-like of shape = [n_training_samples, n_features], optional (default=None)
The test data instances.
y_train : array-like of shape = [n_training_samples], optional (default=None)
Labels for X_test.
Returns
----------
self : object (Mesa)
"""
# initialize replay memory and environment
self.env.load_data(X_train, y_train, X_valid, y_valid, X_test, y_test, train_ratio=self.args.train_ratio)
self.memory = memory_init_fulfill(self.args, ReplayMemory(self.args.replay_size))
self.scores = []
total_steps = self.args.update_steps + self.args.start_steps
num_steps, num_updates, num_episodes = 0, 0, 0
# start meta-training
while num_steps < total_steps:
self.env.init()
state = self.env.get_state()
done = False
# for each episode
while not done:
num_steps += 1
# take an action
if num_steps >= self.args.start_steps:
action, by = self.meta_sampler.select_action(state), 'mesa'
else:
action, by = self.meta_sampler.action_space.sample(), 'rand'
# store transition
next_state, reward, done, info = self.env.step(action[0])
reward = reward * self.args.reward_coefficient
self.memory.push(state, action, reward, next_state, float(done))
# update meta-sampler parameters
if num_steps > self.args.start_steps:
for i in range(self.args.updates_per_step):
_, _, _, _, _ = self.meta_sampler.update_parameters(
self.memory, self.args.batch_size, num_updates)
num_updates += self.args.updates_per_step
# print log to stdout
                if self.args.meta_verbose == 'full':
print ('Epi.{:<4d} updates{:<4d}| {} | {} by {}'.format(num_episodes, num_updates, info, action[0], by))
if done:
num_episodes += 1
self.record_scores()
# record print mean score of latest args.meta_verbose_mean_episodes to stdout
self.verbose_mean_scores(num_episodes, num_updates, by)
return self
def record_scores(self):
"""Record the training/validation/test performance scores."""
train_score = self.env.rater.score(self.env.y_train, self.env.y_pred_train_buffer)
valid_score = self.env.rater.score(self.env.y_valid, self.env.y_pred_valid_buffer)
test_score = self.env.rater.score(self.env.y_test, self.env.y_pred_test_buffer) if self.env.flag_use_test_set else 'NULL'
self.scores.append([train_score, valid_score, test_score] if self.env.flag_use_test_set else [train_score, valid_score])
return
def verbose_mean_scores(self, num_episodes, num_updates, by):
"""Print mean score of latest n episodes to stdout.
n = args.meta_verbose_mean_episodes
Parameters
----------
num_episodes : int
The number of finished meta-training episodes.
num_updates : int
The number of finished meta-sampler updates.
by : {'rand', 'mesa'}, string
The way of selecting actions in the current episode.
"""
        if self.args.meta_verbose == 'full' or (self.args.meta_verbose != 0 and num_episodes % self.args.meta_verbose == 0):
view_bound = max(-self.args.meta_verbose_mean_episodes, -len(self.scores))
recent_scores_mean = np.array(self.scores)[view_bound:].mean(axis=0)
print ('Epi.{:<4d} updates {:<4d} |last-{}-mean-{}| train {:.3f} | valid {:.3f} | test {:.3f} | by {}'.format(
num_episodes, num_updates, self.args.meta_verbose_mean_episodes, self.args.metric,
recent_scores_mean[0], recent_scores_mean[1], recent_scores_mean[2], by))
return
def fit(self, X, y, X_valid, y_valid, n_estimators=None, verbose=False):
"""Build a MESA ensemble from training set (X, y) and validation set (X_valid, y_valid).
Parameters
----------
X : array-like of shape = [n_training_samples, n_features]
The training data instances.
y : array-like of shape = [n_training_samples]
Labels for X.
X_valid : array-like of shape = [n_validation_samples, n_features]
The validation data instances.
y_valid : array-like of shape = [n_validation_samples]
Labels for X_valid.
n_estimators : int, optional (default=self.n_estimators)
The number of base estimators used to form an MESA ensemble.
verbose: bool, optional (default=False)
Whether to print progress messages to stdout.
Returns
----------
self : object (Mesa)
"""
n_estimators = self.n_estimators if n_estimators is None else n_estimators
self.load_data(X, y, X_valid, y_valid)
self.init()
self.actions_record = []
for i in range(n_estimators-1):
state = self.get_state()
action = self.meta_sampler.select_action(state)
self.actions_record.append(action[0])
_, _, _, info = self.step(action[0], verbose)
if verbose:
print ('{:<12s} | action: {} {}'.format('Mesa', action, info))
return self
def save_meta_sampler(self, directory='save_model', suffix='meta_sampler'):
"""Save trained meta-sampler to files.
Parameters
----------
directory : string, optional (default='save_model')
The directory to save files.
Create the directory if it does not exist.
suffix : string, optional (default='meta_sampler')
The actor network will be saved in {directory}/actor_{suffix}.
The critic network will be saved in {directory}/critic_{suffix}.
"""
directory_path = f'{directory}/'
if not os.path.exists(directory_path):
os.makedirs(directory_path)
actor_path = f'{directory_path}actor_{suffix}'
critic_path = f'{directory_path}critic_{suffix}'
self.meta_sampler.save_model(actor_path, critic_path)
return
def load_meta_sampler(self, directory='save_model', suffix='meta_sampler'):
"""Load trained meta-sampler from files.
Parameters
----------
directory : string, optional (default='save_model')
The directory to load files.
suffix : string, optional (default='meta_sampler')
The actor network will be loaded from {directory}/actor_{suffix}.
The critic network will be loaded from {directory}/critic_{suffix}.
"""
directory_path = f'{directory}/'
actor_path = f'{directory_path}actor_{suffix}'
critic_path = f'{directory_path}critic_{suffix}'
self.meta_sampler.load_model(actor_path, critic_path)
return self
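# A rough usage sketch (the exact fields of `args` come from arguments.py,
# which is not shown here, and the estimator below is only one example of a
# scikit-learn classifier):
#
#     from sklearn.tree import DecisionTreeClassifier
#     mesa = Mesa(args, DecisionTreeClassifier(), n_estimators=10)
#     mesa.meta_fit(X_train, y_train, X_valid, y_valid)  # train the meta-sampler
#     mesa.fit(X_train, y_train, X_valid, y_valid)       # build the final ensemble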
|
[
"[email protected]"
] | |
dfe31c4e723e17ac685d9e2a451abd83f0774db5
|
b0cdbad299f6174bfdb0fba173dbcf3889b82209
|
/Modules/sys/38_sys.py
|
09e80a1a040cbbf4490bac86fe593db7821a7af8
|
[] |
no_license
|
deesaw/PythonD-06
|
a33e676f1e0cfc13b4ea645c8b60547b198239ac
|
3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa
|
refs/heads/master
| 2023-03-18T08:24:42.030935 | 2021-03-02T14:15:09 | 2021-03-02T14:15:09 | 343,797,605 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 373 |
py
|
import sys
print("Length:",len(sys.argv))
if (len(sys.argv)>2):
sum=0
for i in sys.argv[1:]:
sum=sum+int(i)
print(sum)
else:
total=0
no_Values=int(input("How many values has to be added:"))
for val in range(no_Values):
print(val)
num=int(input("Enter Value:"))
total+=num
print(total)
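# Example: `python 38_sys.py 2 3 4` prints "Length: 4" followed by the sum 9;
# with fewer than two numbers on the command line the script prompts for
# values interactively instead.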
|
[
"[email protected]"
] | |
29958a6140724765938a648ad8144e723a3f67dc
|
fecc1daf3ee945191dee561dd501e9e17a36685d
|
/projectile.py
|
77b4eb35dcd119c7c25ba9c514b258c87ce31e60
|
[] |
no_license
|
tt-n-walters/tt19-pytue-game
|
856d9bb4a2c4260d88b1ef6fb63426f648c4808f
|
2fe4ca47180b617f0d1d72046753fa5e914a2809
|
refs/heads/master
| 2022-10-23T11:35:03.150595 | 2020-06-16T17:03:49 | 2020-06-16T17:03:49 | 267,097,729 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 705 |
py
|
from arcade import Sprite, draw_rectangle_filled, color
from math import sin, cos, radians
class Projectile(Sprite):
def __init__(self, image_filename, direction, speed):
super().__init__(filename=image_filename, scale=4)
self.angle = direction
self.change_x = speed * cos(radians(direction))
self.change_y = speed * sin(radians(direction))
class SmallBullet(Projectile):
def __init__(self, gun_x, gun_y, direction, speed):
super().__init__("assets/bullets/bulletDark2.png", direction, speed)
self.center_x = gun_x
self.center_y = gun_y
self.width = self.width / 2
self.height = self.height / 2
self.angle = -90
|
[
"[email protected]"
] | |
44851354a019a77a82c2a8e957f0ee79172d10cd
|
2fff43f976e55c31e448e56b2809c36a0b154684
|
/blog/views.py
|
b2aac913895a07b61be54fae4394fae8a9ac7c18
|
[] |
permissive
|
omar115/first_blog_application
|
60f48c859f7b2d5be30f6d4abc34564b2dc7cd08
|
c87ae74bdeabc72fc12162528a966ef1295184e6
|
refs/heads/main
| 2023-02-16T12:20:24.513085 | 2021-01-14T11:57:00 | 2021-01-14T11:57:00 | 329,451,779 | 0 | 0 |
MIT
| 2021-01-14T10:20:55 | 2021-01-13T22:56:54 | null |
UTF-8
|
Python
| false | false | 145 |
py
|
from django.shortcuts import render
# Create your views here.
def post_list(request):
return render(request, 'blog/post_list.html', {})
|
[
"[email protected]"
] | |
2fad9418b56e80ca01ab03f50a5629b955b26ddb
|
e1efc8e0b0e4629dea61504fbc816c0527691bd9
|
/8.thread线程/1.线程基础/1_线程基本概念.py
|
63dc04d2e753d0495c412dff0504ed2dee2325fc
|
[] |
no_license
|
xiongmengmeng/xmind-technology
|
2bb67a0bf92cfd660cac01f8ab3a2454423ccba5
|
e2fdb6987ef805a65f0a4feb52d84383853f4b77
|
refs/heads/main
| 2023-07-31T07:10:29.868120 | 2021-09-11T08:18:17 | 2021-09-11T08:18:17 | 307,636,242 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,390 |
py
|
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="thread"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("线程基本概念")
r2=s2.getRootTopic()
r2.setTitle("线程基本概念")
content={
'进程':[
'操作系统管理的基本运行单元',
'当一个程序被运行,从磁盘加载程序的代码到内存,就开启了一个进程'
'如:一个正在运行的exe程序'
],
'线程':[
'一个指令流,将指令流中的一条条指令以一定的顺序交给CPU处理',
'进程中独立运行的子任务',
'最大限度地利用CPU空闲时间处理任务'
],
'并行与并发':[
'单核CPU下,线程串行执行',
{'任务调度器':[
'将cpu的时间片(15毫秒)分给不同的程序使用',
'由于cpu在线程间(时间片)的切换很快,感觉是同时运行的'
]},
{'并发':[
'线程轮流使用cpu,实际是串行的'
]},
{'并行':[
'多核cpu下,每个核都可调试运行线程'
]}
],
'多线程':[
{'异步':[
'代码运行结果与代码执行或调用顺序无关'
]},
{'实现方式':[
'继承Thread类',
'实现Runnable接口',
'实现Callable接囗(FutureTask接收返回值)'
]},
{'Future接口':[
'获取异步计算结果',
]},
{'FutureTask类':[
'Future接口的实现类',
]}
],
'非线程安全':[
'多个线程对同一个对象中的同一个实例变量进行操作->出现值被更改、不同步的情况->影响程序的执行流程',
{'分类':[
'成员变量:共享的,有读写操作的',
'局部变量:引用对象逃离方法作用范围'
]},
'线程安全包含原子性和可见性'
],
# 'Timer定时器类':[
# {'1.Timer类':[
# '设置计划任务,TimeTask类:封闭计划任务'
# ]},
# {'2.Schedule(TimeTask timeTask,Date time)在指定时间执行一次某任务':[
# '一个timer可运行多个TimeTask',
# 'TimeTask以队列方式一个一个被顺序执行',
# '执行的时间可能跟预计不一致(单线程执行)'
# ]},
# {'3.Schedule(TimeTask timeTask,Date firstTime,long period)':[
# '指定日期后,按指定间隔周期性无限循环地执行某一任务'
# ]}
# ],
'多线程下的单例':[
{'立即加载':[
'使用类时已将对象创建完毕,不存在线程安全问题',
'类加载的准备阶段为类变量分配空间,设初始值,初始化阶段为类变量赋值'
]},
{'延迟加载':[
'兼顾效率与线程安全性,使用DCL双检查锁机制:volatile+synchronized',
'private volatile static MyObject myObject;'
'....',
'synchronized (MyObject.class) {',
' if (object == null) {',
' object = new MyObject();',
' }',
'}',
{'静态内置类实现':[
'类加载的初始化阶段会执行类的静态语句块'
]}
]}
]
}
# Build the xmind
xmind.build(content,r2)
# Save the xmind
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
|
[
"[email protected]"
] | |
39dd022361eeff4b26dc76375bafd21c5b91e869
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/Simulation/ISF/ISF_Geant4/ISF_Geant4CommonTools/python/ISF_Geant4CommonToolsConfigDb.py
|
57bc594f8edc61ff1211ce0259aa17ceaa7dbae2
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 890 |
py
|
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
"""
Configuration database for ISF_Geant4CommonTools
Elmar Ritsch, 31/10/2014
"""
from AthenaCommon.CfgGetter import addTool, addToolClone, addService, addAlgorithm, \
addTypesToExcludeIfDefaultValue, addNamesToExcludeIfDefaultValue, addFullNamesToExcludeIfDefaultValue, \
addPropertiesToExcludeIfDefault, \
addTypesToSkipIfNotAvailable, addNamesToSkipIfNotAvailable, addFullNamesToSkipIfNotAvailable, \
addTypesOnlyToSkip
from AthenaCommon.Constants import * # FATAL,ERROR etc.
import AthenaCommon.SystemOfUnits as Units
# Common tools, services and algorithms used by jobs
addTool("ISF_Geant4CommonTools.ISF_Geant4CommonToolsConfig.getEntryLayerTool", "ISF_EntryLayerTool")
addTool("ISF_Geant4CommonTools.ISF_Geant4CommonToolsConfig.getAFIIEntryLayerTool", "ISF_AFIIEntryLayerTool")
|
[
"[email protected]"
] | |
4c5c9b5b065e80fca6d1741d5b52a87f50e94787
|
b8ba0f496b3e89af32c11503b8bb87b1917c4c36
|
/mutant/__init__.py
|
862b1670352ecd904a7b646a269bdae676ea1330
|
[
"MIT"
] |
permissive
|
torchingloom/django-mutant
|
21a3bbb076668c88e855725e74163442810e4817
|
7bf396071f22c7339098b7ec57e0629750cf57c8
|
refs/heads/master
| 2021-01-21T20:19:04.029964 | 2016-01-14T05:02:13 | 2016-01-14T05:02:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 258 |
py
|
from __future__ import unicode_literals
import logging
from django.utils.version import get_version
VERSION = (0, 2, 2, 'alpha', 0)
__version__ = get_version(VERSION)
logger = logging.getLogger('mutant')
default_app_config = 'mutant.apps.MutantConfig'
|
[
"[email protected]"
] | |
1d5a1ceca4a37df1c9413d3bd9f77be6dcc74c75
|
bb93784aad5933329118cc2ed86357045e535c51
|
/setup.py
|
d92ebe3852c13ffa729b201b45376444bae1511e
|
[] |
no_license
|
garaemon/pr_style_review
|
3f69ddee8a93d3422955fa96f42c754a4c3c1a43
|
2ae6e400ae68746fc6d385f642d01cbaaa9c19c2
|
refs/heads/master
| 2020-04-24T22:11:07.173809 | 2019-02-28T16:01:57 | 2019-02-28T16:01:57 | 172,303,383 | 0 | 0 | null | 2019-02-28T16:01:58 | 2019-02-24T06:15:43 |
Python
|
UTF-8
|
Python
| false | false | 135 |
py
|
from setuptools import setup
setup(
name='pr_style_review',
version='0.0.0',
install_requires=['GitPython', 'github3.py'])
|
[
"[email protected]"
] | |
e53912616396c13d4e09af02972f7af0a5d56051
|
babff7df289cb7173a22be1f68feec51f71d9269
|
/manage.py
|
0436a4c994071feac690f1a81989b12038390b25
|
[
"MIT"
] |
permissive
|
jwestgard/prange-db
|
9d1a6817dd9f94d8a4dc380cefe8846dd8b20312
|
27535271cd902d18673c187f4277e47327563556
|
refs/heads/master
| 2021-01-10T21:20:50.583769 | 2015-10-11T17:21:04 | 2015-10-11T17:21:04 | 42,461,824 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 726 |
py
|
#!/usr/bin/env python
import os
from app import create_app, db
from app.models import User, Role
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
|
[
"[email protected]"
] | |
79c9b3bb46711849e3535454e5043208d663f50b
|
c1f732ebeceb8c4103454f8ed8c5be3f02589b3f
|
/run.py
|
571c2e31c096ff2f91563919165a8d25630100b9
|
[
"MIT"
] |
permissive
|
Roychela/Password-Locker
|
828cab4ba678f11beeee602cf2a475e52e45e147
|
67e16580ea9283ede593c5cf6eadcfde877a70d2
|
refs/heads/master
| 2020-06-04T06:01:37.011996 | 2019-06-17T12:23:38 | 2019-06-17T12:23:38 | 191,897,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,172 |
py
|
#!/usr/bin/env python3.6
from user_credentials import User
from user_credentials import Credentials
def create_user(uname,password):
'''
Function to create a new user
'''
new_user = User(uname, password)
return new_user
def save_user(user):
'''
Function to save a new user
'''
user.save_user()
def authenticate_user(username, password):
'''
Function to authenticate a user
'''
authenticated_user = Credentials.confirm_login(username, password)
return authenticated_user
def create_credential(user_name,site_name,account_name,password):
'''
Function to create a new credential object
'''
new_credential = Credentials(user_name,site_name,account_name,password)
return new_credential
def save_credential(credential):
'''
Function to save a created credential
'''
Credentials.save_credentials(credential)
def generate_password():
'''
Function to randomly generate password
'''
passwrd_generated = Credentials.generate_password()
return passwrd_generated
def display_credentials(user_name):
'''
Function to display credentials
'''
return Credentials.display_credentials(user_name)
def copy_credential(site_name):
'''
Function to copy a credential to the clipboard
'''
return Credentials.copy_credential(site_name)
def main():
print(' ')
print('Hello! Welcome to Password Locker.')
while True:
print(' ')
print("-"*40)
print('Use these short codes: ca-Create Password-Locker account, log-Login, ex-Exit')
short_code = input('Enter short code here: ').lower().strip()
if short_code == 'ex':
break
elif short_code == 'ca':
print("-"*40)
print(' ')
print('To create a new account:')
username = input('Choose a username - ').strip()
password = input('Choose a password - ').strip()
save_user(create_user(username,password))
print(" ")
print(f'Your Password-Locker account username is : {username} and password is: {(password)}')
elif short_code == 'log':
print("-"*40)
print(' ')
print('To login:')
user_name = input('Enter your Password-Locker username - ').strip()
password = str(input('Enter your password - '))
user_authenticated = authenticate_user(user_name,password)
if user_authenticated == user_name:
print(" ")
print(f'Welcome {user_name}. You have successfully logged in. Choose short code to continue')
print(' ')
while True:
print("-"*40)
print('Your credentials short codes: ccd-Create credential, dc-Display Credentials, dl-delete credentials account, cp-Copy Password, ex-Exit')
short_code = input('Enter short code: ').lower().strip()
print("-"*40)
if short_code == 'ex':
print(" ")
print(f'Goodbye {user_name}')
break
elif short_code == 'ccd':
print(' ')
print('Enter your credential account information:')
site_name = input('Enter the site name- ').strip()
account_name = input('Enter your account name - ').strip()
while True:
print(' ')
print("-"*40)
print('Select option for entering a password: ep-Enter your own password, gp-Generate a password ,ex-Exit')
passwrd_select = input('Enter an option: ').lower().strip()
print("-"*40)
if passwrd_select == 'ep':
print(" ")
password = input('Enter your password: ').strip()
break
elif passwrd_select == 'gp':
password = generate_password()
break
elif passwrd_select == 'ex':
break
else:
print('Incorrect entry. Try again.')
save_credential(create_credential(user_name,site_name,account_name,password))
print(' ')
print(f'Credential Created: Site Name: {site_name} - Account Name: {account_name} - Password: {password}')
print(' ')
elif short_code == 'dc':
print(' ')
if display_credentials(user_name):
print('Your credentials account list:')
print(' ')
for credential in display_credentials(user_name):
print(f'Site Name: {credential.site_name} - Account Name: {credential.account_name} - Password: {credential.password}')
print(' ')
else:
print(' ')
print("No credentials saved")
print(' ')
elif short_code == 'cp':
print(' ')
chosen_site = input('Enter the site name for the credential password to copy: ')
copy_credential(chosen_site)
print('')
print('Paste copied site_name password here:')
copy = input()
else:
                        print('Incorrect entry. Try again.')
else:
print(' ')
print('Incorrect entry. Try again or Create an Account.')
else:
print("-"*40)
print(' ')
print('Incorrect entry. Try again.')
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
e8f6439fd172538a4266d571ee451f4d67e64297
|
94b59529bc24bd219b5e4ce4ac2f11ea357833d8
|
/ayush_crowdbotics_202/settings.py
|
df56ecf4864bf3e5b67bb39177f3270a51f76cf6
|
[] |
no_license
|
payush/ayush-crowdbotics-202
|
1a285f25866731830b0800549865d7dbb90f34f5
|
dd582ac5962c9fc99e3a91508e7ec344aebe091c
|
refs/heads/master
| 2020-03-23T09:53:58.430867 | 2018-07-18T09:37:25 | 2018-07-18T09:37:25 | 141,413,738 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,130 |
py
|
"""
Django settings for ayush_crowdbotics_202 project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'itfpl$8ux!xc$nb(ojgwwbhv-$a!@+2^e4!(o2*q_4bm*=a170'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ayush_crowdbotics_202.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ayush_crowdbotics_202.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
DATABASES = {
'default': env.db()
}
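# env.db() (from django-environ) parses the DATABASE_URL environment variable,
# e.g. DATABASE_URL=postgres://user:password@localhost:5432/mydb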
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
|
[
"[email protected]"
] | |
5934915c4f56931289cac74101259879de684988
|
544cfadc742536618168fc80a5bd81a35a5f2c99
|
/tools/acloud/setup/setup.py
|
c424318b97c03083e60d2b51c27384c8c788fcfc
|
[
"Apache-2.0"
] |
permissive
|
ZYHGOD-1/Aosp11
|
0400619993b559bf4380db2da0addfa9cccd698d
|
78a61ca023cbf1a0cecfef8b97df2b274ac3a988
|
refs/heads/main
| 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,732 |
py
|
# Copyright 2018 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Setup entry point.
Setup will handle all of the necessary steps to enable acloud to create a local
or remote instance of an Android Virtual Device.
"""
from __future__ import print_function
import os
import subprocess
import sys
from acloud.internal import constants
from acloud.internal.lib import utils
from acloud.public import config
from acloud.setup import host_setup_runner
from acloud.setup import gcp_setup_runner
def Run(args):
"""Run setup.
Setup options:
-host: Setup host settings.
-gcp_init: Setup gcp settings.
-None, default behavior will setup host and gcp settings.
Args:
args: Namespace object from argparse.parse_args.
"""
if args.update_config:
_UpdateConfig(args.config_file, args.update_config[0], args.update_config[1])
return
_RunPreSetup()
# Setup process will be in the following manner:
# 1.Print welcome message.
_PrintWelcomeMessage()
# 2.Init all subtasks in queue and traverse them.
host_base_runner = host_setup_runner.HostBasePkgInstaller()
host_avd_runner = host_setup_runner.AvdPkgInstaller()
host_cf_common_runner = host_setup_runner.CuttlefishCommonPkgInstaller()
host_env_runner = host_setup_runner.CuttlefishHostSetup()
gcp_runner = gcp_setup_runner.GcpTaskRunner(args.config_file)
task_queue = []
# User must explicitly specify --host to install the avd host packages.
if args.host:
task_queue.append(host_base_runner)
task_queue.append(host_avd_runner)
task_queue.append(host_cf_common_runner)
task_queue.append(host_env_runner)
# We should do these setup tasks if specified or if no args were used.
if args.host_base or (not args.host and not args.gcp_init):
task_queue.append(host_base_runner)
if args.gcp_init or (not args.host and not args.host_base):
task_queue.append(gcp_runner)
for subtask in task_queue:
subtask.Run(force_setup=args.force)
# 3.Print the usage hints.
_PrintUsage()
def _PrintWelcomeMessage():
"""Print welcome message when acloud setup been called."""
# pylint: disable=anomalous-backslash-in-string
asc_art = " \n" \
" ___ _______ ____ __ _____ \n" \
" / _ |/ ___/ / / __ \/ / / / _ \\ \n" \
" / __ / /__/ /__/ /_/ / /_/ / // / \n" \
"/_/ |_\\___/____/\\____/\\____/____/ \n" \
" \n"
print("\nWelcome to")
print(asc_art)
def _PrintUsage():
"""Print cmd usage hints when acloud setup been finished."""
utils.PrintColorString("")
utils.PrintColorString("Setup process finished")
def _RunPreSetup():
"""This will run any pre-setup scripts.
If we can find any pre-setup scripts, run it and don't care about the
results. Pre-setup scripts will do any special setup before actual
setup occurs (e.g. copying configs).
"""
if constants.ENV_ANDROID_BUILD_TOP not in os.environ:
print("Can't find $%s." % constants.ENV_ANDROID_BUILD_TOP)
print("Please run '#source build/envsetup.sh && lunch <target>' first.")
sys.exit(constants.EXIT_BY_USER)
pre_setup_sh = os.path.join(os.environ.get(constants.ENV_ANDROID_BUILD_TOP),
"tools",
"acloud",
"setup",
"pre_setup_sh",
"acloud_pre_setup.sh")
if os.path.exists(pre_setup_sh):
subprocess.call([pre_setup_sh])
def _UpdateConfig(config_file, field, value):
"""Update the user config.
Args:
config_file: String of config file path.
field: String, field name in user config.
value: String, the value of field.
"""
config_mgr = config.AcloudConfigManager(config_file)
config_mgr.Load()
user_config = config_mgr.user_config_path
print("Your config (%s) is updated." % user_config)
gcp_setup_runner.UpdateConfigFile(user_config, field, value)
_PrintUsage()
|
[
"[email protected]"
] | |
acbce51fdf30e8e35483b45bfa30cab872f2a061
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/request/AlipayEcoMycarMaintainShopCreateRequest.py
|
8f013befa1d34da14c72c2d2b98cc509b5ff4468
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 3,992 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayEcoMycarMaintainShopCreateModel import AlipayEcoMycarMaintainShopCreateModel
class AlipayEcoMycarMaintainShopCreateRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayEcoMycarMaintainShopCreateModel):
self._biz_content = value
else:
self._biz_content = AlipayEcoMycarMaintainShopCreateModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.eco.mycar.maintain.shop.create'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
[
"[email protected]"
] | |
02d209733f4ae22362411e874febaf105049cc1f
|
6ad2fb13c42b6bb483189b0931bcca8bb117b5dc
|
/tests/ci/unittests/sdk/internal/agent/agent_client_test.py
|
e547c5dfca11df49d80a961c5e0ba687426d719c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
woodywan/python-sdk
|
aac2a2527f07c8900a01b4336f890c603a1c8d4c
|
b8583a8abf3bdc1f978fad6f692e980de00bc7ea
|
refs/heads/master
| 2023-01-14T03:23:12.291230 | 2020-11-20T15:18:41 | 2020-11-20T15:18:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,223 |
py
|
# Copyright 2020 TestProject (https://testproject.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import responses
from src.testproject.rest.messages.agentstatusresponse import AgentStatusResponse
from src.testproject.sdk.exceptions import SdkException, AgentConnectException
from src.testproject.sdk.internal.agent import AgentClient
from src.testproject.helpers import ConfigHelper
@pytest.fixture()
def mocked_agent_address(mocker):
# Mock the Agent address
mocker.patch.object(ConfigHelper, "get_agent_service_address")
ConfigHelper.get_agent_service_address.return_value = "http://localhost:9876"
@responses.activate
def test_get_agent_status_no_response_raises_sdkexception(mocked_agent_address):
# Mock the response returned by the Agent when retrieving the address
responses.add(responses.GET, "http://localhost:9876/api/status", status=200)
with pytest.raises(SdkException) as sdke:
AgentClient.get_agent_version(token="1234")
assert (
str(sdke.value)
== "Could not parse Agent status response: no JSON response body present"
)
@responses.activate
def test_get_agent_status_response_without_tag_element_raises_sdkexception(
mocked_agent_address,
):
# Mock the response returned by the Agent when retrieving the address
responses.add(
responses.GET,
"http://localhost:9876/api/status",
json={"key": "value"},
status=200,
)
with pytest.raises(SdkException) as sdke:
AgentClient.get_agent_version(token="1234")
assert (
str(sdke.value)
== "Could not parse Agent status response: element 'tag' not found in JSON response body"
)
@responses.activate
def test_get_agent_status_response_with_error_http_status_code_raises_agentconnectexception(
mocked_agent_address,
):
# Mock the response returned by the Agent when retrieving the address
responses.add(responses.GET, "http://localhost:9876/api/status", status=500)
with pytest.raises(AgentConnectException) as ace:
AgentClient.get_agent_version(token="1234")
assert (
str(ace.value) == "Agent returned HTTP 500 when trying to retrieve Agent status"
)
@responses.activate
def test_get_agent_status_response_with_tag_element_creates_agentstatusresponse(
mocked_agent_address,
):
# Mock the response returned by the Agent when retrieving the address
responses.add(
responses.GET,
"http://localhost:9876/api/status",
json={"tag": "1.2.3"},
status=200,
)
agent_status_response: AgentStatusResponse = AgentClient.get_agent_version(
token="1234"
)
assert agent_status_response.tag == "1.2.3"
|
[
"[email protected]"
] | |
9aebea7c51c967d87dbf4f648217c77a7eb52dda
|
c268dcf432f3b7171be6eb307aafbe1bd173285a
|
/reddit2telegram/channels/~inactive/r_linuxmemes/app.py
|
7c7c338a5ee9f373aadc9306f10466ee14873922
|
[
"MIT"
] |
permissive
|
Fillll/reddit2telegram
|
a7162da2cc08c81bcc8078ea4160d4ee07461fee
|
5d8ee3097e716734d55a72f5a16ce3d7467e2ed7
|
refs/heads/master
| 2023-08-09T10:34:16.163262 | 2023-07-30T18:36:19 | 2023-07-30T18:36:19 | 67,726,018 | 258 | 205 |
MIT
| 2023-09-07T02:36:36 | 2016-09-08T17:39:46 |
Python
|
UTF-8
|
Python
| false | false | 143 |
py
|
#encoding:utf-8
subreddit = 'linuxmemes'
t_channel = '@r_linuxmemes'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
[
"[email protected]"
] | |
9faffe3153372843ef972e1783f126a5c1a982cf
|
10d98fecb882d4c84595364f715f4e8b8309a66f
|
/structformer/utils.py
|
5bfcdbe7fcd76e6958692bae860d3f0ecf877937
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
afcarl/google-research
|
51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42
|
320a49f768cea27200044c0d12f394aa6c795feb
|
refs/heads/master
| 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 |
Apache-2.0
| 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null |
UTF-8
|
Python
| false | false | 1,703 |
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utils for training."""
import random
import numpy
import torch
def batchify(idxs, bsz, device, pad=0, shuffle=True):
"""Batchify the training data."""
length = [len(seq) for seq in idxs]
sorted_idx = numpy.argsort(length)
idxs_sorted = [idxs[i] for i in sorted_idx]
idxs_batched = []
i = 0
def get_batch(source, i, batch_size, pad=0):
total_length = 0
data = []
while total_length < batch_size and i < len(source):
data.append(source[i])
total_length += len(source[i])
i += 1
length = [len(seq) for seq in data]
max_l = max(length)
data_padded = []
for seq in data:
data_padded.append(seq + [pad] * (max_l - len(seq)))
data_mat = torch.LongTensor(data_padded).to(device)
return data_mat
while i < len(idxs_sorted):
idxs_batched.append(get_batch(idxs_sorted, i, bsz, pad))
i += idxs_batched[-1].size(0)
if shuffle:
sentence_idx = list(range(len(idxs_batched)))
random.shuffle(sentence_idx)
idxs_batched = [idxs_batched[i] for i in sentence_idx]
return idxs_batched
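if __name__ == '__main__':
    # Tiny illustrative check: three toy index sequences, a 6-token batch
    # budget, CPU device and shuffling disabled give one (3, 3) padded batch.
    toy_idxs = [[1, 2], [3, 4, 5], [6]]
    batches = batchify(toy_idxs, bsz=6, device=torch.device('cpu'), pad=0, shuffle=False)
    print([tuple(b.shape) for b in batches])  # [(3, 3)]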
|
[
"[email protected]"
] | |
335a739b77ad9f9f6b858847ac0bd3526d3b033c
|
2ed2dd917afb05d194e87f989d78953b31a5781b
|
/lesson8/mission1.py
|
c0a03c9b4c67ea4473ebbb1e9fc2e4bbd98d5c46
|
[] |
no_license
|
RenegaDe1288/pythonProject
|
4058d549db7c37652f77438c31f8b31476497d98
|
801c06f3be22ed63214987b11d6f1b3fd2fe5b44
|
refs/heads/master
| 2023-08-17T13:20:50.777842 | 2021-10-05T10:51:00 | 2021-10-05T10:51:00 | 393,145,207 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 366 |
py
|
import random
cereal = random.randint(20, 100)
print('Всего кг гречки = ', cereal)
for num in range(1, 10):
if cereal >= 4:
cereal -= 4
print('Месяц = ', num)
print('На конец месяца Осталось гречки', cereal)
else:
print('Вы здохли на месяце ', num)
break
|
[
"[email protected]"
] | |
801079f7f2054c5a86f12bc6b180ae002b113965
|
de9b8b7192a0a81e9249823bb2b86f0b7e452863
|
/.history/main_20171106225556.py
|
2fd686058e6c52f591c4b1270ab859ab28dbd2df
|
[
"MIT"
] |
permissive
|
reecebenson/uwe-dadsa-tennis-a
|
f5eaeb1b96d4e61f29279514e68eeea8ad6533db
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
refs/heads/master
| 2023-07-08T16:13:23.963348 | 2017-11-30T12:07:01 | 2017-11-30T12:07:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,361 |
py
|
# DADSA - Assignment 1
# Reece Benson
import random
from classes import Menu as Menu
from classes import Handler as Handler
class App():
# Define the variables we will be using
debug = True
handler = None
# Define all of the properties we will need to use
def __init__(self):
# Load our handler
self.handler = Handler.Handler(self)
self.handler.load()
# Generate rounds
self.generate_rounds()
# Hold the program
self.exit()
# Generate our rounds from our player list
def generate_rounds(self):
# Let's generate our random rounds from scratch
round_data = { }
# Write our new data to memory
for seasonId in self.handler.get_seasons():
season = self.handler.get_season(seasonId)
players = season.players()
# Generate our rounds
for gender in players:
# Create our gendered rounds
if(not gender in round_data):
# Default Round Cap
roundCap = 3
# Do we have a Round Cap overrider for this gender?
if(gender + "_cap" in season.settings()):
roundCap = season.settings()[gender + "_cap"]
# Update our round data
round_data.update({ gender: [ { "_roundCap": roundCap } ] })
# Create our round data from players
rnd_players = random.sample(players[gender], len(players[gender]))
x = 0
for i in range(len(rnd_players)):
# Have we exceeded our index? (prone to IndexError?)
if(x > (len(rnd_players) + 2 / 2)):
break
# Grab our versus players
playerOne = rnd_players[x]
playerTwo = rnd_players[x + 1]
print("{0} vs {1} ".format(playerOne.name(), playerTwo.name()))
# Increment by 2 to avoid having duplicates
x += 2
print(round_data)
# A method which exits the program after the user has pressed the Return key
def exit(self):
input(">>> Press <Return> to terminate the program")
exit()
App()
|
[
"[email protected]"
] | |
c7c12ce39667b16703c21aca62a7e62b8faaaf14
|
df7f13ec34591fe1ce2d9aeebd5fd183e012711a
|
/hata/ext/plugin_loader/__init__.py
|
69f22e8d027110d53bfe40fdda2a90f6223048a0
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
HuyaneMatsu/hata
|
63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e
|
53f24fdb38459dc5a4fd04f11bdbfee8295b76a4
|
refs/heads/master
| 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 |
Apache-2.0
| 2019-12-18T03:46:12 | 2018-12-31T14:59:47 |
Python
|
UTF-8
|
Python
| false | false | 781 |
py
|
from .import_overwrite import *
from .plugin_tree import *
from .snapshot import *
from .utils import *
from .client_extension import *
from .constants import *
from .exceptions import *
from .plugin import *
from .plugin_extractor import *
from .plugin_loader import *
from .plugin_root import *
from .helpers import *
__all__ = (
*import_overwrite.__all__,
*plugin_tree.__all__,
*snapshot.__all__,
*utils.__all__,
*client_extension.__all__,
*constants.__all__,
*exceptions.__all__,
*plugin.__all__,
*plugin_extractor.__all__,
*plugin_loader.__all__,
*plugin_root.__all__,
*helpers.__all__,
)
from .. import register_library_extension
register_library_extension('HuyaneMatsu.plugin_loader')
del register_library_extension
|
[
"[email protected]"
] | |
96526cb71cfcf833e6e090c692e8579be40537a1
|
9264cda8d9bb152e4fed4923e6403a2334abbe89
|
/laxy_backend/tasks/orchestration.py
|
7d20f917adbe162b19ff3f10a961e7eaf4300c7a
|
[
"Apache-2.0"
] |
permissive
|
MonashBioinformaticsPlatform/laxy
|
b228d93690f7cb9c0658af44013497f6c756167c
|
bee9d283d0932dd845cbc9c7c090dde794d2ecbc
|
refs/heads/master
| 2023-08-26T06:25:11.188255 | 2023-08-22T05:13:09 | 2023-08-22T05:13:09 | 104,432,675 | 3 | 2 |
Apache-2.0
| 2023-06-27T23:34:18 | 2017-09-22T04:48:54 |
Python
|
UTF-8
|
Python
| false | false | 1,434 |
py
|
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from celery.utils.log import get_task_logger
from celery import shared_task
from celery import Celery, states, chain, group
from celery.exceptions import (Ignore,
InvalidTaskError,
TimeLimitExceeded,
SoftTimeLimitExceeded)
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task(bind=True, track_started=True)
def dispose_compute_resource(self, task_data, **kwargs):
from ..models import Job, ComputeResource
if task_data is None:
raise InvalidTaskError("task_data is None")
compute_resource_id = task_data.get('compute_resource_id', None)
# result = task_data.get('result')
if not compute_resource_id:
job_id = task_data.get('job_id')
job = Job.objects.get(id=job_id)
compute_resource_id = job.compute_resource.id
compute = ComputeResource.objects.get(id=compute_resource_id)
    compute.status = ComputeResource.STATUS_TERMINATING
    compute.save()
# TODO: Terminate the compute resource
# (different depending on cloud provider, resource type)
raise NotImplementedError()
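    # Illustrative sketch only (not from the laxy codebase): for an AWS-backed
    # resource the terminate step might call boto3, assuming the ComputeResource
    # records its EC2 instance id (the attribute name here is hypothetical):
    #
    #     import boto3
    #     ec2 = boto3.client('ec2')
    #     ec2.terminate_instances(InstanceIds=[compute.instance_id])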
################################################################
    compute.status = ComputeResource.STATUS_DECOMMISSIONED
    compute.save()
return task_data
|
[
"[email protected]"
] | |
97e03d5ce5a1b878b9cc44e984b6afd2fed84a1b
|
e96cc817c768915eeff46027ded14e759e8042ff
|
/Python编程/系统编程/线程/thread_lock_stu.py
|
021d30991b4e9dbbb4127f5a6df3225f838428b2
|
[] |
no_license
|
fovegage/learn-python
|
e22a32207cf513ba0f8c3428e9c00138987c2359
|
93b8d3513769a0b7d492a7b515f289fe3f1efc4a
|
refs/heads/master
| 2023-06-08T13:44:57.274677 | 2023-05-29T05:52:35 | 2023-05-29T05:52:35 | 148,493,932 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 968 |
py
|
# -*- coding: utf-8 -*-
# @Author: fovegage
# @File: thread_lock_stu.py
# @Email: [email protected]
# @Date: 2018-09-21 16:41:02
# @Last Modified time: 2018-09-21 16:41:07
import threading
# Locking can be used here; a delay could also be added
num = 0
class My_Thread_1(threading.Thread):
def run(self):
global num
for i in range(1000000):
flag = mutex.acquire(True)
if flag:
num += 1
mutex.release()
print("线程1:{}".format(num))
class My_thread_2(threading.Thread):
def run(self):
global num
for i in range(1000000):
flag = mutex.acquire(True)
if flag:
num += 1
mutex.release()
print("线程2:{}".format(num))
if __name__ == '__main__':
mutex = threading.Lock()
# mutex.acquire()
t1 = My_Thread_1()
t1.start()
t2 = My_thread_2()
t2.start()
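    # Added usage sketch (not in the original exercise): join both threads so
    # the final counter value is printed only after they have finished.
    t1.join()
    t2.join()
    print("final: {}".format(num))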
|
[
"[email protected]"
] | |
af36a65541b839c6bbb15fa9e1fd4ff8e5374673
|
c09a4b4f02849c03ba536edda2bf920b655be6bc
|
/wyl/add_noise.py
|
6dc8a805bf7045866c98b4283c6c4910ad3bc427
|
[] |
no_license
|
jpober/brownscripts
|
33bcc70a31694dfb06f1314adb1402316540108c
|
c25789ec765b018eaad59d99a0a4264c75655265
|
refs/heads/master
| 2021-01-23T22:01:19.004636 | 2020-11-12T18:39:14 | 2020-11-12T18:39:14 | 57,912,669 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,388 |
py
|
import numpy as np, pyuvdata.uvdata as uvd, sys, optparse, aipy
import capo
from aipy.const import k
import glob, matplotlib.pyplot as plt
from scipy.io.idl import readsav
from IPython import embed
o = optparse.OptionParser()
o.set_usage('add_noise.py [options] obs')
o.add_option('-o', dest='outpath', help='Destination directory',default='/users/wl42/data/wl42/CALSIM/')
o.add_option('-f', dest='fhdpath', help='FHD directory', default='/users/wl42/data/wl42/FHD_out/fhd_PhaseII_TESTSET_EoR0/')
o.add_option('--gain', dest='gain', default=False, action='store_true', help='')
opts,args = o.parse_args(sys.argv[1:])
obs = args[0]
Trec = 30.
Tsky = 180.
fhdpath = opts.fhdpath
fn = glob.glob(fhdpath+'vis_data/'+obs+'*') + glob.glob(fhdpath+'metadata/'+obs+'*')
uv = uvd.UVData()
uv.read_fhd(fn,use_model=True)
dt = uv.integration_time
df = uv.channel_width
fqs = uv.freq_array[0]/1e6
Tsys = float(Tsky)*np.power(fqs/(180.),-2.6) + float(Trec)*np.ones(fqs.shape)
Area = (198000.-215000.)/(200.*200.-150.*150.)*(fqs*fqs-150.*150.)+215000.
sigs = k*Tsys/(Area*np.sqrt(df*dt))*1e23/np.sqrt(2)
#print sigs
print ' adding noise:'
for ff in range(uv.Nfreqs):
noise = (np.random.normal(0,sigs[ff],(uv.Nblts,uv.Nspws,uv.Npols))+1j*np.random.normal(0,sigs[ff],(uv.Nblts,uv.Nspws,uv.Npols)))*np.logical_not(uv.flag_array[:,:,ff])
uv.data_array[:,:,ff] += noise
if opts.gain:
print ' apply gains:'
cal = readsav(fhdpath+'calibration/'+obs+'_cal.sav',python_dict=True)
a1 = uv.ant_1_array[:uv.Nbls]
a2 = uv.ant_2_array[:uv.Nbls]
g = {'x':{1:[],2:[]},'y':{1:[],2:[]}}
for i in range(uv.Nbls):
g['x'][1].append(cal['cal']['GAIN'][0][0][a1[i]])
g['x'][2].append(cal['cal']['GAIN'][0][0][a2[i]])
g['y'][1].append(cal['cal']['GAIN'][0][1][a1[i]])
g['y'][2].append(cal['cal']['GAIN'][0][1][a2[i]])
g['x'][1] = g['x'][1]*uv.Ntimes
g['x'][2] = g['x'][2]*uv.Ntimes
g['y'][1] = g['y'][1]*uv.Ntimes
g['y'][2] = g['y'][2]*uv.Ntimes
g['x'][1] = np.array(g['x'][1])
g['x'][2] = np.array(g['x'][2])
g['y'][1] = np.array(g['y'][1])
g['y'][2] = np.array(g['y'][2])
for i in range(uv.Npols):
p1,p2 = aipy.miriad.pol2str[uv.polarization_array[i]]
uv.data_array[:,0][:,:,i] *= (g[p1][1]*g[p2][2].conj())
print ' writing...'
uv.write_uvfits(opts.outpath+obs+'.uvfits',spoof_nonessential=True)
|
[
"[email protected]"
] | |
cc828390d5d25140f299b141f0bee2892c95787d
|
0028a9a0d3fb346c44a386d507579fa6288ec0b9
|
/payment_receipt_invoice/__manifest__.py
|
95b995d758d7ba810d0d58fbc761aebded2fac62
|
[] |
no_license
|
rpsjr/extra-addons
|
283a7e54c3dc67ba2cab2b28e03e2cd8e3bfbe2d
|
9f8906b7908ad373cc26405c6aea54b0cd5031cb
|
refs/heads/master
| 2022-07-31T21:19:04.013649 | 2020-05-16T19:12:19 | 2020-05-16T19:12:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,543 |
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
# Copyright (C) 2017-TODAY Cybrosys Technologies(<http://www.cybrosys.com>).
# Author: Niyas Raphy,Fasluca(<http://www.cybrosys.com>)
# you can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# GENERAL PUBLIC LICENSE (LGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Payment Receipt',
'summary': """Payment Receipt With Paid Details""",
'version': '10.0.1.0',
'description': """Payment Receipt With Paid Details""",
'author': 'Cybrosys Techno Solutions',
'company': 'Cybrosys Techno Solutions',
'website': 'http://www.cybrosys.com',
'category': 'Accounting',
'depends': ['base', 'account'],
'license': 'AGPL-3',
'data': [
'views/report_payment.xml',
'views/report.xml',
],
'demo': [],
'images': ['static/description/banner.jpg'],
'installable': True,
'auto_install': False,
}
|
[
"[email protected]"
] | |
645ed88cfcfdedfaa7b157819933b2a425965edf
|
e35fd52fe4367320024a26f2ee357755b5d5f4bd
|
/leetcode/problems/599.minimum-index-sum-of-two-lists.py
|
9f64bc499ff33c39eaa7f998f6fc339d5a9d0027
|
[] |
no_license
|
liseyko/CtCI
|
a451967b0a0ce108c491d30b81e88d20ad84d2cd
|
c27f19fac14b4acef8c631ad5569e1a5c29e9e1f
|
refs/heads/master
| 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,646 |
py
|
#
# @lc app=leetcode id=599 lang=python3
#
# [599] Minimum Index Sum of Two Lists
#
# https://leetcode.com/problems/minimum-index-sum-of-two-lists/description/
#
# algorithms
# Easy (49.05%)
# Total Accepted: 69.6K
# Total Submissions: 141.9K
# Testcase Example: '["Shogun","Tapioca Express","Burger King","KFC"]\n' +
'["Piatti","The Grill at Torrey Pines","Hungry Hunter Steakhouse","Shogun"]'
#
#
# Suppose Andy and Doris want to choose a restaurant for dinner, and they both
# have a list of favorite restaurants represented by strings.
#
#
# You need to help them find out their common interest with the least list
# index sum. If there is a choice tie between answers, output all of them with
# no order requirement. You could assume there always exists an answer.
#
#
#
# Example 1:
#
# Input:
# ["Shogun", "Tapioca Express", "Burger King", "KFC"]
# ["Piatti", "The Grill at Torrey Pines", "Hungry Hunter Steakhouse", "Shogun"]
# Output: ["Shogun"]
# Explanation: The only restaurant they both like is "Shogun".
#
#
#
# Example 2:
#
# Input:
# ["Shogun", "Tapioca Express", "Burger King", "KFC"]
# ["KFC", "Shogun", "Burger King"]
# Output: ["Shogun"]
# Explanation: The restaurant they both like and have the least index sum is
# "Shogun" with index sum 1 (0+1).
#
#
#
#
# Note:
#
# The length of both lists will be in the range of [1, 1000].
# The length of strings in both lists will be in the range of [1, 30].
# The index is starting from 0 to the list length minus 1.
# No duplicates in both lists.
#
#
#
class Solution:
def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:
|
[
"[email protected]"
] | |
33f997ce05ea563ef525d2f8526b25d76942c1fa
|
201335e99ac66a1e404bda38c3ca0fe1006835ce
|
/network_model/builder/pytorch_builder.py
|
f79ef81e35cfc7bea464818473a06e9cb57bd13e
|
[
"MIT"
] |
permissive
|
yuga-n/ModelLearner
|
507c701cb5beea30e096a51c2ae1296cdc699f8b
|
3193efd5eb15172ba8231a34829942040fcb0fc5
|
refs/heads/main
| 2023-08-14T04:03:23.338993 | 2021-09-10T14:15:30 | 2021-09-10T14:15:30 | 406,409,911 | 0 | 0 |
MIT
| 2021-09-14T14:52:28 | 2021-09-14T14:52:27 | null |
UTF-8
|
Python
| false | false | 5,276 |
py
|
# -*- coding: utf-8 -*-
import keras.engine.training
from typing import Callable
from typing import Tuple
from typing import List
from typing import Union
from util_types import types_of_loco
from network_model.distillation.distillation_model_builder import DistllationModelIncubator
from network_model.build_model import builder_pt, builder_with_merge
from keras.callbacks import Callback
import torch
from torch.optim.optimizer import Optimizer
from torch.optim import SGD
from torch.nn.modules.loss import _Loss
from torch.nn import CrossEntropyLoss, Module
from network_model.wrapper.pytorch.model_pt import ModelForPytorch
from model_merger.pytorch.proc.distance.calculator import L1Norm
from model_merger.pytorch.proc.distance.abs_calculator import AbstractDistanceCaluclator
from model_merger.pytorch.proc.loss.calculator import AAEUMLoss
from model_merger.pytorch.proc.loss.abstract_calculator import AbstractLossCalculator
from model_merger.pytorch.proc.shiamese_loss import SiameseLoss, SiameseLossForInceptionV3
from model_merger.pytorch.siamese import SiameseNetworkPT
ModelBuilderResult = Union[keras.engine.training.Model, List[Callback]]
ModelBuilder = Union[Callable[[int], ModelBuilderResult],
Callable[[Union[str, Tuple[str, str]]], keras.engine.training.Model],
DistllationModelIncubator]
OptimizerBuilder = Callable[[Module], Optimizer]
def optimizer_builder(optimizer, **kwargs):
def build(base_model: Module):
kwargs["params"] = base_model.parameters()
return optimizer(**kwargs)
return build
default_optimizer_builder = optimizer_builder(SGD)
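# Example usage (illustrative, not part of the original module): the factory
# can wrap any torch optimizer with fixed keyword arguments, e.g.
#
#     from torch.optim import Adam
#     adam_builder = optimizer_builder(Adam, lr=1e-3)
#     optimizer = adam_builder(model)   # 'model' is any torch.nn.Module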
class PytorchModelBuilder(object):
def __init__(self,
img_size: types_of_loco.input_img_size = 28,
channels: int = 3,
model_name: str = "model1",
opt_builder: OptimizerBuilder = default_optimizer_builder,
loss: _Loss = None,
decide_dataset_generator=None,
nearest_data_ave_num=1,
will_calc_rate_real_data_train=False):
self.__img_size = img_size
self.__channels = channels
self.__model_name = model_name
self.__opt_builder = opt_builder
self.__loss = loss
self.__decide_dataset_generator = decide_dataset_generator
self.__nearest_data_ave_num = nearest_data_ave_num
self.__will_calc_rate_real_data_train = will_calc_rate_real_data_train
def build_raw_model(self, model_builder_input) -> torch.nn.Module:
if self.__model_name == "tempload":
return torch.jit.load(model_builder_input)
return builder_pt(model_builder_input, self.__img_size, self.__model_name)
def build_model_builder_wrapper(self, model_builder_input):
base_model = self.build_raw_model(model_builder_input)
optimizer = self.__opt_builder(base_model)
return ModelForPytorch.build_wrapper(base_model,
optimizer,
self.__loss,
decide_dataset_generator=self.__decide_dataset_generator,
nearest_data_ave_num=self.__nearest_data_ave_num,
will_calc_rate_real_data_train=self.__will_calc_rate_real_data_train)
def __call__(self, model_builder_input):
return self.build_model_builder_wrapper(model_builder_input)
class PytorchSiameseModelBuilder(PytorchModelBuilder):
def __init__(self,
q: float,
img_size: types_of_loco.input_img_size = 28,
channels: int = 3,
model_name: str = "model1",
opt_builder: OptimizerBuilder = default_optimizer_builder,
loss_calculator: AbstractLossCalculator = None,
calc_distance: AbstractDistanceCaluclator=L1Norm(),
is_inceptionv3: bool = False,
decide_dataset_generator=None,
nearest_data_ave_num=1,
will_calc_rate_real_data_train=False):
use_loss_calculator = AAEUMLoss(q) if loss_calculator is None else loss_calculator
loss = SiameseLossForInceptionV3(calc_distance, use_loss_calculator) if is_inceptionv3 else SiameseLoss(calc_distance, use_loss_calculator)
super(PytorchSiameseModelBuilder, self).__init__(img_size,
channels,
model_name,
opt_builder,
loss,
decide_dataset_generator,
nearest_data_ave_num,
will_calc_rate_real_data_train
)
def build_raw_model(self, model_builder_input) -> torch.nn.Module:
original_model = super(PytorchSiameseModelBuilder, self).build_raw_model(model_builder_input)
return SiameseNetworkPT(original_model)
|
[
"[email protected]"
] | |
787c0af6845645273f03f517cdc63b368ff78526
|
00820b522cc16bf996f1ef44a94a2f31989c4065
|
/abc/abc151/b.py
|
83c9ebfb2c76f96625755c32b514c9fb111b83c2
|
[] |
no_license
|
yamato1992/at_coder
|
6dffd425163a37a04e37507743a15f67b29239fc
|
6e0ec47267ed3cae62aebdd3d149f6191fdcae27
|
refs/heads/master
| 2020-08-31T11:17:03.500616 | 2020-06-12T15:45:58 | 2020-06-12T15:45:58 | 218,678,043 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 201 |
py
|
N, K, M = map(int, input().split())
scores = list(map(int, input().split()))
req_score = N * M - sum(scores)
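# (Added note) To average M over the N exams the total must reach N * M, so the
# last exam needs req_score more points; it cannot exceed K or go below 0.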
if req_score > K:
print(-1)
elif req_score < 0:
print(0)
else:
print(req_score)
|
[
"[email protected]"
] | |
00fd44fd4c9944de27295296d9220003e0054ebc
|
1fb2da0e6f73652f0b0126c82a84562f6a8d3535
|
/946. Validate Stack Sequences.py
|
0e3b5ae404154d83fdb8c28dd8ada0394f1e5dfd
|
[] |
no_license
|
JaylenZhang19/Leetcode
|
be3456fcb45270c8aad797f965f4c7a1781c0e61
|
178546686aa3ae8f5da1ae845417f86fab9a644d
|
refs/heads/master
| 2023-02-27T06:08:58.818435 | 2021-01-31T20:28:10 | 2021-01-31T20:28:10 | 287,661,146 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 382 |
py
|
class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
stack = []
while pushed:
stack.append(pushed.pop(0))
while stack and popped and stack[-1] == popped[0]:
stack.pop()
popped.pop(0)
if stack:
return False
return True
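    # Alternative sketch (not the repository author's code): the same check can
    # run without the repeated O(n) pop(0) calls by walking popped with an index:
    #
    #     def validate_fast(self, pushed, popped):
    #         stack, j = [], 0
    #         for value in pushed:
    #             stack.append(value)
    #             while stack and j < len(popped) and stack[-1] == popped[j]:
    #                 stack.pop()
    #                 j += 1
    #         return not stack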
|
[
"[email protected]"
] | |
a1ffff01cb8f903d88835b5e17535542b975e3e5
|
e553161c3adba5c1b19914adbacd58f34f27788e
|
/ambari/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
|
91d7b89d107edf99c0874d1987c0c8b9b5b1a8c3
|
[
"Apache-2.0"
] |
permissive
|
ReedOei/dependent-tests-experiments
|
57daf82d1feb23165651067b7ac004dd74d1e23d
|
9fccc06ec13ff69a1ac8fb2a4dd6f93c89ebd29b
|
refs/heads/master
| 2020-03-20T02:50:59.514767 | 2018-08-23T16:46:01 | 2018-08-23T16:46:01 | 137,126,354 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,768 |
py
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
from resource_management.libraries.functions.decorator import retry
from resource_management.core.resources.system import File, Execute
from resource_management.core.source import Template
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
from resource_management import is_empty
from resource_management import shell
from resource_management.core.resources.zkmigrator import ZkMigrator
from yarn import yarn
from service import service
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from setup_ranger_yarn import setup_ranger_yarn
class Resourcemanager(Script):
def install(self, env):
self.install_packages(env)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
service('resourcemanager', action='stop')
def configure(self, env):
import params
env.set_params(params)
yarn(name='resourcemanager')
def refreshqueues(self, env):
pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class ResourcemanagerWindows(Resourcemanager):
def start(self, env):
import params
env.set_params(params)
self.configure(env)
service('resourcemanager', action='start')
def status(self, env):
service('resourcemanager', action='status')
def decommission(self, env):
import params
env.set_params(params)
yarn_user = params.yarn_user
yarn_refresh_cmd = format("cmd /c yarn rmadmin -refreshNodes")
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=yarn_user,
mode="f"
)
if params.update_exclude_file_only == False:
Execute(yarn_refresh_cmd, user=yarn_user)
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class ResourcemanagerDefault(Resourcemanager):
def get_component_name(self):
return "hadoop-yarn-resourcemanager"
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Stack Upgrade post-restart")
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
conf_select.select(params.stack_name, "hadoop", params.version)
stack_select.select("hadoop-yarn-resourcemanager", params.version)
def disable_security(self, env):
import params
if not params.rm_zk_address:
Logger.info("Skipping reverting ACL")
return
zkmigrator = ZkMigrator(
params.rm_zk_address, \
params.java_exec, \
params.java64_home, \
params.yarn_jaas_file, \
params.yarn_user)
zkmigrator.set_acls(params.rm_zk_znode, 'world:anyone:crdwa')
zkmigrator.set_acls(params.rm_zk_failover_znode, 'world:anyone:crdwa')
zkmigrator.set_acls(params.hadoop_registry_zk_root, 'world:anyone:crdwa')
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
if params.has_ranger_admin and params.is_supported_yarn_ranger:
setup_ranger_yarn() #Ranger Yarn Plugin related calls
# wait for active-dir and done-dir to be created by ATS if needed
if params.has_ats:
Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
self.wait_for_dfs_directories_created(params.entity_groupfs_store_dir, params.entity_groupfs_active_dir)
service('resourcemanager', action='start')
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.resourcemanager_pid_file)
pass
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
"yarn.acl.enable": "true"}
props_empty_check = ["yarn.resourcemanager.principal",
"yarn.resourcemanager.keytab",
"yarn.resourcemanager.webapp.spnego-principal",
"yarn.resourcemanager.webapp.spnego-keytab-file"]
props_read_check = ["yarn.resourcemanager.keytab",
"yarn.resourcemanager.webapp.spnego-keytab-file"]
yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
props_read_check)
yarn_expectations ={}
yarn_expectations.update(yarn_site_props)
security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
{'yarn-site.xml': FILE_TYPE_XML})
result_issues = validate_security_config_properties(security_params, yarn_site_props)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if ( 'yarn-site' not in security_params
or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out(
{"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.yarn_user,
security_params['yarn-site']['yarn.resourcemanager.keytab'],
security_params['yarn-site']['yarn.resourcemanager.principal'],
status_params.hostname,
status_params.tmp_dir)
cached_kinit_executor(status_params.kinit_path_local,
status_params.yarn_user,
security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
def refreshqueues(self, env):
import params
self.configure(env)
env.set_params(params)
service('resourcemanager',
action='refreshQueues'
)
def decommission(self, env):
import params
env.set_params(params)
rm_kinit_cmd = params.rm_kinit_cmd
yarn_user = params.yarn_user
conf_dir = params.hadoop_conf_dir
user_group = params.user_group
yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=yarn_user,
group=user_group
)
if params.update_exclude_file_only == False:
Execute(yarn_refresh_cmd,
environment= {'PATH' : params.execute_path },
user=yarn_user)
pass
pass
def wait_for_dfs_directories_created(self, *dirs):
import params
ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file)
if params.security_enabled:
Execute(params.rm_kinit_cmd,
user=params.yarn_user
)
Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
user=params.hdfs_user
)
for dir_path in dirs:
self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs)
@retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail)
def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs):
import params
if not is_empty(dir_path):
dir_path = HdfsResourceProvider.parse_path(dir_path)
if dir_path in ignored_dfs_dirs:
Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.")
return
Logger.info("Verifying if DFS directory '" + dir_path + "' exists.")
dir_exists = None
if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
# check with webhdfs is much faster than executing hdfs dfs -test
util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
dir_exists = ('FileStatus' in list_status)
else:
# have to do time expensive hdfs dfs -d check.
dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.yarn_user)[0]
dir_exists = not dfs_ret_code #dfs -test -d returns 0 in case the dir exists
if not dir_exists:
raise Fail("DFS directory '" + dir_path + "' does not exist !")
else:
Logger.info("DFS directory '" + dir_path + "' exists.")
def get_log_folder(self):
import params
return params.yarn_log_dir
def get_user(self):
import params
return params.yarn_user
def get_pid_files(self):
import status_params
return [status_params.resourcemanager_pid_file]
if __name__ == "__main__":
Resourcemanager().execute()
|
[
"[email protected]"
] | |
badf4d8ee3241875395d8cab7be4c5abe4aae39e
|
cbdef2e8ed259adc4653ade34db12d8bcc0cea9f
|
/dominion/cards/Card_Capital_City.py
|
8fd93365fb68c55deab0a00b7b76f4a9f427e947
|
[] |
no_license
|
dwagon/pydominion
|
8dd5afef8ec89c63ade74c4ae6c7473cd676799f
|
545709f0a41529de74f33aa83b106c456900fa5b
|
refs/heads/main
| 2023-08-29T10:02:26.652032 | 2023-08-23T02:25:00 | 2023-08-23T02:25:00 | 18,776,204 | 1 | 0 | null | 2023-08-23T02:25:02 | 2014-04-14T20:49:28 |
Python
|
UTF-8
|
Python
| false | false | 3,191 |
py
|
#!/usr/bin/env python
import unittest
from dominion import Game, Card, Piles
###############################################################################
class Card_Capital_City(Card.Card):
def __init__(self):
Card.Card.__init__(self)
self.cardtype = Card.CardType.ACTION
self.base = Card.CardExpansion.ALLIES
self.cards = 1
self.actions = 2
self.name = "Capital City"
self.desc = """+1 Card; +2 Actions; You may discard 2 cards for +$2.;
You may pay $2 for +2 Cards."""
self.cost = 5
def special(self, game, player):
ch1 = player.plr_choose_options(
"Discard 2 cards to gain $2 coin?",
("Do nothing", False),
("Discard 2 Cards", True),
)
if ch1:
discard = player.plr_discard_cards(num=2)
if len(discard) == 2:
player.coins.add(2)
if player.coins.get() >= 2:
ch2 = player.plr_choose_options(
"Pay $2 to gain 2 cards?",
("Do nothing", False),
("Gain 2 Cards", True),
)
if ch2:
player.coins.add(-2)
player.pickup_cards(2)
###############################################################################
class Test_Capital_City(unittest.TestCase):
def setUp(self):
self.g = Game.TestGame(numplayers=1, initcards=["Capital City"])
self.g.start_game()
self.plr = self.g.player_list()[0]
self.card = self.g["Capital City"].remove()
def test_play(self):
"""Play the card"""
self.plr.piles[Piles.HAND].set("Copper", "Copper", "Estate", "Duchy")
self.plr.piles[Piles.DECK].set("Gold", "Silver", "Copper", "Copper")
self.plr.add_card(self.card, Piles.HAND)
self.plr.test_input = [
"Discard",
"Discard Estate",
"Discard Duchy",
"Finish",
"Gain",
]
self.plr.play_card(self.card)
self.assertEqual(self.plr.coins.get(), 0)
self.assertEqual(self.plr.piles[Piles.HAND].size(), 4 + 1 - 2 + 2)
self.assertNotIn("Duchy", self.plr.piles[Piles.HAND])
self.assertIn("Silver", self.plr.piles[Piles.HAND])
def test_play_no_pickup(self):
"""Play the card but don't pickup new cards"""
self.plr.piles[Piles.HAND].set("Copper", "Copper", "Estate", "Duchy")
self.plr.piles[Piles.DECK].set("Gold", "Silver", "Copper", "Copper")
self.plr.add_card(self.card, Piles.HAND)
self.plr.test_input = [
"Discard",
"Discard Estate",
"Discard Duchy",
"Finish",
"nothing",
]
self.plr.play_card(self.card)
self.assertEqual(self.plr.coins.get(), 2)
self.assertEqual(self.plr.piles[Piles.HAND].size(), 4 + 1 - 2)
self.assertNotIn("Duchy", self.plr.piles[Piles.HAND])
self.assertNotIn("Silver", self.plr.piles[Piles.HAND])
###############################################################################
if __name__ == "__main__": # pragma: no cover
unittest.main()
# EOF
|
[
"[email protected]"
] | |
408b989cffaf6b1aff9bdfb917de8fa0987870c2
|
bbeba16730eca05a897e46e771b8e9dc2a61e044
|
/testflows/_core/contrib/pygments/lexers/basic.py
|
f7359403d676c6fc69eb4c7737770aa0243ffaac
|
[
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
testflows/TestFlows-Core
|
47d3e5b8890fcf73024c91f4ea293363c29f422b
|
7dd2d3af19f6930257bd53133286edb78bf490ab
|
refs/heads/master
| 2023-08-16T15:42:08.888323 | 2023-08-15T11:35:09 | 2023-08-15T11:35:09 | 215,418,320 | 5 | 4 |
NOASSERTION
| 2023-04-26T19:28:55 | 2019-10-15T23:59:26 |
Python
|
UTF-8
|
Python
| false | false | 27,648 |
py
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.basic
~~~~~~~~~~~~~~~~~~~~~
Lexers for BASIC like languages (other than VB.net).
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from testflows._core.contrib.pygments.lexer import RegexLexer, bygroups, default, words, include
from testflows._core.contrib.pygments.token import Comment, Error, Keyword, Name, Number, \
Punctuation, Operator, String, Text, Whitespace
from testflows._core.contrib.pygments.lexers import _vbscript_builtins
__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
'QBasicLexer', 'VBScriptLexer', 'BBCBasicLexer']
class BlitzMaxLexer(RegexLexer):
"""
For `BlitzMax <http://blitzbasic.com>`_ source code.
.. versionadded:: 1.4
"""
name = 'BlitzMax'
aliases = ['blitzmax', 'bmax']
filenames = ['*.bmx']
mimetypes = ['text/x-bmx']
bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
bmax_sktypes = r'@{1,2}|[!#$%]'
bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
bmax_name = r'[a-z_]\w*'
bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \
(bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Text
(r'[ \t]+', Text),
(r'\.\.\n', Text), # Line continuation
# Comments
(r"'.*?\n", Comment.Single),
(r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]*(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Other
(r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
(bmax_vopwords), Operator),
(r'[(),.:\[\]]', Punctuation),
(r'(?:#[\w \t]*)', Name.Label),
(r'(?:\?[\w \t]*)', Comment.Preproc),
# Identifiers
(r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)),
(r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
(bmax_name, bmax_name),
bygroups(Keyword.Reserved, Text, Keyword.Namespace)),
(bmax_func, bygroups(Name.Function, Text, Keyword.Type,
Operator, Text, Punctuation, Text,
Keyword.Type, Name.Class, Text,
Keyword.Type, Text, Punctuation)),
(bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator,
Text, Punctuation, Text, Keyword.Type,
Name.Class, Text, Keyword.Type)),
(r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
# Keywords
(r'\b(Ptr)\b', Keyword.Type),
(r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
(words((
'TNullMethodException', 'TNullFunctionException',
'TNullObjectException', 'TArrayBoundsException',
'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception),
(words((
'Strict', 'SuperStrict', 'Module', 'ModuleInfo',
'End', 'Return', 'Continue', 'Exit', 'Public', 'Private',
'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max',
'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen',
'Framework', 'Include', 'Import', 'Extern', 'EndExtern',
'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod',
'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile',
'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect',
'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData',
'RestoreData'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# Final resolve (for variable names and such)
(r'(%s)' % (bmax_name), Name.Variable),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '#pop'),
(r'[^"]+', String.Double),
],
}
class BlitzBasicLexer(RegexLexer):
"""
For `BlitzBasic <http://blitzbasic.com>`_ source code.
.. versionadded:: 2.0
"""
name = 'BlitzBasic'
aliases = ['blitzbasic', 'b3d', 'bplus']
filenames = ['*.bb', '*.decls']
mimetypes = ['text/x-bb']
bb_sktypes = r'@{1,2}|[#$%]'
bb_name = r'[a-z]\w*'
bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
(bb_name, bb_sktypes, bb_name)
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Text
(r'[ \t]+', Text),
# Comments
(r";.*?\n", Comment.Single),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]+(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Other
(words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not',
'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str',
'First', 'Last', 'Before', 'After'),
prefix=r'\b', suffix=r'\b'),
Operator),
(r'([+\-*/~=<>^])', Operator),
(r'[(),:\[\]\\]', Punctuation),
(r'\.([ \t]*)(%s)' % bb_name, Name.Label),
# Identifiers
(r'\b(New)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Label)),
(r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
bygroups(Operator, Text, Punctuation, Text, Name.Class)),
(r'\b%s\b([ \t]*)(\()' % bb_var,
bygroups(Name.Function, Text, Keyword.Type, Text, Punctuation,
Text, Name.Class, Text, Punctuation)),
(r'\b(Function)\b([ \t]+)%s' % bb_var,
bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type,
Text, Punctuation, Text, Name.Class)),
(r'\b(Type)([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
# Keywords
(r'\b(Pi|True|False|Null)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
(words((
'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert',
'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend',
'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default',
'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# Final resolve (for variable names and such)
# (r'(%s)' % (bb_name), Name.Variable),
(bb_var, bygroups(Name.Variable, Text, Keyword.Type,
Text, Punctuation, Text, Name.Class)),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '#pop'),
(r'[^"]+', String.Double),
],
}
class MonkeyLexer(RegexLexer):
"""
For
`Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
source code.
.. versionadded:: 1.6
"""
name = 'Monkey'
aliases = ['monkey']
filenames = ['*.monkey']
mimetypes = ['text/x-monkey']
name_variable = r'[a-z_]\w*'
name_function = r'[A-Z]\w*'
name_constant = r'[A-Z_][A-Z0-9_]*'
name_class = r'[A-Z]\w*'
name_module = r'[a-z0-9_]*'
keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
# ? == Bool // % == Int // # == Float // $ == String
keyword_type_special = r'[?%#$]'
flags = re.MULTILINE
tokens = {
'root': [
# Text
(r'\s+', Text),
# Comments
(r"'.*", Comment),
(r'(?i)^#rem\b', Comment.Multiline, 'comment'),
# preprocessor directives
(r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
# preprocessor variable (any line starting with '#' that is not a directive)
(r'^#', Comment.Preproc, 'variables'),
# String
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]+(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-fA-Z]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Native data types
(r'\b%s\b' % keyword_type, Keyword.Type),
# Exception handling
(r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
(r'Throwable', Name.Exception),
# Builtins
(r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
(r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
(r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
# Keywords
(r'(?i)^(Import)(\s+)(.*)(\n)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text)),
(r'(?i)^Strict\b.*\n', Keyword.Reserved),
(r'(?i)(Const|Local|Global|Field)(\s+)',
bygroups(Keyword.Declaration, Text), 'variables'),
(r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
bygroups(Keyword.Reserved, Text), 'classname'),
(r'(?i)(Function|Method)(\s+)',
bygroups(Keyword.Reserved, Text), 'funcname'),
(r'(?i)(?:End|Return|Public|Private|Extern|Property|'
r'Final|Abstract)\b', Keyword.Reserved),
# Flow Control stuff
(r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
r'Select|Case|Default|'
r'While|Wend|'
r'Repeat|Until|Forever|'
r'For|To|Until|Step|EachIn|Next|'
r'Exit|Continue)\s+', Keyword.Reserved),
# not used yet
(r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
# Array
(r'[\[\]]', Punctuation),
# Other
(r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
(r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
(r'[(){}!#,.:]', Punctuation),
# catch the rest
(r'%s\b' % name_constant, Name.Constant),
(r'%s\b' % name_function, Name.Function),
(r'%s\b' % name_variable, Name.Variable),
],
'funcname': [
(r'(?i)%s\b' % name_function, Name.Function),
(r':', Punctuation, 'classname'),
(r'\s+', Text),
(r'\(', Punctuation, 'variables'),
(r'\)', Punctuation, '#pop')
],
'classname': [
(r'%s\.' % name_module, Name.Namespace),
(r'%s\b' % keyword_type, Keyword.Type),
(r'%s\b' % name_class, Name.Class),
# array (of given size)
(r'(\[)(\s*)(\d*)(\s*)(\])',
bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)),
# generics
(r'\s+(?!<)', Text, '#pop'),
(r'<', Punctuation, '#push'),
(r'>', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
default('#pop')
],
'variables': [
(r'%s\b' % name_constant, Name.Constant),
(r'%s\b' % name_variable, Name.Variable),
(r'%s' % keyword_type_special, Keyword.Type),
(r'\s+', Text),
(r':', Punctuation, 'classname'),
(r',', Punctuation, '#push'),
default('#pop')
],
'string': [
(r'[^"~]+', String.Double),
(r'~q|~n|~r|~t|~z|~~', String.Escape),
(r'"', String.Double, '#pop'),
],
'comment': [
(r'(?i)^#rem.*?', Comment.Multiline, "#push"),
(r'(?i)^#end.*?', Comment.Multiline, "#pop"),
(r'\n', Comment.Multiline),
(r'.+', Comment.Multiline),
],
}
class CbmBasicV2Lexer(RegexLexer):
"""
For CBM BASIC V2 sources.
.. versionadded:: 1.6
"""
name = 'CBM BASIC V2'
aliases = ['cbmbas']
filenames = ['*.bas']
flags = re.IGNORECASE
tokens = {
'root': [
(r'rem.*\n', Comment.Single),
(r'\s+', Text),
(r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
(r'data|restore|dim|let|def|fn', Keyword.Declaration),
(r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
(r'[-+*/^<>=]', Operator),
(r'not|and|or', Operator.Word),
(r'"[^"\n]*.', String),
(r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
(r'[(),:;]', Punctuation),
(r'\w+[$%]?', Name),
]
}
def analyse_text(self, text):
# if it starts with a line number, it shouldn't be a "modern" Basic
# like VB.net
if re.match(r'\d+', text):
return 0.2
class QBasicLexer(RegexLexer):
"""
For
`QBasic <http://en.wikipedia.org/wiki/QBasic>`_
source code.
.. versionadded:: 2.0
"""
name = 'QBasic'
aliases = ['qbasic', 'basic']
filenames = ['*.BAS', '*.bas']
mimetypes = ['text/basic']
declarations = ('DATA', 'LET')
functions = (
'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG',
'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI',
'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV',
'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE',
'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT',
'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF',
'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$',
'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY',
'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD',
'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC',
'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN',
'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR',
'VARPTR$', 'VARSEG'
)
metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC')
operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR')
statements = (
'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE',
'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR',
'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA',
'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 'DEFDBL', 'DEFINT',
'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP',
'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD',
'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO',
'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY', 'KEY',
'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE',
'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME',
'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY',
'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM',
'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN',
'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING',
'PSET', 'PUT', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM',
'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN',
'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP',
'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM',
'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK',
'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE'
)
keywords = (
'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY',
'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF',
'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD',
'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE',
'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND'
)
tokens = {
'root': [
(r'\n+', Text),
(r'\s+', Text.Whitespace),
(r'^(\s*)(\d*)(\s*)(REM .*)$',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace,
Comment.Single)),
(r'^(\s*)(\d+)(\s*)',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace)),
(r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global),
(r'(?=[^"]*)\'.*$', Comment.Single),
(r'"[^\n"]*"', String.Double),
(r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)',
bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)),
(r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name)),
(r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name.Variable.Global)),
(r'(DIM)(\s+)([^\s(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)),
(r'^(\s*)([a-zA-Z_]+)(\s*)(\=)',
bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace,
Operator)),
(r'(GOTO|GOSUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
(r'(SUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
include('declarations'),
include('functions'),
include('metacommands'),
include('operators'),
include('statements'),
include('keywords'),
(r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global),
(r'[a-zA-Z_]\w*\:', Name.Label),
(r'\-?\d*\.\d+[@|#]?', Number.Float),
(r'\-?\d+[@|#]', Number.Float),
(r'\-?\d+#?', Number.Integer.Long),
(r'\-?\d+#?', Number.Integer),
(r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator),
(r'[\[\]{}(),;]', Punctuation),
(r'[\w]+', Name.Variable.Global),
],
# can't use regular \b because of X$()
# XXX: use words() here
'declarations': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)),
Keyword.Declaration),
],
'functions': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)),
Keyword.Reserved),
],
'metacommands': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)),
Keyword.Constant),
],
'operators': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word),
],
'statements': [
(r'\b(%s)\b' % '|'.join(map(re.escape, statements)),
Keyword.Reserved),
],
'keywords': [
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
],
}
def analyse_text(text):
if '$DYNAMIC' in text or '$STATIC' in text:
return 0.9
class VBScriptLexer(RegexLexer):
"""
VBScript is scripting language that is modeled on Visual Basic.
.. versionadded:: 2.4
"""
name = 'VBScript'
aliases = ['vbscript']
filenames = ['*.vbs', '*.VBS']
flags = re.IGNORECASE
tokens = {
'root': [
(r"'[^\n]*", Comment.Single),
(r'\s+', Whitespace),
('"', String.Double, 'string'),
('&h[0-9a-f]+', Number.Hex),
# Float variant 1, for example: 1., 1.e2, 1.2e3
(r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float),
(r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), # Float variant 2, for example: .1, .1e2
(r'[0-9]+e[+-]?[0-9]+', Number.Float), # Float variant 3, for example: 123e45
(r'\d+', Number.Integer),
('#.+#', String), # date or time value
(r'(dim)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Name.Variable), 'dim_more'),
(r'(function|sub)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Name.Function)),
(r'(class)(\s+)([a-z_][a-z0-9_]*)', bygroups(Keyword.Declaration, Whitespace, Name.Class)),
(r'(const)(\s+)([a-z_][a-z0-9_]*)', bygroups(Keyword.Declaration, Whitespace, Name.Constant)),
(r'(end)(\s+)(class|function|if|property|sub|with)', bygroups(Keyword, Whitespace, Keyword)),
(r'(on)(\s+)(error)(\s+)(goto)(\s+)(0)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Number.Integer)),
(r'(on)(\s+)(error)(\s+)(resume)(\s+)(next)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Keyword)),
(r'(option)(\s+)(explicit)', bygroups(Keyword, Whitespace, Keyword)),
(r'(property)(\s+)(get|let|set)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Name.Property)),
(r'rem\s.*[^\n]*', Comment.Single),
(words(_vbscript_builtins.KEYWORDS, suffix=r'\b'), Keyword),
(words(_vbscript_builtins.OPERATORS), Operator),
(words(_vbscript_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word),
(words(_vbscript_builtins.BUILTIN_CONSTANTS, suffix=r'\b'), Name.Constant),
(words(_vbscript_builtins.BUILTIN_FUNCTIONS, suffix=r'\b'), Name.Builtin),
(words(_vbscript_builtins.BUILTIN_VARIABLES, suffix=r'\b'), Name.Builtin),
(r'[a-z_][a-z0-9_]*', Name),
(r'\b_\n', Operator),
(words(r'(),.:'), Punctuation),
(r'.+(\n)?', Error)
],
'dim_more': [
(r'(\s*)(,)(\s*)([a-z_][a-z0-9]*)', bygroups(Whitespace, Punctuation, Whitespace, Name.Variable)),
default('#pop'),
],
'string': [
(r'[^"\n]+', String.Double),
(r'\"\"', String.Double),
(r'"', String.Double, '#pop'),
(r'\n', Error, '#pop'), # Unterminated string
],
}
class BBCBasicLexer(RegexLexer):
"""
BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS.
It is also used by BBC Basic For Windows.
.. versionadded:: 2.4
"""
base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR',
'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN',
'OPENIN', 'PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM', 'ABS',
'ACS', 'ADVAL', 'ASC', 'ASN', 'ATN', 'BGET', 'COS', 'COUNT',
'DEG', 'ERL', 'ERR', 'EVAL', 'EXP', 'EXT', 'FALSE', 'FN',
'GET', 'INKEY', 'INSTR', 'INT', 'LEN', 'LN', 'LOG', 'NOT',
'OPENUP', 'OPENOUT', 'PI', 'POINT', 'POS', 'RAD', 'RND',
'SGN', 'SIN', 'SQR', 'TAN', 'TO', 'TRUE', 'USR', 'VAL',
'VPOS', 'CHR$', 'GET$', 'INKEY$', 'LEFT$', 'MID$',
'RIGHT$', 'STR$', 'STRING$', 'EOF', 'PTR', 'PAGE', 'TIME',
'LOMEM', 'HIMEM', 'SOUND', 'BPUT', 'CALL', 'CHAIN', 'CLEAR',
'CLOSE', 'CLG', 'CLS', 'DATA', 'DEF', 'DIM', 'DRAW', 'END',
'ENDPROC', 'ENVELOPE', 'FOR', 'GOSUB', 'GOTO', 'GCOL', 'IF',
'INPUT', 'LET', 'LOCAL', 'MODE', 'MOVE', 'NEXT', 'ON',
'VDU', 'PLOT', 'PRINT', 'PROC', 'READ', 'REM', 'REPEAT',
'REPORT', 'RESTORE', 'RETURN', 'RUN', 'STOP', 'COLOUR',
'TRACE', 'UNTIL', 'WIDTH', 'OSCLI']
basic5_keywords = ['WHEN', 'OF', 'ENDCASE', 'ENDIF', 'ENDWHILE', 'CASE',
'CIRCLE', 'FILL', 'ORIGIN', 'POINT', 'RECTANGLE', 'SWAP',
'WHILE', 'WAIT', 'MOUSE', 'QUIT', 'SYS', 'INSTALL',
'LIBRARY', 'TINT', 'ELLIPSE', 'BEATS', 'TEMPO', 'VOICES',
'VOICE', 'STEREO', 'OVERLAY', 'APPEND', 'AUTO', 'CRUNCH',
'DELETE', 'EDIT', 'HELP', 'LIST', 'LOAD', 'LVAR', 'NEW',
'OLD', 'RENUMBER', 'SAVE', 'TEXTLOAD', 'TEXTSAVE',
'TWIN', 'TWINO', 'INSTALL', 'SUM', 'BEAT']
name = 'BBC Basic'
aliases = ['bbcbasic']
filenames = ['*.bbc']
tokens = {
'root': [
(r"[0-9]+", Name.Label),
(r"(\*)([^\n]*)",
bygroups(Keyword.Pseudo, Comment.Special)),
(r"", Whitespace, 'code'),
],
'code': [
(r"(REM)([^\n]*)",
bygroups(Keyword.Declaration, Comment.Single)),
(r'\n', Whitespace, 'root'),
(r'\s+', Whitespace),
(r':', Comment.Preproc),
# Some special cases to make functions come out nicer
(r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Name.Function)),
(r'(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword, Name.Function)),
(r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(TRUE|FALSE)', Keyword.Constant),
(r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)', Keyword.Pseudo),
(words(base_keywords), Keyword),
(words(basic5_keywords), Keyword),
('"', String.Double, 'string'),
('%[01]{1,32}', Number.Bin),
('&[0-9a-f]{1,8}', Number.Hex),
(r'[+-]?[0-9]+\.[0-9]*(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?\.[0-9]+(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float),
(r'[+-]?\d+', Number.Integer),
(r'([A-Za-z_@][\w@]*[%$]?)', Name.Variable),
(r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator),
],
'string': [
(r'[^"\n]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\n', Error, 'root'), # Unterminated string
],
}
def analyse_text(text):
if text.startswith('10REM >') or text.startswith('REM >'):
return 0.9
|
[
"[email protected]"
] | |
31426e105b712e14f2356ac3431be2d91963cd4c
|
2ef009eaa4cc0a6a6d1aee6794f43d8409c99711
|
/python和linux高级编程阶段/05-多任务-协程/04-协程-greenlet.py
|
7df153bb2e4c8366df336493e6ef2e926ae83870
|
[] |
no_license
|
vkhaibao/PyProject
|
6ae833cef09d7000af00dd7c842d2db29a1cd0cc
|
2a733b34f337d4497051660f473a4cfb977fc15b
|
refs/heads/master
| 2022-11-22T07:41:22.630002 | 2019-07-29T01:17:17 | 2019-07-29T01:17:17 | 173,897,429 | 1 | 3 | null | 2020-07-22T23:14:22 | 2019-03-05T07:32:41 |
Python
|
UTF-8
|
Python
| false | false | 489 |
py
|
# coding=utf8
from greenlet import greenlet
import time
import os
def test1():
while True:
print("%s" % os.getpid())
print("%s" % os.getppid())
print("---A--")
gr2.switch()
time.sleep(0.5)
def test2():
while True:
print("%s" % os.getpid())
print("%s" % os.getppid())
print("---B--")
gr1.switch()
time.sleep(0.5)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
# Switch execution to gr1
gr1.switch()
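# (Added note) test1 runs until it calls gr2.switch(), then test2 runs until it
# calls gr1.switch(); each greenlet reaches its time.sleep only after control
# is switched back to it.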
|
[
"[email protected]"
] | |
eed197eb1c8885c234bee2ca408f919a4654981f
|
43ae032297b492fbdf2df478588d2367f59d0b6b
|
/3 - Types/3.3 - InbuiltTypes-DictionarySetArray/10-dictions-methods-setdefault.py
|
9a824b12c8c37a47ca2b3db55ee9176d4dbd8697
|
[] |
no_license
|
thippeswamydm/python
|
59fa4dbb2899894de5481cb1dd4716040733c378
|
db03b49eb531e75b9f738cf77399a9813d16166b
|
refs/heads/master
| 2020-07-05T06:57:18.575099 | 2019-10-23T04:30:27 | 2019-10-23T04:30:27 | 202,562,414 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
# Describes the assigning, working, and method usages of dictionaries
obj = {'name': 'Ganesh', 'age': 5}
# Add a key value pair
if 'color' not in obj:
obj['color'] = 'light-brown'
obj = {'name': 'Ganesh', 'age': 5}
# Using setdefault function
obj.setdefault('color', 'light-brown')
# 'light-brown'
print(obj)
# {'color': 'light-brown', 'age': 5, 'name': 'Ganesh'}
obj.setdefault('color', 'white')
# 'light-brown'
print(obj)
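# For comparison (illustrative addition): dict.get reads a fallback value
# without inserting it, whereas setdefault also stores the default.
print(obj.get('weight', 'unknown'))
# 'unknown'
print(obj)
# {'name': 'Ganesh', 'age': 5, 'color': 'light-brown'} - still no 'weight' key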
|
[
"[email protected]"
] | |
e74bd9c8df9754782584f12a29542e54c31d5b05
|
210ecd63113ce90c5f09bc2b09db3e80ff98117a
|
/AbletonLive9_RemoteScripts/_Framework/ControlSurfaceComponent.py
|
f03b0d3222874fbb3521aac557d4e52d63bdf4e6
|
[] |
no_license
|
ajasver/MidiScripts
|
86a765b8568657633305541c46ccc1fd1ea34501
|
f727a2e63c95a9c5e980a0738deb0049363ba536
|
refs/heads/master
| 2021-01-13T02:03:55.078132 | 2015-07-16T18:27:30 | 2015-07-16T18:27:30 | 38,516,112 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,835 |
py
|
#Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/_Framework/ControlSurfaceComponent.py
from __future__ import absolute_import
import Live
from . import Task
from .Control import ControlManager
from .Dependency import dependency, depends
from .SubjectSlot import Subject
from .Util import lazy_attribute
class ControlSurfaceComponent(ControlManager, Subject):
"""
Base class for all classes encapsulating functions in Live
"""
name = ''
canonical_parent = None
is_private = False
_show_msg_callback = dependency(show_message=None)
_has_task_group = False
_layer = None
@depends(register_component=None, song=None)
def __init__(self, name = '', register_component = None, song = None, layer = None, is_enabled = True, is_root = False, *a, **k):
if not callable(register_component):
raise AssertionError
super(ControlSurfaceComponent, self).__init__(*a, **k)
self.name = name
        assert layer is None or not is_enabled
self._explicit_is_enabled = is_enabled
self._recursive_is_enabled = True
self._is_enabled = self._explicit_is_enabled
self._is_root = is_root
self._allow_updates = True
self._update_requests = 0
self._song = song
self._layer = layer is not None and layer
register_component(self)
def disconnect(self):
if self._has_task_group:
self._tasks.kill()
self._tasks.clear()
super(ControlSurfaceComponent, self).disconnect()
@property
def is_root(self):
return self._is_root
def _internal_on_enabled_changed(self):
if self._layer:
if self.is_enabled():
grabbed = self._layer.grab(self)
if not grabbed:
                    raise AssertionError, 'Only one component can use a layer at a time'
else:
self._layer.release(self)
        if self._has_task_group:
            if self.is_enabled():
                self._tasks.resume()
            else:
                self._tasks.pause()
def on_enabled_changed(self):
self.update()
def update_all(self):
self.update()
def set_enabled(self, enable):
self._explicit_is_enabled = bool(enable)
self._update_is_enabled()
def _set_enabled_recursive(self, enable):
self._recursive_is_enabled = bool(enable)
self._update_is_enabled()
def _update_is_enabled(self):
        is_enabled = self._recursive_is_enabled and self._explicit_is_enabled
        if is_enabled != self._is_enabled:
            self._is_enabled = is_enabled
            self._internal_on_enabled_changed()
            self.on_enabled_changed()
def set_allow_update(self, allow_updates):
allow = bool(allow_updates)
if self._allow_updates != allow:
self._allow_updates = allow
if self._allow_updates and self._update_requests > 0:
self._update_requests = 0
self.update()
def control_notifications_enabled(self):
return self.is_enabled()
def application(self):
return Live.Application.get_application()
def song(self):
return self._song
@lazy_attribute
@depends(parent_task_group=None)
def _tasks(self, parent_task_group = None):
tasks = parent_task_group.add(Task.TaskGroup())
if not self._is_enabled:
tasks.pause()
self._has_task_group = True
return tasks
def _get_layer(self):
return self._layer
def _set_layer(self, new_layer):
if self._layer != new_layer:
self._layer and self._layer.release(self)
self._layer = new_layer
if new_layer and self.is_enabled():
grabbed = new_layer.grab(self)
if not grabbed:
                    raise AssertionError, 'Only one component can use a layer at a time'
layer = property(_get_layer, _set_layer)
def is_enabled(self, explicit = False):
"""
Returns whether the component is enabled.
If 'explicit' is True the parent state is ignored.
"""
return self._is_enabled if not explicit else self._explicit_is_enabled
def on_track_list_changed(self):
"""
Called by the control surface if tracks are added/removed,
to be overridden
"""
pass
def on_scene_list_changed(self):
"""
Called by the control surface if scenes are added/removed, to
be overridden
"""
pass
def on_selected_track_changed(self):
"""
Called by the control surface when a track is selected, to be
overridden
"""
pass
def on_selected_scene_changed(self):
"""
Called by the control surface when a scene is selected, to be
overridden
"""
pass
@depends(parent_task_group=None)
def _register_timer_callback(self, callback, parent_task_group = None):
"""
DEPRECATED. Use tasks instead
"""
        assert callable(callback)
        assert parent_task_group.find(callback) is None
def wrapper(delta):
callback()
return Task.RUNNING
parent_task_group.add(Task.FuncTask(wrapper, callback))
@depends(parent_task_group=None)
def _unregister_timer_callback(self, callback, parent_task_group = None):
"""
DEPRECATED. Use tasks instead
"""
        assert callable(callback)
task = parent_task_group.find(callback)
        assert task is not None
parent_task_group.remove(task)
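# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Illustrates how a control surface component is typically subclassed and
# toggled. The subclass name and its body are hypothetical; the commented-out
# construction only works inside a control surface's dependency context, where
# register_component and song are injected by @depends.
class _ExampleSelectionComponent(ControlSurfaceComponent):
    def on_selected_track_changed(self):
        # self.song() returns the injected Live song object
        track = self.song().view.selected_track
        self.name = track.name  # remember the selection, purely for illustration
# component = _ExampleSelectionComponent(name='example')
# component.set_enabled(False)  # releases its layer and pauses its task group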
|
[
"[email protected]"
] | |
c860c4759252bb1e15565c399acfdcac7a7de65c
|
f3d80a90a6688aa05c11e488e0eb52a53bc5c713
|
/python/ccxt/async_support/bitpanda.py
|
e3fb53ae49e50088eced4b1a000427f326c064b7
|
[
"MIT"
] |
permissive
|
woododoCode/ccxt
|
1da09329b53fabed9d1628b16bd027ab4f5668b3
|
51d23b4c54b132fe381aea4b88ea2d1da2a2a6f8
|
refs/heads/master
| 2023-04-15T05:51:12.056281 | 2021-04-22T18:31:34 | 2021-04-22T18:31:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 74,047 |
py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
class bitpanda(Exchange):
def describe(self):
return self.deep_extend(super(bitpanda, self).describe(), {
'id': 'bitpanda',
'name': 'Bitpanda Pro',
'countries': ['AT'], # Austria
'rateLimit': 300,
'version': 'v1',
# new metainfo interface
'has': {
'CORS': False,
'publicAPI': True,
'privateAPI': True,
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': True,
'createDepositAddress': True,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposits': True,
'fetchDepositAddress': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderTrades': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFees': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1/MINUTES',
'5m': '5/MINUTES',
'15m': '15/MINUTES',
'30m': '30/MINUTES',
'1h': '1/HOURS',
'4h': '4/HOURS',
'1d': '1/DAYS',
'1w': '1/WEEKS',
'1M': '1/MONTHS',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87591171-9a377d80-c6f0-11ea-94ac-97a126eac3bc.jpg',
'api': {
'public': 'https://api.exchange.bitpanda.com/public',
'private': 'https://api.exchange.bitpanda.com/public',
},
'www': 'https://www.bitpanda.com/en/pro',
'doc': [
'https://developers.bitpanda.com/exchange/',
],
'fees': 'https://www.bitpanda.com/en/pro/fees',
},
'api': {
'public': {
'get': [
'currencies',
'candlesticks/{instrument_code}',
'fees',
'instruments',
'order-book/{instrument_code}',
'market-ticker',
'market-ticker/{instrument_code}',
'price-ticks/{instrument_code}',
'time',
],
},
'private': {
'get': [
'account/balances',
'account/deposit/crypto/{currency_code}',
'account/deposit/fiat/EUR',
'account/deposits',
'account/deposits/bitpanda',
'account/withdrawals',
'account/withdrawals/bitpanda',
'account/fees',
'account/orders',
'account/orders/{order_id}',
'account/orders/{order_id}/trades',
'account/trades',
'account/trades/{trade_id}',
'account/trading-volume',
],
'post': [
'account/deposit/crypto',
'account/withdraw/crypto',
'account/withdraw/fiat',
'account/fees',
'account/orders',
],
'delete': [
'account/orders',
'account/orders/{order_id}',
'account/orders/client/{client_id}',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.15 / 100,
'maker': 0.10 / 100,
'tiers': [
# volume in BTC
{
'taker': [
[0, 0.15 / 100],
[100, 0.13 / 100],
[250, 0.13 / 100],
[1000, 0.1 / 100],
[5000, 0.09 / 100],
[10000, 0.075 / 100],
[20000, 0.065 / 100],
],
'maker': [
[0, 0.1 / 100],
[100, 0.1 / 100],
[250, 0.09 / 100],
[1000, 0.075 / 100],
[5000, 0.06 / 100],
[10000, 0.05 / 100],
[20000, 0.05 / 100],
],
},
],
},
},
'requiredCredentials': {
'apiKey': True,
'secret': False,
},
'exceptions': {
'exact': {
'INVALID_CLIENT_UUID': InvalidOrder,
'ORDER_NOT_FOUND': OrderNotFound,
'ONLY_ONE_ERC20_ADDRESS_ALLOWED': InvalidAddress,
'DEPOSIT_ADDRESS_NOT_USED': InvalidAddress,
'INVALID_CREDENTIALS': AuthenticationError,
'MISSING_CREDENTIALS': AuthenticationError,
'INVALID_APIKEY': AuthenticationError,
'INVALID_SCOPES': AuthenticationError,
'INVALID_SUBJECT': AuthenticationError,
'INVALID_ISSUER': AuthenticationError,
'INVALID_AUDIENCE': AuthenticationError,
'INVALID_DEVICE_ID': AuthenticationError,
'INVALID_IP_RESTRICTION': AuthenticationError,
'APIKEY_REVOKED': AuthenticationError,
'APIKEY_EXPIRED': AuthenticationError,
'SYNCHRONIZER_TOKEN_MISMATCH': AuthenticationError,
'SESSION_EXPIRED': AuthenticationError,
'INTERNAL_ERROR': AuthenticationError,
'CLIENT_IP_BLOCKED': PermissionDenied,
'MISSING_PERMISSION': PermissionDenied,
'ILLEGAL_CHARS': BadRequest,
'UNSUPPORTED_MEDIA_TYPE': BadRequest,
'ACCOUNT_HISTORY_TIME_RANGE_TOO_BIG': BadRequest,
'CANDLESTICKS_TIME_RANGE_TOO_BIG': BadRequest,
'INVALID_INSTRUMENT_CODE': BadRequest,
'INVALID_ORDER_TYPE': BadRequest,
'INVALID_UNIT': BadRequest,
'INVALID_PERIOD': BadRequest,
'INVALID_TIME': BadRequest,
'INVALID_DATE': BadRequest,
'INVALID_CURRENCY': BadRequest,
'INVALID_AMOUNT': BadRequest,
'INVALID_PRICE': BadRequest,
'INVALID_LIMIT': BadRequest,
'INVALID_QUERY': BadRequest,
'INVALID_CURSOR': BadRequest,
'INVALID_ACCOUNT_ID': BadRequest,
'INVALID_SIDE': InvalidOrder,
'INVALID_ACCOUNT_HISTORY_FROM_TIME': BadRequest,
'INVALID_ACCOUNT_HISTORY_MAX_PAGE_SIZE': BadRequest,
'INVALID_ACCOUNT_HISTORY_TIME_PERIOD': BadRequest,
'INVALID_ACCOUNT_HISTORY_TO_TIME': BadRequest,
'INVALID_CANDLESTICKS_GRANULARITY': BadRequest,
'INVALID_CANDLESTICKS_UNIT': BadRequest,
'INVALID_ORDER_BOOK_DEPTH': BadRequest,
'INVALID_ORDER_BOOK_LEVEL': BadRequest,
'INVALID_PAGE_CURSOR': BadRequest,
'INVALID_TIME_RANGE': BadRequest,
'INVALID_TRADE_ID': BadRequest,
'INVALID_UI_ACCOUNT_SETTINGS': BadRequest,
'NEGATIVE_AMOUNT': InvalidOrder,
'NEGATIVE_PRICE': InvalidOrder,
'MIN_SIZE_NOT_SATISFIED': InvalidOrder,
'BAD_AMOUNT_PRECISION': InvalidOrder,
'BAD_PRICE_PRECISION': InvalidOrder,
'BAD_TRIGGER_PRICE_PRECISION': InvalidOrder,
'MAX_OPEN_ORDERS_EXCEEDED': BadRequest,
'MISSING_PRICE': InvalidOrder,
'MISSING_ORDER_TYPE': InvalidOrder,
'MISSING_SIDE': InvalidOrder,
'MISSING_CANDLESTICKS_PERIOD_PARAM': ArgumentsRequired,
'MISSING_CANDLESTICKS_UNIT_PARAM': ArgumentsRequired,
'MISSING_FROM_PARAM': ArgumentsRequired,
'MISSING_INSTRUMENT_CODE': ArgumentsRequired,
'MISSING_ORDER_ID': InvalidOrder,
'MISSING_TO_PARAM': ArgumentsRequired,
'MISSING_TRADE_ID': ArgumentsRequired,
'INVALID_ORDER_ID': OrderNotFound,
'NOT_FOUND': OrderNotFound,
'INSUFFICIENT_LIQUIDITY': InsufficientFunds,
'INSUFFICIENT_FUNDS': InsufficientFunds,
'NO_TRADING': ExchangeNotAvailable,
'SERVICE_UNAVAILABLE': ExchangeNotAvailable,
'GATEWAY_TIMEOUT': ExchangeNotAvailable,
'RATELIMIT': DDoSProtection,
'CF_RATELIMIT': DDoSProtection,
'INTERNAL_SERVER_ERROR': ExchangeError,
},
'broad': {
},
},
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
# exchange-specific options
'options': {
'fetchTradingFees': {
'method': 'fetchPrivateTradingFees', # or 'fetchPublicTradingFees'
},
'fiat': ['EUR', 'CHF'],
},
})
async def fetch_time(self, params={}):
response = await self.publicGetTime(params)
#
# {
# iso: '2020-07-10T05:17:26.716Z',
# epoch_millis: 1594358246716,
# }
#
return self.safe_integer(response, 'epoch_millis')
async def fetch_currencies(self, params={}):
response = await self.publicGetCurrencies(params)
#
# [
# {
# "code":"BEST",
# "precision":8
# }
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'code')
code = self.safe_currency_code(id)
result[code] = {
'id': id,
'code': code,
'name': None,
'info': currency, # the original payload
'active': None,
'fee': None,
'precision': self.safe_integer(currency, 'precision'),
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {'min': None, 'max': None},
},
}
return result
async def fetch_markets(self, params={}):
response = await self.publicGetInstruments(params)
#
# [
# {
# state: 'ACTIVE',
# base: {code: 'ETH', precision: 8},
# quote: {code: 'CHF', precision: 2},
# amount_precision: 4,
# market_precision: 2,
# min_size: '10.0'
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
baseAsset = self.safe_value(market, 'base', {})
quoteAsset = self.safe_value(market, 'quote', {})
baseId = self.safe_string(baseAsset, 'code')
quoteId = self.safe_string(quoteAsset, 'code')
id = baseId + '_' + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'amount_precision'),
'price': self.safe_integer(market, 'market_precision'),
}
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min_size'),
'max': None,
},
}
state = self.safe_string(market, 'state')
active = (state == 'ACTIVE')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'precision': precision,
'limits': limits,
'info': market,
'active': active,
})
return result
async def fetch_trading_fees(self, params={}):
method = self.safe_string(params, 'method')
params = self.omit(params, 'method')
if method is None:
options = self.safe_value(self.options, 'fetchTradingFees', {})
method = self.safe_string(options, 'method', 'fetchPrivateTradingFees')
return await getattr(self, method)(params)
async def fetch_public_trading_fees(self, params={}):
await self.load_markets()
response = await self.publicGetFees(params)
#
# [
# {
# "fee_group_id":"default",
# "display_text":"The standard fee plan.",
# "fee_tiers":[
# {"volume":"0.0","fee_group_id":"default","maker_fee":"0.1","taker_fee":"0.15"},
# {"volume":"100.0","fee_group_id":"default","maker_fee":"0.1","taker_fee":"0.13"},
# {"volume":"250.0","fee_group_id":"default","maker_fee":"0.09","taker_fee":"0.13"},
# {"volume":"1000.0","fee_group_id":"default","maker_fee":"0.075","taker_fee":"0.1"},
# {"volume":"5000.0","fee_group_id":"default","maker_fee":"0.06","taker_fee":"0.09"},
# {"volume":"10000.0","fee_group_id":"default","maker_fee":"0.05","taker_fee":"0.075"},
# {"volume":"20000.0","fee_group_id":"default","maker_fee":"0.05","taker_fee":"0.065"}
# ],
# "fee_discount_rate":"25.0",
# "minimum_price_value":"0.12"
# }
# ]
#
feeGroupsById = self.index_by(response, 'fee_group_id')
feeGroupId = self.safe_value(self.options, 'fee_group_id', 'default')
feeGroup = self.safe_value(feeGroupsById, feeGroupId, {})
feeTiers = self.safe_value(feeGroup, 'fee_tiers')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
fee = {
'info': feeGroup,
'symbol': symbol,
'maker': None,
'taker': None,
'percentage': True,
'tierBased': True,
}
takerFees = []
makerFees = []
for i in range(0, len(feeTiers)):
tier = feeTiers[i]
volume = self.safe_number(tier, 'volume')
taker = self.safe_number(tier, 'taker_fee')
maker = self.safe_number(tier, 'maker_fee')
taker /= 100
maker /= 100
takerFees.append([volume, taker])
makerFees.append([volume, maker])
if i == 0:
fee['taker'] = taker
fee['maker'] = maker
tiers = {
'taker': takerFees,
'maker': makerFees,
}
fee['tiers'] = tiers
result[symbol] = fee
return result
async def fetch_private_trading_fees(self, params={}):
await self.load_markets()
response = await self.privateGetAccountFees(params)
#
# {
# "account_id": "ed524d00-820a-11e9-8f1e-69602df16d85",
# "running_trading_volume": "0.0",
# "fee_group_id": "default",
# "collect_fees_in_best": False,
# "fee_discount_rate": "25.0",
# "minimum_price_value": "0.12",
# "fee_tiers": [
# {"volume": "0.0", "fee_group_id": "default", "maker_fee": "0.1", "taker_fee": "0.1"},
# {"volume": "100.0", "fee_group_id": "default", "maker_fee": "0.09", "taker_fee": "0.1"},
# {"volume": "250.0", "fee_group_id": "default", "maker_fee": "0.08", "taker_fee": "0.1"},
# {"volume": "1000.0", "fee_group_id": "default", "maker_fee": "0.07", "taker_fee": "0.09"},
# {"volume": "5000.0", "fee_group_id": "default", "maker_fee": "0.06", "taker_fee": "0.08"},
# {"volume": "10000.0", "fee_group_id": "default", "maker_fee": "0.05", "taker_fee": "0.07"},
# {"volume": "20000.0", "fee_group_id": "default", "maker_fee": "0.05", "taker_fee": "0.06"},
# {"volume": "50000.0", "fee_group_id": "default", "maker_fee": "0.05", "taker_fee": "0.05"}
# ],
# "active_fee_tier": {"volume": "0.0", "fee_group_id": "default", "maker_fee": "0.1", "taker_fee": "0.1"}
# }
#
activeFeeTier = self.safe_value(response, 'active_fee_tier', {})
result = {
'info': response,
'maker': self.safe_number(activeFeeTier, 'maker_fee'),
'taker': self.safe_number(activeFeeTier, 'taker_fee'),
'percentage': True,
'tierBased': True,
}
feeTiers = self.safe_value(response, 'fee_tiers')
takerFees = []
makerFees = []
for i in range(0, len(feeTiers)):
tier = feeTiers[i]
volume = self.safe_number(tier, 'volume')
taker = self.safe_number(tier, 'taker_fee')
maker = self.safe_number(tier, 'maker_fee')
taker /= 100
maker /= 100
takerFees.append([volume, taker])
makerFees.append([volume, maker])
tiers = {
'taker': takerFees,
'maker': makerFees,
}
result['tiers'] = tiers
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "instrument_code":"BTC_EUR",
# "sequence":602562,
# "time":"2020-07-10T06:27:34.951Z",
# "state":"ACTIVE",
# "is_frozen":0,
# "quote_volume":"1695555.1783768",
# "base_volume":"205.67436",
# "last_price":"8143.91",
# "best_bid":"8143.71",
# "best_ask":"8156.9",
# "price_change":"-147.47",
# "price_change_percentage":"-1.78",
# "high":"8337.45",
# "low":"8110.0"
# }
#
timestamp = self.parse8601(self.safe_string(ticker, 'time'))
marketId = self.safe_string(ticker, 'instrument_code')
symbol = self.safe_symbol(marketId, market, '_')
last = self.safe_number(ticker, 'last_price')
percentage = self.safe_number(ticker, 'price_change_percentage')
change = self.safe_number(ticker, 'price_change')
open = None
average = None
if (last is not None) and (change is not None):
open = last - change
average = self.sum(last, open) / 2
baseVolume = self.safe_number(ticker, 'base_volume')
quoteVolume = self.safe_number(ticker, 'quote_volume')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': self.safe_number(ticker, 'best_bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'best_ask'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
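    # Editor's worked example (illustrative, values taken from the sample
    # payload in the comments above): last_price 8143.91 and price_change
    # -147.47 give open = 8143.91 - (-147.47) = 8291.38 and
    # average = (8143.91 + 8291.38) / 2 = 8217.645.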
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'instrument_code': market['id'],
}
response = await self.publicGetMarketTickerInstrumentCode(self.extend(request, params))
#
# {
# "instrument_code":"BTC_EUR",
# "sequence":602562,
# "time":"2020-07-10T06:27:34.951Z",
# "state":"ACTIVE",
# "is_frozen":0,
# "quote_volume":"1695555.1783768",
# "base_volume":"205.67436",
# "last_price":"8143.91",
# "best_bid":"8143.71",
# "best_ask":"8156.9",
# "price_change":"-147.47",
# "price_change_percentage":"-1.78",
# "high":"8337.45",
# "low":"8110.0"
# }
#
return self.parse_ticker(response, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetMarketTicker(params)
#
# [
# {
# "instrument_code":"BTC_EUR",
# "sequence":602562,
# "time":"2020-07-10T06:27:34.951Z",
# "state":"ACTIVE",
# "is_frozen":0,
# "quote_volume":"1695555.1783768",
# "base_volume":"205.67436",
# "last_price":"8143.91",
# "best_bid":"8143.71",
# "best_ask":"8156.9",
# "price_change":"-147.47",
# "price_change_percentage":"-1.78",
# "high":"8337.45",
# "low":"8110.0"
# }
# ]
#
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'instrument_code': self.market_id(symbol),
# level 1 means only the best bid and ask
# level 2 is a compiled order book up to market precision
# level 3 is a full orderbook
# if you wish to get regular updates about orderbooks please use the Websocket channel
            # heavy usage of this endpoint may result in limited access according to rate limit rules
# 'level': 3, # default
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetOrderBookInstrumentCode(self.extend(request, params))
#
# level 1
#
# {
# "instrument_code":"BTC_EUR",
# "time":"2020-07-10T07:39:06.343Z",
# "asks":{
# "value":{
# "price":"8145.29",
# "amount":"0.96538",
# "number_of_orders":1
# }
# },
# "bids":{
# "value":{
# "price":"8134.0",
# "amount":"1.5978",
# "number_of_orders":5
# }
# }
# }
#
# level 2
#
# {
# "instrument_code":"BTC_EUR","time":"2020-07-10T07:36:43.538Z",
# "asks":[
# {"price":"8146.59","amount":"0.89691","number_of_orders":1},
# {"price":"8146.89","amount":"1.92062","number_of_orders":1},
# {"price":"8169.5","amount":"0.0663","number_of_orders":1},
# ],
# "bids":[
# {"price":"8143.49","amount":"0.01329","number_of_orders":1},
# {"price":"8137.01","amount":"5.34748","number_of_orders":1},
# {"price":"8137.0","amount":"2.0","number_of_orders":1},
# ]
# }
#
# level 3
#
# {
# "instrument_code":"BTC_EUR",
# "time":"2020-07-10T07:32:31.525Z",
# "bids":[
# {"price":"8146.79","amount":"0.01537","order_id":"5d717da1-a8f4-422d-afcc-03cb6ab66825"},
# {"price":"8139.32","amount":"3.66009","order_id":"d0715c68-f28d-4cf1-a450-d56cf650e11c"},
# {"price":"8137.51","amount":"2.61049","order_id":"085fd6f4-e835-4ca5-9449-a8f165772e60"},
# ],
# "asks":[
# {"price":"8153.49","amount":"0.93384","order_id":"755d3aa3-42b5-46fa-903d-98f42e9ae6c4"},
# {"price":"8153.79","amount":"1.80456","order_id":"62034cf3-b70d-45ff-b285-ba6307941e7c"},
# {"price":"8167.9","amount":"0.0018","order_id":"036354e0-71cd-492f-94f2-01f7d4b66422"},
# ]
# }
#
timestamp = self.parse8601(self.safe_string(response, 'time'))
return self.parse_order_book(response, timestamp, 'bids', 'asks', 'price', 'amount')
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "instrument_code":"BTC_EUR",
# "granularity":{"unit":"HOURS","period":1},
# "high":"9252.65",
# "low":"9115.27",
# "open":"9250.0",
# "close":"9132.35",
# "total_amount":"33.85924",
# "volume":"311958.9635744",
# "time":"2020-05-08T22:59:59.999Z",
# "last_sequence":461123
# }
#
granularity = self.safe_value(ohlcv, 'granularity')
unit = self.safe_string(granularity, 'unit')
period = self.safe_string(granularity, 'period')
units = {
'MINUTES': 'm',
'HOURS': 'h',
'DAYS': 'd',
'WEEKS': 'w',
'MONTHS': 'M',
}
lowercaseUnit = self.safe_string(units, unit)
timeframe = period + lowercaseUnit
durationInSeconds = self.parse_timeframe(timeframe)
duration = durationInSeconds * 1000
timestamp = self.parse8601(self.safe_string(ohlcv, 'time'))
alignedTimestamp = duration * int(timestamp / duration)
options = self.safe_value(self.options, 'fetchOHLCV', {})
volumeField = self.safe_string(options, 'volume', 'total_amount')
return [
alignedTimestamp,
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, volumeField),
]
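    # Editor's note (illustrative): Bitpanda stamps a candle at the end of its
    # period (e.g. "2020-05-08T22:59:59.999Z" for a 1-hour candle); the integer
    # division by `duration` above re-stamps it to the period start,
    # "2020-05-08T22:00:00.000Z", as expected by the unified OHLCV format.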
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
periodUnit = self.safe_string(self.timeframes, timeframe)
period, unit = periodUnit.split('/')
durationInSeconds = self.parse_timeframe(timeframe)
duration = durationInSeconds * 1000
if limit is None:
limit = 1500
request = {
'instrument_code': market['id'],
# 'from': self.iso8601(since),
# 'to': self.iso8601(self.milliseconds()),
'period': period,
'unit': unit,
}
if since is None:
now = self.milliseconds()
request['to'] = self.iso8601(now)
request['from'] = self.iso8601(now - limit * duration)
else:
request['from'] = self.iso8601(since)
request['to'] = self.iso8601(self.sum(since, limit * duration))
response = await self.publicGetCandlesticksInstrumentCode(self.extend(request, params))
#
# [
# {"instrument_code":"BTC_EUR","granularity":{"unit":"HOURS","period":1},"high":"9252.65","low":"9115.27","open":"9250.0","close":"9132.35","total_amount":"33.85924","volume":"311958.9635744","time":"2020-05-08T22:59:59.999Z","last_sequence":461123},
# {"instrument_code":"BTC_EUR","granularity":{"unit":"HOURS","period":1},"high":"9162.49","low":"9040.0","open":"9132.53","close":"9083.69","total_amount":"26.19685","volume":"238553.7812365","time":"2020-05-08T23:59:59.999Z","last_sequence":461376},
# {"instrument_code":"BTC_EUR","granularity":{"unit":"HOURS","period":1},"high":"9135.7","low":"9002.59","open":"9055.45","close":"9133.98","total_amount":"26.21919","volume":"238278.8724959","time":"2020-05-09T00:59:59.999Z","last_sequence":461521},
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
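    # Editor's worked example of the request window built above (illustrative
    # values): timeframe='1h' gives duration = 3600 * 1000 ms; with the default
    # limit of 1500 and no `since`, `to` is "now" and `from` is
    # now - 1500 * 3,600,000 ms, i.e. the most recent 1500 hourly candles.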
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "instrument_code":"BTC_EUR",
# "price":"8137.28",
# "amount":"0.22269",
# "taker_side":"BUY",
# "volume":"1812.0908832",
# "time":"2020-07-10T14:44:32.299Z",
# "trade_timestamp":1594392272299,
# "sequence":603047
# }
#
# fetchMyTrades, fetchOrder, fetchOpenOrders, fetchClosedOrders trades(private)
#
# {
# "fee": {
# "fee_amount": "0.0014",
# "fee_currency": "BTC",
# "fee_percentage": "0.1",
# "fee_group_id": "default",
# "fee_type": "TAKER",
# "running_trading_volume": "0.0"
# },
# "trade": {
# "trade_id": "fdff2bcc-37d6-4a2d-92a5-46e09c868664",
# "order_id": "36bb2437-7402-4794-bf26-4bdf03526439",
# "account_id": "a4c699f6-338d-4a26-941f-8f9853bfc4b9",
# "amount": "1.4",
# "side": "BUY",
# "instrument_code": "BTC_EUR",
# "price": "7341.4",
# "time": "2019-09-27T15:05:32.564Z",
# "sequence": 48670
# }
# }
#
feeInfo = self.safe_value(trade, 'fee', {})
trade = self.safe_value(trade, 'trade', trade)
timestamp = self.safe_integer(trade, 'trade_timestamp')
if timestamp is None:
timestamp = self.parse8601(self.safe_string(trade, 'time'))
side = self.safe_string_lower_2(trade, 'side', 'taker_side')
price = self.safe_number(trade, 'price')
amount = self.safe_number(trade, 'amount')
cost = self.safe_number(trade, 'volume')
if (cost is None) and (amount is not None) and (price is not None):
cost = amount * price
marketId = self.safe_string(trade, 'instrument_code')
symbol = self.safe_symbol(marketId, market, '_')
feeCost = self.safe_number(feeInfo, 'fee_amount')
takerOrMaker = None
fee = None
if feeCost is not None:
feeCurrencyId = self.safe_string(feeInfo, 'fee_currency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
feeRate = self.safe_number(feeInfo, 'fee_percentage')
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
'rate': feeRate,
}
takerOrMaker = self.safe_string_lower(feeInfo, 'fee_type')
return {
'id': self.safe_string_2(trade, 'trade_id', 'sequence'),
'order': self.safe_string(trade, 'order_id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': takerOrMaker,
'fee': fee,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'instrument_code': market['id'],
# 'from': self.iso8601(since),
# 'to': self.iso8601(self.milliseconds()),
}
if since is not None:
# returns price ticks for a specific market with an interval of maximum of 4 hours
# sorted by latest first
request['from'] = self.iso8601(since)
request['to'] = self.iso8601(self.sum(since, 14400000))
response = await self.publicGetPriceTicksInstrumentCode(self.extend(request, params))
#
# [
# {
# "instrument_code":"BTC_EUR",
# "price":"8137.28",
# "amount":"0.22269",
# "taker_side":"BUY",
# "volume":"1812.0908832",
# "time":"2020-07-10T14:44:32.299Z",
# "trade_timestamp":1594392272299,
# "sequence":603047
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetAccountBalances(params)
#
# {
# "account_id":"4b95934f-55f1-460c-a525-bd5afc0cf071",
# "balances":[
# {
# "account_id":"4b95934f-55f1-460c-a525-bd5afc0cf071",
# "currency_code":"BTC",
# "change":"10.0",
# "available":"10.0",
# "locked":"0.0",
# "sequence":142135994,
# "time":"2020-07-01T10:57:32.959Z"
# }
# ]
# }
#
balances = self.safe_value(response, 'balances', [])
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency_code')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'locked')
result[code] = account
return self.parse_balance(result, False)
def parse_deposit_address(self, depositAddress, currency=None):
code = None
if currency is not None:
code = currency['code']
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'destination_tag')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
async def create_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privatePostAccountDepositCrypto(self.extend(request, params))
#
# {
# "address":"rBnNhk95FrdNisZtXcStzriFS8vEzz53DM",
# "destination_tag":"865690307",
# "enabled":true,
# "is_smart_contract":false
# }
#
return self.parse_deposit_address(response, currency)
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency_code': currency['id'],
}
response = await self.privateGetAccountDepositCryptoCurrencyCode(self.extend(request, params))
#
# {
# "address":"rBnNhk95FrdNisZtXcStzriFS8vEzz53DM",
# "destination_tag":"865690307",
# "enabled":true,
# "is_smart_contract":false,
# "can_create_more":false
# }
#
return self.parse_deposit_address(response, currency)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'cursor': 'string', # pointer specifying the position from which the next pages should be returned
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency_code'] = currency['id']
if limit is not None:
request['max_page_size'] = limit
if since is not None:
to = self.safe_string(params, 'to')
if to is None:
                raise ArgumentsRequired(self.id + ' fetchDeposits() requires a "to" iso8601 string param when the since argument is specified')
request['from'] = self.iso8601(since)
response = await self.privateGetAccountDeposits(self.extend(request, params))
#
# {
# "deposit_history": [
# {
# "transaction_id": "e5342efcd-d5b7-4a56-8e12-b69ffd68c5ef",
# "account_id": "c2d0076a-c20d-41f8-9e9a-1a1d028b2b58",
# "amount": "100",
# "type": "CRYPTO",
# "funds_source": "INTERNAL",
# "time": "2020-04-22T09:57:47Z",
# "currency": "BTC",
# "fee_amount": "0.0",
# "fee_currency": "BTC"
# },
# {
# "transaction_id": "79793d00-2899-4a4d-95b7-73ae6b31384f",
# "account_id": "c2d0076a-c20d-41f8-9e9a-1a1d028b2b58",
# "time": "2020-05-05T11:22:07.925Z",
# "currency": "EUR",
# "funds_source": "EXTERNAL",
# "type": "FIAT",
# "amount": "50.0",
# "fee_amount": "0.01",
# "fee_currency": "EUR"
# }
# ],
# "max_page_size": 2,
# "cursor": "eyJhY2NvdW50X2lkIjp7InMiOiJlMzY5YWM4MC00NTc3LTExZTktYWUwOC05YmVkYzQ3OTBiODQiLCJzcyI6W10sIm5zIjpbXSwiYnMiOltdLCJtIjp7fSwibCI6W119LCJpdGVtX2tleSI6eyJzIjoiV0lUSERSQVdBTDo6MmFlMjYwY2ItOTk3MC00YmNiLTgxNmEtZGY4MDVmY2VhZTY1Iiwic3MiOltdLCJucyI6W10sImJzIjpbXSwibSI6e30sImwiOltdfSwiZ2xvYmFsX3dpdGhkcmF3YWxfaW5kZXhfaGFzaF9rZXkiOnsicyI6ImUzNjlhYzgwLTQ1NzctMTFlOS1hZTA4LTliZWRjNDc5MGI4NCIsInNzIjpbXSwibnMiOltdLCJicyI6W10sIm0iOnt9LCJsIjpbXX0sInRpbWVzdGFtcCI6eyJuIjoiMTU4ODA1ODc2Nzk0OCIsInNzIjpbXSwibnMiOltdLCJicyI6W10sIm0iOnt9LCJsIjpbXX19"
# }
#
depositHistory = self.safe_value(response, 'deposit_history', [])
return self.parse_transactions(depositHistory, currency, since, limit, {'type': 'deposit'})
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'cursor': 'string', # pointer specifying the position from which the next pages should be returned
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency_code'] = currency['id']
if limit is not None:
request['max_page_size'] = limit
if since is not None:
to = self.safe_string(params, 'to')
if to is None:
                raise ArgumentsRequired(self.id + ' fetchWithdrawals() requires a "to" iso8601 string param when the since argument is specified')
request['from'] = self.iso8601(since)
response = await self.privateGetAccountWithdrawals(self.extend(request, params))
#
# {
# "withdrawal_history": [
# {
# "account_id": "e369ac80-4577-11e9-ae08-9bedc4790b84",
# "amount": "0.1",
# "currency": "BTC",
# "fee_amount": "0.00002",
# "fee_currency": "BTC",
# "funds_source": "EXTERNAL",
# "related_transaction_id": "e298341a-3855-405e-bce3-92db368a3157",
# "time": "2020-05-05T11:11:32.110Z",
# "transaction_id": "6693ff40-bb10-4dcf-ada7-3b287727c882",
# "type": "CRYPTO"
# },
# {
# "account_id": "e369ac80-4577-11e9-ae08-9bedc4790b84",
# "amount": "0.1",
# "currency": "BTC",
# "fee_amount": "0.0",
# "fee_currency": "BTC",
# "funds_source": "INTERNAL",
# "time": "2020-05-05T10:29:53.464Z",
# "transaction_id": "ec9703b1-954b-4f76-adea-faac66eabc0b",
# "type": "CRYPTO"
# }
# ],
# "cursor": "eyJhY2NvdW50X2lkIjp7InMiOiJlMzY5YWM4MC00NTc3LTExZTktYWUwOC05YmVkYzQ3OTBiODQiLCJzcyI6W10sIm5zIjpbXSwiYnMiOltdLCJtIjp7fSwibCI6W119LCJpdGVtX2tleSI6eyJzIjoiV0lUSERSQVdBTDo6ZWM5NzAzYjEtOTU0Yi00Zjc2LWFkZWEtZmFhYzY2ZWFiYzBiIiwic3MiOltdLCJucyI6W10sImJzIjpbXSwibSI6e30sImwiOltdfSwiZ2xvYmFsX3dpdGhkcmF3YWxfaW5kZXhfaGFzaF9rZXkiOnsicyI6ImUzNjlhYzgwLTQ1NzctMTFlOS1hZTA4LTliZWRjNDc5MGI4NCIsInNzIjpbXSwibnMiOltdLCJicyI6W10sIm0iOnt9LCJsIjpbXX0sInRpbWVzdGFtcCI6eyJuIjoiMTU4ODY3NDU5MzQ2NCIsInNzIjpbXSwibnMiOltdLCJicyI6W10sIm0iOnt9LCJsIjpbXX19",
# "max_page_size": 2
# }
#
withdrawalHistory = self.safe_value(response, 'withdrawal_history', [])
return self.parse_transactions(withdrawalHistory, currency, since, limit, {'type': 'withdrawal'})
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'currency': code,
'amount': self.currency_to_precision(code, amount),
# 'payout_account_id': '66756a10-3e86-48f4-9678-b634c4b135b2', # fiat only
# 'recipient': { # crypto only
# 'address': address,
# # 'destination_tag': '',
# },
}
options = self.safe_value(self.options, 'fiat', [])
isFiat = self.in_array(code, options)
method = 'privatePostAccountWithdrawFiat' if isFiat else 'privatePostAccountWithdrawCrypto'
if isFiat:
payoutAccountId = self.safe_string(params, 'payout_account_id')
if payoutAccountId is None:
raise ArgumentsRequired(self.id + ' withdraw() requires a payout_account_id param for fiat ' + code + ' withdrawals')
else:
recipient = {'address': address}
if tag is not None:
recipient['destination_tag'] = tag
request['recipient'] = recipient
response = await getattr(self, method)(self.extend(request, params))
#
# crypto
#
# {
# "amount": "1234.5678",
# "fee": "1234.5678",
# "recipient": "3NacQ7rzZdhfyAtfJ5a11k8jFPdcMP2Bq7",
# "destination_tag": "",
# "transaction_id": "d0f8529f-f832-4e6a-9dc5-b8d5797badb2"
# }
#
# fiat
#
# {
# "transaction_id": "54236cd0-4413-11e9-93fb-5fea7e5b5df6"
# }
#
return self.parse_transaction(response, currency)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits, fetchWithdrawals
#
# {
# "transaction_id": "C2b42efcd-d5b7-4a56-8e12-b69ffd68c5ef",
# "type": "FIAT",
# "account_id": "c2d0076a-c20d-41f8-9e9a-1a1d028b2b58",
# "amount": "1234.5678",
# "time": "2019-08-24T14:15:22Z",
# "funds_source": "INTERNAL",
# "currency": "BTC",
# "fee_amount": "1234.5678",
# "fee_currency": "BTC",
# "blockchain_transaction_id": "f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16",
# "related_transaction_id": "e298341a-3855-405e-bce3-92db368a3157"
# }
#
# withdraw
#
#
# crypto
#
# {
# "amount": "1234.5678",
# "fee": "1234.5678",
# "recipient": "3NacQ7rzZdhfyAtfJ5a11k8jFPdcMP2Bq7",
# "destination_tag": "",
# "transaction_id": "d0f8529f-f832-4e6a-9dc5-b8d5797badb2"
# }
#
# fiat
#
# {
# "transaction_id": "54236cd0-4413-11e9-93fb-5fea7e5b5df6"
# }
#
id = self.safe_string(transaction, 'transaction_id')
amount = self.safe_number(transaction, 'amount')
timestamp = self.parse8601(self.safe_string(transaction, 'time'))
currencyId = self.safe_string(transaction, 'currency')
currency = self.safe_currency(currencyId, currency)
status = 'ok' # the exchange returns cleared transactions only
feeCost = self.safe_number_2(transaction, 'fee_amount', 'fee')
fee = None
addressTo = self.safe_string(transaction, 'recipient')
tagTo = self.safe_string(transaction, 'destination_tag')
if feeCost is not None:
feeCurrencyId = self.safe_string(transaction, 'fee_currency', currencyId)
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
return {
'info': transaction,
'id': id,
'currency': currency['code'],
'amount': amount,
'address': addressTo,
'addressFrom': None,
'addressTo': addressTo,
'tag': tagTo,
'tagFrom': None,
'tagTo': tagTo,
'status': status,
'type': None,
'updated': None,
'txid': self.safe_string(transaction, 'blockchain_transaction_id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def parse_order_status(self, status):
statuses = {
'FILLED': 'open',
'FILLED_FULLY': 'closed',
'FILLED_CLOSED': 'canceled',
'FILLED_REJECTED': 'rejected',
'OPEN': 'open',
'REJECTED': 'rejected',
'CLOSED': 'canceled',
'FAILED': 'failed',
'STOP_TRIGGERED': 'triggered',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "order_id": "d5492c24-2995-4c18-993a-5b8bf8fffc0d",
# "client_id": "d75fb03b-b599-49e9-b926-3f0b6d103206",
# "account_id": "a4c699f6-338d-4a26-941f-8f9853bfc4b9",
# "instrument_code": "BTC_EUR",
# "time": "2019-08-01T08:00:44.026Z",
# "side": "BUY",
# "price": "5000",
# "amount": "1",
# "filled_amount": "0.5",
# "type": "LIMIT",
# "time_in_force": "GOOD_TILL_CANCELLED"
# }
#
# fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# {
# "order": {
# "order_id": "66756a10-3e86-48f4-9678-b634c4b135b2",
# "account_id": "1eb2ad5d-55f1-40b5-bc92-7dc05869e905",
# "instrument_code": "BTC_EUR",
# "amount": "1234.5678",
# "filled_amount": "1234.5678",
# "side": "BUY",
# "type": "LIMIT",
# "status": "OPEN",
# "sequence": 123456789,
# "price": "1234.5678",
# "average_price": "1234.5678",
# "reason": "INSUFFICIENT_FUNDS",
# "time": "2019-08-24T14:15:22Z",
# "time_in_force": "GOOD_TILL_CANCELLED",
# "time_last_updated": "2019-08-24T14:15:22Z",
# "expire_after": "2019-08-24T14:15:22Z",
# "is_post_only": False,
# "time_triggered": "2019-08-24T14:15:22Z",
# "trigger_price": "1234.5678"
# },
# "trades": [
# {
# "fee": {
# "fee_amount": "0.0014",
# "fee_currency": "BTC",
# "fee_percentage": "0.1",
# "fee_group_id": "default",
# "fee_type": "TAKER",
# "running_trading_volume": "0.0"
# },
# "trade": {
# "trade_id": "fdff2bcc-37d6-4a2d-92a5-46e09c868664",
# "order_id": "36bb2437-7402-4794-bf26-4bdf03526439",
# "account_id": "a4c699f6-338d-4a26-941f-8f9853bfc4b9",
# "amount": "1.4",
# "side": "BUY",
# "instrument_code": "BTC_EUR",
# "price": "7341.4",
# "time": "2019-09-27T15:05:32.564Z",
# "sequence": 48670
# }
# }
# ]
# }
#
rawOrder = self.safe_value(order, 'order', order)
id = self.safe_string(rawOrder, 'order_id')
clientOrderId = self.safe_string(rawOrder, 'client_id')
timestamp = self.parse8601(self.safe_string(rawOrder, 'time'))
        status = self.parse_order_status(self.safe_string(rawOrder, 'status'))
marketId = self.safe_string(rawOrder, 'instrument_code')
symbol = self.safe_symbol(marketId, market, '_')
price = self.safe_number(rawOrder, 'price')
amount = self.safe_number(rawOrder, 'amount')
filledString = self.safe_string(rawOrder, 'filled_amount')
filled = self.parse_number(filledString)
side = self.safe_string_lower(rawOrder, 'side')
type = self.safe_string_lower(rawOrder, 'type')
timeInForce = self.parse_time_in_force(self.safe_string(rawOrder, 'time_in_force'))
stopPrice = self.safe_number(rawOrder, 'trigger_price')
postOnly = self.safe_value(rawOrder, 'is_post_only')
rawTrades = self.safe_value(order, 'trades', [])
trades = self.parse_trades(rawTrades, market, None, None, {
'type': type,
})
return self.safe_order({
'id': id,
'clientOrderId': clientOrderId,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': None,
'average': None,
'filled': filled,
'remaining': None,
'status': status,
# 'fee': None,
'trades': trades,
})
def parse_time_in_force(self, timeInForce):
timeInForces = {
'GOOD_TILL_CANCELLED': 'GTC',
'GOOD_TILL_TIME': 'GTT',
'IMMEDIATE_OR_CANCELLED': 'IOC',
'FILL_OR_KILL': 'FOK',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
uppercaseType = type.upper()
request = {
'instrument_code': market['id'],
'type': uppercaseType, # LIMIT, MARKET, STOP
'side': side.upper(), # or SELL
'amount': self.amount_to_precision(symbol, amount),
# "price": "1234.5678", # required for LIMIT and STOP orders
# "client_id": "d75fb03b-b599-49e9-b926-3f0b6d103206", # optional
# "time_in_force": "GOOD_TILL_CANCELLED", # limit orders only, GOOD_TILL_CANCELLED, GOOD_TILL_TIME, IMMEDIATE_OR_CANCELLED and FILL_OR_KILL
# "expire_after": "2020-07-02T19:40:13Z", # required for GOOD_TILL_TIME
# "is_post_only": False, # limit orders only, optional
# "trigger_price": "1234.5678" # required for stop orders
}
priceIsRequired = False
if uppercaseType == 'LIMIT' or uppercaseType == 'STOP':
priceIsRequired = True
if uppercaseType == 'STOP':
triggerPrice = self.safe_number(params, 'trigger_price')
if triggerPrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a trigger_price param for ' + type + ' orders')
request['trigger_price'] = self.price_to_precision(symbol, triggerPrice)
params = self.omit(params, 'trigger_price')
if priceIsRequired:
request['price'] = self.price_to_precision(symbol, price)
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_id')
if clientOrderId is not None:
request['client_id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client_id'])
response = await self.privatePostAccountOrders(self.extend(request, params))
#
# {
# "order_id": "d5492c24-2995-4c18-993a-5b8bf8fffc0d",
# "client_id": "d75fb03b-b599-49e9-b926-3f0b6d103206",
# "account_id": "a4c699f6-338d-4a26-941f-8f9853bfc4b9",
# "instrument_code": "BTC_EUR",
# "time": "2019-08-01T08:00:44.026Z",
# "side": "BUY",
# "price": "5000",
# "amount": "1",
# "filled_amount": "0.5",
# "type": "LIMIT",
# "time_in_force": "GOOD_TILL_CANCELLED"
# }
#
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_id')
params = self.omit(params, ['clientOrderId', 'client_id'])
method = 'privateDeleteAccountOrdersOrderId'
request = {}
if clientOrderId is not None:
method = 'privateDeleteAccountOrdersClientClientId'
request['client_id'] = clientOrderId
else:
request['order_id'] = id
response = await getattr(self, method)(self.extend(request, params))
#
# responds with an empty body
#
return response
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
request = {}
if symbol is not None:
market = self.market(symbol)
request['instrument_code'] = market['id']
response = await self.privateDeleteAccountOrders(self.extend(request, params))
#
# [
# "a10e9bd1-8f72-4cfe-9f1b-7f1c8a9bd8ee"
# ]
#
return response
async def cancel_orders(self, ids, symbol=None, params={}):
await self.load_markets()
request = {
'ids': ','.join(ids),
}
response = await self.privateDeleteAccountOrders(self.extend(request, params))
#
# [
# "a10e9bd1-8f72-4cfe-9f1b-7f1c8a9bd8ee"
# ]
#
return response
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'order_id': id,
}
response = await self.privateGetAccountOrdersOrderId(self.extend(request, params))
#
# {
# "order": {
# "order_id": "36bb2437-7402-4794-bf26-4bdf03526439",
# "account_id": "a4c699f6-338d-4a26-941f-8f9853bfc4b9",
# "time_last_updated": "2019-09-27T15:05:35.096Z",
# "sequence": 48782,
# "price": "7349.2",
# "filled_amount": "100.0",
# "status": "FILLED_FULLY",
# "amount": "100.0",
# "instrument_code": "BTC_EUR",
# "side": "BUY",
# "time": "2019-09-27T15:05:32.063Z",
# "type": "MARKET"
# },
# "trades": [
# {
# "fee": {
# "fee_amount": "0.0014",
# "fee_currency": "BTC",
# "fee_percentage": "0.1",
# "fee_group_id": "default",
# "fee_type": "TAKER",
# "running_trading_volume": "0.0"
# },
# "trade": {
# "trade_id": "fdff2bcc-37d6-4a2d-92a5-46e09c868664",
# "order_id": "36bb2437-7402-4794-bf26-4bdf03526439",
# "account_id": "a4c699f6-338d-4a26-941f-8f9853bfc4b9",
# "amount": "1.4",
# "side": "BUY",
# "instrument_code": "BTC_EUR",
# "price": "7341.4",
# "time": "2019-09-27T15:05:32.564Z",
# "sequence": 48670
# }
# }
# ]
# }
#
return self.parse_order(response)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'from': self.iso8601(since),
# 'to': self.iso8601(self.milliseconds()), # max range is 100 days
# 'instrument_code': market['id'],
# 'with_cancelled_and_rejected': False, # default is False, orders which have been cancelled by the user before being filled or rejected by the system as invalid, additionally, all inactive filled orders which would return with "with_just_filled_inactive"
# 'with_just_filled_inactive': False, # orders which have been filled and are no longer open, use of "with_cancelled_and_rejected" extends "with_just_filled_inactive" and in case both are specified the latter is ignored
            # 'with_just_orders': False, # do not return any trades corresponding to the orders, it may be significantly faster and should be used if the user is not interested in trade information
# 'max_page_size': 100,
# 'cursor': 'string', # pointer specifying the position from which the next pages should be returned
}
market = None
if symbol is not None:
market = self.market(symbol)
request['instrument_code'] = market['id']
if since is not None:
to = self.safe_string(params, 'to')
if to is None:
                raise ArgumentsRequired(self.id + ' fetchOrders() requires a "to" iso8601 string param when the since argument is specified, max range is 100 days')
request['from'] = self.iso8601(since)
if limit is not None:
request['max_page_size'] = limit
response = await self.privateGetAccountOrders(self.extend(request, params))
#
# {
# "order_history": [
# {
# "order": {
# "trigger_price": "12089.88",
# "order_id": "d453ca12-c650-46dd-9dee-66910d96bfc0",
# "account_id": "ef3a5f4c-cfcd-415e-ba89-5a9abf47b28a",
# "instrument_code": "BTC_USDT",
# "time": "2019-08-23T10:02:31.663Z",
# "side": "SELL",
# "price": "10159.76",
# "average_price": "10159.76",
# "amount": "0.2",
# "filled_amount": "0.2",
# "type": "STOP",
# "sequence": 8,
# "status": "FILLED_FULLY"
# },
# "trades": [
# {
# "fee": {
# "fee_amount": "0.4188869",
# "fee_currency": "USDT",
# "fee_percentage": "0.1",
# "fee_group_id": "default",
# "fee_type": "TAKER",
# "running_trading_volume": "0.0"
# },
# "trade": {
# "trade_id": "ec82896f-fd1b-4cbb-89df-a9da85ccbb4b",
# "order_id": "d453ca12-c650-46dd-9dee-66910d96bfc0",
# "account_id": "ef3a5f4c-cfcd-415e-ba89-5a9abf47b28a",
# "amount": "0.2",
# "side": "SELL",
# "instrument_code": "BTC_USDT",
# "price": "10159.76",
# "time": "2019-08-23T10:02:32.663Z",
# "sequence": 9
# }
# }
# ]
# },
# {
# "order": {
# "order_id": "5151a99e-f414-418f-8cf1-2568d0a63ea5",
# "account_id": "ef3a5f4c-cfcd-415e-ba89-5a9abf47b28a",
# "instrument_code": "BTC_USDT",
# "time": "2019-08-23T10:01:36.773Z",
# "side": "SELL",
# "price": "12289.88",
# "amount": "0.5",
# "filled_amount": "0.0",
# "type": "LIMIT",
# "sequence": 7,
# "status": "OPEN"
# },
# "trades": []
# },
# {
# "order": {
# "order_id": "ac80d857-75e1-4733-9070-fd4288395fdc",
# "account_id": "ef3a5f4c-cfcd-415e-ba89-5a9abf47b28a",
# "instrument_code": "BTC_USDT",
# "time": "2019-08-23T10:01:25.031Z",
# "side": "SELL",
# "price": "11089.88",
# "amount": "0.1",
# "filled_amount": "0.0",
# "type": "LIMIT",
# "sequence": 6,
# "status": "OPEN"
# },
# "trades": []
# }
# ],
# "max_page_size": 100
# }
#
orderHistory = self.safe_value(response, 'order_history', [])
return self.parse_orders(orderHistory, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'with_cancelled_and_rejected': True, # default is False, orders which have been cancelled by the user before being filled or rejected by the system as invalid, additionally, all inactive filled orders which would return with "with_just_filled_inactive"
}
return await self.fetch_open_orders(symbol, since, limit, self.extend(request, params))
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'order_id': id,
# 'max_page_size': 100,
# 'cursor': 'string', # pointer specifying the position from which the next pages should be returned
}
if limit is not None:
request['max_page_size'] = limit
response = await self.privateGetAccountOrdersOrderIdTrades(self.extend(request, params))
#
# {
# "trade_history": [
# {
# "trade": {
# "trade_id": "2b42efcd-d5b7-4a56-8e12-b69ffd68c5ef",
# "order_id": "66756a10-3e86-48f4-9678-b634c4b135b2",
# "account_id": "c2d0076a-c20d-41f8-9e9a-1a1d028b2b58",
# "amount": "1234.5678",
# "side": "BUY",
# "instrument_code": "BTC_EUR",
# "price": "1234.5678",
# "time": "2019-08-24T14:15:22Z",
# "price_tick_sequence": 0,
# "sequence": 123456789
# },
# "fee": {
# "fee_amount": "1234.5678",
# "fee_percentage": "1234.5678",
# "fee_group_id": "default",
# "running_trading_volume": "1234.5678",
# "fee_currency": "BTC",
# "fee_type": "TAKER"
# }
# }
# ],
# "max_page_size": 0,
# "cursor": "string"
# }
#
tradeHistory = self.safe_value(response, 'trade_history', [])
market = None
if symbol is not None:
market = self.market(symbol)
return self.parse_trades(tradeHistory, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'from': self.iso8601(since),
# 'to': self.iso8601(self.milliseconds()), # max range is 100 days
# 'instrument_code': market['id'],
# 'max_page_size': 100,
# 'cursor': 'string', # pointer specifying the position from which the next pages should be returned
}
market = None
if symbol is not None:
market = self.market(symbol)
request['instrument_code'] = market['id']
if since is not None:
to = self.safe_string(params, 'to')
if to is None:
                raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a "to" iso8601 string param when the since argument is specified, max range is 100 days')
request['from'] = self.iso8601(since)
if limit is not None:
request['max_page_size'] = limit
response = await self.privateGetAccountTrades(self.extend(request, params))
#
# {
# "trade_history": [
# {
# "trade": {
# "trade_id": "2b42efcd-d5b7-4a56-8e12-b69ffd68c5ef",
# "order_id": "66756a10-3e86-48f4-9678-b634c4b135b2",
# "account_id": "c2d0076a-c20d-41f8-9e9a-1a1d028b2b58",
# "amount": "1234.5678",
# "side": "BUY",
# "instrument_code": "BTC_EUR",
# "price": "1234.5678",
# "time": "2019-08-24T14:15:22Z",
# "price_tick_sequence": 0,
# "sequence": 123456789
# },
# "fee": {
# "fee_amount": "1234.5678",
# "fee_percentage": "1234.5678",
# "fee_group_id": "default",
# "running_trading_volume": "1234.5678",
# "fee_currency": "BTC",
# "fee_type": "TAKER"
# }
# }
# ],
# "max_page_size": 0,
# "cursor": "string"
# }
#
tradeHistory = self.safe_value(response, 'trade_history', [])
return self.parse_trades(tradeHistory, market, since, limit)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
self.check_required_credentials()
headers = {
'Accept': 'application/json',
'Authorization': 'Bearer ' + self.apiKey,
}
if method == 'POST':
body = self.json(query)
headers['Content-Type'] = 'application/json'
else:
if query:
url += '?' + self.urlencode(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
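    # Editor's note (illustrative, apiKey value is a placeholder): for a private
    # GET of 'account/balances', sign() above produces
    # url = 'https://api.exchange.bitpanda.com/public/v1/account/balances',
    # headers = {'Accept': 'application/json', 'Authorization': 'Bearer <apiKey>'},
    # body = None.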
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"error":"MISSING_FROM_PARAM"}
# {"error":"MISSING_TO_PARAM"}
# {"error":"CANDLESTICKS_TIME_RANGE_TOO_BIG"}
#
message = self.safe_string(response, 'error')
if message is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
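# --- Hedged usage sketch (editor addition, not part of the generated file) ---
# Minimal async example of driving the exchange class above through ccxt; the
# symbol, timeframe and limit are arbitrary illustrative values.
if __name__ == '__main__':
    import asyncio
    import ccxt.async_support as ccxt_async
    async def _demo():
        exchange = ccxt_async.bitpanda()
        try:
            candles = await exchange.fetch_ohlcv('BTC/EUR', '1h', limit=10)
            for ts, o, h, l, c, v in candles:
                print(exchange.iso8601(ts), o, h, l, c, v)
        finally:
            await exchange.close()
    asyncio.run(_demo())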
|
[
"[email protected]"
] | |
c39858057a3901c9d072ff45806c19924868a44f
|
c4e3a57511eb7a39425847bdcd38a2207e560a13
|
/Algorithm/1026_Maximum_Difference_Between_NodeAncestor.py
|
7d5e3de66de692beca64df48a2fddd569d79d7de
|
[] |
no_license
|
Gi1ia/TechNoteBook
|
57af562b78278b7f937b906d1154b19f2c077ebd
|
1a3c1f4d6e9d3444039f087763b93241f4ba7892
|
refs/heads/master
| 2021-06-03T02:31:24.986063 | 2020-07-16T22:25:56 | 2020-07-16T22:25:56 | 141,761,958 | 7 | 1 | null | 2018-11-05T01:09:46 | 2018-07-20T22:06:12 |
HTML
|
UTF-8
|
Python
| false | false | 2,042 |
py
|
"""Given the root of a binary tree,
find the maximum value V for which there exist different nodes A and B where
V = |A.val - B.val| and A is an ancestor of B.
(A node A is an ancestor of B if either: any child of A is equal to B,
or any child of A is an ancestor of B.)
Input: [8,3,10,1,6,null,14,null,null,4,7,13]
Output: 7
Explanation:
We have various ancestor-node differences, some of which are given below :
|8 - 3| = 5
|3 - 7| = 4
|8 - 1| = 7
|10 - 13| = 3
Among all possible differences, the maximum value of 7 is obtained by |8 - 1| = 7.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def maxAncestorDiff(self, root: TreeNode) -> int:
""" Solution I
        The stack-based solution has much better performance.
        Each stack entry carries the running max and min seen along the path so far.
"""
res = 0
stack = [[root, root.val, root.val]] # node, max, min
while stack:
temp, cur_mx, cur_mn = stack.pop()
if temp.val > cur_mx:
cur_mx = temp.val
if temp.val < cur_mn:
cur_mn = temp.val
if cur_mx - cur_mn > res:
res = cur_mx - cur_mn
if temp.left:
stack.append([temp.left, cur_mx, cur_mn])
if temp.right:
stack.append([temp.right, cur_mx, cur_mn])
return res
def maxAncestorDiff_dfs(self, root: TreeNode) -> int:
"""Solution II
        The DFS solution is cleaner and more straightforward.
"""
return self.dfs(root, root.val, root.val)
def dfs(self, root, min_val, max_val):
if not root:
return max_val - min_val
max_val = max(max_val, root.val)
min_val = min(min_val, root.val)
return max(self.dfs(root.left, min_val, max_val), self.dfs(root.right, min_val, max_val))
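# --- Hedged usage example (editor addition, not part of the original solution) ---
# Builds the tree from the docstring, [8,3,10,1,6,null,14,null,null,4,7,13],
# and runs both solutions; each should print 7.
if __name__ == '__main__':
    root = TreeNode(8)
    root.left = TreeNode(3)
    root.right = TreeNode(10)
    root.left.left = TreeNode(1)
    root.left.right = TreeNode(6)
    root.left.right.left = TreeNode(4)
    root.left.right.right = TreeNode(7)
    root.right.right = TreeNode(14)
    root.right.right.left = TreeNode(13)
    solution = Solution()
    print(solution.maxAncestorDiff(root))      # expected: 7
    print(solution.maxAncestorDiff_dfs(root))  # expected: 7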
|
[
"[email protected]"
] | |
3de59b86a04b9a0c4689617def82d2aab258a76b
|
b586cec578da0e1904d07468a7f49dacc0af5e99
|
/chapter_4/util/Checker.py
|
497c6bddf812359153f10ce45072330700e03e0d
|
[
"MIT"
] |
permissive
|
LifeOfGame/mongodb_redis
|
bf21b989eeb95eeb39f684363f9436677252a63e
|
834fbdd65d4ea9e1e0056b711781e5f27a40333b
|
refs/heads/master
| 2021-06-22T17:01:19.497132 | 2019-08-20T06:54:21 | 2019-08-20T06:54:21 | 203,295,895 | 0 | 0 |
MIT
| 2021-03-20T01:37:02 | 2019-08-20T03:53:06 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,253 |
py
|
import re
class Checker(object):
FIELD_LIST = {'name', 'age', 'birthday', 'origin_home', 'current_home'}
def check_add_fields_exists(self, dict_tobe_inserted):
if not dict_tobe_inserted:
return False
return self.FIELD_LIST == set(dict_tobe_inserted.keys())
def check_update_fields_exists(self, dict_tobe_inserted):
if 'people_id' not in dict_tobe_inserted:
return False
return self.check_add_fields_exists(dict_tobe_inserted.get('updated_info', {}))
def check_value_valid(self, dict_tobe_inserted):
name = dict_tobe_inserted['name']
if not name:
            return 'Name must not be empty'
age = dict_tobe_inserted['age']
if not isinstance(age, int) or age < 0 or age > 120:
            return 'Age must be an integer between 0 and 120'
birthday = dict_tobe_inserted['birthday']
        if not re.match(r'\d{4}-\d{2}-\d{2}', birthday):
            return 'Birthday must be in the format yyyy-mm-dd'
def transfer_people_id(self, people_id):
if isinstance(people_id, int):
return people_id
try:
people_id = int(people_id)
return people_id
except ValueError:
return -1
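# --- Hedged usage example (editor addition, not part of the original file) ---
# Illustrative payload; the field values are arbitrary.
if __name__ == '__main__':
    checker = Checker()
    person = {'name': 'Alice', 'age': 30, 'birthday': '1990-01-02',
              'origin_home': 'Beijing', 'current_home': 'Shanghai'}
    print(checker.check_add_fields_exists(person))  # True: all required fields present
    print(checker.check_value_valid(person))        # None: every value passes validation
    print(checker.transfer_people_id('42'))         # 42
    print(checker.transfer_people_id('abc'))        # -1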
|
[
"[email protected]"
] | |
c91d77705d5b2a34a90a236ac8a1c0bc868f67b7
|
e6c65e2e354336a4bea5b6a4ccbccd3682915fe2
|
/out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__protobuf_3_6_1/google/protobuf/message.py
|
d36659371345f52866d822bd56c3eab76f705a14
|
[
"Apache-2.0"
] |
permissive
|
rasalt/fhir-datalab
|
c30ab773d84983dd04a37e9d0ddec8bf2824b8a4
|
3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de
|
refs/heads/master
| 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 133 |
py
|
/home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__protobuf_3_6_1/google/protobuf/message.py
|
[
"[email protected]"
] | |
122b4ac2a57cb41be1d2e24441f5237816146664
|
f9a2e67dd2f40b37d8ff81bf6cdce47c38d2dee4
|
/.c9/metadata/environment/ib_miniprojects_backend/project_management_portal_auth/storages/storage_implementation.py
|
bd52c8d1194655779f57bcac57789941d2efeb35
|
[] |
no_license
|
mohan277/backend_repo
|
4eae065cf0fffa29866a2b549028cb8df4c97643
|
25dbb4d0f1c174b6da95f4c73737e49db9978429
|
refs/heads/master
| 2022-11-13T00:08:37.600743 | 2020-07-09T04:36:44 | 2020-07-09T04:36:44 | 278,259,585 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,597 |
py
|
{"filter":false,"title":"storage_implementation.py","tooltip":"/ib_miniprojects_backend/project_management_portal_auth/storages/storage_implementation.py","undoManager":{"mark":61,"position":61,"stack":[[{"start":{"row":2,"column":5},"end":{"row":2,"column":17},"action":"remove","lines":["fb_post_auth"],"id":2},{"start":{"row":2,"column":5},"end":{"row":2,"column":35},"action":"insert","lines":["project_management_portal_auth"]},{"start":{"row":3,"column":5},"end":{"row":3,"column":17},"action":"remove","lines":["fb_post_auth"]},{"start":{"row":3,"column":5},"end":{"row":3,"column":35},"action":"insert","lines":["project_management_portal_auth"]},{"start":{"row":5,"column":5},"end":{"row":5,"column":17},"action":"remove","lines":["fb_post_auth"]},{"start":{"row":5,"column":5},"end":{"row":5,"column":35},"action":"insert","lines":["project_management_portal_auth"]}],[{"start":{"row":2,"column":41},"end":{"row":2,"column":48},"action":"remove","lines":["fb_post"],"id":3},{"start":{"row":2,"column":41},"end":{"row":2,"column":42},"action":"insert","lines":["u"]},{"start":{"row":2,"column":42},"end":{"row":2,"column":43},"action":"insert","lines":["s"]},{"start":{"row":2,"column":43},"end":{"row":2,"column":44},"action":"insert","lines":["e"]},{"start":{"row":2,"column":44},"end":{"row":2,"column":45},"action":"insert","lines":["r"]}],[{"start":{"row":2,"column":41},"end":{"row":2,"column":45},"action":"remove","lines":["user"],"id":4},{"start":{"row":2,"column":41},"end":{"row":2,"column":49},"action":"insert","lines":["user_dto"]}],[{"start":{"row":3,"column":75},"end":{"row":3,"column":76},"action":"insert","lines":["\\"],"id":5}],[{"start":{"row":3,"column":76},"end":{"row":4,"column":0},"action":"insert","lines":["",""],"id":6}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":4},"action":"insert","lines":[" "],"id":7}],[{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"remove","lines":[" "],"id":8},{"start":{"row":4,"column":12},"end":{"row":5,"column":0},"action":"remove","lines":["",""]},{"start":{"row":4,"column":11},"end":{"row":4,"column":12},"action":"remove","lines":["\\"]}],[{"start":{"row":4,"column":10},"end":{"row":4,"column":11},"action":"remove","lines":[" "],"id":9}],[{"start":{"row":4,"column":10},"end":{"row":4,"column":11},"action":"insert","lines":[" "],"id":10}],[{"start":{"row":22,"column":60},"end":{"row":23,"column":0},"action":"insert","lines":["",""],"id":14},{"start":{"row":23,"column":0},"end":{"row":23,"column":23},"action":"insert","lines":[" "]}],[{"start":{"row":23,"column":22},"end":{"row":23,"column":23},"action":"remove","lines":[" "],"id":15},{"start":{"row":23,"column":21},"end":{"row":23,"column":22},"action":"remove","lines":[" "]},{"start":{"row":23,"column":20},"end":{"row":23,"column":21},"action":"remove","lines":[" "]},{"start":{"row":23,"column":16},"end":{"row":23,"column":20},"action":"remove","lines":[" "]},{"start":{"row":23,"column":12},"end":{"row":23,"column":16},"action":"remove","lines":[" "]},{"start":{"row":23,"column":8},"end":{"row":23,"column":12},"action":"remove","lines":[" "]}],[{"start":{"row":23,"column":4},"end":{"row":23,"column":8},"action":"remove","lines":[" "],"id":16}],[{"start":{"row":23,"column":4},"end":{"row":24,"column":0},"action":"insert","lines":["",""],"id":17},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":24,"column":4},"end":{"row":25,"column":0},"action":"insert","lines":["",""]},{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":25,"column":4},"end":{"row":28,"column":0},"action":"insert","lines":[" def is_admin(self, user_id: int):"," is_admin = User.objects.get(id=user_id).is_admin"," return is_admin",""],"id":18}],[{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"remove","lines":[" "],"id":19},{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"remove","lines":[" "]},{"start":{"row":27,"column":0},"end":{"row":27,"column":4},"action":"remove","lines":[" "]}],[{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" "],"id":20},{"start":{"row":27,"column":0},"end":{"row":27,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":23,"column":0},"end":{"row":23,"column":4},"action":"remove","lines":[" "],"id":21},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"remove","lines":[" "]}],[{"start":{"row":28,"column":0},"end":{"row":29,"column":0},"action":"remove","lines":["",""],"id":22}],[{"start":{"row":26,"column":56},"end":{"row":27,"column":0},"action":"insert","lines":["",""],"id":23},{"start":{"row":27,"column":0},"end":{"row":27,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":5,"column":54},"end":{"row":5,"column":55},"action":"insert","lines":[","],"id":24}],[{"start":{"row":5,"column":55},"end":{"row":5,"column":56},"action":"insert","lines":[" "],"id":25},{"start":{"row":5,"column":56},"end":{"row":5,"column":57},"action":"insert","lines":["I"]},{"start":{"row":5,"column":57},"end":{"row":5,"column":58},"action":"insert","lines":["s"]},{"start":{"row":5,"column":58},"end":{"row":5,"column":59},"action":"insert","lines":["A"]}],[{"start":{"row":5,"column":59},"end":{"row":5,"column":60},"action":"insert","lines":["m"],"id":26}],[{"start":{"row":5,"column":59},"end":{"row":5,"column":60},"action":"remove","lines":["m"],"id":27}],[{"start":{"row":5,"column":59},"end":{"row":5,"column":60},"action":"insert","lines":["d"],"id":28},{"start":{"row":5,"column":60},"end":{"row":5,"column":61},"action":"insert","lines":["m"]},{"start":{"row":5,"column":61},"end":{"row":5,"column":62},"action":"insert","lines":["i"]},{"start":{"row":5,"column":62},"end":{"row":5,"column":63},"action":"insert","lines":["n"]}],[{"start":{"row":5,"column":63},"end":{"row":5,"column":64},"action":"insert","lines":["D"],"id":29},{"start":{"row":5,"column":64},"end":{"row":5,"column":65},"action":"insert","lines":["t"]},{"start":{"row":5,"column":65},"end":{"row":5,"column":66},"action":"insert","lines":["o"]}],[{"start":{"row":5,"column":54},"end":{"row":5,"column":66},"action":"remove","lines":[", IsAdminDto"],"id":30}],[{"start":{"row":2,"column":64},"end":{"row":2,"column":65},"action":"insert","lines":[","],"id":31}],[{"start":{"row":2,"column":65},"end":{"row":2,"column":66},"action":"insert","lines":[" "],"id":32}],[{"start":{"row":2,"column":65},"end":{"row":2,"column":66},"action":"remove","lines":[" "],"id":33},{"start":{"row":2,"column":64},"end":{"row":2,"column":65},"action":"remove","lines":[","]}],[{"start":{"row":2,"column":64},"end":{"row":2,"column":76},"action":"insert","lines":[", IsAdminDto"],"id":34}],[{"start":{"row":27,"column":0},"end":{"row":27,"column":8},"action":"remove","lines":[" 
"],"id":35}],[{"start":{"row":2,"column":75},"end":{"row":2,"column":76},"action":"remove","lines":["o"],"id":36},{"start":{"row":2,"column":74},"end":{"row":2,"column":75},"action":"remove","lines":["t"]}],[{"start":{"row":2,"column":74},"end":{"row":2,"column":75},"action":"insert","lines":["T"],"id":37},{"start":{"row":2,"column":75},"end":{"row":2,"column":76},"action":"insert","lines":["O"]}],[{"start":{"row":26,"column":56},"end":{"row":27,"column":0},"action":"insert","lines":["",""],"id":38},{"start":{"row":27,"column":0},"end":{"row":27,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":27,"column":8},"end":{"row":27,"column":9},"action":"insert","lines":["i"],"id":39},{"start":{"row":27,"column":9},"end":{"row":27,"column":10},"action":"insert","lines":["s"]}],[{"start":{"row":27,"column":8},"end":{"row":27,"column":10},"action":"remove","lines":["is"],"id":40},{"start":{"row":27,"column":8},"end":{"row":27,"column":18},"action":"insert","lines":["is_admin()"]}],[{"start":{"row":27,"column":16},"end":{"row":27,"column":18},"action":"remove","lines":["()"],"id":41}],[{"start":{"row":27,"column":16},"end":{"row":27,"column":17},"action":"insert","lines":["_"],"id":42}],[{"start":{"row":27,"column":8},"end":{"row":27,"column":17},"action":"remove","lines":["is_admin_"],"id":43},{"start":{"row":27,"column":8},"end":{"row":27,"column":20},"action":"insert","lines":["is_admin_dto"]}],[{"start":{"row":27,"column":20},"end":{"row":27,"column":21},"action":"insert","lines":[" "],"id":44},{"start":{"row":27,"column":21},"end":{"row":27,"column":22},"action":"insert","lines":["="]}],[{"start":{"row":27,"column":22},"end":{"row":27,"column":23},"action":"insert","lines":[" "],"id":45}],[{"start":{"row":27,"column":23},"end":{"row":27,"column":24},"action":"insert","lines":["I"],"id":46}],[{"start":{"row":27,"column":23},"end":{"row":27,"column":24},"action":"remove","lines":["I"],"id":47},{"start":{"row":27,"column":23},"end":{"row":27,"column":33},"action":"insert","lines":["IsAdminDTO"]}],[{"start":{"row":27,"column":33},"end":{"row":27,"column":35},"action":"insert","lines":["()"],"id":48}],[{"start":{"row":27,"column":34},"end":{"row":27,"column":35},"action":"insert","lines":["i"],"id":49},{"start":{"row":27,"column":35},"end":{"row":27,"column":36},"action":"insert","lines":["s"]}],[{"start":{"row":27,"column":34},"end":{"row":27,"column":36},"action":"remove","lines":["is"],"id":50},{"start":{"row":27,"column":34},"end":{"row":27,"column":44},"action":"insert","lines":["is_admin()"]}],[{"start":{"row":27,"column":42},"end":{"row":27,"column":44},"action":"remove","lines":["()"],"id":51}],[{"start":{"row":27,"column":42},"end":{"row":27,"column":43},"action":"insert","lines":["="],"id":52},{"start":{"row":27,"column":43},"end":{"row":27,"column":44},"action":"insert","lines":["i"]},{"start":{"row":27,"column":44},"end":{"row":27,"column":45},"action":"insert","lines":["s"]}],[{"start":{"row":27,"column":43},"end":{"row":27,"column":45},"action":"remove","lines":["is"],"id":53},{"start":{"row":27,"column":43},"end":{"row":27,"column":53},"action":"insert","lines":["is_admin()"]}],[{"start":{"row":27,"column":51},"end":{"row":27,"column":53},"action":"remove","lines":["()"],"id":54}],[{"start":{"row":27,"column":52},"end":{"row":28,"column":0},"action":"remove","lines":["",""],"id":55}],[{"start":{"row":28,"column":23},"end":{"row":28,"column":24},"action":"insert","lines":["_"],"id":56},{"start":{"row":28,"column":24},"end":{"row":28,"column":25},"action":"insert","lines":["d
"]},{"start":{"row":28,"column":25},"end":{"row":28,"column":26},"action":"insert","lines":["t"]},{"start":{"row":28,"column":26},"end":{"row":28,"column":27},"action":"insert","lines":["o"]}],[{"start":{"row":11,"column":16},"end":{"row":11,"column":17},"action":"insert","lines":["l"],"id":57},{"start":{"row":11,"column":17},"end":{"row":11,"column":18},"action":"insert","lines":["i"]},{"start":{"row":11,"column":18},"end":{"row":11,"column":19},"action":"insert","lines":["s"]},{"start":{"row":11,"column":19},"end":{"row":11,"column":20},"action":"insert","lines":["t"]},{"start":{"row":11,"column":20},"end":{"row":11,"column":21},"action":"insert","lines":["("]}],[{"start":{"row":11,"column":57},"end":{"row":11,"column":58},"action":"insert","lines":[")"],"id":58}],[{"start":{"row":21,"column":38},"end":{"row":22,"column":0},"action":"insert","lines":["",""],"id":59},{"start":{"row":22,"column":0},"end":{"row":22,"column":23},"action":"insert","lines":[" "]},{"start":{"row":22,"column":23},"end":{"row":22,"column":24},"action":"insert","lines":["i"]},{"start":{"row":22,"column":24},"end":{"row":22,"column":25},"action":"insert","lines":["s"]}],[{"start":{"row":22,"column":23},"end":{"row":22,"column":25},"action":"remove","lines":["is"],"id":60},{"start":{"row":22,"column":23},"end":{"row":22,"column":33},"action":"insert","lines":["is_admin()"]}],[{"start":{"row":22,"column":31},"end":{"row":22,"column":33},"action":"remove","lines":["()"],"id":61}],[{"start":{"row":22,"column":31},"end":{"row":22,"column":32},"action":"insert","lines":["="],"id":62},{"start":{"row":22,"column":32},"end":{"row":22,"column":33},"action":"insert","lines":["u"]},{"start":{"row":22,"column":33},"end":{"row":22,"column":34},"action":"insert","lines":["s"]},{"start":{"row":22,"column":34},"end":{"row":22,"column":35},"action":"insert","lines":["e"]},{"start":{"row":22,"column":35},"end":{"row":22,"column":36},"action":"insert","lines":["r"]}],[{"start":{"row":22,"column":32},"end":{"row":22,"column":36},"action":"remove","lines":["user"],"id":63},{"start":{"row":22,"column":32},"end":{"row":22,"column":36},"action":"insert","lines":["user"]}],[{"start":{"row":22,"column":36},"end":{"row":22,"column":37},"action":"insert","lines":["."],"id":64},{"start":{"row":22,"column":37},"end":{"row":22,"column":38},"action":"insert","lines":["i"]},{"start":{"row":22,"column":38},"end":{"row":22,"column":39},"action":"insert","lines":["s"]}],[{"start":{"row":22,"column":37},"end":{"row":22,"column":39},"action":"remove","lines":["is"],"id":65},{"start":{"row":22,"column":37},"end":{"row":22,"column":45},"action":"insert","lines":["is_admin"]}],[{"start":{"row":22,"column":45},"end":{"row":22,"column":46},"action":"insert","lines":[","],"id":66}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":25,"column":0},"end":{"row":25,"column":0},"isBackwards":true},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1593530927862,"hash":"9ea4d02a4395a101d8d414387f13e7b1af76c3bd"}
|
[
"[email protected]"
] | |
55e6472b2b061c94777f53c74adbd2c5e99bbe6c
|
ae8a1631f1b0da3cbe7a61cc6ad8c4839d3017e2
|
/experiments/experiments_toy/grid_search/run_grid_search_bnmtf_gibbs.py
|
c2d9d99fbb2d5384a7f33b6803e3ced4b49aef4a
|
[
"Apache-2.0"
] |
permissive
|
hansaimlim/BNMTF
|
ce3a5734feed209d284d98b5db508f944781c880
|
9cf8ad6475dac5dc7ece9d6dffb7f6f59a71ac18
|
refs/heads/master
| 2021-01-19T18:47:41.870310 | 2017-02-08T16:26:39 | 2017-02-08T16:26:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,030 |
py
|
"""
Run the grid search method for finding the best values for K and L for BNMTF.
We use the parameters for the true priors.
For BNMTF I find that the BIC is a better estimator - the log likelihood is
high for higher values for K and L than the true ones, same for the AIC. With
the BIC we get a nice peak just below the true K and L (for true K=L=5, at K=L=4).
"""
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)
from BNMTF.data_toy.bnmtf.generate_bnmtf import generate_dataset, try_generate_M
from BNMTF.code.cross_validation.grid_search_bnmtf import GridSearch
from BNMTF.code.models.bnmtf_gibbs_optimised import bnmtf_gibbs_optimised
import numpy, matplotlib.pyplot as plt
import scipy.interpolate
##########
restarts = 5
iterations = 100
burn_in = 90
thinning = 1
I, J = 20,20
true_K, true_L = 3,3
values_K, values_L = range(1,4+1), range(1,4+1)
fraction_unknown = 0.1
attempts_M = 100
alpha, beta = 100., 1. #1., 1.
tau = alpha / beta
lambdaF = numpy.ones((I,true_K))
lambdaS = numpy.ones((true_K,true_L))
lambdaG = numpy.ones((J,true_L))
classifier = bnmtf_gibbs_optimised
initFG = 'kmeans'
initS = 'random'
# Generate data
(_,_,_,_,_,R) = generate_dataset(I,J,true_K,true_L,lambdaF,lambdaS,lambdaG,tau)
M = try_generate_M(I,J,fraction_unknown,attempts_M)
# Run the line search. The priors lambdaF,S,G need to be a single value (recall K,L is unknown)
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF[0,0], 'lambdaS':lambdaS[0,0], 'lambdaG':lambdaG[0,0] }
grid_search = GridSearch(classifier,values_K,values_L,R,M,priors,initS,initFG,iterations,restarts)
grid_search.search(burn_in,thinning)
# Plot the performances of all three metrics
for metric in ['loglikelihood', 'BIC', 'AIC','MSE']:
# Make three lists of indices X,Y,Z (K,L,metric)
values = numpy.array(grid_search.all_values(metric)).flatten()
list_values_K = numpy.array([values_K for l in range(0,len(values_L))]).T.flatten()
list_values_L = numpy.array([values_L for k in range(0,len(values_K))]).flatten()
# Set up a regular grid of interpolation points
Ki, Li = (numpy.linspace(min(list_values_K), max(list_values_K), 100),
numpy.linspace(min(list_values_L), max(list_values_L), 100))
Ki, Li = numpy.meshgrid(Ki, Li)
# Interpolate
rbf = scipy.interpolate.Rbf(list_values_K, list_values_L, values, function='linear')
values_i = rbf(Ki, Li)
# Plot
plt.figure()
plt.imshow(values_i, cmap='jet_r',
vmin=min(values), vmax=max(values), origin='lower',
extent=[min(values_K), max(values_K), min(values_L), max(values_L)])
plt.scatter(list_values_K, list_values_L, c=values, cmap='jet_r')
plt.colorbar()
plt.title("Metric: %s." % metric)
plt.xlabel("K")
plt.ylabel("L")
plt.show()
# Print the best value
best_K,best_L = grid_search.best_value(metric)
print "Best K,L for metric %s: %s,%s." % (metric,best_K,best_L)
|
[
"[email protected]"
] | |
88e4e6f9370a324cb7e546f9137cdd10522425d3
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/ac07c6b5e6bef82642d0ebb9c8574e011f298ec7-<dispatch>-fix.py
|
1845d2bcd937c3ee3fdd46301e50a4e77389de52
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 847 |
py
|
@ensure_connected
def dispatch(self, rpc_command=None, source=None, filter=None):
"\n Execute rpc on the remote device eg. dispatch('clear-arp-table')\n :param rpc_command: specifies rpc command to be dispatched either in plain text or in xml element format (depending on command)\n :param source: name of the configuration datastore being queried\n :param filter: specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)\n :return: Returns xml string containing the RPC response received from remote host\n "
if (rpc_command is None):
raise ValueError('rpc_command value must be provided')
req = fromstring(rpc_command)
resp = self.m.dispatch(req, source=source, filter=filter)
return (resp.data_xml if resp.data_ele else resp.xml)
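# Illustrative usage only (the connection object and RPC strings below are
# placeholders, not taken from this snippet): with an established netconf
# connection exposing this method, either a bare command name or an XML string
# can be dispatched, e.g.
#   reply_xml = connection.dispatch('clear-arp-table')
#   reply_xml = connection.dispatch('<get-chassis-inventory/>', source='running')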
|
[
"[email protected]"
] | |
2a4f4d986db59052b27d1c83c4126eaa341aae86
|
95c71453ed6cc6f9b94f38a3c1655680618d71a4
|
/kickstart/2019/RoundB/C/C-solve.py
|
c5fc44ebd49349f2f21430820232d8ea2d77f83c
|
[] |
no_license
|
ZX1209/gl-algorithm-practise
|
95f4d6627c1dbaf2b70be90149d897f003f9cb3a
|
dd0a1c92414e12d82053c3df981897e975063bb8
|
refs/heads/master
| 2020-05-16T14:56:34.568878 | 2019-12-27T07:37:11 | 2019-12-27T07:37:11 | 183,116,501 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 991 |
py
|
import logging
logging.basicConfig(level=logging.INFO)
from collections import Counter
def Count(i, j, counters, S):
ans = 0
tmpCounter = counters[j + 1] - counters[i]
for v in tmpCounter.values():
if v <= S:
ans += v
return ans
def main():
T = int(input())
for t in range(T):
answer = 0
N, S = [int(c) for c in input().split()]
A = input().split()
logging.debug((N, S, A))
tmpCounter = Counter()
counters = [tmpCounter.copy()]
for i in range(len(A)):
tmpCounter.update([A[i]])
counters.append(tmpCounter.copy())
tmpMax = 0
for i in range(len(A)):
for j in range(i + 1, len(A)):
tmp = Count(i, j, counters, S)
# logging.debug(tmp)
if tmp > tmpMax:
tmpMax = tmp
print("Case #" + str(t + 1) + ": " + str(tmpMax))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
1e7fd7a6ddd8dfbb09be12e0e07a753cc3d0c789
|
a6155458f58f2e40e2583557cf807eda52a0013b
|
/catalog/project.py
|
1dd22dfbdb35631cbd11ea846986af4f79fa6d8b
|
[] |
no_license
|
georgeplusplus-ZZ/udacity-project-2
|
ab6c80052cc601508743fd5003ae5d09103d8fbb
|
5442f1f99808af2f8663d59fdbd02be7dd7e425a
|
refs/heads/master
| 2021-10-26T02:47:28.841918 | 2019-04-10T01:52:40 | 2019-04-10T01:52:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,782 |
py
|
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import url_for
from flask import jsonify
from flask import flash
from flask import abort
from flask import session as login_session
from sqlalchemy import create_engine, desc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Attraction
import os
import random
import string
# Globals
app = Flask(__name__, static_url_path='/static')
engine = create_engine('sqlite:///nycattractions.db', connect_args={'check_same_thread':False})
Base.metadata.bind = engine
DBSession = sessionmaker(bind= engine)
session = DBSession()
@app.route('/')
@app.route('/home')
def homepageContent():
items = session.query(Attraction).order_by(Attraction.created_at).limit(5).all()
return render_template('home.html', items= items)
# Create anti-forgery state token
@app.route('/login')
def showLogin():
state = ''.join(
random.choice(string.ascii_uppercase + string.digits) for x in range(32))
login_session['state'] = state
# return "The current session state is %s" % login_session['state']
return render_template('login.html', STATE=state)
@app.route('/view/<string:attraction_type>')
def attractionContent(attraction_type):
attractions = session.query(Attraction).filter(Attraction.category.has(name= attraction_type.lower())).all()
if not len(attractions):
abort(404)
return render_template('attractions.html', attractions= attractions)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host = '0.0.0.0', port = 5000)
|
[
"[email protected]"
] | |
73737b1228d0aa26332236f0bed3f9f92b2e599c
|
47175228ce25812549eb5203fc8b86b76fec6eb9
|
/API_scripts/dfp/dfp_python3/v201502/custom_targeting_service/create_custom_targeting_keys_and_values.py
|
4217821f245fa2d62bbcf1f3aa0132710d6031de
|
[] |
no_license
|
noelleli/documentation
|
c1efe9c2bdb169baa771e9c23d8f4e2683c2fe20
|
a375698b4cf0776d52d3a9d3c17d20143bd252e1
|
refs/heads/master
| 2021-01-10T05:41:30.648343 | 2016-02-13T05:46:31 | 2016-02-13T05:46:31 | 51,477,460 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,204 |
py
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates new custom targeting keys and values.
To determine which custom targeting keys and values exist, run
get_all_custom_targeting_keys_and_values.py. To target these custom targeting
keys and values, run target_custom_criteria_example.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CustomTargetingService.createCustomTargetingKeys
CustomTargetingService.createCustomTargetingValues
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201502')
# Create custom targeting key objects.
keys = [
{
'displayName': 'gender',
'name': 'g',
'type': 'PREDEFINED'
},
{
'displayName': 'car model',
'name': 'c',
'type': 'FREEFORM'
},
# Add predefined key that may be use for content targeting.
{
'displayName': 'genre',
'name': 'genre',
'type': 'PREDEFINED'
}
]
# Add custom targeting keys.
keys = custom_targeting_service.createCustomTargetingKeys(keys)
# Display results.
if keys:
for key in keys:
print(('A custom targeting key with id \'%s\', name \'%s\', and display '
'name \'%s\' was created.' % (key['id'], key['name'],
key['displayName'])))
else:
print('No keys were created.')
# Create custom targeting value objects.
values = [
{
          'customTargetingKeyId': keys[0]['id'],
'displayName': 'male',
# Name is set to 1 so that the actual name can be hidden from website
# users.
'name': '1',
'matchType': 'EXACT'
},
{
          'customTargetingKeyId': keys[0]['id'],
'displayName': 'female',
# Name is set to 2 so that the actual name can be hidden from website
# users.
'name': '2',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[1]['id'],
'displayName': 'honda civic',
'name': 'honda civic',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[1]['id'],
'displayName': 'toyota',
'name': 'toyota',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[2]['id'],
'displayName': 'comedy',
'name': 'comedy',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[2]['id'],
'displayName': 'drama',
'name': 'drama',
'matchType': 'EXACT'
}
]
# Add custom targeting values.
values = custom_targeting_service.createCustomTargetingValues(values)
# Display results.
if values:
for value in values:
print(('A custom targeting value with id \'%s\', belonging to key with id'
' \'%s\', name \'%s\', and display name \'%s\' was created.'
% (value['id'], value['customTargetingKeyId'], value['name'],
value['displayName'])))
else:
print('No values were created.')
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
[
"[email protected]"
] | |
1e7fe6c92b81c4aea805851ef702489814f31b83
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/289/78183/submittedfiles/testes.py
|
5e9a5aefdf3d8285ef690f3498545979c9f5af98
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
idade = int(input("Digite sua idade: "))
print('A idade do indivíduo é %d' % idade)
altura = float(input("Digite sua altura: "))
print('A altura do indivíduo eh %.2f' %altura)
|
[
"[email protected]"
] | |
9e23792c94cee031550934457cf9eebd418a436d
|
cdcd71b8bb238ae1084e08d8d7c21c3c5595ba5b
|
/warriors.test.py
|
9a3fae86babebf5d3d72be28581a6952cd39558d
|
[] |
no_license
|
linnil1/2020pdsa
|
7118be250286aaa6831a21fd71e9de62d919ca6c
|
772a560bc5ce88eb052e102df7e0437372fd7ac1
|
refs/heads/master
| 2023-03-13T20:10:10.596073 | 2021-03-12T01:52:16 | 2021-03-12T01:52:16 | 295,599,510 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,088 |
py
|
import sys
import json
from pprint import pprint
from concurrent.futures import ProcessPoolExecutor, as_completed
import numpy as np
from tqdm import tqdm
import imp
Warriors = imp.load_source("Warriors", 'warriors.sol.py').Warriors
def quesion(n):
# init
np.random.seed()
st = np.random.choice(1000, size=n)
rg = np.random.choice(n, size=n) // 2
arg = np.stack([st, rg])
ops = {
'strength': st,
'attack_range': rg,
'answer': Warriors().warriors(st, rg)
}
return ops
def generateQuestion(N, n):
all_ops = []
with ProcessPoolExecutor(max_workers=20) as executor:
ops = [executor.submit(quesion, n)
for _ in range(N)]
for op in as_completed(ops):
all_ops.append(op.result())
return all_ops
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
# cases = json.load(open("warriors.json"))
cases = []
cases.append({
'case': 1,
'score': 20,
'data': [
# 0
{'strength': [11, 13, 11, 7, 15],
'attack_range': [ 1, 8, 1, 7, 2],
'answer': [0, 0, 0, 3, 2, 3, 3, 3, 2, 4]},
# 1
{'strength': [11],
'attack_range': [1],
'answer': [0, 0]},
# 2
{'strength': [11, 15],
'attack_range': [1, 1],
'answer': [0, 0, 0, 1]},
# 3
{'strength': [11, 15],
'attack_range': [1, 1],
'answer': [0, 0, 0, 1]},
# 4
{'strength': [15, 11],
'attack_range': [1, 1],
'answer': [0, 1, 1, 1]},
],
})
for i, arg in enumerate(cases[-1]['data']):
cases[-1]['data'][i]['answer'] = Warriors().warriors(arg['strength'], arg['attack_range'])
cases.append({
'case': 2,
'score': 20,
'data': [
# 0
# increasing
{'strength': np.arange(100000),
'attack_range': np.ones(100000, dtype=np.int) * 1000000},
# 1
# decreasing
{'strength': np.flip(np.arange(100000)),
'attack_range': np.ones(100000, dtype=np.int) * 1000000},
# 2
# increasing + decreasing
{'strength': np.hstack([np.arange(100000), np.flip(np.arange(100000))]),
'attack_range': np.ones(200000, dtype=np.int) * 4000000},
# 3
# decreasing + increasing
{'strength': np.hstack([np.flip(np.arange(100000)), np.arange(100000)]),
'attack_range': np.ones(200000, dtype=np.int) * 4000000},
# 4
# increasing + no attack
{'strength': np.arange(100000),
'attack_range': np.zeros(100000, dtype=np.int)},
# 5
{'strength': [0],
'attack_range': [1],
'answer': [0, 0]},
# 6
{'strength': [0],
'attack_range': [0],
'answer': [0, 0]},
# 7
{'strength': [0, 0],
'attack_range': [0, 0],
'answer': [0, 0, 1, 1]},
# 8
{'strength': [0, 1],
'attack_range': [0, 0],
'answer': [0, 0, 1, 1]},
],
})
for i, arg in enumerate(cases[-1]['data']):
cases[-1]['data'][i]['answer'] = Warriors().warriors(arg['strength'], arg['attack_range'])
# 30 * 30 -> 1000ms
cases.append({
'case': 3,
'score': 20,
'data': generateQuestion(30, 10000),
})
# 2400ms
cases.append({
'case': 4,
'score': 20,
'data': [
quesion(100000),
quesion(200000),
quesion(300000),
quesion(400000),
]
})
# 2400ms
cases.append({
'case': 5,
'score': 20,
'data': [
quesion(1000000),
]
})
# 10000 -> 30ms
# 100000 -> 300ms
# 200000 -> 450ms
# 300000 -> 750ms
# 400000 -> 1000ms
# 500000 -> 1200ms
# 800000 -> 2000ms
# 1000000 -> 2400ms
json.dump(cases, open("warriors.json", "w"), cls=MyEncoder)
# pprint(cases)
|
[
"[email protected]"
] | |
a3a7309b43e957fe90d03cf2bd26952fd25d1a50
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/appconfiguration/v20200701preview/key_value.py
|
0abcd6b95f09374cd0ce483a2ef8107baf4d53a8
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 |
Apache-2.0
| 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null |
UTF-8
|
Python
| false | false | 7,486 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['KeyValue']
class KeyValue(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_store_name: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
key_value_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The key-value resource along with all resource properties.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] config_store_name: The name of the configuration store.
:param pulumi.Input[str] content_type: The content type of the key-value's value.
Providing a proper content-type can enable transformations of values when they are retrieved by applications.
:param pulumi.Input[str] key_value_name: Identifier of key and label combination. Key and label are joined by $ character. Label is optional.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A dictionary of tags that can help identify what a key-value may be applicable for.
:param pulumi.Input[str] value: The value of the key-value.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if config_store_name is None:
raise TypeError("Missing required property 'config_store_name'")
__props__['config_store_name'] = config_store_name
__props__['content_type'] = content_type
if key_value_name is None:
raise TypeError("Missing required property 'key_value_name'")
__props__['key_value_name'] = key_value_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['value'] = value
__props__['e_tag'] = None
__props__['key'] = None
__props__['label'] = None
__props__['last_modified'] = None
__props__['locked'] = None
__props__['name'] = None
__props__['type'] = None
super(KeyValue, __self__).__init__(
'azure-nextgen:appconfiguration/v20200701preview:KeyValue',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'KeyValue':
"""
Get an existing KeyValue resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return KeyValue(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> pulumi.Output[Optional[str]]:
"""
The content type of the key-value's value.
Providing a proper content-type can enable transformations of values when they are retrieved by applications.
"""
return pulumi.get(self, "content_type")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[str]:
"""
An ETag indicating the state of a key-value within a configuration store.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
The primary identifier of a key-value.
The key is used in unison with the label to uniquely identify a key-value.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def label(self) -> pulumi.Output[str]:
"""
A value used to group key-values.
The label is used in unison with the key to uniquely identify a key-value.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> pulumi.Output[str]:
"""
The last time a modifying operation was performed on the given key-value.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def locked(self) -> pulumi.Output[bool]:
"""
A value indicating whether the key-value is locked.
A locked key-value may not be modified until it is unlocked.
"""
return pulumi.get(self, "locked")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A dictionary of tags that can help identify what a key-value may be applicable for.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> pulumi.Output[Optional[str]]:
"""
The value of the key-value.
"""
return pulumi.get(self, "value")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
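# Illustrative usage (the resource, store and group names are placeholders and
# not part of this generated module); key and label are joined by the $
# character and the label may be omitted:
#   kv = KeyValue("example-kv",
#                 config_store_name="my-config-store",
#                 key_value_name="myKey$myLabel",
#                 resource_group_name="my-resource-group",
#                 value="example value")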
|
[
"[email protected]"
] | |
0e5f9524a6311e8ca3f114cb22491e4aaff80c2b
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-datastream/samples/generated_samples/datastream_v1alpha1_generated_datastream_delete_private_connection_async.py
|
d0a8f20842e7cadf8600231215035ef761fa4c53
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 |
Apache-2.0
| 2023-09-14T21:45:18 | 2014-01-28T15:51:47 |
Python
|
UTF-8
|
Python
| false | false | 2,035 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeletePrivateConnection
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datastream
# [START datastream_v1alpha1_generated_Datastream_DeletePrivateConnection_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
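# Hedged illustration of the regional-endpoint option mentioned above (the
# endpoint string is a placeholder, not taken from this sample):
#   from google.api_core.client_options import ClientOptions
#   client = datastream_v1alpha1.DatastreamAsyncClient(
#       client_options=ClientOptions(api_endpoint="us-east1-datastream.googleapis.com"))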
from google.cloud import datastream_v1alpha1
async def sample_delete_private_connection():
# Create a client
client = datastream_v1alpha1.DatastreamAsyncClient()
# Initialize request argument(s)
request = datastream_v1alpha1.DeletePrivateConnectionRequest(
name="name_value",
)
# Make the request
operation = client.delete_private_connection(request=request)
print("Waiting for operation to complete...")
response = (await operation).result()
# Handle the response
print(response)
# [END datastream_v1alpha1_generated_Datastream_DeletePrivateConnection_async]
|
[
"[email protected]"
] | |
01324a0b0027287231b714660b00a89d2561d10a
|
a04c9e34c8abb6eb5857cb6e35fbbed0743ea8d4
|
/sample_db_funtions.py
|
aa1286b28047be47190becff32df50450902f654
|
[] |
no_license
|
SrikanthAmudala/PythonWorkShopConcordia
|
a2fd0a3103524733913c00767907bafecd1c6ad6
|
d2e383a89bc995d96313fd0723c064a0a45db6f9
|
refs/heads/master
| 2021-05-19T13:02:42.173832 | 2020-05-27T21:48:34 | 2020-05-27T21:48:34 | 251,713,287 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,509 |
py
|
db = {
"user_table": {
"sunny": {
"password": "sunny1234",
"books_reg": {
"COMPUTER SCIENCE": [],
"CIISE": []
}
},
"rohit": {
"password": "rohit",
"books_reg": {
"COMPUTER SCIENCE": [],
"CIISE": []
}
},
"negar": {
"password": "negar",
"books_reg": {
"COMPUTER SCIENCE": [],
"CIISE": []
}
}
},
"books_list": {
"COMPUTER SCIENCE": {
"C": 1, # name of the book: count of the book
"Python": 2,
"java": 1
},
"CIISE": {
"Quality Systems": 1,
"DataMining": 1,
"Project Management": 1
}
}
}
print("before update: \n",db)
username = "negar"
password = "negar"
password_from_db = db.get("user_table").get(username).get("password")
print("True password: ", password_from_db)
if password == password_from_db:
print("Login successful")
else:
print("Login Failed")
check_book = "Python"
book_catg = "COMPUTER SCIENCE"
total_books_avilable = db.get("books_list").get(book_catg).get(check_book)
if total_books_avilable > 0:
print("Book exists and total no of books available: ", total_books_avilable)
# adding book to the username
db['user_table'][username]['books_reg'][book_catg].append(check_book)
# updating the no of books in the book catg
db['books_list'][book_catg][check_book] = db.get('books_list').get(book_catg).get(check_book) - 1
else:
print("Book out of stock")
print("After update: \n",db)
|
[
"[email protected]"
] | |
4de7f4a672e43e92de4f70c0c5f4218db2023f8e
|
e16d7d8f60145c68640b25aa7c259618be60d855
|
/1-100/34.py
|
0fd3383f668beabf94a8c8f161a9febe2ef79fc5
|
[] |
no_license
|
zongqiqi/mypython
|
bbe212223002dabef773ee0dbeafbad5986b4639
|
b80f3ce6c30a0677869a7b49421a757c16035178
|
refs/heads/master
| 2020-04-21T07:39:59.594233 | 2017-12-11T00:54:44 | 2017-12-11T00:54:44 | 98,426,286 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 125 |
py
|
"""练习函数调用"""
def hello():
print('Hello World')
def many():
for a in range(3):
hello()
many()
|
[
"[email protected]"
] | |
50534e04bd484b27316b647b50f6e6c2f9ee131e
|
ca101e77a77cd86561c4e34553dbd3578a87a8b2
|
/transaction/urls.py
|
01537875c84f1a09282b06841208d3e55e7f4e96
|
[] |
no_license
|
vmgabriel/app-cycle-money
|
001c1baa1b1c77c6e965beaee6d1d7c4cd45c699
|
4381fb9c8288fe37cbcd1c9ecef14e6e8299b680
|
refs/heads/master
| 2023-08-13T19:07:19.065576 | 2020-07-26T22:23:35 | 2020-07-26T22:23:35 | 281,472,685 | 0 | 0 | null | 2021-09-22T19:28:41 | 2020-07-21T18:20:36 |
CSS
|
UTF-8
|
Python
| false | false | 2,181 |
py
|
# Develop: vmgabriel
"""Module that Define all Rooute of the module"""
from django.urls import path
from . import views
app_name = 'transactions'
urlpatterns = [
# Priorities Routes
path(
'priorities/',
views.PriorityEntityView.as_view(),
name='priorities_list',
),
path(
'priorities/create/',
views.PriorityEntityView.as_view(),
name='priorities_create'
),
path(
'priorities/<int:id>/edit/',
views.PriorityEntityView.as_view(),
name='priorities_edit'
),
path(
'priorities/<int:id>/delete/',
views.PriorityEntityView.as_view(),
name='priorities_delete'
),
path(
'priorities/<int:id>/show/',
views.PriorityEntityView.as_view(),
name='priorities_show'
),
# Type Consume Routes
path(
'type-consumes/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_list',
),
path(
'type-consumes/create/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_create'
),
path(
'type-consumes/<int:id>/edit/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_edit'
),
path(
'type-consumes/<int:id>/delete/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_delete'
),
path(
'type-consumes/<int:id>/show/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_show'
),
# Type Bill Routes
path(
'type-bills/',
views.TypeBillEntityView.as_view(),
name='type_bills_list',
),
path(
'type-bills/create/',
views.TypeBillEntityView.as_view(),
name='type_bills_create'
),
path(
'type-bills/<int:id>/edit/',
views.TypeBillEntityView.as_view(),
name='type_bills_edit'
),
path(
'type-bills/<int:id>/delete/',
views.TypeBillEntityView.as_view(),
name='type_bills_delete'
),
path(
'type-bills/<int:id>/show/',
views.TypeBillEntityView.as_view(),
name='type_bills_show'
),
]
|
[
"[email protected]"
] | |
586486eb94a499645daff8e3b832c1c44d56ffb7
|
257b1d32488ff8a3e9b5f148839d042512d5de83
|
/testing/pandas_rollup_plugin_test.py
|
0105128b05ec100dffb941d4e6991c6d74a469be
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hellios78/mldb
|
200ba8b38e2509585d563403c4d4d12543e00dc9
|
5f869dcfca1f8bcce3418138f130321656a0970c
|
refs/heads/master
| 2020-04-06T04:10:38.369452 | 2015-12-23T22:06:08 | 2015-12-23T22:06:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,400 |
py
|
# This file is part of MLDB. Copyright 2015 Datacratic. All rights reserved.
import os, socket, time
#import csv
#import json
#import datetime
from multiprocessing import Process
from py_connectors.mldb_connector import MldbConnectorAdHoc
#from mldb.data import DFrame
def startMldb():
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
def startServer(port):
os.system("build/x86_64/bin/mldb_runner --http-listen-port %d --peer-listen-port %d" % (port, port+1))
proc = Process(target=startServer, args=[port])
proc.start()
time.sleep(1) # give enough time to start
if not proc.is_alive():
raise Exception("Failed to start api in background for test")
return proc, "http://localhost:%d" % port
mldbProc, mldbUrl = startMldb()
mldb = MldbConnectorAdHoc(mldbUrl).v1()
#######
# First we need to register the two plugins we'll be using
# b) the pandas_rollup plugin, which we'll use to do our exploration
######
pluginConfig = {
"type": "pandas_rollup",
"id": "pandas_rollup"
}
print mldb.plugins("pandas_rollup").put(pluginConfig, [("sync", "true")])
####
# Let's now create a script that we'll ship over and that will be executed
# on the server to create the dataset and import the data
####
scriptSource = """
import json
from datetime import datetime
print "Running a server-side script!!!"
# create a mutable beh dataset
datasetConfig = {
"type": "beh.mutable",
"id": "tng",
"address": "tng_py.beh.gz"
}
dataset = plugin.create_dataset(datasetConfig)
dataset.recordRow("picard", [["setscourse", "earth", datetime.fromtimestamp(1000)],
["setscourse", "neutralzone", datetime.fromtimestamp((10000))],
["setscourse", "neutralzone", datetime.fromtimestamp((20000))],
["setscourse", "neutralzone", datetime.fromtimestamp((30000))],
["setscourse", "neutralzone", datetime.fromtimestamp((4000))],
["setscourse", "wolf359", datetime.fromtimestamp((50000))]])
dataset.recordRow("riker", [["setscourse", "risa", datetime.fromtimestamp((500000))],
["fireon", "cardasians", datetime.fromtimestamp((500000))]])
dataset.recordRow("worf", [["fireon", "tardis", datetime.fromtimestamp((400000))],
["fireon", "borgcube", datetime.fromtimestamp((500000))],
["fireon", "cardasians", datetime.fromtimestamp((300000))]])
dataset.recordRow('One Zero', [["work", 1, datetime.fromtimestamp((300000))],
["sleep", 0, datetime.fromtimestamp((300000))]])
dataset.recordRow('Double', [["work", 1.5, datetime.fromtimestamp((300000))],
["sleep", 0.4, datetime.fromtimestamp((300000))]])
"""
# post the script for execution on the server
scriptConfig = {
"scriptSource": scriptSource
}
print MldbConnectorAdHoc(mldbUrl)._post("/v1/types/plugins/python/routes/run", "")(scriptConfig)
import json
queryConfig = {
"dataset": "tng",
"query": json.dumps({"head": "groupby", "body": [], "tail":["list_both"]})
}
print MldbConnectorAdHoc(mldbUrl)._post("/v1/plugins/pandas_rollup/routes/query", "")(queryConfig)
print queryConfig
# /v1/plugins/pandas_rollup/routes/query
# DFrame
|
[
"[email protected]"
] | |
bb79e60db39edda2427b197f26b4fc40bf317f28
|
ba2dbc19e899faaa17b994a1224e455a3de5b9ad
|
/01_jump to python/CHAP03/range.py
|
dcb8ae404e040165481136248bd6939ddcadcd0c
|
[] |
no_license
|
xsky21/bigdata2019
|
52d3dc9379a05ba794c53a28284de2168d0fc366
|
19464a6f8862b6e6e3d4e452e0dab85bdd954e40
|
refs/heads/master
| 2020-04-21T10:56:34.637812 | 2019-04-16T04:16:27 | 2019-04-16T04:16:27 | 169,503,618 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 37 |
py
|
for a in range(19,0,-2):
print(a)
|
[
"[email protected]"
] | |
c10e69f9a1357a6a97db16ffe61328333a3c305f
|
5b1022e257e3825a2d4ddcd7fa071367bf1be073
|
/广铁新闻/IPPool.py
|
98f961ce3071371d98c290a958b47146a9bd310c
|
[] |
no_license
|
KwinnerChen/zkjw
|
465d36fb583ac474ce12ced831b890ed938767d6
|
9dd577e73144f51bde6fd1af9715cf162f32799a
|
refs/heads/master
| 2020-05-27T22:35:07.613419 | 2019-06-03T10:08:43 | 2019-06-03T10:08:43 | 188,807,246 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,135 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# python: v3.6.4
from storage import Oracle
from queue import Queue
from threading import Thread
from time import sleep
from random import random
class IPPool(Queue):
'''
    When instantiated, this builds a queue of proxy IPs that keeps refreshing itself from the database in a background thread.
'''
def __init__(self, user, password, host, table_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
self.password = password
self.host = host
self.table_name = table_name
# self.delay_time = delay_time
t = Thread(target=self.__refresh)
t.start()
def __refresh(self):
while True:
if self.empty():
self.__put_proxy_queue()
else:
sleep(random()*2)
def __get_proxy_database(self):
ora = Oracle(self.user, self.password, self.host)
data = ora.getall(self.table_name)
ora.close()
return data
def __put_proxy_queue(self):
data = self.__get_proxy_database()
for ip, http in data:
self.put({http: http + '://' + ip})
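# Illustrative usage (credentials, host and table name are placeholders): the
# queue refills itself from the database whenever it runs empty, and get()
# yields dicts mapping the protocol to "<protocol>://<ip>", exactly as built in
# __put_proxy_queue above, e.g.
#   pool = IPPool("db_user", "db_password", "127.0.0.1:1521/orcl", "proxy_table")
#   proxy = pool.get()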
|
[
"[email protected]"
] | |
d66838e90d413055054c3233f6efc543b06dd338
|
4a191e5aecd53c4cea28482a0179539eeb6cd74b
|
/comments/migrations/0001_initial.py
|
ec8c8dbcf06824eb238de80f5a28b2174fec528e
|
[] |
no_license
|
jiangjingwei/blogproject
|
631a2e8e2f72420cce45ddaf152174852376d831
|
daf14e88092dc030a3ab0c295ee06fb6b2164372
|
refs/heads/master
| 2020-03-14T23:29:08.052253 | 2018-05-10T11:35:59 | 2018-05-10T11:35:59 | 131,846,149 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 968 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2018-05-04 02:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=255)),
('url', models.URLField(blank=True)),
('text', models.TextField()),
('create_time', models.DateTimeField(auto_now_add=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Article')),
],
),
]
|
[
"[email protected]"
] | |
ecb0fe2d62c99430e0ba64d6400e35067c997735
|
eae3d77ac72c168cee7701462f1fc45d7d4dcd91
|
/List2/setSum.py
|
8d52e5e5a41b404427d4eb439220cf486c7187e5
|
[] |
no_license
|
ByeongjunCho/Algorithm-TIL
|
ed2f018d50bd2483bd1175ff9bf7e91913c14766
|
ad79125a1498915fe97c1d57ee6860b06c410958
|
refs/heads/master
| 2022-07-19T15:12:23.689319 | 2020-05-18T08:37:09 | 2020-05-18T08:37:09 | 256,399,493 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 21 |
py
|
def setSum(arr):
|
[
"[email protected]"
] | |
f0b8047aadd151aeaac9c8760e4f603bfda43cde
|
d697c1d45e96bd440be9c17ab14243a5882b1f52
|
/qianfeng/高级/测试/单元测试/myTest.py
|
44d63133e889f79192b9e8b1dae90e67d5ad0f8d
|
[] |
no_license
|
ithjl521/python
|
9eeda2e60dda97ee36e8764c06400eb12818689f
|
f4fe50799501c483cb64445fd05ee0f30f56576c
|
refs/heads/master
| 2020-07-12T23:10:53.608276 | 2019-11-08T08:59:35 | 2019-11-08T08:59:35 | 204,931,359 | 0 | 0 | null | null | null | null |
GB18030
|
Python
| false | false | 429 |
py
|
# coding=gbk
import unittest
from 对函数进行单元测试 import mySum
from 对函数进行单元测试 import mySub
class Test(unittest.TestCase):
def setUp(self):
print("开始测试时自动调用")
def tearDown(self):
print("结束测试时自动调用")
    # To test mySum
def test_mySum(self):
self.assertEqual(mySum(1,2),3,"加法有误")
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
45782f035e83c50f3e0b1a8b06450a74442b6049
|
f9892f6d55c79e65b40ba1da909779f355fb60f7
|
/tools/build_pytorch_libs.py
|
ff9eee3b3e953649973c1df5790b7c725894cbdf
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
bigmb/pytorch
|
a5bf2f75678e5cb47936784cdc7550a8191aa90f
|
d68802ba473602c3c66cad80d724f01386780753
|
refs/heads/master
| 2020-05-21T10:39:23.090476 | 2019-05-10T14:57:46 | 2019-05-10T15:00:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,389 |
py
|
from .setup_helpers.env import (IS_64BIT, IS_DARWIN, IS_WINDOWS,
DEBUG, REL_WITH_DEB_INFO, USE_MKLDNN,
check_env_flag, check_negative_env_flag)
import os
import sys
import distutils
import distutils.sysconfig
from subprocess import check_call, check_output
from distutils.version import LooseVersion
from .setup_helpers.cuda import USE_CUDA, CUDA_HOME
from .setup_helpers.dist_check import USE_DISTRIBUTED, USE_GLOO_IBVERBS
from .setup_helpers.nccl import USE_SYSTEM_NCCL, NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB, USE_NCCL
from .setup_helpers.rocm import USE_ROCM
from .setup_helpers.nnpack import USE_NNPACK
from .setup_helpers.qnnpack import USE_QNNPACK
from .setup_helpers.cudnn import CUDNN_INCLUDE_DIR, CUDNN_LIBRARY, USE_CUDNN
from pprint import pprint
from glob import glob
import multiprocessing
import shutil
def which(thefile):
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
for dir in path:
fname = os.path.join(dir, thefile)
fnames = [fname]
if IS_WINDOWS:
exts = os.environ.get('PATHEXT', '').split(os.pathsep)
fnames += [fname + ext for ext in exts]
for name in fnames:
if (os.path.exists(name) and os.access(name, os.F_OK | os.X_OK)
and not os.path.isdir(name)):
return name
return None
def cmake_version(cmd):
for line in check_output([cmd, '--version']).decode('utf-8').split('\n'):
if 'version' in line:
return LooseVersion(line.strip().split(' ')[2])
raise Exception('no version found')
def get_cmake_command():
cmake_command = 'cmake'
if IS_WINDOWS:
return cmake_command
cmake3 = which('cmake3')
if cmake3 is not None:
cmake = which('cmake')
if cmake is not None:
bare_version = cmake_version(cmake)
if bare_version < LooseVersion("3.5.0") and cmake_version(cmake3) > bare_version:
cmake_command = 'cmake3'
return cmake_command
def cmake_defines(lst, **kwargs):
for key in sorted(kwargs.keys()):
value = kwargs[key]
if value is not None:
lst.append('-D{}={}'.format(key, value))
# Ninja
# Use ninja if it is on the PATH. Previous version of PyTorch required the
# ninja python package, but we no longer use it, so we do not have to import it
USE_NINJA = not check_negative_env_flag('USE_NINJA') and (which('ninja') is not None)
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
install_dir = base_dir + "/torch"
build_type = "Release"
if DEBUG:
build_type = "Debug"
elif REL_WITH_DEB_INFO:
build_type = "RelWithDebInfo"
def overlay_windows_vcvars(env):
if sys.version_info >= (3, 5):
from distutils._msvccompiler import _get_vc_env
vc_arch = 'x64' if IS_64BIT else 'x86'
vc_env = _get_vc_env(vc_arch)
for k, v in env.items():
lk = k.lower()
if lk not in vc_env:
vc_env[lk] = v
return vc_env
else:
return env
def mkdir_p(dir):
try:
os.makedirs(dir)
except OSError:
pass
def create_build_env():
# XXX - our cmake file sometimes looks at the system environment
# and not cmake flags!
# you should NEVER add something to this list. It is bad practice to
# have cmake read the environment
my_env = os.environ.copy()
if USE_CUDNN:
my_env['CUDNN_LIBRARY'] = escape_path(CUDNN_LIBRARY)
my_env['CUDNN_INCLUDE_DIR'] = escape_path(CUDNN_INCLUDE_DIR)
if USE_CUDA:
my_env['CUDA_BIN_PATH'] = escape_path(CUDA_HOME)
if IS_WINDOWS:
my_env = overlay_windows_vcvars(my_env)
# When using Ninja under Windows, the gcc toolchain will be chosen as default.
# But it should be set to MSVC as the user's first choice.
if USE_NINJA:
cc = my_env.get('CC', 'cl')
cxx = my_env.get('CXX', 'cl')
my_env['CC'] = cc
my_env['CXX'] = cxx
return my_env
def run_cmake(version,
cmake_python_library,
build_python,
build_test,
build_dir,
my_env):
cmake_args = [
get_cmake_command()
]
if USE_NINJA:
cmake_args.append('-GNinja')
elif IS_WINDOWS:
if IS_64BIT:
cmake_args.append('-GVisual Studio 15 2017 Win64')
else:
cmake_args.append('-GVisual Studio 15 2017')
try:
import numpy as np
NUMPY_INCLUDE_DIR = np.get_include()
USE_NUMPY = True
except ImportError:
USE_NUMPY = False
NUMPY_INCLUDE_DIR = None
cflags = os.getenv('CFLAGS', "") + " " + os.getenv('CPPFLAGS', "")
ldflags = os.getenv('LDFLAGS', "")
if IS_WINDOWS:
cmake_defines(cmake_args, MSVC_Z7_OVERRIDE=os.getenv('MSVC_Z7_OVERRIDE', "ON"))
cflags += " /EHa"
mkdir_p(install_dir)
mkdir_p(build_dir)
cmake_defines(
cmake_args,
PYTHON_EXECUTABLE=escape_path(sys.executable),
PYTHON_LIBRARY=escape_path(cmake_python_library),
PYTHON_INCLUDE_DIR=escape_path(distutils.sysconfig.get_python_inc()),
BUILDING_WITH_TORCH_LIBS=os.getenv("BUILDING_WITH_TORCH_LIBS", "ON"),
TORCH_BUILD_VERSION=version,
CMAKE_BUILD_TYPE=build_type,
BUILD_TORCH=os.getenv("BUILD_TORCH", "ON"),
BUILD_PYTHON=build_python,
BUILD_SHARED_LIBS=os.getenv("BUILD_SHARED_LIBS", "ON"),
BUILD_BINARY=check_env_flag('BUILD_BINARY'),
BUILD_TEST=build_test,
INSTALL_TEST=build_test,
BUILD_CAFFE2_OPS=not check_negative_env_flag('BUILD_CAFFE2_OPS'),
ONNX_NAMESPACE=os.getenv("ONNX_NAMESPACE", "onnx_torch"),
ONNX_ML=os.getenv("ONNX_ML", False),
USE_CUDA=USE_CUDA,
USE_DISTRIBUTED=USE_DISTRIBUTED,
USE_FBGEMM=not (check_env_flag('NO_FBGEMM') or check_negative_env_flag('USE_FBGEMM')),
NAMEDTENSOR_ENABLED=(check_env_flag('USE_NAMEDTENSOR') or check_negative_env_flag('NO_NAMEDTENSOR')),
USE_NUMPY=USE_NUMPY,
NUMPY_INCLUDE_DIR=escape_path(NUMPY_INCLUDE_DIR),
USE_SYSTEM_NCCL=USE_SYSTEM_NCCL,
NCCL_INCLUDE_DIR=NCCL_INCLUDE_DIR,
NCCL_ROOT_DIR=NCCL_ROOT_DIR,
NCCL_SYSTEM_LIB=NCCL_SYSTEM_LIB,
CAFFE2_STATIC_LINK_CUDA=check_env_flag('USE_CUDA_STATIC_LINK'),
USE_ROCM=USE_ROCM,
USE_NNPACK=USE_NNPACK,
USE_LEVELDB=check_env_flag('USE_LEVELDB'),
USE_LMDB=check_env_flag('USE_LMDB'),
USE_OPENCV=check_env_flag('USE_OPENCV'),
USE_QNNPACK=USE_QNNPACK,
USE_TENSORRT=check_env_flag('USE_TENSORRT'),
USE_FFMPEG=check_env_flag('USE_FFMPEG'),
USE_SYSTEM_EIGEN_INSTALL="OFF",
USE_MKLDNN=USE_MKLDNN,
USE_NCCL=USE_NCCL,
NCCL_EXTERNAL=USE_NCCL,
CMAKE_INSTALL_PREFIX=install_dir,
CMAKE_C_FLAGS=cflags,
CMAKE_CXX_FLAGS=cflags,
CMAKE_EXE_LINKER_FLAGS=ldflags,
CMAKE_SHARED_LINKER_FLAGS=ldflags,
THD_SO_VERSION="1",
CMAKE_PREFIX_PATH=os.getenv('CMAKE_PREFIX_PATH') or distutils.sysconfig.get_python_lib(),
BLAS=os.getenv('BLAS'),
CUDA_NVCC_EXECUTABLE=escape_path(os.getenv('CUDA_NVCC_EXECUTABLE')),
USE_REDIS=os.getenv('USE_REDIS'),
USE_GLOG=os.getenv('USE_GLOG'),
USE_GFLAGS=os.getenv('USE_GFLAGS'),
WERROR=os.getenv('WERROR'))
if os.getenv('USE_OPENMP'):
cmake_defines(cmake_args, USE_OPENMP=check_env_flag('USE_OPENMP'))
if os.getenv('MKL_SEQ'):
cmake_defines(cmake_args, INTEL_MKL_SEQUENTIAL=check_env_flag('MKL_SEQ'))
mkldnn_threading = os.getenv('MKLDNN_THREADING')
if mkldnn_threading:
cmake_defines(cmake_args, MKLDNN_THREADING=mkldnn_threading)
if USE_GLOO_IBVERBS:
cmake_defines(cmake_args, USE_IBVERBS="1", USE_GLOO_IBVERBS="1")
if USE_MKLDNN:
cmake_defines(cmake_args, MKLDNN_ENABLE_CONCURRENT_EXEC="ON")
expected_wrapper = '/usr/local/opt/ccache/libexec'
if IS_DARWIN and os.path.exists(expected_wrapper):
cmake_defines(cmake_args,
CMAKE_C_COMPILER="{}/gcc".format(expected_wrapper),
CMAKE_CXX_COMPILER="{}/g++".format(expected_wrapper))
for env_var_name in my_env:
if env_var_name.startswith('gh'):
# github env vars use utf-8, on windows, non-ascii code may
# cause problem, so encode first
try:
my_env[env_var_name] = str(my_env[env_var_name].encode("utf-8"))
except UnicodeDecodeError as e:
shex = ':'.join('{:02x}'.format(ord(c)) for c in my_env[env_var_name])
sys.stderr.write('Invalid ENV[{}] = {}\n'.format(env_var_name, shex))
# According to the CMake manual, we should pass the arguments first,
# and put the directory as the last element. Otherwise, these flags
# may not be passed correctly.
# Reference:
# 1. https://cmake.org/cmake/help/latest/manual/cmake.1.html#synopsis
# 2. https://stackoverflow.com/a/27169347
cmake_args.append(base_dir)
pprint(cmake_args)
check_call(cmake_args, cwd=build_dir, env=my_env)
def build_caffe2(version,
cmake_python_library,
build_python,
rerun_cmake,
build_dir):
my_env = create_build_env()
build_test = not check_negative_env_flag('BUILD_TEST')
max_jobs = os.getenv('MAX_JOBS', None)
cmake_cache_file = 'build/CMakeCache.txt'
if rerun_cmake and os.path.isfile(cmake_cache_file):
os.remove(cmake_cache_file)
if not os.path.exists(cmake_cache_file) or (USE_NINJA and not os.path.exists('build/build.ninja')):
run_cmake(version,
cmake_python_library,
build_python,
build_test,
build_dir,
my_env)
if IS_WINDOWS:
build_cmd = ['cmake', '--build', '.', '--target', 'install', '--config', build_type, '--']
if USE_NINJA:
# sccache will fail if all cores are used for compiling
j = max(1, multiprocessing.cpu_count() - 1)
if max_jobs is not None:
j = min(int(max_jobs), j)
build_cmd += ['-j', str(j)]
check_call(build_cmd, cwd=build_dir, env=my_env)
else:
j = max_jobs or str(multiprocessing.cpu_count())
build_cmd += ['/maxcpucount:{}'.format(j)]
check_call(build_cmd, cwd=build_dir, env=my_env)
else:
if USE_NINJA:
ninja_cmd = ['ninja', 'install']
if max_jobs is not None:
ninja_cmd += ['-j', max_jobs]
check_call(ninja_cmd, cwd=build_dir, env=my_env)
else:
max_jobs = max_jobs or str(multiprocessing.cpu_count())
check_call(['make', '-j', str(max_jobs), 'install'], cwd=build_dir, env=my_env)
# in cmake, .cu compilation involves generating certain intermediates
# such as .cu.o and .cu.depend, and these intermediates finally get compiled
# into the final .so.
# Ninja updates build.ninja's timestamp after all dependent files have been built,
# and re-kicks cmake on incremental builds if any of the dependent files
# have a timestamp newer than build.ninja's timestamp.
# There is a cmake bug with the Ninja backend, where the .cu.depend files
# are still compiling by the time the build.ninja timestamp is updated,
# so the .cu.depend file's newer timestamp is screwing with ninja's incremental
# build detector.
# This line works around that bug by manually updating the build.ninja timestamp
# after the entire build is finished.
if os.path.exists('build/build.ninja'):
os.utime('build/build.ninja', None)
if build_python:
for proto_file in glob('build/caffe2/proto/*.py'):
if os.path.sep != '/':
proto_file = proto_file.replace(os.path.sep, '/')
if proto_file != 'build/caffe2/proto/__init__.py':
shutil.copyfile(proto_file, "caffe2/proto/" + os.path.basename(proto_file))
def escape_path(path):
if os.path.sep != '/' and path is not None:
return path.replace(os.path.sep, '/')
return path
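# Illustrative note (not in the original source): on Windows, where os.path.sep
# is '\\', escape_path('C:\\work\\pytorch') returns 'C:/work/pytorch'; on POSIX
# systems, and when path is None, the argument is returned unchanged.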
|
[
"[email protected]"
] | |
73fe0f0f7dffea7edbcb3cb0f2ed6a763c1067ac
|
580905861e3bdd1990cde76ba2b057c898e6f088
|
/task_scheduler/src/task_scheduler/urls.py
|
198511a3938452d4bb45a734c9d9fe1dd61e2ebc
|
[
"MIT"
] |
permissive
|
muhammad-mamdouh/Django_Projects
|
14eddfdc25aa4be43c5d35e30c5efb146e255101
|
1f31e12aefb36b33474256db40a2c551882f445e
|
refs/heads/master
| 2022-12-10T20:02:38.918760 | 2019-12-14T21:24:08 | 2019-12-14T21:24:08 | 198,602,869 | 0 | 1 | null | 2022-11-22T04:13:34 | 2019-07-24T09:28:59 |
Python
|
UTF-8
|
Python
| false | false | 834 |
py
|
"""task_scheduler URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('send_mail_task.urls', namespace='mail_task')),
]
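# Illustrative sketch of the three routing styles described in the module
# docstring above (not part of the original file). `my_app`, `other_app`,
# `views.home`, and `Home` are placeholder names taken from the docstring,
# not apps that exist in this project, so the lines are left commented out.
#
# from my_app import views
# from other_app.views import Home
#
# urlpatterns += [
#     path('', views.home, name='home'),            # function view
#     path('home/', Home.as_view(), name='home'),   # class-based view
#     path('blog/', include('blog.urls')),          # include another URLconf
# ]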
|
[
"[email protected]"
] | |
b82fb7fe8747e8f913ed23b9afd7587c9d26b697
|
024b554051b873e255b98d862b5985ffa0ae1db6
|
/backend/manage.py
|
08f3c54bf4da786dcb5b14fca0ce27fac3556d68
|
[] |
no_license
|
crowdbotics-apps/test-4895-dev-16173
|
74171d867ea95703a66c8870ad338b1536d52fcd
|
15668ad1df225f9e06e02ddd15f933574e732073
|
refs/heads/master
| 2023-01-21T08:21:32.469480 | 2020-12-03T07:54:52 | 2020-12-03T07:54:52 | 318,115,597 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 639 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_4895_dev_16173.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
a9d1d757bf0eda6e44becfa0aa2e67aa7c86121a
|
a1517979b20286311bdac7cd153f698498e04223
|
/application/modules/tache/__init__.py
|
ee94819f39cb45b3b25e3ca5ba08fcdc39e07b59
|
[] |
no_license
|
wilrona/Gesacom_mongoDB
|
441367029b899ceb0304879fd808fb8dbdbfb457
|
d043136889c5f2c3e10ace8ebacf55c11b91b4c0
|
refs/heads/master
| 2020-12-25T14:23:39.343917 | 2017-07-07T16:38:26 | 2017-07-07T16:38:26 | 67,689,398 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 161 |
py
|
__author__ = 'Ronald'
from views_tache import *
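# `app`, `prefix_projet`, and `prefix` are assumed to be defined (or re-exported)
# by views_tache via the star import above; the two blueprints are mounted under
# the /projet and /tache URL prefixes below.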
app.register_blueprint(prefix_projet, url_prefix='/projet')
app.register_blueprint(prefix, url_prefix='/tache')
|
[
"[email protected]"
] | |
aa9fd9f921e0e3d5e66dcd6281d0202037000305
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/leetcode/LeetcodePythonProject/leetcode_0701_0750/LeetCode728_SelfDividingNumbers.py
|
25a4ba8beb3af61a5e6a91825a9811b22e069d64
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 929 |
py
|
'''
Created on Mar 4, 2018
@author: tongq
'''
class Solution(object):
    def selfDividingNumbers(self, left, right):
        """
        :type left: int
        :type right: int
        :rtype: List[int]
        """
        res = []
        for num in range(left, right + 1):
            if self.isSelfDividing(num):
                res.append(num)
        return res

    def isSelfDividing(self, num):
        # A self-dividing number contains no digit 0 and is divisible by each of its digits.
        for digit in str(num):
            d = int(digit)
            if d == 0 or num % d != 0:
                return False
        return True

    def test(self):
        testCases = [
            [1, 22],
        ]
        for left, right in testCases:
            print('left: %s' % left)
            print('right: %s' % right)
            result = self.selfDividingNumbers(left, right)
            print('result: %s' % result)
            print('-=' * 30 + '-')


if __name__ == '__main__':
    Solution().test()
|
[
"[email protected]"
] | |
1dd0de92808d94d40c70083b3d598435fd4edaad
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/algorithm/expert_algo/2_20.py
|
8e3be8d2091b9b978bf541ef81f355ccec8d2e40
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,917 |
py
|
Number of ways to reach the end of matrix with non-zero AND value
Given an **N * N** matrix **arr[][]** consisting of non-negative integers, the
task is to find the number of ways to reach **arr[N – 1][N – 1]** with a
non-zero AND value, starting from **arr[0][0]** and moving down or right in
every step. Whenever a cell **arr[i][j]** is reached, the AND value is updated
as **currentVal & arr[i][j]**.
**Examples:**
> **Input:** arr[][] = {
> {1, 1, 1},
> {1, 1, 1},
> {1, 1, 1}}
>
> **Output:** 6
> All the paths give a non-zero AND value,
> so the number of ways equals 6.
>
> **Input:** arr[][] = {
> {1, 1, 2},
> {1, 2, 1},
> {2, 1, 1}}
> **Output:** 0
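As a quick sanity check (not part of the original article), a small brute-force
enumeration of every down/right path reproduces both sample outputs; it is
exponential in N, so it is only suitable for tiny matrices:

# Brute-force verifier for the samples above (illustrative sketch).
def count_nonzero_and_paths(arr):
    n = len(arr)

    def walk(i, j, acc):
        acc &= arr[i][j]
        if acc == 0:
            return 0
        if i == n - 1 and j == n - 1:
            return 1
        ways = 0
        if i + 1 < n:
            ways += walk(i + 1, j, acc)
        if j + 1 < n:
            ways += walk(i, j + 1, acc)
        return ways

    # ~0 has all bits set, so the first cell determines the initial AND value.
    return walk(0, 0, ~0)

print(count_nonzero_and_paths([[1, 1, 1], [1, 1, 1], [1, 1, 1]]))  # 6
print(count_nonzero_and_paths([[1, 1, 2], [1, 2, 1], [2, 1, 1]]))  # 0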
**Approach:** This problem can be solved using dynamic programming. First, we
need to decide the states of the DP. For every cell **arr[i][j]** and a number
**X**, we store the number of ways to reach **arr[N – 1][N – 1]** from
**arr[i][j]** with a non-zero AND, where **X** is the AND value of the path so
far. The solution therefore uses 3-dimensional dynamic programming: two
dimensions for the cell coordinates and one for **X**.
The required recurrence relation is:
> dp[i][j][X] = dp[i][j + 1][X & arr[i][j]] + dp[i + 1][j][X & arr[i][j]]
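In the implementations below, the base cases are: any state whose accumulated
AND value **X** has become 0 contributes nothing, and dp[N – 1][N – 1][X] = 1
for every non-zero **X**.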
Below is the implementation of the above approach:
## C++
// C++ implementation of the approach
#include <bits/stdc++.h>
#define n 3
#define maxV 20
using namespace std;
// 3d array to store
// states of dp
int dp[n][n][maxV];
// Array to determine whether
// a state has been solved before
int v[n][n][maxV];
// Function to return the count of required paths
int countWays(int i, int j, int x, int arr[][n])
{
// Base cases
if (i == n || j == n)
return 0;
x = (x & arr[i][j]);
if (x == 0)
return 0;
if (i == n - 1 && j == n - 1)
return 1;
// If a state has been solved before
// it won't be evaluated again
if (v[i][j][x])
return dp[i][j][x];
v[i][j][x] = 1;
// Recurrence relation
dp[i][j][x] = countWays(i + 1, j, x, arr)
+ countWays(i, j + 1, x, arr);
return dp[i][j][x];
}
// Driver code
int main()
{
int arr[n][n] = { { 1, 2, 1 },
{ 1, 1, 0 },
{ 2, 1, 1 } };
cout << countWays(0, 0, arr[0][0], arr);
return 0;
}
## Java
// Java implementation of the approach
class GFG {
static int n = 3;
static int maxV = 20;
// 3d array to store
// states of dp
static int[][][] dp = new int[n][n][maxV];
// Array to determine whether
// a state has been solved before
static int[][][] v = new int[n][n][maxV];
// Function to return the count of required paths
static int countWays(int i, int j,
int x, int arr[][])
{
// Base cases
if (i == n || j == n) {
return 0;
}
x = (x & arr[i][j]);
if (x == 0) {
return 0;
}
if (i == n - 1 && j == n - 1) {
return 1;
}
// If a state has been solved before
// it won't be evaluated again
if (v[i][j][x] == 1) {
return dp[i][j][x];
}
v[i][j][x] = 1;
// Recurrence relation
dp[i][j][x] = countWays(i + 1, j, x, arr)
+ countWays(i, j + 1, x, arr);
return dp[i][j][x];
}
// Driver code
public static void main(String[] args)
{
int arr[][] = { { 1, 2, 1 },
{ 1, 1, 0 },
{ 2, 1, 1 } };
System.out.println(countWays(0, 0, arr[0][0], arr));
}
}
// This code contributed by Rajput-Ji
## Python3
# Python3 implementation of the approach
n = 3
maxV = 20

# 3d array to store states of dp
dp = [[[0 for i in range(maxV)]
       for i in range(n)]
      for i in range(n)]

# Array to determine whether
# a state has been solved before
v = [[[0 for i in range(maxV)]
      for i in range(n)]
     for i in range(n)]

# Function to return
# the count of required paths
def countWays(i, j, x, arr):

    # Base cases
    if (i == n or j == n):
        return 0
    x = (x & arr[i][j])
    if (x == 0):
        return 0
    if (i == n - 1 and j == n - 1):
        return 1

    # If a state has been solved before
    # it won't be evaluated again
    if (v[i][j][x]):
        return dp[i][j][x]
    v[i][j][x] = 1

    # Recurrence relation
    dp[i][j][x] = countWays(i + 1, j, x, arr) + \
        countWays(i, j + 1, x, arr)
    return dp[i][j][x]

# Driver code
arr = [[1, 2, 1],
       [1, 1, 0],
       [2, 1, 1]]

print(countWays(0, 0, arr[0][0], arr))

# This code is contributed by Mohit Kumar
## C#
// C# implementation of the approach
using System;
class GFG
{
static int n = 3;
static int maxV = 20;
// 3d array to store
// states of dp
static int[,,] dp = new int[n, n, maxV];
// Array to determine whether
// a state has been solved before
static int[,,] v = new int[n, n, maxV];
// Function to return the count of required paths
static int countWays(int i, int j,
int x, int [,]arr)
{
// Base cases
if (i == n || j == n)
{
return 0;
}
x = (x & arr[i, j]);
if (x == 0)
{
return 0;
}
if (i == n - 1 && j == n - 1)
{
return 1;
}
// If a state has been solved before
// it won't be evaluated again
if (v[i, j, x] == 1)
{
return dp[i, j, x];
}
v[i, j, x] = 1;
// Recurrence relation
dp[i, j, x] = countWays(i + 1, j, x, arr)
+ countWays(i, j + 1, x, arr);
return dp[i, j, x];
}
// Driver code
public static void Main()
{
int [,]arr = { { 1, 2, 1 },
{ 1, 1, 0 },
{ 2, 1, 1 } };
Console.WriteLine(countWays(0, 0, arr[0,0], arr));
}
}
// This code is contributed by AnkitRai01
**Output:**
1
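For the driver matrix used here, only the path arr[0][0] → arr[1][0] →
arr[1][1] → arr[2][1] → arr[2][2] (all ones) keeps the AND value non-zero;
every other path passes through a 2 (and 1 & 2 = 0) or through the 0, so the
count is 1.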
|
[
"[email protected]"
] |