Dataset schema (column name, dtype, observed range or distinct values; ⌀ marks nullable columns):

| column | dtype | observed range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 distinct values |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 distinct values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 distinct values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 distinct values |
| src_encoding | string | 26 distinct values |
| language | string | 1 distinct value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 distinct values |
| content | string | length 3 – 10.2M |
| authors | list | length 1 |
| author_id | string | length 1–132 |
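For orientation, a minimal sketch of streaming rows with this schema via the Hugging Face `datasets` library; the dataset identifier below is a hypothetical placeholder, not the actual repository name:

```python
from datasets import load_dataset

# "org/python-source-files" is a placeholder id for a dataset with this schema.
ds = load_dataset("org/python-source-files", split="train", streaming=True)
for row in ds.take(3):
    # Each row carries repo/file provenance plus the raw file text.
    print(row["repo_name"], row["path"], row["length_bytes"])
```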
blob_id: 60016e02566b22722c3290ab83e45f138859db6d
directory_id: d4a78a9099884c1e1c203f7e5b78b844de053ff7
path: /tensorflow/tools/compatibility/reorders_v2.py
content_id: 3f05aea6cadeaf75ab57d43fe56d4a2d01b9ed3b
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: pint1022/tensorflow
snapshot_id: b4b7632c0f833135a0bb37ab5a939a6c1ec51ef6
revision_id: ab1f872bbcf7749112f76a7f9ba17406e8fbbf4e
branch_name: refs/heads/master
visit_date: 2020-04-15T00:16:48.132100
revision_date: 2019-02-05T17:48:11
committer_date: 2019-02-05T17:48:11
github_id: 164233910
star_events_count: 2
fork_events_count: 2
gha_license_id: Apache-2.0
gha_event_created_at: 2019-01-05T16:53:25
gha_created_at: 2019-01-05T16:53:25
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8953
extension: py
content:
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
This file should be updated whenever a function is added to
self.reordered_function_names in tf_upgrade_v2.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
reorders = {
'tf.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.batch_gather': ['params', 'indices', 'name'],
'tf.batch_to_space': ['input', 'crops', 'block_size', 'name'],
'tf.boolean_mask': ['tensor', 'mask', 'name', 'axis'],
'tf.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
'tf.convert_to_tensor': ['value', 'dtype', 'name', 'preferred_dtype'],
'tf.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
'tf.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
'tf.feature_column.categorical_column_with_vocabulary_file': ['key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'default_value', 'dtype'],
'tf.io.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
'tf.io.parse_example': ['serialized', 'features', 'name', 'example_names'],
'tf.io.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
'tf.io.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
'tf.io.serialize_sparse': ['sp_input', 'name', 'out_type'],
'tf.linalg.norm': ['tensor', 'ord', 'axis', 'keepdims', 'name', 'keep_dims'],
'tf.math.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.math.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.math.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
'tf.math.in_top_k': ['predictions', 'targets', 'k', 'name'],
'tf.math.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
'tf.nn.convolution': ['input', 'filter', 'padding', 'strides', 'dilation_rate', 'name', 'data_format'],
'tf.nn.crelu': ['features', 'name', 'axis'],
'tf.nn.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
'tf.nn.depthwise_conv2d': ['input', 'filter', 'strides', 'padding', 'rate', 'name', 'data_format'],
'tf.nn.embedding_lookup': ['params', 'ids', 'partition_strategy', 'name', 'validate_indices', 'max_norm'],
'tf.nn.embedding_lookup_sparse': ['params', 'sp_ids', 'sp_weights', 'partition_strategy', 'name', 'combiner', 'max_norm'],
'tf.nn.in_top_k': ['predictions', 'targets', 'k', 'name'],
'tf.nn.moments': ['x', 'axes', 'shift', 'name', 'keep_dims'],
'tf.nn.pool': ['input', 'window_shape', 'pooling_type', 'padding', 'dilation_rate', 'strides', 'name', 'data_format'],
'tf.nn.separable_conv2d': ['input', 'depthwise_filter', 'pointwise_filter', 'strides', 'padding', 'rate', 'name', 'data_format'],
'tf.nn.space_to_batch': ['input', 'paddings', 'block_size', 'name'],
'tf.nn.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
'tf.nn.weighted_moments': ['x', 'axes', 'frequency_weights', 'name', 'keep_dims'],
'tf.norm': ['tensor', 'ord', 'axis', 'keepdims', 'name', 'keep_dims'],
'tf.pad': ['tensor', 'paddings', 'mode', 'name', 'constant_values'],
'tf.parse_example': ['serialized', 'features', 'name', 'example_names'],
'tf.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
'tf.quantize_v2': ['input', 'min_range', 'max_range', 'T', 'mode', 'name', 'round_mode'],
'tf.random.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
'tf.random.poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
'tf.random_poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
'tf.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices'],
'tf.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reverse_sequence': ['input', 'seq_lengths', 'seq_axis', 'batch_axis', 'name', 'seq_dim', 'batch_dim'],
'tf.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
'tf.serialize_sparse': ['sp_input', 'name', 'out_type'],
'tf.shape': ['input', 'name', 'out_type'],
'tf.size': ['input', 'name', 'out_type'],
'tf.space_to_batch': ['input', 'paddings', 'block_size', 'name'],
'tf.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
'tf.sparse.add': ['a', 'b', 'threshold', 'thresh'],
'tf.sparse.concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim'],
'tf.sparse.reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
'tf.sparse.segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
'tf.sparse_add': ['a', 'b', 'threshold', 'thresh'],
'tf.sparse_concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim'],
'tf.sparse_matmul': ['a', 'b', 'transpose_a', 'transpose_b', 'a_is_sparse', 'b_is_sparse', 'name'],
'tf.sparse_reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
'tf.sparse_segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
'tf.strings.length': ['input', 'name', 'unit'],
'tf.strings.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices'],
'tf.strings.substr': ['input', 'pos', 'len', 'name', 'unit'],
'tf.substr': ['input', 'pos', 'len', 'name', 'unit'],
'tf.transpose': ['a', 'perm', 'name', 'conjugate'],
'tf.tuple': ['tensors', 'name', 'control_inputs'],
'tf.while_loop': ['cond', 'body', 'loop_vars', 'shape_invariants', 'parallel_iterations', 'back_prop', 'swap_memory', 'name', 'maximum_iterations', 'return_same_structure']
}
authors: ["[email protected]"]
author_id:
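The `reorders` map above pairs each TF 1.x function with its full TF 2.x keyword order, which lets the upgrade script rewrite positional arguments as explicit keyword arguments. A minimal illustrative sketch of that idea (the `to_keywords` helper is hypothetical, not the actual `tf_upgrade_v2` implementation):

```python
# One entry from the reorders map, reproduced for illustration.
reorders = {'tf.argmax': ['input', 'axis', 'name', 'dimension', 'output_type']}

def to_keywords(func_name, args):
    """Pair positional args with the recorded TF 2.x parameter names."""
    names = reorders[func_name]
    return ', '.join('%s=%r' % (n, a) for n, a in zip(names, args))

print(to_keywords('tf.argmax', ['logits', 1]))  # input='logits', axis=1
```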
blob_id: e478a8dd22f338846584b48de48cc352e36990e4
directory_id: a574d0c0ebc8e17eb641777f93544c0ae43850c9
path: /final_problem_set/3_blackjack.py
content_id: 0cf5d30adb0ea1093e5eedc11666ee8dcdb2ceff
detected_licenses: []
license_type: no_license
repo_name: broepke/GTx
snapshot_id: 1e33c97d0f86e95124ceb5f0436f965154822466
revision_id: e12143c9b1fc93d4489eb0f6c093637503139bf6
branch_name: refs/heads/master
visit_date: 2020-04-08T09:35:41.884572
revision_date: 2020-01-03T03:37:34
committer_date: 2020-01-03T03:37:34
github_id: 159230824
star_events_count: 3
fork_events_count: 2
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3883
extension: py
content:
# -----------------------------------------------------------
# In this problem, we're going to explore a little of how
# game AI works. We'll do this with a simple problem: building
# an agent to play the popular card game Blackjack.
#
# Blackjack is a card game played with a standard 52-card
# deck. Suits do not matter in Blackjack, and so we'll just
# use letters to indicate the different cards: A, 2, 3, 4, 5,
# 6, 7, 8, 9, 10, J, Q, K.
#
# The goal of Blackjack is to get as close to 21 points as
# possible without going higher. Each of the thirteen cards
# above has a point total attached: the numerals are worth
# their given value (2 points for 2, 7 points for 7, etc.).
# J, Q, and K are worth 10 points. A is worth either 1 or 11
# points, whichever is better for the player.
#
# At any time, the player has some number of cards in their
# hand. They must then make a decision of whether to Hit or
# Stay. Hit means they request an additional card, Stay means
# they stop with their current total. Players generally try
# to Hit until it is likely that another card will push them
# over 21. For example, if a player has a 5 and a 7, there is
# a relatively low chance that another card would push them
# over 21 (only J, Q, and K would do so, since 12 + 10 = 22).
# On the other hand, if they have a 5, a 6, and a 7, they will
# likely stay because any card above 3 will push them over 21
# points.
#
# The specific goal in Blackjack is to get closer to 21 than
# the dealer. Dealers must follow a set of prescribed rules
# for when to Hit and Stay. These are the rules we'll use for
# our Blackjack-playing AI.
#
# The rules are:
#
# - The dealer must Hit if their total is below 17.
# - The dealer must Stay as soon as their total is 17 or
# higher.
# - An Ace (A) should be counted as 11 if it puts the
# dealer between 17 and 21 points. If it puts them over
# 21, though, it should be counted as 1.
#
# For example, imagine the dealer's first cards are A and 3.
# Their point total is either 4 or 14, both below 17, so they
# Hit. The next card is a 9. If we count the A as 11, then
# their total is now 23 (11 + 3 + 9), and so we count the
# A as 1. Their total is 13, and so they Hit again. The next
# card is a 7, so their total is 20, so they Stay.
#
# Write a function called next_move. next_move should have
# one parameter, a string. Each character of the string will
# be a card in the dealer's current hand, such as "AK" or
# "175". The function should return one of three strings:
#
# - "Hit" if the dealer should take another card.
# - "Stay" if the dealer should not take another card.
# - "Bust" if the sum is already over 21.
#
# Remember, your function is only responsible for playing
# one move at a time. Take in a string representing the
# current hand, return "Hit", "Stay", or "Bust".
# Add your code here!
def next_move(cards):
face_cards = ["J", "Q", "K"]
total = 0
ace_count = 0
# turn the string into a list for easy processing
cards = list(cards)
# sum up the total
for item in cards:
if item.isdigit():
total += int(item)
elif item in face_cards:
total += 10
elif item == "A":
ace_count += 1
# now add the aces after the other cards have been added
for i in range(0, ace_count):
if total + 11 > 21:
total += 1
else:
total += 11
# return the dealer's response
    if total < 17:
        return "Hit"
    elif total <= 21:
        return "Stay"
    else:
        return "Bust"
# Below are some lines of code that will test your function.
# You can change the value of the variable(s) to test your
# function with different inputs.
#
# If your function works correctly, this will originally
# print: Hit, Hit, Stay, and Bust.
print(next_move("A39"))
authors: ["[email protected]"]
author_id:
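As a quick sanity check of the dealer logic above, a few illustrative calls tracing the worked example from the comments (hypothetical test lines, not part of the original file):

```python
print(next_move("A3"))    # A counts as 11: total 14, below 17 -> Hit
print(next_move("A39"))   # A must drop to 1: total 13 -> Hit
print(next_move("A397"))  # total 20, between 17 and 21 -> Stay
```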
blob_id: b0c462aa3ac6c9a36bdd6a3d58d5bea208e8680a
directory_id: 47dd9eadf5d2c7421eeb0c39d1ff0bad440ac513
path: /handprint/__version__.py
content_id: f6beec44457f0f8b28ee2024b1f5b6b6faa9147e
detected_licenses: ["CC-BY-3.0", "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: JanaldoChen/handprint
snapshot_id: 53a26f9195e96f1a649dc2c026c85f85d66ce28e
revision_id: 0bc155b65d222fae8e753f75394a6f203d37e47a
branch_name: refs/heads/master
visit_date: 2020-05-02T10:09:46.746419
revision_date: 2018-12-22T01:48:09
committer_date: 2018-12-22T01:48:09
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 844
extension: py
content:
|
# =============================================================================
# @file __version__.py
# @brief handprint version info
# @author Michael Hucka <[email protected]>
# @license Please see the file named LICENSE in the project directory
# @website https://github.com/caltechlibrary/handprint
# =============================================================================
__version__ = '0.9.0'
__title__ = 'handprint'
__description__ = 'Handprint: HANDwritten Page RecognitIoN Test for Caltech Archives'
__url__ = 'https://github.com/caltechlibrary/handprint'
__author__ = 'Michael Hucka <[email protected]>'
__email__ = '[email protected]'
__license__ = 'BSD 3-clause license -- see LICENSE file'
__copyright__ = 'Copyright (C) 2018 by the California Institute of Technology'
authors: ["[email protected]"]
author_id:
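Version modules like this are conventionally read back from the package at runtime; an illustrative consumer (assuming `handprint` is importable as a package):

```python
from handprint.__version__ import __title__, __version__

print(f"{__title__} {__version__}")  # e.g. "handprint 0.9.0"
```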
blob_id: 970f9a3a0c4bb549735c394ed87a95815931f452
directory_id: f29336e5442d59961e22f925b3e6ee96ed3bf0ca
path: /src/mnemotopy/middleware/locale.py
content_id: 8f090b2d5fe154c0ff33927932ce488927e364f7
detected_licenses: []
license_type: no_license
repo_name: louiseGrandjonc/mnemotopy
snapshot_id: be00b304924689ecd1cfd714da8416aaac55466c
revision_id: 7744745a7fcba7b5721577ce18c654df48aaa1ba
branch_name: refs/heads/master
visit_date: 2022-12-10T07:32:47.100612
revision_date: 2020-01-02T10:01:03
committer_date: 2020-01-02T10:01:03
github_id: 84743189
star_events_count: 1
fork_events_count: 1
gha_license_id: null
gha_event_created_at: 2022-12-08T03:22:03
gha_created_at: 2017-03-12T17:18:10
gha_language: JavaScript
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2327
extension: py
content:
|
from django.conf import settings
from django.conf.urls.i18n import is_language_prefix_patterns_used
from django.http import HttpResponseRedirect
from django.urls import get_script_prefix, is_valid_path
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.middleware.locale import LocaleMiddleware
class CustomLocaleMiddleware(LocaleMiddleware):
def process_request(self, request):
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
i18n_patterns_used, _ = is_language_prefix_patterns_used(urlconf)
language = translation.get_language_from_request(request, check_path=i18n_patterns_used)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
i18n_patterns_used, prefixed_default_language = is_language_prefix_patterns_used(urlconf)
if not language_from_path and language != settings.LANGUAGE_CODE:
language_path = '/%s%s' % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
path_needs_slash = (
not path_valid and (
settings.APPEND_SLASH and not language_path.endswith('/') and
is_valid_path('%s/' % language_path, urlconf)
)
)
if path_valid or path_needs_slash:
script_prefix = get_script_prefix()
# Insert language after the script prefix and before the
# rest of the URL
language_url = request.get_full_path(force_append_slash=path_needs_slash).replace(
script_prefix,
'%s%s/' % (script_prefix, language),
1
)
return self.response_redirect_class(language_url)
if not (i18n_patterns_used and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
authors: ["[email protected]"]
author_id:
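To take effect, a middleware class like this has to be registered in the project's settings; a minimal sketch, assuming `src/` is the import root so the module path is `mnemotopy.middleware.locale`:

```python
# settings.py (illustrative): the subclass replaces Django's built-in
# LocaleMiddleware in the usual position, after SessionMiddleware.
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'mnemotopy.middleware.locale.CustomLocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
]
```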
blob_id: 5ffb6b6dd9d960be90008d1703cfd4f5baa5923c
directory_id: 915c31ce84a826d225bcb1cc5f1e0323e712f6e4
path: /phase_1.py
content_id: 23d9a60800f43eabfa859f9270499bce8902d0f7
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: mac389/overdosed
snapshot_id: 64162aaf8f57f7ca57bcc95678d0d18e231cda87
revision_id: 434255db4ea36581c9f94c7aa09ca6ca15169e8a
branch_name: refs/heads/master
visit_date: 2021-01-10T07:44:41.804936
revision_date: 2015-06-25T23:22:51
committer_date: 2015-06-25T23:22:51
github_id: 36990551
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1081
extension: py
content:
|
import os, gzip, json,tokenize,token
import nltk, csv
from nltk.corpus import stopwords
import utils as tech
import matplotlib.pyplot as plt
from pprint import pprint
#Rule-in component
CONTROL = os.path.join(os.getcwd(),'data','control')
CASE = os.path.join(os.getcwd(),'data','case')
corpus = {'case':CASE}
text = {}
for condition,path in corpus.iteritems():
if not os.path.isfile(os.path.join(path,'combined.txt')):
for filename in os.listdir(path):
if filename.endswith('.gz'):
'''
Salvaging attempt, this will only pull one tweet text and id from each file.
This will hurt controls more than case tweets, which is ok.
'''
with gzip.open(os.path.join(path,filename),'rb') as fid, open(os.path.join(path,'combined.txt'),'a+') as outfile:
print>>outfile, '\t '.join(tech.get_field_damaged_string(fid.read()))
else:
text[condition] = open(os.path.join(path,'combined.txt'),'rb').read().splitlines()
#reddit and r/Drugs in particular another good source of information
#Identify most common words that are not stopwords in case series
authors: ["[email protected]"]
author_id:
blob_id: 1e543f53fd9bfe1768f6b104c50eb5b05ea71eaf
directory_id: be1907ef4b4eb56567d8df5703a98a7a6a34c88a
path: /modules/bsg/bsg_encoder.py
content_id: 75081a8fa6698841440e75fcde77b8df308846f4
detected_licenses: []
license_type: no_license
repo_name: griff4692/LMC
snapshot_id: f9bd4803c5b37bbae98cbe5014a719d3c9ea7a2f
revision_id: f07dfa472d3f6bfd7ce7f7ac7168687beb8efdaf
branch_name: refs/heads/master
visit_date: 2023-01-30T00:37:53.505757
revision_date: 2020-12-08T17:35:48
committer_date: 2020-12-08T17:35:48
github_id: 223016633
star_events_count: 13
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2448
extension: py
content:
|
import os
import sys
import torch
from torch import nn
import torch.utils.data
home_dir = os.path.expanduser('~/LMC/')
sys.path.insert(0, os.path.join(home_dir, 'utils'))
from compute_utils import compute_att
class BSGEncoder(nn.Module):
"""
BSG Encoder as described in original BSG paper. It accepts a center word and context words
and outputs Gaussian parameters mu and sigma.
Its parameters are the shared variational parameters for the distribution q(z|w, c)
where z represents latent meaning, w the center word, and c the list of context tokens
"""
def __init__(self, vocab_size, input_dim=100, hidden_dim=64):
super(BSGEncoder, self).__init__()
self.embeddings = nn.Embedding(vocab_size, input_dim, padding_idx=0)
self.dropout = nn.Dropout(0.2)
self.lstm = nn.LSTM(input_dim * 2, hidden_dim, bidirectional=True, batch_first=True)
self.att = nn.Linear(hidden_dim * 2, 1, bias=True)
self.u = nn.Linear(hidden_dim * 2, input_dim, bias=True)
self.v = nn.Linear(hidden_dim * 2, 1, bias=True)
def forward(self, center_ids, context_ids, mask, token_mask_p=0.2):
"""
:param center_ids: LongTensor of batch_size
:param context_ids: LongTensor of batch_size x 2 * context_window
:param mask: BoolTensor of batch_size x 2 * context_window (which context_ids are just the padding idx)
:return: mu (batch_size, latent_dim), var (batch_size, 1)
"""
batch_size, num_context_ids = context_ids.shape
center_embedding = self.embeddings(center_ids)
if token_mask_p is not None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
context_mask = torch.FloatTensor(batch_size, num_context_ids).uniform_().to(device) < token_mask_p
mask.masked_fill_(context_mask, True)
context_embedding = self.embeddings(context_ids)
center_embedding_tiled = center_embedding.unsqueeze(1).repeat(1, num_context_ids, 1)
merged_embeds = torch.cat([center_embedding_tiled, context_embedding], dim=-1)
merged_embeds = self.dropout(merged_embeds)
mask_tiled = mask.unsqueeze(-1).repeat(1, 1, merged_embeds.size()[-1])
merged_embeds.masked_fill_(mask_tiled, 0)
h_reps, (h, _) = self.lstm(merged_embeds)
h_sum = self.dropout(compute_att(h_reps, mask, self.att))
return self.u(h_sum), self.v(h_sum).exp()
authors: ["[email protected]"]
author_id:
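A quick illustrative shape check of the encoder (hypothetical values; assumes `compute_att` is importable as in the file, and passes `token_mask_p=None` to skip the random context masking):

```python
import torch

encoder = BSGEncoder(vocab_size=1000, input_dim=100, hidden_dim=64)
center = torch.randint(1, 1000, (8,))        # batch_size = 8
context = torch.randint(1, 1000, (8, 10))    # 2 * context_window = 10
mask = torch.zeros(8, 10, dtype=torch.bool)  # no padding positions
mu, var = encoder(center, context, mask, token_mask_p=None)
print(mu.shape, var.shape)                   # (8, 100) and (8, 1)
```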
blob_id: 2eaf99a83d401d021b7fd901f0ccfb97a268d4f6
directory_id: 6f9a5717fed38b0a79c399f7e5da55c6a461de6d
path: /Templates/RelativeCoord.py
content_id: 850330f5311780fa1954f91eb4ca1b86c6726a15
detected_licenses: []
license_type: no_license
repo_name: Alfred-Walker/pythonps
snapshot_id: d4d3b0f7fe93c138d02651e05ca5165825676a5e
revision_id: 81ef8c712c36aa83d1c53aa50886eb845378d035
branch_name: refs/heads/master
visit_date: 2022-04-16T21:34:39.316565
revision_date: 2020-04-10T07:50:46
committer_date: 2020-04-10T07:50:46
github_id: 254570527
star_events_count: 1
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 60
extension: py
content:
|
# Four cells: up, down, left, right
dx = [-1, 0, 1, 0]
dy = [0, -1, 0, 1]
authors: ["[email protected]"]
author_id:
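These offset arrays are the usual template for visiting a cell's four neighbors on a grid; an illustrative loop (coordinates are hypothetical):

```python
x, y = 2, 3  # current cell
for i in range(4):
    nx, ny = x + dx[i], y + dy[i]
    print(nx, ny)  # each of the four adjacent cells in turn
```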
blob_id: c56aaf008716622e25169ac587aa548837eeda2e
directory_id: 3e5cc67b0c8336114110d1558d481a3ff5d0eb3c
path: /apps/h5/views.py
content_id: f565ab6e6efb0570e5fea943a59e3da852b45b65
detected_licenses: []
license_type: no_license
repo_name: xiaoxiaolulu/mtserver
snapshot_id: 32b46488705bc2c962d60c288c482f81dca0a2b0
revision_id: 912222bbf1cd7c9bd242e96062570073b6544f6a
branch_name: refs/heads/master
visit_date: 2021-02-22T07:42:57.579124
revision_date: 2020-03-10T17:50:28
committer_date: 2020-03-10T17:50:28
github_id: 245372121
star_events_count: 5
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3677
extension: py
content:
|
import random
from rest_framework import views
from django.contrib.auth import get_user_model
from apps.h5.serializers import LoginSerializer
from apps.h5.throttles import SMSCodeRateThrottle
from apps.meituan.models import Merchant
from apps.meituan.serializers import MerchantSerializer
from apps.mtauth.authentications import generate_jwt
from apps.mtauth.serializers import UserSerializer
from utils.CCPSDK import CCPRestSDK
from rest_framework.response import Response
from rest_framework import status
from django.core.cache import cache
from django.utils.timezone import now
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework.pagination import PageNumberPagination
from rest_framework import generics
from rest_framework import filters
User = get_user_model()
class SmSCodeView(views.APIView):
throttle_classes = [SMSCodeRateThrottle]
def __init__(self, *args, **kwargs):
super(SmSCodeView, self).__init__(*args, **kwargs)
self.number = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
def generate_sms_code(self):
return "".join(random.choices(self.number, k=4))
def get(self, request):
telephone = request.GET.get('tel')
if telephone:
auth_token = 'a2573d4b2d9a4136b23cc54911a999b7'
auth_sid = '8aaf070870bf34550170bf6075260039'
app_id = '8aaf070870bf34550170bf607590003f'
rest = CCPRestSDK.REST(auth_sid, auth_token, app_id)
code = self.generate_sms_code()
result = rest.sendTemplateSMS(telephone, [code, 5], "1")
cache.set(telephone, code, 60 * 5)
return Response({"code": code})
# if result['statusCode'] == '000000':
# return Response("success")
# else:
# return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class LoginView(views.APIView):
def generate_sms_code(self):
number = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
return "".join(random.choices(number, k=6))
def post(self, request):
serializer = LoginSerializer(data=request.data)
if serializer.is_valid():
telephone = serializer.validated_data.get('telephone')
try:
user = User.objects.get(telephone=telephone)
user.last_login = now()
user.save()
        except User.DoesNotExist:
username = "美团用户" + self.generate_sms_code()
password = ""
user = User.objects.create(username=username, password=password, telephone=telephone, last_login=now())
serializer = UserSerializer(user)
token = generate_jwt(user)
return Response({"user": serializer.data, "token": token})
else:
return Response(data={"message": dict(serializer.errors)}, status=status.HTTP_400_BAD_REQUEST)
class MerchantPagination(PageNumberPagination):
page_size = 10
page_query_param = 'page'
class MerchantViewSet(
viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.RetrieveModelMixin
):
queryset = Merchant.objects.all()
serializer_class = MerchantSerializer
pagination_class = MerchantPagination
class MerchantSearchView(generics.ListAPIView):
class MerchantSearchFilter(filters.SearchFilter):
search_param = 'q'
queryset = Merchant.objects.all()
serializer_class = MerchantSerializer
filter_backends = [MerchantSearchFilter]
search_fields = ['name', 'categories__name', 'categories__goods_list__name']
authors: ["[email protected]"]
author_id:
blob_id: d23e8296c9d073516f7dfbbf011c50abf54fda86
directory_id: 97f861630898e1aa38dd6f08f351640688b3c17f
path: /tensorflow/python/ops/metrics.py
content_id: 04cd4ecbd4095391abec2422add886a0a9c469f3
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: alexerwu/tensorflow
snapshot_id: 31e33020fcfd4cd405a2f4c4ddbad0839352935b
revision_id: 95195bd0ef742f2876a1694ebb4c018f858af062
branch_name: refs/heads/master
visit_date: 2021-01-12T08:28:27.693374
revision_date: 2016-12-15T17:34:54
committer_date: 2016-12-15T17:34:54
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 111386
extension: py
content:
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation-related metrics.
@@accuracy
@@auc
@@mean
@@mean_absolute_error
@@mean_cosine_distance
@@mean_iou
@@mean_relative_error
@@mean_squared_error
@@mean_tensor
@@percentage_below
@@precision
@@precision_at_thresholds
@@recall
@@recall_at_k
@@recall_at_thresholds
@@root_mean_squared_error
@@sensitivity_at_specificity
@@sparse_average_precision_at_k
@@sparse_precision_at_k
@@specificity_at_sensitivity
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
def _local_variable(initial_value, validate_shape=True, name=None):
"""Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
Returns:
New variable.
"""
return variables.Variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape, name=name)
def _remove_squeezable_dimensions(labels, predictions, weights):
"""Internal version of _remove_squeezable_dimensions which handles weights.
Squeezes `predictions` and `labels` if their rank differs by 1.
Squeezes `weights` if its rank is 1 more than the new rank of `predictions`
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
labels: Label values, a `Tensor` whose dimensions match `predictions`.
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
weights: Optional weight `Tensor`. It will be squeezed if its rank is 1
more than the new rank of `predictions`
Returns:
Tuple of `predictions`, `labels` and `weights`, possibly with the last
dimension squeezed.
"""
labels, predictions = confusion_matrix.remove_squeezable_dimensions(
labels, predictions)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if weights is not None:
weights = ops.convert_to_tensor(weights)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
weights_shape = weights.get_shape()
weights_rank = weights_shape.ndims
if (predictions_rank is not None) and (weights_rank is not None):
# Use static rank.
if weights_rank - predictions_rank == 1:
weights = array_ops.squeeze(weights, [-1])
elif (weights_rank is None) or (
weights_shape.dims[-1].is_compatible_with(1)):
# Use dynamic rank
weights = control_flow_ops.cond(
math_ops.equal(array_ops.rank(weights),
math_ops.add(array_ops.rank(predictions), 1)),
lambda: array_ops.squeeze(weights, [-1]),
lambda: weights)
return labels, predictions, weights
def _maybe_expand_labels(labels, predictions):
"""If necessary, expand `labels` along last dimension to match `predictions`.
Args:
labels: `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
num_labels=1, in which case the result is an expanded `labels` with shape
[D1, ... DN, 1].
predictions: `Tensor` with shape [D1, ... DN, num_classes].
Returns:
`labels` with the same rank as `predictions`.
Raises:
ValueError: if `labels` has invalid shape.
"""
with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
# If sparse, expand sparse shape.
if isinstance(labels, sparse_tensor.SparseTensor):
return control_flow_ops.cond(
math_ops.equal(
array_ops.rank(predictions),
array_ops.size(labels.dense_shape) + 1),
lambda: sparse_ops.sparse_reshape( # pylint: disable=g-long-lambda
labels,
shape=array_ops.concat(0, (labels.dense_shape, (1,))),
name=scope),
lambda: labels)
# Otherwise, try to use static shape.
labels_rank = labels.get_shape().ndims
if labels_rank is not None:
predictions_rank = predictions.get_shape().ndims
if predictions_rank is not None:
if predictions_rank == labels_rank:
return labels
if predictions_rank == labels_rank + 1:
return array_ops.expand_dims(labels, -1, name=scope)
raise ValueError(
'Unexpected labels shape %s for predictions shape %s.' % (
labels.get_shape(), predictions.get_shape()))
# Otherwise, use dynamic shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1),
lambda: array_ops.expand_dims(labels, -1, name=scope),
lambda: labels)
def _create_local(name, shape, collections=None, validate_shape=True,
dtype=dtypes.float32):
"""Creates a new local variable.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
collections: A list of collection names to which the Variable will be added.
validate_shape: Whether to validate the shape of the variable.
dtype: Data type of the variables.
Returns:
The created variable.
"""
# Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES
collections = list(collections or [])
collections += [ops.GraphKeys.LOCAL_VARIABLES]
return variables.Variable(
initial_value=array_ops.zeros(shape, dtype=dtype),
name=name,
trainable=False,
collections=collections,
validate_shape=validate_shape)
def _broadcast_weights(weights, values):
"""Broadcast `weights` to the same shape as `values`.
This returns a version of `weights` following the same broadcast rules as
`mul(weights, values)`. When computing a weighted average, use this function
to broadcast `weights` before summing them; e.g.,
`reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
Args:
weights: `Tensor` whose shape is broadcastable to `values`.
values: `Tensor` of any shape.
Returns:
`weights` broadcast to `values` shape.
"""
weights_shape = weights.get_shape()
values_shape = values.get_shape()
if (weights_shape.is_fully_defined() and
values_shape.is_fully_defined() and
weights_shape.is_compatible_with(values_shape)):
return weights
return math_ops.mul(
weights, array_ops.ones_like(values), name='broadcast_weights')
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
def _safe_scalar_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is 0.
Args:
numerator: A scalar `float64` `Tensor`.
denominator: A scalar `float64` `Tensor`.
name: Name for the returned op.
Returns:
0 if `denominator` == 0, else `numerator` / `denominator`
"""
numerator.get_shape().with_rank_at_most(1)
denominator.get_shape().with_rank_at_most(1)
return control_flow_ops.cond(
math_ops.equal(
array_ops.constant(0.0, dtype=dtypes.float64), denominator),
lambda: array_ops.constant(0.0, dtype=dtypes.float64),
lambda: math_ops.div(numerator, denominator),
name=name)
def mean(values, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes the (weighted) mean of the given values.
The `mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = _create_local('total', shape=[])
count = _create_local('count', shape=[])
if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
num_values = math_ops.reduce_sum(_broadcast_weights(weights, values))
else:
num_values = math_ops.to_float(array_ops.size(values))
total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
count_compute_op = state_ops.assign_add(count, num_values)
mean_t = _safe_div(total, count, 'value')
with ops.control_dependencies([total_compute_op, count_compute_op]):
update_op = _safe_div(total, count, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_t)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
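# Illustrative note (not part of the original file): `mean` returns an
# idempotent value tensor plus an update op. A sketch of how this streaming
# pattern is typically driven in graph mode:
#   value, update_op = mean(values_tensor)
#   sess.run(variables.local_variables_initializer())
#   for _ in range(num_batches):
#     sess.run(update_op)    # accumulates `total` and `count`
#   print(sess.run(value))   # final total / count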
def accuracy(labels, predictions, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Calculates how often `predictions` matches `labels`.
The `accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
predictions: The predicted values, a `Tensor` of any shape.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
return mean(is_correct, weights, metrics_collections,
updates_collections, name or 'accuracy')
def _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=None, includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
        raise ValueError('Invalid key: %s.' % include)
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.pack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
weights = math_ops.to_float(weights)
weights_tiled = array_ops.tile(array_ops.reshape(_broadcast_weights(
weights, predictions), [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_p = _create_local('true_positives', shape=[num_thresholds])
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(
true_p, math_ops.reduce_sum(is_true_positive, 1))
values['tp'] = true_p
if 'fn' in includes:
false_n = _create_local('false_negatives', shape=[num_thresholds])
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(
false_n, math_ops.reduce_sum(is_false_negative, 1))
values['fn'] = false_n
if 'tn' in includes:
true_n = _create_local('true_negatives', shape=[num_thresholds])
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(
true_n, math_ops.reduce_sum(is_true_negative, 1))
values['tn'] = true_n
if 'fp' in includes:
false_p = _create_local('false_positives', shape=[num_thresholds])
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(
false_p, math_ops.reduce_sum(is_false_positive, 1))
values['fp'] = false_p
return values, update_ops
def auc(labels, predictions, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None,
curve='ROC', name=None):
"""Computes the approximate AUC via a Riemann sum.
The `auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
values by the false positive rate, while the area under the PR-curve is
computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'auc', (labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' %
(curve))
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_auc(tp, fn, tn, fp, name):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
x = fp_rate
y = rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
x = rec
y = prec
return math_ops.reduce_sum(math_ops.mul(
x[:num_thresholds - 1] - x[1:],
(y[:num_thresholds - 1] + y[1:]) / 2.), name=name)
# sum up the areas of all the trapeziums
auc_value = compute_auc(
values['tp'], values['fn'], values['tn'], values['fp'], 'value')
update_op = compute_auc(
update_ops['tp'], update_ops['fn'], update_ops['tn'], update_ops['fp'],
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, auc_value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return auc_value, update_op
def mean_absolute_error(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
sum of `weights`
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
  labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
absolute_errors = math_ops.abs(predictions - labels)
return mean(absolute_errors, weights, metrics_collections,
updates_collections, name or 'mean_absolute_error')
def mean_cosine_distance(labels, predictions, dim, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of arbitrary shape.
predictions: A `Tensor` of the same shape as `labels`.
dim: The dimension along which the cosine distance is computed.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.mul(predictions, labels)
radial_diffs = math_ops.reduce_sum(radial_diffs,
reduction_indices=[dim,],
keep_dims=True)
mean_distance, update_op = mean(radial_diffs, weights,
None,
None,
name or 'mean_cosine_distance')
mean_distance = math_ops.sub(1.0, mean_distance)
update_op = math_ops.sub(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
def mean_iou(labels,
predictions,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened, if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened, if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'mean_iou', (predictions, labels, weights)):
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Local variable to accumulate the predictions in the confusion matrix.
cm_dtype = dtypes.int64 if weights is not None else dtypes.float64
total_cm = _create_local('total_confusion_matrix',
shape=[num_classes, num_classes], dtype=cm_dtype)
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)
# Flatten the input if its rank > 1.
predictions_rank = predictions.get_shape().ndims
if predictions_rank > 1:
predictions = array_ops.reshape(predictions, [-1])
labels_rank = labels.get_shape().ndims
if labels_rank > 1:
labels = array_ops.reshape(labels, [-1])
if weights is not None:
weights_rank = weights.get_shape().ndims
if weights_rank > 1:
weights = array_ops.reshape(weights, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
labels, predictions, num_classes, weights=weights, dtype=cm_dtype)
update_op = state_ops.assign_add(total_cm, current_cm)
def compute_mean_iou(name):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
denominator = sum_over_row + sum_over_col - cm_diag
# If the value of the denominator is 0, set it to 1 to avoid
# zero division.
denominator = array_ops.where(
math_ops.greater(denominator, 0),
denominator,
array_ops.ones_like(denominator))
iou = math_ops.div(cm_diag, denominator)
return math_ops.reduce_mean(iou, name=name)
mean_iou_v = compute_mean_iou('mean_iou')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_iou_v)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_iou_v, update_op
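# Example (illustrative sketch, not part of this module): streaming mean IOU
# over one hypothetical batch, assuming `import tensorflow as tf` and a
# TF1-style graph/session.
#
#   labels = tf.constant([0, 0, 1, 1])
#   predictions = tf.constant([0, 1, 1, 1])
#   miou, update_op = mean_iou(labels, predictions, num_classes=2)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)      # IOU(class 0) = 1/2, IOU(class 1) = 2/3
#     print(sess.run(miou))    # ~0.583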
def mean_relative_error(labels, predictions, normalizer, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(
predictions, normalizer)
predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
relative_errors = array_ops.where(
math_ops.equal(normalizer, 0.0),
array_ops.zeros_like(labels),
math_ops.div(math_ops.abs(labels - predictions), normalizer))
return mean(relative_errors, weights, metrics_collections,
updates_collections, name or 'mean_relative_error')
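# Example (illustrative sketch): normalizing by the labels themselves, so each
# error is |label - prediction| / label (and 0 wherever the normalizer is 0).
#
#   labels = tf.constant([1.0, 2.0, 4.0])
#   predictions = tf.constant([1.5, 2.0, 2.0])
#   err, update_op = mean_relative_error(labels, predictions, labels)
#   # relative errors: [0.5, 0.0, 0.5] -> streaming mean ~= 0.333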
def mean_squared_error(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
squared_error = math_ops.square(labels - predictions)
return mean(squared_error, weights, metrics_collections,
updates_collections, name or 'mean_squared_error')
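# Example (illustrative sketch, TF1-style session assumed):
#
#   labels = tf.constant([1.0, 2.0, 3.0])
#   predictions = tf.constant([1.0, 2.0, 5.0])
#   mse, update_op = mean_squared_error(labels, predictions)
#   # squared errors: [0.0, 0.0, 4.0] -> streaming mean ~= 1.333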
def mean_tensor(values, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'mean', (values, weights)):
total = _create_local('total_tensor', shape=values.get_shape())
count = _create_local('count_tensor', shape=values.get_shape())
num_values = array_ops.ones_like(values)
if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
num_values = math_ops.mul(num_values, weights)
total_compute_op = state_ops.assign_add(total, values)
count_compute_op = state_ops.assign_add(count, num_values)
def compute_mean(total, count, name):
non_zero_count = math_ops.maximum(count,
array_ops.ones_like(count),
name=name)
return math_ops.truediv(total, non_zero_count, name=name)
mean_t = compute_mean(total, count, 'value')
with ops.control_dependencies([total_compute_op, count_compute_op]):
update_op = compute_mean(total, count, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_t)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
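# Example (illustrative sketch): unlike the scalar `mean`, the result keeps the
# input shape and averages element-wise across update calls.
#
#   values = tf.placeholder(tf.float32, shape=[2])
#   m, update_op = mean_tensor(values)
#   # run `update_op` with feeds [0., 2.] and then [2., 4.];
#   # `m` then evaluates to [1., 3.]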
def percentage_below(values, threshold, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `percentage_below` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))
return mean(is_below_threshold,
weights,
metrics_collections,
updates_collections,
name or 'percentage_below_threshold')
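# Example (illustrative sketch):
#
#   values = tf.constant([0.2, 0.8, 0.4, 0.9])
#   pct, update_op = percentage_below(values, threshold=0.5)
#   # two of the four values fall below 0.5 -> 0.5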
def _count_condition(values, weights=None, metrics_collections=None,
updates_collections=None):
"""Sums the weights of cases where the given values are True.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `bool` `Tensor` of arbitrary size.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
check_ops.assert_type(values, dtypes.bool)
count = _create_local('count', shape=[])
values = math_ops.to_float(values)
if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
value_tensor = array_ops.identity(count)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
if metrics_collections:
ops.add_to_collections(metrics_collections, value_tensor)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value_tensor, update_op
def true_positives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary
dimensions.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'true_positives', (predictions, labels, weights)):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_true_positive = math_ops.logical_and(math_ops.equal(labels, 1),
math_ops.equal(predictions, 1))
return _count_condition(is_true_positive, weights, metrics_collections,
updates_collections)
def false_positives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary
dimensions.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'false_positives', (predictions, labels, weights)):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_false_positive = math_ops.logical_and(math_ops.equal(labels, 0),
math_ops.equal(predictions, 1))
return _count_condition(is_false_positive, weights, metrics_collections,
updates_collections)
def precision(labels, predictions, weights=None,
metrics_collections=None, updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'precision', (predictions, labels, weights)):
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
true_p, true_positives_update_op = true_positives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
false_p, false_positives_update_op = false_positives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
def compute_precision(name):
return array_ops.where(
math_ops.greater(true_p + false_p, 0),
math_ops.div(true_p, true_p + false_p),
0,
name)
p = compute_precision('value')
with ops.control_dependencies([true_positives_update_op,
false_positives_update_op]):
update_op = compute_precision('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, p)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return p, update_op
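# Example (illustrative sketch, using 0/1 values so the internal
# `equal(..., 1)` comparisons apply directly):
#
#   labels = tf.constant([1, 1, 0, 0])
#   predictions = tf.constant([1, 0, 1, 0])
#   prec, update_op = precision(labels, predictions)
#   # TP = 1, FP = 1 -> precision 0.5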
def precision_at_thresholds(labels, predictions, thresholds,
weights=None,
metrics_collections=None,
updates_collections=None, name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
    metrics_collections: An optional list of collections that `precision`
      should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'precision_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fp'))
tp = values['tp']
fp = values['fp']
# Avoid division by zero.
epsilon = 1e-7
def compute_precision(name):
return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)
prec = compute_precision('value')
with ops.control_dependencies(update_ops.values()):
update_op = compute_precision('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, prec)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return prec, update_op
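# Example (illustrative sketch): one precision value per threshold.
#
#   labels = tf.constant([True, False, True, False])
#   predictions = tf.constant([0.9, 0.6, 0.4, 0.1])
#   prec, update_op = precision_at_thresholds(
#       labels, predictions, thresholds=[0.3, 0.5, 0.8])
#   # at threshold 0.5 the retrieved set is {0.9 (TP), 0.6 (FP)},
#   # so precision[1] ~= 0.5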
def false_negatives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary
dimensions.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
"""
with variable_scope.variable_scope(
name, 'false_negatives', (predictions, labels, weights)):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_false_negative = math_ops.logical_and(math_ops.equal(labels, 1),
math_ops.equal(predictions, 0))
return _count_condition(is_false_negative, weights, metrics_collections,
updates_collections)
def recall(labels, predictions, weights=None,
metrics_collections=None, updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'recall', (predictions, labels, weights)):
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
true_p, true_positives_update_op = true_positives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
false_n, false_negatives_update_op = false_negatives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
def compute_recall(true_p, false_n, name):
return array_ops.where(
math_ops.greater(true_p + false_n, 0),
math_ops.div(true_p, true_p + false_n),
0,
name)
rec = compute_recall(true_p, false_n, 'value')
with ops.control_dependencies([true_positives_update_op,
false_negatives_update_op]):
update_op = compute_recall(true_p, false_n, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, rec)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
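# Example (illustrative sketch, 0/1 values as in the `precision` sketch):
#
#   labels = tf.constant([1, 1, 1, 0])
#   predictions = tf.constant([1, 0, 1, 1])
#   rec, update_op = recall(labels, predictions)
#   # TP = 2, FN = 1 -> recall ~= 0.667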
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
    name = '%s_at_k' % name
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
def _select_class_id(ids, selected_id):
"""Filter all but `selected_id` out of `ids`.
Args:
ids: `int64` `Tensor` or `SparseTensor` of IDs.
selected_id: Int id to select.
Returns:
`SparseTensor` of same dimensions as `ids`. This contains only the entries
equal to `selected_id`.
"""
ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
if isinstance(ids, sparse_tensor.SparseTensor):
return sparse_ops.sparse_retain(
ids, math_ops.equal(ids.values, selected_id))
# TODO(ptucker): Make this more efficient, maybe add a sparse version of
# tf.equal and tf.reduce_any?
# Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
ids_last_dim = array_ops.size(ids_shape) - 1
filled_selected_id_shape = math_ops.reduced_shape(
ids_shape, array_ops.reshape(ids_last_dim, [1]))
# Intersect `ids` with the selected ID.
filled_selected_id = array_ops.fill(
filled_selected_id_shape, math_ops.to_int64(selected_id))
result = sets.set_intersection(filled_selected_id, ids)
return sparse_tensor.SparseTensor(
indices=result.indices, values=result.values, dense_shape=ids_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
Returns:
Tuple of `labels` and `predictions_idx`, possibly with classes removed.
"""
if selected_id is None:
return labels, predictions_idx
return (_select_class_id(labels, selected_id),
_select_class_id(predictions_idx, selected_id))
def _sparse_true_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None,
name=None):
"""Calculates true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
name: Name of operation.
Returns:
A [D1, ... DN] `Tensor` of true positive counts.
"""
with ops.name_scope(name, 'true_positives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(
labels, predictions_idx, class_id)
tp = sets.set_size(sets.set_intersection(predictions_idx, labels))
tp = math_ops.to_double(tp)
if weights is not None:
weights = math_ops.to_double(weights)
tp = math_ops.mul(tp, weights)
return tp
def _streaming_sparse_true_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
"""
default_name = _at_k_name('true_positive', k, class_id=class_id)
with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
tp = _sparse_true_positive_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp))
var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_tp, name='update')
def _sparse_false_negative_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false negatives for recall@k.
  If `class_id` is specified, calculate binary false negatives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
Returns:
A [D1, ... DN] `Tensor` of false negative counts.
"""
with ops.name_scope(None, 'false_negatives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(labels,
predictions_idx,
class_id)
fn = sets.set_size(sets.set_difference(predictions_idx,
labels,
aminusb=False))
fn = math_ops.to_double(fn)
if weights is not None:
weights = math_ops.to_double(weights)
fn = math_ops.mul(fn, weights)
return fn
def _streaming_sparse_false_negative_at_k(labels,
predictions_idx,
k,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false negatives for recall@k.
  If `class_id` is specified, calculate binary false negatives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
"""
default_name = _at_k_name('false_negative', k, class_id=class_id)
with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
fn = _sparse_false_negative_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))
var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fn, name='update')
def recall_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
  `recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: An optional `Tensor` whose shape is broadcastable to the first
[D1, ... DN] dimensions of `predictions` and `labels`.
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('recall', k, class_id=class_id)
with ops.name_scope(name, default_name, (predictions, labels)) as scope:
labels = _maybe_expand_labels(labels, predictions)
_, top_k_idx = nn.top_k(predictions, k)
top_k_idx = math_ops.to_int64(top_k_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
fn, fn_update = _streaming_sparse_false_negative_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
metric = math_ops.div(tp, math_ops.add(tp, fn), name=scope)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fn_update), name='update')
if metrics_collections:
ops.add_to_collections(metrics_collections, metric)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
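# Example (illustrative sketch): two rows, four classes, hypothetical logits.
#
#   labels = tf.constant([[0], [2]], dtype=tf.int64)
#   predictions = tf.constant([[0.6, 0.2, 0.1, 0.1],
#                              [0.4, 0.3, 0.2, 0.1]])
#   rec, update_op = recall_at_k(labels, predictions, k=2)
#   # row 1: class 0 is in the top-2 (hit); row 2: class 2 is not (miss)
#   # -> recall@2 == 0.5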
def recall_at_thresholds(labels, predictions, thresholds,
weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'recall_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fn'))
tp = values['tp']
fn = values['fn']
# Avoid division by zero.
epsilon = 1e-7
def compute_recall(name):
return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
rec = compute_recall('value')
with ops.control_dependencies(update_ops.values()):
update_op = compute_recall('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, rec)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
def root_mean_squared_error(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
value_tensor, update_op = mean_squared_error(
labels, predictions, weights, None, None,
name or 'root_mean_squared_error')
rmse = math_ops.sqrt(value_tensor)
with ops.control_dependencies([update_op]):
update_op = math_ops.sqrt(update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, rmse)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rmse, update_op
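# Example (illustrative sketch):
#
#   labels = tf.constant([0.0, 0.0])
#   predictions = tf.constant([3.0, 4.0])
#   rmse, update_op = root_mean_squared_error(labels, predictions)
#   # MSE = (9 + 16) / 2 = 12.5 -> RMSE ~= 3.54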
def sensitivity_at_specificity(
labels, predictions, specificity, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None, name=None):
"""Computes the specificity at a given sensitivity.
The `sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
specificity: A scalar value in range `[0, 1]`.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if specificity < 0 or specificity > 1:
raise ValueError('`specificity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
tp = values['tp']
fn = values['fn']
tn = values['tn']
fp = values['fp']
def compute_sensitivity_at_specificity(name):
specificities = math_ops.div(tn, tn + fp + kepsilon)
tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the sensitivity:
return math_ops.div(tp[tf_index],
tp[tf_index] + fn[tf_index] + kepsilon,
name)
sensitivity = compute_sensitivity_at_specificity('value')
with ops.control_dependencies(update_ops.values()):
update_op = compute_sensitivity_at_specificity('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, sensitivity)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return sensitivity, update_op
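# Example (illustrative sketch): the metric scans the threshold grid for the
# point whose specificity is closest to the requested value, then reports the
# sensitivity at that threshold.
#
#   labels = tf.constant([True, True, False, False])
#   predictions = tf.constant([0.9, 0.4, 0.3, 0.1])
#   sens, update_op = sensitivity_at_specificity(
#       labels, predictions, specificity=0.9)
#   # after `update_op`, `sens` is tp / (tp + fn) at the chosen threshold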
def _expand_and_tile(tensor, multiple, dim=0, name=None):
"""Slice `tensor` shape in 2, then tile along the sliced dimension.
A new dimension is inserted in shape of `tensor` before `dim`, then values are
tiled `multiple` times along the new dimension.
Args:
tensor: Input `Tensor` or `SparseTensor`.
multiple: Integer, number of times to tile.
dim: Integer, dimension along which to tile.
name: Name of operation.
Returns:
`Tensor` result of expanding and tiling `tensor`.
Raises:
ValueError: if `multiple` is less than 1, or `dim` is not in
`[-rank(tensor), rank(tensor)]`.
"""
if multiple < 1:
raise ValueError('Invalid multiple %s, must be > 0.' % multiple)
with ops.name_scope(
name, 'expand_and_tile', (tensor, multiple, dim)) as scope:
# Sparse.
tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
if dim < 0:
expand_dims = array_ops.reshape(
array_ops.size(tensor.dense_shape) + dim, [1])
else:
expand_dims = [dim]
expanded_shape = array_ops.concat_v2(
(array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
0,
name='expanded_shape')
expanded = sparse_ops.sparse_reshape(
tensor, shape=expanded_shape, name='expand')
if multiple == 1:
return expanded
return sparse_ops.sparse_concat(
dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
# Dense.
expanded = array_ops.expand_dims(
tensor, dim if (dim >= 0) else (dim - 1), name='expand')
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
tile_multiples = array_ops.concat_v2(
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
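# Shape sketch for `_expand_and_tile` (hypothetical dense input): a [2, 2]
# tensor tiled 3 times along a new next-to-last dimension, which is how
# `labels_per_k` is built below.
#
#   t = tf.constant([[1, 2], [3, 4]])          # shape [2, 2]
#   _expand_and_tile(t, multiple=3, dim=-1)    # shape [2, 3, 2]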
def _num_relevant(labels, k):
"""Computes number of relevant values for each row in labels.
For labels with shape [D1, ... DN, num_labels], this is the minimum of
`num_labels` and `k`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels].
k: Integer, k for @k metric.
Returns:
Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
ValueError: if inputs have invalid dtypes or values.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
# For SparseTensor, calculate separate count for each row.
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.minimum(sets.set_size(labels), k, name=scope)
# For dense Tensor, calculate scalar count based on last dimension, and
# tile across labels shape.
labels_shape = array_ops.shape(labels)
labels_size = labels_shape[-1]
num_relevant_scalar = math_ops.minimum(labels_size, k)
return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
def _sparse_average_precision_at_k(labels, predictions, k):
"""Computes average precision@k of predictions with respect to sparse labels.
From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
for each row is:
AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items
A "row" is the elements in dimension [D1, ... DN] of `predictions`, `labels`,
and the result `Tensors`. In the common case, this is [batch_size]. Each row
of the results contains the average precision for that row.
Internally, a `top_k` operation computes a `Tensor` indicating the top `k`
`predictions`. Set operations applied to `top_k` and `labels` calculate the
true positives, which are used to calculate the precision ("P_{i}" term,
above).
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
Returns:
`float64` `Tensor` of shape [D1, ... DN], where each value is the average
precision for that row.
Raises:
ValueError: if k is invalid.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(
None, 'average_precision', (predictions, labels, k)) as scope:
labels = _maybe_expand_labels(labels, predictions)
# Calculate top k indices to produce [D1, ... DN, k] tensor.
_, predictions_idx = nn.top_k(predictions, k)
predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
# Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
# prediction for each k, so we can calculate separate true positive values
# for each k.
predictions_idx_per_k = array_ops.expand_dims(
predictions_idx, -1, name='predictions_idx_per_k')
# Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
labels_per_k = _expand_and_tile(
labels, multiple=k, dim=-1, name='labels_per_k')
# The following tensors are all of shape [D1, ... DN, k], containing values
# per row, per k value.
# `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at
# that k value is correct, 0 otherwise. This is the "rel_{i}" term from
# the formula above.
# `tp_per_k` (int32) - True positive counts.
# `retrieved_per_k` (int32) - Number of predicted values at each k. This is
# the precision denominator.
# `precision_per_k` (float64) - Precision at each k. This is the "P_{i}"
# term from the formula above.
# `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
# precisions at all k for which relevance indicator is true.
relevant_per_k = _sparse_true_positive_at_k(
labels_per_k, predictions_idx_per_k, name='relevant_per_k')
tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
retrieved_per_k = math_ops.cumsum(
array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
precision_per_k = math_ops.div(
math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),
name='precision_per_k')
relevant_precision_per_k = math_ops.mul(
precision_per_k, math_ops.to_double(relevant_per_k),
name='relevant_precision_per_k')
# Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
precision_sum = math_ops.reduce_sum(
relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')
# Divide by number of relevant items to get average precision. These are
# the "num_relevant_items" and "AveP" terms from the formula above.
num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
return math_ops.div(precision_sum, num_relevant_items, name=scope)
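# Worked example of the AveP formula above (one row, hypothetical numbers):
# labels = {1, 3}, top-3 predicted classes = [1, 0, 3], k = 3.
#   rel_{i}            = [1, 0, 1]
#   P_{i} = tp_i / i   = [1/1, 1/2, 2/3]
#   AveP = (1.0 * 1 + 0.5 * 0 + (2/3) * 1) / min(2, 3) = 5/6 ~= 0.833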
def sparse_average_precision_at_k(labels,
predictions,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: An optional `Tensor` whose shape is broadcastable to the first
[D1, ... DN] dimensions of `predictions` and `labels`.
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
default_name = _at_k_name('average_precision', k)
with ops.name_scope(name, default_name, (predictions, labels)) as scope:
# Calculate per-example average precision, and apply weights.
average_precision = _sparse_average_precision_at_k(
predictions=predictions, labels=labels, k=k)
if weights is not None:
weights = math_ops.to_double(weights)
average_precision = math_ops.mul(average_precision, weights)
# Create accumulation variables and update ops for max average precision and
# total average precision.
with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
# `max` is the max possible precision. Since max for any row is 1.0:
# - For the unweighted case, this is just the number of rows.
# - For the weighted case, it's the sum of the weights broadcast across
# `average_precision` rows.
max_var = _local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=max_scope)
if weights is None:
batch_max = math_ops.to_double(
array_ops.size(average_precision, name='batch_max'))
else:
# TODO(ptucker): More efficient way to broadcast?
broadcast_weights = math_ops.mul(
weights, array_ops.ones_like(average_precision),
name='broadcast_weights')
batch_max = math_ops.reduce_sum(broadcast_weights, name='batch_max')
max_update = state_ops.assign_add(max_var, batch_max, name='update')
with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
total_var = _local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
total_update = state_ops.assign_add(total_var, batch_total, name='update')
# Divide total by max to get mean, for both vars and the update ops.
mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')
update = _safe_scalar_div(total_update, max_update, name=scope)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_average_precision)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return mean_average_precision, update
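# Example (hypothetical usage sketch, not part of the original module), assuming
# `import tensorflow as tf` on a 1.x-era build and the shapes documented above:
#
#   labels = tf.constant([[2], [1]], dtype=tf.int64)         # [batch, num_labels]
#   logits = tf.constant([[.1, .2, .7], [.6, .3, .1]])       # [batch, num_classes]
#   metric, update = sparse_average_precision_at_k(labels, logits, k=2)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update)   # accumulates total/max and returns the running metric
#     print(sess.run(metric))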
def _sparse_false_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false positives for precision@k.
  If `class_id` is specified, calculate binary false positives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
Returns:
A [D1, ... DN] `Tensor` of false positive counts.
"""
with ops.name_scope(None, 'false_positives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(labels,
predictions_idx,
class_id)
fp = sets.set_size(sets.set_difference(
predictions_idx, labels, aminusb=True))
fp = math_ops.to_double(fp)
if weights is not None:
weights = math_ops.to_double(weights)
fp = math_ops.mul(fp, weights)
return fp
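# Illustrative (hypothetical) values for the helper above: with
# labels = [[1, 2]] and predictions_idx = [[1, 3]], the per-row set difference
# predictions - labels is {3}, so the returned false positive count is [1.0].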
def _streaming_sparse_false_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false positives for precision@k.
  If `class_id` is specified, calculate binary false positives for `class_id`
  only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
"""
default_name = _at_k_name('false_positive', k, class_id=class_id)
with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
fp = _sparse_false_positive_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))
var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fp, name='update')
def sparse_precision_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is in the top-k highest
`predictions`, and computing the fraction of them for which `class_id` is
indeed a correct label.
If `class_id` is not specified, we'll calculate precision as how often on
average a class among the top-k classes with the highest predicted values
of a batch entry is correct and can be found in the label for that entry.
`sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
weights: An optional `Tensor` whose shape is broadcastable to the first
[D1, ... DN] dimensions of `predictions` and `labels`.
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions)
_, top_k_idx = nn.top_k(predictions, k)
top_k_idx = math_ops.to_int64(top_k_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
fp, fp_update = _streaming_sparse_false_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
metric = math_ops.div(tp, math_ops.add(tp, fp), name=scope)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fp_update), name='update')
if metrics_collections:
ops.add_to_collections(metrics_collections, metric)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
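# Example (hypothetical usage sketch), reusing the `labels`/`logits` shapes from
# the average-precision example above:
#
#   metric, update = sparse_precision_at_k(labels, logits, k=1)
#   # `metric` is true_positives / (true_positives + false_positives) at k=1;
#   # pass class_id to restrict the computation to a single class.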
def specificity_at_sensitivity(
labels, predictions, sensitivity, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None, name=None):
"""Computes the specificity at a given sensitivity.
The `specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
sensitivity: A scalar value in range `[0, 1]`.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if sensitivity < 0 or sensitivity > 1:
raise ValueError('`sensitivity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
tp = values['tp']
fn = values['fn']
tn = values['tn']
fp = values['fp']
def compute_specificity_at_sensitivity(name):
"""Computes the specificity at the given sensitivity.
Args:
name: The name of the operation.
Returns:
The specificity using the aggregated values.
"""
sensitivities = math_ops.div(tp, tp + fn + kepsilon)
# We'll need to use this trick until tf.argmax allows us to specify
# whether we should use the first or last index in case of ties.
min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
indices_at_minval = math_ops.equal(
math_ops.abs(sensitivities - sensitivity), min_val)
indices_at_minval = math_ops.to_int64(indices_at_minval)
indices_at_minval = math_ops.cumsum(indices_at_minval)
tf_index = math_ops.argmax(indices_at_minval, 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the specificity:
return math_ops.div(tn[tf_index],
tn[tf_index] + fp[tf_index] + kepsilon,
name)
specificity = compute_specificity_at_sensitivity('value')
with ops.control_dependencies(update_ops.values()):
update_op = compute_specificity_at_sensitivity('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, specificity)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return specificity, update_op
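# Example (hypothetical usage sketch): specificity at 50% sensitivity for a
# small batch of boolean labels and scores in [0, 1], again assuming a
# TF1-style session and local-variable initialization:
#
#   labels = tf.constant([True, False, True, False])
#   scores = tf.constant([0.9, 0.6, 0.4, 0.1])
#   spec, update = specificity_at_sensitivity(labels, scores, sensitivity=0.5)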
|
[
"[email protected]"
] | |
06d1da03bb473ec8c74994de7dded0aac1d089af
|
bc1e4bdfe4c759cb8fc39dfb2181bbcf79a2c5a5
|
/React-Tasks/dj/lib/python3.6/weakref.py
|
08e07af5cee91eb3751018654b88c40612e44099
|
[] |
no_license
|
ProgMmgGhoneim/React_Js
|
19b4f2eef617c1a9623ef93297b2a3c7c84769c6
|
d038ac7fbc60757a04d777ffe1b6dc1cfcdfaea7
|
refs/heads/master
| 2023-01-10T20:23:03.466801 | 2019-10-11T11:25:56 | 2019-10-11T11:25:56 | 214,394,220 | 0 | 0 | null | 2023-01-05T23:20:36 | 2019-10-11T09:16:05 |
Python
|
UTF-8
|
Python
| false | false | 48 |
py
|
/home/ghoneim/anaconda3/lib/python3.6/weakref.py
|
[
"[email protected]"
] | |
5d2eb91da355a9b16280bef5d63c1395eae24179
|
f421d6ae91120184a7ce1b82e779255fd64fdf85
|
/structs/models/conv.py
|
b8a0b86846e3b9280b45ce61e3af9572f72dbada
|
[] |
no_license
|
landjbs/MCTS
|
54f8a979878ad3ca895e6ff151bd3c872e96859c
|
db6f490cb3c27b6ffc31b48350ce63c68b59315b
|
refs/heads/master
| 2020-11-27T05:23:09.726422 | 2019-12-27T19:44:01 | 2019-12-27T19:44:01 | 229,320,234 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,281 |
py
|
import torch
import torch.nn as nn
class Conv(nn.Module):
    '''
    The Conv model is tasked with predicting a vector (p) of move probabilities
    across the available moves and a scalar (v) of win probability at the
    current state. To avoid shape issues, p always has length 8. The input
    board is a 4th-order, channels-first tensor with shape
    (batch, 4, boardSize, boardSize), where the channel axis has indices
    (0-shots, 1-walls, 2-enemies, 3-player).
    Sizes are currently hard-coded.
    '''
def __init__(self, lr, boardSize=20):
super(Conv, self).__init__()
# layers
self.conv1 = nn.Sequential(
nn.Conv2d(4, 20, kernel_size=1, stride=1, padding=0),
nn.ReLU()
) #nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Sequential(
nn.Conv2d(20, 20, kernel_size=1, stride=1, padding=0),
nn.ReLU()
) #nn.MaxPool2d(kernel_size=2, stride=2)
self.dropout = nn.Dropout(p=0.3)
        # Flattened conv output: 20 channels across the full board (no pooling).
        self.lin1 = nn.Linear(boardSize * boardSize * 20, 1000)
self.pLin = nn.Linear(1000, 8)
self.soft = nn.Softmax(dim=1)
self.vLin = nn.Linear(1000, 1)
self.sig = nn.Sigmoid()
# optimizers and loss
self.optim = torch.optim.Adam(self.parameters(), lr=lr)
# self.pCriterion = nn.CrossEntropyLoss()
self.vCriterion = nn.BCELoss()
def pCriterion(self, p, target):
pC = p[0, target]
pLog = torch.log(pC)
loss = -(pLog)
return loss
def forward(self, boardTensor):
convOut = self.conv1(boardTensor)
convOut = self.conv2(convOut)
convOut = convOut.reshape(convOut.size(0), -1)
convOut = self.dropout(convOut)
linOut = self.lin1(convOut)
p = self.soft(self.pLin(linOut))
v = self.sig(self.vLin(linOut))[0]
# print(f'p: {p} | v: {v}')
return p, v
def eval_and_prop(self, pX, vX, pY, vY):
pLoss = self.pCriterion(pX, pY)
vLoss = self.vCriterion(vX, torch.tensor([vY], dtype=torch.float))
loss = pLoss + vLoss
self.optim.zero_grad()
loss.backward()
self.optim.step()
return loss
def train_step(self, x, yP, yV):
p, v = self(x)
return self.eval_and_prop(p, v, yP, yV)
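# Example (hypothetical) smoke test for the default 20x20 board -- the input is
# channels-first (batch, 4, boardSize, boardSize), as nn.Conv2d expects:
#
#   model = Conv(lr=1e-3)
#   board = torch.randn(1, 4, 20, 20)
#   p, v = model(board)   # p: (1, 8) move probabilities, v: (1,) win probability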
|
[
"[email protected]"
] | |
95a32e6f81e5d22d9e2310154eb8ea8d02f8449a
|
805705b87e889c18ac0b8be6ee50fcc292bf32e1
|
/main.py
|
7d5e45fd1fab3b9071debd0070268a77e592ef62
|
[] |
no_license
|
jaybenaim/day18-reinforcements
|
283ea6578cb4dbc252da6b30e77a2899b6feac03
|
e29ae6f1b16ea8c0b5d4fd1eba931799c0a70c57
|
refs/heads/master
| 2020-06-24T07:16:45.253006 | 2019-07-25T20:32:52 | 2019-07-25T20:32:52 | 198,893,102 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 482 |
py
|
def word_counter(string):
    # str.split() with no arguments splits on runs of whitespace and returns
    # [] for an empty string, so the word count is simply the length.
    return len(string.split())
print(word_counter('this is a test'))
print(word_counter("Hello world") )# returns 2
print(word_counter("This is a sentence")) # returns 4
print(word_counter("")) # returns 0
|
[
"[email protected]"
] | |
f84aaf8671a71a32fd27a410f2fb67fb424f2fd9
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2942/60731/239792.py
|
be1b184072b692962edd810b9ee96fe99b0afb2a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 120 |
py
|
n = int(input())        # number of values (consumed but unused below)
num = input()
l = num.split()
l.sort(reverse=True)    # lexicographically descending
result = ''.join(l)
print(result, end='')
|
[
"[email protected]"
] | |
b2524bfa2f3066ef2a19eac93a3434a6ce088a9b
|
223b5e108951378f9de33258d2602fc01c17c4cb
|
/django16/project_name/settings.py
|
3e07f544761d6b0d1b95bea8f4090296f58955c3
|
[
"MIT"
] |
permissive
|
barscka/django-project-template
|
55934fb14f665aae5accd0dfbef23ed7efa0c3ba
|
2515d7a4f0964feb8aef2f340db4aa2f820c1d87
|
refs/heads/master
| 2021-05-31T09:06:17.254535 | 2015-07-28T00:14:10 | 2015-07-28T00:14:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,571 |
py
|
# -*- coding: utf-8 -*-
"""
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
from __future__ import unicode_literals
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = [
'*',
]
# Application definition
INSTALLED_APPS = (
# django core
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third party apps
# local apps
# south in the end
'south',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# =============================================================================
# Django Local settings here
# =============================================================================
ADMINS = (
('John Doe', '[email protected]'),
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# =============================================================================
# {{ project_name }} settings here
# =============================================================================
SITE_NAME = '{{ project_name }}'
SITE_DOMAIN = '{{ project_name }}.com'
# =============================================================================
# Load settings_local.py if exists
# =============================================================================
try:
from .settings_local import *
except ImportError:
pass
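# A minimal sketch (hypothetical) of the settings_local.py override imported
# above, kept next to this file and excluded from version control:
#
#   DEBUG = False
#   ALLOWED_HOSTS = ['example.com']
#   SECRET_KEY = 'replace-me-in-production'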
|
[
"[email protected]"
] | |
4fac3d9ddcb200a86ec0789f848a9079e75c1994
|
0c452a191d35a26499abec71854f8a8cdb1efc68
|
/test/unit/backends/msbuild/test_writer.py
|
81b41a280bbd23b9d969441b074964552239bd10
|
[
"BSD-3-Clause"
] |
permissive
|
luc3001/bfg9000
|
fe609d64d7b605fef0ffb375873729c8cf0bd641
|
41452e9dd12f1a44bae68d3bf44f362d283e6802
|
refs/heads/master
| 2020-09-15T17:10:52.434281 | 2019-11-22T04:33:26 | 2019-11-22T04:33:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,200 |
py
|
import mock
from ... import *
from bfg9000.backends.msbuild.writer import version
from bfg9000.versioning import Version
def mock_bad_which(*args, **kwargs):
raise IOError()
def mock_bad_execute(*args, **kwargs):
raise OSError()
class TestMsBuildVersion(TestCase):
def test_good(self):
with mock.patch('bfg9000.shell.which', return_value=['command']), \
mock.patch('bfg9000.shell.execute',
return_value='MSBuild 1.23'): # noqa
self.assertEqual(version({}), Version('1.23'))
def test_unrecognized_version(self):
with mock.patch('bfg9000.shell.which', return_value=['command']), \
mock.patch('bfg9000.shell.execute',
return_value='MSBuild'): # noqa
self.assertEqual(version({}), None)
def test_not_found(self):
with mock.patch('bfg9000.shell.which', mock_bad_which):
self.assertEqual(version({}), None)
def test_bad_execute(self):
with mock.patch('bfg9000.shell.which', return_value=['command']), \
mock.patch('bfg9000.shell.execute', mock_bad_execute): # noqa
self.assertEqual(version({}), None)
|
[
"[email protected]"
] | |
64ea2ea9564dcd0bab25d5cf5c5a4115f4769d93
|
a81c1492783e7cafcaf7da5f0402d2d283b7ce37
|
/google/ads/google_ads/v6/proto/enums/shared_set_status_pb2.py
|
6b94727fd3b8dc33bf74d4a95e08d59671258bef
|
[
"Apache-2.0"
] |
permissive
|
VincentFritzsche/google-ads-python
|
6650cf426b34392d1f58fb912cb3fc25b848e766
|
969eff5b6c3cec59d21191fa178cffb6270074c3
|
refs/heads/master
| 2023-03-19T17:23:26.959021 | 2021-03-18T18:18:38 | 2021-03-18T18:18:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | true | 4,087 |
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/enums/shared_set_status.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/enums/shared_set_status.proto',
package='google.ads.googleads.v6.enums',
syntax='proto3',
serialized_options=b'\n!com.google.ads.googleads.v6.enumsB\024SharedSetStatusProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V6.Enums\312\002\035Google\\Ads\\GoogleAds\\V6\\Enums\352\002!Google::Ads::GoogleAds::V6::Enums',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n5google/ads/googleads/v6/enums/shared_set_status.proto\x12\x1dgoogle.ads.googleads.v6.enums\x1a\x1cgoogle/api/annotations.proto\"`\n\x13SharedSetStatusEnum\"I\n\x0fSharedSetStatus\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0b\n\x07\x45NABLED\x10\x02\x12\x0b\n\x07REMOVED\x10\x03\x42\xe9\x01\n!com.google.ads.googleads.v6.enumsB\x14SharedSetStatusProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V6.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V6\\Enums\xea\x02!Google::Ads::GoogleAds::V6::Enumsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_SHAREDSETSTATUSENUM_SHAREDSETSTATUS = _descriptor.EnumDescriptor(
name='SharedSetStatus',
full_name='google.ads.googleads.v6.enums.SharedSetStatusEnum.SharedSetStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENABLED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REMOVED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=141,
serialized_end=214,
)
_sym_db.RegisterEnumDescriptor(_SHAREDSETSTATUSENUM_SHAREDSETSTATUS)
_SHAREDSETSTATUSENUM = _descriptor.Descriptor(
name='SharedSetStatusEnum',
full_name='google.ads.googleads.v6.enums.SharedSetStatusEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_SHAREDSETSTATUSENUM_SHAREDSETSTATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=118,
serialized_end=214,
)
_SHAREDSETSTATUSENUM_SHAREDSETSTATUS.containing_type = _SHAREDSETSTATUSENUM
DESCRIPTOR.message_types_by_name['SharedSetStatusEnum'] = _SHAREDSETSTATUSENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SharedSetStatusEnum = _reflection.GeneratedProtocolMessageType('SharedSetStatusEnum', (_message.Message,), {
'DESCRIPTOR' : _SHAREDSETSTATUSENUM,
'__module__' : 'google.ads.googleads.v6.enums.shared_set_status_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.enums.SharedSetStatusEnum)
})
_sym_db.RegisterMessage(SharedSetStatusEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"[email protected]"
] | |
1522d5404ed627f0931f89bb5684acad594c31df
|
493a36f1f8606c7ddce8fc7fe49ce4409faf80be
|
/.history/B073040023/client_20210614185239.py
|
0b152bdb8714864bface6716198073ed81196b44
|
[] |
no_license
|
ZhangRRz/computer_network
|
f7c3b82e62920bc0881dff923895da8ae60fa653
|
077848a2191fdfe2516798829644c32eaeded11e
|
refs/heads/main
| 2023-05-28T02:18:09.902165 | 2021-06-15T06:28:59 | 2021-06-15T06:28:59 | 376,568,344 | 0 | 0 | null | 2021-06-13T14:48:36 | 2021-06-13T14:48:36 | null |
UTF-8
|
Python
| false | false | 5,029 |
py
|
import socket
import threading
import tcppacket
import struct
from time import sleep
# socket.socket() will create a TCP socket (default)
# socket.socket(socket.AF_INET, socket.SOCK_STREAM) to explicitly define a TCP socket
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # explicitly define a UDP socket
udp_host = '127.0.0.1' # Host IP
udp_port = 12345 # specified port to connect
def init_new_calc_req(msg):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg
    tcp = tcppacket.TCPPacket(data=msg.encode('utf-8'))  # encode like the other requests
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port) # Sending message to UDP server
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
        if(unpackdata[5] % 2):
            # FIN flag set in the received header
            fin_flag = 1
        else:
            fin_flag = 0
        tcp = tcppacket.TCPPacket(
            data="ACK".encode('utf-8'),
            flags_ack=1,
            flags_fin=fin_flag)
        tcp.assemble_tcp_feilds()
        print("ACK send to (IP,port):", address)
        sock.sendto(tcp.raw, address)
        if(fin_flag):
            break
def init_new_videoreq_req(i):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
msg = "video 1".encode('utf-8')
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port)) # Sending message to UDP server
recvdata = b''
ack_seq = 0
seq = 0
counter = 0
while True:
data, address = sock.recvfrom(512*1024)
s = struct.calcsize('!HHLLBBHHH')
raw = struct.unpack('!HHLLBBHHH', data[:s])
print("receive packet from ", address,
"with header", raw)
if(raw[2] == ack_seq and raw[7] == 0):
recvdata += data[s:]
if(raw[5] % 2):
            # FIN flag set in the received header
fin_flag = 1
else:
fin_flag = 0
ack_seq += 1
counter += 1
else:
print("Receive ERROR packet from ", address)
fin_flag = 1
counter = 3
# --------------------------------------------
# send ACK
if(counter == 3):
tcp = tcppacket.TCPPacket(
data=str("ACK").encode('utf-8'),
seq=seq, ack_seq=ack_seq,
flags_ack=1,
flags_fin=fin_flag)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address,
"with ack seq: ", ack_seq, " and seq: ", seq)
sock.sendto(tcp.raw, address)
if(not fin_flag):
counter = 0
seq += 1
# --------------------------------------------
print(fin_flag)
if(fin_flag):
break
savename = str(i+1)+"received.mp4"
f = open(savename, "wb")
f.write(recvdata)
f.close()
def init_new_dns_req(i):
# ---------------------
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg = "dns google.com"
msg = msg.encode('utf-8')
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
        if(unpackdata[5] % 2):
            # FIN flag set in the received header
            fin_flag = 1
        else:
            fin_flag = 0
        tcp = tcppacket.TCPPacket(
            data="ACK".encode('utf-8'),
            flags_ack=1,
            flags_fin=fin_flag)
        tcp.assemble_tcp_feilds()
        print("ACK send to (IP,port):", address)
        sock.sendto(tcp.raw, address)
        if(fin_flag):
            break
# ----------------------
# def init_new
threads = []
#Calculation
print("Demo calculation function")
init_new_calc_req("calc 2 + 6")
sleep(0.25)
init_new_calc_req("calc 2 - 6")
sleep(0.25)
init_new_calc_req("calc 2 * 6")
sleep(0.25)
init_new_calc_req("calc 2 / 6")
sleep(0.25)
init_new_calc_req("calc 2 + 6")
sleep(0.25)
init_new_calc_req("calc 2 + 6")
sleep(0.25)
# `i` was never defined here and init_new_calc_req() expects a request string,
# so pass a calc message like the demo calls above.
threads.append(threading.Thread(target = init_new_calc_req, args = ("calc 2 + 6",)))
threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_dns_req, args = (i,)))
threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_videoreq_req, args = (i,)))
threads[-1].start()
|
[
"[email protected]"
] | |
fbe2b17d459317f2efe8ffe5d2cae5b3c831ec34
|
4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5
|
/suning/api/oto/OrderGetRequest.py
|
4a5953e4ea31fa728bfb4e0a68491e4b13475f83
|
[] |
no_license
|
shijingyu/sunningAPI
|
241f33b0660dc84635ce39688fed499f5c57a5da
|
4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5
|
refs/heads/master
| 2020-04-24T22:15:11.584028 | 2019-02-24T06:41:20 | 2019-02-24T06:41:20 | 172,305,179 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 489 |
py
|
# -*- coding: utf-8 -*-
'''
Created on 2016-12-2
@author: suning
'''
from suning.api.abstract import AbstractApi
class OrderGetRequest(AbstractApi):
'''
'''
def __init__(self):
AbstractApi.__init__(self)
self.b2cOrderId = None
self.setParamRule({
'b2cOrderId':{'allow_empty':False}
})
def getApiBizName(self):
return 'getOrder'
def getApiMethod(self):
return 'suning.oto.order.get'
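# Example (hypothetical) usage sketch, following the AbstractApi pattern:
#
#   req = OrderGetRequest()
#   req.b2cOrderId = '1234567890'   # placeholder order id
#   # the populated request is then submitted through the SDK client as usual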
|
[
"[email protected]"
] | |
f7054ba24a5ccaf1ec397da00458afa1b156dfc4
|
9184e230f8b212e8f686a466c84ecc89abe375d1
|
/arcseventdata/tests/module/events2IQQQE_TestCase.py
|
99df9481553bd7ad7dbd5f5d4277ab0a0d79268b
|
[] |
no_license
|
danse-inelastic/DrChops
|
75b793d806e6351dde847f1d92ab6eebb1ef24d2
|
7ba4ce07a5a4645942192b4b81f7afcae505db90
|
refs/heads/master
| 2022-04-26T17:37:41.666851 | 2015-05-02T23:21:13 | 2015-05-02T23:21:13 | 34,094,584 | 0 | 1 | null | 2020-09-10T01:50:10 | 2015-04-17T03:30:52 |
Python
|
UTF-8
|
Python
| false | false | 2,272 |
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import unittest
from unittest import TestCase
class events2IQQQE_TestCase(TestCase):
def test1(self):
'events2IQQQE: default intensity type (int)'
import arcseventdata.arcseventdata as aa
events = aa.readevents( "events.dat", 10 )
import numpy
I = numpy.zeros( 20*20*20*10, 'i' )
Ei = 60.
pixelPositions = aa.readpixelpositions( 'pixelID2position.bin' )
aa.events2IQQQE_numpyarray(
events, 10,
-10, 10, 1.,
-10, 10, 1.,
-10, 10, 1.,
-50, 50, 10. ,
I,
Ei, pixelPositions)
return
def test2(self):
'events2IQQQE: intensity type is "double"'
import arcseventdata.arcseventdata as aa
events = aa.readevents( "events.dat", 10 )
import numpy
I = numpy.zeros( 20*20*20*10, 'double' )
Ei = 60.
pixelPositions = aa.readpixelpositions( 'pixelID2position.bin' )
ntotpixels = 115*8*128
tofUnit = 1e-7
mod2sample = 13.6
toffset = 0
intensity_npy_typecode = numpy.dtype('double').num
aa.events2IQQQE_numpyarray(
events, 10,
-10, 10, 1.,
-10, 10, 1.,
-10, 10, 1.,
-50, 50, 10. ,
I,
Ei, pixelPositions,
ntotpixels, tofUnit,
mod2sample, toffset, intensity_npy_typecode,
)
return
pass # end of events2IQQQE_TestCase
def pysuite():
suite1 = unittest.makeSuite(events2IQQQE_TestCase)
return unittest.TestSuite( (suite1,) )
def main():
pytests = pysuite()
alltests = unittest.TestSuite( (pytests, ) )
unittest.TextTestRunner(verbosity=2).run(alltests)
return
if __name__ == '__main__': main()
# version
__id__ = "$Id$"
# End of file
|
[
"[email protected]"
] | |
c2184ae39c88d495df7b7e3f616625ec741446de
|
bde1e989aea152374f37b3511c39954d813016ae
|
/platypush/plugins/media/webtorrent.py
|
f496644fdf180552d3233c0f94dd5d409194d471
|
[
"MIT"
] |
permissive
|
lopeben/platypush
|
bad4f3df159582b77577a91bd03246af428073e8
|
6ba92e7fdd40090c5ed70fad3dd961c21f894844
|
refs/heads/master
| 2022-10-12T12:24:48.789230 | 2020-05-23T21:12:39 | 2020-05-23T21:12:45 | 270,212,467 | 1 | 0 |
MIT
| 2020-06-07T06:14:53 | 2020-06-07T06:14:52 | null |
UTF-8
|
Python
| false | false | 15,277 |
py
|
import enum
import os
import re
import select
import subprocess
import threading
import time
from platypush.config import Config
from platypush.context import get_bus, get_plugin
from platypush.plugins.media import PlayerState, MediaPlugin
from platypush.message.event.torrent import TorrentDownloadStartEvent, \
TorrentDownloadCompletedEvent, TorrentDownloadedMetadataEvent
from platypush.plugins import action
from platypush.utils import find_bins_in_path, find_files_by_ext, \
is_process_alive, get_ip_or_hostname
class TorrentState(enum.IntEnum):
IDLE = 1
DOWNLOADING_METADATA = 2
DOWNLOADING = 3
DOWNLOADED = 4
class MediaWebtorrentPlugin(MediaPlugin):
"""
Plugin to download and stream videos using webtorrent
Requires:
* **webtorrent** installed on your system (``npm install -g webtorrent``)
* **webtorrent-cli** installed on your system (``npm install -g webtorrent-cli``)
* A media plugin configured for streaming (e.g. media.mplayer, media.vlc, media.mpv or media.omxplayer)
"""
_supported_media_plugins = {'media.mplayer', 'media.omxplayer', 'media.mpv',
'media.vlc', 'media.webtorrent'}
# Download at least 15 MBs before starting streaming
_download_size_before_streaming = 15 * 2**20
_web_stream_ready_timeout = 120
def __init__(self, webtorrent_bin=None, webtorrent_port=None, *args,
**kwargs):
"""
media.webtorrent will use the default media player plugin you have
configured (e.g. mplayer, omxplayer, mpv) to stream the torrent.
:param webtorrent_bin: Path to your webtorrent executable. If not set,
then Platypush will search for the right executable in your PATH
:type webtorrent_bin: str
        :param webtorrent_port: Port where the webtorrent streaming server
            will be running (default: 8000)
:type webtorrent_port: int
"""
super().__init__(*args, **kwargs)
self.webtorrent_port = webtorrent_port
self._webtorrent_process = None
self._init_webtorrent_bin(webtorrent_bin=webtorrent_bin)
self._init_media_player()
self._download_started_event = threading.Event()
self._torrent_stream_urls = {}
def _init_webtorrent_bin(self, webtorrent_bin=None):
if not webtorrent_bin:
bin_name = 'webtorrent.exe' if os.name == 'nt' else 'webtorrent'
bins = find_bins_in_path(bin_name)
if not bins:
raise RuntimeError('Webtorrent executable not specified and ' +
'not found in your PATH. Make sure that ' +
'webtorrent is either installed or ' +
'configured and that both webtorrent and ' +
'webtorrent-cli are installed')
self.webtorrent_bin = bins[0]
else:
webtorrent_bin = os.path.expanduser(webtorrent_bin)
if not (os.path.isfile(webtorrent_bin)
and (os.name == 'nt' or os.access(webtorrent_bin, os.X_OK))):
                raise RuntimeError(('{} does not exist or is not a valid ' +
                                    'executable file').format(webtorrent_bin))
self.webtorrent_bin = webtorrent_bin
def _init_media_player(self):
self._media_plugin = None
for plugin_name in self._supported_media_plugins:
try:
if Config.get(plugin_name):
self._media_plugin = get_plugin(plugin_name)
break
except:
pass
if not self._media_plugin:
raise RuntimeError(('No media player specified and no ' +
'compatible media plugin configured - ' +
'supported media plugins: {}').format(
self._supported_media_plugins))
def _read_process_line(self):
line = self._webtorrent_process.stdout.readline().decode().strip()
        # Strip ANSI color escape codes from the output
        return re.sub(r'\x1b\[(([0-9]+m)|(.{1,2}))', '', line).strip()
def _process_monitor(self, resource, download_dir, download_only,
player_type, player_args):
def _thread():
if not self._webtorrent_process:
return
######
state = TorrentState.IDLE
bus = get_bus()
webtorrent_url = None
output_dir = None
media_file = None
poll = select.poll()
poll.register(self._webtorrent_process.stdout, select.POLLIN)
# First wait for the metadata to be ready and the streaming started
while True:
result = poll.poll(0)
if not result:
continue
if not self._is_process_alive():
break
line = self._read_process_line()
if 'fetching torrent metadata from' in line.lower() \
and state == TorrentState.IDLE:
# IDLE -> DOWNLOADING_METADATA
state = TorrentState.DOWNLOADING_METADATA
bus.post(TorrentDownloadedMetadataEvent(resource=resource))
elif 'downloading: ' in line.lower() \
and media_file is None:
# Find video files in torrent directory
output_dir = os.path.join(
download_dir, re.search(
'downloading: (.+?)$', line, flags=re.IGNORECASE
).group(1))
elif 'server running at: ' in line.lower() \
and webtorrent_url is None:
# Streaming started
webtorrent_url = re.search('server running at: (.+?)$',
line, flags=re.IGNORECASE).group(1)
webtorrent_url = webtorrent_url.replace(
'http://localhost', 'http://' + get_ip_or_hostname())
self._torrent_stream_urls[resource] = webtorrent_url
self._download_started_event.set()
self.logger.info('Torrent stream started on {}'.format(
webtorrent_url))
if output_dir and not media_file:
media_files = sorted(find_files_by_ext(
output_dir, *self._media_plugin.video_extensions))
if media_files:
# TODO support for queueing multiple media
media_file = os.path.join(output_dir, media_files[0])
else:
time.sleep(1) # Wait before the media file is created
if state.value <= TorrentState.DOWNLOADING_METADATA.value \
and media_file and webtorrent_url:
# DOWNLOADING_METADATA -> DOWNLOADING
state = TorrentState.DOWNLOADING
bus.post(TorrentDownloadStartEvent(
resource=resource, media_file=media_file,
stream_url=webtorrent_url))
break
if not output_dir:
raise RuntimeError('Could not download torrent')
if not download_only and (not media_file or not webtorrent_url):
if not media_file:
self.logger.warning(
'The torrent does not contain any video files')
else:
self.logger.warning('WebTorrent could not start streaming')
# Keep downloading but don't start the player
try: self._webtorrent_process.wait()
except: pass
return
player = None
if not download_only:
# Wait until we have enough chunks to start the player
while True:
result = poll.poll(0)
if not result:
continue
if not self._is_process_alive():
break
try:
if os.path.getsize(media_file) > \
self._download_size_before_streaming:
break
except FileNotFoundError:
continue
player = get_plugin('media.' + player_type) if player_type \
else self._media_plugin
media = media_file if player.is_local() else webtorrent_url
self.logger.info(
'Starting playback of {} to {} through {}'.format(
media_file, player.__class__.__name__,
webtorrent_url))
subfile = self.get_subtitles(media)
if subfile:
player_args['subtitles'] = subfile
player.play(media, **player_args)
self.logger.info('Waiting for player to terminate')
self._wait_for_player(player)
self.logger.info('Torrent player terminated')
bus.post(TorrentDownloadCompletedEvent(resource=resource,
output_dir=output_dir,
media_file=media_file))
try: self.quit()
except: pass
self.logger.info('WebTorrent process terminated')
return _thread
def _wait_for_player(self, player):
stop_evt = None
if player:
media_cls = player.__class__.__name__
if media_cls == 'MediaMplayerPlugin':
stop_evt = player._mplayer_stopped_event
elif media_cls == 'MediaMpvPlugin' or media_cls == 'MediaVlcPlugin':
stop_evt = threading.Event()
def stop_callback():
stop_evt.set()
player.on_stop(stop_callback)
elif media_cls == 'MediaOmxplayerPlugin':
stop_evt = threading.Event()
def stop_callback():
stop_evt.set()
player.add_handler('stop', stop_callback)
if stop_evt:
stop_evt.wait()
else:
# Fallback: wait for the webtorrent process to terminate
self._webtorrent_process.wait()
def _get_torrent_download_dir(self):
if self._media_plugin.download_dir:
return self._media_plugin.download_dir
else:
d = os.path.join(os.path.expanduser('~'), 'Downloads')
os.makedirs(d, exist_ok=True)
return d
def get_subtitles(self, filepath):
try:
plugin = get_plugin('media.subtitles')
if not plugin or not plugin.languages:
return
subs = plugin.get_subtitles(filepath).output
if not subs:
return
sub = plugin.download_subtitles(subs[0]['SubDownloadLink'],
filepath).output
if sub:
return sub['filename']
except Exception as e:
self.logger.warning('Could not get subtitles for {}: {}'.format(
filepath, str(e)))
@action
def play(self, resource, player=None, download_only=False, **player_args):
"""
Download and stream a torrent
:param resource: Play a resource, as a magnet link, torrent URL or
torrent file path
:type resource: str
:param player: If set, use this plugin type as a player for the
torrent. Supported types: 'mplayer', 'vlc', 'omxplayer', 'chromecast', 'mpv'.
If not set, then the default configured media plugin will be used.
:type player: str
:param player_args: Any arguments to pass to the player plugin's
play() method
:type player_args: dict
:param download_only: If false then it will start streaming the torrent on the local player once the
download starts, otherwise it will just download it (default: false)
:type download_only: bool
"""
if self._webtorrent_process:
try:
self.quit()
except Exception as e:
self.logger.debug('Failed to quit the previous instance: {}'.
format(str(e)))
download_dir = self._get_torrent_download_dir()
webtorrent_args = [self.webtorrent_bin, 'download', '-o', download_dir]
if self.webtorrent_port:
webtorrent_args += ['-p', self.webtorrent_port]
webtorrent_args += [resource]
self._download_started_event.clear()
self._webtorrent_process = subprocess.Popen(webtorrent_args,
stdout=subprocess.PIPE)
threading.Thread(target=self._process_monitor(
resource=resource, download_dir=download_dir,
player_type=player, player_args=player_args,
download_only=download_only)).start()
stream_url = None
player_ready_wait_start = time.time()
while not stream_url:
triggered = self._download_started_event.wait(
self._web_stream_ready_timeout)
if not triggered or time.time() - player_ready_wait_start >= \
self._web_stream_ready_timeout:
break
stream_url = self._torrent_stream_urls.get(resource)
if not stream_url:
return (None, ('The webtorrent process hasn\'t started ' +
'streaming after {} seconds').format(
self._web_stream_ready_timeout))
return {'resource': resource, 'url': stream_url}
@action
def download(self, resource, **kwargs):
return self.play(resource, download_only=True)
@action
def stop(self):
""" Stop the playback """
return self.quit()
@action
def quit(self):
""" Quit the player """
if self._is_process_alive():
self._webtorrent_process.terminate()
self._webtorrent_process.wait()
try: self._webtorrent_process.kill()
except: pass
self._webtorrent_process = None
@action
def load(self, resource, **kwargs):
"""
Load a torrent resource in the player.
"""
return self.play(resource)
def _is_process_alive(self):
return is_process_alive(self._webtorrent_process.pid) \
if self._webtorrent_process else False
@action
def status(self):
"""
Get the current player state.
:returns: A dictionary containing the current state.
Example::
output = {
"state": "play" # or "stop" or "pause"
}
"""
return {'state': self._media_plugin.status().get('state', PlayerState.STOP.value)}
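# Example (hypothetical) invocation sketch -- e.g. the JSON body of a platypush
# request routed to this plugin; the magnet link is a placeholder:
#
#   {"type": "request", "action": "media.webtorrent.play",
#    "args": {"resource": "magnet:?xt=urn:btih:..."}}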
# vim:sw=4:ts=4:et:
|
[
"[email protected]"
] | |
86956855ea6b190a1147fd6a1574cf7b802e2f9a
|
d6b8c5594c668cd2aa907f79e80ea00f97b82d97
|
/lux/openapi/core.py
|
b2bb9e3338a2e96a216236526b0b6297e14f2503
|
[
"BSD-3-Clause"
] |
permissive
|
quantmind/lux
|
68d44242cd10a012f32888174d9db801c09b2715
|
7318fcd86c77616aada41d8182a04339680a554c
|
refs/heads/master
| 2021-01-23T11:32:03.180026 | 2018-01-06T09:28:30 | 2018-01-06T09:28:30 | 16,417,125 | 21 | 16 |
BSD-3-Clause
| 2019-10-22T23:21:37 | 2014-01-31T18:53:55 |
Python
|
UTF-8
|
Python
| false | false | 10,310 |
py
|
import logging
from collections import OrderedDict
from .utils import compact
METHODS = ['get', 'head', 'post', 'put', 'patch', 'delete', 'trace']
LOGGER = logging.getLogger('lux.rest.openapi')
class OpenAPIError(Exception):
"""Base class for all apispec-related errors."""
pass
class PluginError(OpenAPIError):
"""Raised when a plugin cannot be found or is invalid."""
pass
class OperationInfo:
def __init__(self,
path=None,
body=None,
query=None,
header=None,
responses=None,
default_response=200,
default_response_schema=None):
self.path = path
self.body = body
self.query = query
self.header = header
self.default_response = default_response
self.responses = dict()
self.responses[default_response] = default_response_schema
if isinstance(responses, dict):
self.responses.update(responses)
elif responses:
self.responses.update(((r, None) for r in responses))
@property
def schema(self):
schema = self.responses[self.default_response]
if isinstance(schema, list):
schema = schema[0]
return schema
class OpenApiSchema:
def __init__(self, name):
self.name = name
def schema(self):
"""Convert the schema into a valid OpenApi Json schema object
"""
raise NotImplementedError
def parameters(self, **kwargs):
"""Convert the schema into a valid OpenApi Parameters map
"""
raise NotImplementedError
class OpenAPIbase:
def __init__(self, doc=None):
self.doc = doc or {}
self.parameters = {}
self.servers = []
self._name_loc = {}
self.tags = dict(tag_generator(self.doc.pop('tags', None)))
def add(self, key, value):
if value:
self.doc[key] = value
def add_parameters(self, schema, spec=None, **kw):
"""Add parameters from a schema object
"""
if not schema:
return
spec = spec or self
for func in spec.schema_helpers:
schema = func(schema) or schema
if isinstance(schema, OpenApiSchema):
for param in schema.parameters(spec, **kw):
name = param['name']
loc = param['in']
key = (loc, name)
if key in self._name_loc:
LOGGER.error('parameter %s already in %s', name, loc)
else:
self._name_loc[key] = True
if name in self.parameters:
name = '%s%s' % (name, loc)
self.parameters[name] = param
else:
raise PluginError(
'Could not find a valid plugin to convert a schema to '
'an OpenAPI schema'
)
def add_server(self, url, description=None):
"""Add a server object to this Api Object
"""
self.servers.append(compact(url=url, description=description))
class OpenAPI(OpenAPIbase):
"""Open API v 3.0 document builder
"""
version = '3.0.0'
def __init__(self, title, version, info=None, plugins=(),
default_content_type=None, default_responses=None,
**options):
super().__init__(options)
self.doc.update(
openapi=self.version,
info=dict(title=title, version=version),
paths=OrderedDict()
)
self.doc['info'].update(info or {})
# Metadata
self.schemas = {}
self.parameters = {}
self.responses = {}
self.tags = OrderedDict()
self.plugins = {}
self.default_content_type = default_content_type or 'application/json'
self.default_responses = default_responses or {}
#
self.parameter_helpers = []
self.schema_helpers = []
for plugin_path in plugins:
self.setup_plugin(plugin_path)
def __repr__(self):
return '%s %s' % (self.__class__.__name__, self.version)
@property
def paths(self):
return self.doc['paths']
def to_dict(self):
s = self.schemas
p = self.parameters
r = self.responses
ret = self.doc.copy()
ret.update(compact(
tags=[self.tags[name] for name in sorted(self.tags)],
components=compact(
schemas=OrderedDict(((k, s[k]) for k in sorted(s))),
parameters=OrderedDict(((k, p[k]) for k in sorted(p))),
responses=OrderedDict(((k, r[k]) for k in sorted(r))),
),
servers=self.servers
))
return ret
# adapted from Sphinx
def setup_plugin(self, path):
"""Import and setup a plugin. No-op if called twice
for the same plugin.
:param str path: Import path to the plugin.
:raise: PluginError if the given plugin is invalid.
"""
if path in self.plugins:
return
try:
mod = __import__(
path, globals=None, locals=None, fromlist=('setup',)
)
except ImportError as err:
raise PluginError(
'Could not import plugin "{0}"\n\n{1}'.format(path, err)
)
if not hasattr(mod, 'setup'):
raise PluginError(
'Plugin "{0}" has no setup(spec) function'.format(path))
else:
# Each plugin gets a dict to store arbitrary data
self.plugins[path] = {}
mod.setup(self)
return None
def add_path(self, router, **kwargs):
path = ApiPath(router, **kwargs)
path.add_to_spec(self)
def add_schema(self, schema):
"""Add a schema to the schema mapping in component
"""
for func in self.schema_helpers:
schema = func(schema) or schema
if isinstance(schema, OpenApiSchema):
if schema.name not in self.schemas:
self.schemas[schema.name] = schema.schema(self)
return {'$ref': '#/components/schemas/%s' % schema.name}
elif schema:
LOGGER.error(
'Could not find a valid plugin to convert %r to '
'an OpenAPI schema', schema
)
def schema2parameters(self, schema, **kw):
for func in self.parameter_helpers:
schema = func(schema) or schema
if isinstance(schema, OpenApiSchema):
return schema.parameters(self, **kw)
else:
raise PluginError(
'Could not find a valid plugin to convert schema to '
'OpenAPI parameters'
)
class ApiPath(OpenAPIbase):
"""Utility class for adding a path object to the OpenAPI spec
The path object (dictionary) is extracted from the router
HTTP methods
"""
def __init__(self, path, doc=None):
super().__init__(doc)
self.path = path
self.operations = OrderedDict()
def __repr__(self):
return self.path
def add_to_spec(self, spec):
spec.doc['paths'][self.path] = self.doc
class ApiOperation(OpenAPIbase):
"""Utility class for adding an operation to an API Path
"""
def __init__(self, doc, method, extra_info=None):
super().__init__(doc)
self.method = method
self.info = extra_info
def __repr__(self):
return self.method
def add_to_path(self, path, spec):
info = self.info
self.tags.update(path.tags)
self.add('tags', sorted(self.tags))
spec.tags.update(self.tags)
if info:
self.add_parameters(info.path, spec, location='path')
self.add_parameters(info.query, spec, location='query')
self.add_parameters(info.header, spec, location='header')
self.add_body(info.body, spec)
self.add_responses(path, spec)
p = self.parameters
self.add('parameters', [p[k] for k in sorted(p)])
return self.doc
def add_body(self, schema, spec):
if not schema:
return
body = self.doc.get('requestBody')
if not body:
self.doc['requestBody'] = body = dict()
self.add_content_schema(schema, body, spec)
def add_responses(self, path, spec):
info = self.info
responses = self.doc.get('responses', None) or {}
if info:
defaults = spec.default_responses
all_responses = {}
for code, cfg in info.responses.items():
doc = {}
doc.update(defaults.get(code) or ())
try:
doc.update(responses.get(code) or ())
except ValueError as exc:
LOGGER.error(
"Cannot updated '%s %s' response '%s' with "
"doc string %r, a dictionary is expected",
self, path, code, responses[code]
)
schema = doc.pop('schema', None)
self.add_content_schema(cfg or schema, doc, spec)
all_responses[code] = doc
responses = all_responses
self.doc['responses'] = OrderedDict(
((code, responses[code]) for code in responses)
)
def add_content_schema(self, schema, doc, spec, content_type=None):
is_array = False
if isinstance(schema, list) and len(schema) == 1:
is_array = True
schema = schema[0]
if not isinstance(schema, str):
schema = spec.add_schema(schema)
if not schema:
return
if 'content' not in doc:
doc['content'] = {}
content = doc['content']
if not content_type:
content_type = spec.default_content_type
if is_array:
content[content_type] = dict(
schema=dict(type='array', items=schema)
)
else:
content[content_type] = dict(schema=schema)
def tag_generator(tags):
for tag in (tags or ()):
if isinstance(tag, str):
tag = dict(name=tag)
yield tag['name'], tag
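# Example (hypothetical) usage sketch -- building a bare spec document with no
# plugins configured:
#
#   spec = OpenAPI('Example API', '1.0.0')
#   spec.add_server('https://api.example.com', 'production')
#   document = spec.to_dict()   # dict ready for JSON/YAML serialization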
|
[
"[email protected]"
] | |
9fa8a0e9bb487fcb5ad1e68336225d1649d3661b
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/test/test_campaign_feed_type.py
|
e632ecbca72b678f7a4db97229be13d4f1347343
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 |
Apache-2.0
| 2023-06-02T05:19:40 | 2022-01-11T07:23:17 |
Python
|
UTF-8
|
Python
| false | false | 1,017 |
py
|
"""
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.campaignfeed.model.app_info_shadow_type import AppInfoShadowType
from baiduads.campaignfeed.model.app_info_type import AppInfoType
from baiduads.campaignfeed.model.schedule_type import ScheduleType
globals()['AppInfoShadowType'] = AppInfoShadowType
globals()['AppInfoType'] = AppInfoType
globals()['ScheduleType'] = ScheduleType
from baiduads.campaignfeed.model.campaign_feed_type import CampaignFeedType
class TestCampaignFeedType(unittest.TestCase):
"""CampaignFeedType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCampaignFeedType(self):
"""Test CampaignFeedType"""
# FIXME: construct object with mandatory attributes with example values
# model = CampaignFeedType() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
d6bcca1d303319ff1a55c17cac8ab8a370e5a279
|
a303be0a547d717b0deb19b5bdcc75010e131b51
|
/Contests/Others/ Week of Code/ Week of Code 37 /p5.py
|
e817e377cb4991fe33d589076be6739cb4fb319f
|
[] |
no_license
|
harrypotter0/competitive-programming
|
ff883c4dc5aa8d72f1af589bb654a422e32c8a38
|
82a8497e69212dc62e75af74b0d5a3b390b8aca2
|
refs/heads/master
| 2023-03-23T07:07:14.295053 | 2021-03-17T01:24:45 | 2021-03-17T01:24:45 | 70,964,689 | 16 | 9 | null | 2021-03-17T01:24:49 | 2016-10-15T03:52:53 |
Python
|
UTF-8
|
Python
| false | false | 6,684 |
py
|
# /*
# *
# ********************************************************************************************
# * AUTHOR : AKASH KANDPAL *
# * Language : Python2 *
# * Motto : The master has failed more times than the beginner has even tried. *
# * IDE used: Atom *
# * My Domain : http://harrypotter.tech/ *
# ********************************************************************************************
# *
# */
from collections import Counter
from math import ceil
from fractions import gcd
import math
import itertools
from itertools import permutations
from itertools import combinations
import calendar
from itertools import product
from datetime import date
from string import ascii_uppercase
def printdec(ans):
print '{0:.6f}'.format(ans)
def countchars(stra):
s=Counter(stra)
return s
def readInts():
return list(map(int, raw_input().strip().split()))
def readInt():
return int(raw_input())
def readStrs():
return raw_input().split()
def readStr():
return raw_input().strip()
def readarr(n):
return [map(int,list(readStr())) for i in xrange(n)]
def readnumbertolist():
a=[int(i) for i in list(raw_input())]
return a
def strlistTostr(list1):
return ''.join(list1)
def numlistTostr(list1):
return ''.join(str(e) for e in list1)
def strTolist(str):
return str.split()
def strlistTointlist(str):
return map(int, str)
def slicenum(number,x):
return int(str(number)[:x])
def precise(num):
return "{0:.10f}".format(num)
def rsorted(a):
return sorted(a,reverse=True)
def binar(x):
return '{0:063b}'.format(x)
def findpermute(word):
perms = [''.join(p) for p in permutations(word)]
perms = list(set(perms))
return perms
def findsubsets(S,m):
return list(set(itertools.combinations(S, m)))
def sort1(yy,index):
return yy.sort(key = lambda x:x[index])
def reversepair(yy):
return yy[::-1]
def checkint(x):
return (x).is_integer()
def sum_digits(n):
s = 0
while n:
s += n % 10
n //= 10
return s
def vowel_count(str):
count = 0
vowel = set("aeiouAEIOU")
for alphabet in str:
if alphabet in vowel:
count = count + 1
return count
def leapyear(year):
return calendar.isleap(year)
def factorial(n):
p=1
for i in range(2,n+1):
p=p*i
return p
def primes_sieve(limit):
limitn = limit+1
not_prime = set()
primes = []
for i in range(2, limitn):
if i in not_prime:
continue
for f in range(i*2, limitn, i):
not_prime.add(f)
primes.append(i)
return primes
def distinctstr(s):
t =''.join(set(s))
return t
def countdict(s):
d ={}
for i in range(len(s)):
if s[i] not in d.keys():
d[s[i]]=1
else:
d[s[i]]+=1
return d
import operator as op
def nck(n, k):
k = min(n-k,k)
result = 1
for i in range(1, k+1):
result = result* (n-i+1) / i
return result
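# worked example: nck(5, 2) == 10, since min(5-2, 2) = 2 and 1 * (5/1) * (4/2) = 10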
def gcd(a,b):
while b > 0:
a, b = b, a % b
return a
def lcm(a, b):
return a * b / gcd(a, b)
def matrixcheck(x,y):
faadu = []
directions = zip((0,0,1,-1),(1,-1,0,0))
for dx,dy in directions:
if R>x+dx>=0<=y+dy<C and A[x+dx][y+dy]==0:
faadu.append((x+dx,y+dy))
return faadu
def stringcount(s):
return [s.count(i) for i in "abcdefghijklmnopqrstuvwxyz"]
def bubbleSort(arr):
n = len(arr)
for i in range(n):
for j in range(0, n-i-1):
if arr[j] > arr[j+1] :
arr[j], arr[j+1] = arr[j+1], arr[j]
def isSubsetSum(st, n, sm) :
# arr, n, k
    subset = [[True] * (sm+1) for _ in range(n+1)]  # fresh row per i; [[...]]*(n+1) would alias a single row
for i in range(0, n+1) :
subset[i][0] = True
for i in range(1, sm + 1) :
subset[0][i] = False
for i in range(1, n+1) :
for j in range(1, sm+1) :
if(j < st[i-1]) :
subset[i][j] = subset[i-1][j]
if (j >= st[i-1]) :
subset[i][j] = subset[i-1][j] or subset[i - 1][j-st[i-1]]
return subset[n][sm];
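# worked example: isSubsetSum([3, 34, 4, 12, 5, 2], 6, 9) -> True (4 + 5 == 9)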
def decimal_to_octal(dec):
decimal = int(dec)
return oct(decimal)
def decimal_to_binary(dec):
decimal = int(dec)
return bin(decimal)
def decimal_to_hexadecimal(dec):
decimal = int(dec)
return hex(decimal)
def find_duplicate(expr):
stack=[]
char_in_between = 0
f =1
for i in range(0, len(expr)):
if expr[i] == '}' or expr[i] == ')':
pair = '{' if expr[i] == '}' else '('
pop=''
while(len(stack) > 0 and pop != pair):
pop = stack.pop()
if (pop != '{' and pop != '('): char_in_between +=1
if char_in_between == 0:
print "Duplicate"
f =0
break
char_in_between = 0
else:
stack.append(expr[i])
return f
def dictlist(keys,values):
    d = {}  # was undefined in the original
    for key, value in zip(keys, values):
        d.setdefault(key, []).append(value)
    return d
def mullistbyconst(my_list,r):
my_new_list = []
for i in my_list:
my_new_list.append(i * r)
return my_new_list
def coinchange(S, m, n):
# (arr,length,sum)
table = [0 for k in range(n+1)]
table[0] = 1
for i in range(0,m):
for j in range(S[i],n+1):
table[j] += table[j-S[i]]
return table[n]
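# worked example: coinchange([1, 2, 3], 3, 4) -> 4 ways (1+1+1+1, 1+1+2, 2+2, 1+3)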
def palincheck(i):
return str(i) == str(i)[::-1]
def days(year1,year2):
begin = date(year1, 1, 1)
end = date(year2, 1, 1)
return (end-begin).days
from functools import reduce
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(pow(n, 0.5) + 1)) if n % i == 0)))
def prelongfact(factt):
for i in reversed(range(1,int(factt**0.5))):
if factt%i==0:
break
return factt/i
def factmul(n,lim,m):
mul=1
ans=1
if(n>=lim):
print 0
else:
for j in range(1,n+1):
mul=(mul*j)%m
ans=(ans*mul)%m
print ans
def knapSack(W , wt , val , n):
if n == 0 or W == 0 :
return 0
if (wt[n-1] > W):
return knapSack(W , wt , val , n-1)
else:
return max(val[n-1] + knapSack(W-wt[n-1] , wt , val , n-1),
knapSack(W , wt , val , n-1))
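# worked example: knapSack(50, [10, 20, 30], [60, 100, 120], 3) -> 220 (take items of weight 20 and 30)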
m = 329885391853
lim = prelongfact(m)
mod = 10 ** 9 + 7
# fact=[1]
# for i in xrange(1,100001):
# fact.append(((arr[i-1]%mod)*(i%mod))%mod)
# for i,j in product(xrange(R),xrange(C)):
# print "Case #{}: {}".format(i+1,ans)
for __ in range(readInt()):
n,k = readInts()
'''
'''
# ===== file: /atcoder/abc129/b.py | repo: seiichiinoue/procon | license: none =====
n, w = int(input()), list(map(int, input().split()))
l, r = 0, sum(w)
ans = 10 ** 8
for i in range(len(w)):
l += w[i]
r -= w[i]
tmp = abs(l - r)
if tmp < ans:
ans = tmp
print(ans)
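# worked example (sketch): n=3, w=[1, 2, 3] -> 0, since splitting after the
# second weight gives |(1+2) - 3| = 0; each loop step moves w[i] from the
# suffix sum r into the prefix sum l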
# ===== file: /_algorithms_challenges/pybites/100DaysOfCode-master/081/test_whotweeted.py | repo: syurskyi/Algorithms_and_Data_Structure | license: none =====
import unittest
from unittest.mock import patch
import tweepy
from whotweeted import get_country_code, who_is_output
from whotweeted import load_cache
DATA = dict(AU='875639674244444160',
ES='875669971954806784',
nopb='846302762736504833',
noloc='844092059988508673',
badid='8756396742444441da'
)
get_tweet = lambda x: load_cache(DATA.get(x)) # noqa E731
class WhoTweetedTestCase(unittest.TestCase):
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('AU'))
def test_julian(self, mock_method):
tweetid = DATA.get('AU')
country = get_country_code(tweetid)
who_is_out = who_is_output(country)
self.assertEqual(country, 'AU')
self.assertIn('Julian', who_is_out)
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('ES'))
def test_bob(self, mock_method):
tweetid = DATA.get('ES')
country = get_country_code(tweetid)
who_is_out = who_is_output(country)
self.assertEqual(country, 'ES')
self.assertIn('Bob', who_is_out)
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('nopb'))
def test_no_pybites_account(self, mock_method):
tweetid = DATA.get('nopb')
with self.assertRaises(ValueError):
get_country_code(tweetid)
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('noloc'))
def test_no_location_in_tweet(self, mock_method):
tweetid = DATA.get('noloc')
with self.assertRaises(AttributeError):
get_country_code(tweetid)
    # not really a return value; it crashes before the decorator can cache the tweet
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('nopb'))
def test_bad_tweet_id(self, mock_method):
tweetid = DATA.get('badid')
print(tweetid)
with self.assertRaises(ValueError):
get_country_code(tweetid)
if __name__ == '__main__':
unittest.main()
# ===== file: /methods/transformers/src/transformers/training_args.py | repo: INK-USC/RiddleSense | license: MIT, Apache-2.0 =====
import dataclasses
import json
import os
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required
from .trainer_utils import EvaluationStrategy
from .utils import logging
if is_torch_available():
import torch
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
def default_logdir() -> str:
"""
Same default as PyTorch
"""
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
return os.path.join("runs", current_time + "_" + socket.gethostname())
@dataclass
class TrainingArguments:
"""
TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
itself**.
Using :class:`~transformers.HfArgumentParser` we can turn this class into argparse arguments to be able to specify
them on the command line.
Parameters:
output_dir (:obj:`str`):
The output directory where the model predictions and checkpoints will be written.
overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`True`, overwrite the content of the output directory. Use this to continue training if
:obj:`output_dir` points to a checkpoint directory.
do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run training or not. This argument is not directly used by :class:`~transformers.Trainer`, it's
intended to be used by your training/evaluation scripts instead. See the `example scripts
<https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
do_eval (:obj:`bool`, `optional`):
Whether to run evaluation on the dev set or not. Will be set to :obj:`True` if :obj:`evaluation_strategy`
is different from :obj:`"no"`. This argument is not directly used by :class:`~transformers.Trainer`, it's
intended to be used by your training/evaluation scripts instead. See the `example scripts
<https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run predictions on the test set or not. This argument is not directly used by
:class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See
the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more
details.
evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
The evaluation strategy to adopt during training. Possible values are:
* :obj:`"no"`: No evaluation is done during training.
* :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
* :obj:`"epoch"`: Evaluation is done at the end of each epoch.
prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):
When performing evaluation and predictions, only returns the loss.
per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for training.
per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for evaluation.
gradient_accumulation_steps (:obj:`int`, `optional`, defaults to 1):
Number of updates steps to accumulate the gradients for, before performing a backward/update pass.
.. warning::
When using gradient accumulation, one step is counted as one step with backward pass. Therefore,
logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training
examples.
eval_accumulation_steps (:obj:`int`, `optional`):
Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If
left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but
requires more memory).
learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
The initial learning rate for Adam.
weight_decay (:obj:`float`, `optional`, defaults to 0):
The weight decay to apply (if not zero).
adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
Epsilon for the Adam optimizer.
max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
Maximum gradient norm (for gradient clipping).
num_train_epochs(:obj:`float`, `optional`, defaults to 3.0):
Total number of training epochs to perform (if not an integer, will perform the decimal part percents of
the last epoch before stopping training).
max_steps (:obj:`int`, `optional`, defaults to -1):
If set to a positive number, the total number of training steps to perform. Overrides
:obj:`num_train_epochs`.
warmup_steps (:obj:`int`, `optional`, defaults to 0):
Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.
logging_dir (:obj:`str`, `optional`):
Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.
logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to log and evaluate the first :obj:`global_step` or not.
logging_steps (:obj:`int`, `optional`, defaults to 500):
Number of update steps between two logs.
save_steps (:obj:`int`, `optional`, defaults to 500):
Number of updates steps before two checkpoint saves.
save_total_limit (:obj:`int`, `optional`):
If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
:obj:`output_dir`.
        no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to avoid using CUDA even when it is available.
seed (:obj:`int`, `optional`, defaults to 42):
Random seed for initialization.
fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.
fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details
on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.
local_rank (:obj:`int`, `optional`, defaults to -1):
During distributed training, the rank of the process.
tpu_num_cores (:obj:`int`, `optional`):
When training on TPU, the number of TPU cores (automatically passed by launcher script).
debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
When training on TPU, whether to print debug metrics or not.
dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
or not.
eval_steps (:obj:`int`, `optional`):
Number of update steps between two evaluations if :obj:`evaluation_strategy="steps"`. Will default to the
same value as :obj:`logging_steps` if not set.
dataloader_num_workers (:obj:`int`, `optional`, defaults to 0):
Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the
main process.
past_index (:obj:`int`, `optional`, defaults to -1):
            Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>` can
make use of the past hidden states for their predictions. If this argument is set to a positive int, the
``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model
at the next training step under the keyword argument ``mems``.
run_name (:obj:`str`, `optional`):
A descriptor for the run. Notably used for wandb logging.
disable_tqdm (:obj:`bool`, `optional`):
Whether or not to disable the tqdm progress bars. Will default to :obj:`True` if the logging level is set
to warn or lower (default), :obj:`False` otherwise.
remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`):
If using `nlp.Dataset` datasets, whether or not to automatically remove the columns unused by the model
forward method.
(Note that this behavior is not implemented for :class:`~transformers.TFTrainer` yet.)
label_names (:obj:`List[str]`, `optional`):
The list of keys in your dictionary of inputs that correspond to the labels.
Will eventually default to :obj:`["labels"]` except if the model used is one of the
:obj:`XxxForQuestionAnswering` in which case it will default to :obj:`["start_positions",
"end_positions"]`.
load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to load the best model found during training at the end of training.
.. note::
When set to :obj:`True`, the parameters :obj:`save_steps` will be ignored and the model will be saved
after each evaluation.
metric_for_best_model (:obj:`str`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different
models. Must be the name of a metric returned by the evaluation with or without the prefix :obj:`"eval_"`.
Will default to :obj:`"loss"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation
loss).
If you set this value, :obj:`greater_is_better` will default to :obj:`True`. Don't forget to set it to
:obj:`False` if your metric is better when lower.
greater_is_better (:obj:`bool`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better
models should have a greater metric or not. Will default to:
- :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`"loss"` or
:obj:`"eval_loss"`.
- :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`"loss"` or :obj:`"eval_loss"`.
"""
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory."
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=None, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
evaluate_during_training: bool = field(
default=False,
metadata={"help": "Run evaluation during training at each logging step."},
)
evaluation_strategy: EvaluationStrategy = field(
default="no",
metadata={"help": "Run evaluation during training at each logging step."},
)
prediction_loss_only: bool = field(
default=False,
metadata={"help": "When performing evaluation and predictions, only returns the loss."},
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
per_gpu_train_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
},
)
per_gpu_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
"Batch size per GPU/TPU core/CPU for evaluation."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."},
)
eval_accumulation_steps: Optional[int] = field(
default=None,
metadata={"help": "Number of predictions steps to accumulate before moving the tensors to the CPU."},
)
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for Adam."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."})
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for Adam optimizer"})
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for Adam optimizer"})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."})
max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
max_steps: int = field(
default=-1,
metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."},
)
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
logging_first_step: bool = field(default=False, metadata={"help": "Log the first global_step"})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
save_total_limit: Optional[int] = field(
default=None,
metadata={
"help": (
"Limit the total amount of checkpoints."
"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
)
},
)
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
seed: int = field(default=42, metadata={"help": "random seed for initialization"})
fp16: bool = field(
default=False,
metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"},
)
fp16_opt_level: str = field(
default="O1",
metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
)
},
)
local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"})
tpu_num_cores: Optional[int] = field(
default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"}
)
tpu_metrics_debug: bool = field(
default=False,
metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"},
)
debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"})
dataloader_drop_last: bool = field(
default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."}
)
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
dataloader_num_workers: int = field(
default=0,
metadata={
"help": "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process."
},
)
past_index: int = field(
default=-1,
metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."},
)
run_name: Optional[str] = field(
default=None, metadata={"help": "An optional descriptor for the run. Notably used for wandb logging."}
)
disable_tqdm: Optional[bool] = field(
default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."}
)
remove_unused_columns: Optional[bool] = field(
default=True, metadata={"help": "Remove columns not required by the model when using an nlp.Dataset."}
)
label_names: Optional[List[str]] = field(
default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."}
)
load_best_model_at_end: Optional[bool] = field(
default=False,
metadata={"help": "Whether or not to load the best model found during training at the end of training."},
)
metric_for_best_model: Optional[str] = field(
default=None, metadata={"help": "The metric to use to compare two different models."}
)
greater_is_better: Optional[bool] = field(
default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."}
)
def __post_init__(self):
if self.disable_tqdm is None:
self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN
if self.evaluate_during_training is True:
self.evaluation_strategy = EvaluationStrategy.STEPS
warnings.warn(
"The `evaluate_during_training` argument is deprecated in favor of `evaluation_strategy` (which has more options)",
FutureWarning,
)
self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)
if self.do_eval is False and self.evaluation_strategy != EvaluationStrategy.NO:
self.do_eval = True
if self.eval_steps is None:
self.eval_steps = self.logging_steps
if self.load_best_model_at_end and self.metric_for_best_model is None:
self.metric_for_best_model = "loss"
if self.greater_is_better is None and self.metric_for_best_model is not None:
self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"]
if self.run_name is None:
self.run_name = self.output_dir
if is_torch_available() and self.device.type != "cuda" and self.fp16:
raise ValueError("AMP (`--fp16`) can only be used on CUDA devices.")
@property
def train_batch_size(self) -> int:
"""
The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
"""
if self.per_gpu_train_batch_size:
logger.warning(
"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
"version. Using `--per_device_train_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
return per_device_batch_size * max(1, self.n_gpu)
@property
def eval_batch_size(self) -> int:
"""
The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
"""
if self.per_gpu_eval_batch_size:
logger.warning(
"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
"version. Using `--per_device_eval_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
return per_device_batch_size * max(1, self.n_gpu)
@cached_property
@torch_required
def _setup_devices(self) -> Tuple["torch.device", int]:
logger.info("PyTorch: setting up devices")
if self.no_cuda:
device = torch.device("cpu")
n_gpu = 0
elif is_torch_tpu_available():
device = xm.xla_device()
n_gpu = 0
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
device = torch.device("cuda", self.local_rank)
n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device)
return device, n_gpu
@property
@torch_required
def device(self) -> "torch.device":
"""
The device used by this process.
"""
return self._setup_devices[0]
@property
@torch_required
def n_gpu(self):
"""
The number of GPUs used by this process.
Note:
This will only be greater than one when you have multiple GPUs available but are not using distributed
training. For distributed training, it will always be 1.
"""
return self._setup_devices[1]
def to_dict(self):
"""
Serializes this instance while replace `Enum` by their values (for JSON serialization support).
"""
d = dataclasses.asdict(self)
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
return d
def to_json_string(self):
"""
Serializes this instance to a JSON string.
"""
return json.dumps(self.to_dict(), indent=2)
def to_sanitized_dict(self) -> Dict[str, Any]:
"""
Sanitized serialization to use with TensorBoard’s hparams
"""
d = self.to_dict()
d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}}
valid_types = [bool, int, float, str]
if is_torch_available():
valid_types.append(torch.Tensor)
return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}
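# Minimal usage sketch (the values and output path are hypothetical; this
# snippet is not part of the library itself):
#
#   args = TrainingArguments(
#       output_dir="./out",
#       per_device_train_batch_size=16,
#       num_train_epochs=3.0,
#       evaluation_strategy="steps",
#   )
#   args.train_batch_size   # per-device size scaled by max(1, n_gpu)
#   args.to_json_string()   # JSON dump with Enum fields replaced by their values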
# ===== file: /benchmark/startPyquil3353.py | repo: UCLA-SEAL/QDiff | license: BSD-3-Clause =====
# qubit number=4
# total number=41
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(3) # number=30
prog += CZ(0,3) # number=31
prog += H(3) # number=32
prog += CNOT(0,3) # number=33
prog += X(3) # number=34
prog += CNOT(0,3) # number=35
prog += CNOT(0,3) # number=29
prog += H(1) # number=2
prog += CNOT(2,0) # number=38
prog += Z(2) # number=39
prog += CNOT(2,0) # number=40
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(2) # number=37
prog += Y(3) # number=12
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
prog += Y(2) # number=10
prog += Y(2) # number=11
prog += CNOT(1,0) # number=13
prog += H(0) # number=15
prog += CZ(1,0) # number=16
prog += H(1) # number=20
prog += H(2) # number=19
prog += CNOT(3,0) # number=24
prog += Z(3) # number=25
prog += CNOT(3,0) # number=26
prog += H(0) # number=17
prog += CNOT(2,0) # number=21
prog += X(1) # number=23
prog += CNOT(2,0) # number=22
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil3353.csv","w")
    print(summarise_results(bitstrings),file=writefile)
writefile.close()
# ===== file: /python/test/test_account_properties.py | repo: KoenBal/OANDA_V20_Client | license: MIT =====
# coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import oanda
from oanda.models.account_properties import AccountProperties # noqa: E501
from oanda.rest import ApiException
class TestAccountProperties(unittest.TestCase):
"""AccountProperties unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAccountProperties(self):
"""Test AccountProperties"""
# FIXME: construct object with mandatory attributes with example values
# model = oanda.models.account_properties.AccountProperties() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
# ===== file: /busshaming/conf/dev_settings.py | repo: katharosada/bus-shaming | license: MIT =====
|
"""
Django settings for busshaming project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#$sxqt-6%d*d95@7*=j%bg*-32(ic@lst#396=0f$54_4*++r3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
CORS_ORIGIN_WHITELIST = (
'127.0.0.1:8000',
'localhost:8000',
'127.0.0.1:8080',
'localhost:8080',
)
ROOT_URLCONF = 'busshaming.urls'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'busshaming-local',
'HOST': 'localhost',
'PORT': '',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
}
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'INFO',
},
}
}
# ===== file: /apostello/templatetags/apostello_extras.py | repo: dmccubbing/apostello | license: MIT =====
|
from django import template
from django.urls import reverse
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def fab_button(href, text, icon_name):
"""Output a formatted fab link"""
result = '''
<a class="hvr-backward item" href="{href}">
<i class="large {icon_name} icon"></i>
<div class="content"><div class="header">{text}</div>
</div>
</a>
'''.format(
href=href,
text=text,
icon_name=icon_name,
)
return mark_safe(result)
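# Template usage sketch (assumes the app is installed so this module loads as
# a template-tag library, per standard Django convention):
#
#   {% load apostello_extras %}
#   {% fab_new_contact %}
#   {% fab_keyword_edit keyword %}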
# Contacts
@register.simple_tag
def fab_new_contact():
return fab_button(reverse('recipient'), 'New Contact', 'plus')
@register.simple_tag
def fab_contacts_archive():
return fab_button(
reverse('recipients_archive'), 'Archived Contacts', 'table'
)
# Groups
@register.simple_tag
def fab_new_group():
return fab_button(reverse('group'), 'New Group', 'plus')
@register.simple_tag
def fab_groups_archive():
return fab_button(
reverse('recipient_groups_archive'), 'Archived Groups', 'table'
)
@register.simple_tag
def fab_groups():
return fab_button(reverse('recipient_groups'), 'Groups', 'table')
# Incoming SMS
@register.simple_tag
def fab_incoming_wall():
return fab_button(reverse('incoming_wall'), 'Live Updates', 'inbox')
@register.simple_tag
def fab_incoming_wall_curator():
return fab_button(
reverse('incoming_wall_curator'), 'Live Curator', 'table'
)
# Keywords
@register.simple_tag
def fab_new_keyword():
return fab_button(reverse('keyword'), 'New Keyword', 'plus')
@register.simple_tag
def fab_keywords():
return fab_button(reverse('keywords'), 'Keywords', 'table')
@register.simple_tag
def fab_keywords_archive():
return fab_button(
reverse('keywords_archive'), 'Archived Keywords', 'table'
)
@register.simple_tag
def fab_keyword_csv(keyword):
return fab_button(
reverse(
'keyword_csv', args=[keyword.pk]
),
'Export {k} responses'.format(k=keyword.keyword),
'download'
)
@register.simple_tag
def fab_keyword_edit(keyword):
return fab_button(reverse('keyword', args=[keyword.pk]), 'Edit', 'edit')
@register.simple_tag
def fab_keyword_responses(keyword):
return fab_button(
reverse(
'keyword_responses', args=[keyword.pk]
),
'Replies ({n})'.format(n=keyword.num_matches),
'inbox'
)
@register.simple_tag
def fab_keyword_responses_archive(keyword):
return fab_button(
reverse(
'keyword_responses_archive', args=[keyword.pk]
),
'Archived Replies ({n})'.format(n=keyword.num_archived_matches),
'inbox'
)
# ===== file: /Tests/Pedestrian/av_simulator.py | repo: maxiaoba/MCTSPO | license: none =====
|
#import base Simulator class
from mylab.simulators.simulator import Simulator
#Used for math and debugging
import numpy as np
import pdb
#Define the class
class AVSimulator(Simulator):
"""
Class template for a non-interactive simulator.
"""
#Accept parameters for defining the behavior of the system under test[SUT]
def __init__(self,
use_seed, #use seed or disturbance at step
spaces,
num_peds = 1,
dt = 0.1,
alpha = 0.85,
beta = 0.005,
v_des = 11.17,
delta = 4.0,
t_headway = 1.5,
a_max = 3.0,
s_min = 4.0,
d_cmf = 2.0,
d_max = 9.0,
min_dist_x = 2.5,
min_dist_y = 1.4,
car_init_x = 35.0,
car_init_y = 0.0,
**kwargs):
#Constant hyper-params -- set by user
self.use_seed = use_seed
self.spaces = spaces
self.c_num_peds = num_peds
self.c_dt = dt
self.c_alpha = alpha
self.c_beta = beta
self.c_v_des = v_des
self.c_delta = delta
self.c_t_headway = t_headway
self.c_a_max = a_max
self.c_s_min = s_min
self.c_d_cmf = d_cmf
self.c_d_max = d_max
self.c_min_dist = np.array([min_dist_x, min_dist_y])
self.c_car_init_x = car_init_x
self.c_car_init_y = car_init_y
#These are set by reset, not the user
self._car = np.zeros((4))
self._car_accel = np.zeros((2))
self._peds = np.zeros((self.c_num_peds, 4))
self._measurements = np.zeros((self.c_num_peds, 4))
self._car_obs = np.zeros((self.c_num_peds, 4))
self._env_obs = np.zeros((self.c_num_peds, 4))
self._done = False
self._reward = 0.0
self._info = []
self._step = 0
self._action = None
self._first_step = True
self.directions = np.random.randint(2, size=self.c_num_peds) * 2 - 1
self.y = np.random.rand(self.c_num_peds) * 14 - 5
self.x = np.random.rand(self.c_num_peds) * 4 - 2
self.low_start_bounds = [-1.0, -4.25, -1.0, 5.0, 0.0, -6.0, 0.0, 5.0]
self.high_start_bounds = [0.0, -3.75, 0.0, 9.0, 1.0, -2.0, 1.0, 9.0]
self.v_start = [1.0, -1.0, 1.0, -1.0]
self._state = None
#initialize the base Simulator
super().__init__(**kwargs)
def simulate(self, actions, s_0):
"""
Run/finish the simulation
Input
-----
action : A sequential list of actions taken by the simulation
Outputs
-------
(terminal_index)
terminal_index : The index of the action that resulted in a state in the goal set E. If no state is found
terminal_index should be returned as -1.
"""
# initialize the simulation
path_length = 0
self.reset(s_0)
self._info = []
        # Take simulation steps until the horizon is reached
while path_length < self.c_max_path_length:
#get the action from the list
self._action = actions[path_length]
# move the peds
self.update_peds()
# move the car
self._car = self.move_car(self._car, self._car_accel)
# take new measurements and noise them
noise = self._action.reshape((self.c_num_peds,6))[:, 2:6]
self._measurements = self.sensors(self._car, self._peds, noise)
# filter out the noise with an alpha-beta tracker
self._car_obs = self.tracker(self._car_obs, self._measurements)
# select the SUT action for the next timestep
self._car_accel[0] = self.update_car(self._car_obs, self._car[0])
# grab simulation state, if interactive
self.observe()
# record step variables
self.log()
# check if a crash has occurred. If so return the timestep, otherwise continue
if self.is_goal():
return path_length, np.array(self._info)
path_length = path_length + 1
# horizon reached without crash, return -1
self._is_terminal = True
return -1, np.array(self._info)
def step(self, action):
"""
Handle anything that needs to take place at each step, such as a simulation update or write to file
Input
-----
action : action taken on the turn
Outputs
-------
(terminal_index)
terminal_index : The index of the action that resulted in a state in the goal set E. If no state is found
terminal_index should be returned as -1.
"""
#get the action from the list
if self.use_seed:
np.random.seed(action)
action = self.action_space.sample()
self._action = action
# move the peds
self.update_peds()
# move the car
self._car = self.move_car(self._car, self._car_accel)
# take new measurements and noise them
noise = self._action.reshape((self.c_num_peds,6))[:, 2:6]
self._measurements = self.sensors(self._car, self._peds, noise)
# filter out the noise with an alpha-beta tracker
self._car_obs = self.tracker(self._car_obs, self._measurements)
# select the SUT action for the next timestep
self._car_accel[0] = self.update_car(self._car_obs, self._car[0])
# grab simulation state, if interactive
self.observe()
# record step variables
        self.log()  # log() also advances self._step
if self.is_goal() or self._step >= self.c_max_path_length:
self._is_terminal = True
return self._env_obs[0]
def reset(self, s_0):
"""
Resets the state of the environment, returning an initial observation.
Outputs
-------
observation : the initial observation of the space. (Initial reward is assumed to be 0.)
"""
# initialize variables
self._info = []
self._step = 0
self._is_terminal = False
self.init_conditions = s_0
self._first_step = True
# Get v_des if it is sampled from a range
v_des = self.init_conditions[3*self.c_num_peds]
# initialize SUT location
car_init_x = self.init_conditions[3*self.c_num_peds + 1]
self._car = np.array([v_des, 0.0, car_init_x, self.c_car_init_y])
# zero out the first SUT acceleration
self._car_accel = np.zeros((2))
# initialize pedestrian locations and velocities
pos = self.init_conditions[0:2*self.c_num_peds]
self.x = pos[0:self.c_num_peds*2:2]
self.y = pos[1:self.c_num_peds*2:2]
v_start = self.init_conditions[2*self.c_num_peds:3*self.c_num_peds]
self._peds[0:self.c_num_peds, 0] = np.zeros((self.c_num_peds))
self._peds[0:self.c_num_peds, 1] = v_start
self._peds[0:self.c_num_peds, 2] = self.x
self._peds[0:self.c_num_peds, 3] = self.y
# Calculate the relative position measurements
self._measurements = self._peds - self._car
self._env_obs = self._measurements
self._car_obs = self._measurements
# return the initial simulation state
self._car = np.array([self.c_v_des, 0.0, self.c_car_init_x, self.c_car_init_y])
self._car_accel = np.zeros((2))
self._peds[:, 0:4] = np.array([0.0, 1.0, -0.5, -4.0])
self._measurements = self._peds - self._car
self._env_obs = self._measurements
self._car_obs = self._measurements
return np.ndarray.flatten(self._measurements)
def get_reward_info(self):
"""
returns any info needed by the reward function to calculate the current reward
"""
return {"peds": self._peds,
"car": self._car,
"is_goal": self.is_goal(),
"is_terminal": self._is_terminal,
"action": self._action}
def is_goal(self):
"""
returns whether the current state is in the goal set
:return: boolean, true if current state is in goal set.
"""
# calculate the relative distances between the pedestrians and the car
dist = self._peds[:, 2:4] - self._car[2:4]
# return True if any relative distance is within the SUT's hitbox
if (np.any(np.all(np.less_equal(abs(dist), self.c_min_dist), axis=1))):
return True
return False
def log(self):
# Create a cache of step specific variables for post-simulation analysis
cache = np.hstack([0.0, # Dummy, will be filled in with trial # during post processing in save_trials.py
self._step,
np.ndarray.flatten(self._car),
np.ndarray.flatten(self._peds),
np.ndarray.flatten(self._action),
0.0])
self._info.append(cache)
self._step += 1
def sensors(self, car, peds, noise):
measurements = peds + noise
return measurements
def tracker(self, observation_old, measurements):
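        # Alpha-beta filter: propagate the previous position estimate forward
        # by one dt at the previous velocity estimate, then correct position
        # (gain c_alpha) and velocity (gain c_beta / dt) toward the residual
        # against the new noisy measurement.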
observation = np.zeros_like(observation_old)
observation[:, 0:2] = observation_old[:, 0:2]
observation[:, 2:4] = observation_old[:, 2:4] + self.c_dt * observation_old[:, 0:2]
residuals = measurements[:, 2:4] - observation[:, 2:4]
observation[:, 2:4] += self.c_alpha * residuals
observation[:, 0:2] += self.c_beta / self.c_dt * residuals
return observation
def update_car(self, obs, v_car):
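        # Longitudinal control: if any pedestrian is in the roadway, pick one
        # via argmin over the filtered observations and apply an Intelligent-
        # Driver-Model-style law, a_max * (1 - (v/v_des)**delta -
        # (s_des/s_headway)**2); otherwise just close the gap to the desired
        # speed. (The IDM reading is inferred from the formula, not documented
        # in the source.)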
cond = np.repeat(np.resize(np.logical_and(obs[:, 3] > -1.5, obs[:, 3] < 4.5), (self.c_num_peds, 1)), 4, axis=1)
in_road = np.expand_dims(np.extract(cond, obs), axis=0)
if in_road.size != 0:
mins = np.argmin(in_road.reshape((-1, 4)), axis=0)
v_oth = obs[mins[3], 0]
s_headway = obs[mins[3], 2] - self._car[2]
del_v = v_oth - v_car
s_des = self.c_s_min + v_car * self.c_t_headway - v_car * del_v / (2 * np.sqrt(self.c_a_max * self.c_d_cmf))
if self.c_v_des > 0.0:
v_ratio = v_car / self.c_v_des
else:
v_ratio = 1.0
a = self.c_a_max * (1.0 - v_ratio ** self.c_delta - (s_des / s_headway) ** 2)
else:
del_v = self.c_v_des - v_car
a = del_v
if np.isnan(a):
pdb.set_trace()
return np.clip(a, -self.c_d_max, self.c_a_max)
def move_car(self, car, accel):
car[2:4] += self.c_dt * car[0:2]
car[0:2] += self.c_dt * accel
return car
def update_peds(self):
# Update ped state from actions
action = self._action.reshape((self.c_num_peds, 6))[:, 0:2]
mod_a = np.hstack((action,
self._peds[:, 0:2] + 0.5 * self.c_dt * action))
if np.any(np.isnan(mod_a)):
pdb.set_trace()
self._peds += self.c_dt * mod_a
if np.any(np.isnan(self._peds)):
pdb.set_trace()
def observe(self):
self._env_obs = self._peds - self._car
@property
def observation_space(self):
return self.spaces.observation_space
@property
def action_space(self):
return self.spaces.action_space
# ===== file: /Chapter 4/4-3.py | repo: joose1983/answer-for-python-crush-course-2ndE | license: none =====
|
numbers=list(range(1,21))
for number in numbers:
print(number)
# ===== file: /framework/codejam/extract/identifier.py | repo: neizod/coding-analysis | license: MIT =====
|
import os
import logging
from framework._utils import FunctionHook
class CodeJamExtractIdentifier(FunctionHook):
    ''' This method will extract all identifiers in submitted source code
    from each contestant for further analysis. '''
@staticmethod
def get_identifiers(directory):
''' returns all identifiers in source code files in a directory. '''
from framework._utils.misc import datapath
from framework._utils.source import SourceCode
identifiers = set()
for filename in os.listdir(directory):
filepath = datapath('codejam', directory, filename)
if not os.path.isfile(filepath):
continue
source_code = SourceCode.open(filepath)
try:
identifiers |= source_code.identifiers().keys()
except NotImplementedError:
continue
return identifiers
def main(self, year, force=False, **_):
from framework._utils import write
from framework._utils.misc import datapath, make_ext
from framework.codejam._helper import iter_submission
os.makedirs(datapath('codejam', 'extract'), exist_ok=True)
outpath = datapath('codejam', 'extract',
make_ext('identifier', year, 'json'))
if not force and os.path.isfile(outpath):
return logging.warn('output file already exists, aborting.')
extracted_data = []
for _, pid, pio, uname in iter_submission(year):
directory = datapath('codejam', 'source', pid, pio, uname)
logging.info('extracting: %i %i %s', pid, pio, uname)
extracted_data += [{
'pid': pid,
'io': pio,
'uname': uname,
'identifiers': sorted(self.get_identifiers(directory)),
}]
write.json(extracted_data, open(outpath, 'w'))
# ===== file: /device/hid/hid.gyp | repo: zenoalbisser/chromium | license: BSD-3-Clause =====
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'device_hid',
'type': 'static_library',
'include_dirs': [
'../..',
],
'dependencies': [
'../../components/components.gyp:device_event_log_component',
'../../net/net.gyp:net',
'../core/core.gyp:device_core',
],
'sources': [
'device_monitor_linux.cc',
'device_monitor_linux.h',
'fake_input_service_linux.cc',
'fake_input_service_linux.h',
'hid_collection_info.cc',
'hid_collection_info.h',
'hid_connection.cc',
'hid_connection.h',
'hid_connection_linux.cc',
'hid_connection_linux.h',
'hid_connection_mac.cc',
'hid_connection_mac.h',
'hid_connection_win.cc',
'hid_connection_win.h',
'hid_device_filter.cc',
'hid_device_filter.h',
'hid_device_info.cc',
'hid_device_info.h',
'hid_device_info_linux.cc',
'hid_device_info_linux.h',
'hid_report_descriptor.cc',
'hid_report_descriptor.h',
'hid_report_descriptor_item.cc',
'hid_report_descriptor_item.h',
'hid_service.cc',
'hid_service.h',
'hid_service_linux.cc',
'hid_service_linux.h',
'hid_service_mac.cc',
'hid_service_mac.h',
'hid_service_win.cc',
'hid_service_win.h',
'hid_usage_and_page.cc',
'hid_usage_and_page.h',
'input_service_linux.cc',
'input_service_linux.h',
],
'conditions': [
['use_udev==1', {
'dependencies': [
'../udev_linux/udev.gyp:udev_linux',
],
}, { # use_udev==0
# The Linux implementation is based on Udev.
'sources!': [
'device_monitor_linux.cc',
'device_monitor_linux.h',
'hid_service_linux.cc',
'hid_service_linux.h',
'fake_input_service_linux.cc',
'fake_input_service_linux.h',
'input_service_linux.cc',
'input_service_linux.h',
],
}],
],
},
],
}
# ===== file: /benchmark/startQiskit_QC1775.py | repo: UCLA-SEAL/QDiff | license: BSD-3-Clause =====
|
# qubit number=5
# total number=60
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
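    # standard Grover iteration count: floor((pi/4) * sqrt(N)) with N = 2**n search states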
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=31
prog.y(input_qubit[1]) # number=56
prog.cz(input_qubit[1],input_qubit[0]) # number=32
prog.h(input_qubit[1]) # number=52
prog.h(input_qubit[0]) # number=33
prog.h(input_qubit[1]) # number=44
prog.cz(input_qubit[0],input_qubit[1]) # number=45
prog.h(input_qubit[1]) # number=46
prog.cx(input_qubit[0],input_qubit[1]) # number=53
prog.cx(input_qubit[0],input_qubit[1]) # number=57
prog.x(input_qubit[1]) # number=58
prog.cx(input_qubit[0],input_qubit[1]) # number=59
prog.cx(input_qubit[0],input_qubit[1]) # number=55
prog.h(input_qubit[1]) # number=48
prog.cz(input_qubit[0],input_qubit[1]) # number=49
prog.h(input_qubit[1]) # number=50
prog.x(input_qubit[0]) # number=26
prog.cx(input_qubit[1],input_qubit[0]) # number=27
prog.h(input_qubit[1]) # number=37
prog.cz(input_qubit[0],input_qubit[1]) # number=38
prog.h(input_qubit[1]) # number=39
prog.x(input_qubit[1]) # number=35
prog.cx(input_qubit[0],input_qubit[1]) # number=36
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
prog.cx(input_qubit[3],input_qubit[2]) # number=43
prog.cx(input_qubit[3],input_qubit[2]) # number=47
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.cx(input_qubit[0],input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
prog.cx(input_qubit[0],input_qubit[1]) # number=24
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[1]) # number=29
prog.y(input_qubit[4]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[3]) # number=51
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1775.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
# ===== file: /tensorflow/python/data/kernel_tests/cache_test.py | repo: nnsuite/ubuntuport-tensorflow | license: Apache-2.0 =====
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.cache()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import shutil
import tempfile
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FileCacheTest(test_base.DatasetTestBase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.cache_prefix = path.join(self.tmp_dir, "cache")
def tearDown(self):
if self.tmp_dir:
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def testCacheDatasetPassthrough(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
def dataset_fn(count=5, filename=None):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if filename:
return repeat_dataset.cache(filename)
else:
return repeat_dataset
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_fn().output_shapes)
get_next = self.getNext(dataset_fn())
# First run without caching to collect the "ground truth".
elements = []
for _ in range(20):
elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the cached dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(filename=self.cache_prefix))
cached_elements = []
for _ in range(20):
cached_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(elements, cached_elements)
# Re-initialize with an empty upstream (to throw errors.OutOfRangeError
# if we didn't use the cache).
get_next = self.getNext(dataset_fn(count=0, filename=self.cache_prefix))
replayed_elements = []
for _ in range(20):
replayed_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(cached_elements, replayed_elements)
# Re-initialize with an empty upstream and a missing cache file (should
# throw errors.OutOfRangeError immediately).
get_next = self.getNext(
dataset_fn(count=0, filename=self.cache_prefix + "nonsense"))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testConcurrentWriters(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
cache_dataset1 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
cache_dataset2 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
get_next1 = self.getNext(cache_dataset1)
get_next2 = self.getNext(cache_dataset2)
self.evaluate(get_next1()) # this should succeed
with self.assertRaises(errors.AlreadyExistsError):
self.evaluate(get_next2())
self.evaluate(get_next1()) # this should continue to succeed
def testConcurrentReaders(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
cache_dataset1 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
cache_dataset2 = (
dataset_ops.Dataset.from_tensor_slices(components).cache(
self.cache_prefix))
get_next1 = self.getNext(cache_dataset1)
get_next2 = self.getNext(cache_dataset2)
elements = []
for _ in range(4):
elements.append(self.evaluate(get_next1()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
# Re-initialize
get_next1 = self.getNext(cache_dataset1)
get_next2 = self.getNext(cache_dataset2)
# Reading concurrently should succeed.
elements_itr1 = []
elements_itr2 = []
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
# Intentionally reversing the order
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
elements_itr1.append(self.evaluate(get_next1()))
elements_itr2.append(self.evaluate(get_next2()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next2())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
self.assertAllEqual(elements, elements_itr1)
self.assertAllEqual(elements, elements_itr2)
@test_util.run_all_in_graph_and_eager_modes
class MemoryCacheTest(test_base.DatasetTestBase):
def testCacheDatasetPassthrough(self):
with ops.device("cpu:0"):
repeat_count = variables.Variable(constant_op.constant(10, dtypes.int64))
dataset = dataset_ops.Dataset.range(3).flat_map(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(repeat_count))
cached_dataset = dataset.cache().repeat(2)
uncached_dataset = dataset.repeat(2)
self.evaluate(repeat_count.initializer)
# Needs to be initializable to capture the variable.
cached_next = self.getNext(cached_dataset, requires_initialization=True)
uncached_next = self.getNext(
uncached_dataset, requires_initialization=True)
for i in range(3):
for _ in range(10):
self.assertEqual(self.evaluate(cached_next()), i)
self.assertEqual(self.evaluate(uncached_next()), i)
self.evaluate(repeat_count.assign(0))
# The uncached iterator should now be empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(uncached_next())
# The cached iterator replays from cache.
for i in range(3):
for _ in range(10):
self.assertEqual(self.evaluate(cached_next()), i)
# The cached iterator should now be empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(cached_next())
def testEmptyCacheReading(self):
components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0]))
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(0))
cache_dataset = repeat_dataset.cache()
# An empty cached dataset should also produce no elements when read back.
self.assertDatasetProduces(cache_dataset, expected_output=[])
def testConcurrentReaders(self):
dataset = dataset_ops.Dataset.range(5).cache()
d1 = dataset.map(lambda x: x + 1)
d2 = dataset.map(lambda x: x + 6)
get_next1 = self.getNext(d1)
self.assertEqual(1, self.evaluate(get_next1()))
self.assertEqual(2, self.evaluate(get_next1()))
self.assertEqual(3, self.evaluate(get_next1()))
get_next2 = self.getNext(d2)
self.assertEqual(6, self.evaluate(get_next2()))
self.assertEqual(7, self.evaluate(get_next2()))
self.assertEqual(4, self.evaluate(get_next1())) # interleave execution
self.assertEqual([8, 5],
[self.evaluate(get_next2()),
self.evaluate(get_next1())])
self.assertEqual(9, self.evaluate(get_next2()))
self.assertEqual(10, self.evaluate(get_next2()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next2())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next1())
def testCacheTakeRepeat(self):
dataset = dataset_ops.Dataset.range(10).cache().take(5).repeat(2)
expected_output = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
self.assertDatasetProduces(dataset, expected_output=expected_output)
if __name__ == "__main__":
test.main()
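# Minimal usage sketch (hedged; this mirrors what the tests above exercise):
#   ds = dataset_ops.Dataset.range(3).cache()          # in-memory cache
#   ds = dataset_ops.Dataset.range(3).cache("/tmp/c")  # file-backed cache
# The first complete pass over the dataset populates the cache; subsequent
# iterations replay the cached elements instead of recomputing the input.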
|
[
"[email protected]"
] | |
ecefc29203c6dbd9ad3567e3c4923a92540392cc
|
1c390cd4fd3605046914767485b49a929198b470
|
/codechef/MODULO3.py
|
56225738efe0487b5051721c932373bd79d04918
|
[] |
no_license
|
wwwwodddd/Zukunft
|
f87fe736b53506f69ab18db674311dd60de04a43
|
03ffffee9a76e99f6e00bba6dbae91abc6994a34
|
refs/heads/master
| 2023-01-24T06:14:35.691292 | 2023-01-21T15:42:32 | 2023-01-21T15:42:32 | 163,685,977 | 7 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 155 |
py
|
for t in range(int(input())):
a, b = map(int, input().split())
if a % 3 == 0 or b % 3 == 0:
print(0)
elif a % 3 == b % 3:
print(1)
else:
print(2)
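# Note (hedged reading; the problem statement is not included here): the
# branches above encode that the answer depends only on a % 3 and b % 3 --
# 0 if either number is already divisible by 3, 1 if both share the same
# nonzero residue, and 2 otherwise.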
|
[
"[email protected]"
] | |
1eb5f0a5a7672ff71340cf10f0cff2fccd26163f
|
b346530b455135b224470511061cffcffa6274b5
|
/test/hierarchical/test_hierarchical.py
|
452d9eae3c129fcdb8f095a3b34bac964cbcce4a
|
[
"BSD-3-Clause"
] |
permissive
|
ICB-DCM/pyPESTO
|
cd3ac5c26be0648c77a409d012a6321ade968d5d
|
9a754573a7b77d30d5dc1f67a8dc1be6c29f1640
|
refs/heads/main
| 2023-09-03T19:34:55.581478 | 2023-06-29T09:22:57 | 2023-06-29T09:22:57 | 142,321,950 | 174 | 48 |
BSD-3-Clause
| 2023-09-14T14:59:50 | 2018-07-25T15:51:29 |
Python
|
UTF-8
|
Python
| false | false | 14,990 |
py
|
"""Tests for hierarchical optimization."""
import time
import amici
import numpy as np
import pandas as pd
import petab
import pypesto
from pypesto.C import (
INNER_PARAMETER_BOUNDS,
LOG10,
LOWER_BOUND,
MODE_FUN,
UPPER_BOUND,
InnerParameterType,
)
from pypesto.hierarchical.parameter import InnerParameter
from pypesto.hierarchical.petab import validate_hierarchical_petab_problem
from pypesto.hierarchical.problem import InnerProblem
from pypesto.hierarchical.solver import (
AnalyticalInnerSolver,
NumericalInnerSolver,
)
from pypesto.hierarchical.util import (
apply_offset,
apply_scaling,
compute_optimal_offset,
compute_optimal_offset_coupled,
compute_optimal_scaling,
compute_optimal_sigma,
)
from pypesto.optimize import FidesOptimizer, OptimizeOptions
from pypesto.petab import PetabImporter
from pypesto.testing.examples import (
get_Boehm_JProteomeRes2014_hierarchical_petab,
get_Boehm_JProteomeRes2014_hierarchical_petab_corrected_bounds,
)
# Suitable test cases from the benchmark collection
# - Boehm
# - Fujita
def test_hierarchical_optimization_pipeline():
"""Test hierarchical optimization of sigma and scaling parameters.
Here (mostly): the flags `True` and `False` indicate that hierarchical
optimization is enabled and disabled, respectively.
"""
flags = [False, True]
petab_problems = {
False: get_Boehm_JProteomeRes2014_hierarchical_petab(),
True: get_Boehm_JProteomeRes2014_hierarchical_petab_corrected_bounds(),
}
problems = {}
for flag in flags:
importer = PetabImporter(petab_problems[flag], hierarchical=flag)
objective = importer.create_objective()
problem = importer.create_problem(objective)
problem.objective.amici_solver.setSensitivityMethod(
amici.SensitivityMethod_adjoint
)
problem.objective.amici_solver.setAbsoluteTolerance(1e-8)
problem.objective.amici_solver.setRelativeTolerance(1e-8)
problems[flag] = problem
# Check for same optimization result
n_starts = 1
engine = pypesto.engine.SingleCoreEngine()
startpoints = problems[False].get_full_vector(
pypesto.startpoint.latin_hypercube(
n_starts=n_starts,
lb=problems[False].lb,
ub=problems[False].ub,
)
)
problems[False].set_x_guesses(startpoints)
outer_indices = [
ix
for ix, x in enumerate(problems[False].x_names)
if x
not in problems[True].objective.calculator.inner_problem.get_x_ids()
]
problems[True].set_x_guesses(startpoints[:, outer_indices])
inner_solvers = {
'analytical': AnalyticalInnerSolver(),
'numerical': NumericalInnerSolver(),
}
history_options = pypesto.HistoryOptions(trace_record=True)
def get_result(problem, inner_solver_id, inner_solvers=inner_solvers):
if inner_solver_id:
problem.objective.calculator.inner_solver = inner_solvers[
inner_solver_id
]
start_time = time.time()
result = pypesto.optimize.minimize(
problem=problem,
n_starts=n_starts,
engine=engine,
history_options=history_options,
options=OptimizeOptions(allow_failed_starts=False),
optimizer=FidesOptimizer(),
)
wall_time = time.time() - start_time
best_x = result.optimize_result.list[0].x
best_fval = result.optimize_result.list[0].fval
result = {
'list': result.optimize_result.list,
'time': wall_time,
'best_x': best_x,
'best_fval': best_fval,
}
return result
results = {}
for problem, inner_solver_id in [
(problems[True], 'analytical'),
(problems[False], False),
(problems[True], 'numerical'),
]:
results[inner_solver_id] = get_result(problem, inner_solver_id)
trace_False = np.array(
results[False]['list'][0].history.get_fval_trace(trim=True)
)
trace_numerical = np.array(
results['numerical']['list'][0].history.get_fval_trace(trim=True)
)
trace_analytical = np.array(
results['analytical']['list'][0].history.get_fval_trace(trim=True)
)
# The analytical inner solver is at least as good as (fval / speed) the
# numerical inner solver.
assert at_least_as_good_as(v=trace_analytical, v0=trace_numerical)
# The numerical inner solver is at least as good as (fval / speed) no
# inner solver (non-hierarchical).
assert at_least_as_good_as(v=trace_numerical, v0=trace_False)
# Now implied that analytical is at least as good as non-hierarchical.
def test_hierarchical_calculator_and_objective():
"""Test hierarchical calculation of sigma and objective values.
Here (mostly): the flags `True` and `False` indicate that hierarchical
optimization is enabled and disabled, respectively.
"""
petab_problem = (
get_Boehm_JProteomeRes2014_hierarchical_petab_corrected_bounds()
)
flags = [False, True]
problems = {}
for flag in flags:
importer = PetabImporter(petab_problem, hierarchical=flag)
objective = importer.create_objective()
problem = importer.create_problem(objective)
problem.objective.amici_solver.setSensitivityMethod(
amici.SensitivityMethod_adjoint
)
problem.objective.amici_solver.setAbsoluteTolerance(1e-8)
problem.objective.amici_solver.setRelativeTolerance(1e-8)
problems[flag] = problem
def calculate(problem, x_dct):
return problem.objective.calculator(
x_dct=x_dct,
sensi_orders=(0, 1),
mode=MODE_FUN,
amici_model=problem.objective.amici_model,
amici_solver=problem.objective.amici_solver,
edatas=problem.objective.edatas,
n_threads=1,
x_ids=petab_problem.x_ids,
parameter_mapping=problem.objective.parameter_mapping,
fim_for_hess=False,
)
x_dct = dict(zip(petab_problem.x_ids, petab_problem.x_nominal_scaled))
# Nominal sigma values are close to optimal.
# One is changed here to facilitate testing.
x_dct['sd_pSTAT5A_rel'] = 0.5
calculator_results = {
flag: calculate(problems[flag], x_dct=x_dct) for flag in flags
}
# Hierarchical optimization means that the results differ here, because
# the `False` case has suboptimal sigma values.
assert not np.isclose(
calculator_results[True]['fval'],
calculator_results[False]['fval'],
)
assert not np.isclose(
calculator_results[True]['grad'],
calculator_results[False]['grad'],
).all()
x_dct.update(calculator_results[True]['inner_parameters'])
calculator_results[False] = calculate(problem=problems[False], x_dct=x_dct)
# The `False` case has copied the optimal sigma values from hierarchical
# optimization, so can produce the same results now.
assert np.isclose(
calculator_results[True]['fval'],
calculator_results[False]['fval'],
)
assert np.isclose(
calculator_results[True]['grad'],
calculator_results[False]['grad'],
).all()
parameters = [x_dct[x_id] for x_id in petab_problem.x_free_ids]
fval_false = problems[False].objective(parameters)
outer_parameters = [
x_dct[x_id] for x_id in problems[True].objective.x_names
]
fval_true = problems[True].objective(outer_parameters)
# Hierarchical optimization does not affect the function value if the
# optimal sigma are provided to the normal function. High precision is
# required because the nominal values are already very good, so the test
# could pass spuriously if the nominal values were used by mistake.
assert np.isclose(fval_true, fval_false, atol=1e-12, rtol=1e-14)
def test_analytical_computations():
"""Test analytically-solved hierarchical inner parameters."""
function = np.exp
timepoints = np.linspace(0, 10, 101)
simulation = function(timepoints)
dummy_sigma = np.ones(simulation.shape)
mask = np.full(simulation.shape, True)
expected_scaling_value = 5
expected_offset_value = 2
expected_sigma_value = 2
rtol = 1e-3
# Scaling
simulation = function(timepoints)
data = expected_scaling_value * simulation
scaling_value = compute_optimal_scaling(
data=[data],
sim=[simulation],
sigma=[dummy_sigma],
mask=[mask],
)
assert np.isclose(scaling_value, expected_scaling_value, rtol=rtol)
# Offset
simulation = function(timepoints)
data = simulation + expected_offset_value
offset_value = compute_optimal_offset(
data=[data],
sim=[simulation],
sigma=[dummy_sigma],
mask=[mask],
)
assert np.isclose(offset_value, expected_offset_value, rtol=rtol)
# Coupled (scaling and offset)
simulation = function(timepoints)
data = expected_scaling_value * simulation + expected_offset_value
offset_value = compute_optimal_offset_coupled(
data=[data],
sim=[simulation],
sigma=[dummy_sigma],
mask=[mask],
)
apply_offset(offset_value=expected_offset_value, data=[data], mask=[mask])
scaling_value = compute_optimal_scaling(
data=[data],
sim=[simulation],
sigma=[dummy_sigma],
mask=[mask],
)
assert np.isclose(offset_value, expected_offset_value, rtol=rtol)
assert np.isclose(scaling_value, expected_scaling_value, rtol=rtol)
# All (scaling, offset, sigma)
simulation = function(timepoints)
data = expected_scaling_value * simulation + expected_offset_value
data[0::2] -= expected_sigma_value
data[1::2] += expected_sigma_value
offset_value = compute_optimal_offset_coupled(
data=[data],
sim=[simulation],
sigma=[dummy_sigma],
mask=[mask],
)
apply_offset(offset_value=offset_value, data=[data], mask=[mask])
scaling_value = compute_optimal_scaling(
data=[data],
sim=[simulation],
sigma=[dummy_sigma],
mask=[mask],
)
apply_scaling(scaling_value=scaling_value, sim=[simulation], mask=[mask])
sigma_value = compute_optimal_sigma(data=data, sim=simulation, mask=mask)
assert np.isclose(offset_value, expected_offset_value, rtol=rtol)
assert np.isclose(scaling_value, expected_scaling_value, rtol=rtol)
assert np.isclose(sigma_value, expected_sigma_value, rtol=rtol)
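# Math note (hedged): the exact closed forms used by pypesto live in
# pypesto.hierarchical.util and are not restated from source here; under the
# standard Gaussian-noise least-squares model they are
#   scaling: s* = sum(y*h / sigma^2) / sum(h^2 / sigma^2)
#   offset:  b* = sum((y - h) / sigma^2) / sum(1 / sigma^2)
#   sigma:   (sigma*)^2 = mean((y - h)^2)   (unit weights)
# with data y and simulation h taken over the masked points, which is why
# the constructed data above recover the expected values.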
def inner_problem_exp():
function = np.exp
timepoints = np.linspace(0, 10, 101)
expected_values = {
'scaling_': 5,
'offset_': 2,
'sigma_': 3,
}
simulation = function(timepoints)
data = (
expected_values['scaling_'] * simulation + expected_values['offset_']
)
data[0::2] -= expected_values['sigma_']
data[1::2] += expected_values['sigma_']
mask = np.full(data.shape, True)
inner_parameters = [
InnerParameter(
inner_parameter_id=inner_parameter_id,
inner_parameter_type=inner_parameter_type,
scale=LOG10,
lb=INNER_PARAMETER_BOUNDS[inner_parameter_type][LOWER_BOUND],
ub=INNER_PARAMETER_BOUNDS[inner_parameter_type][UPPER_BOUND],
ixs=mask,
)
for inner_parameter_id, inner_parameter_type in [
('offset_', InnerParameterType.OFFSET),
('scaling_', InnerParameterType.SCALING),
('sigma_', InnerParameterType.SIGMA),
]
]
inner_parameters[0].coupled = True
inner_parameters[1].coupled = True
inner_problem = InnerProblem(xs=inner_parameters, data=[data])
return inner_problem, expected_values, simulation
def test_analytical_inner_solver():
"""Test numerically-solved hierarchical inner parameters."""
inner_problem, expected_values, simulation = inner_problem_exp()
dummy_sigma = np.ones(simulation.shape)
rtol = 1e-3
solver = AnalyticalInnerSolver()
result = solver.solve(
problem=inner_problem,
sim=[simulation],
sigma=[dummy_sigma],
scaled=False,
)
assert np.isclose(result['offset_'], expected_values['offset_'], rtol=rtol)
assert np.isclose(
result['scaling_'], expected_values['scaling_'], rtol=rtol
)
assert np.isclose(result['sigma_'], expected_values['sigma_'], rtol=rtol)
def test_numerical_inner_solver():
"""Test numerically-solved hierarchical inner parameters."""
inner_problem, expected_values, simulation = inner_problem_exp()
dummy_sigma = np.ones(simulation.shape)
rtol = 1e-3
solver = NumericalInnerSolver(minimize_kwargs={'n_starts': 10})
result = solver.solve(
problem=inner_problem,
sim=[simulation],
sigma=[dummy_sigma],
scaled=False,
)
assert np.isclose(result['offset_'], expected_values['offset_'], rtol=rtol)
assert np.isclose(
result['scaling_'], expected_values['scaling_'], rtol=rtol
)
assert np.isclose(result['sigma_'], expected_values['sigma_'], rtol=rtol)
def at_least_as_good_as(v, v0) -> bool:
"""Check that the first vector of fvals is at least as good the second.
Parameters
----------
v:
The first vector of fvals.
v0:
The second vector of fvals.
Returns
-------
Whether the first vector of fvals is at least as good as the second.
"""
max_index = min(len(v), len(v0))
return (v[:max_index] <= v0[:max_index]).all()
def test_validate():
# Scaling shared across multiple observables - okay
observable_df = petab.get_observable_df(
pd.DataFrame(
{
petab.OBSERVABLE_ID: ["obs1", "obs2"],
petab.OBSERVABLE_FORMULA: [
"observableParameter1_obs1 * x1",
"observableParameter1_obs2 * x2",
],
petab.NOISE_FORMULA: [
"noiseParameter1_obs1",
"noiseParameter1_obs2",
],
}
)
)
measurement_df = petab.get_measurement_df(
pd.DataFrame(
{
petab.OBSERVABLE_ID: ["obs1", "obs2"],
petab.TIME: [0, 1],
petab.MEASUREMENT: [1, 2],
petab.OBSERVABLE_PARAMETERS: ["s", "s"],
petab.NOISE_PARAMETERS: [0.1, 0.1],
}
)
)
parameter_df = petab.get_parameter_df(
pd.DataFrame(
{
petab.PARAMETER_ID: ["s"],
"parameterType": ['scaling'],
}
)
)
petab_problem = petab.Problem(
observable_df=observable_df,
parameter_df=parameter_df,
measurement_df=measurement_df,
)
validate_hierarchical_petab_problem(petab_problem)
|
[
"[email protected]"
] | |
6386bb45b6d3ab17a1f9afcb15656b0f1a6892fe
|
989b3499948137f57f14be8b2c77d0610d5975e6
|
/question_python(resolved)/chapter3_data_type(완결)/ii_replace_it.py
|
01732f47a8bda1b2a4f65637446d87f13dd4f158
|
[] |
no_license
|
namkiseung/python_BasicProject
|
76b4c070934ad4cb9d16ce844efa05f64fb09ac0
|
460d05248b2d1431624aba960e28bece888643e4
|
refs/heads/master
| 2022-12-13T21:12:06.865241 | 2020-04-23T01:30:08 | 2020-04-23T01:30:08 | 142,980,920 | 1 | 1 | null | 2022-12-08T02:27:40 | 2018-07-31T07:49:17 |
Python
|
UTF-8
|
Python
| false | false | 449 |
py
|
# -*- coding: utf-8 -*-
def replace_it(input_str):
""" 입력된 문자열에 소문자 o를, 대문자 O로 바꿔서 반환하는 함수를 작성해보자
hint: replace
sample data: "google"
expected output: "gOOgle"
"""
result = input_str.replace('o', 'O')
# write here
return result
if __name__ == "__main__":
input_str = 'google'
print(replace_it(input_str))
|
[
"[email protected]"
] | |
415e464cf1107b46d06e17330af69e5a62cabaa7
|
86986fc336d87823b45c427ac2326d6d733c7f91
|
/social/groups/views.py
|
ab699144185ce0ecbebdbef63e928827bb1996db
|
[] |
no_license
|
CryptAthlos/cryptathloscap
|
b0d83efb1ed9d628677aec47a24841f6fa52ed4c
|
1a91ce7a0f57548523e02a7f1544018ccd587d4d
|
refs/heads/master
| 2020-03-15T12:11:36.453570 | 2018-05-29T09:09:50 | 2018-05-29T09:09:50 | 132,138,497 | 0 | 0 | null | 2018-05-29T09:09:51 | 2018-05-04T12:39:09 |
Python
|
UTF-8
|
Python
| false | false | 1,927 |
py
|
from django.contrib import messages
from django.db import IntegrityError
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.core.urlresolvers import reverse
from django.views import generic
from groups.models import Group, GroupMember
# Create your views here.
class CreateGroup(LoginRequiredMixin, generic.CreateView):
fields = ('name', 'description')
model = Group
class SingleGroup(generic.DetailView):
model = Group
class ListGroups(generic.ListView):
model = Group
class JoinGroup(LoginRequiredMixin, generic.RedirectView):
def get_redirect_url(self, *args, **kwargs):
return reverse('groups:single', kwargs={'slug': self.kwargs.get('slug')})
def get(self, request, *args, **kwargs):
group = get_object_or_404(Group, slug=self.kwargs.get('slug'))
try:
GroupMember.objects.create(user=self.request.user, group=group)
except IntegrityError:
messages.warning(self.request, 'Warning: already a member.')
else:
messages.success(self.request, 'You are a member')
return super().get(request, *args, **kwargs)
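# Note (hedged): catching IntegrityError above assumes GroupMember declares
# a database-level uniqueness constraint on (user, group), e.g. via
# unique_together, so that a second join attempt fails on insert.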
class LeaveGroup(LoginRequiredMixin, generic.RedirectView):
def get_redirect_url(self, *args, **kwargs):
return reverse('groups:single', kwargs={'slug': self.kwargs.get('slug')})
def get(self, request, *args, **kwargs):
try:
membership = GroupMember.objects.filter(
user=self.request.user,
group__slug=self.kwargs.get('slug')
).get()
except GroupMember.DoesNotExist:
messages.warning(self.request, 'You are not in this group')
else:
membership.delete()
messages.success(self.request, 'You have left the group')
return super().get(request, *args, **kwargs)
|
[
"[email protected]"
] | |
a4aebac3982dc6e6551d597587c45f909039ce3b
|
c3612d29df2fd6224c61b693a0cbd3554b6213f2
|
/03_django/06_django_axios/articles/views.py
|
c25a540c0aa075716a900db72f11ad0f2cf75694
|
[] |
no_license
|
mycomax0416/TIL
|
5a12b8067e22fc095c7998b9ffb17d6fb210933f
|
21d3b62db84b1fd64318a278eddc287bdc9678f0
|
refs/heads/master
| 2023-01-11T00:59:42.851469 | 2019-11-28T08:48:54 | 2019-11-28T08:48:54 | 195,918,118 | 3 | 0 | null | 2023-01-07T16:48:23 | 2019-07-09T02:31:05 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 6,798 |
py
|
import hashlib
from django.http import JsonResponse, HttpResponseBadRequest
from IPython import embed
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import Http404, HttpResponse
from django.views.decorators.http import require_POST
from .models import Article, Comment, Hashtag
from .forms import ArticleForm, CommentForm
# Create your views here.
def index(request):
# Fetch the value stored under the 'visits_num' key in the session.
# The key does not exist by default, so fall back to 0 when it is missing (i.e. the user has never visited).
visits_num = request.session.get('visits_num', 0)
# Store the value back, incremented by 1, ready for the user's next visit.
request.session['visits_num'] = visits_num + 1
# Django does not notice in-place modifications to session data, so mark the session as modified explicitly.
request.session.modified = True
articles = Article.objects.all()
context = {'articles': articles, 'visits_num': visits_num,}
return render(request, 'articles/index.html', context)
@login_required
def create(request):
if request.method == 'POST':
# Create a form instance bound to the data from the request (binding).
# This binding step is what makes validation possible.
form = ArticleForm(request.POST)
# Check whether the form is valid.
if form.is_valid():
article = form.save(commit=False)
article.user = request.user
article.save()
# hashtag handling starts here
for word in article.content.split(): # split the content on whitespace
if word.startswith('#'): # keep only the words starting with '#'
hashtag, created = Hashtag.objects.get_or_create(content=word) # reuse the existing hashtag if one matches the word (.get), otherwise create a new one (.create)
article.hashtags.add(hashtag) # without unpacking `created`, this would need to be hashtag[0]
return redirect(article)
else:
form = ArticleForm()
# Two kinds of form end up in the context depending on the situation:
# 1. GET : a blank default form
# 2. POST : the form after failed validation (is_valid == False)
context = {'form': form,}
return render(request, 'articles/form.html', context)
def detail(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
comments = article.comment_set.all() # every comment on the article
person = get_object_or_404(get_user_model(), pk=article.user_id)
comment_form = CommentForm() # comment form
context = {'article': article, 'comment_form': comment_form, 'comments': comments, 'person': person,}
return render(request, 'articles/detail.html', context)
@require_POST
def delete(request, article_pk):
if request.user.is_authenticated:
article = get_object_or_404(Article, pk=article_pk)
if request.user == article.user:
article.delete()
else:
return redirect(article)
return redirect('articles:index')
@login_required
def update(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.user == article.user:
if request.method == 'POST':
form = ArticleForm(request.POST, instance=article)
if form.is_valid():
article = form.save()
# hashtag
article.hashtags.clear() # remove every hashtag from this article first
for word in article.content.split():
if word.startswith('#'):
hashtag, created = Hashtag.objects.get_or_create(content=word)
article.hashtags.add(hashtag)
return redirect(article)
else:
form = ArticleForm(instance=article)
else:
return redirect('articles:index')
context = {'form': form, 'article': article,}
return render(request, 'articles/form.html', context)
@require_POST
def comments_create(request, article_pk):
if request.user.is_authenticated:
comment_form = CommentForm(request.POST)
if comment_form.is_valid():
# Create the object, but do not write a record to the database yet.
comment = comment_form.save(commit=False)
comment.article_id = article_pk
comment.user = request.user
comment.save()
return redirect('articles:detail', article_pk)
@require_POST
def comments_delete(request, article_pk, comment_pk):
if request.user.is_authenticated:
comment = get_object_or_404(Comment, pk=comment_pk)
if request.user == comment.user:
comment.delete()
return redirect('articles:detail', article_pk)
return HttpResponse('You are Unauthorized', status=401)
@login_required
def like(request, article_pk):
if request.is_ajax():
article = get_object_or_404(Article, pk=article_pk)
if article.like_users.filter(pk=request.user.pk).exists():
article.like_users.remove(request.user)
liked = False
else:
article.like_users.add(request.user)
liked = True
context = {'liked': liked, 'count': article.like_users.count()}
return JsonResponse(context)
else:
return HttpResponseBadRequest()
# If the current user is among those who liked this post, cancel the like:
# if request.user in article.like_users.all():
# article.like_users.remove(request.user) # unlike
# else:
# article.like_users.add(request.user) # like
@login_required
def follow(request, article_pk, user_pk):
# the post's author
person = get_object_or_404(get_user_model(), pk=user_pk)
# the logged-in user
user = request.user
if person != user:
# If I (request.user) am already in the author's follower list,
if person.followers.filter(pk=user.pk).exists():
person.followers.remove(user)
else:
person.followers.add(user)
return redirect('articles:detail', article_pk)
def hashtag(request, hash_pk):
hashtag = get_object_or_404(Hashtag, pk=hash_pk)
articles = hashtag.article_set.order_by('-pk')
context = {'hashtag': hashtag, 'articles': articles,}
return render(request, 'articles/hashtag.html', context)
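# Hedged standalone sketch of the hashtag extraction used in create() and
# update() above (the helper name is illustrative, not part of the app):
def _extract_hashtag_words(content):
    """Return the '#'-prefixed words of a post body, in order."""
    return [word for word in content.split() if word.startswith('#')]
# _extract_hashtag_words('hello #django world #ajax') -> ['#django', '#ajax']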
|
[
"[email protected]"
] | |
d7269d784f0dd8367a8b8ad07ab5661409480603
|
51d0377511a5da902033fb9d80184db0e096fe2c
|
/21-deep-learning-in-python/1-basics-of-deep-learning-and-neural-networks/01-coding-the-forward-propagation-algorithm.py
|
52eed540367f55989509b534772aa4bdd0b4485f
|
[] |
no_license
|
sashakrasnov/datacamp
|
c28c6bda178163337baed646220b2f7dcc36047d
|
759f4cec297883907e21118f24a3449d84c80761
|
refs/heads/master
| 2021-12-07T02:54:51.190672 | 2021-09-17T21:05:29 | 2021-09-17T21:05:29 | 157,093,632 | 6 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,066 |
py
|
'''
Coding the forward propagation algorithm
In this exercise, you'll write code to do forward propagation (prediction) for your first neural network:
https://s3.amazonaws.com/assets.datacamp.com/production/course_3524/datasets/1_4.png
Each data point is a customer. The first input is how many accounts they have, and the second input is how many children they have. The model will predict how many transactions the user makes in the next year. You will use this data throughout the first 2 chapters of this course.
The input data has been pre-loaded as input_data, and the weights are available in a dictionary called weights. The array of weights for the first node in the hidden layer are in weights['node_0'], and the array of weights for the second node in the hidden layer are in weights['node_1'].
The weights feeding into the output node are available in weights['output'].
NumPy will be pre-imported for you as np in all exercises.
'''
import numpy as np
input_data = np.array([3, 5])
weights = {
'node_0': np.array([ 2, 4]),
'node_1': np.array([ 4, -5]),
'output': np.array([ 2, 7])
}
'''
INSTRUCTIONS
* Calculate the value in node 0 by multiplying input_data by its weights weights['node_0'] and computing their sum. This is the 1st node in the hidden layer.
* Calculate the value in node 1 using input_data and weights['node_1']. This is the 2nd node in the hidden layer.
* Put the hidden layer values into an array. This has been done for you.
* Generate the prediction by multiplying hidden_layer_outputs by weights['output'] and computing their sum.
* Hit 'Submit Answer' to print the output!
'''
# Calculate node 0 value: node_0_value
node_0_value = (input_data * weights['node_0']).sum()
# Calculate node 1 value: node_1_value
node_1_value = (input_data * weights['node_1']).sum()
# Put node values into array: hidden_layer_outputs
hidden_layer_outputs = np.array([node_0_value, node_1_value])
# Calculate output: output
output = (hidden_layer_outputs * weights['output']).sum()
# Print output
print(output)
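# Worked check of the arithmetic above:
#   node_0_value = 3*2 + 5*4      = 26
#   node_1_value = 3*4 + 5*(-5)   = -13
#   output       = 26*2 + (-13)*7 = -39
# so the script prints -39.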
|
[
"[email protected]"
] | |
5b8c798cf4696fcbcf01bc1c454ccfac58142539
|
3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2
|
/src/Week2/NestedForLoops/printMysteryStarShape.py
|
e984963bec5498ff2b46607c922afbc823618747
|
[] |
no_license
|
theguyoverthere/CMU15-112-Spring17
|
b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8
|
b8287092b14e82d2a3aeac6c27bffbc95382eb34
|
refs/heads/master
| 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 188 |
py
|
def printMysteryStarShape(n):
for row in range(n):
print(row, end=" ")
for col in range(row):
print("*", end=" ")
print()
printMysteryStarShape(5)
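# Expected output (tracing the loops; each line ends with a trailing space):
# 0
# 1 *
# 2 * *
# 3 * * *
# 4 * * * *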
|
[
"[email protected]"
] | |
39cbb5a9beaf9637dc7fb56113ad2c3884e152c0
|
93c43f774b11d3f44a3c4b83ab94be3600d606df
|
/Deep-Learning/R-CNN/utils.py
|
40a7d2ba1b2752903b37b5576f40183ecfa2485c
|
[
"Apache-2.0"
] |
permissive
|
sadbb/CVCode
|
299afeafb5a09d030b42ec00c6cd9c8087bc718f
|
c7c8b527af786d8f113122231e6296987b242b59
|
refs/heads/master
| 2020-04-07T04:24:04.040378 | 2018-11-17T14:13:23 | 2018-11-17T14:13:23 | 158,053,757 | 1 | 0 |
Apache-2.0
| 2018-11-18T05:41:43 | 2018-11-18T05:41:42 | null |
UTF-8
|
Python
| false | false | 1,408 |
py
|
# -*- coding:utf-8 -*-
import numpy as np
def get_IoU(ground_truth, region):
# xmin, ymin, xmax, ymax
x1 = max(ground_truth[0], region[0])
y1 = max(ground_truth[1], region[1])
x2 = min(ground_truth[2], region[2])
y2 = min(ground_truth[3], region[3])
if x2 - x1 < 0 or y2 - y1 < 0: # no overlap on either axis
return 0
inter_area = (x2 - x1 + 1) * (y2 - y1 + 1)
outer_area = (region[2] - region[0]) * (region[3] - region[1]) \
+ (ground_truth[2] - ground_truth[0]) * (ground_truth[3] - ground_truth[1]) - inter_area
iou = inter_area / outer_area
return iou
def NMS(nms_sum):
regions = []
nms_sum = nms_sum[nms_sum[:,6]!=20]
for i in range(len(nms_sum)):
i_xmin, i_ymin, i_width, i_height, i_image_region, i_score, i_label = nms_sum[i]
flag = False
for j in range(len(nms_sum)):
if i == j:
continue
j_xmin, j_ymin, j_width, j_height, j_image_region, j_score, j_label = nms_sum[j]
iou = get_IoU([i_xmin, i_ymin, i_xmin+i_width, i_ymin+i_height],
[j_xmin, j_ymin, j_xmin+j_width, j_ymin+j_height]) # (xmin, ymin, xmax, ymax) order expected by get_IoU
if iou > 0.5 and i_score > j_score:
flag = True
elif i_score < j_score:
break
if flag == True:
regions.append([[i_xmin, i_ymin, i_width, i_height], i_label])
return np.asarray(regions)
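# Hedged usage sketch (illustrative coordinates, not from the original repo);
# boxes are given in the (xmin, ymin, xmax, ymax) order get_IoU expects.
if __name__ == '__main__':
    # The overlap spans 5 units per axis, counted inclusively as 6*6 = 36
    # by the +1 convention above; the union area works out to 164.
    print(get_IoU([0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]))  # ~0.2195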
|
[
"[email protected]"
] | |
b881534e43a240f73a78798cdc8a12f464a714f5
|
0ddfc02a2cc459e6ccd5322571b430af3b86001b
|
/book_management/book_management/asgi.py
|
bec0c1dc1ddae3d2adafba607aa143eb00c27844
|
[] |
no_license
|
XMLPro/2020_groupdev
|
87e5d6d6f9d91bff79f56c29bf24e02b6322fac8
|
3f01e3e7e74ed76950d2526d74de566158bf3971
|
refs/heads/master
| 2023-07-18T16:17:23.892887 | 2020-06-09T12:50:07 | 2020-06-09T12:50:07 | 267,032,912 | 1 | 1 | null | 2021-09-22T19:11:47 | 2020-05-26T12:10:05 |
Python
|
UTF-8
|
Python
| false | false | 407 |
py
|
"""
ASGI config for book_management project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'book_management.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
661012d59719e3422c970f2479d9719ecb5d6489
|
40132307c631dccbf7aa341eb308f69389715c73
|
/OLD/idmt/maya/MTD/getInfoByName.py
|
b34d1be3aea426c52a8e50c35ea69f19dc77fa97
|
[] |
no_license
|
Bn-com/myProj_octv
|
be77613cebc450b1fd6487a6d7bac991e3388d3f
|
c11f715996a435396c28ffb4c20f11f8e3c1a681
|
refs/heads/master
| 2023-03-25T08:58:58.609869 | 2021-03-23T11:17:13 | 2021-03-23T11:17:13 | 348,676,742 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 744 |
py
|
__author__ = 'xuweijian'
import maya.cmds as mc
class getInfoByName():
def getInfoByScene(self):
dict={}
fileName = mc.file(q=1, sn=1,shn=1)
SceneName=fileName
info=SceneName.split('_')
SCinfo=[info[1][1:3],info[1][4:7]]
dict['project']=info[0]
dict['scene']=SCinfo[0]
dict['camera']=SCinfo[1]
dict['part']=info[2]
return dict
def getInfoByAsset(self):
dict={}
fileName = mc.file(q=1, sn=1,shn=1)
SceneName=fileName
info=SceneName.split('_')
SCinfo=[info[1][1:2],info[1][4:6]]
dict['project']=info[0]
dict['type']=info[1]
dict['name']=info[2]
dict['part']=info[3]
return dict
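# Hedged example (naming convention inferred from the slicing in
# getInfoByScene above): a scene file named 'PROJ_s01c001_anim.ma' would
# yield project='PROJ', scene='01', camera='001', part='anim.ma'.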
|
[
"[email protected]"
] | |
d83be9292f9ea22d23ead848f56214334727719b
|
69cefee12d0bd4c374c5a0f0ebb25c6c92e58475
|
/src/core/geom/prim/box.py
|
729b0676b2dcc540335185f15922f26bc295a481
|
[] |
no_license
|
PlumpMath/panda3dstudio
|
5821353ce5519733f0d75fc518dd97f2e6c1da6c
|
e736f17bcfb1c597fd4b217610bd5430642eea5a
|
refs/heads/master
| 2021-01-25T06:55:38.174353 | 2017-05-21T14:04:27 | 2017-05-21T14:04:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 22,850 |
py
|
from .base import *
def _get_mesh_density(segments):
poly_count = 2 * segments["x"] * segments["y"]
poly_count += 2 * segments["x"] * segments["z"]
poly_count += 2 * segments["y"] * segments["z"]
return poly_count
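# e.g. segments {"x": 1, "y": 1, "z": 1} gives 2*1*1 * 3 = 6 polygons, one
# quad per box face (each quad is triangulated into two tris further down).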
def _define_geom_data(segments, temp=False):
geom_data = []
# store PosObjs referring to positions along the box edges, so they can be
# shared by adjacent sides; this in turn will ensure that the corresponding
# Vertex objects will be merged
edge_positions = {}
def get_side_data(i):
d = {}
for sign in (-1, 1):
d[sign] = {
"normal": tuple(map(lambda x: sign * 1. if x == i else 0., range(3))),
"vert_data": {}
}
return "xyz"[i - 2] + "xyz"[i - 1], d
sides = dict(map(get_side_data, range(3)))
offsets = {"x": -.5, "y": -.5, "z": 0.}
# Define vertex data
for plane in sides:
axis1, axis2 = plane
axis3 = "xyz".replace(axis1, "").replace(axis2, "")
coords = {"x": 0., "y": 0., "z": 0.}
segs1 = segments[axis1]
segs2 = segments[axis2]
segs3 = segments[axis3]
i1 = "xyz".index(axis1)
i2 = "xyz".index(axis2)
range1 = xrange(segs1 + 1)
range2 = xrange(segs2 + 1)
side_pair = sides[plane]
for direction in side_pair:
vert_id = 0
side = side_pair[direction]
vert_data = side["vert_data"]
normal = side["normal"]
coords[axis3] = (0. if direction == -1 else 1.) + offsets[axis3]
offset1 = offsets[axis1]
offset2 = offsets[axis2]
for i in range2:
b = (1. / segs2) * i
coords[axis2] = b + offset2
for j in range1:
a = (1. / segs1) * j
coords[axis1] = a + offset1
pos = tuple(coords[axis] for axis in "xyz")
if i in (0, segs2) or j in (0, segs1):
k = 0 if direction == -1 else segs3
key_components = {axis1: j, axis2: i, axis3: k}
key = tuple(key_components[axis] for axis in "xyz")
if key in edge_positions:
pos_obj = edge_positions[key]
else:
pos_obj = PosObj(pos)
edge_positions[key] = pos_obj
else:
pos_obj = PosObj(pos)
if temp:
vert_data[vert_id] = {"pos": pos_obj, "normal": normal}
else:
u = (-b if plane == "zx" else a) * direction
u += (1. if (direction > 0 if plane == "zx" else direction < 0) else 0.)
v = a if plane == "zx" else b
vert_data[vert_id] = {"pos": pos_obj, "normal": normal, "uvs": {0: (u, v)}}
vert_id += 1
if not temp:
smoothing_id = 0
# Define faces
for plane in sides:
axis1, axis2 = plane
segs1 = segments[axis1]
segs2 = segments[axis2]
side_pair = sides[plane]
for direction in side_pair:
side = side_pair[direction]
vert_data = side["vert_data"]
for i in xrange(segs2):
for j in xrange(segs1):
vi1 = i * (segs1 + 1) + j
vi2 = vi1 + 1
vi3 = vi2 + segs1
vi4 = vi3 + 1
vert_ids = (vi1, vi2, vi4) if direction == 1 else (vi1, vi4, vi2)
tri_data1 = [vert_data[vi] for vi in vert_ids]
vert_ids = (vi1, vi4, vi3) if direction == 1 else (vi1, vi3, vi4)
tri_data2 = [vert_data[vi] for vi in vert_ids]
if temp:
poly_data = (tri_data1, tri_data2)
else:
tris = (tri_data1, tri_data2)
poly_data = {"tris": tris, "smoothing": [(smoothing_id, True)]}
geom_data.append(poly_data)
if not temp:
smoothing_id += 1
return geom_data
class TemporaryBox(TemporaryPrimitive):
def __init__(self, segments, color, pos):
TemporaryPrimitive.__init__(self, "box", color, pos)
self._size = {"x": 0., "y": 0., "z": 0.}
geom_data = _define_geom_data(segments, True)
self.create_geometry(geom_data)
self.get_origin().set_sz(.001)
def update_size(self, x=None, y=None, z=None):
origin = self.get_origin()
size = self._size
if x is not None:
sx = max(abs(x), .001)
sy = max(abs(y), .001)
origin.set_x((-sx if x < 0. else sx) * .5)
origin.set_y((-sy if y < 0. else sy) * .5)
if size["x"] != sx:
size["x"] = sx
origin.set_sx(sx)
if size["y"] != sy:
size["y"] = sy
origin.set_sy(sy)
if z is not None:
sz = max(abs(z), .001)
s = -sz if z < 0. else sz
if size["z"] != s:
size["z"] = s
origin.set_sz(sz)
origin.set_z(s if s < 0. else 0.)
def get_size(self):
return self._size
def is_valid(self):
return max(self._size.itervalues()) > .001
def finalize(self):
pos = self._pivot.get_pos()
pivot = self.get_pivot()
origin = self.get_origin()
x, y, z = origin.get_pos()
pos = self.world.get_relative_point(pivot, Point3(x, y, 0.))
pivot.set_pos(self.world, pos)
origin.set_x(0.)
origin.set_y(0.)
return TemporaryPrimitive.finalize(self)
class Box(Primitive):
def __init__(self, model):
prop_ids = ["size_%s" % axis for axis in "xyz"]
prop_ids.append("segments")
Primitive.__init__(self, "box", model, prop_ids)
self._segments = {"x": 1, "y": 1, "z": 1}
self._segments_backup = {"x": 1, "y": 1, "z": 1}
self._size = {"x": 0., "y": 0., "z": 0.}
def define_geom_data(self):
return _define_geom_data(self._segments)
def create(self, segments):
self._segments = segments
for step in Primitive.create(self, _get_mesh_density(segments)):
yield
self.update_initial_coords()
def set_segments(self, segments):
if self._segments == segments:
return False
self._segments_backup = self._segments
self._segments = segments
return True
def __update_size(self):
size = self._size
sx = size["x"]
sy = size["y"]
sz = size["z"]
origin = self.get_origin()
origin.set_scale(sx, sy, abs(sz))
origin.set_z(sz if sz < 0. else 0.)
self.reset_initial_coords()
self.get_geom_data_object().bake_transform()
def init_size(self, x, y, z):
origin = self.get_origin()
size = self._size
size["x"] = max(abs(x), .001)
size["y"] = max(abs(y), .001)
size["z"] = max(abs(z), .001) * (-1. if z < 0. else 1.)
self.__update_size()
def set_dimension(self, axis, value):
if self._size[axis] == value:
return False
self._size[axis] = value
return True
def get_side_data(self):
side_data = {}
side_ids = ("left", "right", "back", "front", "bottom", "top")
side_axes = ("x", "x", "y", "y", "z", "z")
side_vecs = (Vec3.left(), Vec3.right(), Vec3.back(), Vec3.forward(), Vec3.down(), Vec3.up())
size = self._size.copy()
height = size["z"]
size["z"] = abs(height)
segs = self._segments
center = Point3() + Vec3.up() * height * .5
for side_id, side_axis, side_vec in zip(side_ids, side_axes, side_vecs):
pos = center + side_vec * size[side_axis] * .5
x, y = [size[axis] for axis in "xyz".replace(side_axis, "")]
segs_x, segs_y = [segs[axis] for axis in "xyz".replace(side_axis, "")]
side_segs = {"x": segs_x, "y": segs_y}
side_data[side_id] = {"pos": pos, "size": (x, y), "segs": side_segs}
return side_data
def get_data_to_store(self, event_type, prop_id=""):
if event_type == "prop_change" and prop_id in self.get_type_property_ids():
data = {}
data[prop_id] = {"main": self.get_property(prop_id)}
if prop_id == "segments":
data.update(self.get_geom_data_backup().get_data_to_store("deletion"))
data.update(self.get_geom_data_object().get_data_to_store("creation"))
self.remove_geom_data_backup()
elif "size" in prop_id:
data.update(self.get_geom_data_object().get_property_to_store("subobj_transform",
"prop_change", "all"))
return data
return Primitive.get_data_to_store(self, event_type, prop_id)
def cancel_geometry_recreation(self, info):
Primitive.cancel_geometry_recreation(self, info)
if info == "creation":
self._segments = self._segments_backup
Mgr.update_remotely("selected_obj_prop", "box", "segments", self._segments)
def set_property(self, prop_id, value, restore=""):
def update_app():
Mgr.update_remotely("selected_obj_prop", "box", prop_id,
self.get_property(prop_id, True))
obj_id = self.get_toplevel_object().get_id()
if prop_id == "segments":
if restore:
segments = value["count"]
self.restore_initial_coords(value["pos_data"])
else:
segments = self._segments.copy()
segments.update(value)
change = self.set_segments(segments)
if change:
if not restore:
self.recreate_geometry(_get_mesh_density(segments))
update_app()
return change
elif "size" in prop_id:
axis = prop_id.split("_")[1]
change = self.set_dimension(axis, value)
if change:
task = self.__update_size
sort = PendingTasks.get_sort("set_normals", "object") - 1
PendingTasks.add(task, "upd_size", "object", sort, id_prefix=obj_id)
self.get_model().update_group_bbox()
update_app()
return change
else:
return Primitive.set_property(self, prop_id, value, restore)
def get_property(self, prop_id, for_remote_update=False):
if prop_id == "segments":
if for_remote_update:
return self._segments
else:
return {"count": self._segments, "pos_data": self.get_initial_coords()}
elif "size" in prop_id:
axis = prop_id.split("_")[1]
return self._size[axis]
else:
return Primitive.get_property(self, prop_id, for_remote_update)
def __center_origin(self, adjust_pivot=True):
model = self.get_model()
origin = self.get_origin()
x, y, z = origin.get_pos()
pivot = model.get_pivot()
if adjust_pivot:
pos = self.world.get_relative_point(pivot, Point3(x, y, 0.))
pivot.set_pos(self.world, pos)
origin.set_x(0.)
origin.set_y(0.)
def finalize(self):
self.__center_origin()
self.__update_size()
Primitive.finalize(self)
class BoxManager(PrimitiveManager):
def __init__(self):
PrimitiveManager.__init__(self, "box", custom_creation=True)
self._height_axis = V3D(0., 0., 1.)
self._draw_plane = None
self._draw_plane_normal = V3D()
self._dragged_point = Point3()
self._tmp_box_origin = None
self._created_planes = []
for axis in "xyz":
self.set_property_default("size_%s" % axis, 1.)
self.set_property_default("temp_segments", {"x": 1, "y": 1, "z": 1})
self.set_property_default("segments", {"x": 1, "y": 1, "z": 1})
Mgr.add_app_updater("box_to_planes", self.__convert_boxes_to_planes)
def setup(self):
creation_phases = []
creation_phase = (self.__start_creation_phase1, self.__creation_phase1)
creation_phases.append(creation_phase)
creation_phase = (self.__start_creation_phase2, self.__creation_phase2)
creation_phases.append(creation_phase)
status_text = {}
status_text["obj_type"] = "box"
status_text["phase1"] = "draw out the base"
status_text["phase2"] = "draw out the height"
return PrimitiveManager.setup(self, creation_phases, status_text)
def create_temp_primitive(self, color, pos):
segs = self.get_property_defaults()["segments"]
tmp_segs = self.get_property_defaults()["temp_segments"]
segments = dict((axis, min(segs[axis], tmp_segs[axis])) for axis in "xyz")
tmp_prim = TemporaryBox(segments, color, pos)
return tmp_prim
def create_primitive(self, model):
prim = Box(model)
segments = self.get_property_defaults()["segments"]
poly_count = _get_mesh_density(segments)
progress_steps = (poly_count // 20) * 4
gradual = progress_steps > 80
for step in prim.create(segments):
if gradual:
yield
yield prim, gradual
def init_primitive_size(self, prim, size=None):
if size is None:
prop_defaults = self.get_property_defaults()
x, y, z = [prop_defaults["size_%s" % axis] for axis in "xyz"]
else:
x, y, z = [size[axis] for axis in "xyz"]
prim.init_size(x, y, z)
def __start_creation_phase1(self):
""" Start drawing out box base """
tmp_prim = self.get_temp_primitive()
origin = tmp_prim.get_origin()
self._height_axis = self.world.get_relative_vector(origin, V3D(0., 0., 1.))
def __creation_phase1(self):
""" Draw out box base """
screen_pos = self.mouse_watcher.get_mouse()
point = Mgr.get(("grid", "point_at_screen_pos"), screen_pos)
if not point:
return
grid_origin = Mgr.get(("grid", "origin"))
self._dragged_point = self.world.get_relative_point(grid_origin, point)
tmp_prim = self.get_temp_primitive()
pivot = tmp_prim.get_pivot()
x, y, z = pivot.get_relative_point(grid_origin, point)
tmp_prim.update_size(x, y)
def __start_creation_phase2(self):
""" Start drawing out box height """
cam = self.cam()
cam_forward_vec = self.world.get_relative_vector(cam, Vec3.forward())
normal = V3D(cam_forward_vec - cam_forward_vec.project(self._height_axis))
# If the plane normal is the null vector, the axis must be parallel to
# the forward camera direction. In this case, a new normal can be chosen
# arbitrarily, e.g. a horizontal vector perpendicular to the axis.
if normal.length_squared() < .0001:
x, y, z = self._height_axis
# if the height axis is nearly vertical, any horizontal vector will
# qualify as plane normal, e.g. a vector pointing in the positive
# X-direction; otherwise, the plane normal can be computed as
# perpendicular to the axis
normal = V3D(1., 0., 0.) if max(abs(x), abs(y)) < .0001 else V3D(y, -x, 0.)
self._draw_plane = Plane(normal, self._dragged_point)
if self.cam.lens_type == "persp":
cam_pos = cam.get_pos(self.world)
if normal * V3D(self._draw_plane.project(cam_pos) - cam_pos) < .0001:
normal *= -1.
self._draw_plane_normal = normal
def __creation_phase2(self):
""" Draw out box height """
if not self.mouse_watcher.has_mouse():
return
screen_pos = self.mouse_watcher.get_mouse()
cam = self.cam()
lens_type = self.cam.lens_type
near_point = Point3()
far_point = Point3()
self.cam.lens.extrude(screen_pos, near_point, far_point)
rel_pt = lambda point: self.world.get_relative_point(cam, point)
near_point = rel_pt(near_point)
far_point = rel_pt(far_point)
if lens_type == "persp":
# the height cannot be calculated if the cursor points away from the plane
# in which it is drawn out
if V3D(far_point - near_point) * self._draw_plane_normal < .0001:
return
point = Point3()
if not self._draw_plane.intersects_line(point, near_point, far_point):
return
tmp_prim = self.get_temp_primitive()
pivot = tmp_prim.get_pivot()
z = pivot.get_relative_point(self.world, point)[2]
tmp_prim.update_size(z=z)
def create_custom_primitive(self, name, x, y, z, segments, pos, inverted=False,
rel_to_grid=False, gradual=False):
model_id = self.generate_object_id()
model = Mgr.do("create_model", model_id, name, pos)
if not rel_to_grid:
pivot = model.get_pivot()
pivot.clear_transform()
pivot.set_pos(self.world, pos)
next_color = self.get_next_object_color()
model.set_color(next_color, update_app=False)
prim = Box(model)
for step in prim.create(segments):
if gradual:
yield
prim.init_size(x, y, z)
prim.get_geom_data_object().finalize_geometry()
model.set_geom_object(prim)
self.set_next_object_color()
if inverted:
prim.set_property("normal_flip", True)
yield model
def __boxes_to_planes_conversion(self):
selection = Mgr.get("selection", "top")
objs = selection[:]
obj_names = GlobalData["obj_names"]
box_names = []
poly_count = 0
for obj in objs:
geom_data_obj = obj.get_geom_object().get_geom_data_object()
poly_count += len(geom_data_obj.get_subobjects("poly"))
progress_steps = (poly_count // 20) * 4
gradual = progress_steps > 80
if gradual:
Mgr.show_screenshot()
GlobalData["progress_steps"] = progress_steps
planes = self._created_planes
side_hprs = {"left": VBase3(0., 90., -90.), "right": VBase3(0., 90., 90.),
"back": VBase3(0., 90., 0.), "front": VBase3(180., 90., 0.),
"bottom": VBase3(180., 180., 0.), "top": VBase3(0., 0., 0.)}
for obj in objs:
group = obj.get_group()
parent = obj.get_parent()
material = obj.get_material()
box_name = obj.get_name()
box_names.append(box_name)
box_origin = NodePath(obj.get_origin().node().make_copy())
box_origin.set_transform(obj.get_origin().get_net_transform())
self._tmp_box_origin = box_origin
box = obj.get_geom_object()
side_data = box.get_side_data()
for side_id, data in side_data.iteritems():
name = box_name + " " + side_id
name = get_unique_name(name, obj_names)
obj_names.append(name)
pos = data["pos"]
x, y = data["size"]
segments = data["segs"]
inverted = box.has_flipped_normals()
creator = Mgr.do("create_custom_plane", name, x, y, segments, pos,
inverted, gradual=gradual)
for plane in creator:
if gradual:
yield True
plane.register(restore=False)
planes.append(plane)
plane_pivot = plane.get_pivot()
plane_pivot.set_hpr(side_hprs[side_id])
plane_pivot.reparent_to(box_origin)
plane_pivot.wrt_reparent_to(Mgr.get("object_root"))
if group:
plane.set_group(group.get_id())
elif parent:
plane.set_parent(parent.get_id())
if material:
plane.set_material(material)
box_origin.remove_node()
Mgr.exit_state("processing")
self._tmp_box_origin = None
Mgr.do("update_history_time")
obj_data = {}
for obj in objs:
obj_data[obj.get_id()] = obj.get_data_to_store("deletion")
obj.destroy(add_to_hist=False)
for plane in planes:
hist_data = plane.get_data_to_store("creation")
hist_data["selection_state"] = {"main": True}
obj_data[plane.get_id()] = hist_data
selection.add(planes, add_to_hist=False, update=False)
self._created_planes = []
if len(objs) == 1:
event_descr = 'Make planes from box "%s"' % box_names[0]
else:
event_descr = 'Make planes from boxes:\n'
event_descr += "".join(['\n "%s"' % name for name in box_names])
event_data = {"objects": obj_data}
event_data["object_ids"] = set(Mgr.get("object_ids"))
Mgr.do("add_history", event_descr, event_data, update_time_id=False)
yield False
def __cancel_conversion_process(self, info):
if info == "convert_boxes_to_planes":
for obj in self._created_planes:
obj.destroy(unregister=False, add_to_hist=False)
self._created_planes = []
if self._tmp_box_origin:
self._tmp_box_origin.remove_node()
self._tmp_box_origin = None
def __convert_boxes_to_planes(self):
Mgr.do("create_registry_backups")
Mgr.do("create_id_range_backups")
process = self.__boxes_to_planes_conversion()
if process.next():
handler = self.__cancel_conversion_process
Mgr.add_notification_handler("long_process_cancelled", "box_mgr", handler, once=True)
task = lambda: Mgr.remove_notification_handler("long_process_cancelled", "box_mgr")
task_id = "remove_notification_handler"
PendingTasks.add(task, task_id, "object", id_prefix="box_mgr", sort=100)
descr = "Converting..."
Mgr.do_gradually(process, "convert_boxes_to_planes", descr, cancellable=True)
MainObjects.add_class(BoxManager)
|
[
"[email protected]"
] | |
1df2a7f99628c9b74c20ae77a1c0534f60c03ee2
|
46f2834ae92da9e17463def0c635f75bf05886a1
|
/abc/abc138/D/main.py
|
9b51ace7c6d8605a5b3a4db56c898dc568cb7d84
|
[] |
no_license
|
replu/atcoder
|
bf3da10c937c955ca1bc3fa33b8f24c74d2d6c50
|
a6183d03355058bccc2b89db5e07b7f72598fea3
|
refs/heads/master
| 2023-03-30T15:03:47.879783 | 2021-03-28T17:08:19 | 2021-03-28T17:08:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 979 |
py
|
#!/usr/bin/env python3
import sys
def solve(N: int, Q: int, a: "List[int]", b: "List[int]", p: "List[int]", x: "List[int]"):
return
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
Q = int(next(tokens)) # type: int
a = [int()] * (N - 1) # type: "List[int]"
b = [int()] * (N - 1) # type: "List[int]"
for i in range(N - 1):
a[i] = int(next(tokens))
b[i] = int(next(tokens))
p = [int()] * (Q) # type: "List[int]"
x = [int()] * (Q) # type: "List[int]"
for i in range(Q):
p[i] = int(next(tokens))
x[i] = int(next(tokens))
solve(N, Q, a, b, p, x)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
acd05ffa3bcc918a2abbf4312a2812f07553c039
|
649bd422025e421d86025743eac324c9b882a2e8
|
/exam/1_three-dimensional_atomic_system/dump/phasetrans/temp209_3500.py
|
5fa9737008648c72e1023832853fbdab46d1d6c0
|
[] |
no_license
|
scheuclu/atom_class
|
36ddee1f6a5995872e858add151c5942c109847c
|
0c9a8c63d9b38898c1869fe8983126cef17662cd
|
refs/heads/master
| 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 68,815 |
py
|
ITEM: TIMESTEP
3500
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
-1.7138210531037575e+02 2.1858210531042008e+02
-1.7138210531037575e+02 2.1858210531042008e+02
-1.7138210531037575e+02 2.1858210531042008e+02
ITEM: ATOMS id type xs ys zs
37 1 0.526386 0.0125299 0.0148382
1848 1 0.61859 0.041269 0.0431283
911 1 0.456585 0.0651106 0.0177887
563 1 0.724494 0.061223 0.0130083
1405 1 0.299137 0.0750985 0.01415
665 1 0.77906 0.498803 0.112953
187 1 0.703163 0.166028 0.0209972
300 1 0.300908 0.491011 0.496003
709 1 0.252767 0.418633 0.477177
1109 1 0.861993 0.296465 0.00476949
778 1 0.574025 0.254773 0.0124845
1449 1 0.159708 0.30292 0.0121509
1312 1 0.295728 0.253473 0.0269219
421 1 0.193101 0.0237521 0.155931
284 1 0.775171 0.313088 0.00590561
1569 1 0.859539 0.31749 0.0214392
691 1 0.114557 0.322269 0.00432635
1323 1 0.259933 0.327337 0.0404397
564 1 0.343064 0.356586 0.036614
1994 1 0.821942 0.114193 0.484088
1366 1 0.278551 0.379666 0.487161
800 1 0.564391 0.425216 0.0348902
1179 1 0.625416 0.457722 0.0385562
1738 1 0.607836 0.427575 0.00559167
1150 1 0.0757371 0.488466 0.0266354
1734 1 0.627131 0.163478 0.0120844
399 1 0.964535 0.422961 0.480119
1049 1 0.236222 0.472889 0.00748955
188 1 0.342177 0.0143592 0.0206597
1806 1 0.240182 0.0328638 0.0459125
1886 1 0.00076344 0.054908 0.0353548
1501 1 0.0322404 0.0627317 0.0168144
1889 1 0.26022 0.140749 0.00913824
817 1 0.826063 0.13226 0.0134929
776 1 0.211001 0.147697 0.0112749
429 1 0.356179 0.146828 0.012872
1206 1 0.737288 0.204296 0.0414402
1664 1 0.0405527 0.262179 0.0234951
575 1 0.554853 0.310621 0.0216895
1064 1 0.00101327 0.495672 0.286583
1947 1 0.00537831 0.00357852 0.392817
1053 1 0.699016 0.35797 0.0320762
1214 1 0.25502 0.367229 0.0165674
1972 1 0.688914 0.416941 0.0568715
673 1 0.303891 0.494971 0.057364
73 1 0.681108 0.396356 0.00181737
1760 1 0.179844 0.464742 0.0575061
554 1 0.1713 0.484147 0.00719237
1022 1 0.688546 0.473262 0.0251384
660 1 0.00812171 0.0809588 0.498008
867 1 0.906503 0.0115078 0.0126497
1803 1 0.671217 0.0218457 0.0400771
1055 1 0.818082 0.0160713 0.0798477
1890 1 0.979502 0.0234739 0.0812527
1822 1 0.228481 0.0767032 0.0484762
77 1 0.927317 0.108717 0.0276405
863 1 0.148027 0.171151 0.0915671
1897 1 0.682613 0.157988 0.083264
1191 1 0.932939 0.119596 0.0475241
445 1 0.00305702 0.124527 0.0416175
1640 1 0.432035 0.158439 0.0377118
1238 1 0.562066 0.19046 0.0741604
341 1 0.0172115 0.216007 0.0343712
793 1 0.570113 0.194919 0.0681847
62 1 0.0664184 0.30017 0.0261862
50 1 0.293232 0.305763 0.021898
957 1 0.752698 0.317331 0.0411497
1245 1 0.635311 0.406241 0.00480324
1523 1 0.761121 0.416055 0.0651185
1752 1 0.516495 0.430969 0.0590714
699 1 0.513082 0.468619 0.0789045
1082 1 0.16382 0.464943 0.0697651
1917 1 0.196303 0.48661 0.0531354
849 1 0.147426 0.00546576 0.338486
1271 1 0.657914 0.0208386 0.0637206
968 1 0.268071 0.140428 0.0399058
1749 1 0.699359 0.168991 0.0546152
68 1 0.959605 0.150601 0.0764109
1891 1 0.228484 0.135655 0.0517556
908 1 0.386056 0.128684 0.0809426
1328 1 0.517247 0.190889 0.0341375
443 1 0.595421 0.20289 0.082162
525 1 0.476414 0.200846 0.0879292
1453 1 0.845629 0.224506 0.0416786
1925 1 0.697109 0.220633 0.0653882
295 1 0.347854 0.236751 0.0676578
1638 1 0.994776 0.258192 0.068115
1244 1 0.485256 0.302527 0.0681909
1673 1 0.494695 0.312999 0.106356
1333 1 0.737544 0.334951 0.0934652
1762 1 0.647584 0.332066 0.0561844
1167 1 0.265719 0.36821 0.0735272
645 1 0.323153 0.393402 0.066371
171 1 0.770247 0.388638 0.101891
1717 1 0.305499 0.407197 0.100384
2045 1 0.342025 0.409383 0.0596619
1283 1 0.00573049 0.411414 0.124295
383 1 0.396425 0.43761 0.0721333
1125 1 0.280903 0.46898 0.0763099
1338 1 0.284977 0.469332 0.0409206
1308 1 0.568523 0.0546802 0.0456526
1255 1 0.293287 0.0669006 0.0497038
774 1 0.862222 0.0473179 0.0758791
1782 1 0.471894 0.119784 0.0958749
612 1 0.195504 0.147433 0.122454
1856 1 0.783752 0.165394 0.106242
1036 1 0.0882586 0.262301 0.0776204
292 1 0.540743 0.281276 0.0767614
1817 1 0.136146 0.313395 0.0671727
1983 1 0.257858 0.338567 0.0715756
1459 1 0.664226 0.385716 0.0780798
1893 1 0.898292 0.403078 0.0480143
1424 1 0.118379 0.452123 0.0736376
365 1 0.49982 0.462859 0.0584243
323 1 0.896069 0.496399 0.0985225
1705 1 0.529347 0.0478694 0.0943626
1028 1 0.861888 0.0759532 0.10245
959 1 0.324863 0.124745 0.103
568 1 0.880965 0.115014 0.0961953
1992 1 0.861677 0.201895 0.114722
390 1 0.896432 0.196646 0.0856564
1525 1 0.962358 0.299911 0.114398
72 1 0.186895 0.240234 0.0642211
797 1 0.831865 0.313706 0.0700918
1375 1 0.498914 0.41569 0.0861595
1973 1 0.888494 0.48695 0.0798269
2037 1 0.410989 0.011356 0.132183
220 1 0.121744 0.0475725 0.127499
460 1 0.581276 0.0374232 0.141503
1730 1 0.731379 0.062392 0.152632
1577 1 0.147536 0.129483 0.127239
990 1 0.954156 0.125603 0.128174
205 1 0.610248 0.167773 0.139487
814 1 0.973937 0.135311 0.0933918
1327 1 0.28859 0.21298 0.111594
2018 1 0.892511 0.260536 0.133504
1732 1 0.306721 0.231285 0.126127
500 1 0.305401 0.358253 0.118303
1066 1 0.36117 0.33769 0.129609
890 1 0.311077 0.400822 0.131719
1196 1 0.348011 0.37539 0.108964
543 1 0.978683 0.411674 0.135488
1911 1 0.181464 0.453931 0.113896
1334 1 0.531887 0.249752 0.486031
1482 1 0.4178 0.0294063 0.137524
370 1 0.395729 0.0610513 0.176718
1062 1 0.580466 0.0532957 0.117438
2022 1 0.851257 0.0646931 0.152123
1713 1 0.870964 0.0522385 0.152946
380 1 0.422998 0.0909225 0.154101
577 1 0.666617 0.0955231 0.134018
1936 1 0.327557 0.137291 0.145614
11 1 0.0497036 0.163977 0.166872
1940 1 0.240984 0.175812 0.138713
947 1 0.195205 0.175877 0.120903
482 1 0.211006 0.208337 0.123364
519 1 0.914113 0.190409 0.163616
1658 1 0.493036 0.234163 0.128656
1364 1 0.972714 0.21108 0.130214
1982 1 0.23161 0.222178 0.128891
1847 1 0.468772 0.229019 0.132528
1478 1 0.27788 0.248625 0.141643
914 1 0.391374 0.292657 0.159035
576 1 0.322269 0.304094 0.178058
766 1 0.403202 0.321749 0.161076
122 1 0.80109 0.351773 0.1501
1083 1 0.0496852 0.35898 0.111009
1502 1 0.103641 0.352201 0.143498
1468 1 0.134783 0.36658 0.165901
720 1 0.278606 0.469523 0.121028
1839 1 0.0734557 0.449061 0.481359
170 1 0.369739 0.0628003 0.136385
61 1 0.322938 0.250746 0.489329
1145 1 0.522079 0.489672 0.388267
1357 1 0.207506 0.0766266 0.158006
895 1 0.346222 0.0794373 0.160418
1807 1 0.455977 0.144321 0.176083
1635 1 0.26869 0.177579 0.172864
796 1 0.714469 0.170705 0.150405
1695 1 0.323077 0.211825 0.170468
1689 1 0.612377 0.228788 0.181619
74 1 0.732852 0.266446 0.135713
1132 1 0.75771 0.269354 0.155161
1788 1 0.602396 0.398363 0.129078
917 1 0.826855 0.354052 0.182911
1235 1 0.0724385 0.42217 0.176742
381 1 0.173174 0.416 0.143719
1309 1 0.963695 0.41926 0.127632
579 1 0.237682 0.447632 0.174536
756 1 0.381899 0.475136 0.177123
807 1 0.448513 0.496344 0.15464
1091 1 0.495809 0.499548 0.135722
1234 1 0.536502 0.0425028 0.183764
114 1 0.544063 0.00210225 0.175938
574 1 0.895881 0.100262 0.195455
1306 1 0.87614 0.0757815 0.179191
1246 1 0.72698 0.1087 0.168874
1443 1 0.509985 0.137987 0.18127
936 1 0.715619 0.160861 0.14319
63 1 0.347952 0.216149 0.143976
1257 1 0.199631 0.334691 0.175889
1102 1 0.747716 0.278514 0.159343
444 1 0.716968 0.288348 0.183975
1185 1 0.0355913 0.402037 0.188345
521 1 0.782571 0.420657 0.147235
1487 1 0.292055 0.433553 0.167152
827 1 0.510191 0.463286 0.185755
992 1 0.801621 0.0429025 0.192382
1474 1 0.753049 0.00255897 0.162881
571 1 0.907753 0.0314589 0.1769
137 1 0.708306 0.0735381 0.192423
836 1 0.795696 0.0489548 0.199531
1773 1 0.916331 0.0971929 0.175235
474 1 0.336349 0.157247 0.172912
643 1 0.017014 0.233577 0.191367
889 1 0.766873 0.22813 0.196842
1791 1 0.167123 0.29501 0.208608
1068 1 0.485523 0.283309 0.216943
662 1 0.210336 0.330807 0.186606
447 1 0.637507 0.350973 0.184325
1320 1 0.287225 0.38295 0.17654
1097 1 0.713272 0.376753 0.203387
722 1 0.0699323 0.394135 0.192486
1812 1 0.544743 0.418837 0.15863
167 1 0.995568 0.409694 0.203279
1582 1 0.387072 0.428626 0.207034
1483 1 0.989988 0.407501 0.18923
933 1 0.346197 0.428893 0.199067
1881 1 0.129839 0.499712 0.184052
1037 1 0.694566 0.00730142 0.23876
956 1 0.952605 0.0784854 0.203791
59 1 0.997634 0.128267 0.199759
1411 1 0.981628 0.1335 0.223561
2040 1 0.472895 0.195937 0.165712
266 1 0.798495 0.264212 0.190424
1237 1 0.773022 0.238722 0.213116
38 1 0.746425 0.265648 0.223355
1232 1 0.125516 0.303045 0.205699
976 1 0.60631 0.350185 0.221899
1184 1 0.870101 0.350651 0.243544
1039 1 0.300543 0.472194 0.22696
1541 1 0.162785 0.0124261 0.225408
1690 1 0.24862 0.0649911 0.227976
1829 1 0.430083 0.0401797 0.245903
1837 1 0.277253 0.0452507 0.231491
1901 1 0.572978 0.0959414 0.232463
1878 1 0.0406789 0.11108 0.241935
1883 1 0.732522 0.131278 0.206379
1513 1 0.934541 0.177243 0.231264
1950 1 0.0920938 0.173459 0.261037
1838 1 0.439372 0.187308 0.265252
449 1 0.672146 0.250621 0.255178
1898 1 0.853183 0.214695 0.24234
67 1 0.956431 0.31295 0.21185
1928 1 0.318273 0.344818 0.228582
1625 1 0.390493 0.34038 0.25748
1186 1 0.470831 0.361521 0.22428
1545 1 0.156508 0.390618 0.250137
1968 1 0.16291 0.429452 0.235085
583 1 0.202882 0.494225 0.0441237
636 1 0.801744 0.493872 0.352841
750 1 0.34685 0.017589 0.256159
1857 1 0.588054 0.0662173 0.24833
1225 1 0.588346 0.0670963 0.230901
620 1 0.402487 0.154412 0.21269
96 1 0.980458 0.172387 0.252251
1604 1 0.677058 0.172882 0.250709
1827 1 0.00120102 0.153778 0.306164
897 1 0.577634 0.190541 0.219717
1135 1 0.395389 0.296058 0.243679
2001 1 0.113882 0.350063 0.25296
1639 1 0.332017 0.395567 0.264663
324 1 0.313272 0.406236 0.277321
121 1 0.989369 0.441384 0.263525
404 1 0.550087 0.495121 0.233245
1084 1 0.734321 0.486526 0.261528
1339 1 0.639915 0.325764 0.0141741
281 1 0.212419 0.0177028 0.296645
846 1 0.0595212 0.0587068 0.235817
1914 1 0.0615928 0.135566 0.261034
1464 1 0.173763 0.113668 0.247642
628 1 0.618748 0.145577 0.275881
834 1 0.274429 0.144854 0.262562
1230 1 0.746763 0.169139 0.262818
837 1 0.0371554 0.17141 0.269788
308 1 0.160167 0.188847 0.243821
1725 1 0.381842 0.288427 0.272425
1578 1 0.410631 0.281574 0.278014
1341 1 0.1761 0.271552 0.280738
240 1 0.195889 0.320546 0.26636
1669 1 0.460383 0.299625 0.2625
1918 1 0.74317 0.304704 0.277361
765 1 0.715643 0.397812 0.252794
1016 1 0.850838 0.418028 0.245769
725 1 0.941162 0.403287 0.268022
742 1 0.62791 0.429496 0.249343
1874 1 0.508174 0.481862 0.26094
1876 1 0.865504 0.488771 0.289832
1954 1 0.181742 0.00335248 0.271384
1969 1 0.418558 0.470052 0.273084
942 1 0.679567 0.0390118 0.297706
1035 1 0.121258 0.0825247 0.278112
176 1 0.26196 0.0651249 0.26291
820 1 0.277434 0.102091 0.269192
1547 1 0.253182 0.0661933 0.27287
595 1 0.390035 0.0820978 0.266018
536 1 0.707688 0.101658 0.297815
719 1 0.0229175 0.178372 0.281167
379 1 0.0641136 0.216475 0.25871
392 1 0.712793 0.15172 0.288317
1263 1 0.613844 0.227358 0.242348
684 1 0.657009 0.205087 0.313252
1816 1 0.57367 0.225197 0.261075
1330 1 0.227527 0.365776 0.289332
366 1 0.726593 0.362787 0.261177
1144 1 0.857468 0.372202 0.260348
1544 1 0.722138 0.375373 0.275969
1430 1 0.822019 0.407785 0.261982
1787 1 0.503619 0.453757 0.294863
491 1 0.0815504 0.486074 0.300359
566 1 0.621695 0.458263 0.255787
1779 1 0.221506 0.15675 0.282132
385 1 0.334063 0.156632 0.305622
1932 1 0.831524 0.171677 0.292458
769 1 0.386357 0.213803 0.278391
1463 1 0.57336 0.237015 0.324263
1748 1 0.749522 0.246275 0.322115
1472 1 0.366574 0.316464 0.302037
1978 1 0.0172933 0.358608 0.319495
406 1 0.631654 0.337036 0.309995
1701 1 0.984988 0.346052 0.272794
1500 1 0.638651 0.343106 0.308193
255 1 0.71411 0.358695 0.327031
1261 1 0.788479 0.364294 0.28718
1054 1 0.642752 0.372831 0.299742
2013 1 0.0533359 0.415352 0.301142
1213 1 0.642739 0.465444 0.290011
333 1 0.593636 0.474313 0.304215
844 1 0.625068 0.021862 0.30053
713 1 0.542668 0.0621124 0.347574
1224 1 0.151476 0.185535 0.362139
1014 1 0.939953 0.197357 0.341425
1915 1 0.948617 0.198803 0.289916
570 1 0.578791 0.210401 0.331731
1413 1 0.413335 0.252084 0.313975
896 1 0.832589 0.259057 0.353133
539 1 0.870489 0.377418 0.36721
232 1 0.202403 0.399407 0.314738
733 1 0.191269 0.458202 0.347473
1528 1 0.53231 0.495505 0.0199044
600 1 0.918957 0.0210117 0.375237
53 1 0.63417 0.0269231 0.314102
944 1 0.895997 0.0591897 0.350163
1197 1 0.185551 0.142303 0.375745
780 1 0.155465 0.127174 0.342458
506 1 0.431884 0.145336 0.360957
1254 1 0.458149 0.189687 0.305766
1902 1 0.662376 0.169289 0.35238
1085 1 0.303961 0.216121 0.364095
199 1 0.138968 0.219819 0.360203
1579 1 0.608277 0.239253 0.399098
1351 1 0.446966 0.273359 0.345422
98 1 0.430232 0.275705 0.338933
1172 1 0.484478 0.28317 0.34696
1611 1 0.751114 0.320801 0.342187
998 1 0.587178 0.322093 0.304438
811 1 0.236287 0.360282 0.351527
931 1 0.468652 0.40738 0.355031
1621 1 0.0270991 0.436493 0.362561
1231 1 0.11215 0.421344 0.344171
1961 1 0.431246 0.395946 0.353284
132 1 0.560882 0.441651 0.353621
1402 1 0.953589 0.404075 0.343939
761 1 0.612909 0.486231 0.351658
1006 1 0.894426 0.470814 0.444248
1706 1 0.518327 0.0261288 0.354816
451 1 0.741957 0.0489234 0.336896
2011 1 0.296731 0.133794 0.35762
997 1 0.0564654 0.178412 0.358607
1260 1 0.130848 0.198965 0.368487
1802 1 0.852678 0.246435 0.389644
943 1 0.111299 0.27733 0.349936
1697 1 0.78979 0.370884 0.334098
546 1 0.579986 0.364249 0.354827
486 1 0.700838 0.368164 0.36033
915 1 0.366811 0.404084 0.318414
1407 1 0.410202 0.463241 0.373439
1957 1 0.468607 0.446576 0.348685
484 1 0.716154 0.463556 0.361691
1511 1 0.91264 0.44004 0.334055
229 1 0.946192 0.00644479 0.367241
1798 1 0.28597 0.0674861 0.34379
706 1 0.0253413 0.0824442 0.369219
1841 1 0.335184 0.101612 0.350942
1659 1 0.743115 0.138043 0.356749
1148 1 0.44114 0.115568 0.40163
573 1 0.143982 0.220204 0.375424
1352 1 0.847929 0.192657 0.399399
2020 1 0.542054 0.230875 0.364207
18 1 0.216436 0.294757 0.374029
772 1 0.626141 0.347718 0.343593
711 1 0.642221 0.319319 0.3658
1624 1 0.265964 0.327623 0.402271
596 1 0.391119 0.339841 0.363032
340 1 0.918415 0.384107 0.348541
142 1 0.76742 0.403073 0.348621
46 1 0.73509 0.417214 0.42859
1964 1 0.391321 0.421281 0.361911
446 1 0.67206 0.42502 0.387269
110 1 0.840995 0.42752 0.377054
1126 1 0.199848 0.493906 0.113126
1979 1 0.985454 0.0337928 0.415788
1326 1 0.0584838 0.0450533 0.423291
489 1 0.935095 0.127473 0.403379
534 1 0.511793 0.129171 0.415347
9 1 0.630368 0.126322 0.390013
685 1 0.46683 0.180031 0.394176
1356 1 0.702487 0.218941 0.390176
297 1 0.393243 0.355148 0.405411
1200 1 0.804272 0.358511 0.400977
1875 1 0.0250826 0.408265 0.378116
1239 1 0.76935 0.42841 0.395831
1506 1 0.427035 0.391964 0.387687
892 1 0.114591 0.477089 0.386378
813 1 0.13156 0.474727 0.412473
1250 1 0.549323 0.0168639 0.493876
920 1 0.255321 0.0238823 0.393062
23 1 0.570509 0.011873 0.408862
191 1 0.557202 0.0817128 0.426411
1476 1 0.581132 0.0883823 0.430376
1358 1 0.623179 0.163355 0.420241
434 1 0.0320096 0.145589 0.401529
1041 1 0.119983 0.137126 0.416712
1509 1 0.636546 0.152613 0.398406
1001 1 0.440227 0.209425 0.434999
1586 1 0.497742 0.250169 0.434659
1163 1 0.57648 0.284175 0.422593
798 1 0.816648 0.271779 0.365507
375 1 0.271469 0.343671 0.429333
1378 1 0.264551 0.390454 0.422511
90 1 0.966345 0.395512 0.392158
904 1 0.569738 0.439602 0.424861
1009 1 0.62396 0.448156 0.443045
856 1 0.0909231 0.499201 0.40834
1546 1 0.637567 0.00368372 0.470962
1486 1 0.675803 0.0492677 0.410599
1866 1 0.997492 0.0387857 0.437056
201 1 0.364111 0.0860163 0.428872
1864 1 0.371658 0.0974585 0.423471
698 1 0.985774 0.1578 0.440987
900 1 0.741933 0.156704 0.420521
777 1 0.406107 0.146926 0.430245
1116 1 0.339722 0.212132 0.433026
593 1 0.475673 0.2283 0.463117
423 1 0.43451 0.273722 0.425894
609 1 0.609848 0.258704 0.425897
39 1 0.9896 0.281123 0.419554
1615 1 0.43309 0.307386 0.404697
1629 1 0.835095 0.301706 0.409256
1661 1 0.115849 0.358888 0.433967
1795 1 0.725579 0.35061 0.45074
1268 1 0.156952 0.423267 0.438241
2043 1 0.424435 0.408652 0.400028
981 1 0.61054 0.0579737 0.446094
1674 1 0.25914 0.0946861 0.455364
1159 1 0.653826 0.110364 0.434264
1832 1 0.621236 0.126152 0.488504
1110 1 0.628451 0.191185 0.429871
1733 1 0.270815 0.163844 0.454891
695 1 0.627647 0.150208 0.460377
676 1 0.539744 0.226406 0.466235
462 1 0.756531 0.240174 0.442464
388 1 0.293288 0.263069 0.439886
996 1 0.784 0.30978 0.486403
2033 1 0.621145 0.367388 0.46303
1211 1 0.79492 0.413144 0.430418
126 1 0.912779 0.37927 0.471307
1105 1 0.201093 0.420421 0.45437
465 1 0.204391 0.477661 0.447549
1771 1 0.657009 0.462298 0.458775
209 1 0.907869 0.460555 0.393258
311 1 0.00169978 0.0205474 0.230054
118 1 0.140337 0.0354995 0.487565
508 1 0.257665 0.0841476 0.48181
1606 1 0.367433 0.495081 0.0215278
149 1 0.852597 0.0995409 0.479357
1273 1 0.436386 0.127794 0.475623
125 1 0.988543 0.136614 0.452844
751 1 0.123272 0.133621 0.466242
1707 1 0.459829 0.181968 0.470341
1844 1 0.0229065 0.155007 0.492569
950 1 0.520117 0.14909 0.494856
1810 1 0.449844 0.20402 0.483241
1754 1 0.835335 0.191805 0.48061
2019 1 0.81476 0.322264 0.480119
1207 1 0.384924 0.33355 0.475302
25 1 0.146036 0.216999 0.496618
1078 1 0.82057 0.355683 0.455428
692 1 0.792979 0.400799 0.470099
217 1 0.351407 0.46757 0.467386
367 1 0.175419 0.45168 0.499901
1536 1 0.227379 0.49695 0.480599
384 1 0.949611 0.460067 0.463228
1416 1 0.894323 0.158149 0.489418
1763 1 0.425328 0.0168201 0.496882
606 1 0.307346 0.319709 0.491978
1157 1 0.0143212 0.0336522 0.4719
1650 1 0.948488 0.0383907 0.474442
955 1 0.711137 0.0921486 0.48393
903 1 0.918859 0.0861859 0.489071
1317 1 0.662427 0.482518 0.358269
984 1 0.836229 0.135859 0.476618
1877 1 0.615572 0.0072142 0.222345
1610 1 0.522706 0.0116992 0.0977406
1576 1 0.06048 0.483591 0.117668
1276 1 0.707841 0.246428 0.475152
1655 1 0.751349 0.234482 0.495826
1612 1 0.0761933 0.0316988 0.129802
1553 1 0.553494 0.0316144 0.395111
702 1 0.978143 0.202708 0.493769
1251 1 0.943796 0.483089 0.00293176
1262 1 0.744197 0.0864972 0.498373
507 1 0.774222 0.476984 0.00201014
1376 1 0.901124 0.470395 0.000434215
155 1 0.945546 0.428448 0.0019166
1910 1 0.918748 0.00508635 0.660223
1161 1 0.32937 0.0363879 0.767221
400 1 0.939811 0.0449434 0.539176
472 1 0.955921 0.0705955 0.518583
2036 1 0.00749938 0.344102 0.504185
771 1 0.201958 0.155271 0.507129
1515 1 0.525017 0.400043 0.994731
179 1 0.957896 0.157861 0.517591
677 1 0.0849944 0.175271 0.503416
578 1 0.596089 0.157135 0.50582
1824 1 0.964757 0.173698 0.519882
1198 1 0.579818 0.0121248 0.595119
1804 1 0.224826 0.00448764 0.576947
1065 1 0.425105 0.141999 0.524106
252 1 0.611648 0.26723 0.54327
103 1 0.520824 0.393958 0.517294
273 1 0.729191 0.395303 0.516364
256 1 0.390033 0.070384 0.99622
934 1 0.502671 0.449045 0.523582
1119 1 0.729289 0.494154 0.512634
2000 1 0.787272 0.435288 0.507605
322 1 0.435265 0.0766063 0.531388
622 1 0.323324 0.093848 0.508818
999 1 0.123292 0.338077 0.996655
1390 1 0.761379 0.116591 0.538261
2030 1 0.821122 0.176986 0.5491
1173 1 0.0725029 0.205885 0.530745
1951 1 0.94442 0.24901 0.532233
75 1 0.210208 0.285656 0.554308
1965 1 0.471292 0.239032 0.530977
1540 1 0.0896024 0.315526 0.536505
1757 1 0.525388 0.36919 0.545062
353 1 0.343047 0.431756 0.501709
1304 1 0.957228 0.387632 0.524597
667 1 0.639429 0.426585 0.524114
151 1 0.885408 0.432603 0.516243
1386 1 0.82655 0.464445 0.50751
728 1 0.640244 0.472531 0.558997
805 1 0.762249 0.463257 0.536328
734 1 0.87144 0.0401868 0.535601
1948 1 0.149873 0.0514 0.546164
1622 1 0.371702 0.150459 0.527248
195 1 0.936829 0.170906 0.519624
1519 1 0.293074 0.18996 0.559571
829 1 0.187203 0.188317 0.554845
717 1 0.119509 0.213477 0.53862
337 1 0.739498 0.251661 0.523816
705 1 0.972093 0.218553 0.548185
1038 1 0.0211311 0.267594 0.600565
1743 1 0.227732 0.273612 0.519011
32 1 0.68393 0.281542 0.553952
815 1 0.437864 0.348729 0.552291
1916 1 0.495051 0.341051 0.553071
919 1 0.556448 0.354711 0.561781
962 1 0.937948 0.370059 0.550261
430 1 0.960889 0.376286 0.529946
52 1 0.0971821 0.411958 0.557277
1537 1 0.892215 0.387746 0.527874
60 1 0.0279057 0.416001 0.550582
659 1 0.429646 0.472875 0.547508
1758 1 0.0421646 0.348969 0.97784
1095 1 0.774431 0.448495 0.533406
389 1 0.378046 0.026223 0.554446
1368 1 0.617926 0.176169 0.575322
748 1 0.064044 0.180648 0.582845
572 1 0.219933 0.202857 0.57367
1677 1 0.909852 0.184356 0.567606
1990 1 0.653392 0.237566 0.58024
1693 1 0.207747 0.288734 0.600002
1099 1 0.243105 0.295768 0.544842
226 1 0.170844 0.356708 0.560755
1882 1 0.929922 0.375479 0.564165
1275 1 0.0499757 0.415015 0.557494
294 1 0.918708 0.381826 0.559626
1160 1 0.776117 0.384908 0.55624
28 1 0.219851 0.453786 0.621641
560 1 0.133836 0.454494 0.541964
225 1 0.35335 0.00618314 0.552474
1724 1 0.648559 0.0621633 0.56811
1293 1 0.839065 0.0541106 0.587602
216 1 0.84933 0.079844 0.600574
2009 1 0.568165 0.138024 0.556378
1668 1 0.80223 0.137909 0.622141
638 1 0.184603 0.127389 0.571893
2006 1 0.338116 0.172074 0.592856
1776 1 0.390738 0.213948 0.562223
1021 1 0.476951 0.260447 0.553974
1849 1 0.918502 0.287098 0.568538
1694 1 0.769039 0.319595 0.589897
435 1 0.311994 0.346062 0.560818
1756 1 0.904672 0.325863 0.589873
953 1 0.126092 0.350222 0.566063
1205 1 0.479862 0.429364 0.634488
1593 1 0.625328 0.402982 0.619919
1253 1 0.28412 0.432136 0.571798
700 1 0.354752 0.425754 0.587887
1409 1 0.318499 0.435915 0.564344
1090 1 0.462709 0.457094 0.60787
275 1 0.604756 0.448589 0.55782
312 1 0.804757 0.459267 0.61295
1410 1 0.296812 0.334761 0.988374
1354 1 0.507915 0.00133262 0.60762
158 1 0.108016 0.0999978 0.594314
148 1 0.248263 0.0982143 0.596467
952 1 0.290649 0.0847743 0.573794
1434 1 0.263354 0.0993425 0.617522
993 1 0.856844 0.102112 0.603374
1607 1 0.75806 0.176212 0.599883
296 1 0.437723 0.167068 0.602201
1508 1 0.533225 0.205946 0.604623
966 1 0.655922 0.302833 0.633117
735 1 0.777672 0.331994 0.591135
1781 1 0.317153 0.432436 0.574311
1024 1 0.662158 0.449449 0.613063
84 1 0.896489 0.468616 0.645559
218 1 0.949116 0.135349 0.501932
792 1 0.984729 0.017715 0.631587
1737 1 0.727269 0.0330371 0.636363
1555 1 0.43978 0.137062 0.645414
1676 1 0.202335 0.15291 0.634331
1123 1 0.946714 0.10235 0.63144
1584 1 0.965899 0.145008 0.616525
334 1 0.463343 0.136218 0.652317
707 1 0.913379 0.199089 0.587598
918 1 0.800324 0.217762 0.605811
1146 1 0.825228 0.260502 0.621957
1414 1 0.533274 0.310983 0.655796
1279 1 0.83426 0.397097 0.627423
162 1 0.752499 0.457507 0.63952
922 1 0.0430949 0.0460103 0.621053
1929 1 0.475039 0.0780988 0.620232
1980 1 0.741053 0.140987 0.649771
134 1 0.156997 0.136487 0.621349
581 1 0.212377 0.129209 0.623524
1853 1 0.0815833 0.167905 0.655338
634 1 0.318021 0.164514 0.643863
639 1 0.0625782 0.231753 0.636408
1485 1 0.803989 0.23893 0.616792
335 1 0.185689 0.292448 0.650751
1521 1 0.183493 0.28799 0.662166
1905 1 0.422606 0.291433 0.643455
127 1 0.462784 0.377889 0.654357
1986 1 0.634285 0.352234 0.640282
1842 1 0.861665 0.37154 0.620646
648 1 0.529475 0.381999 0.626312
88 1 0.650515 0.393707 0.65436
1512 1 0.0319032 0.423314 0.641469
1372 1 0.0764377 0.44355 0.64257
185 1 0.355154 0.433963 0.630112
1070 1 0.713915 0.448109 0.595027
1131 1 0.812862 0.453781 0.643536
982 1 0.76514 0.00978978 0.643755
838 1 0.896122 0.0356241 0.646672
1845 1 0.138269 0.0799141 0.643928
960 1 0.286853 0.0536719 0.674932
1349 1 0.441937 0.0843816 0.641032
1229 1 0.924184 0.0584507 0.675499
885 1 0.541552 0.235348 0.611072
1846 1 0.044936 0.338864 0.663773
1442 1 0.500872 0.414735 0.675157
1360 1 0.102741 0.410255 0.665742
1630 1 0.670872 0.452554 0.662495
2021 1 0.94582 0.490205 0.651254
732 1 0.00729977 0.0234138 0.674983
1285 1 0.47823 0.151473 0.676081
1984 1 0.18379 0.153136 0.689798
1855 1 0.386201 0.198845 0.701088
1369 1 0.180022 0.210195 0.654286
1151 1 0.203528 0.210962 0.693207
1032 1 0.842189 0.267063 0.699583
57 1 0.275307 0.284783 0.726077
989 1 0.839232 0.27188 0.673943
921 1 0.972767 0.310115 0.702365
871 1 0.247256 0.397412 0.672924
590 1 0.818643 0.389506 0.693028
1815 1 0.00320304 0.436364 0.704364
1043 1 0.240006 0.426771 0.686878
770 1 0.732589 0.442521 0.680418
1046 1 0.156495 0.458329 0.638213
905 1 0.0876191 0.498858 0.962326
932 1 0.265766 0.201073 0.970924
1653 1 0.306105 0.0470118 0.513124
1249 1 0.441209 0.0851869 0.71006
120 1 0.224092 0.0856032 0.691128
1735 1 0.799248 0.128233 0.694899
15 1 0.291883 0.185712 0.683638
1048 1 0.23778 0.156264 0.692923
291 1 0.421178 0.152294 0.6978
1421 1 0.901835 0.188693 0.658262
1222 1 0.817502 0.270469 0.718293
1220 1 0.304165 0.262834 0.699174
1470 1 0.462745 0.38199 0.697172
1599 1 0.507951 0.358834 0.731968
347 1 0.641888 0.405368 0.701592
325 1 0.526657 0.408943 0.696275
850 1 0.0685711 0.421226 0.700366
916 1 0.914856 0.446422 0.687486
82 1 0.211704 0.27326 0.994123
401 1 0.150577 0.054674 0.71262
172 1 0.148158 0.0791889 0.695695
1348 1 0.901412 0.0895681 0.695958
153 1 0.0834221 0.0893387 0.699031
1687 1 0.0147179 0.131128 0.691135
1121 1 0.112809 0.179145 0.699222
1835 1 0.310046 0.151275 0.697945
782 1 0.298485 0.219233 0.692989
181 1 0.848003 0.24038 0.742073
80 1 0.532337 0.266344 0.709713
1355 1 0.551796 0.263555 0.730322
510 1 0.221254 0.311425 0.691229
927 1 0.543793 0.302301 0.711319
343 1 0.585933 0.354315 0.665691
145 1 0.694903 0.326878 0.718226
7 1 0.409937 0.35227 0.742063
2023 1 0.176272 0.362905 0.719142
727 1 0.774012 0.416958 0.691884
1575 1 0.950214 0.420155 0.709617
1137 1 0.946424 0.418187 0.758836
503 1 0.150193 0.0313848 0.685963
1665 1 0.11089 0.220786 0.501509
117 1 0.847933 0.0208804 0.706365
213 1 0.773111 0.0501034 0.757786
526 1 0.656743 0.0970737 0.757741
1909 1 0.528133 0.178941 0.749093
641 1 0.586235 0.157017 0.740419
906 1 0.934256 0.192061 0.748845
1919 1 0.39624 0.296182 0.696641
615 1 0.670152 0.314197 0.771106
354 1 0.797821 0.329474 0.771911
135 1 0.392527 0.352632 0.730368
1156 1 0.846928 0.39927 0.751007
607 1 0.878499 0.382807 0.706248
1415 1 0.956953 0.365524 0.753126
1522 1 0.367829 0.431744 0.686047
428 1 0.828803 0.456857 0.728079
730 1 0.387658 0.46593 0.740443
290 1 0.920966 0.135507 0.988009
1138 1 0.909106 0.393463 0.992556
611 1 0.79275 0.0511029 0.771758
1171 1 0.116261 0.0711265 0.74654
1117 1 0.232792 0.0660723 0.730329
588 1 0.908363 0.0942636 0.775689
845 1 0.179546 0.125878 0.746885
886 1 0.347671 0.101679 0.747171
870 1 0.520541 0.124239 0.737022
1454 1 0.415291 0.143574 0.726899
1180 1 0.0196369 0.222297 0.751921
938 1 0.592932 0.193953 0.784859
1850 1 0.667061 0.260943 0.745726
475 1 0.351992 0.314286 0.74546
454 1 0.683342 0.356126 0.714274
819 1 0.177196 0.371384 0.771115
433 1 0.42003 0.374384 0.74773
603 1 0.383873 0.404266 0.74652
27 1 0.959111 0.464493 0.755641
1281 1 0.0242337 0.459412 0.752684
1574 1 0.564976 0.221878 0.533089
1433 1 0.283152 0.0313929 0.779063
51 1 0.472862 0.0147888 0.754604
1025 1 0.497365 0.0483423 0.760349
1086 1 0.709638 0.0660692 0.774768
1457 1 0.733708 0.0721536 0.751776
1404 1 0.867881 0.06897 0.76844
115 1 0.181187 0.0666006 0.778347
490 1 0.528519 0.114109 0.775568
1030 1 0.368059 0.0921749 0.779058
602 1 0.525659 0.0993301 0.753706
1799 1 0.625093 0.0952069 0.741381
1716 1 0.522328 0.123777 0.76896
3 1 0.111814 0.120672 0.78306
785 1 0.336949 0.129854 0.759203
1299 1 0.858244 0.197993 0.786296
31 1 0.135536 0.150965 0.793152
183 1 0.455089 0.180621 0.78337
140 1 0.479025 0.251006 0.746453
528 1 0.948952 0.203224 0.743983
1193 1 0.415281 0.215043 0.755454
1551 1 0.151319 0.307604 0.755261
1526 1 0.0404171 0.369837 0.769556
1759 1 0.175001 0.360532 0.788571
1497 1 0.517832 0.377656 0.78546
2010 1 0.952821 0.36518 0.741736
2046 1 0.0152725 0.394502 0.735752
913 1 0.0988293 0.390344 0.766121
569 1 0.643117 0.403156 0.764841
251 1 0.190027 0.402763 0.757954
1770 1 0.640495 0.44464 0.74108
33 1 0.151648 0.462237 0.764409
234 1 0.678388 0.498543 0.721055
1278 1 0.68019 0.0367792 0.806554
601 1 0.625489 0.0309057 0.780056
1906 1 0.551703 0.101635 0.797682
584 1 0.0393462 0.159271 0.762927
6 1 0.918262 0.152816 0.811241
1471 1 0.604617 0.172461 0.801282
21 1 0.069003 0.26504 0.805286
356 1 0.687691 0.301253 0.808917
675 1 0.54525 0.333668 0.818992
866 1 0.740348 0.343797 0.802844
1726 1 0.966301 0.333186 0.788799
1926 1 0.532697 0.41836 0.762789
242 1 0.227321 0.452876 0.776081
1479 1 0.796121 0.375711 0.776628
1403 1 0.710509 0.488573 0.766506
2024 1 0.980685 0.413934 0.511796
1704 1 0.831991 0.0670343 0.799047
718 1 0.821351 0.0805388 0.812159
327 1 0.107044 0.136746 0.836931
1445 1 0.742827 0.156598 0.812125
1557 1 0.324258 0.381794 0.821729
1518 1 0.37074 0.394075 0.82144
1657 1 0.535701 0.392946 0.812701
876 1 0.430038 0.461197 0.781716
1149 1 0.239023 0.481276 0.803765
230 1 0.104006 0.0261446 0.83199
760 1 0.743666 0.127504 0.82294
204 1 0.74067 0.127795 0.805405
1162 1 0.0151137 0.148754 0.777113
161 1 0.424386 0.234185 0.835033
85 1 0.359005 0.2391 0.797709
1859 1 0.740782 0.262702 0.842556
1922 1 0.644538 0.342252 0.825525
1374 1 0.557517 0.371808 0.816205
901 1 0.918348 0.360351 0.813008
1597 1 0.620129 0.359358 0.790878
320 1 0.608365 0.434293 0.806444
1960 1 0.89461 0.44241 0.826314
65 1 0.969777 0.435764 0.972657
19 1 0.599407 0.492778 0.828019
552 1 0.395801 0.000841213 0.819945
79 1 0.641 0.00863928 0.835816
1953 1 0.584786 0.00236675 0.835696
1287 1 0.429047 0.0608424 0.846908
301 1 0.879015 0.0895092 0.823598
1019 1 0.0142392 0.0952976 0.821663
160 1 0.589802 0.135229 0.865227
99 1 0.460741 0.178191 0.842766
411 1 0.746597 0.221445 0.810444
763 1 0.446654 0.277255 0.812357
1243 1 0.725701 0.360952 0.806415
1556 1 0.183573 0.38189 0.845114
1458 1 0.922319 0.385236 0.859708
548 1 0.133688 0.473825 0.81843
644 1 0.335847 0.443101 0.856241
945 1 0.722616 0.0498634 0.861848
78 1 0.763286 0.0427735 0.854514
1570 1 0.0587468 0.0540587 0.855749
1143 1 0.171448 0.0473799 0.864008
1302 1 0.195065 0.0821859 0.859779
54 1 0.204815 0.12707 0.844984
2038 1 0.306612 0.235404 0.842214
492 1 0.610124 0.236679 0.883656
1315 1 0.215823 0.283981 0.858851
1008 1 0.310569 0.260241 0.858379
350 1 0.890941 0.299136 0.866625
1153 1 0.806686 0.325882 0.84816
995 1 0.974806 0.3773 0.850891
865 1 0.178939 0.389257 0.850602
1367 1 0.272464 0.407104 0.864293
617 1 0.262607 0.498164 0.84552
35 1 0.812888 0.0314233 0.887086
737 1 0.984795 0.0303818 0.868876
437 1 0.850749 0.0774389 0.88796
994 1 0.483537 0.0588694 0.898966
1531 1 0.567583 0.192926 0.896229
937 1 0.111856 0.183808 0.88409
1821 1 0.385086 0.222699 0.897643
477 1 0.179943 0.260018 0.861334
1380 1 0.954484 0.2444 0.860412
1935 1 0.242105 0.287809 0.873226
1588 1 0.606003 0.29922 0.887663
2028 1 0.077252 0.308503 0.882617
1387 1 0.194612 0.334103 0.909451
822 1 0.478079 0.357606 0.849366
1843 1 0.0420525 0.363252 0.909505
1034 1 0.708878 0.376938 0.898303
24 1 0.99987 0.369726 0.859439
1177 1 0.663388 0.413513 0.899975
1052 1 0.0941135 0.435725 0.894298
1335 1 0.0275021 0.0185941 0.909268
894 1 0.947103 0.033651 0.899807
208 1 0.826551 0.0246758 0.856315
394 1 0.885662 0.0520034 0.91521
715 1 0.201884 0.07524 0.915197
101 1 0.420976 0.0674124 0.906645
618 1 0.615197 0.0706513 0.904316
361 1 0.885193 0.068698 0.912283
1708 1 0.678781 0.101448 0.877202
627 1 0.880546 0.118392 0.896487
359 1 0.354209 0.14385 0.895708
93 1 0.298779 0.142208 0.88021
818 1 0.890407 0.189472 0.897801
387 1 0.118535 0.18578 0.917767
1340 1 0.701475 0.210053 0.879487
2027 1 0.795407 0.197529 0.935381
939 1 0.774089 0.205561 0.888263
1392 1 0.114716 0.242088 0.923007
1363 1 0.705741 0.265565 0.904071
1002 1 0.358769 0.304187 0.904485
1565 1 0.437634 0.315419 0.895482
1075 1 0.428523 0.300812 0.918477
531 1 0.581551 0.332353 0.897549
556 1 0.121014 0.380901 0.906508
239 1 0.0141776 0.382175 0.927314
245 1 0.255744 0.480239 0.886171
1934 1 0.468257 0.44705 0.857997
879 1 0.437599 0.0475278 0.92119
1240 1 0.434683 0.0473 0.910675
647 1 0.380947 0.0966149 0.986612
697 1 0.987012 0.10278 0.935813
357 1 0.376984 0.10231 0.920471
403 1 0.913504 0.140782 0.891774
930 1 0.300855 0.212571 0.905714
4 1 0.0103756 0.222689 0.898125
804 1 0.111435 0.256306 0.893886
10 1 0.170723 0.237325 0.913908
784 1 0.441299 0.247096 0.900581
1388 1 0.751208 0.260829 0.894875
1012 1 0.838875 0.326292 0.907133
317 1 0.0632871 0.349118 0.910693
656 1 0.712435 0.398715 0.934004
1089 1 0.742193 0.380741 0.926106
1337 1 0.558289 0.415082 0.917199
1188 1 0.932954 0.428284 0.918691
1792 1 0.0492324 0.00461892 0.965344
1122 1 0.690021 0.0378206 0.952684
1517 1 0.792205 0.0392588 0.89945
1942 1 0.413678 0.0232569 0.951999
1764 1 0.163801 0.0389413 0.914661
1265 1 0.322723 0.0426704 0.914414
1967 1 0.876165 0.0841487 0.943124
258 1 0.174172 0.122025 0.917448
1603 1 0.429143 0.127733 0.93828
480 1 0.565372 0.139216 0.925051
376 1 0.524574 0.225278 0.958665
1550 1 0.885533 0.245569 0.957196
1623 1 0.772957 0.254553 0.899588
1005 1 0.850634 0.262534 0.932374
1027 1 0.522879 0.265009 0.933203
420 1 0.973841 0.27174 0.934948
1563 1 0.587493 0.331687 0.919761
789 1 0.0923946 0.395596 0.926406
640 1 0.884486 0.386449 0.933392
1667 1 0.251186 0.436231 0.947014
1892 1 0.251891 0.435526 0.924371
1362 1 0.450739 0.461245 0.944099
1594 1 0.00295967 0.445562 0.950656
1044 1 0.045637 0.362074 0.978739
1465 1 0.877717 0.053223 0.973304
872 1 0.981419 0.0562375 0.911938
1183 1 0.0203231 0.109896 0.963604
129 1 0.557437 0.10881 0.9635
1609 1 0.733369 0.126391 0.916637
854 1 0.335801 0.114067 0.931278
1395 1 0.863012 0.106961 0.965927
1491 1 0.596166 0.186161 0.953088
965 1 0.205677 0.205864 0.948301
119 1 0.240437 0.2426 0.958775
415 1 0.624902 0.272059 0.981518
1236 1 0.019676 0.321925 0.970652
1439 1 0.0552616 0.359601 0.93684
1767 1 0.134867 0.349347 0.950336
1314 1 0.654987 0.299307 0.999422
1527 1 0.254599 0.338093 0.940351
124 1 0.513785 0.336614 0.940915
1879 1 0.671259 0.33159 0.97149
694 1 0.246827 0.369764 0.95454
857 1 0.466848 0.347741 0.931892
424 1 0.707363 0.452418 0.945413
26 1 0.21655 0.482542 0.956671
1212 1 0.955272 0.493862 0.929894
689 1 0.787768 0.411937 0.981659
505 1 0.0547709 0.0342811 0.973816
1227 1 0.9481 0.0582981 0.947768
1199 1 0.133679 0.0332901 0.976772
330 1 0.302436 0.0967016 0.981273
1215 1 0.474208 0.0669345 0.989919
1870 1 0.81267 0.125158 0.964288
882 1 0.225834 0.165303 0.981958
339 1 0.996723 0.211147 0.994796
875 1 0.192979 0.216375 0.958376
801 1 0.393325 0.210817 0.993982
788 1 0.86931 0.231163 0.91786
439 1 0.502715 0.250417 0.924047
272 1 0.561554 0.247538 0.966805
1289 1 0.361455 0.30372 0.959089
862 1 0.962549 0.0152923 0.757948
1013 1 0.732484 0.397656 0.979941
1755 1 0.225413 0.448595 0.924553
1685 1 0.314249 0.479616 0.951482
1811 1 0.00826005 0.417047 0.973124
1602 1 0.359317 0.485965 0.996233
461 1 0.870377 0.0495804 0.505387
71 1 0.0581167 0.00248328 0.804584
1303 1 0.0366853 0.440511 0.96518
663 1 0.775318 0.484485 0.988622
1274 1 0.38728 0.496932 0.639285
1460 1 0.155261 0.0288777 0.964608
1423 1 0.341146 0.490946 0.512564
1949 1 0.659554 0.0149774 0.960974
189 1 0.206843 0.0933366 0.957792
1680 1 0.676591 0.372334 0.992987
714 1 0.117442 0.12737 0.974762
637 1 0.244554 0.228812 0.985291
1813 1 0.58905 0.244404 0.993934
2025 1 0.882534 0.287189 0.973659
821 1 0.680753 0.517498 0.00120526
753 1 0.842881 0.537743 0.0144082
2044 1 0.684261 0.975878 0.163745
1627 1 0.537985 0.556319 0.0118863
1923 1 0.894074 0.511606 0.243995
1429 1 0.0299054 0.664333 0.00183642
1128 1 0.558902 0.694671 0.00457324
1530 1 0.62363 0.672518 0.02498
1666 1 0.816459 0.507239 0.257239
958 1 0.185928 0.757876 0.00375326
1165 1 0.273604 0.77749 0.0149634
561 1 0.525791 0.610401 0.00102261
1381 1 0.6774 0.989888 0.142626
899 1 0.40973 0.929268 0.470527
1168 1 0.294903 0.978009 0.339574
244 1 0.0191063 0.848056 0.0307937
762 1 0.54605 0.873021 0.0217874
1448 1 0.107692 0.989121 0.137403
987 1 0.0495052 0.987643 0.0177045
1739 1 0.729782 0.946424 0.036987
1259 1 0.853734 0.960158 0.0300567
203 1 0.769991 0.565997 0.481779
924 1 0.380656 0.663112 0.00567853
1336 1 0.399554 0.517223 0.00979462
1228 1 0.42128 0.523379 0.0112834
111 1 0.874434 0.54175 0.0232439
1389 1 0.468291 0.550758 0.0674829
303 1 0.453434 0.574125 0.0331667
1204 1 0.657548 0.684999 0.0502703
977 1 0.194476 0.643829 0.0335057
1642 1 0.632805 0.714569 0.0144866
109 1 0.240363 0.73141 0.0240041
1830 1 0.716342 0.729439 0.0592095
830 1 0.791152 0.734962 0.0288704
1361 1 0.347449 0.800174 0.0520667
2005 1 0.819288 0.784678 0.0329219
476 1 0.969495 0.822828 0.0354396
540 1 0.459262 0.911085 0.0329009
1431 1 0.748791 0.888836 0.0356217
1324 1 0.294041 0.979757 0.0294199
1209 1 0.963278 0.510082 0.360245
1784 1 0.460004 0.520881 0.0545382
56 1 0.792449 0.508846 0.0459839
530 1 0.725979 0.605419 0.0548721
1740 1 0.428662 0.624961 0.0426703
926 1 0.914071 0.661441 0.0447945
2002 1 0.384865 0.735546 0.0348404
1559 1 0.416272 0.704958 0.0477722
1114 1 0.238383 0.722449 0.0435056
12 1 0.411606 0.747634 0.0624181
1751 1 0.860162 0.723775 0.0754429
1452 1 0.236648 0.767483 0.0850856
840 1 0.826521 0.757443 0.0606801
299 1 0.852856 0.754276 0.0697647
666 1 0.427783 0.801712 0.0381926
1307 1 0.468697 0.82891 0.0428435
448 1 0.246072 0.811219 0.0581602
874 1 0.0229632 0.942919 0.0611484
485 1 0.405383 0.990021 0.040309
1567 1 0.774562 0.987215 0.0401047
795 1 0.348794 0.50754 0.396721
1481 1 0.0561245 0.638665 0.0901356
1663 1 0.322008 0.611707 0.0776844
261 1 0.369973 0.601574 0.0867138
726 1 0.435835 0.646686 0.049809
2039 1 0.354653 0.607598 0.0708579
1080 1 0.930332 0.630522 0.0639055
133 1 0.413423 0.76374 0.0535996
1761 1 0.254082 0.777311 0.0518273
1598 1 0.373313 0.798545 0.06931
1636 1 0.702097 0.78715 0.0625852
1684 1 0.41263 0.834369 0.0609607
1534 1 0.109989 0.886457 0.0600733
276 1 0.843544 0.906551 0.0566541
1872 1 0.368452 0.916687 0.0637389
1747 1 0.744246 0.969167 0.0667446
1252 1 0.994038 0.998428 0.0878692
1937 1 0.0377791 0.570773 0.0825919
360 1 0.957369 0.551004 0.0572848
1851 1 0.0722089 0.639818 0.0768009
1647 1 0.482854 0.632443 0.0937013
1744 1 0.745759 0.650315 0.0731081
182 1 0.737374 0.576309 0.0517696
1311 1 0.928929 0.647864 0.110951
2031 1 0.09965 0.664106 0.0671157
978 1 0.632667 0.663691 0.0928547
1966 1 0.761276 0.661417 0.102713
746 1 0.635298 0.740045 0.104402
495 1 0.818706 0.730711 0.0727873
783 1 0.35993 0.761825 0.0821454
1505 1 0.178562 0.759098 0.0779107
186 1 0.262684 0.781152 0.126997
268 1 0.443418 0.780487 0.0801271
1601 1 0.955085 0.774462 0.0620501
368 1 0.307515 0.825356 0.0969313
1595 1 0.36454 0.831614 0.0875688
910 1 0.978806 0.808593 0.104864
736 1 0.124974 0.848215 0.0905086
604 1 0.899607 0.836993 0.10498
909 1 0.550752 0.909111 0.0660057
1370 1 0.0722454 0.87965 0.0937172
1971 1 0.949883 0.883652 0.0970192
608 1 0.353686 0.907735 0.105367
2008 1 0.462478 0.959813 0.0933617
248 1 0.614323 0.942234 0.0815109
1427 1 0.28763 0.968908 0.100672
459 1 0.400021 0.512616 0.115032
635 1 0.0322211 0.99505 0.240634
1201 1 0.407014 0.507841 0.124187
1322 1 0.576184 0.606133 0.0784174
671 1 0.374174 0.626461 0.0866787
344 1 0.929748 0.598189 0.0715105
877 1 0.922958 0.626445 0.160518
964 1 0.377898 0.645283 0.0896205
247 1 0.381125 0.63625 0.0875174
1867 1 0.563347 0.664015 0.0768707
749 1 0.712867 0.679373 0.0725523
1662 1 0.427422 0.694779 0.0926467
712 1 0.113577 0.735748 0.0905594
47 1 0.0127973 0.766525 0.089707
1373 1 0.975143 0.842292 0.111373
1495 1 0.781355 0.83382 0.114389
594 1 0.630157 0.926969 0.123893
86 1 0.0184632 0.970316 0.104348
1520 1 0.394195 0.959066 0.489109
2032 1 0.599586 0.940028 0.111838
1683 1 0.64021 0.528713 0.0990295
1425 1 0.156409 0.520975 0.103791
22 1 0.968773 0.818097 0.488809
1094 1 0.663656 0.50777 0.106737
2003 1 0.947046 0.527524 0.130448
1814 1 0.933941 0.530233 0.0977982
422 1 0.986455 0.575661 0.127707
1223 1 0.167204 0.607581 0.0990181
210 1 0.732445 0.62659 0.120635
1313 1 0.441999 0.659668 0.127258
1974 1 0.545069 0.638286 0.147287
105 1 0.748383 0.675214 0.0881586
1178 1 0.569032 0.730222 0.156154
544 1 0.996293 0.673545 0.132002
412 1 0.598312 0.70665 0.132064
1617 1 0.0700659 0.710673 0.094649
386 1 0.416816 0.702706 0.114491
1218 1 0.226925 0.709148 0.0855505
328 1 0.576329 0.73848 0.134496
651 1 0.825419 0.75495 0.118912
586 1 0.116582 0.782238 0.0864716
731 1 0.122886 0.763991 0.117702
515 1 0.749466 0.789384 0.112473
1412 1 0.921117 0.808166 0.124005
1061 1 0.115489 0.844994 0.113989
1107 1 0.384863 0.854378 0.145366
358 1 0.875596 0.856844 0.138343
1023 1 0.947344 0.82681 0.0962826
1993 1 0.530858 0.895219 0.0846486
1997 1 0.196035 0.943968 0.156936
1417 1 0.454385 0.977204 0.106015
672 1 0.791855 0.973283 0.121032
1477 1 0.182846 0.990179 0.134811
630 1 0.208723 0.610743 0.136896
826 1 0.569447 0.608289 0.123355
2016 1 0.838835 0.602928 0.119905
969 1 0.926222 0.611326 0.125118
1359 1 0.296677 0.64408 0.14081
1652 1 0.321845 0.656809 0.133732
1115 1 0.491993 0.647284 0.139808
407 1 0.489014 0.612206 0.149475
538 1 0.749148 0.657038 0.159508
168 1 0.682811 0.801037 0.119442
1092 1 0.18753 0.80956 0.133692
803 1 0.023649 0.924929 0.182622
1258 1 0.434959 0.568184 0.45787
1295 1 0.645101 0.998014 0.110961
1715 1 0.673793 0.517353 0.169393
1783 1 0.276224 0.526348 0.169752
843 1 0.359273 0.521141 0.145787
348 1 0.570737 0.542588 0.189252
787 1 0.506307 0.544915 0.148572
1000 1 0.185938 0.592679 0.168857
207 1 0.564043 0.648995 0.130501
655 1 0.865933 0.661417 0.17068
729 1 0.837678 0.671323 0.153661
1202 1 0.828902 0.692558 0.12499
1017 1 0.457668 0.756074 0.162171
215 1 0.845027 0.808631 0.187969
565 1 0.756455 0.810688 0.167486
1379 1 0.295336 0.861812 0.161999
1583 1 0.238758 0.888938 0.13981
249 1 0.146272 0.908346 0.149304
1619 1 0.0513885 0.911959 0.176876
1419 1 0.367301 0.994086 0.143696
1216 1 0.0169892 0.965811 0.169923
1345 1 0.0869536 0.893414 0.02917
509 1 0.330166 0.523447 0.177351
1226 1 0.117271 0.57136 0.142757
306 1 0.562465 0.532015 0.147443
949 1 0.573278 0.596546 0.164611
547 1 0.990188 0.618572 0.155863
567 1 0.435294 0.643552 0.194714
1050 1 0.729253 0.654849 0.140964
1865 1 0.839936 0.681945 0.136017
1140 1 0.371745 0.650643 0.143944
1297 1 0.176041 0.680464 0.170531
372 1 0.868338 0.702193 0.183727
1004 1 0.58351 0.751215 0.185611
1498 1 0.662357 0.723986 0.180105
973 1 0.0205791 0.749674 0.190441
779 1 0.609688 0.730963 0.188034
107 1 0.469241 0.761072 0.179941
1709 1 0.62952 0.826131 0.185285
471 1 0.972536 0.831186 0.162466
1651 1 0.119669 0.896613 0.169384
432 1 0.574723 0.979791 0.16935
313 1 0.0106157 0.581321 0.195348
1895 1 0.963262 0.53352 0.216801
887 1 0.405955 0.641715 0.186756
591 1 0.35753 0.720147 0.163666
708 1 0.421366 0.745816 0.194721
1241 1 0.446084 0.737207 0.208501
1104 1 0.225557 0.800644 0.167545
1903 1 0.98 0.755911 0.153884
1298 1 0.640809 0.832669 0.188299
1045 1 0.592039 0.846237 0.15346
1451 1 0.144314 0.878614 0.198552
745 1 0.205712 0.896833 0.203219
1634 1 0.086805 0.876734 0.167515
1343 1 0.158133 0.879087 0.205657
1111 1 0.691253 0.983598 0.197856
458 1 0.752305 0.564281 0.223073
2041 1 0.986033 0.533272 0.244342
1058 1 0.304005 0.562781 0.188991
1192 1 0.872779 0.629772 0.243649
338 1 0.655747 0.729236 0.184439
2012 1 0.118683 0.722361 0.201189
1042 1 0.0634333 0.772709 0.201435
681 1 0.895857 0.830061 0.186474
1399 1 0.683966 0.813892 0.20655
858 1 0.987662 0.861032 0.224748
710 1 0.353925 0.862273 0.225409
1210 1 0.619102 0.850004 0.220781
2034 1 0.204848 0.838134 0.244418
55 1 0.364568 0.865185 0.215148
1981 1 0.241478 0.886321 0.196935
280 1 0.131314 0.904705 0.168901
373 1 0.32442 0.942165 0.229959
1011 1 0.185579 0.560188 0.221937
402 1 0.622745 0.608245 0.210154
1133 1 0.0365476 0.599385 0.221779
391 1 0.359061 0.660063 0.211196
92 1 0.20917 0.709194 0.232443
169 1 0.684696 0.670326 0.220082
1970 1 0.26357 0.669258 0.225559
1277 1 0.916799 0.674972 0.204735
682 1 0.998227 0.648742 0.212486
869 1 0.016288 0.750946 0.24304
282 1 0.163588 0.730841 0.236652
511 1 0.642282 0.751694 0.207916
1786 1 0.58571 0.782324 0.213193
1618 1 0.664019 0.754806 0.261494
263 1 0.753502 0.763472 0.234599
398 1 0.953102 0.831516 0.221162
2026 1 0.223778 0.865934 0.231892
1722 1 0.109365 0.892511 0.225706
1920 1 0.107583 0.882912 0.234771
632 1 0.767241 0.876232 0.195608
1382 1 0.0811333 0.943299 0.21958
498 1 0.669774 0.909822 0.225119
123 1 0.827972 0.904402 0.243885
473 1 0.310694 0.980538 0.236299
1384 1 0.635084 0.990439 0.193478
271 1 0.52038 0.960886 0.488389
988 1 0.629322 0.997919 0.215618
923 1 0.791742 0.911203 0.024412
626 1 0.78169 0.529754 0.247316
136 1 0.846552 0.533724 0.262983
259 1 0.953362 0.624492 0.235848
76 1 0.989108 0.598875 0.261422
1660 1 0.194311 0.640492 0.223875
1904 1 0.964986 0.686678 0.231449
283 1 0.151105 0.722682 0.280446
274 1 0.338842 0.815861 0.256198
190 1 0.205429 0.777525 0.274578
1549 1 0.182898 0.813431 0.213962
1539 1 0.396149 0.838648 0.22274
1952 1 0.059458 0.792824 0.259531
629 1 0.104697 0.85317 0.251132
661 1 0.591326 0.874825 0.264985
157 1 0.159978 0.827455 0.227302
657 1 0.659246 0.86762 0.263546
1112 1 0.336569 0.898196 0.229061
314 1 0.896435 0.840817 0.246983
1101 1 0.916412 0.848917 0.229259
1166 1 0.217486 0.954658 0.248021
624 1 0.545071 0.972053 0.261397
382 1 0.152641 0.542622 0.309558
861 1 0.706102 0.562944 0.289663
1675 1 0.771894 0.542505 0.25128
972 1 0.861637 0.683618 0.263871
1087 1 0.362747 0.767644 0.24944
2 1 0.647269 0.850067 0.258782
929 1 0.614301 0.915764 0.242806
1590 1 0.698462 0.905912 0.272229
542 1 0.879569 0.987137 0.248739
1564 1 0.258775 0.969887 0.295732
967 1 0.344197 0.962504 0.246582
1987 1 0.967428 0.531673 0.287639
1710 1 0.847958 0.835248 0.496119
740 1 0.390901 0.543841 0.315019
1702 1 0.585637 0.547362 0.289873
1316 1 0.901576 0.541323 0.281757
1496 1 0.343599 0.628079 0.264915
1753 1 0.234455 0.671154 0.336964
2004 1 0.429647 0.690882 0.31018
1819 1 0.906786 0.683365 0.260062
58 1 0.358613 0.675857 0.293767
1571 1 0.97933 0.700128 0.281584
1195 1 0.368205 0.761534 0.271115
1862 1 0.948051 0.734685 0.28398
619 1 0.00905473 0.733225 0.299005
1248 1 0.0887495 0.793361 0.290425
1745 1 0.328177 0.782015 0.247657
1632 1 0.354796 0.802452 0.276511
483 1 0.420467 0.78918 0.295465
1408 1 0.672429 0.824124 0.289324
318 1 0.115714 0.866282 0.286721
184 1 0.779997 0.822319 0.309356
1863 1 0.972541 0.842526 0.286498
580 1 0.642531 0.911039 0.322844
212 1 0.258354 0.888821 0.312765
1264 1 0.255442 0.528407 0.294825
1631 1 0.971084 0.888963 0.453826
1147 1 0.19887 0.566459 0.338026
1907 1 0.0119128 0.559059 0.318079
1772 1 0.154311 0.609465 0.29683
1581 1 0.283723 0.617444 0.27788
553 1 0.503367 0.619157 0.327624
828 1 0.698816 0.644499 0.327142
1158 1 0.440934 0.690536 0.325891
1007 1 0.657481 0.720756 0.297711
1301 1 0.813334 0.713098 0.309553
197 1 0.886284 0.732065 0.342351
658 1 0.594889 0.745711 0.312773
1480 1 0.405602 0.808305 0.291635
1998 1 0.330205 0.807625 0.323465
888 1 0.339518 0.837419 0.322578
95 1 0.281685 0.878561 0.320082
1908 1 0.492856 0.870693 0.309857
1493 1 0.966166 0.90527 0.320104
1056 1 0.615321 0.927681 0.340114
1854 1 0.872698 0.996865 0.30515
496 1 0.144236 0.55333 0.337807
1325 1 0.950392 0.506792 0.328369
1794 1 0.0354959 0.584929 0.304501
597 1 0.440012 0.627198 0.366642
152 1 0.024893 0.635558 0.325439
1616 1 0.539835 0.634526 0.329274
902 1 0.0818768 0.714881 0.336872
501 1 0.00330607 0.73771 0.3232
431 1 0.187328 0.738164 0.303688
144 1 0.802305 0.730928 0.340623
97 1 0.642073 0.771234 0.333405
851 1 0.718435 0.773599 0.326688
847 1 0.498575 0.744421 0.336554
192 1 0.848061 0.836675 0.336933
1461 1 0.394242 0.87072 0.336572
1272 1 0.186963 0.883478 0.325579
948 1 0.700597 0.935531 0.328718
1267 1 0.829361 0.947556 0.341002
1291 1 0.480951 0.983827 0.303647
1029 1 0.615369 1.00022 0.335323
1296 1 0.0657413 0.976104 0.282042
545 1 0.187218 0.50677 0.32373
450 1 0.199559 0.712928 0.492581
646 1 0.269755 0.598355 0.343985
1560 1 0.0188021 0.689402 0.357338
174 1 0.443262 0.760828 0.351302
739 1 0.209594 0.778038 0.358465
494 1 0.484088 0.750904 0.336516
1256 1 0.853862 0.797938 0.336172
1552 1 0.0384267 0.821536 0.358224
277 1 0.300813 0.825937 0.346585
224 1 0.806228 0.857928 0.325215
1383 1 0.178148 0.87881 0.333261
971 1 0.2581 0.840936 0.326942
279 1 0.537391 0.920029 0.356976
310 1 0.572995 0.849557 0.348827
1646 1 0.845906 0.910672 0.339787
520 1 0.155578 0.906691 0.328921
1899 1 0.355296 0.917509 0.349657
452 1 0.0987486 0.958262 0.345703
1944 1 0.570268 0.970899 0.337935
1746 1 0.0832893 0.526355 0.361479
523 1 0.931891 0.531958 0.358708
285 1 0.322897 0.550986 0.35714
1079 1 0.973705 0.517855 0.379285
1999 1 0.840488 0.587695 0.318695
1608 1 0.642461 0.597802 0.370091
1554 1 0.927479 0.626873 0.350748
767 1 0.717623 0.626713 0.35188
1401 1 0.919903 0.698404 0.37643
1628 1 0.157311 0.700297 0.32394
683 1 0.568859 0.686153 0.379021
1620 1 0.508619 0.647987 0.322088
1945 1 0.222005 0.70182 0.354794
1711 1 0.646948 0.768708 0.375898
502 1 0.00120536 0.794804 0.348588
131 1 0.374848 0.830077 0.358343
1777 1 0.527999 0.940836 0.340701
138 1 0.560492 0.972662 0.351589
196 1 0.982123 0.979561 0.370369
883 1 0.444619 0.523519 0.374392
1637 1 0.42418 0.803643 0.496837
1063 1 0.73461 0.540139 0.390741
599 1 0.0762594 0.579808 0.347882
533 1 0.879061 0.5953 0.380163
214 1 0.876794 0.556214 0.344663
557 1 0.346237 0.636481 0.401095
1393 1 0.23839 0.6374 0.389464
524 1 0.340518 0.678323 0.38132
180 1 0.86762 0.667689 0.379045
1040 1 0.764517 0.717382 0.394936
1769 1 0.69403 0.734305 0.412896
855 1 0.222679 0.755239 0.409765
154 1 0.75053 0.768451 0.406356
1124 1 0.631461 0.832016 0.362782
260 1 0.605935 0.828525 0.386325
1775 1 0.723893 0.867793 0.390073
413 1 0.923178 0.882586 0.365794
1946 1 0.189425 0.886898 0.38627
518 1 0.653442 0.879747 0.368255
288 1 0.978248 0.897082 0.39032
222 1 0.237643 0.94868 0.395839
1774 1 0.277929 0.959565 0.369499
48 1 0.367524 0.94218 0.372202
558 1 0.705059 0.967369 0.368258
652 1 0.647311 0.937773 0.494416
336 1 0.609995 0.804593 0.483695
1321 1 0.108621 0.586536 0.374891
1818 1 0.147431 0.570823 0.39877
1467 1 0.489016 0.66646 0.388191
1686 1 0.75012 0.7016 0.403096
1217 1 0.123447 0.63574 0.416924
262 1 0.803576 0.728533 0.409169
1596 1 0.872559 0.78162 0.388229
69 1 0.207924 0.771989 0.382998
49 1 0.413307 0.849579 0.398772
1440 1 0.19095 0.909082 0.356062
1365 1 0.974685 0.923601 0.367868
487 1 0.421654 0.940962 0.37466
986 1 0.182057 0.961852 0.386802
1 1 0.554039 0.715802 0.0161144
393 1 0.747684 0.539467 0.446217
512 1 0.9254 0.587896 0.411218
409 1 0.949724 0.597118 0.422927
1573 1 0.512429 0.581479 0.438562
233 1 0.607808 0.616949 0.401349
1436 1 0.869398 0.65947 0.424356
1933 1 0.529135 0.688205 0.414916
678 1 0.596687 0.718579 0.428877
928 1 0.150157 0.725353 0.446678
1884 1 0.303588 0.760006 0.41813
270 1 0.444393 0.759617 0.409414
156 1 0.766253 0.798406 0.447554
1288 1 0.339125 0.869906 0.375213
1633 1 0.180713 0.891451 0.452963
305 1 0.533362 0.866998 0.401613
925 1 0.5702 0.888012 0.414951
108 1 0.223469 0.917059 0.441891
254 1 0.875864 0.932991 0.408914
1861 1 0.0327701 0.953046 0.440067
1096 1 0.976148 0.963655 0.41996
1473 1 0.99004 0.725821 0.471271
842 1 0.989131 0.576778 0.453634
1712 1 0.937564 0.639198 0.397237
442 1 0.470634 0.649201 0.445933
1871 1 0.616491 0.705265 0.441589
1098 1 0.742777 0.799395 0.431825
1789 1 0.176114 0.762747 0.424762
206 1 0.226873 0.984095 0.455415
1797 1 0.662458 0.969643 0.428531
1270 1 0.0693181 0.541705 0.487336
1800 1 0.139612 0.5452 0.485545
1825 1 0.935435 0.566079 0.442829
649 1 0.454402 0.551703 0.46197
1641 1 0.270257 0.569784 0.433742
241 1 0.0668786 0.615228 0.440572
703 1 0.379412 0.6143 0.471177
1645 1 0.087261 0.667984 0.43844
34 1 0.611002 0.720523 0.443391
100 1 0.583641 0.707557 0.472838
1538 1 0.197918 0.733651 0.454341
1793 1 0.172438 0.777009 0.475472
1731 1 0.508728 0.801666 0.44876
1391 1 0.445349 0.782668 0.446611
1943 1 0.419731 0.793145 0.4595
1490 1 0.677085 0.815507 0.479305
610 1 0.984915 0.836426 0.434256
1432 1 0.0156438 0.860467 0.44855
621 1 0.126395 0.96533 0.476409
1719 1 0.869766 0.970431 0.464838
66 1 0.143549 0.510379 0.491803
860 1 0.285435 0.53352 0.48987
1100 1 0.381494 0.543281 0.490716
106 1 0.133999 0.566323 0.471969
493 1 0.116514 0.728474 0.447867
178 1 0.111723 0.68344 0.472242
1868 1 0.853784 0.735964 0.457344
36 1 0.538113 0.773491 0.44537
1568 1 0.984972 0.848054 0.475087
1723 1 0.0221851 0.874843 0.482675
625 1 0.598991 0.962211 0.474756
1350 1 0.0144009 0.955344 0.466796
1858 1 0.0688646 0.864465 0.0188039
1447 1 0.343417 0.991761 0.492308
1329 1 0.0876572 0.862728 0.012494
1377 1 0.71603 0.83479 0.490896
724 1 0.311746 0.991546 0.0436118
1103 1 0.119799 0.818369 0.486849
1566 1 0.444303 0.591066 0.48294
824 1 0.435837 0.712751 0.478837
363 1 0.486999 0.749001 0.481169
1310 1 0.998282 0.788093 0.495563
589 1 0.205983 0.996754 0.297655
287 1 0.732449 0.633337 0.00941855
853 1 0.472186 0.567211 0.0204534
1319 1 0.8305 0.510443 0.0140232
94 1 0.00198988 0.993325 0.0169606
319 1 0.387597 0.998409 0.0325406
1174 1 0.883039 0.944675 0.499755
1187 1 0.937504 0.760849 0.986673
1831 1 0.258366 0.584997 0.527597
687 1 0.861484 0.523954 0.517981
397 1 0.983109 0.548098 0.504255
194 1 0.155168 0.535512 0.522827
1344 1 0.505644 0.580046 0.510384
414 1 0.246899 0.511718 0.693705
759 1 0.3827 0.580869 0.51639
812 1 0.423519 0.616464 0.546764
1721 1 0.644082 0.625446 0.50647
517 1 0.623986 0.673914 0.500187
1996 1 0.27748 0.648342 0.519075
680 1 0.767119 0.990747 0.999693
935 1 0.102126 0.988373 0.78498
236 1 0.322502 0.810157 0.5319
1921 1 0.512892 0.575613 0.515712
419 1 0.70793 0.971827 0.69914
1913 1 0.954315 0.7828 0.544442
1836 1 0.15231 0.772088 0.545196
469 1 0.471944 0.806652 0.51854
1955 1 0.0840184 0.829223 0.974531
1494 1 0.962141 0.858624 0.531041
721 1 0.434662 0.827287 0.529894
1900 1 0.728162 0.857967 0.513094
2035 1 0.0318415 0.890105 0.519243
1219 1 0.326444 0.925882 0.511915
752 1 0.882807 0.512676 0.704428
940 1 0.804878 0.932116 0.524809
1342 1 0.367233 0.999472 0.637181
163 1 0.637908 0.993536 0.934231
1286 1 0.411977 0.571278 0.509957
985 1 0.305185 0.577381 0.533433
89 1 0.612211 0.562575 0.528654
716 1 0.782111 0.512685 0.803496
693 1 0.859416 0.554956 0.518726
605 1 0.343037 0.623903 0.537832
1587 1 0.0661737 0.691184 0.55794
5 1 0.131279 0.677144 0.527565
1031 1 0.307041 0.700477 0.502608
302 1 0.789902 0.723593 0.515927
809 1 0.297852 0.759705 0.50837
1155 1 0.369615 0.746161 0.534994
1585 1 0.0261051 0.778243 0.526297
1869 1 0.948757 0.511741 0.951278
307 1 0.932225 0.81531 0.531496
378 1 0.328806 0.884153 0.518386
1081 1 0.637974 0.890239 0.539331
1504 1 0.777682 0.8675 0.539916
1208 1 0.512415 0.928391 0.567689
1018 1 0.584099 0.953362 0.548485
1455 1 0.571431 0.54273 0.569008
1057 1 0.217611 0.537572 0.510352
1510 1 0.786705 0.519291 0.512121
541 1 0.820651 0.541376 0.551156
1129 1 0.665203 0.556973 0.548044
499 1 0.693291 0.587494 0.549014
781 1 0.2784 0.641445 0.536516
1700 1 0.962484 0.654072 0.583514
613 1 0.535431 0.621659 0.560608
1820 1 0.339506 0.677471 0.55664
481 1 0.862542 0.740996 0.556094
786 1 0.00891665 0.771868 0.548358
623 1 0.0589997 0.824276 0.529286
653 1 0.0587683 0.851002 0.536805
1142 1 0.235677 0.846895 0.506417
670 1 0.498141 0.909245 0.511446
741 1 0.860007 0.519729 0.59647
1499 1 0.475841 0.525696 0.579534
1924 1 0.505028 0.534387 0.591199
2014 1 0.435722 0.5825 0.591373
17 1 0.508269 0.628194 0.553056
1170 1 0.0442865 0.72994 0.544161
1727 1 0.861587 0.760858 0.566841
1514 1 0.141422 0.786255 0.563419
642 1 0.279102 0.896039 0.545343
304 1 0.355758 0.932704 0.576354
102 1 0.724706 0.928987 0.581895
374 1 0.787816 0.897448 0.586562
975 1 0.899451 0.958053 0.567427
1720 1 0.519221 0.948213 0.577135
970 1 0.881417 0.939845 0.575153
1181 1 0.657014 0.981905 0.569339
686 1 0.533261 0.551537 0.996125
1488 1 0.435261 0.537321 0.589954
425 1 0.940228 0.547453 0.599157
704 1 0.187927 0.648409 0.573154
1026 1 0.323799 0.683409 0.582313
1681 1 0.269063 0.640223 0.571309
1294 1 0.0891138 0.714571 0.620907
1975 1 0.0761849 0.796995 0.575946
1406 1 0.224556 0.780387 0.609854
1790 1 0.174483 0.837439 0.61908
1728 1 0.838819 0.836091 0.586121
1614 1 0.790331 0.865564 0.548818
535 1 0.341796 0.86273 0.602647
1385 1 0.547888 0.884973 0.572214
852 1 0.719612 0.925826 0.567063
321 1 0.71784 0.908783 0.623288
1809 1 0.806609 0.937827 0.580198
345 1 0.876997 0.955449 0.546732
1956 1 0.172227 0.977872 0.540743
513 1 0.425121 0.996379 0.602804
128 1 0.232633 0.521712 0.620206
974 1 0.854741 0.51507 0.619573
1418 1 0.982785 0.502701 0.929654
427 1 0.658428 0.538599 0.57987
755 1 0.579527 0.551459 0.597167
1644 1 0.395561 0.520866 0.57593
768 1 0.640726 0.58331 0.613402
395 1 0.90468 0.601674 0.588454
1450 1 0.954415 0.639607 0.603707
1398 1 0.489663 0.6687 0.618004
2048 1 0.728714 0.709166 0.634174
1120 1 0.651477 0.746456 0.610715
211 1 0.0704889 0.741023 0.56271
1532 1 0.00278116 0.773285 0.605298
1108 1 0.266219 0.787097 0.625373
1912 1 0.919898 0.829771 0.628991
1672 1 0.38858 0.843237 0.558511
831 1 0.835248 0.84116 0.634914
864 1 0.775852 0.979646 0.588763
1190 1 0.132657 0.512289 0.636761
1233 1 0.179403 0.582519 0.621448
331 1 0.948739 0.508775 0.657482
585 1 0.554313 0.566133 0.624081
873 1 0.834542 0.544023 0.629853
64 1 0.112289 0.564432 0.626227
1438 1 0.135117 0.546368 0.61667
1242 1 0.989148 0.588819 0.646483
1958 1 0.762835 0.582049 0.623352
592 1 0.846141 0.586953 0.636471
723 1 0.116126 0.636137 0.630096
1426 1 0.841041 0.603318 0.592737
1670 1 0.956251 0.596706 0.599148
44 1 0.379894 0.632771 0.589834
504 1 0.522622 0.650041 0.61029
1939 1 0.763585 0.629393 0.622439
1182 1 0.129081 0.70604 0.608562
1524 1 0.742732 0.752984 0.642968
758 1 0.324373 0.839975 0.603162
951 1 0.684379 0.871629 0.633818
1446 1 0.542237 0.979119 0.617615
1456 1 0.35984 0.843469 0.984747
816 1 0.0608244 0.556504 0.65078
664 1 0.36208 0.523796 0.650839
747 1 0.0092609 0.565903 0.641444
1130 1 0.373654 0.607912 0.659283
1331 1 0.752781 0.664191 0.65922
1420 1 0.899854 0.661633 0.62554
679 1 0.213052 0.751645 0.687964
1247 1 0.124122 0.722945 0.659841
1643 1 0.539984 0.719634 0.644166
1516 1 0.554609 0.774653 0.636587
478 1 0.613154 0.807661 0.623266
1093 1 0.963715 0.833333 0.67564
1930 1 0.783924 0.84619 0.655521
1703 1 0.838753 0.835785 0.623119
1462 1 0.792763 0.889755 0.6404
159 1 0.459728 0.942101 0.605094
559 1 0.369209 0.947926 0.625511
198 1 0.933803 0.963694 0.605744
614 1 0.0393918 0.957542 0.645805
898 1 0.613802 0.995184 0.759604
227 1 0.28468 0.531986 0.645826
1072 1 0.393157 0.549041 0.654454
1840 1 0.704622 0.535593 0.636238
1649 1 0.746824 0.544752 0.678634
1118 1 0.816376 0.55609 0.657537
1896 1 0.538166 0.538577 0.662953
309 1 0.553797 0.588421 0.679042
738 1 0.336791 0.5611 0.646933
466 1 0.391093 0.668081 0.66642
408 1 0.616192 0.658649 0.671557
1888 1 0.0261389 0.718075 0.680939
1020 1 0.966909 0.731599 0.640466
696 1 0.996089 0.782882 0.665271
1600 1 0.783971 0.740243 0.640226
764 1 0.315687 0.869191 0.675227
87 1 0.0285039 0.88386 0.646225
1671 1 0.731763 0.862311 0.645234
1435 1 0.765723 0.901859 0.630922
371 1 0.217991 0.902055 0.644897
1548 1 0.282198 0.937206 0.670886
881 1 0.521475 0.952368 0.654304
1282 1 0.729048 0.926867 0.668877
202 1 0.597894 0.965322 0.684735
1572 1 0.993594 0.981469 0.656392
1613 1 0.465581 0.996491 0.641573
267 1 0.756867 0.529788 0.508074
562 1 0.906828 0.573025 0.696599
243 1 0.496996 0.594709 0.661961
112 1 0.0835019 0.771954 0.666251
1692 1 0.0514113 0.856113 0.649593
1088 1 0.96403 0.817293 0.675152
1698 1 0.436112 0.837524 0.688566
946 1 0.697421 0.845026 0.701883
1003 1 0.326684 0.901177 0.659261
235 1 0.118932 0.884624 0.680314
278 1 0.19693 0.894519 0.680528
200 1 0.120572 0.879521 0.691883
1991 1 0.228543 0.93694 0.683604
193 1 0.188475 0.93172 0.682426
1682 1 0.46927 0.937322 0.657353
1059 1 0.901176 0.60854 0.68034
773 1 0.201834 0.62664 0.689956
878 1 0.241543 0.710717 0.70349
1656 1 0.508602 0.70852 0.718456
532 1 0.133823 0.812271 0.676103
147 1 0.260547 0.865183 0.704716
130 1 0.766418 0.897597 0.691325
1808 1 0.978821 0.851217 0.644498
1152 1 0.336957 0.879328 0.715289
1941 1 0.698635 0.878495 0.687984
941 1 0.0880073 0.916658 0.701082
1696 1 0.357291 0.899392 0.682466
790 1 0.744328 0.948417 0.674681
522 1 0.117059 0.952267 0.718416
29 1 0.436308 0.538361 0.691267
1280 1 0.549456 0.500008 0.704836
1346 1 0.346666 0.551121 0.719256
1778 1 0.746924 0.661557 0.705805
1780 1 0.29417 0.669042 0.707401
891 1 0.766313 0.675243 0.722288
463 1 0.715747 0.726718 0.726179
550 1 0.400851 0.733549 0.687396
650 1 0.586976 0.752852 0.714335
808 1 0.513784 0.856203 0.711852
549 1 0.960548 0.860808 0.726777
377 1 0.350592 0.926134 0.694125
1221 1 0.400859 0.937009 0.705752
1074 1 0.842388 0.935367 0.710395
1169 1 0.0415358 0.967906 0.707477
1985 1 0.136606 0.564681 0.507713
436 1 0.916535 0.549589 0.718058
237 1 0.611544 0.515992 0.734701
1742 1 0.678203 0.530173 0.736094
456 1 0.249147 0.567381 0.737523
1269 1 0.381326 0.591676 0.724116
1592 1 0.525572 0.604628 0.743669
1397 1 0.765563 0.621062 0.732328
141 1 0.055721 0.645982 0.736258
264 1 0.600457 0.675033 0.728469
848 1 0.248895 0.698671 0.725848
464 1 0.121245 0.742123 0.721198
980 1 0.923418 0.747179 0.670235
1489 1 0.973317 0.77516 0.752986
1766 1 0.713092 0.847601 0.744277
150 1 0.65252 0.882782 0.73831
1033 1 0.606624 0.928128 0.743418
246 1 0.542376 0.608597 0.760071
1626 1 0.74441 0.548787 0.712126
1860 1 0.0426045 0.618713 0.738687
1300 1 0.695334 0.6967 0.739801
743 1 0.159852 0.670413 0.764868
326 1 0.767142 0.669146 0.730949
979 1 0.668994 0.790579 0.75667
1428 1 0.546406 0.826189 0.740028
286 1 0.550258 0.876216 0.723108
587 1 0.111382 0.886468 0.776354
1134 1 0.813116 0.959759 0.746441
342 1 0.195681 0.531547 0.779917
961 1 0.49753 0.502976 0.748284
488 1 0.834874 0.50708 0.714635
2047 1 0.571557 0.600622 0.746606
453 1 0.281603 0.596688 0.777544
1139 1 0.143268 0.623216 0.77232
757 1 0.00979567 0.714076 0.792054
43 1 0.442964 0.794902 0.783629
1591 1 0.128228 0.816859 0.803074
1318 1 0.172446 0.886973 0.764655
1894 1 0.434401 0.878233 0.740387
907 1 0.581961 0.900082 0.787475
1648 1 0.305359 0.88952 0.983206
1699 1 0.251282 0.602969 0.520387
1852 1 0.290438 0.536277 0.786795
223 1 0.674634 0.54341 0.809999
175 1 0.869977 0.531637 0.79019
416 1 0.92019 0.541207 0.807158
668 1 0.478622 0.575005 0.802393
219 1 0.295296 0.559891 0.780036
1164 1 0.678213 0.597363 0.770541
1371 1 0.447744 0.635855 0.778016
616 1 0.424131 0.713653 0.763775
40 1 0.392487 0.725438 0.804136
551 1 0.598554 0.714476 0.795682
1113 1 0.984929 0.737329 0.774552
250 1 0.28665 0.71724 0.803906
351 1 0.338485 0.741337 0.816196
1469 1 0.741248 0.793964 0.767304
537 1 0.674926 0.804811 0.782568
868 1 0.068081 0.816159 0.794391
1959 1 0.101866 0.917253 0.805831
146 1 0.299593 0.874094 0.786107
1823 1 0.950489 0.939607 0.830836
438 1 0.651833 0.941787 0.801522
701 1 0.44256 0.503398 0.808101
238 1 0.679781 0.545417 0.794677
1988 1 0.648277 0.594257 0.811734
45 1 0.264576 0.569725 0.853453
1284 1 0.0293283 0.655179 0.813387
315 1 0.834301 0.662791 0.790791
1785 1 0.865203 0.683234 0.783026
81 1 0.0119117 0.695687 0.857503
70 1 0.875453 0.728424 0.815876
14 1 0.561852 0.764381 0.803126
457 1 0.864169 0.778761 0.803714
893 1 0.757463 0.827228 0.789151
1194 1 0.431516 0.802493 0.826321
289 1 0.633745 0.814975 0.793551
1437 1 0.64618 0.902087 0.817518
744 1 0.723132 0.899394 0.820185
859 1 0.145294 0.925636 0.77384
440 1 0.545216 0.896674 0.827426
362 1 0.132083 0.746472 0.993743
1466 1 0.376884 0.504963 0.841571
954 1 0.0333923 0.580129 0.831719
1010 1 0.929311 0.545552 0.806013
116 1 0.960254 0.560206 0.856542
468 1 0.527481 0.608273 0.835365
1203 1 0.736824 0.722442 0.812634
631 1 0.191643 0.757122 0.821514
1051 1 0.258522 0.833407 0.831698
810 1 0.904868 0.859688 0.834471
832 1 0.655833 0.891485 0.821086
1885 1 0.256083 0.916813 0.829347
1562 1 0.46101 0.878039 0.835137
1475 1 0.535225 0.938192 0.841597
355 1 0.652337 0.602768 0.851657
690 1 0.638093 0.600473 0.850914
1963 1 0.172691 0.571832 0.873675
775 1 0.203724 0.592006 0.864952
1873 1 0.300339 0.618611 0.852237
555 1 0.708007 0.620825 0.815251
1290 1 0.134144 0.649171 0.833748
1175 1 0.930175 0.695345 0.842977
1394 1 0.868931 0.725522 0.836773
364 1 0.066648 0.70668 0.818274
2029 1 0.0234425 0.742614 0.834235
880 1 0.177304 0.753378 0.816721
1176 1 0.0334589 0.747211 0.835483
527 1 0.115393 0.785409 0.866314
1741 1 0.524886 0.78059 0.802042
405 1 0.571948 0.765955 0.800874
83 1 0.601844 0.800569 0.849689
1441 1 0.107012 0.807243 0.835452
1503 1 0.223044 0.810606 0.857366
983 1 0.90609 0.845446 0.845746
1688 1 0.699218 0.871871 0.854397
1396 1 0.136531 0.948078 0.858062
884 1 0.00627238 0.97995 0.847648
2017 1 0.637298 0.991395 0.86163
20 1 0.441908 0.522673 0.834212
1353 1 0.60874 0.570429 0.849912
991 1 0.823618 0.600816 0.869212
688 1 0.655534 0.618413 0.894175
1106 1 0.904856 0.645215 0.837812
113 1 0.795189 0.694586 0.859727
346 1 0.648424 0.762521 0.882852
1060 1 0.149744 0.836578 0.83054
332 1 0.674486 0.840525 0.829276
1492 1 0.285792 0.929604 0.884802
1266 1 0.758763 0.975055 0.863186
467 1 0.107702 0.991253 0.841163
674 1 0.90873 0.966261 0.853725
329 1 0.039371 0.504647 0.876912
257 1 0.450171 0.52297 0.86146
1938 1 0.933612 0.58657 0.879831
1543 1 0.0874855 0.586504 0.891049
1400 1 0.83527 0.67489 0.859464
1561 1 0.0300464 0.716034 0.8665
1977 1 0.408463 0.723075 0.859473
497 1 0.495618 0.698802 0.863605
754 1 0.631669 0.70751 0.898071
1136 1 0.456088 0.698916 0.884883
441 1 0.0618588 0.766107 0.864019
1976 1 0.77517 0.779633 0.892333
41 1 0.398144 0.792563 0.877171
794 1 0.316021 0.816433 0.913971
228 1 0.778047 0.820074 0.860882
1077 1 0.987037 0.85804 0.864816
669 1 0.424991 0.909245 0.874739
269 1 0.501415 0.905724 0.88728
1067 1 0.970688 0.954601 0.868275
164 1 0.572826 0.539481 0.928543
1154 1 0.00130564 0.618483 0.903943
352 1 0.408174 0.602929 0.901869
173 1 0.627383 0.5723 0.888499
16 1 0.300448 0.649483 0.915263
1927 1 0.93304 0.643929 0.892167
963 1 0.530293 0.650579 0.912424
806 1 0.198387 0.6982 0.900418
1580 1 0.196762 0.775581 0.904909
1189 1 0.965908 0.769079 0.894765
1422 1 0.721176 0.781718 0.87971
316 1 0.322462 0.847193 0.922548
479 1 0.0750595 0.836984 0.871814
1292 1 0.465278 0.811732 0.891636
514 1 0.725807 0.845759 0.924725
1071 1 0.40069 0.882147 0.934529
791 1 0.957496 0.968415 0.8973
1347 1 0.0922256 0.966551 0.915551
1047 1 0.567124 0.975492 0.906138
91 1 0.149995 0.520432 0.898149
1605 1 0.0921231 0.571353 0.897016
1678 1 0.732144 0.590084 0.901863
1801 1 0.95997 0.603986 0.942184
1076 1 0.258433 0.625927 0.916863
426 1 0.032319 0.637857 0.896925
177 1 0.432748 0.614445 0.929369
293 1 0.392099 0.667634 0.905519
1887 1 0.669183 0.685244 0.924208
1880 1 0.83903 0.68047 0.905976
396 1 0.604443 0.713137 0.941674
1989 1 0.083129 0.784798 0.919727
802 1 0.0963214 0.768832 0.884695
598 1 0.295636 0.801551 0.889948
1542 1 0.211056 0.843493 0.89192
42 1 0.949144 0.835174 0.937829
529 1 0.764723 0.878671 0.880887
418 1 0.124324 0.971217 0.893698
823 1 0.845191 0.991119 0.894343
1484 1 0.707321 0.564325 0.913587
1679 1 0.927806 0.64663 0.936216
349 1 0.360155 0.659326 0.930273
1332 1 0.970162 0.742877 0.936085
1529 1 0.782591 0.781425 0.932364
1507 1 0.745658 0.834457 0.965366
1995 1 0.215636 0.860241 0.926356
1765 1 0.212996 0.936392 0.951867
104 1 0.120003 0.93985 0.919677
1141 1 0.444848 0.523468 0.949753
470 1 0.494648 0.677516 0.952573
1069 1 0.654248 0.695879 0.967608
231 1 0.834399 0.686195 0.99378
1833 1 0.447216 0.719955 0.946482
1654 1 0.822529 0.714859 0.925339
1718 1 0.995881 0.75328 0.95731
2042 1 0.213038 0.757596 0.966486
253 1 0.476994 0.782293 0.952796
633 1 0.0600548 0.807341 0.935903
1073 1 0.749944 0.813176 0.967889
265 1 0.195584 0.833966 0.972039
1736 1 0.392562 0.871137 0.93712
369 1 0.798893 0.859004 0.968831
1589 1 0.789146 0.909135 0.959963
1834 1 0.413383 0.904092 0.935768
139 1 0.988506 0.851203 0.931368
143 1 0.782844 0.897034 0.960755
1796 1 0.771898 0.931907 0.958932
1805 1 0.0514794 0.945294 0.957583
799 1 0.981876 0.972213 0.953689
1535 1 0.0834334 0.524705 0.959223
2015 1 0.0375074 0.68713 0.934484
1962 1 0.305758 0.775826 0.956691
833 1 0.814224 0.792278 0.999519
1444 1 0.800219 0.785926 0.922681
841 1 0.916065 0.849157 0.978577
1714 1 0.687208 0.83385 0.942588
1305 1 0.107905 0.885608 0.960072
1768 1 0.230501 0.85964 0.968946
654 1 0.495958 0.894521 0.941774
417 1 0.612085 0.538796 0.985644
835 1 0.751864 0.556608 0.984591
825 1 0.767602 0.520386 0.737498
165 1 0.545164 0.992498 0.707342
1558 1 0.397115 0.510164 0.74504
1750 1 0.804555 0.619299 0.982533
221 1 0.334413 0.690827 0.986916
1691 1 0.596921 0.981005 0.57883
839 1 0.193512 0.531252 0.534275
455 1 0.530627 0.705509 0.980709
8 1 0.293317 0.721768 0.980921
582 1 0.0638162 0.725733 0.993711
298 1 0.724307 0.716759 0.9546
13 1 0.281912 0.777848 0.976871
410 1 0.802634 0.9428 0.958587
1729 1 0.741353 0.987151 0.73623
516 1 0.720986 0.978373 0.976255
1826 1 0.192276 0.965997 0.972468
1127 1 0.626778 0.907967 0.992992
30 1 0.974141 0.906492 0.995145
166 1 0.434719 0.932005 0.996926
1533 1 0.418568 0.589984 0.988811
912 1 0.366599 0.995925 0.577453
2007 1 0.217862 0.993424 0.668931
1828 1 0.759639 0.995111 0.752485
1015 1 0.0495187 0.999561 0.563128
1931 1 0.635153 0.753988 0.999394
|
[
"[email protected]"
] | |
89fdfc7929383d46dd47a2c43d1b8f94270d44c9
|
e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488
|
/duo_auth/vendor/duo_client_python/duo_client/accounts.py
|
556580b7f43fe411ee3f8915eed02cbb5f3ca610
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
OSSSP/insightconnect-plugins
|
ab7c77f91c46bd66b10db9da1cd7571dfc048ab7
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
refs/heads/master
| 2023-04-06T23:57:28.449617 | 2020-03-18T01:24:28 | 2020-03-18T01:24:28 | 248,185,529 | 1 | 0 |
MIT
| 2023-04-04T00:12:18 | 2020-03-18T09:14:53 | null |
UTF-8
|
Python
| false | false | 1,287 |
py
|
"""
Duo Security Accounts API reference client implementation.
<http://www.duosecurity.com/docs/accountsapi>
"""
from __future__ import absolute_import
from . import client
class Accounts(client.Client):
def get_child_accounts(self):
"""
Return a list of all child accounts of the integration's account.
"""
params = {}
response = self.json_api_call('POST',
'/accounts/v1/account/list',
params)
return response
def create_account(self, name):
"""
Create a new child account of the integration's account.
"""
params = {
'name': name,
}
response = self.json_api_call('POST',
'/accounts/v1/account/create',
params)
return response
def delete_account(self, account_id):
"""
Delete a child account of the integration's account.
"""
params = {
'account_id': account_id,
}
response = self.json_api_call('POST',
'/accounts/v1/account/delete',
params)
return response
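# Illustrative usage (not part of this module; the credentials below are
# placeholders, and the base Client constructor signature is assumed from
# duo_client):
#
#   accounts = Accounts(ikey='DIXXXXXXXXXXXXXXXXXX', skey='...',
#                       host='api-XXXXXXXX.duosecurity.com')
#   for child in accounts.get_child_accounts():
#       print(child)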
|
[
"[email protected]"
] | |
ad182d71d82a93f43397c6323d260b37ee09705a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_160/ch10_2019_06_06_18_15_04_170519.py
|
e076ee131d18a9953d0f81065eebe575a27e2103
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 62 |
py
|
def libras_para_kg(libras):
kg = libras/2.20
return kg
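# Usage sketch (added for illustration): with the 2.20 factor above,
# 2.20 lb converts to exactly 1 kg.
if __name__ == '__main__':
    print(libras_para_kg(2.20))  # 1.0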
|
[
"[email protected]"
] | |
5ea84d476170a94be1ff7dcd2b8b67bd8b5db885
|
782316ea755c025a331bf084b89b7d8e775a36eb
|
/main.py
|
c94a3077af22bb818b1e35bf4284517ec30f27e5
|
[] |
no_license
|
ravenusmc/market
|
eb7c5f1e1736a3de02d77df7f7e63828dab6bca6
|
ad9fed2e24b8789e8ae6b554dfa219cd50d089d6
|
refs/heads/master
| 2021-08-19T14:49:04.153170 | 2017-11-26T18:34:51 | 2017-11-26T18:34:51 | 106,032,373 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,564 |
py
|
#importing outside libraries for use in the project
from flask import Flask, session, jsonify, redirect, url_for, escape, render_template, request, flash
import json
import requests
#importing files I made for this project
from food import *
from food_data import *
from quote import *
from user import *
#Setting up Flask
app = Flask(__name__)
#This route takes the user to the landing page
@app.route('/', methods=['GET', 'POST'])
def landing():
if request.method == 'POST':
        #Receiving the information from the user.
username = request.form['username']
password = request.form['password']
#Creating a user object
user = User()
#Checking to see if the user is in the database.
flag, not_found, password_no_match = user.check(username, password)
#Conditional statement to test if the user is a member of the site.
if flag == True:
#If the user is in the database, the user gets sent to the index page.
session['username'] = request.form['username']
#Sending the user to the index page
return redirect(url_for('home'))
else:
#If the user is not in the database then they will be sent to the
#sign up page.
if not_found:
flash('Username not found, maybe sign up!')
elif password_no_match:
flash('Password does not match! Maybe sign up!')
return render_template('login.html')
#This route takes the user to the signup page
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method == 'POST':
name = request.form['name']
username = request.form['username']
password = request.form['password']
#Creating the user object
user = User()
#Encrypting the password
password, hashed = user.encrypt_pass(password)
#Adding the user to the database
user.insert(name, username, hashed)
#Letting them into the index Page
return redirect(url_for('home'))
return render_template('signup.html')
#This route takes the user to the home page
@app.route('/home', methods=['GET', 'POST'])
def home():
#This session will prevent users who have not signed up from coming in.
if 'username' not in session:
return redirect(url_for('signup'))
#This method fires when the user hits the submit button.
if request.method == 'POST':
#Creating a food object
food = Food()
#Calling the get_food method which will set food amounts for each type of food.
food_list = food.get_Food()
#Creating the food data object to insert food into the database
data = Food_Data()
data.insert_food(food_list)
username = session['username']
#Creating a list to hold the quotes
quotes = []
#Setting up the quote object
quote = Quote()
#Getting the response from my quote api
response = quote.getting_quotes()
#parsing out the data that I need-the actual quotes
quotes = quote.get_data(response)
return render_template('home.html', name = username, quotes = quotes)
#This route will take the user to the stats page
@app.route('/stats', methods=['GET', "POST"])
def stats():
#Creating a food object
food = Food_Data()
#Calling the pull_food method which will pull the data out of the database
food_data = food.pull_food()
#Here I'm getting the total pounds for EACH food
pound_data = food.get_pounds(food_data)
#Here I'm getting the total profit for EACH food
profit_data = food.get_profit(food_data)
#Here I'm getting the total pounds for all the food
total_pounds = food.total_pounds(pound_data)
#Here I'm getting the total profit for all the food
total_profit = food.total_profit(profit_data)
return render_template('stats_page.html', pound_info = pound_data, total_pounds = total_pounds,
profit_info =profit_data, total_profit = total_profit, pound_data=json.dumps(pound_data),
profit_data=json.dumps(profit_data))
#This function is what will log out the user.
@app.route('/sign_out')
def logout():
# remove the username from the session if it's there
session.pop('username', None)
#Redirect to Landing page
return redirect(url_for('landing'))
# set the secret key. keep this really secret:
app.secret_key = 'n3A\xef(\xb0Cf^\xda\xf7\x97\xb1x\x8e\x94\xd5r\xe0\x11\x88\x1b\xb9'
#This line will actually run the app.
if __name__ == '__main__':
app.run(debug=True)
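# To try it locally (illustrative; assumes the helper modules imported above,
# e.g. user.py and food.py, are present): run `python main.py`, then browse
# to http://127.0.0.1:5000/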
|
[
"[email protected]"
] | |
c0eaa6d05399b136b2483494825e6cbc648f18a0
|
fd11d784974fc316b57dfd48827c038ff0d98909
|
/baselines/jft/experiments/jft300m_vit_base16.py
|
108071743048fce2f8459f78789299f0b9641fde
|
[
"Apache-2.0"
] |
permissive
|
SuhongMoon/uncertainty-baselines
|
ada6c3382f91aeff548d5fb707863f08acd8fc7b
|
1a7b24f86994c7b69d9263bf47be7169736f0da9
|
refs/heads/main
| 2023-08-05T21:07:12.515521 | 2021-09-13T20:50:57 | 2021-09-13T20:51:22 | 398,609,284 | 0 | 0 |
Apache-2.0
| 2021-08-21T16:45:13 | 2021-08-21T16:45:12 | null |
UTF-8
|
Python
| false | false | 3,225 |
py
|
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT-B/16.
"""
# pylint: enable=line-too-long
import ml_collections
import get_fewshot # local file import
def get_config():
"""Config for training a patch-transformer on JFT."""
config = ml_collections.ConfigDict()
# Directory for the version de-dup'd from BiT downstream test-sets.
config.dataset = 'jft/entity:1.0.0'
config.val_split = 'test[:49511]' # aka tiny_test/test[:5%] in task_adapt
config.train_split = 'train' # task_adapt used train+validation so +64167
config.num_classes = 18291
config.init_head_bias = -10.0
config.trial = 0
config.batch_size = 4096
config.num_epochs = 7
pp_common = '|value_range(-1, 1)'
pp_common += f'|onehot({config.num_classes})'
# To use ancestor 'smearing', use this line instead:
# pp_common += f'|onehot({config.num_classes}, key='labels_extended', key_result='labels') # pylint: disable=line-too-long
pp_common += '|keep("image", "labels")'
config.pp_train = 'decode_jpeg_and_inception_crop(224)|flip_lr' + pp_common
config.pp_eval = 'decode|resize_small(256)|central_crop(224)' + pp_common
config.shuffle_buffer_size = 250_000 # Per host, so small-ish is ok.
config.log_training_steps = 50
config.log_eval_steps = 1000
# NOTE: eval is very fast O(seconds) so it's fine to run it often.
config.checkpoint_steps = 1000
# Model section
config.model = ml_collections.ConfigDict()
config.model.patches = ml_collections.ConfigDict()
config.model.patches.size = [16, 16]
config.model.hidden_size = 768
config.model.transformer = ml_collections.ConfigDict()
config.model.transformer.attention_dropout_rate = 0.
config.model.transformer.dropout_rate = 0.
config.model.transformer.mlp_dim = 3072
config.model.transformer.num_heads = 12
config.model.transformer.num_layers = 12
config.model.classifier = 'token' # Or 'gap'
config.model.representation_size = 768
# Optimizer section
config.optim_name = 'Adam'
config.optim = ml_collections.ConfigDict()
config.optim.weight_decay = 0.1
config.optim.beta1 = 0.9
config.optim.beta2 = 0.999
config.weight_decay = None # No explicit weight decay
# TODO(lbeyer): make a mini-language like preprocessings.
config.lr = ml_collections.ConfigDict()
config.lr.base = 8e-4 # LR has to be lower for larger models!
config.lr.warmup_steps = 10_000
config.lr.decay_type = 'linear'
config.lr.linear_end = 1e-5
# Few-shot eval section
  config.fewshot = get_fewshot.get_fewshot()
config.fewshot.log_steps = 25_000
config.args = {}
return config
def get_sweep(hyper):
return hyper.product([])
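# Illustrative only (not in the original): the config can be materialized for
# a quick sanity check; this needs the local get_fewshot module on the path.
# if __name__ == '__main__':
#     print(get_config().model.hidden_size)  # 768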
|
[
"[email protected]"
] | |
ffbf4f577cb055f3ba0aff800f1e40d5f72aef31
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/3039.py
|
c0fc7a7b9f2a2f7aa673c3c70009e8f2ea1a127d
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,341 |
py
|
#!/usr/bin/python
import sys,os
def solve(index1, grid1, index2, grid2):
"""Returns a string result to one case of a problem"""
intersection = set(grid1[index1]) & set(grid2[index2])
if len(intersection) > 1 :
return "Bad magician!"
if len(intersection) < 1 :
return "Volunteer cheated!"
return intersection.pop()
#Shared########################################################################
def main():
with open(sys.argv[1], 'rU') as f_in:
cases = int(f_in.readline().strip())
for case in range(1,cases+1):
#Get input data
index1 = int(f_in.readline().strip()) - 1
grid1 = [[int(x) for x in f_in.readline().strip().split()] for _ in range(4)]
index2 = int(f_in.readline().strip()) - 1
grid2 = [[int(x) for x in f_in.readline().strip().split()] for _ in range(4)]
#Solve and output
print("Case #{}: {}".format(case, solve(index1, grid1, index2, grid2)))
if __name__ == '__main__':
if len(sys.argv) > 1 and os.path.exists(sys.argv[1]):
main()
elif len(sys.argv) > 1 and not os.path.exists(sys.argv[1]):
print "File '"+str(sys.argv[1])+"' does not exist!"
else:
print "No file supplied! Run program this way: '"+str(sys.argv[0])+" something.in'"
|
[
"[email protected]"
] | |
bae6c38d93ed8a773579e39d1ec375da72b1dfee
|
0ef993b4dc63d6ed10e579304f6258c04f5d9f47
|
/032.py
|
668f6bd489044477e6250af0ad17fe73f9ba29ac
|
[] |
no_license
|
yu5shi8/yukicoder
|
c5eb9f77d355cfdb1ae4e36634acca18b1d797f2
|
2bfc5b1b044b8f2c5a33db037585788787ebbc88
|
refs/heads/master
| 2020-05-02T04:46:17.694923 | 2020-03-12T08:37:28 | 2020-03-12T08:37:28 | 177,757,480 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 655 |
py
|
# -*- coding: utf-8 -*-
# No.32 貯金箱の憂鬱 (The Piggy Bank's Melancholy)
# https://yukicoder.me/problems/5
# Read the number of coins of each denomination
l = int(input())
m = int(input())
n = int(input())
# In hindsight, the if guards here were not strictly necessary
# 1-yen coins: every 25 of them carry into one 25-yen coin
if n >= 25:
m = m + (n // 25)
n = n % 25
# 25-yen coins: every 4 of them carry into one 100-yen coin
if m >= 4:
l = l + (m // 4)
m = m % 4
# 100-yen coins: keep only the count modulo 10
if l >= 10:
l = l % 10
# Total number of coins
coin_count = l + m + n
print(coin_count)
'''
[Reference solution]
https://yukicoder.me/submissions/321110
l,m,n = [int(input()) for _ in '1'*3]
print(n%25 + (n//25+m)%4 + ((n//25+m)//4+l)%10)
'''
|
[
"[email protected]"
] | |
9331f10c63e60f252f6aea317e8e0f8981837a4e
|
d1c6de4e0d4aafbe1e7d15a02487494f86bf9b7e
|
/알고리즘문제/공통조상.py
|
fb4e31b961fce86f0136ebde7630b07013b14abe
|
[] |
no_license
|
kdm604/TIL
|
d2ce2122e0b828a595530ac2a405a4661cf60205
|
554bbd8e884f4e7fbebdefbfa22a1a5eee0fa452
|
refs/heads/master
| 2023-01-11T21:41:57.845549 | 2020-03-24T08:55:10 | 2020-03-24T08:55:10 | 195,938,033 | 0 | 0 | null | 2023-01-05T01:14:37 | 2019-07-09T05:23:00 |
Python
|
UTF-8
|
Python
| false | false | 1,048 |
py
|
def check(T):
    # Walk parent links from node T, collecting every ancestor of A into ans.
    if T:
        ans.append(nxn[T][2])
        check(nxn[T][2])
def check2(T):
    # Same walk as check(), collecting the ancestors of B into ans2.
    if T:
        ans2.append(nxn[T][2])
        check2(nxn[T][2])
def number(T):
    # Collect all nodes of the subtree rooted at T into num (its size).
    if T:
        num.append(T)
        number(nxn[T][0])
        number(nxn[T][1])
T = int(input())
for test in range(T):
V, E, A, B = map(int, input().split())
nxn = [[0 for _ in range(3)] for _ in range(V + 1)]
arr = list(map(int, input().split()))
ans = []
ans2 = []
max = 0
b = 0
for i in range(0, len(arr), 2):
if nxn[arr[i]][0] == 0:
nxn[arr[i]][0] = arr[i + 1]
nxn[arr[i + 1]][2] = arr[i]
else:
nxn[arr[i]][1] = arr[i + 1]
nxn[arr[i + 1]][2] = arr[i]
check(A)
check2(B)
for i in range(len(ans)):
for j in range(len(ans2)):
if ans[i] == ans2[j]:
max = ans[i]
b = 1
break
if b == 1:
break
num = []
number(max)
print("#%d %d %d" % (test+1, max, len(num)))
|
[
"[email protected]"
] | |
9e1518600878f1c780387c3d1ba96ee3f7a7b7d8
|
f889bc01147869459c0a516382e7b95221295a7b
|
/test/test_sales_data_shipment_item_creation_interface.py
|
b6198d3e36b25d454919911ba2064c4129ed7f47
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,135 |
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.sales_data_shipment_item_creation_interface import SalesDataShipmentItemCreationInterface
class TestSalesDataShipmentItemCreationInterface(unittest.TestCase):
""" SalesDataShipmentItemCreationInterface unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testSalesDataShipmentItemCreationInterface(self):
"""
Test SalesDataShipmentItemCreationInterface
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.sales_data_shipment_item_creation_interface.SalesDataShipmentItemCreationInterface()
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
31f909913d17784821fc85958b1ace8371fbcc6c
|
e0378adb07d3a051b65420e98ed8d141aabcbf18
|
/Assignment 10/tests/test_repository.py
|
1992e7d97bbfc504289619266be344a85d787f3c
|
[] |
no_license
|
BVlad917/Fundamentals-of-Programming
|
bc36c961be8d7144dd57aaf97b7419db551bd291
|
afd98cd572d456b2dda40c41882c1f2eb8f139f5
|
refs/heads/main
| 2023-08-06T17:02:26.966825 | 2021-09-20T19:56:11 | 2021-09-20T19:56:11 | 408,119,112 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,590 |
py
|
import unittest
from domain.person import Person
from repository.custom_repo import Repository
from repository.in_memory_repo import Repository as InMemoryRepo
from repository.repository_exceptions import DeleteException, AddException, RepositoryException
class TestRepository(unittest.TestCase):
def setUp(self):
self.pers_1 = Person(1, 'Vlad Bogdan', '0745000111')
self.pers_2 = Person(2, 'Test Person', '0241234567')
self.custom_repo = Repository()
self.in_memory_repo = InMemoryRepo()
def test_length_repo_attribute(self):
self.assertEqual(len(self.custom_repo.elements), 0)
self.custom_repo.add_to_repo(self.pers_1)
self.custom_repo.add_to_repo(self.pers_2)
self.assertEqual(len(self.custom_repo.elements), 2)
def test_contains_attribute_repo(self):
self.custom_repo.add_to_repo(self.pers_1)
self.assertTrue(self.pers_1 in self.custom_repo.elements)
self.assertFalse(self.pers_2 in self.custom_repo.elements)
def test_iter_repo(self):
self.custom_repo.add_to_repo(self.pers_1)
list_of_pers = []
# Test to see if repository is iterable
for pers in self.custom_repo.elements:
list_of_pers.append(pers)
# Test to see if repository is 'index-able'
self.assertEqual(list_of_pers[0], self.custom_repo.elements[0])
def test_delete_item_from_repo_by_id(self):
self.custom_repo.add_to_repo(self.pers_1)
self.custom_repo.delete_by_id(1)
self.assertEqual(len(self.custom_repo.elements), 0)
self.assertRaises(DeleteException, self.custom_repo.delete_by_id, 1)
self.assertRaises(DeleteException, self.in_memory_repo.delete_by_id, 1)
def test_get_all_ids(self):
self.custom_repo.add_to_repo(self.pers_1)
self.custom_repo.add_to_repo(self.pers_2)
self.assertEqual(self.custom_repo.get_all_ids(), [1, 2])
def test_find_by_id(self):
self.custom_repo.add_to_repo(self.pers_1)
self.custom_repo.add_to_repo(self.pers_2)
self.assertEqual(self.custom_repo.find_by_id(1)[0], self.pers_1)
def test_add_to_repo(self):
self.assertEqual(len(self.custom_repo.elements), 0)
self.custom_repo.add_to_repo(self.pers_1)
self.custom_repo.add_to_repo(self.pers_2)
self.assertEqual(len(self.custom_repo.elements), 2)
self.assertRaises(AddException, self.custom_repo.add_to_repo, self.pers_1)
self.in_memory_repo.add_to_repo(self.pers_1)
self.assertRaises(AddException, self.in_memory_repo.add_to_repo, self.pers_1)
def test_get_all(self):
self.assertEqual(self.custom_repo.elements, [])
self.custom_repo.add_to_repo(self.pers_1)
self.custom_repo.add_to_repo(self.pers_2)
self.assertEqual(self.custom_repo.elements, [self.pers_1, self.pers_2])
def test_update(self):
self.custom_repo.add_to_repo(self.pers_1)
self.custom_repo.add_to_repo(self.pers_2)
update_pers = Person(15, 'New Name', '0745 094 735')
self.assertRaises(RepositoryException, self.custom_repo.update, update_pers)
update_pers = Person(1, 'New Name', '0745 094 735')
self.custom_repo.update(update_pers)
pers1, _ = self.custom_repo.find_by_id(1)
self.assertEqual(pers1.name, 'New Name')
self.assertEqual(pers1.phone_number, '0745 094 735')
self.assertRaises(RepositoryException, self.in_memory_repo.update, self.pers_1)
|
[
"[email protected]"
] | |
0182ed33149dae1755168e285acaf878bfeb96b8
|
bd4535b2ff5fc80234eed709f46da53b9ab260cf
|
/Packs/TeamCymru/Integrations/TeamCymru/TeamCymru_test.py
|
84aa522a7c62c7309caf6468531c5395be42a3bd
|
[
"MIT"
] |
permissive
|
vibhuabharadwaj/content
|
0641284c862668b577e82e32e2daecdb9fabb39a
|
518da763814fefce538379560282ff8c2ce661b9
|
refs/heads/master
| 2023-03-07T21:36:31.768989 | 2022-09-28T15:50:46 | 2022-09-28T15:50:46 | 202,795,410 | 1 | 0 |
MIT
| 2023-03-06T17:25:01 | 2019-08-16T20:30:23 |
Python
|
UTF-8
|
Python
| false | false | 8,298 |
py
|
"""TeamCymru for Cortex XSOAR - Unit Tests file"""
import json
import demistomock as demisto
import pytest
from unittest.mock import MagicMock
import TeamCymru
'''GLOBALS'''
client = MagicMock()
MOCK_ENTRY_ID = '@123'
MOCK_BULK_LIST = "1.1.1.1, b, 2.2.2, n, 3.3.3.3,2001:0db8:85a3:0000:0000:8a2e:0370:7334,a,\"8.8.8.8\"," \
"4.4.4.4, 1.1.2.2, 6,6.6.6.6, 1.1.2.2"
MOCK_IPS_LIST = ['1.1.1.1', 'b', '2.2.2', 'n',
'3.3.3.3', '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'a', '8.8.8.8', '4.4.4.4',
'1.1.2.2', '6', '6.6.6.6', '1.1.2.2']
MOCK_INVALID_IPS = ['b', '2.2.2', 'n', '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'a', '6']
MOCK_VALID_IPS = ['1.1.1.1', '3.3.3.3', '8.8.8.8', '4.4.4.4', '1.1.2.2', '6.6.6.6', '1.1.2.2']
MOCK_FILE_RES = {
'id': 'test_id',
'path': 'test_data/test_ips_file.csv',
'name': 'test_ips_file.csv',
}
def load_test_data(json_path):
with open(json_path) as f:
return json.load(f)
@pytest.mark.parametrize('args, expected_error',
[({'ip': None}, 'IP not specified'),
({'ip': '172.16.0'}, 'The given IP address: 172.16.0 is not valid')])
def test_ip_command_invalid_ip(args, expected_error):
"""
Given:
- Invalid IP
When:
- Running the IP command
Then:
- Raise ValueError with the expected value
"""
from TeamCymru import ip_command
with pytest.raises(ValueError, match=expected_error):
ip_command(client, args)
def test_ip_command(mocker):
"""
Given:
- Command arguments: ip ip = 8.8.8.8 (valid IPv4)
When:
- Running the IP command
Then:
- Validate the output compared to the mock output
"""
from TeamCymru import ip_command
mock_arg = {'ip': '8.8.8.8'}
test_data = load_test_data('test_data/test_ip_command.json')
return_value = test_data.get('ip_command_response')
mocker.patch.object(TeamCymru, 'team_cymru_ip', return_value=return_value)
response = ip_command(client, mock_arg)
mock_outputs = test_data.get('mock_output')
mock_readable_outputs = test_data.get('mock_readable')
assert mock_outputs == response[0].outputs
assert mock_readable_outputs == response[0].readable_output
assert response[0].indicator
def test_ip_command_with_list(mocker):
"""
Given:
- List of IP addresses
When:
- Running the IP command
Then:
- Verify support list of IPs
- Verify the result is as expected and returns the expected warning
"""
from TeamCymru import ip_command
mock_arg = {"ip": MOCK_BULK_LIST}
test_data = load_test_data('test_data/test_cymru_bulk_whois_command.json')
return_value = test_data.get('cymru_bulk_whois_command_response')
mocker.patch.object(TeamCymru, 'team_cymru_bulk_whois', return_value=return_value)
warning = mocker.patch.object(TeamCymru, 'return_warning')
mock_outputs = test_data.get('mock_output')
mock_readable_outputs = test_data.get('mock_readable')
response = ip_command(client, mock_arg)
assert warning.call_args[0][0] == test_data.get("warning_message")
assert warning.call_args[1] == {'exit': False}
for i, res in enumerate(response):
assert mock_outputs[i] == res.outputs
assert res.indicator
assert mock_readable_outputs[i] == res.readable_output
def test_cymru_bulk_whois_command_with_file(mocker):
"""
Given:
- File of IP addresses
When:
- Running the cymru_bulk_whois command
Then:
- Verify support file of IPs
- Verify the result is as expected
"""
from TeamCymru import cymru_bulk_whois_command
mock_arg = {"entry_id": MOCK_ENTRY_ID}
test_data = load_test_data('test_data/test_cymru_bulk_whois_command.json')
return_value = test_data.get('cymru_bulk_whois_command_response')
mocker.patch.object(TeamCymru, 'team_cymru_bulk_whois', return_value=return_value)
mocker.patch.object(demisto, 'getFilePath', return_value=MOCK_FILE_RES)
mock_outputs = test_data.get('mock_output')
mock_readable_outputs = test_data.get('mock_readable')
response = cymru_bulk_whois_command(client, mock_arg)
for i, res in enumerate(response):
assert mock_outputs[i] == res.outputs
assert res.indicator
assert mock_readable_outputs[i] == res.readable_output
@pytest.mark.parametrize('args, expected_error',
[({'entry_id': MOCK_ENTRY_ID}, 'No file was found for given entry_id'),
({}, 'No entry_id specified.')])
def test_cymru_bulk_whois_invalid_bulk(args, expected_error, mocker):
"""
Given:
- Invalid given argument
When:
- Running the cymru-bulk-whois command
Then:
- Raise ValueError with the expected value
"""
from TeamCymru import cymru_bulk_whois_command
mocker.patch.object(demisto, 'getFilePath', return_value=None)
with pytest.raises(ValueError, match=expected_error):
cymru_bulk_whois_command(client, args)
def test_team_cymru_parse_file():
"""
Given:
- get_file_path_res, dict: Object contains file ID, path and name
When:
- Running the parse_file function
Then:
- Return list of the elements in the file without spaces
"""
from TeamCymru import parse_file
mock_arg = {
'id': 'test_id',
'path': 'test_data/test_ips_file.csv',
'name': 'test_ips_file.csv',
}
assert parse_file(mock_arg) == MOCK_IPS_LIST
def test_team_cymru_validate_ip_addresses():
"""
Given:
- Ips list
When:
- Running the validate_ip_addresses function
Then:
- Returns two list of invalid and valid IPv4 addresses
"""
from TeamCymru import validate_ip_addresses
invalid_ip_addresses, valid_ip_addresses = validate_ip_addresses(MOCK_IPS_LIST)
assert invalid_ip_addresses == MOCK_INVALID_IPS
assert valid_ip_addresses == MOCK_VALID_IPS
def test_team_cymru_parse_ip_result():
"""
Given:
- The function arguments: ip, ip_data
When:
- Running the parse_ip_result function
Then:
- Validate the returned value (commandResult) compared to the mock output
"""
from TeamCymru import parse_ip_result
from CommonServerPython import Common
test_data = load_test_data('test_data/test_ip_command.json')
ip_data = test_data.get('ip_command_response')
ip = "8.8.8.8"
mock_entry_context = test_data.get('mock_output')
mock_readable = test_data.get('mock_readable')
command_result = parse_ip_result(ip, ip_data)
assert command_result.outputs == mock_entry_context
assert command_result.readable_output == mock_readable
assert command_result.indicator
assert command_result.raw_response == ip_data
assert isinstance(command_result.indicator, Common.IP)
def test_empty_command_result(mocker):
"""
Given:
- Valid ip address, running the ip_command and cymru_bulk_whois_command functions
When:
- team_cymru_ip, team_cymru_bulk_whois functions return None
Then:
    - Verify the functions don't fail and return an empty list
"""
from TeamCymru import ip_command, cymru_bulk_whois_command
mocker.patch("TeamCymru.team_cymru_ip", return_value=None)
result = ip_command(client, {'ip': '1.1.1.1'})
assert not result
mocker.patch("TeamCymru.team_cymru_bulk_whois", return_value=None)
mocker.patch.object(demisto, 'getFilePath', return_value=MOCK_FILE_RES)
result = cymru_bulk_whois_command(client, {'entry_id': MOCK_ENTRY_ID})
assert not result
def assert_results_ok():
assert demisto.results.call_count == 1
# call_args is tuple (args list, kwargs). we only need the first one
results = demisto.results.call_args[0]
assert len(results) == 1
assert results[0] == 'ok'
def test_test_command(mocker):
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'command', return_value='test-module')
return_value = load_test_data('test_data/test_ip_command.json').get('ip_command_response')
mocker.patch("TeamCymru.team_cymru_ip", return_value=return_value)
TeamCymru.main()
assert_results_ok()
|
[
"[email protected]"
] | |
04196cf405334cb323c1846ac46eeb6777d8eef4
|
8b634dc196162dff328d61bf6f8d4121dfb59bd4
|
/Queue/movingAverage.py
|
87c8bc033130a1c01772012764b36c6e3cf616d4
|
[] |
no_license
|
kqg13/LeetCode
|
84268b2146dc8323cb71f041b6664069baaa339c
|
1c584f4ca4cda7a3fb3148801a1ff4c73befed24
|
refs/heads/master
| 2023-08-05T09:46:28.103910 | 2023-07-29T21:02:26 | 2023-07-29T21:02:26 | 165,123,023 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 744 |
py
|
# Easy queue problem 346: Moving Average from Data Stream
# Given a stream of integers and a window size, calculate the moving average
# of all integers in the sliding window.
# Example:
# MovingAverage m = new MovingAverage(3);
# m.next(1) = 1
# m.next(10) = (1 + 10) / 2
# m.next(3) = (1 + 10 + 3) / 3
# m.next(5) = (10 + 3 + 5) / 3
from collections import deque
class MovingAverage:
def __init__(self, size):
"""
Initialize your data structure here.
:type size: int
"""
self.size = size
self.d = deque(maxlen=self.size)
def next(self, val):
"""
:type val: int
:rtype: float
"""
self.d.append(val)
return sum(self.d) / len(self.d)
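# Usage sketch (added for illustration; mirrors the example above):
if __name__ == '__main__':
    m = MovingAverage(3)
    print(m.next(1))   # 1.0
    print(m.next(10))  # 5.5
    print(m.next(3))   # 4.666...
    print(m.next(5))   # 6.0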
|
[
"[email protected]"
] | |
6d1846fe2f0d777ba67f4e06e571587e1499e552
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/remove-all-ones-with-row-and-column-flips.py
|
c8c1b705b138b90f15933da251f8e4dc0c4d8bf7
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 |
MIT
| 2023-05-31T06:10:33 | 2018-10-11T17:38:35 |
C++
|
UTF-8
|
Python
| false | false | 302 |
py
|
# Time: O(m * n)
# Space: O(1)
class Solution(object):
def removeOnes(self, grid):
"""
:type grid: List[List[int]]
:rtype: bool
"""
return all(grid[i] == grid[0] or all(grid[i][j] != grid[0][j] for j in xrange(len(grid[0]))) for i in xrange(1, len(grid)))
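# Added note (not in the original): the grid is clearable iff every row equals
# the first row or is its exact complement, since row/column flips preserve
# that property. Example: [[0, 1], [1, 0]] -> True (flip row 1, then column 1).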
|
[
"[email protected]"
] | |
71f373e412b3cdc65f292f31da7234801c241465
|
fb16f7024e0d93ecb07c122e633c1a957a8ab645
|
/django/project31/app31/views.py
|
879c85bbdf9271ed121d3b5472a6fc43df156373
|
[] |
no_license
|
rajeshanu/rajeshprograms
|
c23cf550e060040c7b336242a805e274d3305371
|
83f0fc9c4a8628bba590d1066ca93fd98137f0bc
|
refs/heads/master
| 2020-04-04T13:17:55.986558 | 2018-11-03T06:42:51 | 2018-11-03T06:42:51 | 155,956,676 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,634 |
py
|
from django.shortcuts import render
from .models import register
from app31.models import login as login_model  # aliased: a view named login is defined below
# Create your views here.
def showindex(request):
return render(request,"index.html")
def show(request):
name=request.POST.get("t1")
cno=request.POST.get("t2")
email=request.POST.get("t3")
password=request.POST.get("t4")
r=register(name=name,cno=cno,email=email,password=password)
r.save()
return render(request,"index.html",{"msg":"data saved"})
def login(request):
email=request.POST.get("t5")
password=request.POST.get("t6")
#p=register(email=email,password=password)
#p.save()
s=register.objects.get(email=email)
if s.email==email and s.password==password:
return render(request,"details.html",{"email":s.email})
else:
return render(request,"index.html",{"msg2":"invalid"})
def profile(request):
email=request.GET.get("email")
d=register.objects.get(email=email)
return render(request,"profile.html",{"email":email})
def feedback(request):
uemail=request.GET.get("email")
try:
fmail=request.session["email"]
if uemail==fmail:
return render(request,"details.html",{"msg1":" already given feedback"})
else:
return render(request,"feedback.html",{"email":uemail})
except:
return render(request,"feedback.html",{"email":uemail})
def savefeedback(request):
email=request.POST.get("id")
feed=request.POST.get("msg")
    l = login_model(feedback=feed, uemail=email)
l.save()
request.session["email"]=email
return render(request,"details.html",{"msg":"feedback given "})
|
[
"[email protected]"
] | |
b8997bbbf7cc05c670173ee6e09e86e7926a283b
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/eliben_deep-learning-samples/deep-learning-samples-master/logistic-regression/timer.py
|
91f4c60c70cdc0ca403d542731a68b3ae57a0cdd
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 |
Python
|
UTF-8
|
Python
| false | false | 411 |
py
|
from __future__ import print_function
import sys
import time
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
if self.name:
print('[%s] ' % self.name, end='')
sys.stdout.flush()
def __exit__(self, type, value, traceback):
print('Elapsed: %s' % (time.time() - self.tstart))
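# Usage sketch (added for illustration):
if __name__ == '__main__':
    with Timer('sleep'):
        time.sleep(0.05)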
|
[
"[email protected]"
] | |
a3fa619d0476dcab06ff8e9e1a778e21dcc927c2
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/DataQuality/DataQualityUtils/scripts/physval_make_web_display.py
|
00caa4b028f5b52fe6935076c5a26874739787fa
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,841 |
py
|
#!/usr/bin/env python
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
"""
Translate an arbitrary ROOT file into a han config file
@author: [email protected]
9 Oct 2008
Adapted for physics validation 14 May 2014
"""
from DQConfMakerBase.DQElements import *
from DQConfMakerBase.Helpers import IDHelper, make_thresholds
from DataQualityUtils.hanwriter import writeHanConfiguration
import ROOT
repeatalgorithm = DQAlgorithm(id='RepeatAlgorithm',
libname='libdqm_algorithms.so')
worst = DQAlgorithm(id='WorstCaseSummary',libname='libdqm_summaries.so')
### SOME THINGS YOU MIGHT WANT TO EDIT
# Edit this to change what algorithm is applied (AuxAlgName--xxx)
# or to disable printing the number of entries for each reference
#algorithmparameters = [DQAlgorithmParameter('AuxAlgName--Chi2Test_Prob', 1),
algorithmparameters = [DQAlgorithmParameter('AuxAlgName--Chi2Test_Chi2_per_NDF', 1),
DQAlgorithmParameter('RepeatAlgorithm--ResultsNEntries', 1)]
# Edit this to change thresholds
thresh = make_thresholds('Chi2_per_NDF', 1.0, 1.50, 'Chi2Thresholds')
def recurse(rdir, dqregion, ignorepath, refs=None, displaystring='Draw=PE', displaystring2D='Draw=COLZ', regex=None, startpath=None, hists=None):
import re
for key in rdir.GetListOfKeys():
cl = key.GetClassName(); rcl = ROOT.TClass.GetClass(cl)
if ' ' in key.GetName():
print 'WARNING: cannot have spaces in histogram names for han config; not including %s %s' % (cl, key.GetName())
continue
if rcl.InheritsFrom('TH1') or rcl.InheritsFrom('TGraph') or rcl.InheritsFrom('TEfficiency'):
if '/' in key.GetName():
print 'WARNING: cannot have slashes in histogram names, encountered in directory %s, histogram %s' % (rdir.GetPath(), key.GetName())
continue
if key.GetName() == 'summary':
print 'WARNING: cannot have histogram named summary, encountered in %s' % rdir.GetPath()
continue
fpath = rdir.GetPath().replace(ignorepath, '')
name = (fpath + '/' + key.GetName()).lstrip('/')
#print rdir.GetPath(), ignorepath, name
if hists:
match = False
for hist in hists:
if hist.match(name):
match = True
if not match: continue
elif regex:
if not regex.match(name): continue
dqpargs = { 'id' : ('' if fpath else 'top_level/') + name,
'algorithm': repeatalgorithm,
'inputdatasource': (startpath + '/' if startpath else '') + name,
'algorithmparameters': algorithmparameters,
#'thresholds': chi2thresh,
'thresholds': thresh,
}
if refs:
dqpargs['references'] = refs
dqpar = dqregion.newDQParameter( **dqpargs)
drawstrs = []
if not options.normalize: drawstrs.append('NoNorm')
if options.logy and (cl.startswith('TH1') or cl.startswith('TProfile')): drawstrs.append('LogY')
if options.logy and cl.startswith('TH2'): drawstrs.append('LogZ')
if cl.startswith('TH1'): drawstrs.append(displaystring)
if cl.startswith('TProfile'): drawstrs.append(displaystring)
if cl.startswith('TH2'): drawstrs.append(displaystring2D)
if options.scaleref != 1: drawstrs.append('ScaleRef=%f' % options.scaleref)
if options.ratio: drawstrs.append('RatioPad')
#if options.ratio: drawstrs.append('Ref2DSignif')
if options.ratio2D: drawstrs.append('Ref2DRatio')
if options.ratiorange is not None:
drawstrs.append('delta(%f)' % options.ratiorange)
drawstrs.append('DataName=%s' % options.title)
dqpar.addAnnotation('display', ','.join(drawstrs))
elif rcl.InheritsFrom('TDirectory'):
newregion = dqregion.newDQRegion( key.GetName(), algorithm=worst )
recurse(key.ReadObj(), newregion, ignorepath, refs, displaystring, displaystring2D, regex, startpath, hists)
def prune(dqregion):
"""
returns True if we should kill this node
False if we should not
"""
params = dqregion.getDQParameters()
if params == None:
params = []
subregions = dqregion.getSubRegions()
if subregions == None:
subregions = []
else:
subregions = subregions[:]
# kill subregions
for sr in subregions:
if sr == None:
continue
if prune(sr):
dqregion.delRelation('DQRegions', sr)
subregions = dqregion.getSubRegions()
if subregions == None:
subregions = []
if len(subregions) + len(params) == 0:
return True
else:
return False
def paramcount(dqregion):
params = dqregion.getDQParameters()
if params == None:
params = []
subregions = dqregion.getSubRegions()
if subregions == None:
subregions = []
return len(params) + sum([paramcount(region) for region in subregions])
def process(infname, confname, options, refs=None):
import re
f = ROOT.TFile.Open(infname, 'READ')
if not f.IsOpen():
print 'ERROR: cannot open %s' % infname
return
top_level = DQRegion(id='topRegion',algorithm=worst)
print 'Building tree...'
    dqrs = None
    if refs:
        refpairs = refs.split(',')
        try:
            refdict = dict(_.split(':') for _ in refpairs)
        except Exception, e:
            print e
        dqrs = [DQReference(reference='%s:same_name' % v, id=k)
                for k, v in refdict.items()]
displaystring = options.drawopt
if options.refdrawopt:
displaystring += ',' + (','.join('DrawRef=%s' % _ for _ in options.refdrawopt.split(',')))
displaystring2D = options.drawopt2D
if options.drawrefopt2D:
displaystring2D += ',' + (','.join('DrawRef2D=%s' % _ for _ in options.drawrefopt2D.split(',')))
if options.startpath:
topindir = f.Get(options.startpath)
if not topindir:
raise ValueError("Path %s doesn't exist in input file" % options.startpath)
topindirname = f.GetPath() + options.startpath.strip('/')
startpath = options.startpath.strip('/')
else:
topindir = f
topindirname = f.GetPath()
startpath = None
hists = []
if options.histlistfile:
hists = [re.compile(line.rstrip('\n')) for line in open(options.histlistfile)]
if options.pathregex: print "histlistfile given, pathregex is ignored"
recurse(topindir, top_level, topindirname, dqrs, displaystring, displaystring2D,
re.compile(options.pathregex), startpath, hists)
print 'Pruning dead branches...'
prune(top_level)
pc = paramcount(top_level)
sublevel = top_level.getSubRegions()[:]
for x in sublevel:
top_level.delRelation('DQRegions', x)
print 'Writing output'
writeHanConfiguration( filename = confname , roots = sublevel)
return pc
def super_process(fname, options):
import shutil, os, sys, contextlib
import ROOT
han_is_found = (ROOT.gSystem.Load('libDataQualityInterfaces') != 1)
if not han_is_found:
print 'ERROR: unable to load offline DQMF; unable to proceed'
sys.exit(1)
bname = os.path.basename(fname)
hanconfig = None
hanhcfg = None
hanoutput = None
    failed = False
    prebuilt_hcfg = False  # no precompiled hcfg is ever supplied by this script
@contextlib.contextmanager
def tmpdir():
import tempfile
td = tempfile.mkdtemp()
yield td
shutil.rmtree(td)
with tmpdir() as hantmpdir:
try:
print '====> Processing file %s' % (fname)
print '====> Generating han configuration file'
hantmpinput = os.path.join(hantmpdir, bname)
shutil.copyfile(fname, hantmpinput)
haninput = hantmpinput
hanconfig = os.path.join(hantmpdir, 'han.config')
rv = process(hantmpinput, hanconfig, options, options.reffile)
#shutil.copy(hanconfig, os.getcwd())
# bad hack. rv = number of histogram nodes
if rv == 0:
print 'No histograms to display; exiting with code 0'
sys.exit(0)
print '====> Compiling han configuration'
hanhcfg = os.path.join(hantmpdir, 'han.hcfg')
ROOT.dqi.HanConfig().AssembleAndSave( hanconfig, hanhcfg )
print '====> Executing han'
import resource
memlimit = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (memlimit[1], memlimit[1]))
hanoutput = haninput.rpartition('.')[0] + '_han.root'
rv = ROOT.dqi.HanApp().Analyze( hanhcfg, haninput, hanoutput )
if rv != 0:
raise Exception('failure in han')
print '====> Dumping web display output'
from DataQualityUtils import handimod
handimod.handiWithComparisons( options.title,
hanoutput,
options.outdir,
'', False, False,
'https://atlasdqm.web.cern.ch/atlasdqm/js/',
3 if options.jsRoot else 1)
## print '====> Copying to', hantargetdir
## hantargetfile = os.path.join(hantargetdir, 'out_han.root')
## if not os.access(hantargetdir, os.W_OK):
## try:
## os.makedirs(hantargetdir)
## except Exception, e:
## print 'Unable to create %s for some reason: %s' % (hantargetdir, e)
## raise Exception('Error during execute')
## shutil.copy2(hanoutput, hantargetfile)
## print '====> Cleaning up'
os.unlink(hanoutput)
except Exception, e:
print e
if 'canonical format' not in str(e):
failed = True
finally:
try:
if not prebuilt_hcfg:
os.unlink(hantmpinput)
os.unlink(hanconfig)
os.unlink(hanhcfg)
os.unlink(hanoutput)
except:
pass
return not failed
if __name__=="__main__":
import sys, optparse, shutil, os
os.environ['TDAQ_ERS_NO_SIGNAL_HANDLERS']='1'
parser = optparse.OptionParser(usage='usage: %prog [options] inputfile')
parser.add_option('--reffile', default=None,
help='Reference files to use. Must have same structure as inputfile. Format: tag1:reffile1.root,tag2:reffile2.root,...')
parser.add_option('--outdir', default='./handi',
                      help='Directory for web output')
parser.add_option('--normalize', default=False, action='store_true',
help='Normalize reference histograms for display')
parser.add_option('--title', default='Summary',
help='Title for histograms being tested')
parser.add_option('--drawopt', default='Draw=PE',
help='Draw options for tested histograms (only use if you know what you are doing)')
parser.add_option('--refdrawopt',
help='ROOT Draw option for reference histograms (e.g. HIST)')
parser.add_option('--drawopt2D', default='Draw=COLZ',
help='Draw options for tested TH2 histograms (only use if you know what you are doing)')
parser.add_option('--drawrefopt2D', default=None,
help='Draw options for reference TH2 histograms. If nothing is specified, no 2D reference histograms are drawn. If you want to draw both test and reference histo, recommended settings are --drawopt2D="Draw=BOX" --drawrefopt2D="COLZ"')
parser.add_option('--logy', action='store_true',
help='Display on log Y scale')
parser.add_option('--pathregex', default='.*',
help='Specify regex to match histograms, e.g. "(Btag|Jets)"')
parser.add_option('--startpath', default=None,
help='Start from this subdirectory of the file')
parser.add_option('--histlistfile',
help='text file with a list of regexes/histogram names')
parser.add_option('--scaleref', type=float, default=1,
help='Scale references by this value')
parser.add_option('--Kolmogorov', default=False, action='store_true',
help='Run Kolmogorov test instead of Chi2 test')
parser.add_option('--ratio', default=False, action='store_true',
help='Draw histograms with ratio plots')
parser.add_option('--ratio2D', default=False, action='store_true',
help='Draw 2D histograms with ratio plots')
parser.add_option('--jsRoot',action='store_true', default=False,
help="make interactive jsRoot displays")
parser.add_option('--ratiorange', default=None, type=float,
help='set range for ratio plots (as delta to 1.0)')
options, args = parser.parse_args()
if not 1 == len(args):
parser.print_help()
sys.exit(1)
fname = args[0]
if options.Kolmogorov:
algorithmparameters = [DQAlgorithmParameter('AuxAlgName--KolmogorovTest_Prob', 1),
DQAlgorithmParameter('RepeatAlgorithm--ResultsNEntries', 1)]
thresh = make_thresholds('P', 0.05, 0.01, 'pThresholds')
rv = super_process(fname, options)
if rv == True:
sys.exit(0)
else:
sys.exit(1)
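# Example invocation (illustrative; file names are placeholders):
#   physval_make_web_display.py --reffile ref:baseline.root --outdir ./handi \
#       --title MySample NTUP_PHYSVAL.root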
|
[
"[email protected]"
] | |
5583ff3947649c6f8c30dd9edb02cad147b9a2fe
|
60543d88cb19e7a264cf30622cb45ea039b243f8
|
/mnist/keras/mnist_train.py
|
07627f828c25aef98e42601ab9c313a26050aa83
|
[] |
no_license
|
andrewsmedina/kaggle
|
90b3a9bb6945b839d75ee4a9bb52d76b4b5c9bef
|
51bc2fad9e6b57e00ed50e36897870cdbead6372
|
refs/heads/master
| 2021-01-10T22:19:58.366222 | 2018-07-29T16:39:55 | 2018-07-29T16:39:55 | 69,138,999 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,973 |
py
|
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
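# small convnet: two conv layers, max-pooling, dropout, then a dense classifier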
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
model.save('my_model.h5')
|
[
"[email protected]"
] | |
0819c7e1251822f165d64e5c64fa8b790cb86716
|
bb4dc40ec0b62e5d2fc3ce1234013aebd4e648d5
|
/src/modules/customised/payroll_test_2/payroll_currupt/hr_payroll_arrear/hr_payroll_arrear.py
|
e84abac33ee604e572e2008b06091712ae5e3934
|
[] |
no_license
|
kakamble-aiims/work
|
ba6cbaf4c525ff7bc28d0a407f16c829d0c35983
|
cd392bf0e80d71c4742568e9c1dd5e5211da56a9
|
refs/heads/master
| 2022-04-02T14:45:58.515014 | 2019-12-31T14:00:51 | 2019-12-31T14:00:51 | 199,015,147 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 688 |
py
|
from trytond.pool import Pool, PoolMeta
__all__ = [
'HrSalaryRule',
]
class HrSalaryRule(metaclass=PoolMeta):
__name__ = 'hr.salary.rule'
def calculate_OTA(self, payslip, employee, contract):
amount = 0
pool = Pool()
ota = pool.get('hr.allowance.ota')
ot_allowance = ota.search([
('employee', '=', employee),
('salary_code', '=', employee.salary_code),
#('state', '=', 'approve')
])
        if ot_allowance:
employee_record = ot_allowance[0]
amount = employee_record.amount
        return amount
|
[
"[email protected]"
] | |
6c87fe2f66b078f6b284dbe97e0a53bba2d75771
|
35c8d8762a87f9adf8964d1ec50c7ed69583d92e
|
/lecturer/migrations/0004_auto_20171213_2143.py
|
f91ccaab02e610d73c024ecf38f583e4da85ef7a
|
[
"MIT"
] |
permissive
|
VirginiaNdungu1/Grading-System
|
f15b9c1346986fc10336ab75938079393331330e
|
7f30a81cf2b1717c9f0a5a7f5814404c646eee9d
|
refs/heads/master
| 2021-05-06T06:34:39.719493 | 2017-12-24T14:30:04 | 2017-12-24T14:30:04 | 113,868,441 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 877 |
py
|
# Generated by Django 2.0 on 2017-12-13 18:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lecturer', '0003_auto_20171213_1916'),
]
operations = [
migrations.AddField(
model_name='profile',
name='roles',
field=models.CharField(blank=True, choices=[('STUDENT', 'Student'), ('TEACHER', 'Teacher'), ('DEAN', 'Dean')], max_length=30),
),
migrations.AlterField(
model_name='profile',
name='gender',
field=models.CharField(blank=True, choices=[('FEMALE', 'F'), ('MALE', 'M'), ('NONE', 'None')], default='NONE', max_length=30),
),
migrations.AlterField(
model_name='profile',
name='reg_date',
field=models.DateTimeField(auto_now_add=True),
),
]
|
[
"[email protected]"
] | |
99a93a979b8bff83507c7790ad34b390b3361f5b
|
3f4edccfbcada494673214bb4556dea5098a7af3
|
/dataset/binary_train.py
|
9321d1f73510ec2044f114ff440353e7bd602448
|
[] |
no_license
|
Below0/konlpy-nf-analyzer
|
a79b401c641586fd191f518286c2aa54f9f4cf25
|
e6abf1181efccdad15bfd1119320f70d709ab440
|
refs/heads/master
| 2023-01-21T11:29:29.053651 | 2020-11-29T09:54:09 | 2020-11-29T09:54:09 | 315,824,970 | 0 | 0 | null | 2020-11-25T11:34:16 | 2020-11-25T04:05:55 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 3,982 |
py
|
import codecs
import csv
import pandas as pd
import numpy as np
import os
import sys
import urllib.request
from urllib.parse import *
import requests
from bs4 import BeautifulSoup
#from kafka import KafkaProducer
import json
import re
import datetime
from konlpy.tag import *
import konlpy
from keras.preprocessing.text import Tokenizer
from keras_preprocessing.text import tokenizer_from_json
from keras.models import model_from_json
def below_threshold_len(max_len, nested_list):
cnt = 0
for s in nested_list:
if len(s) <= max_len:
cnt = cnt + 1
    print('Percentage of samples with length <= %s: %s' % (max_len, (cnt / len(nested_list)) * 100))
dataset = pd.read_csv('./dataset3.csv')
train_data = dataset[:11000]
test_data = dataset[11000:]
stopwords = ['.', ',', '', '의', '가', '이', '은', '들', '는', '좀', '잘', '걍', '과', '도', '를', '으로', '자', '에', '와', '한', '하다']
# In[72]:
okt = Mecab()
hangul = re.compile('[^ ㄱ-ㅣ가-힣]+')
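# this regex matches anything that is NOT Hangul or a space, so sub('') strips
# everything except Korean text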
X_train = []
for sentence in train_data['content']:
    temp_X = okt.morphs(sentence)  # tokenize
token_X = []
for word in temp_X:
temp = hangul.sub('', word)
if temp == '' or temp in stopwords:
continue
token_X.append(temp)
X_train.append(token_X)
X_test = []
for sentence in test_data['content']:
    temp_X = okt.morphs(sentence)  # tokenize
token_X = []
for word in temp_X:
temp = hangul.sub('', word)
if temp == '' or temp in stopwords:
continue
token_X.append(temp)
X_test.append(token_X)
print('tokenizing complete!')
# In[73]:
max_words = 50000
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
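# texts_to_sequences maps each token to an integer id; tokens outside the
# max_words most frequent ones are silently dropped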
tokenizer_json = tokenizer.to_json()
with open('tokenizer3.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(tokenizer_json, ensure_ascii=False))
y_train = []
y_test = []
for i in range(len(train_data['label'])):
if train_data['label'].iloc[i] == 1:
y_train.append([1])
elif train_data['label'].iloc[i] == -1:
y_train.append([0])
for i in range(len(test_data['label'])):
if test_data['label'].iloc[i] == 1:
y_test.append([1])
elif test_data['label'].iloc[i] == -1:
y_test.append([0])
y_train = np.array(y_train)
y_test = np.array(y_test)
print('Max review length:', max(len(l) for l in X_train))
print('Mean review length:', sum(map(len, X_train)) / len(X_train))
below_threshold_len(200, X_train)
# In[77]:
from keras.layers import Embedding, Dense, LSTM
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
max_len = 200  # pad/truncate every sequence to length 200
X_train = pad_sequences(X_train, maxlen=max_len)
X_test = pad_sequences(X_test, maxlen=max_len)
model = Sequential()
model.add(Embedding(max_words, 128))
model.add(LSTM(128))
model.add(Dense(1, activation='sigmoid'))
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=7)
mc = ModelCheckpoint('best_model.h5', monitor='val_acc', mode='max', verbose=1, save_best_only=True)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(X_train, y_train, epochs=15, callbacks=[es, mc], batch_size=1000, validation_split=0.1)
model_json = model.to_json()
with open("model3.json", "w") as json_file :
json_file.write(model_json)
# In[78]:
with open("model3.json", "r") as json_file:
loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("best_model.h5")
loaded_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
print(model.evaluate(X_test, y_test)[1] * 100)
|
[
"[email protected]"
] | |
91759c4c00968d80511f1adeb92b86e2bbe92547
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2169/60615/257442.py
|
8f2708f19202be73ba8ddf22bf44ef88570c0474
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 606 |
py
|
time=int(input())
result=[]
opset=['+','-','*','/']
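# evaluate each postfix expression with a simple operand stack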
while time>0:
    postfix = list(input())  # e.g. "231*+9-"
num=[]
for item in postfix:
if item not in opset:
num.append(int(item))
else:
if item=='+':
temp=num[-2]+num[-1]
elif item=='-':
temp=num[-2]-num[-1]
elif item=='*':
temp=num[-2]*num[-1]
else:
temp=num[-2]/num[-1]
del num[-2:]
num.append(temp)
result.append(num[0])
time=time-1
for res in result:
print(res)
|
[
"[email protected]"
] | |
e88db4589a9f0746f6feef2ebb164eba6e4790e2
|
62974b03a14008f950db0c68d5962311a3401d3b
|
/medium/slidewindow/test_209_Minimum_Size_Subarray_Sum.py
|
0932a43778243b2896f0388d188dcf9c386f90e7
|
[] |
no_license
|
wuxu1019/leetcode_sophia
|
818f39e8324aaf2b588150b1f7edbc2897f64935
|
0e99f9a5226507706b3ee66fd04bae813755ef40
|
refs/heads/master
| 2018-10-13T13:58:29.762836 | 2018-07-24T04:20:23 | 2018-07-24T04:20:23 | 111,624,101 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,020 |
py
|
"""
Given an array of n positive integers and a positive integer s, find the minimal length of a contiguous subarray of which the sum ≥ s. If there isn't one, return 0 instead.
Example:
Input: [2,3,1,2,4,3], s = 7
Output: 2
Explanation: the subarray [4,3] has the minimal length under the problem constraint.
"""
import bisect


class Solution(object):
def minSubArrayLen_bruteforce(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
ans = float('INF')
record = [nums[0]]
for i in range(1, len(nums)):
record.append(record[i-1] + nums[i])
for i in range(len(nums)):
for j in range(i, len(nums)):
sm = record[j] - record[i] + nums[i]
if sm >= s:
ans = min(ans, j - i + 1)
return ans if ans != float('INF') else 0
def minSubArrayLen_binarysearch(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
ans = float('INF')
record = [nums[0]]
for i in range(1, len(nums)):
record.append(record[i - 1] + nums[i])
for i in range(0, len(nums)):
if i == 0:
to_find = s
else:
to_find = s + record[i - 1]
j = bisect.bisect_left(record, to_find, i)
if j < len(nums):
ans = min(ans, j - i + 1)
return ans if ans != float('INF') else 0
def minSubArrayLen_onepass(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtype: int
"""
j = 0
sm = 0
ans = float('INF')
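        # sliding window: extend the right edge with i, then shrink from the
        # left (j) while the window sum still meets the target s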
for i in range(len(nums)):
sm += nums[i]
while sm >= s:
ans = min(ans, i - j + 1)
sm -= nums[j]
j += 1
return ans if ans != float('INF') else 0
|
[
"[email protected]"
] | |
4c75404055009fd4e4fe3fb26886920537e95665
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/3855.py
|
20efe8a1143906981815f06e93781546149a4733
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,522 |
py
|
import sys
class InputFileParser(object):
def __init__(self, filename):
self.test_cases = []
with open(filename, 'r') as in_f:
self.test_case_count = int(in_f.readline())
for i in xrange(1, self.test_case_count + 1):
tc = TestCase(i)
tc.last = int(in_f.readline())
self.test_cases.append(tc)
class TestCase(object):
def __init__(self, index):
self.index = index
self.last = 0
self.result = 0
def to_digits(number):
return map(lambda digit_str: int(digit_str), str(number))
def to_number(digits):
return int(''.join(map(str, digits)))
def drop_last(digits):
result = digits[:-1]
result[-1] = result[-1] - 1
return result
def is_tidy(digits):
previous = 0
for digit in digits:
if digit < previous:
return False
previous = digit
return True
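# Greedily drop trailing digits (decrementing the new last digit) until the
# remaining prefix is tidy, then pad with 9s to restore the original length.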
def last_tidy(last):
left = to_digits(last)
right = []
while not is_tidy(left):
right.append(9)
left = drop_last(left)
return to_number(left + right)
if __name__ == '__main__':
if len(sys.argv) < 3:
exit('input and output file not specified!')
parser = InputFileParser(sys.argv[1])
with open(sys.argv[2], 'w') as out_f:
for tc in parser.test_cases:
tc.result = last_tidy(tc.last)
print 'Case #{0}: {1}'.format(tc.index, tc.result)
out_f.write('Case #{0}: {1}\n'.format(tc.index, tc.result))
|
[
"[email protected]"
] | |
8433440edf9c893209f3b5aff55141b655b9b5d9
|
ae5a285b64731886c736a076b9cb35b6f5e18266
|
/blogs/mainapp/urls.py
|
b25f20645ded6357044666bb29924b915c61d538
|
[] |
no_license
|
MichaelDc86/test_task_django
|
0c398af6c748532bcdc217f9dd23acdfd48ce0c5
|
04a58a29f6786037e310329a9b28a13efbfe2d41
|
refs/heads/master
| 2023-04-26T14:48:32.183376 | 2019-06-28T13:29:21 | 2019-06-28T13:29:21 | 193,847,278 | 0 | 0 | null | 2023-04-21T20:33:20 | 2019-06-26T06:57:04 |
JavaScript
|
UTF-8
|
Python
| false | false | 732 |
py
|
from django.contrib.auth.views import LogoutView
from django.urls import re_path
import mainapp.views as mainapp
app_name = 'mainapp'
urlpatterns = [
re_path(r'^$', mainapp.BlogListView.as_view(), name='blog_list'),
re_path(r'login/$', mainapp.BloggerLogin.as_view(), name='login'),
re_path(r'register/$', mainapp.BloggerRegister.as_view(), name='register'),
re_path(r'logout/$', LogoutView.as_view(), name='logout'),
re_path(r'post_create/$', mainapp.PostblogCreateView.as_view(), name='post_create'),
re_path(r'post_update/(?P<pk>\d+)/$', mainapp.PostBlogUpdateView.as_view(), name='post_update'),
re_path(r'post_delete/(?P<pk>\d+)/$', mainapp.PostBlogDeleteView.as_view(), name='post_delete'),
]
|
[
"[email protected]"
] | |
257e6320821547e22d65ca68d945fad58dc00cfe
|
276c86a451c4110ba0885dbe8509d46f23c21715
|
/esp32-micropython/examples/oled_thermometer.py
|
e94deebb1ab3bf42ebb3dc5fddf9718e04a3d88f
|
[] |
no_license
|
vtt-info/octopuslab
|
055c9bfdc1466a6e5acf90a3cd0db2826e72dee7
|
3d20933c05bae3eec4d0c033f228bde369e46e07
|
refs/heads/master
| 2022-07-27T14:19:16.386241 | 2020-05-16T13:50:21 | 2020-05-16T13:50:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 456 |
py
|
# octopusLAB example - 2019
# simple example: dallas thermometer and oled display
from time import sleep
from util.octopus import oled_init
from util.iot import Thermometer
from util.display_segment import threeDigits
print("init > ")
ts = Thermometer()
oled = oled_init()
print("start > ")
while True:
temp = ts.get_temp()
print(temp)
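    # scale by 10 so one decimal place of the reading fits the 3-digit display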
temp10 = int(temp * 10)
threeDigits(oled, temp10, True, True)
sleep(1)
|
[
"[email protected]"
] | |
b79572bf6408b286a553a38cf007ee7d293068da
|
b2db386a35e167dd67d6de90d95c06d5c2ed91cd
|
/403_FrogJump.py
|
fcab3104b67560cffd6596e6451af41ef135978a
|
[] |
no_license
|
rohitmungre/leetcode
|
9edb1b8b0cd714eb1a5e1fa847f2e17c455fd624
|
d49836b2b46a980f073bb9a6f2e47c4a903e48ac
|
refs/heads/master
| 2020-08-07T16:55:38.699188 | 2020-03-12T11:00:13 | 2020-03-12T11:00:13 | 213,531,119 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,977 |
py
|
import copy
class Solution(object):
def canCross(self, stones):
"""
:type stones: List[int]
:rtype: bool
"""
if len(stones) == 0:
return False
return self.cc_dp(stones, 0, [], {})
def cc_dp(self, stones, idx, steps, memo):
if idx == len(stones)-1:
return True
if idx == 0:
if stones[1] != 1:
return False
k = 0
csteps = copy.copy(steps)
csteps.append(1)
return self.cc_dp(stones, 1 , csteps, memo)
else:
k = steps[-1]
mstr = str(idx) + '~' + str(k)
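        # memo key: (stone index, size of the last jump); these two values
        # fully determine the reachable subproblem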
if mstr in memo:
return memo[mstr]
stm1 = stones[idx] + k-1
st0 = stones[idx] + k
stp1 = stones[idx] + k+1
if stm1 not in stones and stp1 not in stones and st0 not in stones:
memo[mstr] = False
return False
if stp1 not in stones and st0 not in stones and k==1:
memo[mstr] = False
return False
if stp1 in stones:
csteps = copy.copy(steps)
csteps.append(k+1)
rp = self.cc_dp(stones, stones.index(stp1), csteps, memo)
if rp:
memo[mstr] = True
return True
if stm1 in stones and k !=1 :
csteps = copy.copy(steps)
csteps.append(k-1)
rm = self.cc_dp(stones, stones.index(stm1), csteps, memo)
if rm:
memo[mstr] = True
return True
if st0 in stones and k !=0 :
csteps = copy.copy(steps)
csteps.append(k)
r0 = self.cc_dp(stones, stones.index(st0), csteps, memo)
if r0:
memo[mstr] = True
return True
memo[mstr] = False
return memo[mstr]
|
[
"[email protected]"
] | |
db2c9ab6255eab8ada3b3adeee266a9885a90be2
|
31a928cff4960236923b6bc3b68e34bb2f46f470
|
/Speculator/speculator/utils/stats.py
|
b5694a5daf9476ee9825d50ef9478a2dc65ded64
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
webclinic017/ml_monorepo
|
707df2afd2f986eb0721d26430e6135c917817c6
|
945f0a83d6b94282c547bb6f4805f3381ad9c16a
|
refs/heads/master
| 2021-10-19T21:02:53.322944 | 2019-02-19T20:58:51 | 2019-02-23T20:06:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 443 |
py
|
"""
Various math functions used throughout Speculator
"""
def avg(vals, count=None):
""" Returns the average value
Args:
vals: List of numbers to calculate average from.
count: Int of total count that vals was part of.
Returns:
Float average value throughout a count.
"""
    if count is None:
        count = len(vals)
    return float(sum(vals)) / count
|
[
"[email protected]"
] | |
209b0f1761fc67877e597f756011b8176fe26c31
|
853d4cec42071b76a80be38c58ffe0fbf9b9dc34
|
/venv/Lib/site-packages/pandas/core/missing.py
|
84bd72cb184263e97bf91d22e1eb5c4af5de2c80
|
[] |
no_license
|
msainTesting/TwitterAnalysis
|
5e1646dbf40badf887a86e125ef30a9edaa622a4
|
b1204346508ba3e3922a52380ead5a8f7079726b
|
refs/heads/main
| 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,162 |
py
|
"""
Routines for filling missing data.
"""
import numpy as np
from pandas._libs import algos, lib
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.common import (
ensure_float64,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer,
is_integer_dtype,
is_numeric_v_string_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import isna
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask = False
else:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if is_scalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask |= False
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isna(arr)
else:
mask |= isna(arr)
# GH 21977
if mask is None:
mask = np.zeros(arr.shape, dtype=bool)
return mask
def clean_fill_method(method, allow_nearest=False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
return method
def clean_interp_method(method, **kwargs):
order = kwargs.get("order")
valid = [
"linear",
"time",
"index",
"values",
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"polynomial",
"krogh",
"piecewise_polynomial",
"pchip",
"akima",
"spline",
"from_derivatives",
]
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or " "polynomial.")
if method not in valid:
raise ValueError(
"method must be one of {valid}. Got '{method}' "
"instead.".format(valid=valid, method=method)
)
return method
def interpolate_1d(
xvalues,
yvalues,
method="linear",
limit=None,
limit_direction="forward",
limit_area=None,
fill_value=None,
bounds_error=False,
order=None,
**kwargs
):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
# have to call np.asarray(xvalues) since xvalues could be an Index
# which can't be mutated
result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == "time":
if not getattr(xvalues, "is_all_dates", None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError(
"time-weighted interpolation only works "
"on Series or DataFrames with a "
"DatetimeIndex"
)
method = "values"
valid_limit_directions = ["forward", "backward", "both"]
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
msg = "Invalid limit_direction: expecting one of {valid!r}, " "got {invalid!r}."
raise ValueError(
msg.format(valid=valid_limit_directions, invalid=limit_direction)
)
if limit_area is not None:
valid_limit_areas = ["inside", "outside"]
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError(
"Invalid limit_area: expecting one of {}, got "
"{}.".format(valid_limit_areas, limit_area)
)
# default limit is unlimited GH #16282
if limit is None:
# limit = len(xvalues)
pass
elif not is_integer(limit):
raise ValueError("Limit must be an integer")
elif limit < 1:
raise ValueError("Limit must be greater than 0")
from pandas import Series
ys = Series(yvalues)
# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
start_nans = set(range(ys.first_valid_index()))
end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
mid_nans = all_nans - start_nans - end_nans
# Like the sets above, preserve_nans contains indices of invalid values,
# but in this case, it is the final set of indices that need to be
# preserved as NaN after the interpolation.
# For example if limit_direction='forward' then preserve_nans will
# contain indices of NaNs at the beginning of the series, and NaNs that
    # are more than 'limit' away from the prior non-NaN.
# set preserve_nans based on direction using _interp_limit
if limit_direction == "forward":
preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
elif limit_direction == "backward":
preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
else:
# both directions... just use _interp_limit
preserve_nans = set(_interp_limit(invalid, limit, limit))
# if limit_area is set, add either mid or outside indices
# to preserve_nans GH #16284
if limit_area == "inside":
# preserve NaNs on the outside
preserve_nans |= start_nans | end_nans
elif limit_area == "outside":
# preserve NaNs on the inside
preserve_nans |= mid_nans
    # sort preserve_nans and convert to list
preserve_nans = sorted(preserve_nans)
xvalues = getattr(xvalues, "values", xvalues)
yvalues = getattr(yvalues, "values", yvalues)
result = yvalues.copy()
if method in ["linear", "time", "index", "values"]:
if method in ("values", "index"):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if needs_i8_conversion(inds.dtype.type):
inds = inds.view(np.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
result[preserve_nans] = np.nan
return result
sp_methods = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"krogh",
"spline",
"polynomial",
"from_derivatives",
"piecewise_polynomial",
"pchip",
"akima",
]
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
result[invalid] = _interpolate_scipy_wrapper(
inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order,
**kwargs
)
result[preserve_nans] = np.nan
return result
def _interpolate_scipy_wrapper(
x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs
):
"""
Passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
    the list in clean_interp_method.
"""
extra = "{method} interpolation requires SciPy.".format(method=method)
import_optional_dependency("scipy", extra=extra)
from scipy import interpolate
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
"barycentric": interpolate.barycentric_interpolate,
"krogh": interpolate.krogh_interpolate,
"from_derivatives": _from_derivatives,
"piecewise_polynomial": _from_derivatives,
}
if getattr(x, "is_all_dates", False):
        # GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype("i8"), new_x.astype("i8")
if method == "pchip":
try:
alt_methods["pchip"] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError(
"Your version of Scipy does not support " "PCHIP interpolation."
)
elif method == "akima":
alt_methods["akima"] = _akima_interpolate
interp1d_methods = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"polynomial",
]
if method in interp1d_methods:
if method == "polynomial":
method = order
terp = interpolate.interp1d(
x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error
)
new_y = terp(new_x)
elif method == "spline":
# GH #10633, #24014
if isna(order) or (order <= 0):
raise ValueError(
"order needs to be specified and greater than 0; "
"got order: {}".format(order)
)
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
        list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
        intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
        The result, of length R or length M or M by R.
"""
from scipy import interpolate
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif interpolate._isscalar(der):
return P(x, der=der)
else:
return [P(x, nu) for nu in der]
def interpolate_2d(
values, method="pad", axis=0, limit=None, fill_value=None, dtype=None
):
"""
    Perform an actual interpolation of values; values will be made 2-d if
    needed. Fills inplace, returns the result.
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == "pad":
values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(
backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype)
)
# reshape back
if ndim == 1:
values = values[0]
return values
def _cast_values_for_fillna(values, dtype):
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
# TODO: for int-dtypes we make a copy, but for everything else this
# alters the values in-place. Is this intentional?
if (
is_datetime64_dtype(dtype)
or is_datetime64tz_dtype(dtype)
or is_timedelta64_dtype(dtype)
):
values = values.view(np.int64)
elif is_integer_dtype(values):
# NB: this check needs to come after the datetime64 check above
values = ensure_float64(values)
return values
def _fillna_prep(values, mask=None, dtype=None):
# boilerplate for pad_1d, backfill_1d, pad_2d, backfill_2d
if dtype is None:
dtype = values.dtype
if mask is None:
# This needs to occur before datetime/timedeltas are cast to int64
mask = isna(values)
values = _cast_values_for_fillna(values, dtype)
mask = mask.view(np.uint8)
return values, mask
def pad_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.pad_inplace(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.backfill_inplace(values, mask, limit=limit)
return values
def pad_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.pad_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.backfill_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {"pad": pad_1d, "backfill": backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def _interp_limit(invalid, fw_limit, bw_limit):
"""
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : boolean ndarray
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
f_idx = set()
b_idx = set()
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = set(np.where(windowed)[0] + limit) | set(
np.where((~invalid[: limit + 1]).cumsum() == 0)[0]
)
return idx
if fw_limit is not None:
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit is not None:
if bw_limit == 0:
# then we don't even need to care about backwards
# just use forwards
return f_idx
else:
b_idx = list(inner(invalid[::-1], bw_limit))
b_idx = set(N - 1 - np.asarray(b_idx))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
def _rolling_window(a, window):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
|
[
"[email protected]"
] | |
15e688993d21c158b79180b3621f8f9c92e788ca
|
f5c3841a08c3faa1818d3ee210c8b9921dc9499d
|
/sql/selection.py
|
158e8c8c265a604e3ee97f88126eb69744e3da20
|
[] |
no_license
|
villancikos/realpython-book2
|
a4e74b51fe1d3a8e5af206c2938ff4966ef00df6
|
6c9a2ef714531f1163f3c78c80fad335661dacf2
|
refs/heads/master
| 2016-09-06T10:06:49.227106 | 2014-09-22T18:56:58 | 2014-09-22T18:56:58 | 23,493,659 | 1 | 1 | null | 2014-09-19T23:35:40 | 2014-08-30T14:44:52 |
Python
|
UTF-8
|
Python
| false | false | 304 |
py
|
# SELECT statement
import sqlite3
with sqlite3.connect('new.db') as connection:
c = connection.cursor()
#for row in c.execute("SELECT firstname, lastname from employees"):
# print row
c.execute("SELECT firstname, lastname from employees")
rows = c.fetchall()
for r in rows:
        print r[0], r[1]
|
[
"[email protected]"
] | |
3e8ef970d06299d87bd03604d228bc22a57da735
|
283bbf2ce575ea72010e9823907285b08d20fce4
|
/breathecode/tests/mocks/screenshotmachine/requests_mock.py
|
bc661c61e3d798348dce1188edecdeee67e37d3f
|
[] |
no_license
|
AnMora/apiv2
|
c084ffcb4ff5b7a0a01dac8fca26f4f4c37aad97
|
fa3b3f0ce4a069facdecd18e133c7b4222a0004a
|
refs/heads/master
| 2023-05-19T23:00:34.257230 | 2021-06-08T21:17:56 | 2021-06-08T21:17:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 563 |
py
|
"""
Requests mock
"""
class ResponseMock():
"""Simutate Response to be used by mocks"""
status_code = None
data = None
content = None
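    # string payloads are exposed as raw .content; anything else via .json()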
def __init__(self, status_code=200, data=''):
self.status_code = status_code
if isinstance(data, str):
self.content = data
else:
self.data = data
def json(self) -> dict:
"""Convert Response to JSON"""
return self.data
def get_mock(url: str, stream=False):
"""Requests get mock"""
return ResponseMock(data='ok', status_code=200)
|
[
"[email protected]"
] | |
cc7e44a17c3f456e0f8fa5d7f81a928fcb8c2e17
|
d680f545562589a34224f3a422c00c1697c84c5d
|
/mutasi_aset_2021/perkim/pm_perkim_ke_dinas_koperasi/pm_perkim_ke_dinas_koperasi_insert.py
|
43c13c1e420a8f0068401f47303524a0b009cc78
|
[
"BSD-2-Clause"
] |
permissive
|
muntaza/catatan_openaset_balangan
|
9362d0beb4de6e0abc3f4ec5ebb63a5aaff66294
|
01d1eb79ea8f203d231956d74b88e39789d54429
|
refs/heads/master
| 2022-02-18T10:27:21.494091 | 2022-02-14T10:19:50 | 2022-02-14T10:19:50 | 233,332,577 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,923 |
py
|
from peralatanmesin.models import PeralatanMesin
from gedungbangunan.clone_object import clone_object
# Primary keys of the PeralatanMesin records to clone, preserved in the
# original order of the hand-written calls.
PM_PERKIM_KE_DINAS_KOPERASI_PKS = [
    28772, 28773, 28774, 28776, 28777, 28778, 28779, 28781, 28782, 28783,
    28784, 28830, 28832, 28833, 28834, 28838, 28841, 28843, 28844, 28845,
    28846, 28847, 28848, 28849, 52028, 71070, 88871, 88873, 88874, 88875,
    88882, 88883, 88884, 88891, 88892, 88893, 88898, 88908, 88909, 88910,
    88911, 88912, 88913, 88914, 88915, 89759, 89760, 89761, 89762, 89763,
    89764, 89955, 89956, 99266, 99385, 99386, 99387, 99388, 99389, 99393,
    99394, 99395, 99396, 99410, 99411, 99412, 99413, 99414, 99415, 99416,
    99417, 99418, 99419, 99433, 99436, 99437, 99438, 99466, 99467, 99468,
    99469, 99470, 99471, 99472, 99473, 99474, 99475,
]


def pm_perkim_ke_dinas_koperasi():
    for pk in PM_PERKIM_KE_DINAS_KOPERASI_PKS:
        clone_object(PeralatanMesin.objects.get(pk=pk))
|
[
"[email protected]"
] | |
84936bffedb2e3603a2b4acff728d7de87b37826
|
cf7c928d6066da1ce15d2793dcf04315dda9b9ed
|
/Jungol/Lv1_LCoder_Python/py20_변수와입력/Main_JO_725_변수와입력_자가진단6.py
|
bf0bc102b6ac2ee3d0fcceed1c228fdd9570335d
|
[] |
no_license
|
refresh6724/APS
|
a261b3da8f53de7ff5ed687f21bb1392046c98e5
|
945e0af114033d05d571011e9dbf18f2e9375166
|
refs/heads/master
| 2022-02-01T23:31:42.679631 | 2021-12-31T14:16:04 | 2021-12-31T14:16:04 | 251,617,280 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 371 |
py
|
print("Number 1?")
Number1 = int(input())
print("Number 2?")
Number2 = int(input())
mul = Number1 * Number2
div = float(Number1) / Number2
print("%d * %d = %d" % (Number1, Number2, mul))
# Why do the two division outputs differ?
# print("%d / %d = %f" % (Number1, Number2, Number1 / Number2))
# 79 / 74 = 1.067568  ("%f" rounds to 6 decimal places)
print(f"{Number1} / {Number2} = {div}")
# 79 / 74 = 1.0675675675675675  (f-strings print the full float repr)
|
[
"[email protected]"
] | |
c8aa8d0b005670120daa41f4f2b57539208a43e2
|
a5a386c05ea962cd34f27214130d4117e650f1e7
|
/awards/serializer.py
|
ee55415fb39d51df5732b1ccd57ce5a92ccf01d0
|
[
"MIT"
] |
permissive
|
iankabugi/Awards
|
eccba8866b16830ced36227c657c773a1bb5088b
|
3d0e58faf679b71cf4e588e59e8c705660830d2f
|
refs/heads/master
| 2021-09-09T09:33:12.535405 | 2019-06-25T22:43:32 | 2019-06-25T22:43:32 | 175,820,927 | 1 | 0 |
MIT
| 2021-09-08T00:54:20 | 2019-03-15T13:00:27 |
Python
|
UTF-8
|
Python
| false | false | 370 |
py
|
from rest_framework import serializers
from .models import Profile, Project
class ProfSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = ('bio', 'profile_pic', 'user')
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ('title', 'description', 'landing_page')
|
[
"[email protected]"
] | |
b0b14e8a0b6dfcb29da3c2d478cb88bddfc84212
|
68c1cb7e2e7b7bb4174951c817a36b16cf1e9f83
|
/pytype/tools/arg_parser.py
|
c3fe1d9b319c84b1f507a25a020a0c5d6b218caa
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
priyansh19/pytype
|
f28f439943859f61e3ce5ff51d8a56a925ae5cb8
|
44b1f6f7cddccb326abac4c21b4f26688369764e
|
refs/heads/master
| 2020-06-19T07:11:11.508597 | 2019-07-11T18:34:08 | 2019-07-11T18:34:08 | 196,610,656 | 2 | 0 |
NOASSERTION
| 2019-07-12T16:23:13 | 2019-07-12T16:23:13 | null |
UTF-8
|
Python
| false | false | 2,790 |
py
|
"""Argument parsing for tools that pass args on to pytype_single."""
import argparse
from pytype import config as pytype_config
class ParserWrapper(object):
"""Wrapper that adds arguments to a parser while recording them."""
def __init__(self, parser, actions=None):
self.parser = parser
self.actions = {} if actions is None else actions
def add_argument(self, *args, **kwargs):
try:
action = self.parser.add_argument(*args, **kwargs)
except argparse.ArgumentError:
# We might want to mask some pytype-single options.
pass
else:
self.actions[action.dest] = action
def add_argument_group(self, *args, **kwargs):
group = self.parser.add_argument_group(*args, **kwargs)
wrapped_group = self.__class__(group, actions=self.actions)
return wrapped_group
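# Args parsed from strings (e.g. read back from a config file) arrive untyped;
# best-effort convert them to int/bool before postprocessing.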
def string_to_bool(s):
return s == 'True' if s in ('True', 'False') else s
def convert_string(s):
s = s.replace('\n', '')
try:
return int(s)
except ValueError:
return string_to_bool(s)
class Parser(object):
"""Parser that integrates tool and pytype-single args."""
def __init__(self, parser, pytype_single_args):
"""Initialize a parser.
Args:
parser: An argparse.ArgumentParser or compatible object
pytype_single_args: Iterable of args that will be passed to pytype_single
"""
self.parser = parser
self.pytype_single_args = pytype_single_args
def create_initial_args(self, keys):
"""Creates the initial set of args.
Args:
keys: A list of keys to create args from
Returns:
An argparse.Namespace.
"""
return argparse.Namespace(**{k: None for k in keys})
def parse_args(self, argv):
"""Parses argv.
Args:
argv: sys.argv[1:]
Returns:
An argparse.Namespace.
"""
args = self.create_initial_args(self.pytype_single_args)
self.parser.parse_args(argv, args)
self.postprocess(args)
return args
def postprocess(self, args, from_strings=False):
"""Postprocesses the subset of pytype_single_args that appear in args.
Args:
args: an argparse.Namespace.
from_strings: Whether the args are all strings. If so, we'll do our best
to convert them to the right types.
"""
names = set()
for k in self.pytype_single_args:
if hasattr(args, k):
names.add(k)
if from_strings:
setattr(args, k, convert_string(getattr(args, k)))
pytype_config.Postprocessor(names, args).process()
def get_pytype_kwargs(self, args):
"""Return a set of kwargs to pass to pytype.config.Options.
Args:
args: an argparse.Namespace.
Returns:
A dict of kwargs with pytype_single args as keys.
"""
return {k: getattr(args, k) for k in self.pytype_single_args}
|
[
"[email protected]"
] | |
c81b79fe6529cc8728ee50a7f74f7af8d732f321
|
a8d1abca67457263e6e2d85e1005bbef2ef1b059
|
/app/supplier/__init__.py
|
a260ee8c0aa173ca38726f57095177bdfa29ec35
|
[] |
no_license
|
kid-kodi/manager
|
a46d706b11e411943e86bcfbcdc19783826a73a8
|
cb1f1233a795fce7148bcbf70989815687248db0
|
refs/heads/master
| 2022-12-05T04:29:24.248769 | 2018-12-28T11:00:58 | 2018-12-28T11:00:58 | 163,323,042 | 0 | 0 | null | 2022-11-22T02:55:02 | 2018-12-27T18:01:49 |
Python
|
UTF-8
|
Python
| false | false | 95 |
py
|
from flask import Blueprint
bp = Blueprint('supplier', __name__)
from . import routes, forms
|
[
"[email protected]"
] | |
38e12f8b9db38ea3e3eae5f141e881819ae484c8
|
6320fef2ea7376c2b35f97f1a5af004e90f09098
|
/1-2주차 실습(복습)/venv/Lib/site-packages/pygame/_numpysndarray.py
|
5daa10746bf656d4595f81604d3399fbe65bd257
|
[] |
no_license
|
Dplo1514/ploaistudy
|
7aa08d7f71653748a9e32dcc09ee8f6cec0aaed9
|
e35e42b1e5f0c90cc1e2a59993a1ef73d8872d0c
|
refs/heads/master
| 2023-09-03T00:45:55.601651 | 2021-10-24T12:19:38 | 2021-10-24T12:19:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,660 |
py
|
## pygame - Python Game Library
## Copyright (C) 2008 Marcus von Appen
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Marcus von Appen
## [email protected]
"""pygame module for accessing sound sample data using numpy
Functions to convert between numpy arrays and Sound objects. This module
will only be available when pygame can use the external numpy package.
Sound data is made of thousands of samples per second, and each sample
is the amplitude of the wave at a particular moment in time. For
example, in 22-kHz format, element number 5 of the array is the
amplitude of the wave after 5/22000 seconds.
Each sample is an 8-bit or 16-bit integer, depending on the data format.
A stereo sound file has two values per sample, while a mono sound file
only has one.
"""
import pygame.mixer as mixer
import numpy
def array(sound):
"""pygame._numpysndarray.array(Sound): return array
Copy Sound samples into an array.
Creates a new array for the sound data and copies the samples. The
array will always be in the format returned from
pygame.mixer.get_init().
"""
return numpy.array(sound, copy=True)
def samples(sound):
"""pygame._numpysndarray.samples(Sound): return array
Reference Sound samples into an array.
Creates a new array that directly references the samples in a Sound
object. Modifying the array will change the Sound. The array will
always be in the format returned from pygame.mixer.get_init().
"""
return numpy.array(sound, copy=False)
def make_sound(array):
"""pygame._numpysndarray.make_sound(array): return Sound
Convert an array into a Sound object.
Create a new playable Sound object from an array. The mixer module
must be initialized and the array format must be similar to the mixer
audio format.
"""
return mixer.Sound(array=array)
|
[
"[email protected]"
] | |
ebca73888ad4378afca4d9cc286766ded94c5a00
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03612/s108764043.py
|
931ac55645fd8b56e3c8ddb91cf3650d5feaebe2
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 325 |
py
|
N = int(input())
A = list(map(int, input().split()))
cnt = 0
for i, a in enumerate(A, 1):
if a == i:
        if i != len(A):
            # swap with the next element
            A[i - 1], A[i] = A[i], A[i - 1]
            cnt += 1
        else:
            # last element: swap with the previous one
            A[i - 2], A[i - 1] = A[i - 1], A[i - 2]
            cnt += 1
print(cnt)
|
[
"[email protected]"
] | |
fb1aef471f1ebba3fa245f15ce0af02930d314ec
|
a37c6678a5630925e6600fe65113828918f80563
|
/raylab/envs/environments/hvac.py
|
c23e639295c021d002a4d06af42c2efd75694361
|
[
"MIT"
] |
permissive
|
rudrasohan/raylab
|
658186d2955b7f949ab13045fb8f72ac8bd17978
|
12b93003a863caf7f9ead621d3afe4c83c4d7ee1
|
refs/heads/master
| 2023-03-24T11:03:57.626283 | 2021-03-11T19:53:53 | 2021-03-11T19:53:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,344 |
py
|
# pylint:disable=missing-docstring,invalid-name
import gym
import numpy as np
import torch
DEFAULT_CONFIG = {
"ADJ": [[False, True, True], [False, False, True], [False, False, False]],
"ADJ_OUTSIDE": [True, True, False],
"ADJ_HALL": [True, False, True],
"R_OUTSIDE": [4.0, 4.0, 4.0],
"R_HALL": [2.0, 2.0, 2.0],
"R_WALL": [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5], [1.5, 1.5, 1.5]],
"IS_ROOM": [True, True, True],
"CAP": [80.0, 80.0, 80.0],
"CAP_AIR": 1.006,
"COST_AIR": 1.0,
"TIME_DELTA": 1.0,
"TEMP_AIR": 40.0,
"TEMP_UP": [23.5, 23.5, 23.5],
"TEMP_LOW": [20.0, 20.0, 20.0],
"PENALTY": 20000.0,
"AIR_MAX": [10.0, 10.0, 10.0],
"TEMP_OUTSIDE_MEAN": [6.0, 6.0, 6.0],
"TEMP_OUTSIDE_VARIANCE": [1.0, 1.0, 1.0],
"TEMP_HALL_MEAN": [10.0, 10.0, 10.0],
"TEMP_HALL_VARIANCE": [1.0, 1.0, 1.0],
"init": {"temp": [10.0, 10.0, 10.0]},
"horizon": 40,
}
class HVACEnv(gym.Env):
metadata = {"render.modes": ["human"]}
def __init__(self, config=None):
self._config = {**DEFAULT_CONFIG, **(config or {})}
self._num_rooms = len(self._config["init"]["temp"])
self.observation_space = gym.spaces.Box(
low=np.array([-np.inf] * self._num_rooms + [0.0], dtype=np.float32),
high=np.array([np.inf] * self._num_rooms + [1.0], dtype=np.float32),
)
self.action_space = gym.spaces.Box(
low=np.array([0.0] * self._num_rooms, dtype=np.float32),
high=np.array([1.0] * self._num_rooms, dtype=np.float32),
)
self._horizon = self._config["horizon"]
self._state = None
self.reset()
def reset(self):
self._state = np.array(self._config["init"]["temp"] + [0.0])
return self._state
@property
def temp(self):
obs, _ = self._unpack_state(self._state)
return torch.as_tensor(obs, dtype=torch.float32)
@torch.no_grad()
def step(self, action):
state, action = map(torch.as_tensor, (self._state, action))
next_state, _ = self.transition_fn(state, action)
reward = self.reward_fn(state, action, next_state).item()
self._state = next_state.numpy()
return self._state, reward, self._terminal(), {}
def transition_fn(self, state, action, sample_shape=()):
# pylint:disable=missing-docstring
state, time = self._unpack_state(state)
AIR_MAX = torch.as_tensor(self._config["AIR_MAX"])
action = torch.as_tensor(action) * AIR_MAX
temp_hall, logp_temp_hall = self._temp_hall(sample_shape)
temp_outside, logp_temp_outside = self._temp_outside(sample_shape)
next_state = self._temp(action, temp_outside, temp_hall)
logp = logp_temp_hall + logp_temp_outside
time = self._step_time(time)
time = time.expand_as(next_state[..., -1:])
return torch.cat([next_state, time], dim=-1), logp
def _temp_hall(self, sample_shape=()):
TEMP_HALL_MEAN = torch.as_tensor(self._config["TEMP_HALL_MEAN"])
TEMP_HALL_VARIANCE = torch.sqrt(
torch.as_tensor(self._config["TEMP_HALL_VARIANCE"])
)
dist = torch.distributions.Normal(TEMP_HALL_MEAN, TEMP_HALL_VARIANCE)
sample = dist.rsample(sample_shape)
logp = dist.log_prob(sample.detach())
return sample, logp
def _temp_outside(self, sample_shape=()):
TEMP_OUTSIDE_MEAN = torch.as_tensor(self._config["TEMP_OUTSIDE_MEAN"])
TEMP_OUTSIDE_VARIANCE = torch.sqrt(
torch.as_tensor(self._config["TEMP_OUTSIDE_VARIANCE"])
)
dist = torch.distributions.Normal(TEMP_OUTSIDE_MEAN, TEMP_OUTSIDE_VARIANCE)
sample = dist.rsample(sample_shape)
logp = dist.log_prob(sample.detach())
return sample, logp
def _temp(self, action, temp_outside, temp_hall): # pylint:disable=too-many-locals
air = action
TIME_DELTA = torch.as_tensor(self._config["TIME_DELTA"])
CAP = torch.as_tensor(self._config["CAP"])
CAP_AIR = torch.as_tensor(self._config["CAP_AIR"])
TEMP_AIR = torch.as_tensor(self._config["TEMP_AIR"])
IS_ROOM = torch.as_tensor(self._config["IS_ROOM"])
ADJ = torch.as_tensor(self._config["ADJ"])
ADJ_OUTSIDE = torch.as_tensor(self._config["ADJ_OUTSIDE"])
ADJ_HALL = torch.as_tensor(self._config["ADJ_HALL"])
R_OUTSIDE = torch.as_tensor(self._config["R_OUTSIDE"])
R_HALL = torch.as_tensor(self._config["R_HALL"])
R_WALL = torch.as_tensor(self._config["R_WALL"])
temp = self.temp
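        # discrete-time heat balance: HVAC air input (rooms only), conduction
        # through shared walls, and exchange with the outside and the hallway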
temp_ = temp + TIME_DELTA / CAP * (
air * CAP_AIR * (TEMP_AIR - temp) * IS_ROOM
+ ((ADJ | ADJ.T) * (temp[np.newaxis] - temp[np.newaxis].T) / R_WALL).sum(
dim=-1
)
+ ADJ_OUTSIDE * (temp_outside - temp) / R_OUTSIDE
+ ADJ_HALL * (temp_hall - temp) / R_HALL
)
return temp_
def _step_time(self, time):
timestep = torch.round(self._horizon * time)
return torch.clamp((timestep + 1) / self._horizon, 0, 1)
def reward_fn(self, state, action, next_state):
# pylint:disable=unused-argument,missing-docstring
AIR_MAX = torch.as_tensor(self._config["AIR_MAX"])
air = torch.as_tensor(action) * AIR_MAX
temp, _ = self._unpack_state(next_state)
IS_ROOM = torch.as_tensor(self._config["IS_ROOM"])
COST_AIR = torch.as_tensor(self._config["COST_AIR"])
TEMP_LOW = torch.as_tensor(self._config["TEMP_LOW"])
TEMP_UP = torch.as_tensor(self._config["TEMP_UP"])
PENALTY = torch.as_tensor(self._config["PENALTY"])
reward = -(
IS_ROOM
* (
air * COST_AIR
+ ((temp < TEMP_LOW) | (temp > TEMP_UP)) * PENALTY
+ 10.0 * torch.abs((TEMP_UP + TEMP_LOW) / 2.0 - temp)
)
).sum(dim=-1)
return reward
def _terminal(self):
_, time = self._unpack_state(self._state)
return time.item() >= 1.0
@staticmethod
def _unpack_state(state):
obs = torch.as_tensor(state[..., :-1], dtype=torch.float32)
time = torch.as_tensor(state[..., -1:], dtype=torch.float32)
return obs, time
def render(self, mode="human"):
pass
|
[
"[email protected]"
] | |
342cf1d5605e21d8c21f41d42db60fb27432b9a7
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/eagerli.py
|
488fed7f8a0bcdbe716710498de1ed841c81ca21
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 1,767 |
py
|
ii = [('EmerRN.py', 1), ('CookGHP3.py', 7), ('LyelCPG2.py', 1), ('MarrFDI.py', 4), ('RogePAV2.py', 3), ('CoolWHM2.py', 4), ('GodwWSL2.py', 6), ('RogePAV.py', 2), ('SadlMLP.py', 6), ('FerrSDO3.py', 1), ('WilbRLW.py', 3), ('WilbRLW4.py', 3), ('RennJIT.py', 3), ('AubePRP2.py', 1), ('CookGHP.py', 9), ('MartHSI2.py', 1), ('KembFJ1.py', 2), ('WilkJMC3.py', 2), ('WilbRLW5.py', 1), ('PettTHE.py', 1), ('TennAP.py', 1), ('PeckJNG.py', 1), ('BailJD2.py', 25), ('WilbRLW2.py', 4), ('ClarGE2.py', 3), ('CarlTFR.py', 8), ('LyttELD.py', 2), ('CoopJBT2.py', 4), ('TalfTAC.py', 2), ('GrimSLE.py', 1), ('RoscTTI3.py', 7), ('AinsWRR3.py', 7), ('CookGHP2.py', 1), ('KiddJAE.py', 1), ('BailJD1.py', 16), ('RoscTTI2.py', 2), ('CoolWHM.py', 2), ('CrokTPS.py', 2), ('ClarGE.py', 8), ('IrviWVD.py', 1), ('GilmCRS.py', 4), ('DaltJMA.py', 1), ('WestJIT2.py', 2), ('DibdTRL2.py', 1), ('AinsWRR.py', 5), ('WadeJEB.py', 1), ('FerrSDO2.py', 2), ('TalfTIT.py', 2), ('GodwWLN.py', 3), ('CoopJBT.py', 4), ('KirbWPW2.py', 3), ('BackGNE.py', 3), ('HowiWRL2.py', 1), ('BailJD3.py', 15), ('MereHHB.py', 1), ('WilkJMC.py', 1), ('MartHRW.py', 6), ('MackCNH.py', 1), ('WestJIT.py', 1), ('FitzRNS4.py', 4), ('CoolWHM3.py', 5), ('DequTKM.py', 4), ('FitzRNS.py', 3), ('EdgeMHT.py', 6), ('FerrSDO.py', 2), ('RoscTTI.py', 3), ('StorJCC.py', 1), ('KembFJ2.py', 2), ('JacoWHI2.py', 1), ('SomeMMH.py', 1), ('WilbRLW3.py', 4), ('AinsWRR2.py', 2), ('MereHHB2.py', 1), ('JacoWHI.py', 1), ('ClarGE3.py', 6), ('RogeSIP.py', 2), ('MartHRW2.py', 3), ('FitzRNS2.py', 15), ('HogaGMM2.py', 4), ('EvarJSP.py', 1), ('DwigTHH.py', 2), ('SadlMLP2.py', 4), ('BowrJMM2.py', 1), ('BeckWRE.py', 2), ('TaylIF.py', 8), ('WordWYR.py', 3), ('KirbWPW.py', 1), ('WaylFEP.py', 1), ('BentJDO.py', 1), ('ClarGE4.py', 5), ('HowiWRL.py', 4)]
|
[
"[email protected]"
] | |
77f6155a68dccbb2a045fcc13e502d7bf2afd801
|
d7fac5517b409224584e5ffef20b1bf3dbb895cc
|
/test/test_generic_throttle_policy.py
|
09f96db22b801129ab65e7415ccc4d0ff3aa0864
|
[] |
no_license
|
junetigerlee/python-wso2-apim-adminclient
|
0bac09899e02a8eee6fd5a0e80cc34472ec6b055
|
7eca928a727e8eb6901c38be83bc1adeb87540cf
|
refs/heads/master
| 2021-01-01T16:12:24.090906 | 2017-07-25T06:22:09 | 2017-07-25T06:22:09 | 97,788,017 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,096 |
py
|
# coding: utf-8
"""
WSO2 API Manager - Admin
This document specifies a **RESTful API** for WSO2 **API Manager** - Admin Portal. It is written with [swagger 2](http://swagger.io/).
OpenAPI spec version: 0.11.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import wso2_apim_adminclient
from wso2_apim_adminclient.rest import ApiException
from wso2_apim_adminclient.models.generic_throttle_policy import GenericThrottlePolicy
class TestGenericThrottlePolicy(unittest.TestCase):
""" GenericThrottlePolicy unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testGenericThrottlePolicy(self):
"""
Test GenericThrottlePolicy
"""
# FIXME: construct object with mandatory attributes with example values
#model = wso2_apim_adminclient.models.generic_throttle_policy.GenericThrottlePolicy()
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
204856232cbe127a20e8e3eae73148857c5dab65
|
d5f0377d31e708b0be7f1e79dca714e274deba67
|
/hex_game/create_databases.py
|
97e08f94919b393048db84a91eca2f71dc2fed7e
|
[] |
no_license
|
Phyronnaz/TIPE_Hex
|
b65a2651476661930e52f07654d2a843b1201ddb
|
fda070968368204dac08ac81d79a4ab0d604296a
|
refs/heads/master
| 2021-01-16T21:52:07.530676 | 2017-06-15T15:34:48 | 2017-06-15T15:34:48 | 68,295,841 | 0 | 0 | null | 2017-05-18T11:46:33 | 2016-09-15T13:22:49 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 477 |
py
|
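# Load a cached Hex game database for the requested board size, or generate
# one with create_database and save it to disk.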
import os
import sys
# path = os.path.dirname(os.path.realpath(__file__))[:-8]
path = "/home/victor/PycharmProjects/TIPE_Hex/"
if path not in sys.path:
sys.path.insert(0, path)
import numpy as np
from hex_game.q_learning import create_database
size = int(sys.argv[1])
path = os.path.expanduser("~") + "/Hex/database_{}.npy".format(size)
if os.path.exists(path):
database = np.load(path)
else:
database = create_database(size, 10000)
np.save(path, database)
|
[
"[email protected]"
] | |
9f230e3838e4776ce4c598ffa0521f145bb1d3d8
|
7a0144da5a567d8497551b09875298ea224bb5bd
|
/백준/백준 1904.py
|
8e1eae03c8e54734ee929270ed6129b7f8fe2c5d
|
[] |
no_license
|
SLT-DJH/algorithm
|
dba34614bb0fbbad0ecf5d85b02cb541ab047c5a
|
e33c843be4efdfc6c6a7300ab4e53b9a7c4b2e67
|
refs/heads/master
| 2023-03-08T08:27:01.476793 | 2021-02-25T15:06:42 | 2021-02-25T15:06:42 | 297,017,366 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 320 |
py
|
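# Baekjoon 1904: the answer obeys the Fibonacci recurrence
# f(n) = f(n-1) + f(n-2) with f(1) = 1, f(2) = 2, computed
# iteratively modulo 15746.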
def fibo(num) :
if num == 1 :
return 1
elif num == 2 :
return 2
else :
num = num - 2
a = 1
b = 2
while num != 0 :
c = (a+b) % 15746
a = b
b = c
num = num - 1
return b
a = int(input())
print(fibo(a))
|
[
"[email protected]"
] | |
3efae8c54e17d120f845bd7510d914395fad5f69
|
6ab31b5f3a5f26d4d534abc4b197fe469a68e8e5
|
/tests/kyu_4_tests/test_roman_numerals_encoder.py
|
e944d06c5e05cbbdc925b8114ab23e040fd2a232
|
[
"MIT"
] |
permissive
|
mveselov/CodeWars
|
e4259194bfa018299906f42cd02b8ef4e5ab6caa
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
refs/heads/master
| 2021-06-09T04:17:10.053324 | 2017-01-08T06:36:17 | 2017-01-08T06:36:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 348 |
py
|
import unittest
from katas.kyu_4.roman_numerals_encoder import solution
class RomanNumeralsEncoderTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(solution(1), 'I')
def test_equals_2(self):
self.assertEqual(solution(4), 'IV')
def test_equals_3(self):
self.assertEqual(solution(6), 'VI')
|
[
"[email protected]"
] | |
5f809c27b6953e210a747515d832cb54e2f3eadb
|
e34cbf5fce48f661d08221c095750240dbd88caf
|
/python/homework/day23_cmdb_web/s16MadKing/asset/views.py
|
7ded2a7085ab28d0954eacb4d1a5ae9594548088
|
[] |
no_license
|
willianflasky/growup
|
2f994b815b636e2582594375e90dbcb2aa37288e
|
1db031a901e25bbe13f2d0db767cd28c76ac47f5
|
refs/heads/master
| 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 |
C
|
UTF-8
|
Python
| false | false | 2,631 |
py
|
from django.shortcuts import render, HttpResponse
import json
from django.views.decorators.csrf import csrf_exempt
from asset import core
from asset import utils
from asset.models import *
import datetime
# Create your views here.
@csrf_exempt
def asset_with_no_asset_id(request):
if request.method == 'POST':
ass_handler = core.Asset(request)
res = ass_handler.get_asset_id_by_sn()
# return render(request,'assets/acquire_asset_id_test.html',{'response':res})
return HttpResponse(json.dumps(res))
@csrf_exempt
@utils.token_required
def asset_report(request):
if request.method == "POST":
ass_handler = core.Asset(request)
if ass_handler.data_is_valid():
ass_handler.data_inject()
return HttpResponse(json.dumps(ass_handler.response))
def login(request):
pass
def index(request):
return render(request, 'index.html', locals())
def asset(request):
from asset.page import PageInfo
all_count = Asset.objects.all().count()
page_info = PageInfo(request.GET.get('p'), 20, all_count, request.path_info, page_range=3)
objs = Asset.objects.all()[page_info.start():page_info.end()]
# result = []
# all_data = Asset.objects.all().values()
# for line in all_data:
# line['create_date'] = line['create_date'].strftime('%Y-%m-%d %H:%M:%S')
# line['update_date'] = line['update_date'].strftime('%Y-%m-%d %H:%M:%S')
# result.append(line)
return render(request, 'asset.html', locals())
def data(request):
    # Chart 1: asset counts by type
asset_list_num = []
asset_list = []
asset_dic = {'server': "服务器", 'networkdevice': "网络设备", 'storagedevice': "存储设备",
'securitydevice': "安全设备", 'idcdevice': "IDC设备",
'accescories': "备件", 'software': "软件"}
for item, value in asset_dic.items():
res = Asset.objects.filter(asset_type=item).count()
asset_list_num.append(res)
asset_list.append(value)
    # Chart 2: asset counts by status
status_list = []
status_result = []
status_dic = {0: '在线', 1: '已下线', 2: '未知', 3: '故障', 4: '备用'}
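    # status labels: 在线=online, 已下线=offline, 未知=unknown, 故障=faulty, 备用=standby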
for item, value in status_dic.items():
status_tmp = {'value': 0, 'name': ""}
res = Asset.objects.filter(status=item).count()
status_list.append(value)
status_tmp['value'] = res
status_tmp['name'] = value
status_result.append(status_tmp)
data = {
'p11': asset_list,
'p12': asset_list_num,
'p21': status_list,
'p22': status_result
}
return HttpResponse(json.dumps(data))
|
[
"[email protected]"
] | |
800b9ad4822aef0e6593471d1a2f467a7fbb3365
|
a499fbdd93f85a286505433a08afc25d84c8ff04
|
/python/tvm/topi/gpu/dense.py
|
b9009d3f3393afddc4cb2d6a32b717cffa09776e
|
[
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
elphinkuo/tvm
|
a81e0ccc5950a1473efdcdbb8263de9adbe36787
|
9df2ae8eaa8b394013182a7ad09ac57fe401f80e
|
refs/heads/main
| 2023-08-05T07:41:18.652097 | 2021-09-28T00:38:26 | 2021-09-28T00:38:26 | 411,311,927 | 2 | 0 |
Apache-2.0
| 2021-09-28T14:51:56 | 2021-09-28T14:17:46 | null |
UTF-8
|
Python
| false | false | 8,459 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Schedule for dense operator"""
import logging
from tvm import autotvm, te
from tvm.autotvm.task.space import SplitEntity
from .. import nn
from ..utils import traverse_inline, get_const_tuple
logger = logging.getLogger("topi")
@autotvm.register_topi_compute("dense_small_batch.gpu")
def dense_small_batch(cfg, data, weight, bias=None, out_dtype=None):
"""Dense operator on GPU"""
return nn.dense(data, weight, bias, out_dtype)
@autotvm.register_topi_schedule("dense_small_batch.gpu")
def schedule_dense_small_batch(cfg, outs):
"""Schedule float32/64 dense with small batch size"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "dense":
_schedule_dense_small_batch(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("matmul_default.gpu")
def matmul_default(
cfg,
tensor_a,
tensor_b,
bias=None,
out_dtype=None,
transpose_a=False,
transpose_b=False,
):
"""Matmul operator on GPU"""
return nn.matmul(tensor_a, tensor_b, bias, out_dtype, transpose_a, transpose_b)
@autotvm.register_topi_schedule("matmul_default.gpu")
def schedule_matmul_default(cfg, outs):
"""Schedule matmul on GPU"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "matmul":
# Temporary use this as a basic schedule for matmul
# TODO(jcf94): Add a more general schedule for matmul
_schedule_dense_small_batch(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_dense_small_batch(cfg, s, C):
A, weights = C.op.input_tensors
_, in_dim_weights = get_const_tuple(weights.shape)
_, in_dim_A = get_const_tuple(A.shape)
if isinstance(in_dim_A, int):
in_dim = in_dim_A
elif isinstance(in_dim_weights, int):
in_dim = in_dim_weights
else:
in_dim = None
if in_dim is not None:
cfg.define_split("tile_k", in_dim, num_outputs=2)
if cfg.is_fallback:
cfg["tile_k"] = SplitEntity([-1, 64] if in_dim > 64 else [1, 64])
_, kf = cfg["tile_k"].apply(s, C, C.op.reduce_axis[0])
else:
tile_k = 64
_, kf = s[C].split(C.op.reduce_axis[0], tile_k)
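    # rfactor materializes the split reduction: each threadIdx.x lane computes
    # a partial sum into CF, and C combines the partials across the thread axis.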
CF = s.rfactor(C, kf)
if C.op in s.outputs:
Out = C
else:
Out = s.outputs[0].output(0)
s[C].compute_at(s[Out], s[Out].op.axis[1])
s[Out].bind(s[Out].op.axis[0], te.thread_axis("blockIdx.y"))
s[Out].bind(s[Out].op.axis[1], te.thread_axis("blockIdx.x"))
tx = s[C].op.reduce_axis[0]
thread_x = te.thread_axis("threadIdx.x")
s[C].bind(tx, thread_x)
s[CF].compute_at(s[C], tx)
s[C].set_store_predicate(thread_x.var.equal(0))
s[Out].set_store_predicate(thread_x.var.equal(0))
@autotvm.register_topi_compute("dense_large_batch.gpu")
def dense_large_batch(cfg, data, weight, bias=None, out_dtype=None):
"""Dense operator on GPU"""
return nn.dense(data, weight, bias, out_dtype)
@autotvm.register_topi_schedule("dense_large_batch.gpu")
def schedule_dense_large_batch(cfg, outs):
"""Schedule float32/64 dense with large batch size"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "dense":
_schedule_dense_large_batch(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
def _schedule_dense_large_batch(cfg, s, C):
"""Schedule float32/64 dense with large batch size"""
A, B = C.op.input_tensors
batch, in_dim = get_const_tuple(A.shape)
out_dim, _ = get_const_tuple(B.shape)
k = C.op.reduce_axis[0]
# create tuning space
try:
block_cand = [64, 128]
vthread_cand = [2 ** x for x in range(1, 7)]
n_thread_cand = [2 ** x for x in range(3, 7)]
cfg.define_split(
"tile_x",
batch,
num_outputs=4,
filter=lambda x: (
x.size[1] in vthread_cand
and x.size[2] in n_thread_cand
and (x.size[1] * x.size[2] * x.size[3]) in block_cand
),
)
cfg.define_split(
"tile_y",
out_dim,
num_outputs=4,
filter=lambda x: (
x.size[1] in vthread_cand
and x.size[2] in n_thread_cand
and (x.size[1] * x.size[2] * x.size[3]) in block_cand
),
)
cfg.define_split("tile_k", in_dim, num_outputs=3, filter=lambda x: x.size[0] > 2)
except IndexError:
# Index error happens when no entities left after filtering, which was designed
# to prune tuning space for better search efficiency.
logger.debug("Tuning space was created without pruning due to unfit shapes")
cfg.define_split("tile_x", batch, num_outputs=4)
cfg.define_split("tile_y", out_dim, num_outputs=4)
cfg.define_split("tile_k", in_dim, num_outputs=3)
if cfg.is_fallback:
if batch > 1:
cfg["tile_x"] = SplitEntity([-1, 2, 16, 2])
else:
cfg["tile_x"] = SplitEntity([1, 1, 1, 1])
if out_dim > 1:
cfg["tile_y"] = SplitEntity([-1, 2, 16, 2])
else:
cfg["tile_y"] = SplitEntity([1, 1, 1, 1])
if in_dim > 8:
cfg["tile_k"] = SplitEntity([-1, 8, 1])
else:
cfg["tile_k"] = SplitEntity([-1, 1, 1])
# Explicit memory access
AA = s.cache_read(A, "shared", [C])
BB = s.cache_read(B, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
# Deal with op fusion
if C.op not in s.outputs:
s[C].compute_inline()
C = s.outputs[0].output(0)
# Split and reorder computation
bx, txz, tx, xi = cfg["tile_x"].apply(s, C, C.op.axis[0])
by, tyz, ty, yi = cfg["tile_y"].apply(s, C, C.op.axis[1])
s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
# Binding
s[C].bind(by, te.thread_axis("blockIdx.y"))
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tyz, te.thread_axis("vthread"))
s[C].bind(txz, te.thread_axis("vthread"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
# Split reduction
yo, xo = CC.op.axis
ko, kt, ki = cfg["tile_k"].apply(s, CC, k)
s[CC].reorder(ko, kt, ki, yo, xo)
s[AA].compute_at(s[CC], ko)
s[BB].compute_at(s[CC], ko)
s[CC].unroll(kt)
s[AL].compute_at(s[CC], kt)
s[BL].compute_at(s[CC], kt)
# Schedule for A's shared memory load
num_thread_x = cfg["tile_x"].size[2]
ty, _ = s[AA].split(s[AA].op.axis[0], nparts=num_thread_x)
_, xi = s[AA].split(s[AA].op.axis[1], factor=num_thread_x * 4)
tx, xi = s[AA].split(xi, nparts=num_thread_x)
s[AA].bind(ty, te.thread_axis("threadIdx.y"))
s[AA].bind(tx, te.thread_axis("threadIdx.x"))
s[AA].double_buffer()
    # Schedule for B's shared memory load
num_thread_y = cfg["tile_y"].size[2]
ty, _ = s[BB].split(s[BB].op.axis[0], nparts=num_thread_y)
_, xi = s[BB].split(s[BB].op.axis[1], factor=num_thread_y * 4)
tx, xi = s[BB].split(xi, nparts=num_thread_y)
s[BB].bind(ty, te.thread_axis("threadIdx.y"))
s[BB].bind(tx, te.thread_axis("threadIdx.x"))
s[BB].double_buffer()
|
[
"[email protected]"
] | |
5b510ccf31da37f6df7b6f12e6ab9ac61759eaec
|
fffa5f1fe7e1d2bd875f302fefa2295563773440
|
/posts/migrations/0001_initial.py
|
204feab05266016c6f35f1128814ec13ff33dfd0
|
[] |
no_license
|
Awalamoo7/blogs-api
|
9d2fe2d37837366863e8ffbab4372aa927e49875
|
fd8112d666b10d728a3f12cfc284d7379eeedf41
|
refs/heads/main
| 2023-08-28T13:06:45.637222 | 2021-10-24T01:20:02 | 2021-10-24T01:20:02 | 420,555,567 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,159 |
py
|
# Generated by Django 3.2.8 on 2021-10-23 17:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_time', models.DateTimeField(auto_created=True)),
('title', models.CharField(max_length=400)),
('description', models.TextField()),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_time', models.DateTimeField(auto_created=True)),
('text', models.TextField()),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.post')),
],
),
]
|
[
"[email protected]"
] | |
8fc3e828360f0eb1a186ea87f4cfc474a58e4949
|
195b8d12796872c05d539aa9283fc3f407b8d8b5
|
/cinder/cinder/volume/drivers/nexenta/volume.py
|
2b752314f48e0f3831ddd234b86042da954388a0
|
[
"Apache-2.0"
] |
permissive
|
rvbelapure/openstack-nova-sched
|
afaa5928da3a8430b64bc23aedb251bae0e7d3ef
|
325da0e08979d79b7470d7506ced1b4210e2b696
|
refs/heads/master
| 2021-01-17T05:28:44.474242 | 2013-04-20T21:18:35 | 2013-04-20T21:18:35 | 9,082,500 | 0 | 1 | null | 2021-09-07T08:33:18 | 2013-03-28T17:30:46 |
Python
|
UTF-8
|
Python
| false | false | 11,503 |
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance
=====================================================================
.. automodule:: nexenta.volume
.. moduleauthor:: Yuriy Taraday <[email protected]>
"""
from oslo.config import cfg
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import jsonrpc
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
nexenta_opts = [
cfg.StrOpt('nexenta_host',
default='',
help='IP address of Nexenta SA'),
cfg.IntOpt('nexenta_rest_port',
default=2000,
help='HTTP port to connect to Nexenta REST API server'),
cfg.StrOpt('nexenta_rest_protocol',
default='auto',
help='Use http or https for REST connection (default auto)'),
cfg.StrOpt('nexenta_user',
default='admin',
help='User name to connect to Nexenta SA'),
cfg.StrOpt('nexenta_password',
default='nexenta',
help='Password to connect to Nexenta SA',
secret=True),
cfg.IntOpt('nexenta_iscsi_target_portal_port',
default=3260,
help='Nexenta target portal port'),
cfg.StrOpt('nexenta_volume',
default='cinder',
help='pool on SA that will hold all volumes'),
cfg.StrOpt('nexenta_target_prefix',
default='iqn.1986-03.com.sun:02:cinder-',
help='IQN prefix for iSCSI targets'),
cfg.StrOpt('nexenta_target_group_prefix',
default='cinder/',
help='prefix for iSCSI target groups on SA'),
cfg.StrOpt('nexenta_blocksize',
default='',
help='block size for volumes (blank=default,8KB)'),
cfg.BoolOpt('nexenta_sparse',
default=False,
help='flag to create sparse volumes'),
]
FLAGS.register_opts(nexenta_opts)
class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921
"""Executes volume driver commands on Nexenta Appliance."""
def __init__(self, *args, **kwargs):
super(NexentaDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
protocol = FLAGS.nexenta_rest_protocol
auto = protocol == 'auto'
if auto:
protocol = 'http'
self.nms = jsonrpc.NexentaJSONProxy(
'%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host,
FLAGS.nexenta_rest_port),
FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto)
def check_for_setup_error(self):
"""Verify that the volume for our zvols exists.
:raise: :py:exc:`LookupError`
"""
if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
raise LookupError(_("Volume %s does not exist in Nexenta SA"),
FLAGS.nexenta_volume)
@staticmethod
def _get_zvol_name(volume_name):
"""Return zvol name that corresponds given volume name."""
return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
@staticmethod
def _get_target_name(volume_name):
"""Return iSCSI target name to access volume."""
return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
@staticmethod
def _get_target_group_name(volume_name):
"""Return Nexenta iSCSI target group name for volume."""
return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
def create_volume(self, volume):
"""Create a zvol on appliance.
:param volume: volume reference
"""
self.nms.zvol.create(
self._get_zvol_name(volume['name']),
'%sG' % (volume['size'],),
FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
def delete_volume(self, volume):
"""Destroy a zvol on appliance.
:param volume: volume reference
"""
try:
self.nms.zvol.destroy(self._get_zvol_name(volume['name']), '')
except nexenta.NexentaException as exc:
if "zvol has children" in exc.args[1]:
raise exception.VolumeIsBusy(volume_name=volume['name'])
else:
raise
def create_snapshot(self, snapshot):
"""Create snapshot of existing zvol on appliance.
        :param snapshot: snapshot reference
"""
self.nms.zvol.create_snapshot(
self._get_zvol_name(snapshot['volume_name']),
snapshot['name'], '')
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self.nms.zvol.clone(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
self._get_zvol_name(volume['name']))
def delete_snapshot(self, snapshot):
"""Delete volume's snapshot on appliance.
        :param snapshot: snapshot reference
"""
try:
self.nms.snapshot.destroy(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
'')
except nexenta.NexentaException as exc:
if "snapshot has dependent clones" in exc.args[1]:
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
else:
raise
def local_path(self, volume):
"""Return local path to existing local volume.
We never have local volumes, so it raises NotImplementedError.
:raise: :py:exc:`NotImplementedError`
"""
raise NotImplementedError
def _do_export(self, _ctx, volume, ensure=False):
"""Do all steps to get zvol exported as LUN 0 at separate target.
:param volume: reference of volume to be exported
:param ensure: if True, ignore errors caused by already existing
resources
:return: iscsiadm-formatted provider location string
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
try:
self.nms.iscsitarget.create_target({'target_name': target_name})
except nexenta.NexentaException as exc:
if not ensure or 'already configured' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.stmf.create_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
if not ensure or 'already exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target group creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.stmf.add_targetgroup_member(target_group_name,
target_name)
except nexenta.NexentaException as exc:
if not ensure or 'already exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target group member addition error "%s"'
' while ensuring export'), exc)
try:
self.nms.scsidisk.create_lu(zvol_name, {})
except nexenta.NexentaException as exc:
if not ensure or 'in use' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored LU creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
'target_group': target_group_name,
'lun': '0'})
except nexenta.NexentaException as exc:
if not ensure or 'view entry exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored LUN mapping entry addition error "%s"'
' while ensuring export'), exc)
return '%s:%s,1 %s 0' % (FLAGS.nexenta_host,
FLAGS.nexenta_iscsi_target_portal_port,
target_name)
def create_export(self, _ctx, volume):
"""Create new export for zvol.
:param volume: reference of volume to be exported
:return: iscsiadm-formatted provider location string
"""
loc = self._do_export(_ctx, volume, ensure=False)
return {'provider_location': loc}
def ensure_export(self, _ctx, volume):
"""Recreate parts of export if necessary.
:param volume: reference of volume to be exported
"""
self._do_export(_ctx, volume, ensure=True)
def remove_export(self, _ctx, volume):
"""Destroy all resources created to export zvol.
:param volume: reference of volume to be unexported
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
self.nms.scsidisk.delete_lu(zvol_name)
try:
self.nms.stmf.destroy_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
# We assume that target group is already gone
LOG.warn(_('Got error trying to destroy target group'
' %(target_group)s, assuming it is '
'already gone: %(exc)s'),
{'target_group': target_group_name, 'exc': exc})
try:
self.nms.iscsitarget.delete_target(target_name)
except nexenta.NexentaException as exc:
# We assume that target is gone as well
LOG.warn(_('Got error trying to delete target %(target)s,'
' assuming it is already gone: %(exc)s'),
{'target': target_name, 'exc': exc})
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
raise NotImplementedError()
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
raise NotImplementedError()
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
raise NotImplementedError()
|
[
"[email protected]"
] | |
cb9081784eab4efbdda08880b0b3ceeae5077746
|
1d9138d777744fa2d9d6e3b629a43041f2358d06
|
/real_time/abc/116/2.py
|
30a70f19d2c5f010e386aaa7b16333c9f9b3ca64
|
[] |
no_license
|
Yuyats/AtCoderAnswers
|
f1956b790ee64a4d0b3b48b98791a91679a30244
|
fac7e3eb74a888e77ba7a6b6a15d836c589baa3e
|
refs/heads/master
| 2021-06-24T16:19:45.848524 | 2021-06-13T03:51:07 | 2021-06-13T03:51:07 | 198,857,448 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 548 |
py
|
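# Two drafts of the same solution (presumably AtCoder ABC 116 B): iterate the
# Collatz-style map f from s and print the 1-based index at which the first
# repeated value appears.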
def f(n):
if n % 2 == 0:
return n / 2
else:
return 3 * n + 1
s = int(input())
a = []
for i in range(1, 1000000):
if i == 1:
a.append(s)
else:
ai = f(a[-1])
if ai in a:
print(i)
break
else:
a.append(ai)
def f(n):
if n % 2 == 0:
return n / 2
else:
return 3 * n + 1
s = int(input())
a = [s]
for i in range(2, 1000001):
ai = f(a[-1])
if ai in a:
print(i)
break
else:
a.append(ai)
|
[
"[email protected]"
] | |
97d7f3bed79bfe6a5b45e7b0f4a83a690de7fcc1
|
10a18920f93008659f49b18fb016b84a21765e73
|
/server/venv_ubuntu/lib/python3.8/site-packages/oslo_reports/tests/test_openstack_generators.py
|
84bd46deb5bdd50843dced8da3a4d297ad4c65bb
|
[] |
no_license
|
psitadmin/network-junco
|
48fa704878fe9566cda393eddd88dafae07fa47c
|
63a1e4afa46514852345e03eb32a2911621540f2
|
refs/heads/master
| 2023-08-18T07:01:48.836761 | 2021-09-28T09:47:42 | 2021-09-28T09:47:42 | 403,928,187 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,757 |
py
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from unittest import mock
import greenlet
from oslo_config import cfg
from oslotest import base
import six
from oslo_reports.generators import conf as os_cgen
from oslo_reports.generators import threading as os_tgen
from oslo_reports.generators import version as os_pgen
from oslo_reports.models import threading as os_tmod
class TestOpenstackGenerators(base.BaseTestCase):
def test_thread_generator(self):
model = os_tgen.ThreadReportGenerator()()
# self.assertGreaterEqual(len(model.keys()), 1)
self.assertTrue(len(model.keys()) >= 1)
was_ok = False
for val in model.values():
self.assertIsInstance(val, os_tmod.ThreadModel)
self.assertIsNotNone(val.stack_trace)
if val.thread_id == threading.current_thread().ident:
was_ok = True
break
self.assertTrue(was_ok)
model.set_current_view_type('text')
self.assertIsNotNone(six.text_type(model))
def test_thread_generator_tb(self):
class FakeModel(object):
def __init__(self, thread_id, tb):
self.traceback = tb
with mock.patch('oslo_reports.models'
'.threading.ThreadModel', FakeModel):
model = os_tgen.ThreadReportGenerator("fake traceback")()
curr_thread = model.get(threading.current_thread().ident, None)
            self.assertIsNotNone(curr_thread)
self.assertEqual("fake traceback", curr_thread.traceback)
def test_green_thread_generator(self):
curr_g = greenlet.getcurrent()
model = os_tgen.GreenThreadReportGenerator()()
# self.assertGreaterEqual(len(model.keys()), 1)
self.assertTrue(len(model.keys()) >= 1)
was_ok = False
for tm in model.values():
if tm.stack_trace == os_tmod.StackTraceModel(curr_g.gr_frame):
was_ok = True
break
self.assertTrue(was_ok)
model.set_current_view_type('text')
self.assertIsNotNone(six.text_type(model))
def test_config_model(self):
conf = cfg.ConfigOpts()
conf.register_opt(cfg.StrOpt('crackers', default='triscuit'))
conf.register_opt(cfg.StrOpt('secrets', secret=True,
default='should not show'))
conf.register_group(cfg.OptGroup('cheese', title='Cheese Info'))
conf.register_opt(cfg.IntOpt('sharpness', default=1),
group='cheese')
conf.register_opt(cfg.StrOpt('name', default='cheddar'),
group='cheese')
conf.register_opt(cfg.BoolOpt('from_cow', default=True),
group='cheese')
conf.register_opt(cfg.StrOpt('group_secrets', secret=True,
default='should not show'),
group='cheese')
model = os_cgen.ConfigReportGenerator(conf)()
model.set_current_view_type('text')
# oslo.config added a default config_source opt which gets included
# in our output, but we also need to support older versions where that
# wasn't the case. This logic can be removed once the oslo.config
# lower constraint becomes >=6.4.0.
config_source_line = ' config_source = \n'
try:
conf.config_source
except cfg.NoSuchOptError:
config_source_line = ''
target_str = ('\ncheese: \n'
' from_cow = True\n'
' group_secrets = ***\n'
' name = cheddar\n'
' sharpness = 1\n'
'\n'
'default: \n'
'%s'
' crackers = triscuit\n'
' secrets = ***') % config_source_line
self.assertEqual(target_str, six.text_type(model))
def test_package_report_generator(self):
class VersionObj(object):
def vendor_string(self):
return 'Cheese Shoppe'
def product_string(self):
return 'Sharp Cheddar'
def version_string_with_package(self):
return '1.0.0'
model = os_pgen.PackageReportGenerator(VersionObj())()
model.set_current_view_type('text')
target_str = ('product = Sharp Cheddar\n'
'vendor = Cheese Shoppe\n'
'version = 1.0.0')
self.assertEqual(target_str, six.text_type(model))
def test_package_report_generator_without_vendor_string(self):
class VersionObj(object):
def product_string(self):
return 'Sharp Cheddar'
def version_string_with_package(self):
return '1.0.0'
model = os_pgen.PackageReportGenerator(VersionObj())()
model.set_current_view_type('text')
target_str = ('product = Sharp Cheddar\n'
'vendor = None\n'
'version = 1.0.0')
self.assertEqual(target_str, six.text_type(model))
|
[
"[email protected]"
] | |
ede14b5deeefa8d17da62f26de32885f2fad3528
|
c0c536f619292d8cac6bc0d340fa855ce6d21310
|
/torch/_guards.py
|
35550f1d664a71efca5309ca80d89bd38c780b67
|
[
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
manuelmusngi/pytorch
|
32915e01515f1ba81cdd20340dbae5dbfe6809e6
|
bab21d20ebf45a5dc620b48791bb526f664445a5
|
refs/heads/main
| 2023-07-01T23:21:47.013111 | 2023-06-19T19:34:42 | 2023-06-20T09:24:21 | 339,895,372 | 2 | 0 |
NOASSERTION
| 2021-02-18T00:46:11 | 2021-02-18T00:46:11 | null |
UTF-8
|
Python
| false | false | 19,443 |
py
|
import contextlib
import dataclasses
import enum
import logging
import traceback
import unittest.mock
import weakref
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import (
Any,
Callable,
Dict,
Generic,
List,
NamedTuple,
Optional,
Set,
Tuple,
TypeVar,
)
import torch
log = logging.getLogger(__name__)
import sympy
"""
torch._guards is the definitional source of truth for general purpose guard structures.
An important thing to keep in mind here is the preservation of layering. There should be no dynamo notions,
and no guard installation notions here.
"""
class GuardSource(enum.Enum):
LOCAL = 0
GLOBAL = 1
LOCAL_NN_MODULE = 2
GLOBAL_NN_MODULE = 3
CONSTANT = 4
RANDOM_VALUE = 5
SHAPE_ENV = 6
LOCAL_FSDP_MODULE = 7
GLOBAL_FSDP_MODULE = 8
def select(self, locals_, globals_):
# SHAPE_ENV counts as locals, because the guard expressions
# created by shape env can reference f_locals
#
# RANDOM_VALUE counts as locals, because what we do is we run
# Python RNG and assign it to a temporary, and then perform
# guard tests on that temporary
if self in (
GuardSource.LOCAL,
GuardSource.LOCAL_NN_MODULE,
GuardSource.LOCAL_FSDP_MODULE,
GuardSource.SHAPE_ENV,
GuardSource.RANDOM_VALUE,
):
return locals_
if self in (
GuardSource.GLOBAL,
GuardSource.GLOBAL_NN_MODULE,
GuardSource.GLOBAL_FSDP_MODULE,
):
return globals_
raise NotImplementedError(str(self))
def is_fsdp_module(self) -> bool:
return self in (GuardSource.GLOBAL_FSDP_MODULE, GuardSource.LOCAL_FSDP_MODULE)
def is_nn_module(self) -> bool:
return (
self
in (
GuardSource.GLOBAL_NN_MODULE,
GuardSource.LOCAL_NN_MODULE,
)
or self.is_fsdp_module()
)
def is_local(self):
return self in (
GuardSource.LOCAL,
GuardSource.LOCAL_NN_MODULE,
GuardSource.LOCAL_FSDP_MODULE,
)
"""
Base class for a "GuardBuilder" role.
The GuardBuilderBase role is to represent a scope within which to build a guard. The name is a little
confusing, as it's not a builder, but for the sake of avoiding a lot of renames and keeping the original reference
to torchdynamo's GuardBuilder.
Note: create_fn is invoked with a GuardBuilderBase and a Guard. A GuardBuilder is chosen based
on GuardSource's select function.
There is value in keeping this GuardBuilderBase empty to keep layering clean.
"""
class GuardBuilderBase:
pass
class ShapeGuard(NamedTuple):
expr: sympy.Expr
# TODO: store this in slightly less formatted form
stack: str
@dataclasses.dataclass
class Guard:
# The name of a Guard specifies what exactly it is the guard is guarding
# on. The meaning of the name is dependent on the create_fn; you must
# look at the use-site inside create_fn to know what name means.
#
# That being said, although you might think this is just a "name", name is
# usually an arbitrary Python expression that will be evaluated with all
# globals (and locals, if you create a LOCAL guard) to extract the Python
# object that we want to perform guard tests on. This evaluation
# typically happens in GuardBuilder.eval. In these cases, name is
# typically produced by Source.name() (not to be confused with
# GuardSource)--morally, we could have stored a Source here.
#
# Occasionally, name is not a valid Python expression; sometimes
# it is meaningless. Example create_fns that are like this include
# GRAD_MODE and SHAPE_ENV.
name: str
source: GuardSource
create_fn: Callable[[GuardBuilderBase, "Guard"], None]
is_volatile: bool = False
# Export only. These values are written to at time of guard check_fn creation.
guard_types: Optional[List[str]] = None
code_list: Optional[List[str]] = None
obj_weakref: Optional[object] = None
guarded_class_weakref: Optional[type] = None
def __hash__(self):
return hash((self.name, self.source, id(self.create_fn)))
def sort_key(self):
return (
self.source.value if self.source else -1,
len(self.name),
self.name,
self.create_fn.__code__.co_firstlineno,
)
def __lt__(self, other):
return self.sort_key() < other.sort_key()
@staticmethod
def weakref_to_str(obj_weakref):
"""
This is a workaround of a Python weakref bug.
`obj_weakref` is instance returned by `weakref.ref`,
`str(obj_weakref)` is buggy if the original obj overrides __getattr__, e.g:
class MyConfig(dict):
def __getattr__(self, x):
return self[x]
obj = MyConfig(offset=5)
obj_weakref = weakref.ref(obj)
str(obj_weakref) # raise error: KeyError: '__name__'
"""
if isinstance(obj_weakref, weakref.ReferenceType):
obj = obj_weakref()
if obj is not None:
return f"<weakref at {hex(id(obj_weakref))}; to '{obj.__class__.__name__}' at {hex(id(obj))}>"
else:
return f"<weakref at {hex(id(obj_weakref))}; dead>"
else:
return str(obj_weakref)
def __repr__(self):
s = f"""
{self.source.name.lower() if self.source else ""} {repr(self.name)} {self.create_fn.__name__}
{{
'guard_types': {self.guard_types},
'code': {self.code_list},
'obj_weakref': {self.weakref_to_str(self.obj_weakref)}
'guarded_class': {self.guarded_class_weakref}
}}
"""
return s
def __str__(self):
output = f"Name: {repr(self.name)}\n"
source = self.source.name.lower() if self.source else ""
output += f" Source: {source}\n"
output += f" Create Function: {self.create_fn.__name__}\n"
output += f" Guard Types: {self.guard_types}\n"
output += f" Code List: {self.code_list}\n"
output += f" Object Weakref: {self.weakref_to_str(self.obj_weakref)}\n"
output += f" Guarded Class Weakref: {self.guarded_class_weakref}\n"
return output
def create(self, local_builder: GuardBuilderBase, global_builder: GuardBuilderBase):
return self.create_fn(self.source.select(local_builder, global_builder), self)
def is_nn_module(self):
return self.source.is_nn_module()
def is_fsdp_module(self):
return self.source.is_fsdp_module()
def is_local(self):
return self.source.is_local()
def set_export_info(self, guard_type, guarded_class, code_list, obj_weakref):
if not self.guard_types:
self.guard_types = list()
self.guard_types.append(guard_type)
assert self.guarded_class_weakref in (
guarded_class,
None,
), "Guarded class id must be identical, or None"
self.guarded_class_weakref = guarded_class
if not self.code_list:
self.code_list = code_list
else:
self.code_list.extend(code_list)
assert self.obj_weakref in (
obj_weakref,
None,
), "Guarded object must be identical, or None"
self.obj_weakref = obj_weakref
T = TypeVar("T")
"""
Parent structure for guard env expressions.
A GuardEnvExpr can have any subtype.
Note: All subtypes must be handled exhaustively in
torch._dynamo.guards._parse_guard_env_guards to avoid a RuntimeError.
"""
@dataclasses.dataclass
class GuardEnvExpr:
pass
"""
A class representing a pair of duplicate inputs.
input_pos_a and input_pos_b are input positions we have deduped.
"""
@dataclasses.dataclass
class DuplicateInputs(GuardEnvExpr):
input_source_a: "Source"
input_source_b: "Source"
def __post_init__(self):
assert self.input_source_a != self.input_source_b
"""
Checkpointable is an interface for driving state snapshotting, left purposely vague for now.
copy_graphstate() -> T, a somewhat legacy name, is expected to emit a snapshot of any type that
can also be taken in at restore_graphstate(T) calls.
When to snapshot is, at the moment, an implementation detail of upstream callers. Checkpointable
does not provide any guarantees around consistency, idempotency, or safety of calling its APIs, yet.
In the future, it will have a closer coupling to a generic Checkpoint management system.
"""
class Checkpointable(ABC, Generic[T]):
@abstractmethod
def copy_graphstate(self) -> T:
...
@abstractmethod
def restore_graphstate(self, state: T):
...
"""
The GuardCheckpointState - it is the T of Checkpointable[T] for GuardsContext
"""
class GuardsCheckpointState:
dynamo_guards: Set[Guard] = set()
def __init__(self, dynamo_guards):
self.dynamo_guards = dynamo_guards
"""
Produces a delta against another GuardsCheckpointState.
Returns None if no delta is found, otherwise, return a set() of mismatched
Guard type objects.
"""
def diff(self, other):
r = self.dynamo_guards.difference(other.dynamo_guards)
if len(r) == 0:
return None
return r
def __eq__(self, other):
return self.diff(other) is None
class ModuleContextCheckpointState:
nn_modules: Dict[str, torch.nn.Module] = {}
def __init__(self, nn_modules):
self.nn_modules = nn_modules
"""
Produces a delta against another ModuleContextCheckpointState.
Returns None if no delta is found, otherwise, return a set() of mismatched
module key names.
"""
def diff(self, other):
r = set(self.nn_modules.keys()).difference(set(other.nn_modules.keys()))
if len(r) == 0:
return None
return r
def __eq__(self, other):
return self.diff(other) is None
class ModuleContext(Checkpointable[ModuleContextCheckpointState]):
def __init__(self):
self.nn_modules: Dict[str, torch.nn.Module] = {}
def copy_graphstate(self):
return ModuleContextCheckpointState(dict(self.nn_modules))
def restore_graphstate(self, state):
assert isinstance(state, ModuleContextCheckpointState)
self.nn_modules = state.nn_modules
class GlobalContextCheckpointState:
global_state: Dict[str, Tuple[Callable, ...]] = {}
def __init__(self, global_states):
self.global_state = global_states
"""
Produces a delta against another GlobalContextCheckpointState.
Returns None if no delta is found, otherwise, return a set() of mismatched
global key names.
"""
def diff(self, other):
r = set(self.global_state.keys()).difference(set(other.global_state.keys()))
if len(r) == 0:
return None
return r
def __eq__(self, other):
return self.diff(other) is None
class GlobalContext(Checkpointable[GlobalContextCheckpointState]):
"""
This keeps track of the global torch state during tracing of a function.
For example, torch.is_grad_enabled.
"""
_supported_global_states = {
"grad_enabled",
"autocast_enabled",
"autocast_cpu_enabled",
"autocast_gpu_dtype",
"autocast_cpu_dtype",
"autocast_cache_enabled",
}
def __init__(self):
self.global_state: Dict[str, Tuple[Callable, ...]] = {}
def copy_graphstate(self):
return GlobalContextCheckpointState(dict(self.global_state))
def restore_graphstate(self, state):
assert isinstance(state, GlobalContextCheckpointState)
self.global_state = state.global_state
assert (
len(self.global_state) == len(self._supported_global_states)
and set(self.global_state.keys()) == self._supported_global_states
), "Global state mismatch"
for func, args in self.global_state.values():
func(args)
"""
A GuardsContext is a checkpointable representation of all the guards in the current tracing
context. Its lifecycle is bound 1:1 to the tracing context, and it should never be instantiated
directly outside of it. For passing around internal state representations of this object,
prefer to extract them with copy_graphstate to produce a GuardsCheckpointState.
"""
class GuardsContext(Checkpointable[GuardsCheckpointState]):
def __init__(self):
self.dynamo_guards: Set[Guard] = set()
self.aotautograd_guards: List[GuardEnvExpr] = []
def copy_graphstate(self):
return GuardsCheckpointState(set(self.dynamo_guards))
def restore_graphstate(self, state):
assert isinstance(state, GuardsCheckpointState)
self.dynamo_guards = state.dynamo_guards
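# Illustrative checkpoint round-trip (assumed usage, not part of the original
# file):
#
#   guards = GuardsContext()
#   snapshot = guards.copy_graphstate()   # GuardsCheckpointState
#   ...tracing may add entries to guards.dynamo_guards...
#   guards.restore_graphstate(snapshot)   # roll back to the snapshot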
_CURRENT_TRACING_CONTEXT = None
"""
TracingContext is the source of truth for all currently accumulated information
needed to trace. Its lifecycle is kept 1:1 when using TorchDynamo, but other systems
are open to managing their own TracingContext with that in mind.
Currently, only guards live on the TracingContext, in the form of a GuardsContext.
However, future implementations will move FakeTensorMode (and its owned ShapeEnv), as well
as other structures into it.
The purpose of TracingContext is not to be a dumping ground, or god object, but rather to avoid
having to plumb complex subsystems across multiple verticals.
Ex: A common example is guard accumulation between dynamo, shape_env, aot_autograd, and inductor.
Accessing the current tracing context via
TracingContext.get() allows users to accumulate their own guards for processing, without needing to know how
to plumb objects back up to where frame interpretation happened.
"""
class TracingContext:
"""
Provides the currently installed TracingContext, or None.
Note that it is a staticmethod, and invocations outside of `with tracing()` (see below), are valid but
will return None.
"""
@staticmethod
def get() -> Optional["TracingContext"]:
return _CURRENT_TRACING_CONTEXT
def __init__(self, fake_mode):
self.guards_context = GuardsContext()
self.module_context = ModuleContext()
self.global_context = GlobalContext()
self.fake_mode = fake_mode
self.frame_summary_stack = []
self.loc_in_frame = None
# this is only set after aot_autograd
self.fw_metadata = None
self.params_flat = None
@staticmethod
def extract_stack():
self = TracingContext.get()
if self is None:
return traceback.StackSummary()
stack = list(self.frame_summary_stack)
if self.loc_in_frame is not None:
stack.append(self.loc_in_frame)
return traceback.StackSummary.from_list(stack)
# Call this when you want to call into some code that isn't necessarily
# associated with the current frame state
@staticmethod
@contextlib.contextmanager
def clear_frame():
tc = TracingContext.get()
assert (
tc is not None
), "Frame context manager must be called within an ongoing trace."
with unittest.mock.patch.object(
tc, "frame_summary_stack", []
), unittest.mock.patch.object(tc, "loc_in_frame", None):
yield
@staticmethod
@contextlib.contextmanager
def current_frame(frame_summary):
tc = TracingContext.get()
assert (
tc is not None
), "Frame context manager must be called within an ongoing trace."
tc.frame_summary_stack.append(frame_summary)
try:
yield
finally:
tc.frame_summary_stack.pop()
@staticmethod
def set_current_loc(filename, lineno, frame_name):
tc = TracingContext.get()
assert (
tc is not None
), "Loc context manager must be called within an ongoing trace."
tc.loc_in_frame = traceback.FrameSummary(filename, lineno, frame_name)
"""
This function installs the passed in tracing context as a dynamic scoped global variable.
Calls to TracingContext.get() while not under a `with tracing()` context will return None.
"""
@contextmanager
def tracing(context: TracingContext):
global _CURRENT_TRACING_CONTEXT
old_context = _CURRENT_TRACING_CONTEXT
_CURRENT_TRACING_CONTEXT = context
try:
yield _CURRENT_TRACING_CONTEXT
finally:
_CURRENT_TRACING_CONTEXT = old_context
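# Minimal usage sketch (illustrative; constructing TracingContext with
# fake_mode=None is an assumption made here for brevity):
#
#   ctx = TracingContext(fake_mode=None)
#   with tracing(ctx):
#       assert TracingContext.get() is ctx
#   assert TracingContext.get() is None  # prior context restored on exit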
# Subclasses can be found in torch/_dynamo/source.py
# TODO(voz): Consider a toplevel torch/_source.py
@dataclasses.dataclass(frozen=True)
class Source:
def reconstruct(self, codegen):
raise NotImplementedError()
def guard_source(self) -> GuardSource:
raise NotImplementedError()
def name(self) -> str:
raise NotImplementedError()
def make_guard(self, fn, is_volatile=False) -> Guard:
if self.guard_source() is GuardSource.CONSTANT:
raise NotImplementedError()
return Guard(self.name(), self.guard_source(), fn, is_volatile)
def is_nn_module(self) -> bool:
return self.guard_source().is_nn_module()
# Subclasses can be found in torch/_dynamo/source.py
# Note - there is an odd exception to this invariant of a single base,
# see class SuperSource
@dataclasses.dataclass(frozen=True)
class ChainedSource(Source):
base: Source
def detect_fake_mode(inputs: Any = None):
"""
Attempts to "detect" what the current fake mode is. If there is one ambiently
available from TracingContext, we preferentially use that. Otherwise, we
heuristically detect the fake mode via the following sources, in order of
priority:
- Currently active fake mode on stack
- Fake mode associated with passed in tensors (inputs does not
have to be flattened)
"""
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.utils._pytree import tree_flatten
fake_modes = []
context = TracingContext.get()
if context is not None:
fake_mode = context.fake_mode
if fake_mode is not None:
fake_modes.append((fake_mode, "tracing context", 0))
from torch.utils._python_dispatch import _get_current_dispatch_mode_stack
for i, m in enumerate(reversed(_get_current_dispatch_mode_stack())):
if isinstance(m, FakeTensorMode):
fake_modes.append((m, "active fake mode", i))
flat_inputs, _ = tree_flatten(inputs)
for i, flat_input in enumerate(flat_inputs):
if isinstance(flat_input, FakeTensor):
fake_modes.append((flat_input.fake_mode, "fake tensor input", i))
if fake_modes:
fake_mode, desc1, i1 = fake_modes[0]
for m, desc2, i2 in fake_modes[1:]:
assert (
fake_mode is m
), f"fake mode ({fake_mode}) from {desc1} {i1} doesn't match mode ({m}) from {desc2} {i2}"
return fake_mode
else:
return None
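# Example (hypothetical tensors t1, t2): detect_fake_mode([t1, t2]) returns
# their shared FakeTensorMode, returns None when no fake mode is discoverable,
# and asserts if the candidate modes disagree.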
EXPORT_FAKE_MODE = None
@contextlib.contextmanager
def export_fake_mode(fake_mode):
global EXPORT_FAKE_MODE
assert EXPORT_FAKE_MODE is None
EXPORT_FAKE_MODE = fake_mode
try:
yield
finally:
EXPORT_FAKE_MODE = None
|
[
"[email protected]"
] | |
376ec43ace0b27750be44e8dc6139fac96157d25
|
9e138b34d78573f70ef9bdb3335efa1fd65712aa
|
/users/migrations/0002_auto_20210122_0209.py
|
138dbebb6e18566f27d87a81983c82259772b95b
|
[] |
no_license
|
crowdbotics-apps/sample-23979
|
425d0a0f1542ddc4189d68ca941eb3e0b1dcbae6
|
08630ad101da5da1bff294b827630c9114be5f22
|
refs/heads/master
| 2023-02-20T05:10:47.820119 | 2021-01-22T02:09:03 | 2021-01-22T02:09:03 | 331,809,685 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 394 |
py
|
# Generated by Django 2.2.17 on 2021-01-22 02:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"[email protected]"
] | |
6fd5d011b418c7de5e80a6fbddc50b9f85a3fcfe
|
141d1fb160fcfb4294d4b0572216033218da702d
|
/exec -l /bin/zsh/google-cloud-sdk/lib/surface/notebooks/environments/__init__.py
|
dad9f635bf941599a6342ab5848568562a9de4d2
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
sudocams/tech-club
|
1f2d74c4aedde18853c2b4b729ff3ca5908e76a5
|
c8540954b11a6fd838427e959e38965a084b2a4c
|
refs/heads/master
| 2021-07-15T03:04:40.397799 | 2020-12-01T20:05:55 | 2020-12-01T20:05:55 | 245,985,795 | 0 | 1 | null | 2021-04-30T21:04:39 | 2020-03-09T08:51:41 |
Python
|
UTF-8
|
Python
| false | false | 986 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud notebooks environments command group."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class Environments(base.Group):
"""Notebooks Environments Command Group."""
pass
|
[
"[email protected]"
] |