| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f0d3727a003f65b28d97e95316cdc9eefd284eb | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_196/ch80_2020_04_13_18_23_05_143280.py | f6edda895b2e0e2bcd29788dd3078b902f425c3f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | def interseccao_chaves(dic1,dic2):
lista = []
    for a in dic1.keys():
        if a in dic2.keys():
            lista.append(a)
return lista
| [
"[email protected]"
] | |
441e60c7846fde6cca41e6cbb3845b685e4f8672 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/cctbx/symmetry_search/boost_python/SConscript | be2824dfaa2fdc51694642b708bafd590f93bda6 | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 216 | Import("env_cctbx_boost_python_ext")
env = env_cctbx_boost_python_ext.Clone()
env.Prepend(LIBS=["cctbx", "omptbx"])
env.SharedLibrary(target="#lib/cctbx_symmetry_search_ext", source=[
"symmetry_search_ext.cpp",
])
| [
"[email protected]"
] | ||
3f39b4c11c3aa082d210897c4b788bb31b2e0551 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/carbonui/control/windowDropDownMenu.py | 6c26d7806b20cec4ebb3158345c97b472461b7f6 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 1,453 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\carbonui\control\windowDropDownMenu.py
import carbonui.const as uiconst
from carbonui.primitives.container import Container
from carbonui.primitives.fill import Fill
from carbonui.primitives.line import Line
from carbonui.control.label import LabelOverride as Label
class WindowDropDownMenuCore(Container):
__guid__ = 'uicls.WindowDropDownMenuCore'
default_height = 10
default_align = uiconst.TOLEFT
default_state = uiconst.UI_NORMAL
def Setup(self, name, GetMenu):
self.name = name
self.expandOnLeft = 1
self.PrepareLayout()
self.GetMenu = GetMenu
def PrepareLayout(self):
Line(parent=self, align=uiconst.TORIGHT)
self.label = Label(text=self.name, parent=self, align=uiconst.CENTER, fontsize=9, letterspace=1, top=1, state=uiconst.UI_DISABLED, uppercase=1)
self.hilite = Fill(parent=self, state=uiconst.UI_HIDDEN, padding=1)
self.width = self.label.width + 10
self.cursor = uiconst.UICURSOR_SELECT
def OnMouseEnter(self):
self.hilite.state = uiconst.UI_DISABLED
def OnMouseExit(self):
self.hilite.state = uiconst.UI_HIDDEN
def GetMenuPosition(self, *args):
return (self.absoluteLeft, self.absoluteBottom + 2)
class WindowDropDownMenuCoreOverride(WindowDropDownMenuCore):
pass
| [
"[email protected]"
] | |
bc9fb2afed22a652d7a229f920fb725987c8015a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /7DrvnMeY2Ebzk2mfH_8.py | cdf4c6f5d8fb4f7a25817718499599ad9938b579 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py |
import re
body_insert = '(?<=<body>\n)'
body_append = '(?=\n</body>)'
body_rewrite = '(?<=<body>\n)(?:\n|.)+(?=\n</body>)'
| [
"[email protected]"
] | |
0c297e81d5ab99eb43ea0c1b3d9a817d82935b03 | 7aebf21ea5e46697804d395b1a32f8f97b9acc5c | /models/Bert-deepwide/bert_attention.py | 2578d791610fca342abbd1d5c9c7d341597c467f | [] | no_license | chenxingqiang/RecSys-CTR-Model-2020 | d37b4a5b336bcdcf908780c116b6407c998e772c | 3407657dc71427daf33b4a962173f36467378c1e | refs/heads/main | 2023-07-04T17:56:26.926269 | 2021-08-27T08:45:05 | 2021-08-27T08:45:05 | 399,761,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,369 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, and exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=128,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=1,
from_seq_length=None,
                    to_seq_length=None,
                    sum_reduce_last_dim=False):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
corresponding sequence in `to_tensor`, and returns a fixed-with vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention are done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
if sum_reduce_last_dim:
context_layer = tf.reduce_sum(context_layer,axis=-1)
return context_layer
| [
"[email protected]"
] | |
18dc511ca7e964ffb86151143fd018120be351dd | 482d7d5770dfc17db5b1a0e780b634d3a9f5572a | /Project3/code/Flipped counties.py | 9c909942d9dfbf7bca813ffccfd5049540d6eb76 | [] | no_license | fmsilver89/FYS_STK_4155 | 5b9a878330f06a29ec6416aff92a06ebf0ba8dd8 | 189b7ef0d18cd9395eeab82702376ae91ad24d17 | refs/heads/master | 2020-09-11T13:24:15.963157 | 2019-11-16T10:18:21 | 2019-11-16T10:18:21 | 222,078,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | import numpy as np
import pandas as pd
# Import CSV-file
data = pd.read_table('Data/US_Election.csv', sep = ';', header = 0, encoding = 'latin1')
# 2012 Election
DEM_2012 = np.array(data.iloc[:, -2])
DEM_2012 = np.where(DEM_2012 > 0.5, 1, 0)
# 2016 Election
DEM_2016 = np.array(data.iloc[:, -4])
DEM_2016 = np.where(DEM_2016 > 0.5, 1, 0)
# Counties indices
flipped_counties_indices = np.where(DEM_2012 != DEM_2016)[0]
# Binary classification of the counties
n = len(DEM_2012)
flipped_counties = np.zeros((n,))
flipped_counties[flipped_counties_indices] = 1
# Write to file
#f = open("Data/flipped_counties.txt", "w+")
#for i in range(n):
# f.write('%d \n' % flipped_counties[i])
#f.close() | [
"[email protected]"
] | |
f67de883a6752ffbaab01bd20e984e4ddb2a51eb | 7a40213ccfe36a16c803cf37111b96148a0a69a6 | /tests/unit/async_/io/test_class_bolt5x1.py | 2ee26b130a08e1ce24775282c0adad56c15af6cc | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | neo4j/neo4j-python-driver | 6bc0ae1acf63e65e8b6db8bc890e1d8ae45d7b7d | 1bd382f48e00c748c83cb910401a74336fbf2feb | refs/heads/5.0 | 2023-09-06T05:19:09.892773 | 2023-09-05T15:04:40 | 2023-09-05T15:04:40 | 35,100,117 | 873 | 214 | NOASSERTION | 2023-09-05T15:04:42 | 2015-05-05T13:08:20 | Python | UTF-8 | Python | false | false | 22,452 | py | # Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import pytest
import neo4j
import neo4j.exceptions
from neo4j._async.io._bolt5 import AsyncBolt5x1
from neo4j._conf import PoolConfig
from neo4j._meta import USER_AGENT
from neo4j.exceptions import ConfigurationError
from ...._async_compat import mark_async_test
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_stale(fake_socket, set_stale):
address = neo4j.Address(("127.0.0.1", 7687))
max_connection_lifetime = 0
connection = AsyncBolt5x1(address, fake_socket(address),
max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is True
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale_if_not_enabled(fake_socket, set_stale):
address = neo4j.Address(("127.0.0.1", 7687))
max_connection_lifetime = -1
connection = AsyncBolt5x1(address, fake_socket(address),
max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is set_stale
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale(fake_socket, set_stale):
address = neo4j.Address(("127.0.0.1", 7687))
max_connection_lifetime = 999999999
connection = AsyncBolt5x1(address, fake_socket(address),
max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is set_stale
@pytest.mark.parametrize(("args", "kwargs", "expected_fields"), (
(("", {}), {"db": "something"}, ({"db": "something"},)),
(("", {}), {"imp_user": "imposter"}, ({"imp_user": "imposter"},)),
(
("", {}),
{"db": "something", "imp_user": "imposter"},
({"db": "something", "imp_user": "imposter"},)
),
))
@mark_async_test
async def test_extra_in_begin(fake_socket, args, kwargs, expected_fields):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.begin(*args, **kwargs)
await connection.send_all()
tag, is_fields = await socket.pop_message()
assert tag == b"\x11"
assert tuple(is_fields) == expected_fields
@pytest.mark.parametrize(("args", "kwargs", "expected_fields"), (
(("", {}), {"db": "something"}, ("", {}, {"db": "something"})),
(("", {}), {"imp_user": "imposter"}, ("", {}, {"imp_user": "imposter"})),
(
("", {}),
{"db": "something", "imp_user": "imposter"},
("", {}, {"db": "something", "imp_user": "imposter"})
),
))
@mark_async_test
async def test_extra_in_run(fake_socket, args, kwargs, expected_fields):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.run(*args, **kwargs)
await connection.send_all()
tag, is_fields = await socket.pop_message()
assert tag == b"\x10"
assert tuple(is_fields) == expected_fields
@mark_async_test
async def test_n_extra_in_discard(fake_socket):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.discard(n=666)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == {"n": 666}
@pytest.mark.parametrize(
"test_input, expected",
[
(666, {"n": -1, "qid": 666}),
(-1, {"n": -1}),
]
)
@mark_async_test
async def test_qid_extra_in_discard(fake_socket, test_input, expected):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.discard(qid=test_input)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(777, {"n": 666, "qid": 777}),
(-1, {"n": 666}),
]
)
@mark_async_test
async def test_n_and_qid_extras_in_discard(fake_socket, test_input, expected):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.discard(n=666, qid=test_input)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(666, {"n": 666}),
(-1, {"n": -1}),
]
)
@mark_async_test
async def test_n_extra_in_pull(fake_socket, test_input, expected):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.pull(n=test_input)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(777, {"n": -1, "qid": 777}),
(-1, {"n": -1}),
]
)
@mark_async_test
async def test_qid_extra_in_pull(fake_socket, test_input, expected):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.pull(qid=test_input)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == expected
@mark_async_test
async def test_n_and_qid_extras_in_pull(fake_socket):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.pull(n=666, qid=777)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == {"n": 666, "qid": 777}
@mark_async_test
async def test_hello_passes_routing_metadata(fake_socket_pair):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/4.4.0"})
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(
address, sockets.client, PoolConfig.max_connection_lifetime,
routing_context={"foo": "bar"}
)
await connection.hello()
tag, fields = await sockets.server.pop_message()
assert tag == b"\x01"
assert len(fields) == 1
assert fields[0]["routing"] == {"foo": "bar"}
async def _assert_logon_message(sockets, auth):
tag, fields = await sockets.server.pop_message()
assert tag == b"\x6A" # LOGON
assert len(fields) == 1
keys = ["scheme", "principal", "credentials"]
assert list(fields[0].keys()) == keys
for key in keys:
assert fields[0][key] == getattr(auth, key)
@mark_async_test
async def test_hello_pipelines_logon(fake_socket_pair):
auth = neo4j.Auth("basic", "alice123", "supersecret123")
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(
b"\x7F", {"code": "Neo.DatabaseError.General.MadeUpError",
"message": "kthxbye"}
)
connection = AsyncBolt5x1(
address, sockets.client, PoolConfig.max_connection_lifetime, auth=auth
)
with pytest.raises(neo4j.exceptions.Neo4jError):
await connection.hello()
tag, fields = await sockets.server.pop_message()
assert tag == b"\x01" # HELLO
assert len(fields) == 1
assert list(fields[0].keys()) == ["user_agent"]
assert auth.credentials not in repr(fields)
await _assert_logon_message(sockets, auth)
@mark_async_test
async def test_logon(fake_socket_pair):
auth = neo4j.Auth("basic", "alice123", "supersecret123")
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, sockets.client,
PoolConfig.max_connection_lifetime, auth=auth)
connection.logon()
await connection.send_all()
await _assert_logon_message(sockets, auth)
@mark_async_test
async def test_re_auth(fake_socket_pair, mocker, static_auth):
auth = neo4j.Auth("basic", "alice123", "supersecret123")
auth_manager = static_auth(auth)
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(
b"\x7F", {"code": "Neo.DatabaseError.General.MadeUpError",
"message": "kthxbye"}
)
connection = AsyncBolt5x1(address, sockets.client,
PoolConfig.max_connection_lifetime)
connection.pool = mocker.AsyncMock()
connection.re_auth(auth, auth_manager)
await connection.send_all()
with pytest.raises(neo4j.exceptions.Neo4jError):
await connection.fetch_all()
tag, fields = await sockets.server.pop_message()
assert tag == b"\x6B" # LOGOFF
assert len(fields) == 0
await _assert_logon_message(sockets, auth)
assert connection.auth is auth
assert connection.auth_manager is auth_manager
@mark_async_test
async def test_logoff(fake_socket_pair):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(address, sockets.client,
PoolConfig.max_connection_lifetime)
connection.logoff()
assert not sockets.server.recv_buffer # pipelined, so no response yet
await connection.send_all()
assert sockets.server.recv_buffer # now!
tag, fields = await sockets.server.pop_message()
assert tag == b"\x6B" # LOGOFF
assert len(fields) == 0
@pytest.mark.parametrize(("hints", "valid"), (
({"connection.recv_timeout_seconds": 1}, True),
({"connection.recv_timeout_seconds": 42}, True),
({}, True),
({"whatever_this_is": "ignore me!"}, True),
({"connection.recv_timeout_seconds": -1}, False),
({"connection.recv_timeout_seconds": 0}, False),
({"connection.recv_timeout_seconds": 2.5}, False),
({"connection.recv_timeout_seconds": None}, False),
({"connection.recv_timeout_seconds": False}, False),
({"connection.recv_timeout_seconds": "1"}, False),
))
@mark_async_test
async def test_hint_recv_timeout_seconds(
fake_socket_pair, hints, valid, caplog, mocker
):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
sockets.client.settimeout = mocker.Mock()
await sockets.server.send_message(
b"\x70", {"server": "Neo4j/4.3.4", "hints": hints}
)
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(
address, sockets.client, PoolConfig.max_connection_lifetime
)
with caplog.at_level(logging.INFO):
await connection.hello()
if valid:
if "connection.recv_timeout_seconds" in hints:
sockets.client.settimeout.assert_called_once_with(
hints["connection.recv_timeout_seconds"]
)
else:
sockets.client.settimeout.assert_not_called()
assert not any("recv_timeout_seconds" in msg
and "invalid" in msg
for msg in caplog.messages)
else:
sockets.client.settimeout.assert_not_called()
assert any(repr(hints["connection.recv_timeout_seconds"]) in msg
and "recv_timeout_seconds" in msg
and "invalid" in msg
for msg in caplog.messages)
CREDENTIALS = "+++super-secret-sauce+++"
@pytest.mark.parametrize("auth", (
("user", CREDENTIALS),
neo4j.basic_auth("user", CREDENTIALS),
neo4j.kerberos_auth(CREDENTIALS),
neo4j.bearer_auth(CREDENTIALS),
neo4j.custom_auth("user", CREDENTIALS, "realm", "scheme"),
neo4j.Auth("scheme", "principal", CREDENTIALS, "realm", foo="bar"),
))
@mark_async_test
async def test_credentials_are_not_logged(auth, fake_socket_pair, caplog):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/4.3.4"})
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(
address, sockets.client, PoolConfig.max_connection_lifetime, auth=auth
)
with caplog.at_level(logging.DEBUG):
await connection.hello()
if isinstance(auth, tuple):
auth = neo4j.basic_auth(*auth)
for field in ("scheme", "principal", "realm", "parameters"):
value = getattr(auth, field, None)
if value:
assert repr(value) in caplog.text
assert CREDENTIALS not in caplog.text
@pytest.mark.parametrize(("method", "args"), (
("run", ("RETURN 1",)),
("begin", ()),
))
@pytest.mark.parametrize("kwargs", (
{"notifications_min_severity": "WARNING"},
{"notifications_disabled_categories": ["HINT"]},
{"notifications_disabled_categories": []},
{
"notifications_min_severity": "WARNING",
"notifications_disabled_categories": ["HINT"]
},
))
def test_does_not_support_notification_filters(fake_socket, method,
args, kwargs):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
method = getattr(connection, method)
with pytest.raises(ConfigurationError, match="Notification filtering"):
method(*args, **kwargs)
@mark_async_test
@pytest.mark.parametrize("kwargs", (
{"notifications_min_severity": "WARNING"},
{"notifications_disabled_categories": ["HINT"]},
{"notifications_disabled_categories": []},
{
"notifications_min_severity": "WARNING",
"notifications_disabled_categories": ["HINT"]
},
))
async def test_hello_does_not_support_notification_filters(
fake_socket, kwargs
):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(
address, socket, PoolConfig.max_connection_lifetime,
**kwargs
)
with pytest.raises(ConfigurationError, match="Notification filtering"):
await connection.hello()
@mark_async_test
@pytest.mark.parametrize(
"user_agent", (None, "test user agent", "", USER_AGENT)
)
async def test_user_agent(fake_socket_pair, user_agent):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/1.2.3"})
await sockets.server.send_message(b"\x70", {})
max_connection_lifetime = 0
connection = AsyncBolt5x1(
address, sockets.client, max_connection_lifetime, user_agent=user_agent
)
await connection.hello()
tag, fields = await sockets.server.pop_message()
extra = fields[0]
if not user_agent:
assert extra["user_agent"] == USER_AGENT
else:
assert extra["user_agent"] == user_agent
@mark_async_test
@pytest.mark.parametrize(
"user_agent", (None, "test user agent", "", USER_AGENT)
)
async def test_does_not_send_bolt_agent(fake_socket_pair, user_agent):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/1.2.3"})
await sockets.server.send_message(b"\x70", {})
max_connection_lifetime = 0
connection = AsyncBolt5x1(
address, sockets.client, max_connection_lifetime, user_agent=user_agent
)
await connection.hello()
tag, fields = await sockets.server.pop_message()
extra = fields[0]
assert "bolt_agent" not in extra
@mark_async_test
@pytest.mark.parametrize(
("func", "args", "extra_idx"),
(
("run", ("RETURN 1",), 2),
("begin", (), 0),
)
)
@pytest.mark.parametrize(
("timeout", "res"),
(
(None, None),
(0, 0),
(0.1, 100),
(0.001, 1),
(1e-15, 1),
(0.0005, 1),
(0.0001, 1),
(1.0015, 1002),
(1.000499, 1000),
(1.0025, 1002),
(3.0005, 3000),
(3.456, 3456),
(1, 1000),
(
-1e-15,
ValueError("Timeout must be a positive number or 0")
),
(
"foo",
ValueError("Timeout must be specified as a number of seconds")
),
(
[1, 2],
TypeError("Timeout must be specified as a number of seconds")
)
)
)
async def test_tx_timeout(
fake_socket_pair, func, args, extra_idx, timeout, res
):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(address, sockets.client, 0)
func = getattr(connection, func)
if isinstance(res, Exception):
with pytest.raises(type(res), match=str(res)):
func(*args, timeout=timeout)
else:
func(*args, timeout=timeout)
await connection.send_all()
tag, fields = await sockets.server.pop_message()
extra = fields[extra_idx]
if timeout is None:
assert "tx_timeout" not in extra
else:
assert extra["tx_timeout"] == res
@pytest.mark.parametrize(
"actions",
itertools.combinations_with_replacement(
itertools.product(
("run", "begin", "begin_run"),
("reset", "commit", "rollback"),
(None, "some_db", "another_db"),
),
2
)
)
@mark_async_test
async def test_tracks_last_database(fake_socket_pair, actions):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, sockets.client, 0)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/1.2.3"})
await sockets.server.send_message(b"\x70", {})
await connection.hello()
assert connection.last_database is None
for action, finish, db in actions:
await sockets.server.send_message(b"\x70", {})
if action == "run":
connection.run("RETURN 1", db=db)
elif action == "begin":
connection.begin(db=db)
elif action == "begin_run":
connection.begin(db=db)
assert connection.last_database == db
await sockets.server.send_message(b"\x70", {})
connection.run("RETURN 1")
else:
raise ValueError(action)
assert connection.last_database == db
await connection.send_all()
await connection.fetch_all()
assert connection.last_database == db
await sockets.server.send_message(b"\x70", {})
if finish == "reset":
await connection.reset()
elif finish == "commit":
if action == "run":
connection.pull()
else:
connection.commit()
elif finish == "rollback":
if action == "run":
connection.pull()
else:
connection.rollback()
else:
raise ValueError(finish)
await connection.send_all()
await connection.fetch_all()
assert connection.last_database == db
| [
"[email protected]"
] | |
bd5d4faa17a341677a9d9f49921f040b2ffa8302 | 0391b9a73193e36156cd17c2f778eb03b96f575e | /seism_to_csv_Z.py | 00a5f6552ee8c47efe3c0ac2d693281b17f5f51c | [] | no_license | xian-ran/anisotropy_nn | 0349565eafef87c2ab32ff59859409520e5c8bc7 | 65bee9be0673a48bfd87774c1d1b8b2a90ec8541 | refs/heads/master | 2022-04-05T17:07:33.910285 | 2020-02-18T17:05:21 | 2020-02-18T17:05:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | # Z-component
# column -- timesample, row -- trace
import obspy
import numpy as np
import pandas as pd
st = obspy.read('model_1\\model+GathNP-Z.sgy')
N_traces = len(st)
N_timesamples = len(st[0])
for i in range(1,1101):
print('\t',i,'\t/\t1100')
st = obspy.read('model_{}/model+GathNP-Z.sgy'.format(i))
data = np.empty([N_timesamples])
for n in range(N_traces):
data = np.vstack((data,st[n]))
df = pd.DataFrame(data[1:])
df.to_csv('csv_models_2frac_Thomsen_saturated_full_formulae_Z\\model_{}.csv'.format(i),index=None)
| [
"[email protected]"
] | |
47705667e33f6a7904b1f49b64c31ef0d425499d | 613bac317b6094b6d055e1d2e83576611a47b535 | /Lab-2/filters.py | 6d18959c8ef3b325fdd875054cb09dcaf57c87d2 | [] | no_license | Bylundc/ME-499-Python | 2b082b9608613e35b5fc046d3e4d74dbb498a68d | 30fedc6855268bc61a2a4f5cf25eeaee442fa502 | refs/heads/master | 2021-01-19T03:52:24.929976 | 2017-07-26T20:11:39 | 2017-07-26T20:11:39 | 87,340,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,861 | py | #!/usr/bin/env python
#import Gnuplot, Gnuplot.funcutils
import numpy
from sensor import *
# load raw data in
rawme = numpy.loadtxt("raw",usecols=(1,))
print rawme
# load raw data in
rawmed = numpy.loadtxt("raw",usecols=(1,))
# define a MEAN filter
def filtme(data):
k = 0
filtered = []
while True:
filtered += [sum(data[k:k+w])/w]
if k < len(data)-w:
k +=1
else:
break
return filtered
# define a MEDIAN filter
def filtmed(data, w = 3):
k = 0
filteredm = []
while True:
        filteredm += [numpy.median(data[k:k+w])]
        if k < len(data)-w:
k +=1
else:
break
return filteredm
# Ask for a filter width
w = int(raw_input('Enter a filter width: ')) #width
if w % 2 == 0:
print "Width is even, choose an odd number."
elif w < 0:
print "Width is negative, choose a positive number."
else:
print filtme(rawme)
"""
# save filtered data
print_sensor_data(filtme(rawme), 'filtered')
# load in filtered data
filtered = numpy.loadtxt("filtered",usecols=(1,))
# plot raw data vs filtered data - MEAN
gplot = Gnuplot.Gnuplot(persist=1)
gplot.title("Filtered Data vs Raw - Mean")
rawme = Gnuplot.PlotItems.Data(rawme, with_="linespoints lt rgb 'blue' lw 1 pt 1", title="raw")
filteredme = Gnuplot.PlotItems.Data(filtered, with_="linespoints lt rgb 'black' lw 1 pt 1", title="filtered")
gplot.plot(rawme,filteredme)
# save filtered data
print_sensor_data(filtmed(rawmed), 'filteredm')
# load in filtered data
filteredm = numpy.loadtxt("filteredm",usecols=(1,))
# plot raw data vs filtered data - MEDIAN
g = Gnuplot.Gnuplot(persist=1)
g.title("Filtered Data vs Raw - Median")
rawmed = Gnuplot.PlotItems.Data(rawmed, with_="linespoints lt rgb 'blue' lw 1 pt 1", title="raw")
filteredmed = Gnuplot.PlotItems.Data(filteredm, with_="linespoints lt rgb 'red' lw 1 pt 1", title="filtered")
g.plot(rawmed,filteredmed)
"""
| [
"[email protected]"
] | |
27a9e101cd4a7f253db5f5c89fb3068918340ead | 34745a8d54fa7e3d9e4237415eb52e507508ad79 | /Python Fundamentals/03 Lists Basics/Exercises/07_Easter_Gifts.py | 172ea7853a18b1443adb30323f730642b61c1f6b | [] | no_license | DilyanTsenkov/SoftUni-Software-Engineering | 50476af0dc88b267d72c56fa87eeb88d841164b2 | fe446e3a50a00bb2e48d71ab8f783e0a4a406094 | refs/heads/main | 2023-08-12T18:18:42.144210 | 2021-09-25T11:10:38 | 2021-09-25T11:10:38 | 317,235,419 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | gifts_names = input().split(" ")
command = input()
while command != "No Money":
command_list = command.split(" ")
if command_list[0] == "OutOfStock":
if command_list[1] in gifts_names:
for i in range(len(gifts_names)):
if gifts_names[i] == command_list[1]:
gifts_names[i] = "None"
elif command_list[0] == "Required" and int(command_list[2]) > 0 and int(command_list[2]) <= int(
len(gifts_names)) - 1:
gifts_names[int(command_list[2])] = command_list[1]
elif command_list[0] == "JustInCase":
gifts_names[int(len(gifts_names)) - 1] = command_list[1]
command = input()
for n in range(len(gifts_names)):
if "None" in gifts_names:
gifts_names.remove("None")
gifts_names_print = " ".join(gifts_names)
print(gifts_names_print)
| [
"[email protected]"
] | |
fbd540a9a8a2dc77e250b42930f27847e6734bb8 | 53015e1d44805dc884b282583608ad5a03dcc8a0 | /P25.py | a90e2715fab96b790dcef6f1b2647bc295e22732 | [] | no_license | mitali-1703/Python-Lab-Work | 0db24ed5d663f8b0ad09867594ad86d9c30b9b0d | 30438481fd46fcfac93f06dd6cda2b961914f881 | refs/heads/master | 2023-04-22T01:57:51.526041 | 2021-05-13T18:38:14 | 2021-05-13T18:38:14 | 295,008,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | #Write a function calculation() which accepts 2 variables and calculates their sum and
# difference in a single return call.
def calculation(x,y):
sum=x+y
diff=x-y
return(sum,diff)
a=int(input("Enter first number:"))
b=int(input("Enter second number:"))
s,d=calculation(a,b)
print("The sum and difference of the numbers respectively is:",s,d) | [
"[email protected]"
] | |
37d23ae628c21b76f4715d973d1d08d02af4b6ca | 7ab15522084e2f81d39cda505da844fb4d519f9d | /Linear DS/Hard/Array Manipulation/array_manipulation.py | 965441f349f2bce6c1db189177727e984daceb2b | [] | no_license | Infinite-Loop-KJSIEIT/Algo-Talks | 1662cfd802bfbe4a9bfcf80a9c3157334e5cb4fd | 27d85ae3827f8765a4ebe98c80cc55b53c0562b0 | refs/heads/master | 2022-12-25T21:53:57.745115 | 2020-10-03T07:07:02 | 2020-10-03T07:07:02 | 286,681,402 | 13 | 3 | null | 2020-10-03T07:07:04 | 2020-08-11T07:53:23 | Python | UTF-8 | Python | false | false | 419 | py | import sys
def uno(): return int(sys.stdin.readline().strip())
def dos(): return sys.stdin.readline().strip()
def tres(): return map(int, sys.stdin.readline().strip().split())
def cuatro(): return sys.stdin.readline().strip().split()
n, m = tres()
ar, mx, sm = [0]*(n+1), 0, 0
for i in range(m):
a, b, k = tres()
ar[a-1] += k
ar[b] -= k
for i in range(n+1):
sm += ar[i]
mx = max(mx, sm)
print(mx)
| [
"[email protected]"
] | |
c4d693a018899753b9d47f6da7643ece8efb4bfe | 10fbe5526e5f0b8588b65f70f088cd86b6e9afbe | /irmtbds/migrations/0002_auto_20150218_1621.py | 3c05b27f5b6c037590a673b577c9744a196e934f | [] | no_license | MarkusH/django-migrations-benchmark | eb4b2312bb30a5a5d2abf25e95eca8f714162056 | e2bd24755389668b34b87d254ec8ac63725dc56e | refs/heads/master | 2016-09-05T15:36:45.250134 | 2015-03-31T23:44:28 | 2015-03-31T23:44:28 | 31,168,231 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('irmtbds', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='rqzheruyb',
name='xknvpfy',
),
migrations.AddField(
model_name='rqzheruyb',
name='kplrvqptcm',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
6c9b764a14bf8bfa12a485be883a1637e1498062 | 8efe1f1ea1a9ac81b8abc261aae0a8084131b478 | /utility/get_korea_stock_code_list.py | c7f6113b033b39f2293a5738c3d698a82af033f2 | [] | no_license | linanzhu/TradeBot | 8de6befd715724ff5602b5dc71c89132b0cf0cca | a9b08fc48d2ad4b5e27c92c72968a88eed191acf | refs/heads/master | 2020-03-18T17:47:48.062419 | 2018-05-27T14:30:13 | 2018-05-27T14:30:13 | 135,051,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | # -*- coding: utf-8 -*-
# 다음은 주식알고리즘 전문회사 사이트이다
# 상장 주식들의 코드를 제공한다.
# http://bigdata-trader.com/itemcodehelp.jsp
# Install해야 할 package들
# : pip install lxml
# : pip install html5lib
# : pip install beautifulsoup4
import os
import numpy as np
import html5lib
import pandas as pd
if float(pd.__version__[0:3]) >= 0.2:
# Need to install "pip3 install pandas_datareader"
import pandas_datareader.data as pdr
else:
import pandas.io.data as pdr
code_df = pd.read_html('http://bigdata-trader.com/itemcodehelp.jsp', header=0)[0]
code_df = code_df.rename(columns={'종목코드': 'Code', '종목명': 'Name', '종류': 'Market'})
code_df = code_df[['Code', 'Name', 'Market']]
# Since stock codes have 6 digits, this pads them to 6 digits
#code_df.Code = code_df.Code.map('{:06d}'.format)
savepath = os.getcwd() + '/korea_all_stock_code.csv'
code_df.to_csv(savepath, sep=',', index=False)
| [
"[email protected]"
] | |
15255dffd47f10b3f99409f7b5dea95315005ab9 | fb8cbebdf034b2f478943752d5443afc82c6eef5 | /tuirer/users/models.py | a3a6f2b88a946f2a8ca0ab80decd3e78a3924509 | [] | no_license | fariasjr/CitiTuirer | f64e0ec93ef088f8140bb0961d2ad4ed3b59448a | deb3f7a9c2d45b8a7f54639037f097b99abdac11 | refs/heads/master | 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    picture = models.ImageField('Foto de perfil', default='/img/blank-pic.png')
following = models.ManyToManyField('self', blank=True) | [
"[email protected]"
] | |
605b69b97d71ca06ff53108fa17904b0d3e284f3 | e9ceaa0bb091c189373ac0c70a545bca5791d50d | /egg_timer_2.py | feffa178e94448820d73a59289c40ae4f4105fe6 | [] | no_license | oupofat/lesson-one | 5a967a14a68175ddde4b6f4e77d0a068e8ad8262 | 8fa591fc4be08ccd4eb0bb01a72eaa5795eb295a | refs/heads/master | 2021-05-01T21:24:38.509955 | 2018-02-10T02:18:29 | 2018-02-10T02:18:29 | 120,976,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | '''It takes 2 eggs to make 5 pancakes. Ask the user how many pancakes they want to make, and tell them how many eggs they need. Decimals are okay.'''
pancakes = float(input("How many pancakes do you want to make? "))
eggs = 2/5
eggs_uses = eggs * pancakes
print("You will need", eggs_uses, "eggs to make", pancakes, "pancakes!")
"[email protected]"
] | |
727df59b7e7d7e6f5d0fe4af8ed16d4cd63151dd | 0459eca6819b9a57a7fc388ee626fbcece9e6c90 | /projet_st.R | 941c8b00513acd02e1cda0a3404be7fc97b5a664 | [] | no_license | Orlogskapten/Vectoriel_auto_regressif | c3138ee88b05f1765cfb43941061e675ad984356 | 09782d9d33d9dc387d22c16803d14ffa7f78145a | refs/heads/master | 2022-12-01T23:11:59.502390 | 2020-08-13T07:31:51 | 2020-08-13T07:31:51 | 287,211,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,876 | r | #!/usr/bin/env python
# coding: utf-8
# # Time series analysis project
#
# In collaboration with:
#
# - [Paul Leydier](https://github.com/pleydier)
#
# - [Damien Raimond](https://github.com/dams-lab/)
#
# - [Wenceslas Sanchez](https://github.com/Orlogskapten)
#
# ---
#
# The goal of the project is to:
#
# - develop (without any R package) tools to fit a VAR model generalised to order p
#
# - build the impulse response functions of a VAR model of order p
#
# - and apply all of this to the analysis of the dependencies between the Chinese, American and Eurozone economies.
#
# All the functions we developed are generalised to order p, and are commented so that their inputs and their construction can be understood.
#
# ---
#
# ### Contents:
#
# [Question 1](#1)
#
# [Question 2](#2)
#
# [Question 3](#3)
#
# [Question 4](#4)
#
# [Annexes](#a)
# In[1]:
setwd("C:/Users/Wenceslas/Desktop/R/R_project/serie_temp/projet")
data= read.csv("Data.csv", head= TRUE, sep= ";", dec=",")
dates= as.Date(data[, 1], format= "%d.%m.%Y")
data[, 1] = NULL # drop the dates column
data[, 4] = NULL # drop the last column
# Check the column types
str(data)
# The series were read in as factors instead of doubles,
# so we convert the typing: factor -> double
for (i in 1:3){
    data[, i] = as.double(levels(data[, i]))[data[, i]] / 100 # and divide by 100 along the way
}
data_matrix= as.matrix(data)
head(data)
# In[2]:
fig <- function(width, heigth){
    # Sets the size of a plot
    # Equivalent to plt.figure(figsize= (width, heigth)) in Python
options(repr.plot.width= width, repr.plot.height= heigth)
}
fig(5, 8)
layout(matrix(1:3, 3, 1, byrow= T))
col_named= colnames(data) # get the country names
for (i in 1:3){
plot(dates, data[, i]
, col= "red", main= col_named[i]
, ylab= "", xlab= "Dates", type= "l")
grid(col= "grey")
}
# #### Remark:
#
# Above are the dynamics of our three growth-rate series. First observation: the Eurozone and USA series have the same shape, but not the same amplitude. We therefore have two economies whose growth rates react in the same way; below we can see a correlation of almost 0.7 between these two series.
#
# This could matter for the rest of the analysis, in particular for understanding how a shock spreads from one country to another.
# In[3]:
print("Matrice de corrélation")
cor(data_matrix)
# Another point: apart from the 2008-2012 period, the United States and the Eurozone seem to have rather stable growth rates, between 0 and 2% (4% for the United States). This is reminiscent of the theories about secular stagnation, even though according to Summers and Gordon growth rates in such a situation should be much lower (~0.5%).
#
# A last point about China: since the 2008 crisis and the recovery in 2011, its growth rate shows a downward trend. It seems that the 10% annual growth China had enjoyed until then is a bygone era, with rates that could converge towards those of developed countries (such as the United States) within a few years.
# Indeed, China's exceptional growth rates were due to the catch-up effect of its economy, and the decline in its rates is probably linked to the fact that China is finishing catching up with the most developed countries.
# <a id= "1"></a>
#
# ### 1). Explain what a VAR model and an impulse response function is.
# A VAR (vector autoregressive) model analyses time series in a multivariate way, by studying the linear dependencies within all the series under consideration. It differs from an AR model through this multivariate aspect.
#
# To explain a variable $X_t$ with a VAR model of order 1, we use the data of the previous period, such that:
#
# $$VAR(1): X_t= \phi_0 + \phi_1X_{t-1} + \epsilon_t$$
# with:
# $$X_t= \begin{pmatrix}
# chine\_growth_t\\
# USA\_growth_t\\
# Zone€\_growth_t
# \end{pmatrix}$$
#
# At order p we have:
# $$VAR(p): X_t= \phi_0 + \phi_1X_{t-1} + \phi_2X_{t-2} + ... + \phi_pX_{t-p} + \epsilon_t$$
# In other words, a VAR of order p lets us consider a link between our data at time t and the data observed up to t-p.
#
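# As a quick illustration of the recursion above (a minimal sketch, not part of the estimation itself): the names `phi_0_example`, `phi_1_example` and the shock standard deviation below are made-up values, chosen only to show how a bivariate VAR(1) generates data.
# In[ ]:
# Illustrative only: simulate a bivariate VAR(1) with made-up parameters
set.seed(42)
phi_0_example= c(0.01, 0.005)                      # hypothetical intercepts
phi_1_example= matrix(c(0.5, 0.1, 0.2, 0.3), 2, 2) # hypothetical lag-1 coefficients
n_sim= 100
X_sim= matrix(0, n_sim, 2)
for (t in 2:n_sim){
    eps= rnorm(2, 0, 0.002) # Gaussian shocks
    X_sim[t, ]= phi_0_example + phi_1_example %*% X_sim[t-1, ] + eps
}
head(X_sim)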
# The nice thing about a VAR model is that it can be transformed so that the series $X_{t-p}$ disappear and only the shocks remain. The idea behind this transformation is to explain the values of $X_t$ as a function of past shocks; this is exactly what the impulse response function summarises, by tracing the effect of a one-off shock period after period. In economics this is a useful concept, for instance to see how a demand shock propagates through growth rate, interest rate and unemployment rate series; macroeconomic variables are rarely independent of one another, so observing and understanding the impact of a shock coming from one series on the others is essential.
#
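# Formally, under stationarity, the VAR(p) can be inverted into a moving-average form, and the impulse response function (IRF) is read directly from it (standard result, with $\Psi_0= I$ and $\Psi_s= 0$ for $s<0$):
#
# $$X_t= \mu + \sum_{i=0}^{\infty} \Psi_i \epsilon_{t-i} \qquad \text{with} \qquad \Psi_s= \sum_{j=1}^{\min(s,p)} \phi_j\Psi_{s-j}$$
#
# The element $(k, l)$ of $\Psi_s$ then measures the response of series $k$, $s$ periods after a one-unit shock on series $l$.
#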
# In our project we have the growth rates of China, the United States and the Eurozone from 1996 to the end of 2019. With globalisation and ever closer connections between economies, it is interesting to see how a growth shock in one country could reach another and, above all, for how long. This is reminiscent of the current situation and the worldwide COVID crisis hitting all our economies. But we will have the opportunity to discuss this at greater length in the last question.
# To code a VAR of order p, we had to rethink the construction of the dataset and of its interaction with the coefficients to be optimised.
#
# Suppose we have to build a **VAR of order 2**. The dataset we would use to build it would look like this:
#
#
# | t | Chine | USA | Zone€ | | t - 1 | Chine | USA | Zone€ | | t - 2 | Chine | USA | Zone€ |
# | --- | --- | --- | --- | | --- | --- | --- | --- | | --- | --- | --- | --- |
# | 1 | 0.109 | 0.026 | 0.012 | | | | | | | | | | |
# | 2 | 0.094 | 0.04 | 0.015 | | 1 | 0.109 | 0.026 | 0.012 | | | | |
# | 3 | 0.092 | 0.041 | 0.018 | | 2 | 0.094 | 0.04 | 0.015 | | 1 | 0.109 | 0.026 | 0.012 |
# | 4 | 0.103 | 0.044 | 0.019 | | 3 | 0.092 | 0.041 | 0.018 | | 2 | 0.094 | 0.04 | 0.015 |
# | 5 | | | | | 4 | 0.103 | 0.044 | 0.019 | | 3 | 0.092 | 0.041 | 0.018 |
# | 6 | | | | | 5 | | | | | 4 | 0.103 | 0.044 | 0.019 |
#
#
# with our first series $X_t$ to be predicted, followed by the 2 other series needed to build a VAR of order 2. We can see that the first 2 rows of $X_t$ cannot be used to build this VAR, given that the first 2 rows of the $X_{t-2}$ series are empty. We therefore have to fit the model only on the observations at t = 3 and t = 4. With an order p, we would then fit on m-p observations, with m the number of observations in the dataset.
#
# We retrieve each lagged series independently and multiply it by the set of coefficients $\phi$ associated with it, in order to compute the value of our prediction $\tilde{X_t}$. Then we compute the prediction error, resizing $X_t$ appropriately. Since the goal is to minimise this error, we maximise the log-likelihood, through a multivariate Gaussian density function, to find the right parameters $\phi$.
#
# Last point: the set of parameters $\phi$ fed in initially is generated randomly; it is not a vector of zeros. This sometimes speeds up the convergence of our model, or even makes it converge at all.
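# Before the estimation functions, here is a minimal sketch (using base R's `embed()`, purely for illustration and not reused afterwards) of what the stacked lagged dataset of the table above looks like for p = 2.
# In[ ]:
# Illustrative only: lagged layout of the table above, built with embed()
p_demo= 2
lagged_demo= embed(data_matrix, p_demo + 1) # columns: X_t, X_{t-1}, ..., X_{t-p}
colnames(lagged_demo)= paste(rep(colnames(data_matrix), p_demo + 1)
                             , rep(0:p_demo, each= ncol(data_matrix)), sep= "_lag")
head(lagged_demo)
dim(lagged_demo) # m - p rows, as explained above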
# In[4]:
mvnorm<-function(X,mu,sigma)
{
    # Computes the multivariate normal density function
    # X is a 1xn vector
    # mu is a 1xn vector
    # sigma is an nxn matrix
A=(2*pi)^(ncol(sigma)/2)
B=det(sigma)^(1/2)
C=-1/2*t(X-mu)%*%solve(sigma)%*%(X-mu)
D=exp(C)
return(1/(A*B)*D)
}
# test
mu= apply(data_matrix,2,mean)
sigma= diag(3)
mvnorm(data_matrix[2,],mu,sigma)
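# Small aside (our own addition, not something the functions below rely on): for long samples it can be numerically safer to work with the log-density directly rather than taking log(mvnorm(...)), since exp() can underflow.
# In[ ]:
# Optional, numerically safer variant of mvnorm: log-density computed directly
log_mvnorm= function(X, mu, sigma){
    n_dim= ncol(sigma)
    return(-0.5 * (n_dim*log(2*pi) + log(det(sigma)) + t(X-mu)%*%solve(sigma)%*%(X-mu)))
}
log_mvnorm(data_matrix[2,], mu, sigma)
log(mvnorm(data_matrix[2,], mu, sigma)) # should match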
# In[5]:
VAR_loglik_p<-function(para, vectored)
{
    # Computes the (negative) log-likelihood of a VAR(p) model
    # para is a vector of (n + n*n*p) values containing the model parameters
    # vectored is a vector containing the dataset, the order of the VAR, and the dimensions of the dataset
    # Retrieve the information packed into the vector
stocked= tail(vectored, 3)
p= stocked[3]
n= stocked[1]
m= stocked[2]
X= matrix(vectored[1: (length(vectored) - 3)], m, n )
# Extraction des intercepts
phi_0= para[1:n]
# E désigne la valeur X calculée à l'aide du modèle
# On construit en amont E, ce qui nous permet d'ajouter les intercepts, et de bien définir
# sa dimension
E= matrix(phi_0, m-p, n, byrow= T)
# Si l'ordre du VAR = 3, alors il y aura 3 matrices de dimension nxn
# On récupère par itération les coefficients qui sont associés à chaque matrice (en premier
# la matrice associée aux données avec le premier retard, puis le deuxième etc.)
for (i in 1:p){
# Récupère les coefficients de la matrice du retard i
phi_i= matrix(para[((n*n*i + n) -(n*n) + 1):(n*n*i + n)], n ,n)
# Pour la matrice phi_1, les coefficients phi1_11, phi1_12, phi_13 dans le cas d'une var
# avec 3 séries, ne seront pas en ligne comme dans une représentation matricielle
# mais seront stockés dans la première colonne !!
# E= E[-1,] + X[-c((m-i+1):m),]%*%phi_i # enlève le bas
# E= E[-dim(E)[1],] + X[-c((m-i+1):m),]%*%phi_i # enlève le bas
# On fait le calcul phi_p . , mais comme les séries de X sont stockées en ligne
# et que les coefficients sont sotckés en colonne, on doit faire X . phi_p
# On enlève une partie de la matrice (le bas) qui dépend de l'ordre sur lequel on itère
# cf le markdown ?
phi_compute= X[-c((m-i+1):m),]%*%phi_i # enlève le bas de la matrice X pour associer les bons retards
if (i == p){
E= E + phi_compute
}
else {
E= E + phi_compute[-c(1:(p-i)),] # enlève le haut pour que les retards fit bien avec E et X
}
}
# Pour concorder avec le retard max (= ordre p), on doit se séparer des p premières lignes de X
residus= X[-c(1:p), ] - E
sigma= var(residus)
log_lik= 0
# Calcul de la log vraisemblance
# On commence la boucle à p+1 et non à 1 pour simplifier le raisonnement (permet de
# sélectionner les données X à partir de i)
# Mais on aurait pu commencer à 1 et on aurait modifier l'indice dans X et E
for (i in (1+p):m){
temp= mvnorm(X[i, ], E[(i-p),], sigma) # E est pris à partir de p car j'ai enlevé p lignes
# dans le processus précédent
temp= log(temp)
log_lik= log_lik - temp
}
return(log_lik)
}
# test
n= ncol(data_matrix)
p_order= 2 # ordre 2
VAR_loglik_p(numeric(n + n*n*p_order)
, c(data_matrix, n, nrow(data_matrix), p_order))
# In[6]:
# Optimisation test pour VAR(2)
n= ncol(data_matrix)
p_order= 2 # VAR d'ordre 2
estimation_auto= function(X= data_matrix, p= p_order, num= n){
# Permet de sortir les résultats de l'optim (neg log vraissemblance et coef)
# X désigne le dataset
# p l'ordre de la VAR à calculer
# n le nombre de série du VAR
# On utilise dans un premier temps des poids aléatoires compris entre 0 et 1
# mais si on a un soucis on utilise un set de paramètres avec que des 0
# (dans notre cas, on a parfois eu des problèmes)
tryCatch({
weight= round(runif(num + num*num*p, 0, 1), 1)
para= weight/sum(weight) # permet de ne pas faire bugger l'optim
estimation= optim(para, fn= VAR_loglik_p
, vectored= c(X, ncol(X), nrow(X), p)
, method= "BFGS")
print("Initialization with random parameters")
return (estimation)
}, error= function(e) # au cas où
{
# Set de paramètres 0
para= numeric(num + num*num*p)
estimation= optim(para, fn= VAR_loglik_p
, vectored= c(X, ncol(X), nrow(X), p)
, method= "BFGS")
print("Initialization with zero values for parameters")
return (estimation)
})
}
# test
estimation_auto(X= data_matrix, p= p_order, num= n)
# <a id= "2"></a>
#
# ### 2). Using information criterions, estimate the lag to be used with the three data series for your VAR model. What do you think of this lag?
# Since a VAR can be generated for any order p, we need to determine which order is the most relevant for modelling our data. To that end, we use the previous functions to build every VAR model from order 1 to 10.
# To compare all these models we use information criteria (AIC, BIC and HQ), which account both for a model's performance (the value of the log-likelihood) and for its complexity (the number of parameters). It is indeed very easy to get a good log-likelihood by increasing the number of parameters, but the model then becomes too specific to our dataset.
# For a VAR, a very high order means far more parameters than an order 1: if n is the number of series, a VAR of order p has $n*n*p + n$ parameters.
#
# By minimizing the information criteria, we find the model that strikes a good balance between performance and complexity.
#
# In our case, the values of these criteria are reported in the table below.
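#
# For reference, the textbook forms of these criteria, with $k$ the number of parameters, $m$ the number of observations and $\hat{L}$ the maximized likelihood (the code below computes its own variant of them through the `formule_generale` helper):
#
# $$AIC = 2k - 2\ln\hat{L} \qquad\qquad BIC = k\ln(m) - 2\ln\hat{L} \qquad\qquad HQ = 2k\ln(\ln(m)) - 2\ln\hat{L}$$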
# In[7]:
# We now want to determine which VAR order we should choose
cb_de_var= 10
formule_generale= function(p, log_lik, n, m, g){
# Permet de calculer tous les critères d'informations en fonction de g
# P désigne l'ordre du VAR
# log_lik désigne la log vraisemblance du modèle calculée
# n le nombre de séries (permet de calculer le nombre de paramètres du modèle)
# m le nombre d'observations
# g correspond à la fonction d'information sélectionnée
base= -(2*log(log_lik))
k= n + n*n*p # nombre de param
return (base + (k*g))
}
bic_g= function(m){
# Permet de calculer la fonction g pour le critère BIC
return (log(m))
}
hq_g= function(m){
# Permet de calculer la fonction g pour le critère HQ
return (log(bic_g(m)))
}
# Préparation
n= ncol(data_matrix)
order_var= c(1:cb_de_var)
aic= c()
bic= c()
hq= c()
# On va itérer sur tous les ordres p sélectionnés et calculer les critères d'informations
for (i in order_var){
m_in= nrow(data_matrix) - i # à chaque ordre p, le dataset diminue de - p
estimated= estimation_auto(X= data_matrix, p= i, num= n)
log_like_estimated_i= -1*estimated$value # la valeur sortie est la negative log likelihood
# donc on multiplie par -1
aic= c(aic, formule_generale(i, log_like_estimated_i, n, m_in, g= 2))
bic= c(bic, formule_generale(i, log_like_estimated_i, n, m_in, g= bic_g(m_in)))
hq= c(hq, formule_generale(i, log_like_estimated_i, n, m_in, g= hq_g(m_in)))
}
# In[8]:
# Construction du dataset pour représenter la valeur des critères en fonction de l'ordre
df_which_order= data.frame(p_order= order_var
, AIC= aic
, BIC= bic
, HQ= hq)
df_which_order
# As a reminder, the best model is the one with the lowest criterion value. In our case we are lucky: all the criteria lead to the same conclusion, the VAR of order 1 is the best one.
# In other words, a lag of 1 is what allows us to model $X_t$ best.
#
# We must admit we expected to obtain a lag of 2 or 4, given the nature of our series. We are modelling country growth rates, and it is likely that the growth rate of the first quarter affects that of the third.
# We also thought that each series was autocorrelated over several periods (6 months or even 1 year). And when we plot the autocorrelogram of each series separately, that is indeed the case: we observe strong autocorrelations, significant up to 5 periods, that is one year and one quarter (a quick way to reproduce this check is sketched after the footnote below).
#
# After a bit of reading [_$^{1}$_](https://stats.stackexchange.com/questions/207156/autocorrelation-of-var-residuals), we realised that information criteria do not try to minimize autocorrelation; they select the model that describes our data well, not one that describes it perfectly. In our case, removing the autocorrelation probably requires too large an increase in complexity, which is why the criteria point us to the smallest order for the model.
#
# Consequently, if our goal is to build a model for prediction, a lag of 1 is perfect, since it is the model that best manages the trade-off between performance and overfitting. But if our goal is to explain, and autocorrelation is a problem for our economic analysis, then another method should be used to select the right order for the VAR.
#
# For the rest of the project we build a VAR of order 1 (that is, we follow the guidance of the information criteria).
#
# ---
# $^{1}$[Stats StackExchange](https://stats.stackexchange.com/questions/207156/autocorrelation-of-var-residuals)
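# In[ ]:
# Illustrative sketch (not part of the original pipeline): the autocorrelogram check mentioned
# above, using base R's acf() on each growth series of data_matrix separately.
for (serie in colnames(data_matrix)){
    acf(data_matrix[, serie], lag.max = 8, main = paste("ACF -", serie))
}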
# <a id= "3"></a>
#
# ### 3). Simulate impact
# Even though we will simulate the impact of negative growth rates with a VAR of order 1, we tried to generalize our functions to every possible VAR.
#
# The problem is that the more lags we model, the more complex the construction of a generalized response function becomes. One could indeed imagine different shocks over several periods, for instance in the first and in the last quarter. In our case, since the shock only occurs at time t, we do not need to go that far in the construction, but we still proposed a solution to this problem.
#
# The solution we found is to transform any VAR(p) into a VAR of order 1. Here is what our transformation looks like:
# If $X_t$ denotes our n data series, $\phi_p$ the coefficient matrix associated with the series lagged by p periods and $\epsilon_t$ the prediction error vector, written as follows:
#
# $$X_t= \begin{pmatrix}
# chine\_growth_t\\
# USA\_growth_t\\
# Zone€\_growth_t
# \end{pmatrix}$$
# and
# $$\phi_{p}=\begin{pmatrix}
# \phi_{1,1,p}&\phi_{1,2,p}&...&\phi_{1,n,p}\\
# \phi_{2,1,p}&\phi_{2,2,p}&...&\phi_{2,n,p}\\
# ...&...&...&...\\
# \phi_{n,1,p}&...&...&\phi_{n,n,p}
# \end{pmatrix}$$
# and
# $$\epsilon_{t}=\begin{pmatrix}
# \epsilon_{chine_t}\\
# \epsilon_{usa_t}\\
# \epsilon_{zone€_t}
# \end{pmatrix}$$
# $$$$
# then our rewriting of a VAR(p) as a VAR(1) (companion form) goes as follows:
# $$$$
# $$\begin{pmatrix}
# X_t\\
# X_{t-1}\\
# ...\\
# ...\\
# X_{t-p+1}
# \end{pmatrix}
# =\begin{pmatrix}
# \phi_{1}&\phi_{2}&...&...&...&\phi_{p}\\
# 1\\
# 0&1&...\\
# ...\\
# 0&...&...&1&...&0
# \end{pmatrix}
# \begin{pmatrix}
# X_{t-1}\\
# X_{t-2}\\
# ...\\
# ...\\
# X_{t-p}
# \end{pmatrix}
# +\begin{pmatrix}
# \epsilon{t}\\
# 0\\
# ...\\
# ...\\
# 0
# \end{pmatrix}$$
# $$$$
# Since we know how to build the generalized response function from a matrix $\phi_1$ in the VAR(1) case, its construction can be generalized with the matrix above. With this construction we could then simulate shocks over several periods at once, which is why we developed this way of writing a VAR of order p. Nevertheless, we did not implement shocks over more than one period; so, to build the response function, we only use $\phi_1$, since the shock appears in the first period.
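#
# For reference, the textbook orthogonalized impulse response of a VAR(1), which is what the code below targets, is
#
# $$IRF(h) = \Phi_1^{h}\, P\, e, \qquad h = 1, \dots, H$$
#
# where $P$ is the lower-triangular Cholesky factor of the residual covariance matrix (computed by `compute_choleski_p` below), $e$ is the shock vector and $\Phi_1^{h}$ is the h-th matrix power of $\Phi_1$.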
# In[9]:
# le but est de construire une matrice tel que en colonne on a
# Calculer l'impact d'un choc avec un VAR p est compliqué
# On va chercher à transformer notre VAR p en un VAR 1
transformation_Xt_varp= function(X, p_order){
# Permet de transformer le dataset initial pour permettre la permutation d'un VAR p à un VAR 1
# X désigne notre dataset
# p_ordre désigne l'ordre du VAR
n= ncol(X)
m= nrow(X)
# Si on a un VAR 1, alors on ne change pas notre dataset
if (p_order == 1){
return (X)
}
else {
tested= X
stocked= X[-c((m-p_order+1):m), ] # série initiale Xt
# Le but est de pouvoir coller les séries de données Xt, Xt-1, ... , Xt-p
# On a donc un dataset de dimension (m-p)x(n*p)
for (i in c(1:p_order)){
tested_copy= tested[-c(1:i), ]
ajout_p_col= tested_copy[-c((m-p_order+1):m), ]
stocked= cbind(ajout_p_col, stocked)
}
return(stocked)
}
}
# # Test
# p_order= 4
# test= transformation_Xt_varp(data_matrix, p_order)
# head(test)
# In[10]:
phi_zero_compute= function(X, p_order){
# Permet de sortir la matrice d'intercept
n= ncol(X)
m= nrow(X)
estimation_good_var= estimation_auto(X= X, p= p_order, num= n)
para= estimation_good_var$par
phi_zero= para[1:n]
return (phi_zero)
}
phi_transforma= function(X, p_order){
# Permet d'assembler toutes les matrices phi qui nous permettent la transformation VAR p -> VAR 1
n= ncol(X)
m= nrow(X)
estimation_good_var= estimation_auto(X= X, p= p_order, num= n)
para= estimation_good_var$par
# On ne fait pas de transformation si on a un VAR d'ordre 1
# On retourne uniquement les paramètres
if (p_order == 1){
phi_uno= matrix(para[(n+1):length(para)],n,n)
return (phi_uno)
}
else {
# Assemblage des coefficients
# On va stack de manière horizontale les matrices de coef phi 1 à phi p
stock_phi= matrix(numeric(n*n), n, n)
for (i in 1:p_order){
phi_i= matrix(para[((n*n*i + n) -(n*n) + 1):(n*n*i + n)], n ,n)
stock_phi= rbind(stock_phi, phi_i)
}
stock_phi= stock_phi[-c(1:n), ]
# On va combler les trous pour fiter notre matrice de coef aux nouveaux set données
# calculé transformation_Xt_varp
# La matrice de coef sera de dimension (n*p)x(n*p)
dim_n_p= n*p_order
identity_mat= diag(dim_n_p-n) # permet lors du calcul X . phi d'afficher
# Xt-1 = Xt-1 , Xt-2 = Xt-2
zero_mat_ligne= matrix(numeric((dim_n_p - n)*n), n, (dim_n_p - n))
stock_phi= cbind(stock_phi, rbind(identity_mat, zero_mat_ligne))
return (stock_phi)
}
}
phi_zero_transforma= function(X, p_order){
# Permet de redéfinir la matrice phi 0 (intercept) avec le dataset de transformation_Xt_varp
# X correspond au dataset (dimention m*n)
# p_order correspond à l'odre du VAR
# Cas particulier dans le cas d'un VAR 1, on ne fait pas de transformation
if (p_order == 1){
return (phi_zero_compute(X, 1))
}
else {
phi_zero= phi_zero_compute(X, p_order)
phi_matrice= phi_transforma(X, p_order)
diff_dim_col= ncol(phi_matrice) - length(phi_zero)
# On comble la matrice avec des 0 à gauche
zero_comble= matrix(numeric((diff_dim_col*nrow(phi_matrice)))
, nrow(phi_matrice), diff_dim_col)
phi_zero_new= cbind(matrix(phi_zero, nrow(phi_matrice), length(phi_zero), byrow= T)
, zero_comble)
return (phi_zero_new)
}
}
# # test
# p_order= 2
# phi_transforma(data_matrix, p_order)
# In[11]:
error_transformation= function(X, p_order, stock_phi, phi_zero){
# Permet de calculer l'erreur. Cette fonction permet de prendre en considération
# la structure du dataset d'un VAR p
    n= ncol(X)
    m= nrow(X)
dim_n_p= n*p_order
if (p_order == 1){
calcul_value= X[1:(m-1), ]%*%stock_phi + matrix(phi_zero, (m-1), n, byrow= T)
errors= X[-1, ] - calcul_value
return (errors)
}
else {
test= transformation_Xt_varp(X, p_order)
# enlève les 3 première colonnes qui sont t et pas t-1
train= test[, -c(1:n)]
# on a enlevé la dernière pour avoir t à t -p +1
true_vals= test[, -c((dim_n_p + 1):(dim_n_p + n))]
calcul_value= train%*%stock_phi + matrix(phi_zero[1, ]
, nrow(train), ncol(phi_zero), byrow= T)
# on calcule l'erreur
errors= true_vals - calcul_value
return (errors)
}
}
# # test
# p_order= 1
# stock_test_phi= phi_transforma(data_matrix, p_order)
# phi_zero= phi_zero_transforma(data_matrix, p_order)
# head(error_transformation(data_matrix, p_order, stock_test_phi, phi_zero))
# In[12]:
compute_choleski_p= function(X, error, p_order){
# Permet de récupérer la matrice triangulaire selon la factorisation de choleski
# X désigne le dataset
# error désigne la matrice d'erreur
# p_order désigne l'ordure du VAR
n= ncol(X)
if (p_order == 1){
sigma= var(error)
} else {
        error_resized= error[, -c((n+1):ncol(error))]
sigma= var(error_resized)
}
p= t(chol(sigma))
return (p)
}
# In[13]:
irf_compute= function(X, p_order, phi_matrix, horizon, vecteur_choc, p){
# Permet de calculer la réponse à un choc
# on récupère la première matrice phi pour calculer notre choc étant donné
# que le choc n'a lieu qu'à une période
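    # Note: the `^` operator applied to phi below is element-wise in R, so `phi_matrix^i` is the
    # element-wise i-th power of the coefficient matrix, not the matrix power Phi^i (a matrix
    # power would require repeated %*% products).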
IRF= c()
n= ncol(X)
e= vecteur_choc
# Cas spécial pour un VAR 1
if (p_order == 1){
for (i in 1:horizon){
phi= phi_matrix^i
temp= phi%*%p%*%e
IRF= cbind(IRF, temp)
}
} else {
# On récupère la matrice phi 1
        new_phi= phi_matrix[ ,-c((n+1):ncol(phi_matrix))]
new_phi_first= new_phi[c(1:n), c(1:n)]
for (i in 1:horizon){
phi= new_phi_first^i
temp= phi%*%p%*%e
IRF= cbind(IRF, temp)
}
}
return (IRF)
}
# test
# horizon= 4
# e= c(0, -0.05, 0)
# p_mat= compute_choleski_p(computed_error, p_order)
# irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# In[14]:
plot_irf= function(X, IRF){
# Permet de plot nos IRF
# X désigne notre dataset
# IRf repésente la matrice de dimension nxhorizon des chocs pour
# chaque série (n) sur les différentes périodes (horizon)
    n= ncol(X)
# Si le nombre de colonne est impair, alors j'ajoute une case vide dans layout
if ((n %% 2) == 0){
layout(matrix(1:n, n/2, n/2))
} else {
n_1= n + 1
layout(matrix(1:n_1, n_1/2, n_1/2))
}
    for (i in 1:n){
plot(IRF[i,], main= colnames(X)[i], ylim= range(0, IRF)
, col= "red", pch= "+", cex= 1.5, ylab= "IRF", xlab= "Horizon")
grid(col= "grey")
lines(IRF[i,]*0, lty= 1, col= "black", lwd= 2)
}
}
# # test
# fig(10, 10)
# plot_irf(data_matrix, irf_calculated)
# Before starting the shock analysis, it is important to understand that we did not manage to build the confidence intervals.
# A confidence interval is what lends credibility to a response: is it statistically different from zero?
#
# We tried to set up a **block bootstrapping** method (bootstrapping for time series, which creates data samples from blocks of 4 or 5 observations grouped within the initial series); but it did not prove workable because of a far too high computation time. Our attempt can nevertheless be found in the Appendix.
#
# Finally, to analyse the impact of a shock we have to look at the sign of the response, shown on the y-axis. In our case we will see that all the negative shocks generate negative responses.
# In[15]:
# Full IRF computation for the selected VAR order and a horizon of 4 quarters
p_order= 1
horizon= 4
e= c(-0.08, 0, 0)
# Permet de calculer la matrice phi, de l'ordre 1 à p, pour faire la transformation var p à var 1
stock_test_phi= phi_transforma(data_matrix, p_order)
# Permet de calculer et de resizer le vecteur phi0 pour l'adapter à la transformation var p à var 1
phi_zero= phi_zero_transforma(data_matrix, p_order)
# Permet de calculer l'erreur dans le cas d'un var p (marche aussi en var 1)
computed_error= error_transformation(data_matrix, p_order, stock_test_phi, phi_zero)
# Calcul de la matrice p qui permet d'orthogonaliser mon système
p_mat= compute_choleski_p(data_matrix, computed_error, p_order)
# Calcul des chocs jusqu'à l'horizon souhaité
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# On plot l'IRF que nous venons de calculer
fig(10, 10)
print("Choc de -8% dans l'économie chinoise")
plot_irf(data_matrix, irf_calculated)
# The first shock we simulate is a negative growth shock of 8% in China. China is known to be the factory of the world, and we can see right now (COVID crisis) that if China stops its productive apparatus, the world stops consuming. Seeing the impact of such a shock on the other countries, especially the developed ones, is therefore of particular interest.
#
# In our case, this negative shock on China does not seem to fade over the 4 periods we considered. Be careful though: the confidence intervals may be very wide, so the shock might actually die out after one period; we cannot know. What is certain is that this shock strongly affects China (of course!) but also the United States and the Euro Zone.
#
# For the United States the shock is negative and constant over the 4 periods we considered (that is, one year), and we observe the same phenomenon for the Euro Zone. To be honest, our impression is that the Chinese growth shock durably changes the growth equilibrium of the Euro Zone and of the United States.
#
# This is what we tried to check by computing the shock over 24 periods: for the United States the shock does seem to have durably changed the structure of the economy, with a negative and constant response. This is not the case for the Euro Zone. Finally, even China struggles to recover from the shock.
#
# Again, we did not plot confidence intervals, so we are not able to check how reliable these results are. For the United States, the confidence interval might even be so wide that the response is null the whole time!
#
# In any case, we clearly observe the interconnection of the economies and the diffusion of shocks from one country to the others.
# In[16]:
horizon= 24
e= c(-0.08, 0, 0)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# On plot l'IRF que nous venons de calculer
fig(10, 10)
print("Choc de -8% dans l'économie chinoise sur 24 périodes")
plot_irf(data_matrix, irf_calculated)
# In[17]:
horizon= 4
e= c(0, -0.05, 0)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# On plot l'IRF que nous venons de calculer
fig(10, 10)
print("Choc de -5% dans l'économie américaine")
plot_irf(data_matrix, irf_calculated)
# Here is a negative shock of 5% on US growth. As expected, this shock affects the US economy over one year. And when we look at the diffusion of the shock over 24 quarters, the impact does not disappear over time. This is probably because, for a developed economy, a -5% shock is a huge impact, one that must have deeply changed the structure of the American productive apparatus. This may seem puzzling given that the US economy is a very liberal one (its labour market, for instance, is very flexible compared with that of France), which is supposed to allow it to recover faster from an economic crisis such as a large negative growth shock. Either the US economy really does have enormous trouble recovering from such a large shock, or our model is mis-specified, in which case we should either have increased the order of the VAR or, when building our response functions, placed the United States series differently in the ordering (currently in second position).
#
# For China, however, the shock only seems to have a negative impact in the first period; for all the other periods the response is zero, which can also be seen on the shock diffused over 24 periods below.
#
# For the Euro Zone economy we observe the same phenomenon as for a shock coming from China, with a negative peak in the second period (probably significant) followed by convergence towards 0. The Euro Zone receives the full impact of a shock with a lag of one quarter compared with the two other economies, and the shock then dies out after 10 quarters.
# In[18]:
horizon= 24
e= c(0, -0.05, 0)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# On plot l'IRF que nous venons de calculer
fig(10, 10)
print("Choc de -5% dans l'économie américaine sur 24 périodes")
plot_irf(data_matrix, irf_calculated)
# In[19]:
horizon= 4
e= c(0, 0, -0.05)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# On plot l'IRF que nous venons de calculer
fig(10, 10)
print("Choc de -5% dans l'économie de la Zone Euro")
plot_irf(data_matrix, irf_calculated)
# Finally, here is the impact of a negative 5% shock on the Euro Zone economy. First, for the Euro Zone itself the shock is diffuse over time: as the quarters go by, it weighs less and less on the Euro Zone economy, and around the 15th quarter its impact is nullified.
#
# As for how the Chinese economy receives this shock, the response is negative and disappears after one period (3 periods for the United States).
#
# This shock is therefore quickly absorbed by the Euro Zone, and just as quickly by the two other countries.
# In[20]:
horizon= 24
e= c(0, 0, -0.05)
irf_calculated= irf_compute(data_matrix, p_order, stock_test_phi, horizon, e, p_mat)
# On plot l'IRF que nous venons de calculer
fig(10, 10)
print("Choc de -5% dans l'économie américaine")
plot_irf(data_matrix, irf_calculated)
# <a id= "4"></a>
#
# ### 4). Why is it important to estimate these second-round effects today?
# What a strange time we live in! We can buy an object coming from China while watching an American series, comfortably tucked in our bed in Paris. In this strange time, economies all over the world are interconnected. A catastrophe in China will necessarily have an impact on all the other economies of the world: China is both the factory of the world and a major trading partner of many countries, developed or not. And the same goes for the United States and the Euro Zone.
#
#
# Take a Keynesian stimulus policy as an example: Mauroy's 1981 plan. It can be summed up as a rise in wages in France (minimum wage, civil servants). It ended in failure and in higher budget and external deficits because, carried out in isolation, it did not take into account that the money would be used to buy goods produced abroad (the famous Japanese cassette player) rather than in France. If we analysed the demand shock that the Mauroy policy represents, we might well notice a positive shock on the Japanese growth rate. It is very much like the butterfly-wing theory.
#
# That is why building generalized response functions is necessary and so interesting for economists. This is also why we wanted to know whether a major recession in one country could affect the economy of another country, and for how long. Several facts emerged from the analysis of the response functions. First, a recession in China seems to affect the US growth rate for more than 24 quarters (with a constant negative response). The fact that China is a major trading partner must explain the powerful impact of the Chinese recession.
# The Euro Zone recovers much more easily, which is surprising since, as for the United States, the Middle Kingdom is the Euro Zone's second export market. Of course, export/import links are not the only ties between countries; one could also consider the migration of people, and of capital, to explain this difference. Otherwise, it is Uncle Sam's response that is aberrant: if we look over 24 periods, the Euro Zone's response disappears after 10 periods, which seems rather coherent. Moreover, we know from experience that the American labour market is much more flexible than in some European countries; for instance, unemployment stood at close to 10% in 2008 against 4% two years later. This lets it ride out the storm and keep its firms alive longer, and it is one of its levers for generating a fast recovery. This is why a structural change of the US economy following a Chinese crisis seems unlikely: the confidence interval that we did not plot would most certainly nullify the shock beyond 10 periods, just as in Europe.
#
# Second, we simulated a -5% shock to US growth and observed the same issue as with the response to the Chinese shock: the shock resonates over every period we plot (24 quarters). The Middle Kingdom's response is interesting this time, since the shock only hits the first quarter negatively and then disappears. Finally, for the Euro Zone, the shock is negative up to roughly 10 periods.
#
# This negative Chinese response to the American shock looks similar to what a shock appearing in the Euro Zone would produce. Given that Uncle Sam and the Euro Zone are privileged markets for China, it is not surprising to see a negative response appear. But the fact that it disappears after one quarter seems to tell us that China quickly dissipates the echo of the shock, either because it can export its goods elsewhere, or because the countries in crisis cannot do without Chinese goods and capital.
#
# To conclude, the current situation seems quite different from these simulations, since the health crisis is a shock that hits every country, but with a delay: China first, Europe second and finally the United States. On top of the shock itself we also have to endure the echoes of the shocks (the responses): the crisis in China hit tourism and the luxury sector in France, so well before taking the full force of the crisis ourselves we had a taste of it through the crisis in China. That was France's response to the shock of the Middle Kingdom's crisis. But France is also directly hit by the health crisis (remote working, short-time work, lockdown), which in our view makes it hard to model this crisis and its resonance with a VAR model and its response functions.
# <a id= "a"></a>
#
# ### Annexes
#
# Here is our attempt to build confidence intervals for the generalized response functions using block bootstrapping.
# In[21]:
# let's compute confidence intervals for the IRFs
# we use a block bootstrap method
# we draw 95% of the rows of our matrix at random
# compute the coefficients, the centred errors and recompute the IRFs
# for each period we keep the max and the min for each country
block_bootstrap= function(X, ic){
# Permet de générer un échantillon block bootstrapped
# Le block bootstrapping permet de faire du bootstrapping sur une série, et donc en récupérant
# des lignes proches.
# Avec un bootstrap classique, la série temp perdrait sa notion de temps et d'autocorr (lien intertemporel)
# X désigne notre dataset
# ic désigne la part des lignes du dataset que l'on va prendre
# pour générer ce block bootstrapped
m= nrow(X)
n= ncol(X)
num= floor(m*ic/100)
# permet de block bootstrap ma série
stocked_data= matrix(0, 1, n, byrow= T)
for (i in 1:num){
# On va récupérer 2 lignes avant et après la ligne que l'on a sélectionné et les ajouter
# à la suite
random_id= floor(runif(1, 3, m - 2)) # on commence à 3 car on récupère 2 index en amont
# et on finit à m - 2 car on récupère 2 lignes
# après l'id sélectionné aléatoirement
before_1= random_id -2
before= random_id - 1
after= random_id + 1
after_1= random_id + 2
select_val= c(before_1, before, random_id, after, after_1)
data_sample= X[select_val, ]
stocked_data= rbind(stocked_data, data_sample)
}
stocked_data= stocked_data[-1, ] # supprime la première ligne
return (stocked_data)
}
head(block_bootstrap(data_matrix, 95))
# In[22]:
# p_order= 2
# horizon= 4
# e= c(-0.08, 0, 0)
# # on génère le dataset bootstrap
# data_bootstraped= block_bootstrap(data_matrix, 95)
# # Permet de calculer la matrice phi, de l'ordre 1 à p, pour faire la transformation var p à var 1
# stock_test_phi= phi_transforma(data_bootstraped, p_order)
# # Permet de calculer et de resizer le vecteur phi0 pour l'adapter à la transformation var p à var 1
# phi_zero= phi_zero_transforma(data_bootstraped, p_order)
# # Permet de calculer l'erreur dans le cas d'un var p (marche aussi en var 1)
# computed_error= error_transformation(data_bootstraped, p_order, stock_test_phi, phi_zero)
# ############################################################
# # on centre notre erreur
# mean_error= t(as.matrix(colMeans(computed_error)))
# mean_error_resized= matrix(mean_error, nrow(computed_error), ncol(mean_error), byrow= T)
# centr_error= computed_error - mean_error_resized
# centr_error_shuffle= centr_error[sample(nrow(centr_error)), ]
# n= ncol(data_bootstraped)
# propre_error= centr_error_shuffle[, c(1:n)]
# # ajout de l'erreur à notre sample
# prop_data_boots= data_bootstraped[-c(1:p_order), ] + propre_error
# #############################################################
# # On va à partir du dataset prop_data_boots
| [
"[email protected]"
] | |
ba11745933fa5c61976989834c195771c5305183 | 0aa5187e4bfa91434ac8446aced2763faac0d3b9 | /numerical_analysis.py | c55bc690f55acc5d99b5d9512e011575833495de | [] | no_license | nadavpo/real_fake_im_classifier | 889879ef26e74fe686ade52372b7697cb41c732c | 597bad2b3699fad8c629c6217db68a390d0f6adb | refs/heads/main | 2023-09-04T22:43:34.230129 | 2021-11-17T18:31:07 | 2021-11-17T18:31:07 | 428,213,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,097 | py | """Plot ROC and DET curves."""
import os
import argparse
import torch
import scipy.stats as sp
import matplotlib.pyplot as plt
from sklearn import metrics
from torch.utils.data import DataLoader
from common import FIGURES_DIR
from utils import load_dataset, load_model
device = "cpu"#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Arguments
def parse_args():
"""Parse script arguments.
Returns:
Namespace with model name, checkpoint path and dataset name.
"""
parser = argparse.ArgumentParser(description='Analyze network performance.')
parser.add_argument('--model', '-m',
default='XceptionBased', type=str,
help='Model name: SimpleNet or XceptionBased.') # default='XceptionBased'
parser.add_argument('--checkpoint_path', '-cpp',
default='checkpoints/synthetic_dataset_XceptionBased_Adam.pt', type=str,
help='Path to model checkpoint.') # default='checkpoints/XceptionBased.pt'
parser.add_argument('--dataset', '-d',
default='synthetic_dataset', type=str,
help='Dataset: fakes_dataset or synthetic_dataset.')
return parser.parse_args()
def get_soft_scores_and_true_labels(dataset, model):
"""Return the soft scores and ground truth labels for the dataset.
Loop through the dataset (in batches), log the model's soft scores for
all samples in two iterables: all_first_soft_scores and
all_second_soft_scores. Log the corresponding ground truth labels in
gt_labels.
Args:
dataset: the test dataset to scan.
model: the model used to compute the prediction.
Returns:
(all_first_soft_scores, all_second_soft_scores, gt_labels):
all_first_soft_scores: an iterable holding the model's first
inference result on the images in the dataset (data in index = 0).
all_second_soft_scores: an iterable holding the model's second
inference result on the images in the dataset (data in index = 1).
gt_labels: an iterable holding the samples' ground truth labels.
"""
test_dataloader = DataLoader(dataset,32,shuffle=True)
model = model.to(device=device)
all_first_soft_scores = []
all_second_soft_scores = []
gt_labels = []
for batch_idx, (inputs, targets) in enumerate(test_dataloader):
inputs = inputs.to(device)
targets = targets.to(device)
with torch.no_grad():
scores = model(inputs)
all_first_soft_scores = all_first_soft_scores + scores[:,0].tolist()
all_second_soft_scores = all_second_soft_scores + scores[:, 1].tolist()
gt_labels = gt_labels + targets.tolist()
return all_first_soft_scores, all_second_soft_scores, gt_labels
def plot_roc_curve(roc_curve_figure,
all_first_soft_scores,
all_second_soft_scores,
gt_labels):
"""Plot a ROC curve for the two scores on the given figure.
Args:
roc_curve_figure: the figure to plot on.
all_first_soft_scores: iterable of soft scores.
all_second_soft_scores: iterable of soft scores.
gt_labels: ground truth labels.
Returns:
roc_curve_first_score_figure: the figure with plots on it.
"""
fpr, tpr, _ = metrics.roc_curve(gt_labels, all_first_soft_scores)
plt.plot(fpr, tpr)
fpr, tpr, _ = metrics.roc_curve(gt_labels, all_second_soft_scores)
plt.plot(fpr, tpr)
plt.grid(True)
plt.xlabel('False Positive Rate (Positive label: 1)')
plt.ylabel('True Positive Rate (Positive label: 1)')
plt.title(f'ROC curves AuC Score for the first score: '
f'{metrics.roc_auc_score(gt_labels, all_first_soft_scores):.3f}, '
f'AuC second score: '
f'{metrics.roc_auc_score(gt_labels, all_second_soft_scores):.3f}')
plt.legend(['first score', 'second score'])
roc_curve_figure.set_size_inches((8, 8))
return roc_curve_figure
def plot_det_curve(det_curve_figure,
all_first_soft_scores,
all_second_soft_scores,
gt_labels):
"""Plot a DET curve for the two scores on the given figure.
Args:
det_curve_figure: the figure to plot on.
all_first_soft_scores: iterable of soft scores.
all_second_soft_scores: iterable of soft scores.
gt_labels: ground truth labels.
Returns:
roc_curve_first_score_figure: the figure with plots on it.
"""
fpr, fnr, _ = metrics.det_curve(gt_labels, all_first_soft_scores)
plt.plot(sp.norm.ppf(fpr), sp.norm.ppf(fnr))
fpr, fnr, _ = metrics.det_curve(gt_labels, all_second_soft_scores)
plt.plot(sp.norm.ppf(fpr), sp.norm.ppf(fnr))
plt.grid(True)
plt.xlabel('False Positive Rate (Positive label: 1)')
plt.ylabel('False Negative Rate (Positive label: 1)')
plt.title('DET curve for the first score')
axes = det_curve_figure.gca()
ticks = [0.001, 0.01, 0.05, 0.20, 0.5, 0.80, 0.95, 0.99, 0.999]
tick_labels = [
'{:.0%}'.format(s) if (100 * s).is_integer() else '{:.1%}'.format(s)
for s in ticks
]
tick_locations = sp.norm.ppf(ticks)
axes.set_xticks(tick_locations)
axes.set_xticklabels(tick_labels)
axes.set_yticks(tick_locations)
axes.set_yticklabels(tick_labels)
axes.set_ylim(-3, 3)
plt.legend(['first score', 'second score'])
det_curve_figure.set_size_inches((8, 8))
return det_curve_figure
def main():
"""Parse script arguments, log all the model's soft scores on the dataset
images and the true labels. Use the soft scores and true labels to
generate ROC and DET graphs."""
args = parse_args()
# load model
model_name = args.model
model = load_model(model_name)
model.load_state_dict(torch.load(args.checkpoint_path)['model'])
model.eval()
# load dataset
test_dataset = load_dataset(dataset_name=args.dataset, dataset_part='test')
all_first_soft_scores, all_second_soft_scores, gt_labels = \
get_soft_scores_and_true_labels(test_dataset, model)
# plot the roc curves
roc_curve_figure = plt.figure()
roc_curve_figure = plot_roc_curve(roc_curve_figure,
all_first_soft_scores,
all_second_soft_scores,
gt_labels)
roc_curve_figure.savefig(
os.path.join(FIGURES_DIR,
f'{args.dataset}_{args.model}_roc_curve.png'))
# plot the det curve for the scores of the first output of the network
det_curve_figure = plt.figure()
det_curve_figure = plot_det_curve(det_curve_figure,
all_first_soft_scores,
all_second_soft_scores,
gt_labels)
det_curve_figure.savefig(
os.path.join(FIGURES_DIR,
f'{args.dataset}_{args.model}_det_curve.png'))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
694553df0c0aa0de72c6cd3372d907b36a37b9fa | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_RTN8_solve.py | 7578551770778fbca70157c20919e407da47b880 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 2,357 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import math
def optimal(from_, to_):
if from_ % 2 == 0:
yield from_
from_ += 1
for divider_candidate in range(from_, to_, 2):
yield divider_candidate
def get_divider(x, from_, to_):
for divider_candidate in optimal(from_, min(to_, int(math.sqrt(x)) + 1)):
if x % divider_candidate == 0:
return divider_candidate
def solve(n_and_j):
n, j = n_and_j.split(' ')
n, j = int(n), int(j)
results_candidates = []
results = []
def generate_jamcoin_candidate():
for bin_number in range(0, 2 ** (n - 1)):
yield ('1{:0%sb}1' % (n - 2)).format(bin_number)
jamcoin_candidate_generator = generate_jamcoin_candidate()
def get_jamcoin_candidate(i):
if i >= len(results_candidates):
jamcoin_candidate = next(jamcoin_candidate_generator)
results_candidates.append((
jamcoin_candidate,
{'nums': [int(jamcoin_candidate, b) for b in range(2, 11)],
'step': 2,
'results': [None] * 9}))
return results_candidates[i]
jamcoin_candidate_i = 0
max_divider = 4
max_jamcoin_i = 2
max_bin_number = 2 ** (n - 1)
while True:
jamcoin_candidate, stats = get_jamcoin_candidate(jamcoin_candidate_i)
all_done = True
for i, num in enumerate(stats['nums']):
if stats['results'][i]:
continue
divider = get_divider(num, stats['step'], max_divider)
if divider:
stats['results'][i] = divider
else:
all_done = False
if all_done:
results.append(jamcoin_candidate + ' ' + ' '.join(map(str, stats['results'])))
results_candidates.pop(jamcoin_candidate_i)
if len(results) == j:
return '\n'.join(results)
else:
jamcoin_candidate_i += 1
if jamcoin_candidate_i >= max_jamcoin_i:
max_divider += 2
jamcoin_candidate_i = 0
max_jamcoin_i = min(max_bin_number, max_jamcoin_i * 2)
if __name__ == '__main__':
cases_number = int(input())
for case_number in range(1, cases_number + 1):
input_args = input()
print('Case #%s:\n%s' % (case_number, solve(input_args)))
| [
"[[email protected]]"
] | |
1bcd21e97a0088563cadcf935ce0e1dc6bc280f8 | 7f9954b117c7cd3e514c0643c0689245a7927e0c | /src/Speech_recognition.py | fd0c5339ea2f3ffc767352db7bfc5c6112ec6f4b | [
"MIT"
] | permissive | pranayjoshi/Speech_recognition | 30f0a9512724230a12000ebc0626b4f6d69b86a4 | e42e486babffc7941ff2e425fd48c47e206ce539 | refs/heads/master | 2022-12-24T14:13:00.326382 | 2020-09-04T18:01:07 | 2020-09-04T18:01:07 | 168,701,681 | 2 | 4 | MIT | 2020-10-01T20:17:11 | 2019-02-01T13:29:57 | Python | UTF-8 | Python | false | false | 10,035 | py | """
Project name = Pranay Assistant(Indo)
Name = Indo
Developer Name = Pranay Joshi
Version = 2.0
Old modules = Speech recognition, GTTs, PyAudio, os, re, webbrowser, smtplib, certifi, requests, pyttsx3 etc.
New Modules = google, word2number, wikipedia, time, json, datetime, ctime
"""
import speech_recognition as sr
import os
import re
import webbrowser
import smtplib
import requests
import pyttsx3
import time
from time import ctime
from word2number import w2n as converse
import wikipedia
import json
from datetime import date
# Defining global variables
engine = pyttsx3.init() # defining pyttsx3
indo = ["indo", "endo"] # deining the name by which the assistant will be called
# Intial defines
def speak(text): # This speak command will speak the text
engine.say(text)
engine.runAndWait()
speak("Hi Pranay") # Checking by speaking the developers name
def today(): # defining this to get the date
today = date.today()
return today
def present(l, command): # function used to check if the command is called by the user or not
ls = []
for i in indo:
for j in l:
get = str(i)+ " " + str(j)
if get in command:
return True
break
# Important function for recogninzing voice
def myCommand():
"listens for commands"
r = sr.Recognizer()
with sr.Microphone() as source:
speak('i am ready for your command')
r.pause_threshold = 1
r.adjust_for_ambient_noise(source, duration=1)
audio = r.listen(source)
try:
command = r.recognize_google(audio).lower()
speak('you said:' + command +'\n')
#loop back to continue to listen for commands if unrecognizable speech is received
except sr.UnknownValueError:
speak("Your last command couldn\'t be heard")
command = myCommand();
return command
# This is the main assistant
def assistant(command):
# Deining important variables and calling some files
with open("mailing_list.json", "r+") as file: #for mailing system
data1 = json.load(file)
mailing_list = data1
recipient_name = list(data1.keys())
with open("app_file.json", "r+") as file: # For opening apps
data2 = json.load(file)
location = data2
apps = list(data2.keys())
with open("app_file.json", "r+") as file: # For opening apps
data3 = json.load(file)
link= data3
web = list(link.keys())
# fun questions
if 'hey indo what\'s your actual name' in command:
speak("Pranay\'s Assistant")
elif present(['what\'s up'],command):
speak('Just doing my thing')
# Web based statements
elif present([(f"open {i}") for i in web], command): # websites in websites.json
con = re.search('open (.*)', command)
con = con.group(1)
url = link[con]
webbrowser.open(url)
speak('done')
elif present(['open website'], command): # websites in realtime
con = re.search('open website (.+)', command)
if con:
domain = con.group(1)
url = 'https://www.' + domain
webbrowser.open(url)
speak('done')
# web based commands/scrapping
# jokes
elif present(['tell some jokes', 'tell some joke', "tell me some jokes", "tell me some joke"], command):
res = requests.get(
'https://icanhazdadjoke.com/',
headers={"Accept":"application/json"}
)
if res.status_code == requests.codes.ok:
speak(str(res.json()['joke']))
else:
speak('oops!I ran out of jokes')
# Wikipedia Search
elif present(["wikipedia search", "search in wikipedia"], command):
con = re.search('for (.*)', command)
con = con.group(1)
speak(f"What do you want to hear about {con} , It's Definition, A short summary, A summary, or view full page content")
response = myCommand();
if "definition" in response:
speak(f"here is the defination of {con}, " + wikipedia.summary(con, sentences=2))
elif "short summary" in command:
speak(f"here is a short summary of {con}," + wikipedia.summary(con, sentences=4))
elif " summary" in command:
speak(f"here is a quick summary of {con}" + wikipedia.summary(con))
elif "page content" in command:
print(f"here is the full page content of {con}" + wikipedia.page(con).content)
else:
print("invalid command!")
# Whether
elif present(['what\'s current weather in'],command):
con = re.search('current weather in (.*)', command)
if con:
city = con.group(1)
url2 = 'https://api.openweathermap.org/data/2.5/weather?appid=608e56270a3d78b4012bbfdda0f05234&q=' + city
res = requests.get(url2)
database = res.json()
temp = database['main']['temp']
wind = database['wind']['speed']
overall = database['weather'][0]['main']
speak(f'The Current weather in is {overall}. The tempeture is {temp}.1f degree. it\'s wind speed is {wind} ')
# Longitude & Latitude
elif present(['find longitude and latitude of'],command):
con = re.search('find longitude and latitude of(.*)', command)
if con:
city = con.group(1)
url2 = 'https://api.openweathermap.org/data/2.5/weather?appid=608e56270a3d78b4012bbfdda0f05234&q=' + city
res = requests.get(url2)
database = res.json()
lat = database['coord']['lat']
long = database['coord']['lon']
speak(f'it\'s latitude is {lat}. it\'s longitude is {long}.')
# opens apps
elif present([(f"open {i}") for i in apps],command):
con = re.search('open (.*)', command)
con = con.group(1)
val = location[con]
os.startfile(val)
speak('done')
# Sending email
elif present(['open email', "send mail"], command):
speak("'Who is the recipient?'")
recipient = myCommand()
if recipient in recipient_name:
speak('What should I say?')
content = myCommand()
# init gmail SMTP
            mail = smtplib.SMTP('smtp.gmail.com', 587)
# identify to server
mail.ehlo()
# encrypt session
mail.starttls()
# login
mail.login('[email protected]', 'pass123')
# send message
            mail.sendmail('[email protected]', mailing_list[recipient], content)
# end mail connection
mail.close()
speak('Email sent.')
# OS based commands
# Computer shutdown
elif 'indo shutdown' in command:
speak('understood sir')
speak('connecting to command prompt')
speak('shutting down your computer')
os.system('shutdown -s')
# stope compiling
elif 'indo quit' in command:
speak('ok sir')
speak('closing all systems')
speak('disconnecting to servers')
speak('going offline')
quit()
#present time
elif "indo what's the time" in command:
time = ctime().split(" ")[3].split(":")[0:2]
if time[0] == "00":
hours = '12'
else:
hours = time[0]
minutes = time[1]
time = hours + " hours and " + minutes + "minutes"
speak(time)
# present date
elif present(["what's the date", "what is the date today", "what is the date", "today's date","what is today's date"],command):
d2 = today().strftime("%B %d, %Y")
speak(f"today's date is{d2}")
# pausing the script
elif present(["pause for", "wait for"], command):
con = re.search('for (.*)', command)
con = str(con.group(1))
l = con.split()
con = l[0]
con = int(con)
con_st = l[1]
print(con)
con = int(con)
check = "seconds"
minute = ["minutes", "mins", "minute"]
if con_st in minute:
con *= 60
check = "minutes"
speak(f"Okay! I am taking rest for {con} {check}")
time.sleep(con)
# google based search commands
# Google search results
elif present(['show the results for', "google search", "google", "results of"],command):
con = re.search('results for (.*)', command)
con = con.group(1)
try:
from googlesearch import search
except ImportError:
print("No module named 'google' found")
l = []
query = command
i = 1
for j in search(query, tld="co.in", num=10, stop=10, pause=2):
print(str(i) + "\t" + j)
l.append(j)
i += 1
speak("Which website do you want to see. Speak the number")
res = myCommand();
print("okay")
final = converse.word_to_num(res)
        webbrowser.open_new_tab(l[final - 1])
# Search for results in youtube
elif present(["open youtube", "open youtube and search for", "youtube search", "youtube"],command):
con = command.split("for")[-1]
url = "https://www.youtube.com/results?search_query=" + con
webbrowser.get().open(url)
speak("Here is what I found for " + con + "on youtube")
# rest search in google api = btnG=1&q=
else:
webbrowser.open_new_tab('http://www.google.com/search?btnG=1&q=' + command)
#loop to continue executing multiple commands
while True:
assistant(myCommand()) | [
"[email protected]"
] | |
d1597ffd8c87152ec49b9949a7de3ec827c5d1d4 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/matplotlib/2017/12/setupext.py | 2868fd76aee773dc4d8d576d9dfe80e8c6cca6b4 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 68,786 | py | from __future__ import print_function, absolute_import
from importlib import import_module
from distutils import sysconfig
from distutils import version
from distutils.core import Extension
import distutils.command.build_ext
import glob
import multiprocessing
import os
import platform
import re
import subprocess
from subprocess import check_output
import sys
import warnings
from textwrap import fill
import shutil
import versioneer
PY3min = (sys.version_info[0] >= 3)
def _get_home():
"""Find user's home directory if possible.
Otherwise, returns None.
:see:
http://mail.python.org/pipermail/python-list/2005-February/325395.html
"""
try:
if not PY3min and sys.platform == 'win32':
path = os.path.expanduser(b"~").decode(sys.getfilesystemencoding())
else:
path = os.path.expanduser("~")
except ImportError:
# This happens on Google App Engine (pwd module is not present).
pass
else:
if os.path.isdir(path):
return path
for evar in ('HOME', 'USERPROFILE', 'TMP'):
path = os.environ.get(evar)
if path is not None and os.path.isdir(path):
return path
return None
def _get_xdg_cache_dir():
"""
Returns the XDG cache directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
path = os.environ.get('XDG_CACHE_HOME')
if path is None:
path = _get_home()
if path is not None:
path = os.path.join(path, '.cache', 'matplotlib')
return path
# SHA256 hashes of the FreeType tarballs
_freetype_hashes = {
'2.6.1': '0a3c7dfbda6da1e8fce29232e8e96d987ababbbf71ebc8c75659e4132c367014',
'2.6.2': '8da42fc4904e600be4b692555ae1dcbf532897da9c5b9fb5ebd3758c77e5c2d4',
'2.6.3': '7942096c40ee6fea882bd4207667ad3f24bff568b96b10fd3885e11a7baad9a3',
'2.6.4': '27f0e38347a1850ad57f84fc4dfed68ba0bc30c96a6fa6138ef84d485dd9a8d7',
'2.6.5': '3bb24add9b9ec53636a63ea8e867ed978c4f8fdd8f1fa5ccfd41171163d4249a',
'2.7': '7b657d5f872b0ab56461f3bd310bd1c5ec64619bd15f0d8e08282d494d9cfea4',
'2.7.1': '162ef25aa64480b1189cdb261228e6c5c44f212aac4b4621e28cf2157efb59f5',
'2.8': '33a28fabac471891d0523033e99c0005b95e5618dc8ffa7fa47f9dadcacb1c9b',
'2.8.1': '876711d064a6a1bd74beb18dd37f219af26100f72daaebd2d86cb493d7cd7ec6',
}
# This is the version of FreeType to use when building a local
# version. It must match the value in
# lib/matplotlib.__init__.py and also needs to be changed below in the
# embedded windows build script (grep for "REMINDER" in this file)
LOCAL_FREETYPE_VERSION = '2.6.1'
LOCAL_FREETYPE_HASH = _freetype_hashes.get(LOCAL_FREETYPE_VERSION, 'unknown')
if sys.platform != 'win32':
if not PY3min:
from commands import getstatusoutput
else:
from subprocess import getstatusoutput
if PY3min:
import configparser
else:
import ConfigParser as configparser
# matplotlib build options, which can be altered using setup.cfg
options = {
'display_status': True,
'verbose': False,
'backend': None,
'basedirlist': None
}
setup_cfg = os.environ.get('MPLSETUPCFG', 'setup.cfg')
if os.path.exists(setup_cfg):
if PY3min:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
config.read(setup_cfg)
if config.has_option('status', 'suppress'):
options['display_status'] = not config.getboolean("status", "suppress")
if config.has_option('rc_options', 'backend'):
options['backend'] = config.get("rc_options", "backend")
if config.has_option('directories', 'basedirlist'):
options['basedirlist'] = [
x.strip() for x in
config.get("directories", "basedirlist").split(',')]
if config.has_option('test', 'local_freetype'):
options['local_freetype'] = config.getboolean("test", "local_freetype")
else:
config = None
lft = bool(os.environ.get('MPLLOCALFREETYPE', False))
options['local_freetype'] = lft or options.get('local_freetype', False)
def get_win32_compiler():
"""
Determine the compiler being used on win32.
"""
# Used to determine mingw32 or msvc
# This is pretty bad logic, someone know a better way?
for v in sys.argv:
if 'mingw32' in v:
return 'mingw32'
return 'msvc'
win32_compiler = get_win32_compiler()
def extract_versions():
"""
Extracts version values from the main matplotlib __init__.py and
returns them as a dictionary.
"""
with open('lib/matplotlib/__init__.py') as fd:
for line in fd.readlines():
if (line.startswith('__version__numpy__')):
exec(line.strip())
return locals()
def has_include_file(include_dirs, filename):
"""
Returns `True` if `filename` can be found in one of the
directories in `include_dirs`.
"""
if sys.platform == 'win32':
include_dirs = list(include_dirs) # copy before modify
include_dirs += os.environ.get('INCLUDE', '.').split(os.pathsep)
for dir in include_dirs:
if os.path.exists(os.path.join(dir, filename)):
return True
return False
def check_include_file(include_dirs, filename, package):
"""
Raises an exception if the given include file can not be found.
"""
if not has_include_file(include_dirs, filename):
raise CheckFailed(
"The C/C++ header for %s (%s) could not be found. You "
"may need to install the development package." %
(package, filename))
def get_base_dirs():
"""
Returns a list of standard base directories on this platform.
"""
if options['basedirlist']:
return options['basedirlist']
if os.environ.get('MPLBASEDIRLIST'):
return os.environ.get('MPLBASEDIRLIST').split(os.pathsep)
win_bases = ['win32_static', ]
# on conda windows, we also add the <conda_env_dir>\Library,
# as conda installs libs/includes there
# env var names mess: https://github.com/conda/conda/issues/2312
conda_env_path = os.getenv('CONDA_PREFIX') # conda >= 4.1
if not conda_env_path:
conda_env_path = os.getenv('CONDA_DEFAULT_ENV') # conda < 4.1
if conda_env_path and os.path.isdir(conda_env_path):
win_bases.append(os.path.join(conda_env_path, "Library"))
basedir_map = {
'win32': win_bases,
'darwin': ['/usr/local/', '/usr', '/usr/X11',
'/opt/X11', '/opt/local'],
'sunos5': [os.getenv('MPLIB_BASE') or '/usr/local', ],
'gnu0': ['/usr'],
'aix5': ['/usr/local'],
}
return basedir_map.get(sys.platform, ['/usr/local', '/usr'])
def get_include_dirs():
"""
Returns a list of standard include directories on this platform.
"""
include_dirs = [os.path.join(d, 'include') for d in get_base_dirs()]
if sys.platform != 'win32':
# gcc includes this dir automatically, so also look for headers in
# these dirs
include_dirs.extend(
os.environ.get('CPLUS_INCLUDE_PATH', '').split(os.pathsep))
return include_dirs
def is_min_version(found, minversion):
"""
Returns `True` if `found` is at least as high a version as
`minversion`.
"""
expected_version = version.LooseVersion(minversion)
found_version = version.LooseVersion(found)
return found_version >= expected_version
# Define the display functions only if display_status is True.
if options['display_status']:
def print_line(char='='):
print(char * 76)
def print_status(package, status):
initial_indent = "%22s: " % package
indent = ' ' * 24
print(fill(str(status), width=76,
initial_indent=initial_indent,
subsequent_indent=indent))
def print_message(message):
indent = ' ' * 24 + "* "
print(fill(str(message), width=76,
initial_indent=indent,
subsequent_indent=indent))
def print_raw(section):
print(section)
else:
def print_line(*args, **kwargs):
pass
print_status = print_message = print_raw = print_line
# Remove the -Wstrict-prototypes option, as it's not valid for C++
customize_compiler = distutils.command.build_ext.customize_compiler
def my_customize_compiler(compiler):
retval = customize_compiler(compiler)
try:
compiler.compiler_so.remove('-Wstrict-prototypes')
except (ValueError, AttributeError):
pass
return retval
distutils.command.build_ext.customize_compiler = my_customize_compiler
def make_extension(name, files, *args, **kwargs):
"""
Make a new extension. Automatically sets include_dirs and
library_dirs to the base directories appropriate for this
platform.
`name` is the name of the extension.
`files` is a list of source files.
Any additional arguments are passed to the
`distutils.core.Extension` constructor.
"""
ext = DelayedExtension(name, files, *args, **kwargs)
for dir in get_base_dirs():
include_dir = os.path.join(dir, 'include')
if os.path.exists(include_dir):
ext.include_dirs.append(include_dir)
for lib in ('lib', 'lib64'):
lib_dir = os.path.join(dir, lib)
if os.path.exists(lib_dir):
ext.library_dirs.append(lib_dir)
ext.include_dirs.append('.')
return ext
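# Illustrative usage (hypothetical extension name): callers typically build
# the extension here and then let the SetupPackage helpers below append
# their own flags, e.g.
#
#     ext = make_extension('matplotlib._example', ['src/_example.cpp'])
#     Numpy().add_flags(ext)
#     LibAgg().add_flags(ext)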
def get_file_hash(filename):
"""
Get the SHA256 hash of a given filename.
"""
import hashlib
BLOCKSIZE = 1 << 16
hasher = hashlib.sha256()
with open(filename, 'rb') as fd:
buf = fd.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = fd.read(BLOCKSIZE)
return hasher.hexdigest()
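# Illustrative usage: the FreeType download logic below compares this digest
# against a pinned constant, e.g.
#
#     if get_file_hash(tarball_path) != LOCAL_FREETYPE_HASH:
#         raise IOError("tarball does not match the expected hash")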
class PkgConfig(object):
"""
This is a class for communicating with pkg-config.
"""
def __init__(self):
"""
Determines whether pkg-config exists on this machine.
"""
if sys.platform == 'win32':
self.has_pkgconfig = False
else:
try:
self.pkg_config = os.environ['PKG_CONFIG']
except KeyError:
self.pkg_config = 'pkg-config'
self.set_pkgconfig_path()
status, output = getstatusoutput(self.pkg_config + " --help")
self.has_pkgconfig = (status == 0)
if not self.has_pkgconfig:
print("IMPORTANT WARNING:")
print(
" pkg-config is not installed.\n"
" matplotlib may not be able to find some of its dependencies")
def set_pkgconfig_path(self):
pkgconfig_path = sysconfig.get_config_var('LIBDIR')
if pkgconfig_path is None:
return
pkgconfig_path = os.path.join(pkgconfig_path, 'pkgconfig')
if not os.path.isdir(pkgconfig_path):
return
try:
os.environ['PKG_CONFIG_PATH'] += ':' + pkgconfig_path
except KeyError:
os.environ['PKG_CONFIG_PATH'] = pkgconfig_path
def setup_extension(self, ext, package, default_include_dirs=[],
default_library_dirs=[], default_libraries=[],
alt_exec=None):
"""
Add parameters to the given `ext` for the given `package`.
"""
flag_map = {
'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
executable = alt_exec
if self.has_pkgconfig:
executable = (self.pkg_config + ' {0}').format(package)
use_defaults = True
if executable is not None:
command = "{0} --libs --cflags ".format(executable)
try:
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
pass
else:
output = output.decode(sys.getfilesystemencoding())
use_defaults = False
for token in output.split():
attr = flag_map.get(token[:2])
if attr is not None:
getattr(ext, attr).insert(0, token[2:])
if use_defaults:
basedirs = get_base_dirs()
for base in basedirs:
for include in default_include_dirs:
dir = os.path.join(base, include)
if os.path.exists(dir):
ext.include_dirs.append(dir)
for lib in default_library_dirs:
dir = os.path.join(base, lib)
if os.path.exists(dir):
ext.library_dirs.append(dir)
ext.libraries.extend(default_libraries)
return True
return False
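    # Illustrative usage (mirroring the Png package below): pass an Extension,
    # the pkg-config package name and optional fallbacks for systems without
    # pkg-config, e.g.
    #
    #     pkg_config.setup_extension(
    #         ext, 'libpng', default_libraries=['png', 'z'],
    #         alt_exec='libpng-config --ldflags')
    #
    # Any -I/-L/-l tokens reported by the tool are mapped onto include_dirs,
    # library_dirs and libraries via flag_map above.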
def get_version(self, package):
"""
Get the version of the package from pkg-config.
"""
if not self.has_pkgconfig:
return None
status, output = getstatusoutput(
self.pkg_config + " %s --modversion" % (package))
if status == 0:
return output
return None
# The PkgConfig class should be used through this singleton
pkg_config = PkgConfig()
class CheckFailed(Exception):
"""
Exception thrown when a `SetupPackage.check` method fails.
"""
pass
class SetupPackage(object):
optional = False
pkg_names = {
"apt-get": None,
"yum": None,
"dnf": None,
"brew": None,
"port": None,
"windows_url": None
}
def check(self):
"""
Checks whether the build dependencies are met. Should raise a
`CheckFailed` exception if the dependency could not be met, otherwise
return a string indicating a version number or some other message
indicating what was found.
"""
pass
def runtime_check(self):
"""
True if the runtime dependencies of the backend are met. Assumes that
the build-time dependencies are met.
"""
return True
def get_packages(self):
"""
Get a list of package names to add to the configuration.
These are added to the `packages` list passed to
`distutils.setup`.
"""
return []
def get_namespace_packages(self):
"""
Get a list of namespace package names to add to the configuration.
These are added to the `namespace_packages` list passed to
`distutils.setup`.
"""
return []
def get_py_modules(self):
"""
Get a list of top-level modules to add to the configuration.
These are added to the `py_modules` list passed to
`distutils.setup`.
"""
return []
def get_package_data(self):
"""
Get a package data dictionary to add to the configuration.
These are merged into to the `package_data` list passed to
`distutils.setup`.
"""
return {}
def get_extension(self):
"""
Get a list of C extensions (`distutils.core.Extension`
objects) to add to the configuration. These are added to the
`extensions` list passed to `distutils.setup`.
"""
return None
def get_install_requires(self):
"""
Get a list of Python packages that we require.
pip/easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def get_setup_requires(self):
"""
Get a list of Python packages that we require at build time.
pip/easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def _check_for_pkg_config(self, package, include_file, min_version=None,
version=None):
"""
A convenience function for writing checks for a
pkg_config-defined dependency.
`package` is the pkg_config package name.
`include_file` is a top-level include file we expect to find.
`min_version` is the minimum version required.
`version` will override the found version if this package
requires an alternate method for that. Set version='unknown'
        if the version is not known but you still want to disable the
        pkg_config version check.
"""
if version is None:
version = pkg_config.get_version(package)
if version is None:
raise CheckFailed(
"pkg-config information for '%s' could not be found." %
package)
if min_version == 'PATCH':
raise CheckFailed(
"Requires patches that have not been merged upstream.")
if min_version and version != 'unknown':
if (not is_min_version(version, min_version)):
raise CheckFailed(
"Requires %s %s or later. Found %s." %
(package, min_version, version))
ext = self.get_extension()
if ext is None:
ext = make_extension('test', [])
pkg_config.setup_extension(ext, package)
check_include_file(
ext.include_dirs + get_include_dirs(), include_file, package)
return 'version %s' % version
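    # Illustrative usage (mirroring FreeType.check() below): subclasses pass
    # the pkg-config package name, a header to sanity-check and, optionally,
    # a minimum or pre-detected version, e.g.
    #
    #     return self._check_for_pkg_config(
    #         'freetype2', 'ft2build.h', min_version='2.3', version=version)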
def do_custom_build(self):
"""
If a package needs to do extra custom things, such as building a
third-party library, before building an extension, it should
override this method.
"""
pass
def install_help_msg(self):
"""
        Do not override this method!
Generate the help message to show if the package is not installed.
To use this in subclasses, simply add the dictionary `pkg_names` as
a class variable:
pkg_names = {
"apt-get": <Name of the apt-get package>,
"yum": <Name of the yum package>,
"dnf": <Name of the dnf package>,
"brew": <Name of the brew package>,
"port": <Name of the port package>,
"windows_url": <The url which has installation instructions>
}
All the dictionary keys are optional. If a key is not present or has
the value `None` no message is provided for that platform.
"""
def _try_managers(*managers):
for manager in managers:
pkg_name = self.pkg_names.get(manager, None)
if pkg_name:
try:
# `shutil.which()` can be used when Python 2.7 support
# is dropped. It is available in Python 3.3+
_ = check_output(["which", manager],
stderr=subprocess.STDOUT)
if manager == 'port':
pkgconfig = 'pkgconfig'
else:
pkgconfig = 'pkg-config'
return ('Try installing {0} with `{1} install {2}` '
'and pkg-config with `{1} install {3}`'
.format(self.name, manager, pkg_name,
pkgconfig))
except subprocess.CalledProcessError:
pass
message = None
if sys.platform == "win32":
url = self.pkg_names.get("windows_url", None)
if url:
message = ('Please check {0} for instructions to install {1}'
.format(url, self.name))
elif sys.platform == "darwin":
message = _try_managers("brew", "port")
elif sys.platform.startswith("linux"):
release = platform.linux_distribution()[0].lower()
if release in ('debian', 'ubuntu'):
message = _try_managers('apt-get')
elif release in ('centos', 'redhat', 'fedora'):
message = _try_managers('dnf', 'yum')
return message
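# Illustrative sketch (hypothetical package, not used by the build): a
# minimal SetupPackage subclass only needs a check() returning a status
# string and, if it builds C code, a get_extension(), e.g.
#
#     class Example(SetupPackage):
#         name = "example"
#
#         def check(self):
#             return "handled by setuptools"
#
#         def get_extension(self):
#             ext = make_extension('matplotlib._example', ['src/_example.cpp'])
#             Numpy().add_flags(ext)
#             return ext
#
# The concrete packages further down follow exactly this shape.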
class OptionalPackage(SetupPackage):
optional = True
force = False
config_category = "packages"
default_config = "auto"
@classmethod
def get_config(cls):
"""
Look at `setup.cfg` and return one of ["auto", True, False] indicating
if the package is at default state ("auto"), forced by the user (case
insensitively defined as 1, true, yes, on for True) or opted-out (case
insensitively defined as 0, false, no, off for False).
"""
conf = cls.default_config
if config is not None and config.has_option(cls.config_category, cls.name):
try:
conf = config.getboolean(cls.config_category, cls.name)
except ValueError:
conf = config.get(cls.config_category, cls.name)
return conf
def check(self):
"""
Do not override this method!
For custom dependency checks override self.check_requirements().
Two things are checked: Configuration file and requirements.
"""
# Check configuration file
conf = self.get_config()
# Default "auto" state or install forced by user
if conf in [True, 'auto']:
message = "installing"
# Set non-optional if user sets `True` in config
if conf is True:
self.optional = False
# Configuration opt-out by user
else:
# Some backend extensions (e.g. Agg) need to be built for certain
# other GUI backends (e.g. TkAgg) even when manually disabled
if self.force is True:
message = "installing forced (config override)"
else:
raise CheckFailed("skipping due to configuration")
# Check requirements and add extra information (if any) to message.
# If requirements are not met a CheckFailed should be raised in there.
additional_info = self.check_requirements()
if additional_info:
message += ", " + additional_info
# No CheckFailed raised until now, return install message.
return message
def check_requirements(self):
"""
Override this method to do custom dependency checks.
- Raise CheckFailed() if requirements are not met.
- Return message with additional information, or an empty string
(or None) for no additional information.
"""
return ""
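# Illustrative setup.cfg snippet: optional packages read their state from the
# section named by config_category, e.g.
#
#     [packages]
#     tests = True
#
#     [gui_support]
#     gtk3agg = False
#
# True forces the build (and makes the package non-optional), False opts out,
# and a missing option falls back to the class's default_config (usually
# "auto").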
class OptionalBackendPackage(OptionalPackage):
config_category = "gui_support"
class Platform(SetupPackage):
name = "platform"
def check(self):
return sys.platform
class Python(SetupPackage):
name = "python"
def check(self):
major, minor1, minor2, s, tmp = sys.version_info
if major < 2:
raise CheckFailed(
"Requires Python 2.7 or later")
elif major == 2 and minor1 < 7:
raise CheckFailed(
"Requires Python 2.7 or later (in the 2.x series)")
elif major == 3 and minor1 < 4:
raise CheckFailed(
"Requires Python 3.4 or later (in the 3.x series)")
return sys.version
class Matplotlib(SetupPackage):
name = "matplotlib"
def check(self):
return versioneer.get_version()
def get_packages(self):
return [
'matplotlib',
'matplotlib.backends',
'matplotlib.backends.qt_editor',
'matplotlib.compat',
'matplotlib.projections',
'matplotlib.axes',
'matplotlib.sphinxext',
'matplotlib.style',
'matplotlib.testing',
'matplotlib.testing._nose',
'matplotlib.testing._nose.plugins',
'matplotlib.testing.jpl_units',
'matplotlib.tri',
'matplotlib.cbook'
]
def get_py_modules(self):
return ['pylab']
def get_package_data(self):
return {
'matplotlib':
[
'mpl-data/fonts/afm/*.afm',
'mpl-data/fonts/pdfcorefonts/*.afm',
'mpl-data/fonts/pdfcorefonts/*.txt',
'mpl-data/fonts/ttf/*.ttf',
'mpl-data/fonts/ttf/LICENSE_STIX',
'mpl-data/fonts/ttf/COPYRIGHT.TXT',
'mpl-data/fonts/ttf/README.TXT',
'mpl-data/fonts/ttf/RELEASENOTES.TXT',
'mpl-data/images/*.xpm',
'mpl-data/images/*.svg',
'mpl-data/images/*.gif',
'mpl-data/images/*.pdf',
'mpl-data/images/*.png',
'mpl-data/images/*.ppm',
'mpl-data/example/*.npy',
'mpl-data/matplotlibrc',
'backends/web_backend/*.*',
'backends/web_backend/js/*.*',
'backends/web_backend/jquery/js/*.min.js',
'backends/web_backend/jquery/css/themes/base/*.min.css',
'backends/web_backend/jquery/css/themes/base/images/*',
'backends/web_backend/css/*.*',
'backends/Matplotlib.nib/*',
'mpl-data/stylelib/*.mplstyle',
]}
class SampleData(OptionalPackage):
"""
This handles the sample data that ships with matplotlib. It is
technically optional, though most often will be desired.
"""
name = "sample_data"
def get_package_data(self):
return {
'matplotlib':
[
'mpl-data/sample_data/*.*',
'mpl-data/sample_data/axes_grid/*.*',
]}
class Toolkits(OptionalPackage):
name = "toolkits"
def get_packages(self):
return [
'mpl_toolkits',
'mpl_toolkits.mplot3d',
'mpl_toolkits.axes_grid',
'mpl_toolkits.axes_grid1',
'mpl_toolkits.axisartist',
]
def get_namespace_packages(self):
return ['mpl_toolkits']
class Tests(OptionalPackage):
name = "tests"
pytest_min_version = '3.0.0'
default_config = False
def check(self):
super(Tests, self).check()
msgs = []
msg_template = ('{package} is required to run the Matplotlib test '
'suite. Please install it with pip or your preferred '
'tool to run the test suite')
bad_pytest = msg_template.format(
package='pytest %s or later' % self.pytest_min_version
)
try:
import pytest
if is_min_version(pytest.__version__, self.pytest_min_version):
msgs += ['using pytest version %s' % pytest.__version__]
else:
msgs += [bad_pytest]
except ImportError:
msgs += [bad_pytest]
if PY3min:
msgs += ['using unittest.mock']
else:
try:
import mock
msgs += ['using mock %s' % mock.__version__]
except ImportError:
msgs += [msg_template.format(package='mock')]
return ' / '.join(msgs)
def get_packages(self):
return [
'matplotlib.tests',
'matplotlib.sphinxext.tests',
]
def get_package_data(self):
baseline_images = [
'tests/baseline_images/%s/*' % x
for x in os.listdir('lib/matplotlib/tests/baseline_images')]
return {
'matplotlib':
baseline_images +
[
'tests/cmr10.pfb',
'tests/mpltest.ttf',
'tests/test_rcparams.rc',
'tests/test_utf32_be_rcparams.rc',
'sphinxext/tests/tinypages/*.rst',
'sphinxext/tests/tinypages/*.py',
'sphinxext/tests/tinypages/_static/*',
]}
class Toolkits_Tests(Tests):
name = "toolkits_tests"
def check_requirements(self):
conf = self.get_config()
toolkits_conf = Toolkits.get_config()
tests_conf = Tests.get_config()
if conf is True:
Tests.force = True
Toolkits.force = True
elif conf == "auto" and not (toolkits_conf and tests_conf):
# Only auto-install if both toolkits and tests are set
# to be installed
raise CheckFailed("toolkits_tests needs 'toolkits' and 'tests'")
return ""
def get_packages(self):
return [
'mpl_toolkits.tests',
]
def get_package_data(self):
baseline_images = [
'tests/baseline_images/%s/*' % x
for x in os.listdir('lib/mpl_toolkits/tests/baseline_images')]
return {'mpl_toolkits': baseline_images}
def get_namespace_packages(self):
return ['mpl_toolkits']
class DelayedExtension(Extension, object):
"""
A distutils Extension subclass where some of its members
may have delayed computation until reaching the build phase.
This is so we can, for example, get the Numpy include dirs
after pip has installed Numpy for us if it wasn't already
on the system.
"""
def __init__(self, *args, **kwargs):
super(DelayedExtension, self).__init__(*args, **kwargs)
self._finalized = False
self._hooks = {}
def add_hook(self, member, func):
"""
Add a hook to dynamically compute a member.
Parameters
----------
member : string
The name of the member
func : callable
The function to call to get dynamically-computed values
for the member.
"""
self._hooks[member] = func
def finalize(self):
self._finalized = True
class DelayedMember(property):
def __init__(self, name):
self._name = name
def __get__(self, obj, objtype=None):
result = getattr(obj, '_' + self._name, [])
if obj._finalized:
if self._name in obj._hooks:
result = obj._hooks[self._name]() + result
return result
def __set__(self, obj, value):
setattr(obj, '_' + self._name, value)
include_dirs = DelayedMember('include_dirs')
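# Illustrative usage: Numpy.add_flags() below registers a hook so the numpy
# include directory is only resolved at build time, e.g.
#
#     ext.add_hook('include_dirs', self.include_dirs_hook)
#
# Once finalize() has been called, the hook's return value is prepended to
# the statically declared include_dirs.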
class Numpy(SetupPackage):
name = "numpy"
@staticmethod
def include_dirs_hook():
if PY3min:
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import imp
import numpy
imp.reload(numpy)
else:
import __builtin__
if hasattr(__builtin__, '__NUMPY_SETUP__'):
del __builtin__.__NUMPY_SETUP__
import numpy
reload(numpy)
ext = Extension('test', [])
ext.include_dirs.append(numpy.get_include())
if not has_include_file(
ext.include_dirs, os.path.join("numpy", "arrayobject.h")):
warnings.warn(
"The C headers for numpy could not be found. "
"You may need to install the development package")
return [numpy.get_include()]
def check(self):
min_version = extract_versions()['__version__numpy__']
try:
import numpy
except ImportError:
return 'not found. pip may install it below.'
if not is_min_version(numpy.__version__, min_version):
raise SystemExit(
"Requires numpy %s or later to build. (Found %s)" %
(min_version, numpy.__version__))
return 'version %s' % numpy.__version__
def add_flags(self, ext):
# Ensure that PY_ARRAY_UNIQUE_SYMBOL is uniquely defined for
# each extension
array_api_name = 'MPL_' + ext.name.replace('.', '_') + '_ARRAY_API'
ext.define_macros.append(('PY_ARRAY_UNIQUE_SYMBOL', array_api_name))
ext.add_hook('include_dirs', self.include_dirs_hook)
ext.define_macros.append(('NPY_NO_DEPRECATED_API',
'NPY_1_7_API_VERSION'))
# Allow NumPy's printf format specifiers in C++.
ext.define_macros.append(('__STDC_FORMAT_MACROS', 1))
def get_setup_requires(self):
return ['numpy>=1.7.1']
def get_install_requires(self):
return ['numpy>=1.7.1']
class LibAgg(SetupPackage):
name = 'libagg'
def check(self):
self.__class__.found_external = True
try:
return self._check_for_pkg_config(
'libagg', 'agg2/agg_basics.h', min_version='PATCH')
except CheckFailed as e:
self.__class__.found_external = False
return str(e) + ' Using local copy.'
def add_flags(self, ext, add_sources=True):
if self.found_external:
pkg_config.setup_extension(ext, 'libagg')
else:
ext.include_dirs.insert(0, 'extern/agg24-svn/include')
if add_sources:
agg_sources = [
'agg_bezier_arc.cpp',
'agg_curves.cpp',
'agg_image_filters.cpp',
'agg_trans_affine.cpp',
'agg_vcgen_contour.cpp',
'agg_vcgen_dash.cpp',
'agg_vcgen_stroke.cpp',
'agg_vpgen_segmentator.cpp'
]
ext.sources.extend(
os.path.join('extern', 'agg24-svn', 'src', x) for x in agg_sources)
class FreeType(SetupPackage):
name = "freetype"
pkg_names = {
"apt-get": "libfreetype6-dev",
"yum": "freetype-devel",
"dnf": "freetype-devel",
"brew": "freetype",
"port": "freetype",
"windows_url": "http://gnuwin32.sourceforge.net/packages/freetype.htm"
}
def check(self):
if options.get('local_freetype'):
return "Using local version for testing"
if sys.platform == 'win32':
try:
check_include_file(get_include_dirs(), 'ft2build.h', 'freetype')
except CheckFailed:
check_include_file(get_include_dirs(), 'freetype2\\ft2build.h', 'freetype')
return 'Using unknown version found on system.'
status, output = getstatusoutput("freetype-config --ftversion")
if status == 0:
version = output
else:
version = None
        # Early versions of the freetype-config script invoke grep
        # incorrectly, so its output may contain grep errors instead of a
        # version; catch those cases (tested with 2.5.3).
if version is None or 'No such file or directory\ngrep:' in version:
version = self.version_from_header()
# pkg_config returns the libtool version rather than the
# freetype version so we need to explicitly pass the version
# to _check_for_pkg_config
return self._check_for_pkg_config(
'freetype2', 'ft2build.h',
min_version='2.3', version=version)
def version_from_header(self):
version = 'unknown'
ext = self.get_extension()
if ext is None:
return version
# Return the first version found in the include dirs.
for include_dir in ext.include_dirs:
header_fname = os.path.join(include_dir, 'freetype.h')
if os.path.exists(header_fname):
major, minor, patch = 0, 0, 0
with open(header_fname, 'r') as fh:
for line in fh:
if line.startswith('#define FREETYPE_'):
value = line.rsplit(' ', 1)[1].strip()
if 'MAJOR' in line:
major = value
elif 'MINOR' in line:
minor = value
else:
patch = value
return '.'.join([major, minor, patch])
def add_flags(self, ext):
if options.get('local_freetype'):
src_path = os.path.join(
'build', 'freetype-{0}'.format(LOCAL_FREETYPE_VERSION))
# Statically link to the locally-built freetype.
# This is certainly broken on Windows.
ext.include_dirs.insert(0, os.path.join(src_path, 'include'))
if sys.platform == 'win32':
libfreetype = 'libfreetype.lib'
else:
libfreetype = 'libfreetype.a'
ext.extra_objects.insert(
0, os.path.join(src_path, 'objs', '.libs', libfreetype))
ext.define_macros.append(('FREETYPE_BUILD_TYPE', 'local'))
else:
pkg_config.setup_extension(
ext, 'freetype2',
default_include_dirs=[
'include/freetype2', 'freetype2',
'lib/freetype2/include',
'lib/freetype2/include/freetype2'],
default_library_dirs=[
'freetype2/lib'],
default_libraries=['freetype', 'z'])
ext.define_macros.append(('FREETYPE_BUILD_TYPE', 'system'))
def do_custom_build(self):
# We're using a system freetype
if not options.get('local_freetype'):
return
src_path = os.path.join(
'build', 'freetype-{0}'.format(LOCAL_FREETYPE_VERSION))
# We've already built freetype
if sys.platform == 'win32':
libfreetype = 'libfreetype.lib'
else:
libfreetype = 'libfreetype.a'
if os.path.isfile(os.path.join(src_path, 'objs', '.libs', libfreetype)):
return
tarball = 'freetype-{0}.tar.gz'.format(LOCAL_FREETYPE_VERSION)
tarball_path = os.path.join('build', tarball)
try:
tarball_cache_dir = _get_xdg_cache_dir()
tarball_cache_path = os.path.join(tarball_cache_dir, tarball)
except:
# again, do not really care if this fails
tarball_cache_dir = None
tarball_cache_path = None
if not os.path.isfile(tarball_path):
if (tarball_cache_path is not None and
os.path.isfile(tarball_cache_path)):
if get_file_hash(tarball_cache_path) == LOCAL_FREETYPE_HASH:
try:
os.makedirs('build')
except OSError:
# Don't care if it exists.
pass
try:
shutil.copy(tarball_cache_path, tarball_path)
print('Using cached tarball: {}'
.format(tarball_cache_path))
except OSError:
# If this fails, oh well just re-download
pass
if not os.path.isfile(tarball_path):
if PY3min:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
if not os.path.exists('build'):
os.makedirs('build')
url_fmts = [
'https://downloads.sourceforge.net/project/freetype'
'/freetype2/{version}/{tarball}',
'https://download.savannah.gnu.org/releases/freetype'
'/{tarball}'
]
for url_fmt in url_fmts:
tarball_url = url_fmt.format(
version=LOCAL_FREETYPE_VERSION, tarball=tarball)
print("Downloading {0}".format(tarball_url))
try:
urlretrieve(tarball_url, tarball_path)
except IOError: # URLError (a subclass) on Py3.
print("Failed to download {0}".format(tarball_url))
else:
if get_file_hash(tarball_path) != LOCAL_FREETYPE_HASH:
print("Invalid hash.")
else:
break
else:
                raise IOError("Failed to download freetype. "
                              "You can download the file by "
                              "alternative means and copy it "
                              "to '{0}'".format(tarball_path))
try:
os.makedirs(tarball_cache_dir)
except OSError:
# Don't care if it exists.
pass
try:
shutil.copy(tarball_path, tarball_cache_path)
print('Cached tarball at: {}'.format(tarball_cache_path))
except OSError:
# If this fails, we can always re-download.
pass
if get_file_hash(tarball_path) != LOCAL_FREETYPE_HASH:
raise IOError(
"{0} does not match expected hash.".format(tarball))
print("Building {0}".format(tarball))
if sys.platform != 'win32':
# compilation on all other platforms than windows
cflags = 'CFLAGS="{0} -fPIC" '.format(os.environ.get('CFLAGS', ''))
subprocess.check_call(
['tar', 'zxf', tarball], cwd='build')
subprocess.check_call(
[cflags + './configure --with-zlib=no --with-bzip2=no '
'--with-png=no --with-harfbuzz=no'], shell=True, cwd=src_path)
subprocess.check_call(
[cflags + 'make'], shell=True, cwd=src_path)
else:
# compilation on windows
FREETYPE_BUILD_CMD = """\
call "%ProgramFiles%\\Microsoft SDKs\\Windows\\v7.0\\Bin\\SetEnv.Cmd" /Release /{xXX} /xp
call "{vcvarsall}" {xXX}
set MSBUILD=C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319\\MSBuild.exe
rd /S /Q %FREETYPE%\\objs
%MSBUILD% %FREETYPE%\\builds\\windows\\{vc20xx}\\freetype.sln /t:Clean;Build /p:Configuration="{config}";Platform={WinXX}
echo Build completed, moving result"
:: move to the "normal" path for the unix builds...
mkdir %FREETYPE%\\objs\\.libs
:: REMINDER: fix when changing the version
copy %FREETYPE%\\objs\\{vc20xx}\\{xXX}\\freetype261.lib %FREETYPE%\\objs\\.libs\\libfreetype.lib
if errorlevel 1 (
rem This is a py27 version, which has a different location for the lib file :-/
copy %FREETYPE%\\objs\\win32\\{vc20xx}\\freetype261.lib %FREETYPE%\\objs\\.libs\\libfreetype.lib
)
"""
from setup_external_compile import fixproj, prepare_build_cmd, VS2010, X64, tar_extract
# Note: freetype has no build profile for 2014, so we don't bother...
vc = 'vc2010' if VS2010 else 'vc2008'
WinXX = 'x64' if X64 else 'Win32'
tar_extract(tarball_path, "build")
# This is only false for py2.7, even on py3.5...
if not VS2010:
fixproj(os.path.join(src_path, 'builds', 'windows', vc, 'freetype.sln'), WinXX)
fixproj(os.path.join(src_path, 'builds', 'windows', vc, 'freetype.vcproj'), WinXX)
cmdfile = os.path.join("build", 'build_freetype.cmd')
with open(cmdfile, 'w') as cmd:
cmd.write(prepare_build_cmd(FREETYPE_BUILD_CMD, vc20xx=vc, WinXX=WinXX,
config='Release' if VS2010 else 'LIB Release'))
os.environ['FREETYPE'] = src_path
subprocess.check_call([cmdfile], shell=True)
class FT2Font(SetupPackage):
name = 'ft2font'
def get_extension(self):
sources = [
'src/ft2font.cpp',
'src/ft2font_wrapper.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib.ft2font', sources)
FreeType().add_flags(ext)
Numpy().add_flags(ext)
return ext
class Png(SetupPackage):
name = "png"
pkg_names = {
"apt-get": "libpng12-dev",
"yum": "libpng-devel",
"dnf": "libpng-devel",
"brew": "libpng",
"port": "libpng",
"windows_url": "http://gnuwin32.sourceforge.net/packages/libpng.htm"
}
def check(self):
if sys.platform == 'win32':
check_include_file(get_include_dirs(), 'png.h', 'png')
return 'Using unknown version found on system.'
status, output = getstatusoutput("libpng-config --version")
if status == 0:
version = output
else:
version = None
try:
return self._check_for_pkg_config(
'libpng', 'png.h',
min_version='1.2', version=version)
except CheckFailed as e:
if has_include_file(get_include_dirs(), 'png.h'):
return str(e) + ' Using unknown version found on system.'
raise
def get_extension(self):
sources = [
'src/_png.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib._png', sources)
pkg_config.setup_extension(
ext, 'libpng', default_libraries=['png', 'z'],
alt_exec='libpng-config --ldflags')
Numpy().add_flags(ext)
return ext
class Qhull(SetupPackage):
name = "qhull"
def check(self):
self.__class__.found_external = True
try:
return self._check_for_pkg_config(
'libqhull', 'libqhull/qhull_a.h', min_version='2015.2')
except CheckFailed as e:
self.__class__.found_pkgconfig = False
self.__class__.found_external = False
return str(e) + ' Using local copy.'
def add_flags(self, ext):
if self.found_external:
pkg_config.setup_extension(ext, 'qhull',
default_libraries=['qhull'])
else:
ext.include_dirs.insert(0, 'extern')
ext.sources.extend(sorted(glob.glob('extern/libqhull/*.c')))
class TTConv(SetupPackage):
name = "ttconv"
def get_extension(self):
sources = [
'src/_ttconv.cpp',
'extern/ttconv/pprdrv_tt.cpp',
'extern/ttconv/pprdrv_tt2.cpp',
'extern/ttconv/ttutil.cpp'
]
ext = make_extension('matplotlib.ttconv', sources)
Numpy().add_flags(ext)
ext.include_dirs.insert(0, 'extern')
return ext
class Path(SetupPackage):
name = "path"
def get_extension(self):
sources = [
'src/py_converters.cpp',
'src/_path_wrapper.cpp'
]
ext = make_extension('matplotlib._path', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
return ext
class Image(SetupPackage):
name = "image"
def get_extension(self):
sources = [
'src/_image.cpp',
'src/mplutils.cpp',
'src/_image_wrapper.cpp',
'src/py_converters.cpp'
]
ext = make_extension('matplotlib._image', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
return ext
class Contour(SetupPackage):
name = "contour"
def get_extension(self):
sources = [
"src/_contour.cpp",
"src/_contour_wrapper.cpp",
]
ext = make_extension('matplotlib._contour', sources)
Numpy().add_flags(ext)
return ext
class QhullWrap(SetupPackage):
name = "qhull_wrap"
def get_extension(self):
sources = ['src/qhull_wrap.c']
ext = make_extension('matplotlib._qhull', sources,
define_macros=[('MPL_DEVNULL', os.devnull)])
Numpy().add_flags(ext)
Qhull().add_flags(ext)
return ext
class Tri(SetupPackage):
name = "tri"
def get_extension(self):
sources = [
"lib/matplotlib/tri/_tri.cpp",
"lib/matplotlib/tri/_tri_wrapper.cpp",
"src/mplutils.cpp"
]
ext = make_extension('matplotlib._tri', sources)
Numpy().add_flags(ext)
return ext
class InstallRequires(SetupPackage):
name = "install_requires"
def check(self):
return "handled by setuptools"
def get_install_requires(self):
install_requires = [
"cycler>=0.10",
"pyparsing>=2.0.1,!=2.0.4,!=2.1.2,!=2.1.6",
"python-dateutil>=2.0",
"pytz",
"six>=1.10",
]
if sys.version_info < (3,):
install_requires += ["backports.functools_lru_cache"]
if sys.version_info < (3,) and os.name == "posix":
install_requires += ["subprocess32"]
return install_requires
class BackendAgg(OptionalBackendPackage):
name = "agg"
force = True
def get_extension(self):
sources = [
"src/mplutils.cpp",
"src/py_converters.cpp",
"src/_backend_agg.cpp",
"src/_backend_agg_wrapper.cpp"
]
ext = make_extension('matplotlib.backends._backend_agg', sources)
Numpy().add_flags(ext)
LibAgg().add_flags(ext)
FreeType().add_flags(ext)
return ext
class BackendTkAgg(OptionalBackendPackage):
name = "tkagg"
force = True
def check(self):
return "installing; run-time loading from Python Tcl / Tk"
def runtime_check(self):
""" Checks whether TkAgg runtime dependencies are met
"""
pkg_name = 'tkinter' if PY3min else 'Tkinter'
try:
import_module(pkg_name)
except ImportError:
return False
return True
def get_extension(self):
sources = [
'src/py_converters.cpp',
'src/_tkagg.cpp'
]
ext = make_extension('matplotlib.backends._tkagg', sources)
self.add_flags(ext)
Numpy().add_flags(ext)
LibAgg().add_flags(ext, add_sources=False)
return ext
def add_flags(self, ext):
ext.include_dirs.insert(0, 'src')
if sys.platform == 'win32':
# PSAPI library needed for finding Tcl / Tk at run time
ext.libraries.extend(['psapi'])
class BackendGtk(OptionalBackendPackage):
name = "gtk"
def check_requirements(self):
try:
import gtk
except ImportError:
raise CheckFailed("Requires pygtk")
except RuntimeError:
raise CheckFailed('pygtk present, but import failed.')
else:
version = (2, 2, 0)
if gtk.pygtk_version < version:
raise CheckFailed(
"Requires pygtk %d.%d.%d or later. "
"Found %d.%d.%d" % (version + gtk.pygtk_version))
ext = self.get_extension()
self.add_flags(ext)
check_include_file(ext.include_dirs,
os.path.join("gtk", "gtk.h"),
'gtk')
check_include_file(ext.include_dirs,
os.path.join("pygtk", "pygtk.h"),
'pygtk')
return 'Gtk: %s pygtk: %s' % (
".".join(str(x) for x in gtk.gtk_version),
".".join(str(x) for x in gtk.pygtk_version))
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def get_extension(self):
sources = [
'src/_backend_gdk.c'
]
ext = make_extension('matplotlib.backends._backend_gdk', sources)
self.add_flags(ext)
Numpy().add_flags(ext)
return ext
def add_flags(self, ext):
if sys.platform == 'win32':
def getoutput(s):
ret = os.popen(s).read().strip()
return ret
if 'PKG_CONFIG_PATH' not in os.environ:
# If Gtk+ is installed, pkg-config is required to be installed
os.environ['PKG_CONFIG_PATH'] = 'C:\\GTK\\lib\\pkgconfig'
            # popen is broken on my win32 platform so I can't use pkg-config
ext.library_dirs.extend(
['C:/GTK/bin', 'C:/GTK/lib'])
ext.include_dirs.extend(
['win32_static/include/pygtk-2.0',
'C:/GTK/include',
'C:/GTK/include/gobject',
'C:/GTK/include/gext',
'C:/GTK/include/glib',
'C:/GTK/include/pango',
'C:/GTK/include/atk',
'C:/GTK/include/X11',
'C:/GTK/include/cairo',
'C:/GTK/include/gdk',
'C:/GTK/include/gdk-pixbuf',
'C:/GTK/include/gtk',
])
pygtkIncludes = getoutput(
'pkg-config --cflags-only-I pygtk-2.0').split()
gtkIncludes = getoutput(
'pkg-config --cflags-only-I gtk+-2.0').split()
includes = pygtkIncludes + gtkIncludes
ext.include_dirs.extend([include[2:] for include in includes])
pygtkLinker = getoutput('pkg-config --libs pygtk-2.0').split()
gtkLinker = getoutput('pkg-config --libs gtk+-2.0').split()
linkerFlags = pygtkLinker + gtkLinker
ext.libraries.extend(
[flag[2:] for flag in linkerFlags if flag.startswith('-l')])
ext.library_dirs.extend(
[flag[2:] for flag in linkerFlags if flag.startswith('-L')])
ext.extra_link_args.extend(
[flag for flag in linkerFlags if not
(flag.startswith('-l') or flag.startswith('-L'))])
# visual studio doesn't need the math library
if (sys.platform == 'win32' and
win32_compiler == 'msvc' and
'm' in ext.libraries):
ext.libraries.remove('m')
elif sys.platform != 'win32':
pkg_config.setup_extension(ext, 'pygtk-2.0')
pkg_config.setup_extension(ext, 'gtk+-2.0')
class BackendGtkAgg(BackendGtk):
name = "gtkagg"
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def get_extension(self):
sources = [
'src/py_converters.cpp',
'src/_gtkagg.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib.backends._gtkagg', sources)
self.add_flags(ext)
LibAgg().add_flags(ext)
Numpy().add_flags(ext)
return ext
def backend_gtk3agg_internal_check(x):
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too old.")
try:
from gi.repository import Gtk, Gdk, GObject
except (ImportError, RuntimeError):
return (False, "Requires pygobject to be installed.")
    return (True, "version %s.%s.%s" % (
        Gtk.get_major_version(),
        Gtk.get_minor_version(),
        Gtk.get_micro_version()))
class BackendGtk3Agg(OptionalBackendPackage):
name = "gtk3agg"
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
try:
res = p.map_async(backend_gtk3agg_internal_check, [0])
success, msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
            # No result returned. Probably hanging; terminate the process.
success = False
raise CheckFailed("Check timed out")
except:
p.close()
# Some other error.
success = False
msg = "Could not determine"
raise
else:
p.close()
finally:
p.join()
if success:
return msg
else:
raise CheckFailed(msg)
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
def backend_gtk3cairo_internal_check(x):
try:
import cairocffi
except ImportError:
try:
import cairo
except ImportError:
return (False, "Requires cairocffi or pycairo to be installed.")
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too old.")
try:
from gi.repository import Gtk, Gdk, GObject
except (RuntimeError, ImportError):
return (False, "Requires pygobject to be installed.")
    return (True, "version %s.%s.%s" % (
        Gtk.get_major_version(),
        Gtk.get_minor_version(),
        Gtk.get_micro_version()))
class BackendGtk3Cairo(OptionalBackendPackage):
name = "gtk3cairo"
def check_requirements(self):
if 'TRAVIS' in os.environ:
raise CheckFailed("Can't build with Travis")
# This check needs to be performed out-of-process, because
# importing gi and then importing regular old pygtk afterward
# segfaults the interpreter.
try:
p = multiprocessing.Pool()
except:
return "unknown (can not use multiprocessing to determine)"
try:
res = p.map_async(backend_gtk3cairo_internal_check, [0])
success, msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
            # No result returned. Probably hanging; terminate the process.
success = False
raise CheckFailed("Check timed out")
except:
p.close()
success = False
raise
else:
p.close()
finally:
p.join()
if success:
return msg
else:
raise CheckFailed(msg)
def get_package_data(self):
return {'matplotlib': ['mpl-data/*.glade']}
class BackendWxAgg(OptionalBackendPackage):
name = "wxagg"
def check_requirements(self):
wxversioninstalled = True
try:
import wxversion
except ImportError:
wxversioninstalled = False
if wxversioninstalled:
try:
_wx_ensure_failed = wxversion.AlreadyImportedError
except AttributeError:
_wx_ensure_failed = wxversion.VersionError
try:
wxversion.ensureMinimal('2.9')
except _wx_ensure_failed:
pass
try:
import wx
backend_version = wx.VERSION_STRING
except ImportError:
raise CheckFailed("requires wxPython")
if not is_min_version(backend_version, "2.9"):
raise CheckFailed(
"Requires wxPython 2.9, found %s" % backend_version)
return "version %s" % backend_version
class BackendMacOSX(OptionalBackendPackage):
name = 'macosx'
def check_requirements(self):
if sys.platform != 'darwin':
raise CheckFailed("Mac OS-X only")
return 'darwin'
def get_extension(self):
sources = [
'src/_macosx.m'
]
ext = make_extension('matplotlib.backends._macosx', sources)
ext.extra_link_args.extend(['-framework', 'Cocoa'])
return ext
class Windowing(OptionalBackendPackage):
"""
Builds the windowing extension.
"""
name = "windowing"
def check_requirements(self):
if sys.platform != 'win32':
raise CheckFailed("Microsoft Windows only")
config = self.get_config()
if config is False:
raise CheckFailed("skipping due to configuration")
return ""
def get_extension(self):
sources = [
"src/_windowing.cpp"
]
ext = make_extension('matplotlib._windowing', sources)
ext.include_dirs.extend(['C:/include'])
ext.libraries.extend(['user32'])
ext.library_dirs.extend(['C:/lib'])
ext.extra_link_args.append("-mwindows")
return ext
class BackendQtBase(OptionalBackendPackage):
def convert_qt_version(self, version):
version = '%x' % version
temp = []
while len(version) > 0:
version, chunk = version[:-2], version[-2:]
temp.insert(0, str(int(chunk, 16)))
return '.'.join(temp)
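    # Worked example: QtCore.QT_VERSION is a packed hex integer, so
    # convert_qt_version(0x050601) renders it as '%x' -> '50601', consumes it
    # two hex digits at a time from the right into ['5', '6', '1'], and
    # returns '5.6.1'.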
def check_requirements(self):
'''
If PyQt4/PyQt5 is already imported, importing PyQt5/PyQt4 will fail
so we need to test in a subprocess (as for Gtk3).
'''
try:
p = multiprocessing.Pool()
except:
# Can't do multiprocessing, fall back to normal approach
# (this will fail if importing both PyQt4 and PyQt5).
try:
# Try in-process
msg = self.callback(self)
except RuntimeError:
raise CheckFailed(
"Could not import: are PyQt4 & PyQt5 both installed?")
else:
# Multiprocessing OK
try:
res = p.map_async(self.callback, [self])
msg = res.get(timeout=10)[0]
except multiprocessing.TimeoutError:
p.terminate()
                # No result returned. Probably hanging; terminate the process.
raise CheckFailed("Check timed out")
except:
# Some other error.
p.close()
raise
else:
# Clean exit
p.close()
finally:
# Tidy up multiprocessing
p.join()
return msg
def backend_pyside_internal_check(self):
try:
from PySide import __version__
from PySide import QtCore
except ImportError:
raise CheckFailed("PySide not found")
else:
return ("Qt: %s, PySide: %s" %
(QtCore.__version__, __version__))
def backend_pyqt4_internal_check(self):
try:
from PyQt4 import QtCore
except ImportError:
raise CheckFailed("PyQt4 not found")
try:
qt_version = QtCore.QT_VERSION
pyqt_version_str = QtCore.PYQT_VERSION_STR
except AttributeError:
raise CheckFailed('PyQt4 not correctly imported')
else:
return ("Qt: %s, PyQt: %s" % (self.convert_qt_version(qt_version), pyqt_version_str))
def backend_qt4_internal_check(self):
successes = []
failures = []
try:
successes.append(backend_pyside_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
try:
successes.append(backend_pyqt4_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
if len(successes) == 0:
raise CheckFailed('; '.join(failures))
return '; '.join(successes + failures)
class BackendQt4(BackendQtBase):
name = "qt4agg"
def __init__(self, *args, **kwargs):
BackendQtBase.__init__(self, *args, **kwargs)
self.callback = backend_qt4_internal_check
def backend_pyside2_internal_check(self):
try:
from PySide2 import __version__
from PySide2 import QtCore
except ImportError:
raise CheckFailed("PySide2 not found")
else:
return ("Qt: %s, PySide2: %s" %
(QtCore.__version__, __version__))
def backend_pyqt5_internal_check(self):
try:
from PyQt5 import QtCore
except ImportError:
raise CheckFailed("PyQt5 not found")
try:
qt_version = QtCore.QT_VERSION
pyqt_version_str = QtCore.PYQT_VERSION_STR
except AttributeError:
raise CheckFailed('PyQt5 not correctly imported')
else:
return ("Qt: %s, PyQt: %s" % (self.convert_qt_version(qt_version), pyqt_version_str))
def backend_qt5_internal_check(self):
successes = []
failures = []
try:
successes.append(backend_pyside2_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
try:
successes.append(backend_pyqt5_internal_check(self))
except CheckFailed as e:
failures.append(str(e))
if len(successes) == 0:
raise CheckFailed('; '.join(failures))
return '; '.join(successes + failures)
class BackendQt5(BackendQtBase):
name = "qt5agg"
def __init__(self, *args, **kwargs):
BackendQtBase.__init__(self, *args, **kwargs)
self.callback = backend_qt5_internal_check
class BackendCairo(OptionalBackendPackage):
name = "cairo"
def check_requirements(self):
try:
import cairocffi
except ImportError:
try:
import cairo
except ImportError:
raise CheckFailed("cairocffi or pycairo not found")
else:
return "pycairo version %s" % cairo.version
else:
return "cairocffi version %s" % cairocffi.version
class DviPng(SetupPackage):
name = "dvipng"
optional = True
def check(self):
try:
output = check_output('dvipng -version', shell=True,
stderr=subprocess.STDOUT)
return "version %s" % output.splitlines()[1].decode().split()[-1]
except (IndexError, ValueError, subprocess.CalledProcessError):
raise CheckFailed()
class Ghostscript(SetupPackage):
name = "ghostscript"
optional = True
def check(self):
if sys.platform == 'win32':
# mgs is the name in miktex
gs_execs = ['gswin32c', 'gswin64c', 'mgs', 'gs']
else:
gs_execs = ['gs']
for gs_exec in gs_execs:
try:
command = gs_exec + ' --version'
output = check_output(command, shell=True,
stderr=subprocess.STDOUT)
return "version %s" % output.decode()[:-1]
except (IndexError, ValueError, subprocess.CalledProcessError):
pass
raise CheckFailed()
class LaTeX(SetupPackage):
name = "latex"
optional = True
def check(self):
try:
output = check_output('latex -version', shell=True,
stderr=subprocess.STDOUT)
line = output.splitlines()[0].decode()
pattern = '(3\.1\d+)|(MiKTeX \d+.\d+)'
match = re.search(pattern, line)
return "version %s" % match.group(0)
except (IndexError, ValueError, AttributeError, subprocess.CalledProcessError):
raise CheckFailed()
class PdfToPs(SetupPackage):
name = "pdftops"
optional = True
def check(self):
try:
output = check_output('pdftops -v', shell=True,
stderr=subprocess.STDOUT)
for line in output.splitlines():
line = line.decode()
if 'version' in line:
return "version %s" % line.split()[2]
except (IndexError, ValueError, subprocess.CalledProcessError):
pass
raise CheckFailed()
class OptionalPackageData(OptionalPackage):
config_category = "package_data"
class Dlls(OptionalPackageData):
"""
On Windows, this packages any DLL files that can be found in the
lib/matplotlib/* directories.
"""
name = "dlls"
def check_requirements(self):
if sys.platform != 'win32':
raise CheckFailed("Microsoft Windows only")
def get_package_data(self):
return {'': ['*.dll']}
@classmethod
def get_config(cls):
"""
Look at `setup.cfg` and return one of ["auto", True, False] indicating
if the package is at default state ("auto"), forced by the user (True)
or opted-out (False).
"""
try:
return config.getboolean(cls.config_category, cls.name)
except:
return False # <-- default
| [
"[email protected]"
] | |
d92f30b3e758222776245aa95fcb11704d4d7d8b | 9272584f18cdc8450713b2376fef966934f3fd3e | /starblock/starblock.pyde | c8bf8e37339c6b6f39cc330a7547603edd25f169 | [] | no_license | FranciscoPython/TamashiVR | ff949ad610ba5f2f870ab9438a2fd89d85079ae0 | 403461bea9f0cff785308089ca2ad69be927697b | refs/heads/master | 2020-12-22T15:26:54.250975 | 2020-03-02T13:14:20 | 2020-03-02T13:14:20 | 236,840,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,105 | pyde | key_mode = 1
padpos1 = 0
x = 10
def frame():
fill(145)
rect (0, 0, 500, 500)
def Innerframe():
fill(45)
rect (10, 10, 480, 480)
padpos= -10
def paddle():
fill(255)
circle(padpos , 460 , 20)
circle(padpos + 20 , 460, 20)
rect(padpos, 450 , 20 , 20)
def setup():
size(500,500)
background(0,0,0)
x=0
y=30
fill (99)
rect(x,y,width, height)
fill (8, 3, 61)
rect (x+10,y+10,width-20, height-50)
for i in range(10):
for j in range(8):
Blue = random(0, 10)
Red = random (2,255)
Green = random (0,10)
fill (Red, Green, Blue)
rect (x+10+ i*48, y+10+j*20, 48, 20)
def draw():
"""
if frameCount%120 == 0:
stroke(8, 3, 61)
fill (8, 3, 61, 80)
rect (10,200,width-20, height-50)
stroke(255, 255, 0, )
fill( 255, 255, 255, 0)
for i in range(250):
Xpos = random(15, 485)
Ypos = random (202,485)
Size = random (.1,.5)
circle( Xpos, Ypos, Size)
if frameCount%360 ==0:
stroke(8, 3, 61)
fill (8, 3, 61, 50)
rect (10,200,width-20, height-50)
"""
stroke(8, 3, 61)
fill (8, 3, 61, 1)
rect (10,200,width-20, height-50)
if frameCount%2 == 0:
stroke(255, 255, 0, )
fill( 255, 255, 255, 0)
Xpos = random(15, 485)
Ypos = random (202,485)
Size = random (.1,.5)
circle(Xpos, Ypos, Size)
stroke(8, 3, 61)
fill (8, 3, 61)
Xpos = random(10, 465)
Ypos = random (200,465)
rect (Xpos, Ypos, 25, 25)
if frameCount%5 == 0:
for i in range(10):
for j in range(8):
Blue = random(0, 10)
Red = random (2,255)
Green = random (0,10)
fill (Red, Green, Blue, 40)
rect (10+ i*48, 40+j*20, 48, 20)
global padpos1
global key_mode
if keyPressed:
key_mode = 1
if mousePressed:
key_mode = 0
if key_mode == 0:
padpos1 = mouseX
if key_mode == 1:
if keyPressed:
if keyCode == LEFT:
padpos1 = padpos1 - 10
if keyCode == RIGHT:
padpos1 = padpos1 + 10
if padpos1 >= 470:
padpos1 = 470
if padpos1 <= 30:
padpos1 = 30
pushMatrix()
translate(padpos1,0)
paddle()
popMatrix()
"""
for j in range(100):
positionX = map(random(0,1), 0, 1, 15, 385)
positionY = map(random(0,1), 0, 1, 200, 385)
for i in range(8):
stroke(255, 255, 0)
strokeWeight(1)
x = 5 * cos(2*PI/8 * i)
y = 5 * sin(2*PI/8 * i)
line ( 200, 200, 200+ x, 200+y)
"""
| [
"[email protected]"
] | |
feaca30d209710ef59254b4a7a876cbbc712270f | 016cf414259dccd8e45856ef0cd131cf27f66fec | /datapreprocessing/file_to_wav.py | a443a9b3b87dee5ea6ad204e9024e6261fc3732e | [] | no_license | steinszzh/2020capstone | acccd69924ccaf3de77907808422f049631408ac | 95d223f15ffbd39af2d79532ee0ed73613b4a399 | refs/heads/master | 2023-02-03T05:53:06.444073 | 2020-12-21T12:51:01 | 2020-12-21T12:51:01 | 288,187,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 19 20:44:10 2020
@author: zhihongz
"""
import os
from pydub import AudioSegment
def convert_to_wav(dir_path):
for file_path in os.listdir(dir_path):
if file_path.split('.')[-1] != "wav":
read_file = AudioSegment.from_file(os.path.join(dir_path,file_path), file_path.split('.')[-1])
os.remove(os.path.join(dir_path,file_path))
base_name = file_path.split('.')[:-1]
# read_file = read_file.set_channels(8)
# base_name = ".".join(base_name)
read_file.export(os.path.join(dir_path,f"{base_name[0]}.wav"), format="wav")
if __name__ == '__main__':
dir_path= './dev-clean/2078/142845' # folder name
    all_files = os.listdir(dir_path) # get all filenames in the folder
conv= convert_to_wav(dir_path)
| [
"[email protected]"
] | |
7bbd45dc9c290c1a74e2526119f7f5cc401db529 | 874abdd97c48329a10e13845fe75bbb18dbfd650 | /stocks.py | 3e863b6654d85f3459bf1f0f72a4721b2fdb4bd5 | [] | no_license | JakeSigwart/Stock_Dataset | ff732cf268bb9b138168947eb0b3ae50d52bec81 | 972b82f80d835785c9682b29b695d3823f3122db | refs/heads/master | 2021-05-03T22:55:15.202175 | 2018-02-06T03:42:33 | 2018-02-06T03:42:33 | 120,394,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | import os
import time
import pickle
import numpy as np
import pandas as pd
import datetime as dt
from Stock_dataset import *
path = os.path.dirname(__file__)
todays_date = str(dt.date.today())
tickers = ['AAPL', 'AMZN', 'NVDA', 'GM', 'T', 'CAH']
#sp_500_tickers = np.load(path + '\\data\\tickers.npy')
dataset = Stock_dataset(tickers, path+'\\data\\data.pkl', path+'\\data\\dates.pkl', path+'\\data\\proc.npy')
#dataset.quandl_api_key("YOUR API KEY HERE")
data, dates = dataset.fetch_data('2017-01-01', '2017-07-01')
dataset.save_new_data(data, dates, True)
numeric_data, _ = dataset.organize_data_from_vars(data, dates)
proc_data, processed_data_stock, processed_data_dates, combined_dates = dataset.process_data(numeric_data, dates, False)
#processed_data, dates = dataset.update_data(todays_date) #Un-comment this and comment the above 4 lines after processing first data fetch
num_dates = len(dates)
print(tickers)
print('Data metrics for date: ' + str(dates[num_dates-1]))
print(proc_data[num_dates-1])
| [
"[email protected]"
] | |
0497e0262a8ee739513125f73d20dec716f79060 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/cylicRot_20200714234806.py | 755b17fab1acf221b7f045ba530fc306bc41432f | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | # given an array rotate it k times to the right
def rotate(arr,k):
# first I'd rotate the array once
# so how do we rotate the array
    # we move the last element to the first place and
# the rest follow suit
# [1,2,3,4]
# [4,2,3,1]
# [4,1,3,2]
# [4,1,2,3]
# [4,1,2,3]
# all we are doing is swapping the elements
    # repeat that single pass k times (mod the length) to rotate right by k
    if not arr:
        return arr
    for _ in range(k % len(arr)):
        last = len(arr) - 1
        for i in range(last):
            arr[i], arr[last] = arr[last], arr[i]
    print(arr)
    return arr
rotate([1,2,3,4],4)
| [
"[email protected]"
] | |
6fd9bb6dae2bb50a8a9fbac2eb6d9c75a6b3da23 | 17b22d94c938bddafd4420424997a5b82afca6f9 | /hw3.py | 5681a5988ca075e7baa42656fd9a02b0070d78bf | [] | no_license | amikoz/HW3-Kozenasheva151 | 42623a21c5a7c6f2522f15034d834b9c9073eaed | a4ab011d2cb18843bb7551cdbb829a8ed33bc53a | refs/heads/master | 2021-06-08T21:12:15.162713 | 2016-12-04T21:15:16 | 2016-12-04T21:15:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,574 | py | import re
import urllib.request
import os
import html
def download_page1(pageUrl1):
try:
page1 = urllib.request.urlopen(pageUrl1)
text1 = page1.read().decode('UTF-8')
except:
text1 = 'unavailable page'
return text1
def txt_1(text1):
regPostTitletxt1 = re.compile(' <b class="regnum_title">REGNUM</b></span>(.*?)</div>', flags=re.DOTALL)
t1 = regPostTitletxt1.findall(text1)
if t1:
txt_1 = t1
new_text1 = []
regTag1 = re.compile('<.*?>', flags=re.DOTALL)
regSpace1 = re.compile('\s{2,}', flags=re.DOTALL)
for finaltext1 in txt_1:
clean_t1 = regSpace1.sub("", finaltext1)
clean_t = regTag1.sub("", clean_t1)
new_text1.append(clean_t)
for finaltext1 in new_text1:
finaltext1.replace(" →»—«–", " ")
if finaltext1:
txt_1= html.unescape(finaltext1)
else:
            txt_1 = 'no text'
return txt_1
def func1(txt_1):
n = txt_1.lower()
n2 = n.replace(',', '')
n1 = n2.replace('.', '')
n0 = n1.replace('»', '')
n3 = n0.replace('«', '')
n4 = n3.replace('-', '')
n5 = n4.replace('\n', '')
n6 = n5.replace(':', '')
n7 = re.sub(u"[0-9]{1,}", " ", n6)
m1 = n7.split(" ")
A = set(m1)
return A
def download_page2(pageUrl2):
try:
page2 = urllib.request.urlopen(pageUrl2)
text2 = page2.read().decode('UTF-8')
except:
text2 = 'unavailable page'
return text2
def txt_2(text2):
regPostTitletxt2 = re.compile('<div itemprop="articleBody">(.*?)<div data-type="Incut. By wide" class="b-read-more b-read-more_wide">', flags=re.DOTALL)
t2 = regPostTitletxt2.findall(text2)
if t2:
txt_2= t2
new_text2 = []
regTag2 = re.compile('<.*?>', flags=re.DOTALL)
regSpace2 = re.compile('\s{2,}', flags=re.DOTALL)
for finaltext2 in txt_2:
clean_t2 = regSpace2.sub("", finaltext2)
clean_t2 = regTag2.sub("", clean_t2)
new_text2.append(clean_t2)
for finaltext2 in new_text2:
finaltext2.replace(" →»—«–", " ")
if finaltext2:
txt_2 = html.unescape(finaltext2)
else:
txt_2 = 'no text'
return txt_2
def func2(txt_2):
n2 = txt_2.lower()
n22 = n2.replace(',', '')
n12 = n22.replace('.', '')
n02 = n12.replace('»', '')
n32 = n02.replace('«', '')
n42 = n32.replace('-', '')
n52 = n42.replace('\n', '')
n62 = n52.replace(':', '')
n72 = re.sub(u"[0-9]{1,}", " ", n62)
m2 = n72.split(" ")
B = set(m2)
return B
def download_page3(pageUrl3):
try:
page3 = urllib.request.urlopen(pageUrl3)
text3 = page3.read().decode('UTF-8')
except:
text3 = 'unavailable page'
return text3
def txt_3(text3):
regPostTitletxt3 = re.compile('<div class="b-text clearfix js-topic__text" itemprop="articleBody">(.*?)<aside class="b-inline-topics-box b-box_floated b-inline-topics-box_wide b-box_left">', flags=re.DOTALL)
t3 = regPostTitletxt3.findall(text3)
if t3:
txt_3 = t3
new_text3 = []
regTag3 = re.compile('<.*?>', flags=re.DOTALL)
regSpace3 = re.compile('\s{2,}', flags=re.DOTALL)
for finaltext3 in txt_3:
clean_t3 = regSpace3.sub("", finaltext3)
clean_t3 = regTag3.sub("", clean_t3)
new_text3.append(clean_t3)
for finaltext3 in new_text3:
finaltext3.replace(" →»—«–", " ")
if finaltext3:
txt_3 = html.unescape(finaltext3)
else:
txt_3 = 'no text'
return txt_3
def func3(txt_3):
n3 = txt_3.lower()
n23 = n3.replace(',', '')
n13 = n23.replace('.', '')
n03 = n13.replace('»', '')
n33 = n03.replace('«', '')
n43 = n33.replace('-', '')
n53 = n43.replace('\n', '')
n63 = n53.replace(':', '')
n73 = re.sub(u"[0-9]{1,}", " ", n63)
m3 = n73.split(" ")
C = set(m3)
return C
def download_page4(pageUrl4):
try:
page4 = urllib.request.urlopen(pageUrl4)
text4 = page4.read().decode('UTF-8')
except:
text4 = 'unavailable page'
return text4
def txt_4(text4):
regPostTitletxt4 = re.compile('<p class="lid">(.*?)<p><div class="article__incut">', flags=re.DOTALL)
t4 = regPostTitletxt4.findall(text4)
if t4:
txt_4 = t4
new_text4 = []
regTag4 = re.compile('<.*?>', flags=re.DOTALL)
regSpace4 = re.compile('\s{2,}', flags=re.DOTALL)
for finaltext4 in txt_4:
clean_t4 = regSpace4.sub("", finaltext4)
clean_t4 = regTag4.sub("", clean_t4)
new_text4.append(clean_t4)
for finaltext4 in new_text4:
finaltext4.replace(" →»—«–", " ")
if finaltext4:
txt_4 = html.unescape(finaltext4)
else:
txt_4 = 'no text'
return txt_4
def func4(txt_4):
n4 = txt_4.lower()
n24 = n4.replace(',', '')
n14 = n24.replace('.', '')
n04 = n14.replace('»', '')
n34 = n04.replace('«', '')
n44 = n34.replace('-', '')
n54 = n44.replace('\n', '')
n64 = n54.replace(':', '')
n74 = re.sub(u"[0-9]{1,}", " ", n64)
m4 = n74.split(" ")
D = set(m4)
return D
def intersec(A, B, C, D):
inter1 = A.intersection(B)
inter2 = inter1.intersection(C)
inter = inter2.intersection(D)
print('Пересечение множеств: ', inter)
def symmdif(A, B, C, D):
sd1 = A.symmetric_difference(B)
sd2 = sd1.symmetric_difference(C)
sd = sd2.symmetric_difference(D)
print('Симметрическая разность множeств: ', sd)
def main():
pageUrl1 = 'https://regnum.ru/news/innovatio/2211264.html'
text1 = download_page1(pageUrl1)
g1 = txt_1(text1)
b1 = func1(g1)
pageUrl2 = 'https://rg.ru/2016/11/29/na-marse-obnaruzhen-labirint.html'
text2 = download_page2(pageUrl2)
g2 = txt_2(text2)
b2 = func2(g2)
pageUrl3 = 'https://lenta.ru/news/2016/11/29/mars/'
text3 = download_page3(pageUrl3)
g3 = txt_3(text3)
b3 = func3(g3)
pageUrl4 = 'http://www.mk.ru/science/2016/11/29/tainstvennyy-labirint-na-marse-privlek-vnimanie-planetologov.html'
text4 = download_page4(pageUrl4)
g4 = txt_4(text4)
b4 = func4(g4)
intersec(b1, b2, b3, b4)
symmdif(b1, b2, b3, b4)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e615006a23c81dc60b0a5cdc99d864b0a4c4a7d4 | c724fad90be2e221cb0f5c0005ebcfbdfdb35d27 | /backend/fitsii_19945/wsgi.py | cfa9f31b691c6399a7797d950bc243dc2bb70ac9 | [] | no_license | crowdbotics-apps/fitsii-19945 | d461349a510febd39f4edcaeb2b8b722664e3bf0 | 040621b4053e58b9c323ef7222a6a36465c4806e | refs/heads/master | 2022-12-07T18:18:50.580128 | 2020-09-02T16:56:11 | 2020-09-02T16:56:11 | 292,342,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for fitsii_19945 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fitsii_19945.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
821bcebfcedc6d629c6e2fbce307378367cc9129 | 1c88eef044c7ca83b545001e123b8bf064884bb5 | /palindrome.py | 465ca3b80f8bfacd18257869beb6ec3ec3ac0710 | [] | no_license | JacobDuvall/demosPy | 05329592ad8526d0d72201a68faf7c8234774f73 | c039943869d3a2cd62c4b6ff759857d3b5e7054e | refs/heads/master | 2020-06-17T21:48:06.066695 | 2019-07-09T20:04:36 | 2019-07-09T20:04:36 | 196,067,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | import unittest
def digits(x):
"""Convert an integer into a list of digits.
Args:
x: The number whose digits we want.
Returns: A list of the digits, in order, of ``x``.
>>> digits(4586378)
[8, 7, 3, 6, 8, 5, 4]
"""
digs = []
while x != 0:
div, mod = divmod(x, 10)
digs.append(mod)
x = div
return digs
def is_palindrome(x):
"""Determine if an integer is a palindrome.
Args:
x: The number to check for palindromicity
Returns: True if the digits of ``x`` are a palindrome,
False otherwise.
>>> is_palindrome(1234)
False
>>> is_palindrome(2468642)
True
"""
digs = digits(x)
for f, r in zip(digs, reversed(digs)):
if f != r:
return False
return True
class Tests(unittest.TestCase):
"""Tests for the ``is_palindrome()`` function."""
def test_negative(self):
"Check that it returns False correctly."
self.assertFalse(is_palindrome(1234))
def test_positive(self):
"Check that it returns True correctly."
self.assertTrue(is_palindrome(1234321))
def test_single_digit(self):
"Check that it works for single digit numbers."
for i in range(10):
self.assertTrue(is_palindrome(i))
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
f0ec9069cd636274166bcd07ca0cebc104ee447b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03598/s680963277.py | c8861d19ff2e2ce27d5b6a660a4fb273c93d87c7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | N = int(input())
K = int(input())
x = list(map(int, input().split()))
A = []
for i in range(len(x)):
a = min(2*(x[i]), 2*abs(K-x[i]))
A.append(a)
print(sum(A)) | [
"[email protected]"
] | |
d07d964851d7ea84722cc1c566fdb976f5049c0a | 10d98fecb882d4c84595364f715f4e8b8309a66f | /non_semantic_speech_benchmark/distillation/train_keras_test.py | 58293b999787e89c984afb7ffed56dbb033ecc48 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 3,089 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for non_semantic_speech_benchmark.eval_embedding.keras.train_keras."""
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
import tensorflow as tf
from non_semantic_speech_benchmark.distillation import train_keras
def _get_data(*args, **kwargs):
del args
assert 'samples_key' in kwargs
assert 'min_length' in kwargs
assert 'batch_size' in kwargs
bs = kwargs['batch_size']
samples = tf.zeros((bs, 16000), tf.float32)
targets = tf.ones([bs, 10], tf.float32)
return tf.data.Dataset.from_tensors((samples, targets)).repeat()
class TrainKerasTest(parameterized.TestCase):
@parameterized.parameters(
{'bottleneck_dimension': 3, 'alpha': 1.0},
{'bottleneck_dimension': 5, 'alpha': 0.5},
)
def test_get_model(self, bottleneck_dimension, alpha):
batched_samples = tf.zeros([3, 16000])
output_dimension = 10
targets = tf.ones([3, output_dimension])
model = train_keras.models.get_keras_model(
f'mobilenet_debug_{alpha}_False',
bottleneck_dimension=bottleneck_dimension,
output_dimension=output_dimension)
loss_obj = tf.keras.losses.MeanSquaredError()
opt = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.MeanSquaredError()
train_mae = tf.keras.metrics.MeanAbsoluteError()
summary_writer = tf.summary.create_file_writer(
absltest.get_default_test_tmpdir())
train_step = train_keras.get_train_step(
model, loss_obj, opt, train_loss, train_mae, summary_writer)
gstep = opt.iterations
train_step(batched_samples, targets, gstep)
self.assertEqual(1, gstep)
train_step(batched_samples, targets, gstep)
self.assertEqual(2, gstep)
@mock.patch.object(train_keras.get_data, 'get_data', new=_get_data)
@mock.patch.object(train_keras.hub, 'load')
@flagsaver.flagsaver
def test_full_flow(self, mock_load):
del mock_load
flags.FLAGS.file_pattern = 'dummy'
flags.FLAGS.teacher_model_hub = 'dummy'
flags.FLAGS.output_key = 'dummmy'
flags.FLAGS.bottleneck_dimension = 2
flags.FLAGS.output_dimension = 10
flags.FLAGS.shuffle_buffer_size = 4
flags.FLAGS.samples_key = 'audio'
flags.FLAGS.logdir = absltest.get_default_test_tmpdir()
train_keras.train_and_report(debug=True)
if __name__ == '__main__':
tf.compat.v2.enable_v2_behavior()
assert tf.executing_eagerly()
absltest.main()
| [
"[email protected]"
] | |
867accc1fa0ae63cde0cb8f95b38ab6d178fb261 | f9141cb0e7677d9892fe1edddad3dd20db96fc0a | /rule_class.py | e91f8826a7ec8eb72061b57bf50b4cbba436e3a9 | [
"MIT"
] | permissive | andytaylor823/play-euchre | beb47d26dbf35d08de97e4b51b2712338a69fe68 | 32887980487e07865b799de96069f50866760a12 | refs/heads/master | 2020-08-02T13:53:10.030200 | 2020-04-27T12:01:23 | 2020-04-27T12:01:23 | 211,376,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | import basicprogs as b
import boardstate_class as bsc
from inspect import signature # use this to check the "condition" argument
class rule:
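    # bundles a condition function over (board, position) with the phase it applies to: 'lead', 'follow' or 'call'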
def __init__(self, condition, rule_type, name='rule'):
if not callable(condition):
print('Error: condition argument must be a callable function')
raise(ValueError)
sig = signature(condition)
if len(sig.parameters) != 2:
print('Error: condition argument can only take two arguments')
raise(ValueError)
if not isinstance(rule_type, str):
print('Error: rule_type argument must be a string')
raise(ValueError)
if rule_type.lower() not in ['lead', 'follow', 'call']:
print('Error: rule_type argument can only be "lead", "follow", or "call"')
raise(ValueError)
self.type = rule_type
self.condition = condition
self.name = name
def is_satisfied(self, board, pos):
if not isinstance(board, bsc.boardstate):
print('Error: improper board argument')
raise(ValueError)
if not isinstance(pos, str):
print('Error: position argument not a string')
raise(ValueError)
if pos.lower() not in ['o1', 'o2', 'p', 'd']:
print('Error: invalid position choice given')
raise(ValueError)
c, have = self.condition(board, pos)
if have: return(c)
else: return(None)
| [
"[email protected]"
] | |
d1f2676ff8d9ab324c0423935125dcea3d8e25d4 | 0affb6a667543c825dd44e85d6af6b7be5c8cf8b | /day11/day11_2.py | 08c98ae924fdeff84a8823e3116c184578360091 | [] | no_license | 233-wang-233/python | 2fa4c7a7c4d7ba2579cea89d9ba30203956942d4 | 0824b9b50fba7d4557a3de60e2c0b830d6dac196 | refs/heads/master | 2021-01-25T22:33:42.960067 | 2020-04-02T07:55:45 | 2020-04-02T07:55:45 | 243,209,061 | 0 | 0 | null | 2020-03-02T06:50:27 | 2020-02-26T08:26:24 | Python | UTF-8 | Python | false | false | 504 | py | '''
Two processes communicating with each other:
one prints "Ping" and the other prints "Pong"; the Pings and Pongs printed by the two processes add up to 10 in total.
'''
from multiprocessing import Process
from time import sleep
counter=0
def sub_task(string):
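    # each process runs this with its own copy of counter, so the limit of 10 applies per process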
global counter
while counter<10:
print(string,end=' ',flush=True)
counter+=1
sleep(0.01)
def main():
Process(target=sub_task,args=('ping',)).start()
Process(target=sub_task,args=('pong',)).start()
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
ed0c506a4e560bd296f8b23da088c23994a9bb50 | 4f51225cd157b0e31bc4268d2eb4d31159b074c4 | /simulation_main.py | 23c41147d3dc73ef20228cbc66bb91777ae90a93 | [] | no_license | Jarvis-X/pioneer_free_run | 3be03098349a0476a0b1ec91ce318b5aface54c7 | 5bad268f28d54a1da65930ec49ddde1404c5d878 | refs/heads/master | 2022-07-31T01:17:43.708467 | 2020-05-15T22:52:35 | 2020-05-15T22:52:35 | 264,040,655 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,711 | py | # Make sure to have the server side running in CoppeliaSim:
# in a child script of a CoppeliaSim scene, add following command
# to be executed just once, at simulation start:
#
# simRemoteApi.start(19999)
#
# then start simulation, and run this program.
#
# created by: Jiawei Xu
try:
import sim
import numpy as np
import cv2
import time
except:
print ('--------------------------------------------------------------')
print ('Library loading failed!')
print ('')
def filter_red(img):
red1 = cv2.inRange(img, (0, 220, 100), (5, 255, 255))
red2 = cv2.inRange(img, (175, 220, 100), (180, 255, 255))
return red1+red2
def filter_green(img):
green = cv2.inRange(img, (55, 220, 100), (65, 255, 255))
return green
def sensor_color(img):
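    # sum the binary mask down each image column, then group the columns into 16 bins normalized to [0, 1]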
sensor_array = np.sum(img, 0, dtype=np.uint16)
sensor_array_shrunken = [0.0]*16
for i in range(16):
sensor_array_shrunken[i] = np.sum(sensor_array[i*16:(i+1)*16], dtype=np.uint32)*1.0
if sensor_array_shrunken[i] > 300000:
sensor_array_shrunken[i] = 1.0
elif sensor_array_shrunken[i] < 60000:
sensor_array_shrunken[i] = 0.0
else:
sensor_array_shrunken[i] = (sensor_array_shrunken[i]-60000)/(300000.0-60000.0)
# print sensor_array_shrunken
return sensor_array_shrunken
def free_running(sonar_readings, img, ID, left_motor, right_motor):
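    # Braitenberg-style controller: start from a forward bias and let sonar and colour readings adjust the wheel speeds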
red_threshold = filter_red(img)
green_threshold = filter_green(img)
green_sensor_array = sensor_color(green_threshold)
red_sensor_array = sensor_color(red_threshold)
# cv2.imshow("red", red_threshold)
# cv2.imshow("green", green_threshold)
# cv2.waitKey(1)
v_left = 1.0
v_right = 1.0
for i in range(len(sonar_readings)):
v_left += sonar_readings[i]*braitenberg_sonar_L[i]
v_right += sonar_readings[i]*braitenberg_sonar_R[i]
print v_left, " ", v_right
# TODO: add red cube avoidance terms
for i in range(len(red_sensor_array)):
v_left += red_sensor_array[i] * braitenberg_red_L[i]
v_right += red_sensor_array[i] * braitenberg_red_R[i]
for i in range(len(green_sensor_array)):
v_left += green_sensor_array[i] * braitenberg_green_L[i]
v_right += green_sensor_array[i] * braitenberg_green_R[i]
print v_left, " ", v_right
# print v_left, " ", v_right
sim.simxSetJointTargetVelocity(ID, left_motor, v_left, sim.simx_opmode_oneshot)
sim.simxSetJointTargetVelocity(ID, right_motor, v_right, sim.simx_opmode_oneshot)
def read_image(image_ready, ID, handler):
res, resolution, image = sim.simxGetVisionSensorImage(ID, handler, 0, sim.simx_opmode_buffer)
if res == sim.simx_return_ok:
if not image_ready:
print "image OK!!!"
image_ready = True
img = np.array(image, dtype=np.uint8)
img.resize([resolution[1], resolution[0], 3])
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
return img, image_ready
elif res == sim.simx_return_novalue_flag:
if image_ready:
print "no image"
image_ready = False
return np.array([], dtype=np.uint8), image_ready
else:
print "error: " + str(res)
return np.array([], dtype=np.uint8), image_ready
def read_sonar(ID, handlers, sonar_ready):
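    # convert raw sonar hits into proximity values in [0, 1] (1 = obstacle very close, 0 = clear)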
points = [None]*8
states = [False]*8
for i in range(8):
res, states[i], points[i], _, normal_vec = sim.simxReadProximitySensor(ID, handlers[i], sim.simx_opmode_buffer)
dists = [i[2] for i in points]
if sonar_ready:
for i in range(len(dists)):
if states[i] and dists[i] < 0.5:
if dists[i] < 0.2:
dists[i] = 0.2
# map how close an obstacle is to the robot to [0, 1]
dists[i] = 1.0 - (dists[i] - 0.2) / (0.5 - 0.2)
else:
dists[i] = 0.0
return dists, sonar_ready
else:
flag = True
for i in range(len(dists)):
if dists[i] == 0.0:
flag = False
break
if flag:
sonar_ready = True
return None, sonar_ready
if __name__ == "__main__":
print ('Program started')
sim.simxFinish(-1) # just in case, close all opened connections
clientID = sim.simxStart('127.0.0.1', 19999, True, True, 5000, 5) # Connect to CoppeliaSim
if clientID != -1:
print ('Connected to remote API server')
# Now try to retrieve data in a blocking fashion (i.e. a service call):
res, objs = sim.simxGetObjects(clientID, sim.sim_handle_all, sim.simx_opmode_blocking)
if res == sim.simx_return_ok:
print ('Number of objects in the scene: ', len(objs))
else:
print ('Remote API function call returned with error code: ', res)
time.sleep(2)
# get vision sensor handler
print 'Vision Sensor object handling'
res, veh_camera = sim.simxGetObjectHandle(clientID, 'veh_camera', sim.simx_opmode_oneshot_wait)
# get sonor handler
print 'Sonar object handling'
veh_sonar = [None]*8
for i in range(8):
res, veh_sonar[i] = sim.simxGetObjectHandle(clientID, 'Pioneer_p3dx_ultrasonicSensor'+'{}'.format(i+1),
sim.simx_opmode_oneshot_wait)
# print res == sim.simx_return_ok
# get left motor handler
res, veh_left_motor = sim.simxGetObjectHandle(clientID, 'Pioneer_p3dx_leftMotor', sim.simx_opmode_oneshot_wait)
# print res == sim.simx_return_ok
# get right motor handler
res, veh_right_motor = sim.simxGetObjectHandle(clientID, 'Pioneer_p3dx_rightMotor', sim.simx_opmode_oneshot_wait)
# print res == sim.simx_return_ok
# let the server prepare the first image
print 'Getting first image'
res, resolution, image = sim.simxGetVisionSensorImage(clientID, veh_camera, 0, sim.simx_opmode_streaming)
image_ready_flag = False
# let the server prepare the first sonar reading
points = [None] * 8
for i in range(8):
res, state, points[i], _, normal_vec = sim.simxReadProximitySensor(clientID, veh_sonar[i],
sim.simx_opmode_streaming)
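        # per-sensor mixing weights: sonar and red-cube readings steer the robot away, green readings attract it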
braitenberg_sonar_L = [0.2, 0.0, -0.2, -0.4, -0.6, -0.8, -1.0, -1.2]
braitenberg_sonar_R = [-1.2, -1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2]
# TODO: fix the red cube avoidance
braitenberg_red_L = [-0.65, -0.6, -0.55, -0.5, -0.45, -0.4, -0.35, -0.3, -0.25, -0.2, -0.15, -0.1, -0.05, 0.0, 0.05, 0.10]
braitenberg_red_R = [0.10, 0.05, 0.0, -0.05, -0.1, -0.15, -0.2, -0.25, -0.3, -0.35, -0.4, -0.45, -0.5, -0.55, -0.6, -0.65]
braitenberg_green_L = [0.8, 0.75, 0.7, 0.65, 0.6, 0.55, 0.5, 0.45, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15, 0.1, 0.05]
braitenberg_green_R = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8]
# braitenberg_sonar_L = [0.6, 0.8, 0.6, 0.4, -0.4, -0.6, -0.8, -0.6]
# braitenberg_sonar_R = [-0.6, -0.8, -0.6, -0.4, 0.4, 0.6, 0.8, 0.6]
# print [j[2] for j in points]
sonar_ready_flag = False
# keep running until the server shuts down
while sim.simxGetConnectionId(clientID) != -1:
image, image_ready_flag = read_image(image_ready_flag, clientID, veh_camera)
detections, sonar_ready_flag = read_sonar(clientID, veh_sonar, sonar_ready_flag)
# print detections
# if image_ready_flag:
# cv2.imshow("image", image)
# cv2.waitKey(1)
            if image_ready_flag and sonar_ready_flag and detections is not None:
free_running(detections, image, clientID, veh_left_motor, veh_right_motor)
cv2.destroyAllWindows()
# Now send some data to CoppeliaSim in a non-blocking fashion:
# sim.simxAddStatusbarMessage(clientID, 'Hello CoppeliaSim!', sim.simx_opmode_oneshot)
# Before closing the connection to CoppeliaSim, make sure that the last command sent out had time to arrive. You
# can guarantee this with (for example):
sim.simxGetPingTime(clientID)
# Now close the connection to CoppeliaSim:
sim.simxFinish(clientID)
else:
print 'Failed connecting to remote API server'
print 'Program ended'
| [
"[email protected]"
] | |
626e284b40ec0447bfcba31a165d86827eb7df2a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /gHrMmA7emP6CFAMnb_6.py | 35eeb43f5be552b55e650249bf1ff464b8e37754 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py |
def is_apocalyptic(n):
L=str(2**n).split('666')
if len(L)==1:
return "Safe"
elif len(L)==2:
return "Single"
elif len(L)==3:
return "Double"
elif len(L)==4:
return "Triple"
| [
"[email protected]"
] | |
4e8d14003c2e112ef076b89c4c8a3ad6613f9a2c | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/customization_failed.py | b63b14e03d5fddb6d06ae4f32d77239d433f8930 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def CustomizationFailed(vim, *args, **kwargs):
'''The customization sequence in the guest failed.'''
obj = vim.client.factory.create('{urn:vim25}CustomizationFailed')
# do some validation checking...
if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 5 arguments got: %d' % (len(args) + len(kwargs)))
required = [ 'template', 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'logLocation', 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| [
"[email protected]"
] | |
85041057b18077c038426fd96461f5dbd0ed30a7 | 747febe786dd6b7fd6c63cfe73dbe3023354daa8 | /src/the_tale/the_tale/game/companions/tests/test_abilities_effects.py | 433f74ad27be13a54043c1f878032a3d17dfde97 | [
"BSD-3-Clause"
] | permissive | the-tale/the-tale | 4e4b8d91dc873a5fb935fe58e9721a877baa6d3f | e8450bd2332344da805b1851e728da5a3e5bf0ef | refs/heads/develop | 2023-08-01T13:53:46.835667 | 2022-12-25T18:04:56 | 2022-12-25T18:04:56 | 1,949,167 | 98 | 52 | BSD-3-Clause | 2023-02-15T18:57:33 | 2011-06-24T18:49:48 | Python | UTF-8 | Python | false | false | 35,535 | py |
import smart_imports
smart_imports.all()
effects = companions_abilities_effects
MODIFIERS = heroes_relations.MODIFIERS
class BaseEffectsTests(utils_testcase.TestCase):
def setUp(self):
super(BaseEffectsTests, self).setUp()
game_logic.create_test_map()
self.account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(self.account.id)
self.hero = self.storage.accounts_to_heroes[self.account.id]
self.companion_record = logic.create_companion_record(utg_name=game_names.generator().get_test_name(),
description='description',
type=tt_beings_relations.TYPE.random(),
max_health=10,
dedication=relations.DEDICATION.random(),
archetype=game_relations.ARCHETYPE.random(),
mode=relations.MODE.random(),
abilities=companions_abilities_container.Container(),
communication_verbal=tt_beings_relations.COMMUNICATION_VERBAL.random(),
communication_gestures=tt_beings_relations.COMMUNICATION_GESTURES.random(),
communication_telepathic=tt_beings_relations.COMMUNICATION_TELEPATHIC.random(),
intellect_level=tt_beings_relations.INTELLECT_LEVEL.random(),
structure=tt_beings_relations.STRUCTURE.random(),
features=frozenset((tt_beings_relations.FEATURE.random(), tt_beings_relations.FEATURE.random())),
movement=tt_beings_relations.MOVEMENT.random(),
body=tt_beings_relations.BODY.random(),
size=tt_beings_relations.SIZE.random(),
orientation=tt_beings_relations.ORIENTATION.random(),
weapons=[artifacts_objects.Weapon(weapon=artifacts_relations.STANDARD_WEAPON.random(),
material=tt_artifacts_relations.MATERIAL.random(),
power_type=artifacts_relations.ARTIFACT_POWER_TYPE.random())],
state=relations.STATE.ENABLED)
self.hero.set_companion(logic.create_companion(self.companion_record))
def apply_ability(self, ability):
container = companions_abilities_container.Container(common=(),
start=frozenset((ability,)),
coherence=None,
honor=None,
peacefulness=None)
self.companion_record.abilities = container
self.hero.reset_accessors_cache()
def get_ability(self, *argv):
return random.choice([ability
for ability in effects.ABILITIES.records
if any(isinstance(ability.effect, effect) for effect in argv)])
class CommonTests(BaseEffectsTests):
def test_aprox(self):
self.assertEqual(effects.aprox(1, 2, 1), 1.2)
self.assertEqual(effects.aprox(1, 2, 2), 1.4)
self.assertEqual(effects.aprox(1, 2, 3), 1.6)
self.assertEqual(effects.aprox(1, 2, 4), 1.8)
self.assertEqual(effects.aprox(1, 2, 5), 2)
class CoherenceSpeedTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CoherenceSpeed(0.8)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COHERENCE_EXPERIENCE, 10), 8)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COHERENCE_EXPERIENCE, 11), 8.8)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COHERENCE_EXPERIENCE,)), 11), 11)
effect = effects.CoherenceSpeed(1.2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COHERENCE_EXPERIENCE, 10), 12)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COHERENCE_EXPERIENCE, 11), 13.2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COHERENCE_EXPERIENCE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CoherenceSpeed)
self.hero.companion.coherence = c.COMPANIONS_MAX_COHERENCE - 1
self.hero.companion.experience = 0
self.hero.companion.add_experience(10)
old_delta = self.hero.companion.experience
self.hero.companion.experience = 0
self.apply_ability(ability)
self.hero.companion.add_experience(10)
new_delta = self.hero.companion.experience
self.assertEqual(int(round(old_delta * ability.effect.multiplier_left)), new_delta)
class ChangeHabitsTests(BaseEffectsTests):
def test_effect(self):
effect = effects.ChangeHabits(habit_type=game_relations.HABIT_TYPE.HONOR,
habit_sources=(heroes_relations.HABIT_CHANGE_SOURCE.COMPANION_HONOR_NEUTRAL_1,
heroes_relations.HABIT_CHANGE_SOURCE.COMPANION_HONOR_NEUTRAL_2))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.HABITS_SOURCES, set()), set((heroes_relations.HABIT_CHANGE_SOURCE.COMPANION_HONOR_NEUTRAL_1,
heroes_relations.HABIT_CHANGE_SOURCE.COMPANION_HONOR_NEUTRAL_2)))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.HABITS_SOURCES,)), set()), set())
def check_habits_changed(self, honor, peacefulness, honor_check, peacefulness_check):
self.hero.habit_honor.set_habit(honor)
self.hero.habit_peacefulness.set_habit(peacefulness)
for habit_source in self.hero.companion.modify_attribute(heroes_relations.MODIFIERS.HABITS_SOURCES, set()):
self.hero.update_habits(habit_source)
self.assertTrue(honor_check(self.hero.habit_honor.raw_value))
self.assertTrue(peacefulness_check(self.hero.habit_peacefulness.raw_value))
def test_in_game__aggressive(self):
self.apply_ability(effects.ABILITIES.AGGRESSIVE)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v == -c.HABITS_BORDER,
peacefulness_check=lambda v: v < 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v == 0,
peacefulness_check=lambda v: v < c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v == c.HABITS_BORDER,
peacefulness_check=lambda v: v == -c.HABITS_BORDER)
def test_in_game__peaceful(self):
self.apply_ability(effects.ABILITIES.PEACEFUL)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v == -c.HABITS_BORDER,
peacefulness_check=lambda v: v > 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v == 0,
peacefulness_check=lambda v: v == c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v == c.HABITS_BORDER,
peacefulness_check=lambda v: v > -c.HABITS_BORDER)
def test_in_game__reserved(self):
self.apply_ability(effects.ABILITIES.RESERVED)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v == -c.HABITS_BORDER,
peacefulness_check=lambda v: v == 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v == 0,
peacefulness_check=lambda v: v < c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v == c.HABITS_BORDER,
peacefulness_check=lambda v: v > -c.HABITS_BORDER)
def test_in_game__canny(self):
self.apply_ability(effects.ABILITIES.CANNY)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v > -c.HABITS_BORDER,
peacefulness_check=lambda v: v == 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v == 0,
peacefulness_check=lambda v: v == c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v < c.HABITS_BORDER,
peacefulness_check=lambda v: v == -c.HABITS_BORDER)
def test_in_game__honest(self):
self.apply_ability(effects.ABILITIES.HONEST)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v > -c.HABITS_BORDER,
peacefulness_check=lambda v: v == 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v > 0,
peacefulness_check=lambda v: v == c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v == c.HABITS_BORDER,
peacefulness_check=lambda v: v == -c.HABITS_BORDER)
def test_in_game__sneaky(self):
self.apply_ability(effects.ABILITIES.SNEAKY)
self.check_habits_changed(honor=-c.HABITS_BORDER, peacefulness=0,
honor_check=lambda v: v == -c.HABITS_BORDER,
peacefulness_check=lambda v: v == 0)
self.check_habits_changed(honor=0, peacefulness=c.HABITS_BORDER,
honor_check=lambda v: v < 0,
peacefulness_check=lambda v: v == c.HABITS_BORDER)
self.check_habits_changed(honor=c.HABITS_BORDER, peacefulness=-c.HABITS_BORDER,
honor_check=lambda v: v < c.HABITS_BORDER,
peacefulness_check=lambda v: v == -c.HABITS_BORDER)
class QuestMoneyRewardTests(BaseEffectsTests):
def test_effect(self):
effect = effects.QuestMoneyReward(0.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.QUEST_MONEY_REWARD, 10), 10.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.QUEST_MONEY_REWARD, 11), 11.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.QUEST_MONEY_REWARD,)), 11), 11)
effect = effects.QuestMoneyReward(2.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.QUEST_MONEY_REWARD, 10), 12)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.QUEST_MONEY_REWARD, 11), 13)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.QUEST_MONEY_REWARD,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.QuestMoneyReward)
with self.check_changed(lambda: self.hero.quest_money_reward_multiplier()):
self.apply_ability(ability)
class MaxBagSizeTests(BaseEffectsTests):
def test_effect(self):
effect = effects.MaxBagSize(666)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.MAX_BAG_SIZE, 10), 676)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.MAX_BAG_SIZE, 11), 677)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.MAX_BAG_SIZE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.MaxBagSize)
with self.check_changed(lambda: self.hero.max_bag_size):
self.apply_ability(ability)
class PoliticsPowerTests(BaseEffectsTests):
def test_effect(self):
effect = effects.PoliticsPower(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.POWER, 11), 14.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.POWER, )), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.PoliticsPower)
with self.check_changed(lambda: self.hero.politics_power_modifier):
self.apply_ability(ability)
class MagicDamageBonusTests(BaseEffectsTests):
def test_effect(self):
effect = effects.MagicDamageBonus(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.MAGIC_DAMAGE, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.PHYSIC_DAMAGE, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.MAGIC_DAMAGE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.MagicDamageBonus)
with self.check_changed(lambda: self.hero.magic_damage_modifier):
self.apply_ability(ability)
class PhysicDamageBonusTests(BaseEffectsTests):
def test_effect(self):
effect = effects.PhysicDamageBonus(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.MAGIC_DAMAGE, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.PHYSIC_DAMAGE, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.PHYSIC_DAMAGE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.PhysicDamageBonus)
with self.check_changed(lambda: self.hero.physic_damage_modifier):
self.apply_ability(ability)
class SpeedTests(BaseEffectsTests):
def test_effect(self):
effect = effects.Speed(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.SPEED, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.SPEED,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.Speed)
with self.check_changed(lambda: self.hero.move_speed):
self.apply_ability(ability)
class BattleAbilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.BattleAbilityFireball()
self.assertEqual(effect._modify_attribute({}, MODIFIERS.INITIATIVE, 10), 10.25)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.ADDITIONAL_ABILITIES, []), [effect.ABILITY])
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.INITIATIVE, MODIFIERS.ADDITIONAL_ABILITIES)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.BattleAbilityHit,
effects.BattleAbilityStrongHit,
effects.BattleAbilityRunUpPush,
effects.BattleAbilityFireball,
effects.BattleAbilityPoisonCloud,
effects.BattleAbilityFreezing)
with self.check_changed(lambda: self.hero.initiative):
with self.check_changed(lambda: len(self.hero.companion.modify_attribute(heroes_relations.MODIFIERS.ADDITIONAL_ABILITIES,
heroes_relations.MODIFIERS.ADDITIONAL_ABILITIES.default()))):
self.apply_ability(ability)
class InitiativeTests(BaseEffectsTests):
def test_effect(self):
effect = effects.Initiative(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.INITIATIVE, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.INITIATIVE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.Initiative)
with self.check_changed(lambda: self.hero.initiative):
self.apply_ability(ability)
class BattleProbabilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.BattleProbability(1.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.BATTLES_PER_TURN, 10), 11.5)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.BATTLES_PER_TURN,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.BattleProbability)
with self.check_changed(lambda: self.hero.battles_per_turn_summand):
self.apply_ability(ability)
class LootProbabilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.LootProbability(2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.LOOT_PROBABILITY, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.LOOT_PROBABILITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.LootProbability)
with self.check_changed(lambda: self.hero.loot_probability(mobs_storage.mobs.all()[0])):
self.apply_ability(ability)
class CompanionDamageTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionDamage(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_DAMAGE, 10), 13)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_DAMAGE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionDamage)
with mock.patch('the_tale.game.balance.constants.COMPANIONS_BONUS_DAMAGE_PROBABILITY', 6666666666):
with self.check_changed(lambda: self.hero.companion_damage):
self.apply_ability(ability)
class CompanionDamageProbabilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionDamageProbability(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_DAMAGE_PROBABILITY, 10), 30)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_DAMAGE_PROBABILITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionDamageProbability)
with self.check_changed(lambda: self.hero.companion_damage_probability):
self.apply_ability(ability)
class CompanionStealMoneyTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionStealMoney(3)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_STEAL_MONEY))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_STEAL_MONEY_MULTIPLIER))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_STEAL_MONEY, MODIFIERS.COMPANION_STEAL_MONEY_MULTIPLIER))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_STEAL_MONEY, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_STEAL_MONEY_MULTIPLIER, 10), 30)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_STEAL_MONEY, MODIFIERS.COMPANION_STEAL_MONEY_MULTIPLIER)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionStealMoney)
with self.check_changed(lambda: self.hero.can_companion_steal_money()):
with self.check_changed(lambda: self.hero.companion_steal_money_modifier):
self.apply_ability(ability)
class CompanionStealItemTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionStealItem(3)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_STEAL_ITEM))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_STEAL_ITEM_MULTIPLIER))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_STEAL_ITEM, MODIFIERS.COMPANION_STEAL_ITEM))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_STEAL_ITEM, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_STEAL_ITEM_MULTIPLIER, 10), 30)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_STEAL_ITEM, MODIFIERS.COMPANION_STEAL_ITEM_MULTIPLIER)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionStealItem)
with self.check_changed(lambda: self.hero.can_companion_steal_item()):
with self.check_changed(lambda: self.hero.companion_steal_artifact_probability_multiplier):
self.apply_ability(ability)
class CompanionSparePartsTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionSpareParts()
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_SPARE_PARTS))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_SPARE_PARTS,))))
def test_in_game(self):
ability = self.get_ability(effects.CompanionSpareParts)
with self.check_changed(lambda: self.hero.can_companion_broke_to_spare_parts()):
self.apply_ability(ability)
class CompanionSayWisdomTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionSayWisdom(3)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_SAY_WISDOM))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_SAY_WISDOM_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_SAY_WISDOM, MODIFIERS.COMPANION_SAY_WISDOM_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_SAY_WISDOM, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_SAY_WISDOM_PROBABILITY, 10), 30)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_SAY_WISDOM, MODIFIERS.COMPANION_SAY_WISDOM_PROBABILITY)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionSayWisdom)
with self.check_changed(lambda: self.hero.can_companion_say_wisdom()):
with self.check_changed(lambda: self.hero.companion_say_wisdom_probability):
self.apply_ability(ability)
class CompanionExpPerHealTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionExpPerHeal(2)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_EXP_PER_HEAL))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_EXP_PER_HEAL_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EXP_PER_HEAL, MODIFIERS.COMPANION_EXP_PER_HEAL_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EXP_PER_HEAL, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EXP_PER_HEAL_PROBABILITY, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EXP_PER_HEAL, MODIFIERS.COMPANION_EXP_PER_HEAL_PROBABILITY)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionExpPerHeal)
with self.check_changed(lambda: self.hero.can_companion_exp_per_heal()):
with self.check_changed(lambda: self.hero.companion_exp_per_heal_probability):
self.apply_ability(ability)
class DoubleReligionProfitTests(BaseEffectsTests):
def test_effect(self):
effect = effects.DoubleReligionProfit(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.DOUBLE_RELIGION_PROFIT, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.DOUBLE_RELIGION_PROFIT,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.DoubleReligionProfit)
with self.check_changed(lambda: self.hero.double_religion_profit_probability):
self.apply_ability(ability)
class CompanionEatCorpsesTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionEatCorpses(3)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_EAT_CORPSES))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_EAT_CORPSES_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EAT_CORPSES, MODIFIERS.COMPANION_EAT_CORPSES_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EAT_CORPSES, 1), 1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EAT_CORPSES_PROBABILITY, 1), 3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EAT_CORPSES, MODIFIERS.COMPANION_EAT_CORPSES_PROBABILITY)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionEatCorpses)
with self.check_changed(lambda: self.hero.can_companion_eat_corpses()):
with self.check_changed(lambda: self.hero.companion_eat_corpses_probability):
self.apply_ability(ability)
class CompanionRegenerateTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionRegenerate(2)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_REGENERATE))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_REGENERATE_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_REGENERATE, MODIFIERS.COMPANION_REGENERATE_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_REGENERATE, 10), 10)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_REGENERATE_PROBABILITY, 10), 20)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_REGENERATE, MODIFIERS.COMPANION_REGENERATE_PROBABILITY)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionRegenerate)
with self.check_changed(lambda: self.hero.can_companion_regenerate()):
with self.check_changed(lambda: self.hero.companion_regenerate_probability):
self.apply_ability(ability)
class CompanionEatAndDiscountTest(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionEat(0.5)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_MONEY_FOR_FOOD))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_MONEY_FOR_FOOD,))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_MONEY_FOR_FOOD, 2), 1.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_MONEY_FOR_FOOD,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionEat)
with self.check_changed(lambda: self.hero.can_companion_eat()):
with self.check_changed(lambda: self.hero.companion_money_for_food_multiplier):
self.apply_ability(ability)
class CompanionDrinkArtifactTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionDrinkArtifact(0.5)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_DRINK_ARTIFACT))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_DRINK_ARTIFACT_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_DRINK_ARTIFACT, MODIFIERS.COMPANION_DRINK_ARTIFACT_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_DRINK_ARTIFACT, 2), 2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_DRINK_ARTIFACT_PROBABILITY, 2), 1.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_DRINK_ARTIFACT, MODIFIERS.COMPANION_DRINK_ARTIFACT_PROBABILITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionDrinkArtifact)
with self.check_changed(lambda: self.hero.can_companion_drink_artifact()):
with self.check_changed(lambda: self.hero.companion_drink_artifact_probability):
self.apply_ability(ability)
class CompanionExorcistTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionExorcist(0.5)
self.assertTrue(effect._check_attribute(MODIFIERS.COMPANION_EXORCIST))
self.assertFalse(effect._check_attribute(MODIFIERS.COMPANION_EXORCIST_PROBABILITY))
self.assertFalse(effect._check_attribute(MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EXORCIST, MODIFIERS.COMPANION_EXORCIST_PROBABILITY))))
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EXORCIST, 2), 2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_EXORCIST_PROBABILITY, 2), 1.0)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_EXORCIST, MODIFIERS.COMPANION_EXORCIST_PROBABILITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionExorcist)
with self.check_changed(lambda: self.hero.can_companion_do_exorcism()):
with self.check_changed(lambda: self.hero.companion_do_exorcism_probability):
self.apply_ability(ability)
class RestLenghtTests(BaseEffectsTests):
def test_effect(self):
effect = effects.RestLenght(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.REST_LENGTH, 12), 36)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.REST_LENGTH,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.RestLenght)
with self.check_changed(lambda: self.hero.rest_length):
self.apply_ability(ability)
class IDLELenghtTests(BaseEffectsTests):
def test_effect(self):
effect = effects.IDLELenght(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.IDLE_LENGTH, 12), 36)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.IDLE_LENGTH,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.IDLELenght)
with self.check_changed(lambda: self.hero.idle_length):
self.apply_ability(ability)
class CompanionBlockProbabilityTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionBlockProbability(3)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_BLOCK_PROBABILITY, 12), 36)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_BLOCK_PROBABILITY, )), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionBlockProbability)
with self.check_changed(lambda: self.hero.companion_block_probability_multiplier):
self.apply_ability(ability)
class HucksterTests(BaseEffectsTests):
def test_effect(self):
effect = effects.Huckster(buy_bonus_left=3, buy_bonus_right=3,
sell_bonus_left=2, sell_bonus_right=2)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.BUY_PRICE, 12), 15)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.BUY_PRICE, MODIFIERS.SELL_PRICE)), 11), 11)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.SELL_PRICE, 130), 132)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.BUY_PRICE, MODIFIERS.SELL_PRICE)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.Huckster)
with self.check_changed(self.hero.buy_price):
with self.check_changed(self.hero.sell_price):
self.apply_ability(ability)
class EtherealMagnetTests(BaseEffectsTests):
def test_effect(self):
effect = effects.EtherealMagnet(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.CHARACTER_QUEST_PRIORITY, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.CHARACTER_QUEST_PRIORITY,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.EtherealMagnet)
with self.check_changed(lambda: self.hero.attribute_modifier(heroes_relations.MODIFIERS.CHARACTER_QUEST_PRIORITY)):
self.apply_ability(ability)
class CompanionTeleportTests(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionTeleport(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_TELEPORTATOR, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_TELEPORTATOR,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionTeleport)
with self.check_changed(lambda: self.hero.companion_teleport_probability):
self.apply_ability(ability)
class CompanionFly(BaseEffectsTests):
def test_effect(self):
effect = effects.CompanionFly(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_FLYER, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_FLYER,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.CompanionFly)
with self.check_changed(lambda: self.hero.companion_fly_probability):
self.apply_ability(ability)
class UnsociableTests(BaseEffectsTests):
def test_effect(self):
effect = effects.Unsociable(0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.COMPANION_LEAVE_IN_PLACE, 0), 0.1)
self.assertEqual(effect._modify_attribute({}, MODIFIERS.random(exclude=(MODIFIERS.COMPANION_LEAVE_IN_PLACE,)), 11), 11)
def test_in_game(self):
ability = self.get_ability(effects.Unsociable)
with self.check_changed(lambda: self.hero.companion_leave_in_place_probability):
self.apply_ability(ability)
| [
"[email protected]"
] | |
cefea000be2b8713b9d4ea548c735c4984caf7de | 3904a5773c5aa047692895dce1225be7d84f5cc7 | /ML_AI_TechWithTim/K-Means/K_Means.py | f33bc323b87c4aba7ff873f2b6d3cbe38641d449 | [] | no_license | snehilk1312/ML_1 | 063038586296c4f6f0ab92422a6c60dd007c4068 | 8e3b081b1037ab999ca78fa282ce7041730d082a | refs/heads/master | 2020-09-07T20:01:45.509060 | 2020-03-15T15:44:54 | 2020-03-15T15:44:54 | 220,898,676 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | # Importing modules
import numpy as np
import sklearn
from sklearn.preprocessing import scale
from sklearn.datasets import load_digits
from sklearn.cluster import KMeans
from sklearn import metrics
# Loading Data sets
digits = load_digits()
data = scale(digits.data)
y = digits.target
k = len(np.unique(y)) # or here k=10
samples, features = data.shape
def bench_k_means(estimator, name, data):
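    # fit the estimator and print its inertia plus standard clustering metrics against the true digit labels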
estimator.fit(data)
print('%-9s\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, estimator.inertia_,
metrics.homogeneity_score(y, estimator.labels_),
metrics.completeness_score(y, estimator.labels_),
metrics.v_measure_score(y, estimator.labels_),
metrics.adjusted_rand_score(y, estimator.labels_),
metrics.adjusted_mutual_info_score(y, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean')))
clf = KMeans(n_clusters=k, init="random", n_init=10)
bench_k_means(clf, "1", data)
| [
"[email protected]"
] | |
4b4f20ebd7680757a8764a77720b31af1cef4c8a | 17d4b72032e404ed45057c4fc5af04670b7c27b5 | /7.11-Number Pattern.py | 8f7ad45768d4c5b5237b66dcf1a3bcae9253a923 | [] | no_license | Mahendra710/Number_Pattern | 0e10e93dec6f0f28c6e0916f813bfe4340f647db | 2d2a3e0fb1b281092465700e965a87350227aafc | refs/heads/main | 2023-04-18T01:44:27.456717 | 2021-05-12T05:34:11 | 2021-05-12T05:34:11 | 366,267,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | num=input("Enter an odd length number:")
length=len(num)
for i in range(length):
for j in range(length):
if i==j or i+j==length-1:
print(num[i],end=" ")
else:
print(" ",end=" ")
print() | [
"[email protected]"
] | |
b4577f6dc2ca7a3c75449f92e21cad3aa1b6b5fe | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2814/60652/240209.py | 19b8d713af73e09dfece90f18c9ba12646de0b4a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | n = int(input())
l=list(map(int,input().split()))
l.sort()
num_s=0
wait_time=0
for i in l:
if i>=wait_time:
num_s+=1
wait_time+=i
print(num_s) | [
"[email protected]"
] | |
ed6e780555fec531ecbb2776df56d400039000ec | bfeb52b7c0faa33cff389cd6c197266aeeb5002a | /lab1/gauss.py | 835866b807dda95a9b53e3ec1b4a549359588042 | [] | no_license | NaylyaZh99/numeric_methods | e7e737f56ca865d5ddd6debdc0eee5ed29c91ecd | cef2f4f4a9a0a13b8a90ce62f23f64c99e0fd396 | refs/heads/master | 2022-01-28T05:12:47.791370 | 2019-06-01T03:39:12 | 2019-06-01T03:39:12 | 189,677,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | import numpy as np
import time
import matplotlib.pyplot as plt
def gauss(n, A, f):
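    # solve A x = f by Gaussian elimination without pivoting; A and f are modified in place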
res = np.zeros(n)
for k in range(n):
for j in range(k + 1, n):
A[k][j] = A[k][j] / A[k][k]
f[k] = f[k] / A[k][k]
A[k][k] = 1
for i in range(k + 1, n):
for j in range(k + 1, n):
A[i][j] = A[i][j] - A[k][j] * A[i][k];
f[i] = f[i] - f[k] * A[i][k]
A[i][k] = 0
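    # back substitution on the resulting upper-triangular system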
for i in range(n - 1, -1, -1):
res[i] = f[i]
for j in range(i + 1, n):
res[i] = res[i] - A[i][j] * res[j]
return res
X = np.array([])
Y = np.array([])
Y_lib = np.array([])
n = int(input())
shift = int(input())
wastedTime = 0
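# grow the system size by shift each iteration until a single solve takes more than one second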
while wastedTime <= 1:
X = np.append(X, n)
A = np.random.rand(n,n)
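    # make A strictly diagonally dominant so elimination without pivoting stays stable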
for i in range(n):
Sum = 0
for j in range(n):
if j != i:
Sum += abs(A[i][j])
A[i][i] += Sum
A_lib = np.array(A)
f = np.random.rand(n)
f_lib = np.array(f)
start = time.time()
x = gauss(n, A, f)
wastedTime = time.time() - start
print(wastedTime)
Y = np.append(Y, wastedTime)
start = time.time()
x_lib = np.linalg.solve(A_lib, f_lib)
wastedTime_lib = time.time() - start
Y_lib = np.append(Y_lib, wastedTime_lib)
n = n + shift
print(X)
print(Y)
print(Y_lib)
plt.plot(X, Y)
plt.plot(X, Y_lib)
plt.xlabel('matrix size')
plt.ylabel('sec')
plt.legend(("my implementation", "library function"))
plt.show()
| [
"[email protected]"
] | |
d1e360b771fc125f22caf4d80ca157b3557aa97c | 96d7b268ed589c3cf2caa47b5478f6c24f883e34 | /爬取不得解视频.py | 6bff5b6bf076a93dbe23f3c99b2ff5b4c21e63dd | [] | no_license | 494589939/learn-python3- | 2c5836ae2af415a606f2a0398b175c8e7fa6686c | b13b600ab334f7fa04183b777a0579388aab9a02 | refs/heads/master | 2021-01-20T15:27:07.728339 | 2018-03-18T08:46:19 | 2018-03-18T08:46:19 | 82,815,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | #! /usr/bin/env python
from bs4 import BeautifulSoup
import requests
import urllib.request
# fetch a URL and parse the response with BeautifulSoup
def req(url):
try:
headers={"User-Agent":"Mozilla/5.0"}
html=requests.get(url,headers=headers)
html.raise_for_status()
html.encoding=html.apparent_encoding
soup=BeautifulSoup(html.text,"html.parser")
return soup
except:
print("输入错误")
#获取视频URL地址并下载
def filt(soup):
for names in soup.select(".j-video-c"):#筛选名字
for video in names.select(".j-video"):#筛选URL
name=names.get("data-title")[:-3]
mp=video.get("data-mp4")
urllib.request.urlretrieve(mp,r'D:\python项目\video\%s.mp4'%name)
print("正在下载:"+name+mp)
if __name__=="__main__":
page=input("请输入要结束的页数:")
i=1
while i <=int(page):
url="http://www.budejie.com/video/%s"%i
filt(req(url))
i+=1
| [
"[email protected]"
] | |
e5e84e25c5a9c701c6e2880d5b7603b6945e33a5 | b685f62e0f41349b39fc573f10338fa6148e3dd7 | /wucaicheng_Sign v2.py | e5bc18c47b8bf3601f209f9c038b8f1b27296bb0 | [] | no_license | ifr4me/MyPython | 61e46fa1c9e5886b589ab104eaf50690623ff2a2 | 6e4b64d3c3dad30bbcb5e76a48b704f10d73b838 | refs/heads/master | 2021-09-12T13:27:30.526976 | 2018-04-17T08:47:45 | 2018-04-17T08:47:45 | 103,880,499 | 0 | 0 | null | 2017-09-18T02:16:11 | 2017-09-18T02:16:11 | null | UTF-8 | Python | false | false | 2,069 | py | # coding=utf-8
#cron 58 6-10/1 * * * python /root/wucaicheng.py
__author__="iframe"
__Date__="20171018"
import requests,json,time
import logging
LOG_FILE = '/var/log/wucaicheng.log'
#LOG_FILE = 'd:\\wucaicheng.log'
# If today's date is already in LOG_FILE, exit; schedule the cron job to run several times a day so a failed attempt (e.g. server unavailable) can be retried.
date = time.strftime("%Y-%m-%d", time.localtime())
print date
log = open(LOG_FILE, "a+")
try:
all_log = log.read()
result = all_log.find(date)
print result
if result > 0 :
exit()
finally:
log.close()
# prepare login and sign
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'X - Requested - With':'XMLHttpRequest',
'Origin':'http://bj.wucaicheng.com.cn'}
wucaichengUrl = "http://bj.wucaicheng.com.cn/html/member/api/1/login"
signUrl = 'http://bj.wucaicheng.com.cn/html/member/api/1/sign'
postData = {'type':'2','phone':'18611111111','phonecode':'','password':'yourpassword'}
log_level = logging.DEBUG
logger = logging.getLogger("loggingmodule.NomalLogger")
handler = logging.FileHandler(LOG_FILE)
formatter = logging.Formatter("[%(asctime)s]%(message)s")
#formatter = logging.Formatter("[%(levelname)s][%(funcName)s][%(asctime)s]%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(log_level)
#logger.info("this is a info msg!")
# request = urllib2.Request(wucaichengurl ,data=postData ,headers = headers)
# response = urllib2.urlopen(url = wucaichengurl, data=urllib.urlencode(postData))
# print response .read()
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
s = requests.session()
login = s.post(wucaichengUrl, data=postData, headers=headers)
print login.content
response = s.post(signUrl, cookies=login.cookies, headers = headers)
print response.content
decode = json.loads(response.content)
print decode['meta']['msg']
msg = '%s\n%s\n%s\n' % (login.content , response.content , decode['meta']['msg'])
#print msg
logger.info(msg=msg)
s.close()
| [
"[email protected]"
] | |
bf604c5c88de4b1652ed6c32594f61c0e84a082f | b6a59c78b4143441077f9ce81c9a6951687f9103 | /quiz/common/templatetags/common_tags.py | c22c495f3760396d2cbf01c3943b9cb2026abee6 | [] | no_license | EkaterinaEIvanova/quiz | 7389bd26eb891ba5a7033b91698321cbba7d2d7d | 6f93a5d6e604f127be0d29e8eebbb07c10eb9d47 | refs/heads/master | 2023-03-22T00:54:27.100204 | 2021-03-10T07:35:08 | 2021-03-10T07:35:08 | 346,270,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from django import template
register = template.Library()
@register.simple_tag()
def get_name_or_email(user):
name = user.name if user.name else user.email
return name
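
# Illustrative usage sketch (added for clarity; not part of the original file).
# In a Django template this simple_tag would typically be used as below; the
# context variable name is an assumption:
#
#   {% load common_tags %}
#   {% get_name_or_email request.user as display_name %}
#   {{ display_name }}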
| [
"[email protected]"
] | |
8c4bc2f8647439b9567f65c61e77308b8808c395 | 63b0fed007d152fe5e96640b844081c07ca20a11 | /ABC/ABC200~ABC299/ABC275/a.py | 8e0374be934c1e0a79209fb5d3a1594e525966c5 | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | n = int(input())
h = list(map(int, input().split()))
maxh = max(h)
ans = h.index(maxh)+1
print(ans)
| [
"[email protected]"
] | |
df35d4e2bc4e83da4aa1b6939db8d9e229e0bd70 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/type_agency_profile_level_4.py | 5ee1ce876f63b649381647bc034c48d77dea4ecb | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 318 | py | from __future__ import annotations
from enum import Enum
__NAMESPACE__ = "http://www.travelport.com/schema/common_v37_0"
class TypeAgencyProfileLevel4(Enum):
"""
Profile levels in the Agency Hierarchy.
"""
AGENCY = "Agency"
BRANCH = "Branch"
BRANCH_GROUP = "BranchGroup"
AGENT = "Agent"
| [
"[email protected]"
] | |
6ffac5ea208ba2d6e273b1fdd1775d31f9762364 | 9eab77cb998e94ceb2b2d08738b05a98982505f1 | /sentiment-analysis/pythoncodes/01-text-to-id.py | 16b8e56535efcf07addf12250c40f7bd8382a0a7 | [] | no_license | behrouzmadahian/python | 1584dd13cde8531e69bb6fab76f148dc3fc0da57 | 5d4dbde8d570623fe785e78a3e45cd05ea80aa08 | refs/heads/master | 2021-06-28T16:53:09.927450 | 2020-09-21T14:02:55 | 2020-09-21T14:02:55 | 156,713,696 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | import re
from nltk.corpus import stopwords
from nltk import word_tokenize
stop_words = stopwords.words('english')
def remove_stop_words(word_list, stopwords):
    filtered_list = [w for w in word_list if w not in stopwords]
return filtered_list
# Removes punctuation, parentheses, question marks, etc., and leaves only alphanumeric characters
def clean_sentences(string):
strip_special_chars = re.compile("[^a-zA-Z0-9_]+")
string = string.lower().replace("<br />", " ")
    return re.sub(strip_special_chars, " ", string.lower())  # keep a space so word boundaries survive for word_tokenize
def text_to_ids(text, vocab_list):
text_cleaned = clean_sentences(text)
word_list = word_tokenize(text_cleaned)
word_list = remove_stop_words(word_list, stop_words)
word_inds = [vocab_list.index(w) for w in word_list]
return word_inds
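
# Illustrative sketch (added for clarity; not in the original script). The
# vocabulary below is hypothetical; note that vocab_list.index(w) raises a
# ValueError for any out-of-vocabulary word.
#
#   vocab = ['movie', 'great', 'acting', 'plot']
#   ids = text_to_ids("A great movie, great acting!", vocab)
#   # -> indices of the surviving tokens, e.g. [1, 0, 1, 2]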
| [
"[email protected]"
] | |
f0e0f44f0622cc09917ec205144aca3599559424 | 302734e285f1c77a1985f72da18d92810dda6151 | /features/environment.py | f2db32270068fb2417c84bc95d4ec33200c490c4 | [
"MIT"
] | permissive | zhammer/morning-cd-playlists | 924dd297188d86d019d7f176a9cf49ff33f31237 | 10426bc1435d4f34761d8782ad0599f40865596f | refs/heads/master | 2022-12-11T02:29:12.972491 | 2019-02-07T00:35:18 | 2019-02-07T00:35:18 | 162,019,835 | 0 | 0 | MIT | 2022-12-10T03:33:27 | 2018-12-16T16:28:45 | Python | UTF-8 | Python | false | false | 605 | py | import os
import behave
from features.support.fixtures import (
with_aws_lambda_environment_variables,
with_empty_db
)
TEST_DATABASE_CONNECTION_STRING = os.environ.get(
'TEST_DATABASE_CONNECTION_STRING',
'sqlite:///morning_cd_behave_tests.db'
)
def before_scenario(context: behave.runner.Context, scenario: behave.model.Scenario) -> None:
behave.use_fixture(
with_aws_lambda_environment_variables,
context,
TEST_DATABASE_CONNECTION_STRING
)
behave.use_fixture(
with_empty_db,
context,
TEST_DATABASE_CONNECTION_STRING
)
| [
"[email protected]"
] | |
e01c6ae624a61f1dbf75ab56e391078604fe1b12 | ad77e334bba34e1aa17debc66e90dd1e78d32af9 | /Clg/HW/MultipleVal.py | d5a926c4f74d3cd395651ada2f07e630a776776e | [] | no_license | SnehuD/Python-Programs | 7c08bfa82789dd9139b2687ec3d6cf3d13a33fc5 | ecf6c92ce2673885b4345e0ae70cfe3d01677911 | refs/heads/main | 2023-08-23T06:33:46.535386 | 2021-10-03T17:02:51 | 2021-10-03T17:02:51 | 413,140,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | # Assigning multiple values to multiple variables
a, b, c = 5, 3.2, "Hello"
print(a)
print(b)
print(c)
| [
"[email protected]"
] | |
9d1b5f66a246d990332925b934287b88f6a5b75a | 9d3f8a896b97ee19a32a57b25c7ebcb17ac76730 | /Text_cleaner.py | 7ce7e240568289cdd0139c635b6971de3b1f5281 | [] | no_license | iam-Raghav/Translation_using_attention | c0b9210ffa1f6704c6213f68995a2c4a53aec1bc | 1577e8f7d3f4f2b76b9a3b2c081391559a4b2272 | refs/heads/master | 2020-11-27T12:22:54.723514 | 2019-12-21T14:43:35 | 2019-12-21T14:43:35 | 229,439,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | from __future__ import print_function
import numpy as np
import pandas as pd
import re
import unicodedata
#word_tokenize accepts a string as an input, not a file.
rawtext_filename = "E:\_Translation\data\eng-fra.txt" #KEY IN PATH OF SOURCE FILE
cleantext_filename = "E:\_Translation\data\eng-fra_clean.txt" #KEY IN PATH OF THE DESTINATION AND CLEAN TEXT FILE
max_length = 8
#File Loading
###################################
df = pd.read_csv(rawtext_filename,header=None,encoding = "utf-8", sep='\t')
###################################
#Converts text to ascii and remove unwanted special characters.
###################################
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
#Removing punctuations from the text
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
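
# Illustrative sketch (added for clarity; not in the original script):
#
#   normalizeString(u"Ça va très bien!")   # -> "ca va tres bien !"
#
# unicodeToAscii strips the accents, sentence-ending punctuation (.!?) is
# padded with a space, and every other non-letter character collapses to a space.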
df1=pd.DataFrame()
for i in range(len(df.iloc[:,1])):
if len(df.iloc[i,0].split()) < max_length:
df.iloc[i, 0] = normalizeString(df.iloc[i, 0])
df.iloc[i, 1] = normalizeString(df.iloc[i, 1])
df1 = df1.append(df.loc[i], ignore_index= False)
df1.to_csv(cleantext_filename,sep='\t',header=False,index = False)
print("DONE...")
| [
"[email protected]"
] | |
6bc9cfedf2ac61a6012b493b3f2430a79c3a77a4 | b4fd7421a2796672276219f32085371f732b76c5 | /untitled3.py | dd0b589b66e3d72061004840da1a941b6bccd669 | [] | no_license | sanjanapatill/CDR-Analysis-with-Insights | 16e4d1ac29f44f3ee9897a928b790f3a5c904db6 | 16705dd3dea99d5962d2d04e5a65e2986fbf1999 | refs/heads/main | 2023-01-01T19:17:06.802197 | 2020-10-25T15:43:55 | 2020-10-25T15:43:55 | 307,131,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,414 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 25 19:34:38 2020
@author: Sanjana
"""
# https://pastebin.com/QbYwcrgh
"""
This is the initial stage of the CDR Analysis project.
We are going to learn about loading the CDR data and transform it for our
Data Analysis and visualization purpose for Business perspective.
"""
"""
column no - actual name
1 serviceProvider
4 group Information
5 direction
9 startTime
13 EndTime
14 Miss Call Information
31 Group ID
120 userId
180 twoStageDialingDigits
146 relatedCallId
147 relatedCallIdReason
267 vpDialingfacResult
312 locationType
345 userAgent
date
starttime
endtime
duration
hourly_range
weekly_range
"""
# Loading all your libraries
import pandas as pd
# Declaring your Global variables
# main function to load the data and transform it for further use
def main():
dataset_name = "cdr_data.csv"
# Required columns
call_columns = ["4", "5","14", "31", "120", "147", "267", "312", "345", \
"date","starttime", "endtime","duration", "hourly_range","weekly_range"]
# We have used low_memory = False as some columns contains mixed datatype
# header = None is used as dataset doesn't contain column name
call_dataset = pd.read_csv(dataset_name, usecols = call_columns,low_memory = False)
# coulmns for service data
service_columns = ["31", "120", "147", "345","date", "starttime", "endtime","duration"]
service_dataset = call_dataset[service_columns]
# columns for device data
device_columns = ["5", "31", "120", "312", "345", "date","starttime", "endtime","duration"]
device_dataset = call_dataset[device_columns]
# Output
# Renaming columns name according to the Client
call_dataset = call_dataset.rename(columns = {"4":"Group", "5":"Call_Direction","14":"Missed Calls",
"31":"GroupID", "120":"UserID", "147":"Features", "267":" vpDialingfacResult",
"312":"UsageDeviceType",
"345":"UserDeviceType"})
service_dataset = service_dataset.rename(columns={"120":"UserID",
"31":"GroupID", "147":"FeatureName",
"345":"UserDeviceType","date":"FeatureEventDate"
})
device_dataset = device_dataset.rename(columns={"5": "DeviceEventTypeDirection",
"120":"UserID", "31":"GroupID",
"345":"UserDeviceType","date":"DeviceEventDate",
"312":"UsageDeviceType"})
call_dataset.to_csv("Call_data.csv", index=None)
service_dataset.to_csv("Service_data.csv", index=None)
device_dataset.to_csv("Device_data.csv", index=None)
if (__name__ == '__main__'):
main()
| [
"[email protected]"
] | |
0d7dd8e40973b221f489ff37dce90f802a587cf1 | 385d46cd1fc7df47814f68f450b8949df466131e | /easypolls.net.py | d26e7b8400005e076a4f88cfaa00ca4a75e277d2 | [] | no_license | ashaheedq/Voting-bot | e5c049b9e771a0ea5241dd41719fcb8016d6aefe | 98ab208f1b7799108f82cf2ff341f6d26147807e | refs/heads/master | 2022-12-12T07:44:17.698515 | 2020-09-10T08:11:18 | 2020-09-10T08:11:18 | 294,345,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | #!/usr/bin/python
url = 'http://www.easypolls.net/poll?jsoncallback=jQuery1102043759123584718296_1436987933237&command=getPoll&pollId=5ecad213e4b017b74559a5ce&isMobile=false&_=1436987933238'
import mechanize, http.cookiejar as cookielib, json, time
from random import randint
br = mechanize.Browser()
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [ ('Host','www.easypolls.net'), ('User-Agent','Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'), ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ('Accept-Language','en-US,en;q=0.5'), ('Accept-Encoding','deflate') ]
count = 1
while True:
cj.clear()
res = br.open(url)
page = res.read()
search = 'pollKey":"'
pos = page.find(search) + len(search)
pkey = page[pos:pos+7]
submit = 'http://www.easypolls.net/poll?jsoncallback=jQuery1102048485376518992906_143698468448%s&multResp2=2&pollId=5ecad213e4b017b74559a5ce&pollKey=%s&command=saveResponse&_=1590362878722' % (str(randint(1,9)),pkey)
res = br.open(submit)
page = res.read()
page = page[ page.find('{') : page.rfind('}')+1 ]
data = json.loads(page)
if data['resultCode'] == '0':
print ('Successful:', count)
count += 1
else:
print ('Unsuccessful')
#time.sleep(2) | [
"[email protected]"
] | |
ffd932dbd780505eb4bef606f414e3d7a4c848cc | fa93e53a9eee6cb476b8998d62067fce2fbcea13 | /build/position_controllers/catkin_generated/pkg.installspace.context.pc.py | 23b00e2290c58c2e5784fc5a4572705354fb4fd1 | [] | no_license | oyetripathi/ROS_conclusion_project | 2947ee2f575ddf05480dabc69cf8af3c2df53f73 | 01e71350437d57d8112b6cec298f89fc8291fb5f | refs/heads/master | 2023-06-30T00:38:29.711137 | 2021-08-05T09:17:54 | 2021-08-05T09:17:54 | 392,716,311 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "controller_interface;forward_command_controller".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lposition_controllers".split(';') if "-lposition_controllers" != "" else []
PROJECT_NAME = "position_controllers"
PROJECT_SPACE_DIR = "/home/sandeepan/tiago_public_ws/install"
PROJECT_VERSION = "0.4.2"
| [
"[email protected]"
] | |
f9b92cfbc1d8a521f0e1b94fab75fc5c6d9ce986 | 5f7a2753b88be27c995863067cdae62fef5f2d6a | /soft-attention/core/orisolver.py | 610f31b14bd0c22a981baa5b68b13839bec28de6 | [] | no_license | 16GMCN/COMS4995-Deep-Learning | 118164f91760c17dbd38487268a4bca337be33dd | 4332ff68a23d54f2e233c60039d0852a0e001ec7 | refs/heads/master | 2020-04-05T16:58:31.603167 | 2018-11-20T18:09:56 | 2018-11-20T18:09:56 | 157,037,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,563 | py | import tensorflow as tf
import matplotlib.pyplot as plt
import skimage.transform
import numpy as np
import time
import os
import pickle
from scipy import ndimage
from core.utils import *
from core.bleu import evaluate
class CaptioningSolver(object):
def __init__(self, model, data, val_data, **kwargs):
"""
Required Arguments:
- model: Show Attend and Tell caption generating model
- data: Training data; dictionary with the following keys:
- features: Feature vectors of shape (82783, 196, 512)
- file_names: Image file names of shape (82783, )
- captions: Captions of shape (400000, 17)
- image_idxs: Indices for mapping caption to image of shape (400000, )
- word_to_idx: Mapping dictionary from word to index
- val_data: validation data; for print out BLEU scores for each epoch.
Optional Arguments:
- n_epochs: The number of epochs to run for training.
- batch_size: Mini batch size.
- update_rule: A string giving the name of an update rule
- learning_rate: Learning rate; default value is 0.01.
- print_every: Integer; training losses will be printed every print_every iterations.
- save_every: Integer; model variables will be saved every save_every epoch.
- pretrained_model: String; pretrained model path
- model_path: String; model path for saving
- test_model: String; model path for test
"""
self.model = model
self.data = data
self.val_data = val_data
self.n_epochs = kwargs.pop('n_epochs', 10)
self.batch_size = kwargs.pop('batch_size', 4)
self.update_rule = kwargs.pop('update_rule', 'adam')
self.learning_rate = kwargs.pop('learning_rate', 0.01)
self.print_bleu = kwargs.pop('print_bleu', False)
self.print_every = kwargs.pop('print_every', 100)
self.save_every = kwargs.pop('save_every', 1)
self.log_path = kwargs.pop('log_path', './log/')
self.model_path = kwargs.pop('model_path', './model/')
self.pretrained_model = kwargs.pop('pretrained_model', None)
self.test_model = kwargs.pop('test_model', './model/lstm/model-1')
# set an optimizer by update rule
if self.update_rule == 'adam':
self.optimizer = tf.train.AdamOptimizer
elif self.update_rule == 'momentum':
self.optimizer = tf.train.MomentumOptimizer
elif self.update_rule == 'rmsprop':
self.optimizer = tf.train.RMSPropOptimizer
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
def train(self):
# train/val dataset
n_examples = self.data['captions'].shape[0]
#n_examples = 5000
n_iters_per_epoch = int(np.ceil(float(n_examples)/self.batch_size))
features = self.data['features']
captions = self.data['captions']
image_idxs = self.data['image_idxs']
val_features = self.val_data['features']
n_iters_val = int(np.ceil(float(val_features.shape[0])/self.batch_size))
# build graphs for training model and sampling captions
loss = self.model.build_model()
with tf.variable_scope(tf.get_variable_scope()) as scope:
with tf.name_scope('optimizer'):
tf.get_variable_scope().reuse_variables()
_, _, generated_captions = self.model.build_sampler(max_len=20)
optimizer = self.optimizer(learning_rate=self.learning_rate)
grads = tf.gradients(loss, tf.trainable_variables())
grads_and_vars = list(zip(grads, tf.trainable_variables()))
train_op = optimizer.apply_gradients(grads_and_vars=grads_and_vars)
# summary op
tf.summary.scalar('batch_loss', loss)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
for grad, var in grads_and_vars:
tf.summary.histogram(var.op.name+'/gradient', grad)
summary_op = tf.summary.merge_all()
print "The number of epoch: %d" %self.n_epochs
print "Data size: %d" %n_examples
print "Batch size: %d" %self.batch_size
print "Iterations per epoch: %d" %n_iters_per_epoch
config = tf.ConfigProto(allow_soft_placement = True)
#config.gpu_options.per_process_gpu_memory_fraction=0.9
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
tf.initialize_all_variables().run()
summary_writer = tf.summary.FileWriter(self.log_path, graph=tf.get_default_graph())
saver = tf.train.Saver(max_to_keep=20)
if self.pretrained_model is not None:
print "Start training with pretrained Model."
saver.restore(sess, self.pretrained_model)
prev_loss = -1
curr_loss = 0
start_t = time.time()
for e in range(self.n_epochs):
rand_idxs = np.random.permutation(n_examples)
captions = captions[rand_idxs]
image_idxs = image_idxs[rand_idxs]
for i in range(n_iters_per_epoch):
captions_batch = captions[i*self.batch_size:(i+1)*self.batch_size]
image_idxs_batch = image_idxs[i*self.batch_size:(i+1)*self.batch_size]
print image_idxs_batch
features_batch = features[image_idxs_batch]
feed_dict = {self.model.features: features_batch, self.model.captions: captions_batch}
_, l = sess.run([train_op, loss], feed_dict)
curr_loss += l
# write summary for tensorboard visualization
if i % 10 == 0:
summary = sess.run(summary_op, feed_dict)
summary_writer.add_summary(summary, e*n_iters_per_epoch + i)
if (i+1) % self.print_every == 0:
print ("\nTrain loss at epoch %d & iteration %d (mini-batch): %.5f" %(e+1, i+1, l))
ground_truths = captions[image_idxs == image_idxs_batch[0]]
decoded = decode_captions(ground_truths, self.model.idx_to_word)
for j, gt in enumerate(decoded):
print ("Ground truth %d: %s" %(j+1, gt))
gen_caps = sess.run(generated_captions, feed_dict)
decoded = decode_captions(gen_caps, self.model.idx_to_word)
print ("Generated caption: %s\n" %decoded[0])
print ("Previous epoch loss: ", prev_loss)
print ("Current epoch loss: ", curr_loss)
print ("Elapsed time: ", time.time() - start_t)
prev_loss = curr_loss
curr_loss = 0
# print out BLEU scores and file write
if self.print_bleu:
all_gen_cap = np.ndarray((val_features.shape[0], 20))
for i in range(n_iters_val):
features_batch = val_features[i*self.batch_size:(i+1)*self.batch_size]
feed_dict = {self.model.features: features_batch}
gen_cap = sess.run(generated_captions, feed_dict=feed_dict)
all_gen_cap[i*self.batch_size:(i+1)*self.batch_size] = gen_cap
all_decoded = decode_captions(all_gen_cap, self.model.idx_to_word)
save_pickle(all_decoded, "./data/val/val.candidate.captions.pkl")
scores = evaluate(data_path='./data', split='val', get_scores=True)
write_bleu(scores=scores, path=self.model_path, epoch=e)
# save model's parameters
if (e+1) % self.save_every == 0:
saver.save(sess, os.path.join(self.model_path, 'model'), global_step=e+1)
print "model-%s saved." %(e+1)
def test(self, data, split='train', attention_visualization=True, save_sampled_captions=True):
'''
Args:
- data: dictionary with the following keys:
- features: Feature vectors of shape (5000, 196, 512)
- file_names: Image file names of shape (5000, )
- captions: Captions of shape (24210, 17)
- image_idxs: Indices for mapping caption to image of shape (24210, )
- features_to_captions: Mapping feature to captions (5000, 4~5)
- split: 'train', 'val' or 'test'
        - attention_visualization: If True, visualize attention weights with images for each sampled word. (ipython notebook)
- save_sampled_captions: If True, save sampled captions to pkl file for computing BLEU scores.
'''
features = data['features']
# build a graph to sample captions
alphas, betas, sampled_captions = self.model.build_sampler(max_len=20) # (N, max_len, L), (N, max_len)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
saver = tf.train.Saver()
saver.restore(sess, self.test_model)
features_batch, image_files = sample_coco_minibatch(data, self.batch_size)
feed_dict = { self.model.features: features_batch }
alps, bts, sam_cap = sess.run([alphas, betas, sampled_captions], feed_dict) # (N, max_len, L), (N, max_len)
decoded = decode_captions(sam_cap, self.model.idx_to_word)
if attention_visualization:
for n in range(10):
print ("Sampled Caption: %s" %decoded[n])
# Plot original image
img = ndimage.imread(image_files[n])
plt.subplot(4, 5, 1)
plt.imshow(img)
plt.axis('off')
# Plot images with attention weights
words = decoded[n].split(" ")
for t in range(len(words)):
if t > 18:
break
plt.subplot(4, 5, t+2)
plt.text(0, 1, '%s(%.2f)'%(words[t], bts[n,t]) , color='black', backgroundcolor='white', fontsize=8)
plt.imshow(img)
alp_curr = alps[n,t,:].reshape(14,14)
alp_img = skimage.transform.pyramid_expand(alp_curr, upscale=16, sigma=20)
plt.imshow(alp_img, alpha=0.85)
plt.axis('off')
plt.show()
if save_sampled_captions:
all_sam_cap = np.ndarray((features.shape[0], 20))
num_iter = int(np.ceil(float(features.shape[0]) / self.batch_size))
for i in range(num_iter):
features_batch = features[i*self.batch_size:(i+1)*self.batch_size]
feed_dict = { self.model.features: features_batch }
all_sam_cap[i*self.batch_size:(i+1)*self.batch_size] = sess.run(sampled_captions, feed_dict)
all_decoded = decode_captions(all_sam_cap, self.model.idx_to_word)
save_pickle(all_decoded, "./data/%s/%s.candidate.captions.pkl" %(split,split))
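
# Illustrative usage sketch (added for clarity; not part of the original file).
# The model class name and every keyword value below are assumptions based on
# the constructor signature above, not the authors' exact settings.
#
#   model = CaptionGenerator(word_to_idx, dim_feature=[196, 512], dim_embed=512)
#   solver = CaptioningSolver(model, train_data, val_data, n_epochs=20,
#                             batch_size=128, update_rule='adam',
#                             learning_rate=0.001, print_every=1000,
#                             save_every=1, model_path='./model/lstm/',
#                             test_model='./model/lstm/model-10')
#   solver.train()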
| [
"[email protected]"
] | |
fa3ae7f5ccd6a41322f07b548a062a83ba1d35b3 | b6e02f46e2e8c8582fa53329d69577ac192b3519 | /scraper.py | 224e3ebaf68a5b148a9db6db492a3695b70522cb | [] | no_license | AxSmasher44/simple-amazon-listing-webscraper | 7afe56a52f2bba4e27bdf4294dad752a3bd99613 | f741b51e88a69cdfb6d2e44dd2772280baa0103e | refs/heads/master | 2022-12-17T18:12:56.504114 | 2020-09-29T12:41:34 | 2020-09-29T12:41:34 | 299,605,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | import requests
from bs4 import BeautifulSoup
from csv import DictWriter
"""WARNING:Don't send requests continuously!!!
Only works for amazon india"""
def amazon_scraper(item):
if " " in item:
url_item = item.split(" ")
url_string = "+".join(url_item)
URL = "https://www.amazon.in/s?k="+url_string+"&ref=nb_sb_noss_2"
else:
URL = "https://www.amazon.in/s?k="+item+"&ref=nb_sb_noss_2"
all_items = []
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
products = soup.find_all("div", class_="sg-col-inner")
for product in products:
all_items.append({
"Name":product.find("span", class_="a-size-medium a-color-base a-text-normal").get_text(),
"Price":product.find("span", class_="a-offscreen").get_text()
})
return all_items
print(amazon_scraper("apple watch"))
def product_listings_to_csv(product):
item_dict = amazon_scraper(product)
with open(product+".csv", "w", newline="", encoding="UTF-8") as file:
headers = ["Name", "Price"]
csv_writer = DictWriter(file, fieldnames=headers)
csv_writer.writeheader()
for item in item_dict:
csv_writer.writerow(item)
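
# Illustrative usage sketch (added for clarity; not in the original script).
# This would write "apple watch.csv" with Name/Price columns in the working
# directory; mind the warning above about sending requests continuously.
#
#   product_listings_to_csv("apple watch")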
| [
"[email protected]"
] | |
f17ce69e556c7992b8b72734be67d9a8c66a6a95 | beab1ca3413c3397a4c5a3152b04a58585c75147 | /algos/sorting/bubble_sort.py | 0a542abbef1b7970fdc52fe0983e682342792879 | [] | no_license | john-a-m/snippets | 60c3373e2ae9f4e2ea17884aac665e070e6783f8 | 90c6160220909a30a3503a4243d51d833330c49b | refs/heads/master | 2021-01-21T04:25:28.507958 | 2019-08-05T16:00:15 | 2019-08-05T16:00:15 | 30,418,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | def bubble_sort(nums):
for passnum in range(len(nums) - 1, 0, -1):
for i in range(passnum):
if nums[i] > nums[i + 1]:
nums[i], nums[i + 1] = nums[i + 1], nums[i]
if __name__ == "__main__":
nums = [54,26,93,17,77,31,44,55,20]
bubble_sort(nums)
print nums
| [
"[email protected]"
] | |
3d6198b0abdc87164e432fd09c0390ecba72de19 | de1abd0ebbb817aa5f23d369e7dda360fd6f1c32 | /chapter8/7-NltkAnalysis.py | 486c02f2c7559694ee722504c06720e50861ed6a | [] | no_license | CodedQuen/Web-Scraping-with-Python- | 33aaa2e3733aa1f2b8c7a533d74f5d08ac868197 | 67f2d5f57726d5a943f5f044480e68c36076965b | refs/heads/master | 2022-06-13T01:34:39.764531 | 2020-05-05T11:07:01 | 2020-05-05T11:07:01 | 261,435,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | from nltk import word_tokenize, sent_tokenize, pos_tag
sentences = sent_tokenize("Google is one of the best companies in the world. I constantly google myself to see what I'm up to.")
nouns = ['NN', 'NNS', 'NNP', 'NNPS']
for sentence in sentences:
if "google" in sentence.lower():
taggedWords = pos_tag(word_tokenize(sentence))
for word in taggedWords:
if word[0].lower() == "google" and word[1] in nouns:
print(sentence)
| [
"[email protected]"
] | |
32b0d0a1e7c59df238be50af8ed751a950d96502 | 7ca55428503fc915fcffb8059d30654b625a6b26 | /54-merge_sort.py | 97a612c87752b25458be8094ab24483513f62345 | [] | no_license | Akshata2704/APS-2020 | 453e9eafb511e3e5fc73d939180c3402eb93134e | 8f095ae1af9653499f1dedcdfe12b60b1ad1f65c | refs/heads/master | 2020-12-21T03:10:52.043400 | 2020-05-15T18:51:48 | 2020-05-15T18:51:48 | 236,286,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 22:35:40 2020
@author: AKSHATA
"""
def merge(arr, l, m, r):
n1 = m - l + 1
n2 = r- m
L = [0] * (n1)
R = [0] * (n2)
for i in range(0 , n1):
L[i] = arr[l + i]
for j in range(0 , n2):
R[j] = arr[m + 1 + j]
i = 0
j = 0
k = l
while i < n1 and j < n2 :
if L[i] <= R[j]:
arr[k] = L[i]
i += 1
else:
arr[k] = R[j]
j += 1
k += 1
while i < n1:
arr[k] = L[i]
i += 1
k += 1
while j < n2:
arr[k] = R[j]
j += 1
k += 1
def mergeSort(arr,l,r):
if l < r:
m = (l+(r-1))//2
mergeSort(arr, l, m)
mergeSort(arr, m+1, r)
merge(arr, l, m, r)
arr = [12, 11, 13, 5, 6, 7]
n = len(arr)
print ("Given array is")
for i in range(n):
print ("%d" %arr[i]),
mergeSort(arr,0,n-1)
print ("\n\nSorted array is")
for i in range(n):
print ("%d" %arr[i]),
,
| [
"[email protected]"
] | |
48821661f53b8b2fd7a14393c175437f8f7231b3 | cc856a6efb22c82eaa6bc9bcadb36ab519c2b3eb | /test.py | 7de592f88c9b4629879bdd2da2c3554672ca5170 | [] | no_license | ajaybati/Past-Projects | 59b861f76ca5cd5b1a7b85462b92666660263d57 | 204ada5e7f91cea4d8108c5f66f159b384d7dbdd | refs/heads/master | 2020-12-21T10:05:49.450890 | 2020-01-31T07:43:02 | 2020-01-31T07:43:02 | 236,396,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | list=['1', '(8', 'ounce)', 'container', 'plain', 'yogurt', '1⁄3', 'cup', 'chopped', 'seeded', 'cucumber', '(thinly', 'slice', 'remainder', 'of', 'cucumber)', '2', 'tablespoons', 'finely', 'chopped', 'onions', '1', 'garlic', 'clove,', 'minced', '1', 'teaspoon', 'sugarFilling', '1', 'lb', 'lean', 'ground', 'beef', '(I', 'use', 'ground', 'turkey)', '1', '1⁄2', 'teaspoons', 'dried', 'oregano', '1', 'teaspoon', 'garlic', 'powder', '1', 'teaspoon', 'onion', 'powder', '1', 'teaspoon', 'salt', '(optional)', '3⁄4', 'teaspoon', 'pepper', '4', 'pita', 'breads', '3', 'cups', 'shredded', 'lettuce', '1', 'large', 'tomatoes,', 'chopped', '1', 'small', 'onion,', 'chopped']
start=0
starter=1
print("ok")
list[1:6]=(' '.join(list[1:6])
| [
"[email protected]"
] | |
48351d6d1b511a8717bd34a114b6e54683357290 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/acllog/flowcounteraghist1d.py | 2e2a886e4137ca0fffa75a3d90db0646a85fbed6 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,371 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class FlowCounterAgHist1d(Mo):
"""
A class that represents historical aggregated statistics for Flow Record Counter in a 1 day sampling interval. This class updates every hour.
"""
meta = StatsClassMeta("cobra.model.acllog.FlowCounterAgHist1d", "Flow Record Counter")
counter = CounterMeta("hitscount", CounterCategory.COUNTER, "hits", "Hits Counter")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "hitscountCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "hitscountPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "hitscountSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "hitscountThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "hitscountTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "hitscountRate"
meta._counters.append(counter)
meta.moClassName = "acllogFlowCounterAgHist1d"
meta.rnFormat = "HDacllogFlowCounterAg1d-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical aggregated Flow Record Counter stats in 1 day"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.acllog.PermitL3Pkt")
meta.parentClasses.add("cobra.model.acllog.DropL2Pkt")
meta.parentClasses.add("cobra.model.acllog.DropL2Flow")
meta.parentClasses.add("cobra.model.acllog.DropL3Pkt")
meta.parentClasses.add("cobra.model.acllog.DropL3Flow")
meta.parentClasses.add("cobra.model.acllog.PermitL2Flow")
meta.parentClasses.add("cobra.model.acllog.PermitL3Flow")
meta.parentClasses.add("cobra.model.acllog.PermitL2Pkt")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.acllog.FlowCounterAgHist")
meta.rnPrefixes = [
('HDacllogFlowCounterAg1d-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "hitscountCum", "hitscountCum", 25142, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Hits Counter cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountCum", prop)
prop = PropMeta("str", "hitscountPer", "hitscountPer", 25143, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Hits Counter periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountPer", prop)
prop = PropMeta("str", "hitscountRate", "hitscountRate", 25147, PropCategory.IMPLICIT_RATE)
prop.label = "Hits Counter rate"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountRate", prop)
prop = PropMeta("str", "hitscountSpct", "hitscountSpct", 25144, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Hits Counter suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountSpct", prop)
prop = PropMeta("str", "hitscountThr", "hitscountThr", 25145, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Hits Counter thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("hitscountThr", prop)
prop = PropMeta("str", "hitscountTr", "hitscountTr", 25146, PropCategory.IMPLICIT_TREND)
prop.label = "Hits Counter trend"
prop.isOper = True
prop.isStats = True
meta.props.add("hitscountTr", prop)
prop = PropMeta("str", "index", "index", 25066, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
405d7228f719d71c4b76b31841b43bc34e7a2c0d | 80d20d3edd874a011361800939f4ef6982673137 | /balance.py | 321ffc73e54d9c8b635da940facb59a3daaa9543 | [] | no_license | vincelwt/krakenoverview | 2969d1ef9f4bd068521ffefc7421c2c0e414f43a | b7043a3852da866f4097323209b35807ccff9801 | refs/heads/master | 2021-01-22T08:09:47.603675 | 2017-05-27T14:20:10 | 2017-05-27T14:20:10 | 92,603,134 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,175 | py | import krakenex, json, os, time
from termcolor import cprint, colored
from tabulate import tabulate
k = krakenex.API()
k.load_key('krakenvince.key')
liveValues = []
compound = []
# Updates liveValues array with current prices of all currencies
def updateLivevalues():
global liveValues
assets = k.query_public('Ticker', {'pair': 'ETHEUR,XBTEUR,LTCEUR,XMREUR,XRPEUR,DASHEUR,ETCEUR,ZECEUR,GNOEUR,REPEUR'})['result']
liveValues = []
for attr, value in assets.iteritems():
liveValues.append([attr.replace('ZEUR', '').replace('EUR', ''), float(value['a'][0]) ])
# Returns an array with the currency price a week ago & a day ago
def getOldvalues(pair):
pair = pair.replace('XBT', 'BTC').replace('ASH', 'DASH')
timeAgo = int(time.time()-3600*24*7)
spread = k.query_public('OHLC', {'pair': pair, 'interval': '1440', 'since': str(timeAgo)})
for attr, value in spread['result'].iteritems():
if not 'EUR' in attr: continue
weekAgo = value[0]
weekAgoAverage = ( float(weekAgo[1]) + float(weekAgo[2]) + float(weekAgo[3]) + float(weekAgo[4]) ) / 4
dayAgo = value[5]
dayAgoAverage = ( float(dayAgo[1]) + float(dayAgo[2]) + float(dayAgo[3]) + float(dayAgo[4]) ) / 4
return [weekAgoAverage, dayAgoAverage]
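
# Illustrative sketch (added for clarity; not in the original script). A call
# like getOldvalues('XBTEUR') asks Kraken for daily OHLC candles and returns
# [price a week ago, price a day ago], each averaged over open/high/low/close.
# The numbers below are made up:
#
#   week_ago, day_ago = getOldvalues('XBTEUR')   # e.g. [2210.5, 2287.25]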
# Update total outcome of all made trades
def printTrades():
global compound
recentTrades = k.query_private('ClosedOrders', {})
compound = []
for attr, value in recentTrades['result']['closed'].iteritems():
type = value['descr']['type']
asset = value['descr']['pair'][:3]
source = value['descr']['pair'][-3:]
price = float( value['price'] )
vol = float( value['vol_exec'] )
cost = float( value['cost'] )
#print source, price
if type == 'sell':
vol = -vol
cost = -cost
touched = False
touched2 = False
for e in compound:
if e[0] == asset:
e[1] += vol
e[2] += cost
touched = True
if not touched:
compound.append([asset, vol, cost])
#print '%15s %13s %12s' % (colored(type+asset, 'grey', 'on_yellow'), str(vol), colored(cost, 'white', 'on_magenta'))
#print value
#print '--------Total ---------'
#for e in compound:
# print '%6s %13s %12s' % (colored(e[0], 'grey', 'on_yellow'), str(e[1]), colored(e[2], 'white', 'on_magenta'))
def printBalance():
print colored('Updating data...', 'green')
table = [['Balance', 'Quantity', 'Euro amount', 'Net results', 'Last 24h', 'Last week']]
currencies = k.query_private('Balance')['result']
balance = k.query_private('TradeBalance', {'asset': 'ZEUR'})['result']
totalChange = 0
totalWeekChange = 0
totalDayChange = 0
# For each currency in Kraken "wallet"
for attr, pair in currencies.iteritems():
value = 0
change = 0
weekChange = 0
dayChange = 0
pair = float(pair)
valueStr = ''
for values in liveValues:
if values[0] == attr:
value = pair*values[1]
valueStr = str( int(value) )+' EUR'
if (attr != 'ZEUR'): # No need to calc changes for EUR fiat
oldData = getOldvalues(attr[-3:]+'EUR')
weekChange = value-(pair*oldData[0])
dayChange = value-(pair*oldData[1])
for e in compound:
if e[0] in attr:
change = float("%.2f" % float(value-e[2]))
totalChange += change
totalDayChange += dayChange
totalWeekChange += weekChange
changeStr = ''
if change > 0:
changeStr = colored(str("%.2f" % change)+' EUR', 'white', 'on_cyan')
elif change < 0:
changeStr = colored(str("%.2f" % change)+' EUR', 'white', 'on_red')
weekChangeStr = ''
if weekChange > 0:
weekChangeStr = colored(str("%.2f" % weekChange)+' EUR', 'white', 'on_cyan')
elif weekChange < 0:
weekChangeStr = colored(str("%.2f" % weekChange)+' EUR', 'white', 'on_red')
dayChangeStr = ''
if dayChange > 0:
dayChangeStr = colored(str("%.2f" % dayChange)+' EUR', 'white', 'on_cyan')
elif dayChange < 0:
dayChangeStr = colored(str("%.2f" % dayChange)+' EUR', 'white', 'on_red')
toPrint = [colored(attr, 'grey', 'on_yellow'), str(pair), colored(valueStr, 'white', 'on_magenta'), changeStr, dayChangeStr, weekChangeStr]
table.append(toPrint)
totalChangeStr = ''
if totalChange > 0:
totalChangeStr = colored(str("%.2f" % totalChange)+' EUR', 'white', 'on_cyan')
elif totalChange < 0:
totalChangeStr = colored(str("%.2f" % totalChange)+' EUR', 'white', 'on_red')
totalDayChangeStr = ''
if totalDayChange > 0:
totalDayChangeStr = colored(str("%.2f" % totalDayChange)+' EUR', 'white', 'on_cyan')
elif totalDayChange < 0:
totalDayChangeStr = colored(str("%.2f" % totalDayChange)+' EUR', 'white', 'on_red')
totalWeekChangeStr = ''
if totalWeekChange > 0:
totalWeekChangeStr = colored(str("%.2f" % totalWeekChange)+' EUR', 'white', 'on_cyan')
elif totalWeekChange < 0:
totalWeekChangeStr = colored(str("%.2f" % totalWeekChange)+' EUR', 'white', 'on_red')
table.append([colored('Total', 'white', 'on_blue'), 'x', colored( str(int(float( balance['eb'] )))+' EUR', 'white', 'on_green'), totalChangeStr, totalDayChangeStr, totalWeekChangeStr])
os.system('clear')
print tabulate(table, tablefmt="grid")
while 1:
try:
updateLivevalues()
printTrades()
printBalance()
except:
cprint('Error getting balance.', 'red')
time.sleep(60) | [
"[email protected]"
] | |
00366ebb2a1b53335b701e3f22e14fd88941755c | bca25ecb5677b528cdf3d68b6ead3ef64290fdac | /Solid_rocket_motor_ignition_system_based_on_air-methane_mixture_-_Cantera (1).py | 5874f2923ccf0ba1f6c7e483a53084c3b2415a3b | [] | no_license | gibon1617/mkws | fe78eb8fd09dce38dc568001fab7fb21ef3e1ea2 | 0850f38503c9b664088387f9daa91f719fcf7823 | refs/heads/master | 2020-06-04T02:27:16.808515 | 2019-06-14T10:07:35 | 2019-06-14T10:07:35 | 191,833,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | import math as m
import cantera as ct
import numpy as np
import matplotlib.pyplot as plt
# Reaction mechanism GRI-Mech 3.0
gas = ct.Solution('gri30.xml')
# Energy which has to be provided in order to ignite the rocket fuel of A2 rocket motor
ignition_energy = 1.2*1550 # [kcal]
ign_energy = ignition_energy*4186.8 # [J]
# Outer combustion chamber (reactor) for ignition mixture will be used
d = 40.0 # mm diameter of the reactor
l = 100.0 # mm length of the reactor
Vr = m.pi*d*d*0.25*l/1000000000 # [m^3] volume of the reactor
mdot = 0.025 # [kg/s] mass flow in the reactor
mt = []
Tt = []
Qet = []
Eqt = []
tsim = 0.005 # [s] time spent in the reactor by the flowing gases
eq_ratio = 0.6 # initial equivalence ratio
while eq_ratio < 1.6:
print(eq_ratio)
# gas definition, initial conditions and inlet
gas.TP = 300.0, ct.one_atm*10
gas.set_equivalence_ratio(eq_ratio, 'CH4:1.0', 'O2:1.0, N2:3.76')
inlet = ct.Reservoir(gas)
# filling combustor with a gas
gas.equilibrate('HP')
combustor = ct.IdealGasReactor(gas)
combustor.volume = Vr
# exhaust definition
exhaust = ct.Reservoir(gas)
# mass flow
inlet_mfc = ct.MassFlowController(inlet, combustor, mdot=mdot)
# simulation definition
sim = ct.ReactorNet([combustor])
# Reactor's states array
states = ct.SolutionArray(gas)
#Simulation
sim.set_initial_time(0.0) # reset the integrator
sim.advance(tsim)
states.append(combustor.thermo.state)
V = mdot/combustor.density
Q = -np.sum(states.net_production_rates * states.partial_molar_enthalpies)
Qe = Q*V
t = ign_energy/Qe
mpal = mdot*t
print('masa = {:.2f}; T = {:.1f};'.format(mpal, combustor.T))
# writing results to arrays
mt.append(mpal)
Tt.append(t)
Qet.append(Qe)
Eqt.append(eq_ratio)
eq_ratio += 0.01
print('Qe = {:.2f}; mpal = {:.2f}; t = {:.2f}'.format(Qe, mpal, t))
Q=0.0
mpal=0.0
#plots
f, ax1 = plt.subplots(1,1)
ax1.plot(Eqt, mt, '.-', color='C0')
ax2 = ax1.twinx()
ax1.set_xlabel('equivalence ratio [-]')
ax1.set_ylabel('mixture mass [kg]', color='C0')
ax2.plot(Eqt,Tt, '.-', color='C0')
ax2.set_ylabel('t [s]', color='C0')
f.tight_layout()
plt.show() | [
"[email protected]"
] | |
8835421c4159ffa65247218af967d81b9433de71 | 4c4fdd5ce7cbf6bc4b0028c35d24f7b871a0b4d5 | /guvipairsum.py | 453e1dc0206c201cdf924864fe3e480e9b14b5fc | [] | no_license | Ashiscodebat/lemmecode | 7df1510e069e851e3f395efec83474813ca20b91 | 4a5dd5377d0c964d03f0e189d5142aece0c730e6 | refs/heads/master | 2020-04-01T16:55:00.731129 | 2018-10-22T15:52:27 | 2018-10-22T15:52:27 | 153,403,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | def checkrep(a,b,dic):
flag = 0
for key,vals in dic.items():
#if key + vals > a + b:
#return 1
if a != key and b != key:
flag = 1
else:
return 0
return 1
n = int(input())
l= []
d = {}
dic = {}
count = 0
an = 0
minsum = 1000000
for i in range(0,n):
s =int(input())
l.append(s)
for i in range(0,n):
for j in range(i+1,n):
if l[i] + l[j] == 0 :
an = checkrep(l[i],l[j],dic)
if an == 1:
d = {l[i]:l[j]}
dic.update(d)
count = count + 1
an = 0
else:
continue
li = []
for key,val in dic.items():
li.extend([key,val])
if li == []:
print("No pair")
exit(1)
for i in range(0,len(li)):
print(li[i],end = " ")
| [
"[email protected]"
] | |
7b44412ce11d8c6c342152422abcba093327737b | 3a48cfb0b43fe61f52355a67b2b5700aa8c5ddf2 | /src/som/interpreter/ast/nodes/message/generic_node.py | 5cfc38a7257dfdd24617ab9116a1996177084454 | [
"MIT"
] | permissive | SOM-st/RTruffleSOM | ce380d02985b0ef1f41f400409f61377dc3a583e | 1efc698577830ff3fcd1607e7155d9c6423e8804 | refs/heads/master | 2021-01-17T07:25:19.895376 | 2020-12-08T18:56:50 | 2020-12-08T18:56:50 | 17,311,290 | 9 | 2 | MIT | 2020-09-02T16:08:31 | 2014-03-01T08:45:25 | Python | UTF-8 | Python | false | false | 2,256 | py | from rpython.rlib.debug import make_sure_not_resized
from rpython.rlib.jit import we_are_jitted
from ..dispatch import SuperDispatchNode, UninitializedDispatchNode, send_does_not_understand
from .abstract_node import AbstractMessageNode
class GenericMessageNode(AbstractMessageNode):
_immutable_fields_ = ['_dispatch?']
_child_nodes_ = ['_dispatch']
def __init__(self, selector, universe, rcvr_expr, arg_exprs,
source_section = None):
AbstractMessageNode.__init__(self, selector, universe, rcvr_expr,
arg_exprs, source_section)
if rcvr_expr.is_super_node():
dispatch = SuperDispatchNode(selector, rcvr_expr.get_super_class(),
universe)
else:
dispatch = UninitializedDispatchNode(selector, universe)
self._dispatch = self.adopt_child(dispatch)
def replace_dispatch_list_head(self, node):
self._dispatch.replace(node)
def execute(self, frame):
rcvr, args = self._evaluate_rcvr_and_args(frame)
return self.execute_evaluated(frame, rcvr, args)
def execute_evaluated(self, frame, rcvr, args):
assert frame is not None
assert rcvr is not None
assert args is not None
make_sure_not_resized(args)
if we_are_jitted():
return self._direct_dispatch(rcvr, args)
else:
return self._dispatch.execute_dispatch(rcvr, args)
def _direct_dispatch(self, rcvr, args):
method = self._lookup_method(rcvr)
if method:
return method.invoke(rcvr, args)
else:
return send_does_not_understand(rcvr, self._selector, args, self._universe)
def _lookup_method(self, rcvr):
rcvr_class = self._class_of_receiver(rcvr)
return rcvr_class.lookup_invokable(self._selector)
def _class_of_receiver(self, rcvr):
if self._rcvr_expr.is_super_node():
return self._rcvr_expr.get_super_class()
return rcvr.get_class(self._universe)
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
self._selector,
self._source_section)
| [
"[email protected]"
] | |
904780b13a2b9bff1a93d620fd8790a37652d91f | d822501d09895216da854f2db8227164775970bd | /Functions/palindrome.py | ff198055c64c23fc99c5310034c3c4b9e3b3a87c | [] | no_license | Jitendrap1702/Coding_Ninjas_Intro_to_Python | 8bf7418b53d9e4ab1146950d4fef46a51cc33157 | 44371c5595c7507de0a1b4c0e596664d34c026c4 | refs/heads/master | 2022-12-10T22:07:27.213614 | 2020-09-06T16:00:24 | 2020-09-06T16:00:24 | 293,270,812 | 1 | 1 | null | 2020-09-06T13:48:35 | 2020-09-06T12:19:30 | Python | UTF-8 | Python | false | false | 225 | py | def checkPalindrome(num):
k=str(num)
if k==k[::-1]:
return True
else:
return False
num = int(input())
isPalindrome = checkPalindrome(num)
if(isPalindrome):
print('true')
else:
print('false')
| [
"[email protected]"
] | |
b3c22a904dac91d8b29c6d27d6ce97e5e99f49d8 | a034d4ba39789e4a351112c46dd04a38180cd06c | /appengine/monorail/framework/sql.py | 41fb66b26a0bd748c5788f67fa37cb9b6da157a5 | [
"BSD-3-Clause"
] | permissive | asdfghjjklllllaaa/infra | 050ad249ab44f264b4e2080aa9537ce74aafb022 | 8f63af54e46194cd29291813f2790ff6e986804d | refs/heads/master | 2023-01-10T21:55:44.811835 | 2019-07-01T14:03:32 | 2019-07-01T14:03:32 | 194,691,941 | 1 | 0 | BSD-3-Clause | 2023-01-07T07:12:37 | 2019-07-01T14:45:29 | Python | UTF-8 | Python | false | false | 37,456 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""A set of classes for interacting with tables in SQL."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import random
import re
import sys
import time
import settings
if not settings.unit_test_mode:
import MySQLdb
from framework import exceptions
from framework import framework_helpers
from infra_libs import ts_mon
from Queue import Queue
class ConnectionPool(object):
"""Manage a set of database connections such that they may be re-used.
"""
def __init__(self, poolsize=1):
self.poolsize = poolsize
self.queues = {}
@framework_helpers.retry(3, delay=0.1, backoff=2)
def get(self, instance, database):
"""Retun a database connection, or throw an exception if none can
be made.
"""
key = instance + '/' + database
if not key in self.queues:
queue = Queue(self.poolsize)
self.queues[key] = queue
queue = self.queues[key]
if queue.empty():
cnxn = cnxn_ctor(instance, database)
else:
cnxn = queue.get()
# Make sure the connection is still good.
cnxn.ping()
cnxn.commit()
return cnxn
def release(self, cnxn):
if not cnxn.pool_key in self.queues:
raise BaseException('unknown pool key: %s' % cnxn.pool_key)
q = self.queues[cnxn.pool_key]
if q.full():
cnxn.close()
else:
q.put(cnxn)
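
# Illustrative usage sketch (added for clarity; not part of the original
# module). The instance and database names are placeholders; real values come
# from settings.db_instance and settings.db_database_name.
#
#   pool = ConnectionPool(poolsize=2)
#   cnxn = pool.get('project:region:master-instance', 'monorail')
#   try:
#     cursor = cnxn.cursor()
#     cursor.execute('SELECT 1')
#   finally:
#     pool.release(cnxn)   # re-queue the connection for re-use (or close it)
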
@framework_helpers.retry(2, delay=1, backoff=2)
def cnxn_ctor(instance, database):
logging.info('About to connect to SQL instance %r db %r', instance, database)
if settings.unit_test_mode:
raise ValueError('unit tests should not need real database connections')
try:
if settings.local_mode:
start_time = time.time()
cnxn = MySQLdb.connect(
host='127.0.0.1', port=3306, db=database, user='root', charset='utf8')
else:
start_time = time.time()
cnxn = MySQLdb.connect(
unix_socket='/cloudsql/' + instance, db=database, user='root',
charset='utf8')
duration = int((time.time() - start_time) * 1000)
DB_CNXN_LATENCY.add(duration)
CONNECTION_COUNT.increment({'success': True})
except MySQLdb.OperationalError:
CONNECTION_COUNT.increment({'success': False})
raise
cnxn.pool_key = instance + '/' + database
cnxn.is_bad = False
return cnxn
# One connection pool per database instance (master, replicas are each an
# instance). We'll have four connections per instance because we fetch
# issue comments, stars, spam verdicts and spam verdict history in parallel
# with promises.
cnxn_pool = ConnectionPool(settings.db_cnxn_pool_size)
# MonorailConnection maintains a dictionary of connections to SQL databases.
# Each is identified by an int shard ID.
# And there is one connection to the master DB identified by key MASTER_CNXN.
MASTER_CNXN = 'master_cnxn'
CONNECTION_COUNT = ts_mon.CounterMetric(
'monorail/sql/connection_count',
'Count of connections made to the SQL database.',
[ts_mon.BooleanField('success')])
DB_CNXN_LATENCY = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_cnxn_latency',
'Time needed to establish a DB connection.',
None)
DB_QUERY_LATENCY = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_query_latency',
'Time needed to make a DB query.',
[ts_mon.StringField('type')])
DB_COMMIT_LATENCY = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_commit_latency',
'Time needed to make a DB commit.',
None)
DB_ROLLBACK_LATENCY = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_rollback_latency',
'Time needed to make a DB rollback.',
None)
DB_RETRY_COUNT = ts_mon.CounterMetric(
'monorail/sql/db_retry_count',
'Count of queries retried.',
None)
DB_QUERY_COUNT = ts_mon.CounterMetric(
'monorail/sql/db_query_count',
'Count of queries sent to the DB.',
[ts_mon.StringField('type')])
DB_COMMIT_COUNT = ts_mon.CounterMetric(
'monorail/sql/db_commit_count',
'Count of commits sent to the DB.',
None)
DB_ROLLBACK_COUNT = ts_mon.CounterMetric(
'monorail/sql/db_rollback_count',
'Count of rollbacks sent to the DB.',
None)
DB_RESULT_ROWS = ts_mon.CumulativeDistributionMetric(
'monorail/sql/db_result_rows',
'Number of results returned by a DB query.',
None)
def RandomShardID():
"""Return a random shard ID to load balance across replicas."""
return random.randint(0, settings.num_logical_shards - 1)
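
# Illustrative sketch (added for clarity; not part of the original module).
# Read-only queries can be spread across replicas by picking a random shard;
# the table name, columns, and keyword condition below are placeholders.
#
#   issue_tbl = SQLTableManager('Issue')
#   rows = issue_tbl.Select(
#       cnxn, cols=['id', 'summary'], shard_id=RandomShardID(), project_id=789)
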
class MonorailConnection(object):
"""Create and manage connections to the SQL servers.
We only store connections in the context of a single user request, not
across user requests. The main purpose of this class is to make using
sharded tables easier.
"""
def __init__(self):
self.sql_cnxns = {} # {MASTER_CNXN: cnxn, shard_id: cnxn, ...}
def GetMasterConnection(self):
"""Return a connection to the master SQL DB."""
if MASTER_CNXN not in self.sql_cnxns:
self.sql_cnxns[MASTER_CNXN] = cnxn_pool.get(
settings.db_instance, settings.db_database_name)
logging.info(
'created a master connection %r', self.sql_cnxns[MASTER_CNXN])
return self.sql_cnxns[MASTER_CNXN]
def GetConnectionForShard(self, shard_id):
"""Return a connection to the DB replica that will be used for shard_id."""
if shard_id not in self.sql_cnxns:
physical_shard_id = shard_id % settings.num_logical_shards
replica_name = settings.db_replica_names[
physical_shard_id % len(settings.db_replica_names)]
shard_instance_name = (
settings.physical_db_name_format % replica_name)
self.sql_cnxns[shard_id] = cnxn_pool.get(
shard_instance_name, settings.db_database_name)
logging.info('created a replica connection for shard %d', shard_id)
return self.sql_cnxns[shard_id]
def Execute(self, stmt_str, stmt_args, shard_id=None, commit=True, retries=1):
"""Execute the given SQL statement on one of the relevant databases."""
if shard_id is None:
# No shard was specified, so hit the master.
sql_cnxn = self.GetMasterConnection()
else:
sql_cnxn = self.GetConnectionForShard(shard_id)
try:
return self._ExecuteWithSQLConnection(
sql_cnxn, stmt_str, stmt_args, commit=commit)
except MySQLdb.OperationalError as e:
logging.exception(e)
logging.info('retries: %r', retries)
if retries > 0:
DB_RETRY_COUNT.increment()
self.sql_cnxns = {} # Drop all old mysql connections and make new.
return self.Execute(
stmt_str, stmt_args, shard_id=shard_id, commit=commit,
retries=retries - 1)
else:
raise e
def _ExecuteWithSQLConnection(
self, sql_cnxn, stmt_str, stmt_args, commit=True):
"""Execute a statement on the given database and return a cursor."""
if stmt_str.startswith('INSERT') or stmt_str.startswith('REPLACE'):
logging.info('SQL stmt_str: \n%s', stmt_str)
logging.info('SQL stmt_args: %r', stmt_args)
else:
logging.info('SQL stmt: \n%s', (stmt_str % tuple(stmt_args)))
start_time = time.time()
cursor = sql_cnxn.cursor()
cursor.execute('SET NAMES utf8mb4')
logging.info('made cursor on %r in %d ms',
sql_cnxn, int((time.time() - start_time) * 1000))
if stmt_str.startswith('INSERT') or stmt_str.startswith('REPLACE'):
cursor.executemany(stmt_str, stmt_args)
duration = (time.time() - start_time) * 1000
DB_QUERY_LATENCY.add(duration, {'type': 'write'})
DB_QUERY_COUNT.increment({'type': 'write'})
else:
cursor.execute(stmt_str, args=stmt_args)
duration = (time.time() - start_time) * 1000
DB_QUERY_LATENCY.add(duration, {'type': 'read'})
DB_QUERY_COUNT.increment({'type': 'read'})
DB_RESULT_ROWS.add(cursor.rowcount)
logging.info('%d rows in %d ms', cursor.rowcount,
int(duration))
if commit and not stmt_str.startswith('SELECT'):
try:
sql_cnxn.commit()
duration = (time.time() - start_time) * 1000
DB_COMMIT_LATENCY.add(duration)
DB_COMMIT_COUNT.increment()
except MySQLdb.DatabaseError:
sql_cnxn.rollback()
duration = (time.time() - start_time) * 1000
DB_ROLLBACK_LATENCY.add(duration)
DB_ROLLBACK_COUNT.increment()
return cursor
def Commit(self):
"""Explicitly commit any pending txns. Normally done automatically."""
sql_cnxn = self.GetMasterConnection()
try:
sql_cnxn.commit()
except MySQLdb.DatabaseError:
logging.exception('Commit failed for cnxn, rolling back')
sql_cnxn.rollback()
def Close(self):
"""Safely close any connections that are still open."""
for sql_cnxn in self.sql_cnxns.values():
try:
sql_cnxn.rollback() # Abandon any uncommitted changes.
cnxn_pool.release(sql_cnxn)
except MySQLdb.DatabaseError:
# This might happen if the cnxn is somehow already closed.
logging.exception('ProgrammingError when trying to close cnxn')
class SQLTableManager(object):
"""Helper class to make it easier to deal with an SQL table."""
def __init__(self, table_name):
self.table_name = table_name
def Select(
self, cnxn, distinct=False, cols=None, left_joins=None,
joins=None, where=None, or_where_conds=False, group_by=None,
order_by=None, limit=None, offset=None, shard_id=None, use_clause=None,
having=None, **kwargs):
"""Compose and execute an SQL SELECT statement on this table.
Args:
cnxn: MonorailConnection to the databases.
distinct: If True, add DISTINCT keyword.
cols: List of columns to retrieve, defaults to '*'.
left_joins: List of LEFT JOIN (str, args) pairs.
joins: List of regular JOIN (str, args) pairs.
where: List of (str, args) for WHERE clause.
or_where_conds: Set to True to use OR in the WHERE conds.
group_by: List of strings for GROUP BY clause.
order_by: List of (str, args) for ORDER BY clause.
limit: Optional LIMIT on the number of rows returned.
offset: Optional OFFSET when using LIMIT.
shard_id: Int ID of the shard to query.
use_clause: Optional string USE clause to tell the DB which index to use.
having: List of (str, args) for Optional HAVING clause
**kwargs: WHERE-clause equality and set-membership conditions.
Keyword args are used to build up more WHERE conditions that compare
    column values to constants. A keyword argument foo='bar' translates to 'foo
= "bar"', and foo=[3, 4, 5] translates to 'foo IN (3, 4, 5)'.
Returns:
A list of rows, each row is a tuple of values for the requested cols.
"""
cols = cols or ['*'] # If columns not specified, retrieve all columns.
stmt = Statement.MakeSelect(
self.table_name, cols, distinct=distinct,
or_where_conds=or_where_conds)
if use_clause:
stmt.AddUseClause(use_clause)
if having:
stmt.AddHavingTerms(having)
stmt.AddJoinClauses(left_joins or [], left=True)
stmt.AddJoinClauses(joins or [])
stmt.AddWhereTerms(where or [], **kwargs)
stmt.AddGroupByTerms(group_by or [])
stmt.AddOrderByTerms(order_by or [])
stmt.SetLimitAndOffset(limit, offset)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args, shard_id=shard_id)
rows = cursor.fetchall()
cursor.close()
return rows
def SelectRow(
self, cnxn, cols=None, default=None, where=None, **kwargs):
"""Run a query that is expected to return just one row."""
rows = self.Select(cnxn, distinct=True, cols=cols, where=where, **kwargs)
if len(rows) == 1:
return rows[0]
elif not rows:
logging.info('SelectRow got 0 results, so using default %r', default)
return default
else:
      raise ValueError('SelectRow got %d results, expected only 1' % len(rows))
def SelectValue(self, cnxn, col, default=None, where=None, **kwargs):
"""Run a query that is expected to return just one row w/ one value."""
row = self.SelectRow(
cnxn, cols=[col], default=[default], where=where, **kwargs)
return row[0]
def InsertRows(
self, cnxn, cols, row_values, replace=False, ignore=False,
commit=True, return_generated_ids=False):
"""Insert all the given rows.
Args:
cnxn: MonorailConnection object.
cols: List of column names to set.
row_values: List of lists with values to store. The length of each
nested list should be equal to len(cols).
replace: Set to True if inserted values should replace existing DB rows
that have the same DB keys.
ignore: Set to True to ignore rows that would duplicate existing DB keys.
commit: Set to False if this operation is part of a series of operations
that should not be committed until the final one is done.
return_generated_ids: Set to True to return a list of generated
autoincrement IDs for inserted rows. This requires us to insert rows
one at a time.
Returns:
If return_generated_ids is set to True, this method returns a list of the
auto-increment IDs generated by the DB. Otherwise, [] is returned.
"""
if not row_values:
return None # Nothing to insert
generated_ids = []
if return_generated_ids:
# We must insert the rows one-at-a-time to know the generated IDs.
for row_value in row_values:
stmt = Statement.MakeInsert(
self.table_name, cols, [row_value], replace=replace, ignore=ignore)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args, commit=commit)
if cursor.lastrowid:
generated_ids.append(cursor.lastrowid)
cursor.close()
return generated_ids
stmt = Statement.MakeInsert(
self.table_name, cols, row_values, replace=replace, ignore=ignore)
stmt_str, stmt_args = stmt.Generate()
cnxn.Execute(stmt_str, stmt_args, commit=commit)
return []
def InsertRow(
self, cnxn, replace=False, ignore=False, commit=True, **kwargs):
"""Insert a single row into the table.
Args:
cnxn: MonorailConnection object.
replace: Set to True if inserted values should replace existing DB rows
that have the same DB keys.
ignore: Set to True to ignore rows that would duplicate existing DB keys.
commit: Set to False if this operation is part of a series of operations
that should not be committed until the final one is done.
**kwargs: column=value assignments to specify what to store in the DB.
Returns:
The generated autoincrement ID of the key column if one was generated.
Otherwise, return None.
"""
cols = sorted(kwargs.keys())
row = tuple(kwargs[col] for col in cols)
generated_ids = self.InsertRows(
cnxn, cols, [row], replace=replace, ignore=ignore,
commit=commit, return_generated_ids=True)
if generated_ids:
return generated_ids[0]
else:
return None
def Update(self, cnxn, delta, where=None, commit=True, limit=None, **kwargs):
"""Update one or more rows.
Args:
cnxn: MonorailConnection object.
delta: Dictionary of {column: new_value} assignments.
where: Optional list of WHERE conditions saying which rows to update.
commit: Set to False if this operation is part of a series of operations
that should not be committed until the final one is done.
limit: Optional LIMIT on the number of rows updated.
**kwargs: WHERE-clause equality and set-membership conditions.
Returns:
Int number of rows updated.
"""
if not delta:
return 0 # Nothing is being changed
stmt = Statement.MakeUpdate(self.table_name, delta)
stmt.AddWhereTerms(where, **kwargs)
stmt.SetLimitAndOffset(limit, None)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args, commit=commit)
result = cursor.rowcount
cursor.close()
return result
def IncrementCounterValue(self, cnxn, col_name, where=None, **kwargs):
"""Atomically increment a counter stored in MySQL, return new value.
Args:
cnxn: MonorailConnection object.
col_name: int column to increment.
where: Optional list of WHERE conditions saying which rows to update.
**kwargs: WHERE-clause equality and set-membership conditions. The
where and kwargs together should narrow the update down to exactly
one row.
Returns:
The new, post-increment value of the counter.
"""
stmt = Statement.MakeIncrement(self.table_name, col_name)
stmt.AddWhereTerms(where, **kwargs)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args)
assert cursor.rowcount == 1, (
'missing or ambiguous counter: %r' % cursor.rowcount)
result = cursor.lastrowid
cursor.close()
return result
def Delete(self, cnxn, where=None, or_where_conds=False, commit=True,
limit=None, **kwargs):
"""Delete the specified table rows.
Args:
cnxn: MonorailConnection object.
where: Optional list of WHERE conditions saying which rows to update.
or_where_conds: Set to True to use OR in the WHERE conds.
commit: Set to False if this operation is part of a series of operations
that should not be committed until the final one is done.
limit: Optional LIMIT on the number of rows deleted.
**kwargs: WHERE-clause equality and set-membership conditions.
Returns:
Int number of rows updated.
"""
# Deleting the whole table is never intended in Monorail.
assert where or kwargs
stmt = Statement.MakeDelete(self.table_name, or_where_conds=or_where_conds)
stmt.AddWhereTerms(where, **kwargs)
stmt.SetLimitAndOffset(limit, None)
stmt_str, stmt_args = stmt.Generate()
cursor = cnxn.Execute(stmt_str, stmt_args, commit=commit)
result = cursor.rowcount
cursor.close()
return result
class Statement(object):
"""A class to help build complex SQL statements w/ full escaping.
Start with a Make*() method, then fill in additional clauses as needed,
then call Generate() to return the SQL string and argument list. We pass
the string and args to MySQLdb separately so that it can do escaping on
the arg values as appropriate to prevent SQL-injection attacks.
The only values that are not escaped by MySQLdb are the table names
and column names, and bits of SQL syntax, all of which is hard-coded
in our application.
"""
@classmethod
def MakeSelect(cls, table_name, cols, distinct=False, or_where_conds=False):
"""Constuct a SELECT statement."""
assert _IsValidTableName(table_name)
assert all(_IsValidColumnName(col) for col in cols)
main_clause = 'SELECT%s %s FROM %s' % (
(' DISTINCT' if distinct else ''), ', '.join(cols), table_name)
return cls(main_clause, or_where_conds=or_where_conds)
@classmethod
def MakeInsert(
cls, table_name, cols, new_values, replace=False, ignore=False):
"""Constuct an INSERT statement."""
    if replace:
return cls.MakeReplace(table_name, cols, new_values, ignore)
assert _IsValidTableName(table_name)
assert all(_IsValidColumnName(col) for col in cols)
ignore_word = ' IGNORE' if ignore else ''
main_clause = 'INSERT%s INTO %s (%s)' % (
ignore_word, table_name, ', '.join(cols))
return cls(main_clause, insert_args=new_values)
@classmethod
def MakeReplace(
cls, table_name, cols, new_values, ignore=False):
"""Construct an INSERT...ON DUPLICATE KEY UPDATE... statement.
Uses the INSERT/UPDATE syntax because REPLACE is literally a DELETE
followed by an INSERT, which doesn't play well with foreign keys.
INSERT/UPDATE is an atomic check of whether the primary key exists,
followed by an INSERT if it doesn't or an UPDATE if it does.
"""
assert _IsValidTableName(table_name)
assert all(_IsValidColumnName(col) for col in cols)
ignore_word = ' IGNORE' if ignore else ''
main_clause = 'INSERT%s INTO %s (%s)' % (
ignore_word, table_name, ', '.join(cols))
return cls(main_clause, insert_args=new_values, duplicate_update_cols=cols)
@classmethod
def MakeUpdate(cls, table_name, delta):
"""Constuct an UPDATE statement."""
assert _IsValidTableName(table_name)
assert all(_IsValidColumnName(col) for col in delta.keys())
update_strs = []
update_args = []
for col, val in delta.items():
update_strs.append(col + '=%s')
update_args.append(val)
main_clause = 'UPDATE %s SET %s' % (
table_name, ', '.join(update_strs))
return cls(main_clause, update_args=update_args)
@classmethod
def MakeIncrement(cls, table_name, col_name, step=1):
"""Constuct an UPDATE statement that increments and returns a counter."""
assert _IsValidTableName(table_name)
assert _IsValidColumnName(col_name)
main_clause = (
'UPDATE %s SET %s = LAST_INSERT_ID(%s + %%s)' % (
table_name, col_name, col_name))
update_args = [step]
return cls(main_clause, update_args=update_args)
@classmethod
def MakeDelete(cls, table_name, or_where_conds=False):
"""Constuct a DELETE statement."""
assert _IsValidTableName(table_name)
main_clause = 'DELETE FROM %s' % table_name
return cls(main_clause, or_where_conds=or_where_conds)
def __init__(
self, main_clause, insert_args=None, update_args=None,
duplicate_update_cols=None, or_where_conds=False):
self.main_clause = main_clause # E.g., SELECT or DELETE
self.or_where_conds = or_where_conds
self.insert_args = insert_args or [] # For INSERT statements
for row_value in self.insert_args:
if not all(_IsValidDBValue(val) for val in row_value):
raise exceptions.InputException('Invalid DB value %r' % (row_value,))
self.update_args = update_args or [] # For UPDATEs
for val in self.update_args:
if not _IsValidDBValue(val):
raise exceptions.InputException('Invalid DB value %r' % val)
self.duplicate_update_cols = duplicate_update_cols or [] # For REPLACE-ish
self.use_clauses = []
self.join_clauses, self.join_args = [], []
self.where_conds, self.where_args = [], []
self.having_conds, self.having_args = [], []
self.group_by_terms, self.group_by_args = [], []
self.order_by_terms, self.order_by_args = [], []
self.limit, self.offset = None, None
def Generate(self):
"""Return an SQL string having %s placeholders and args to fill them in."""
clauses = [self.main_clause] + self.use_clauses + self.join_clauses
if self.where_conds:
if self.or_where_conds:
clauses.append('WHERE ' + '\n OR '.join(self.where_conds))
else:
clauses.append('WHERE ' + '\n AND '.join(self.where_conds))
if self.group_by_terms:
clauses.append('GROUP BY ' + ', '.join(self.group_by_terms))
if self.having_conds:
assert self.group_by_terms
clauses.append('HAVING %s' % ','.join(self.having_conds))
if self.order_by_terms:
clauses.append('ORDER BY ' + ', '.join(self.order_by_terms))
if self.limit and self.offset:
clauses.append('LIMIT %d OFFSET %d' % (self.limit, self.offset))
elif self.limit:
clauses.append('LIMIT %d' % self.limit)
elif self.offset:
clauses.append('LIMIT %d OFFSET %d' % (sys.maxint, self.offset))
if self.insert_args:
clauses.append('VALUES (' + PlaceHolders(self.insert_args[0]) + ')')
args = self.insert_args
if self.duplicate_update_cols:
clauses.append('ON DUPLICATE KEY UPDATE %s' % (
', '.join(['%s=VALUES(%s)' % (col, col)
for col in self.duplicate_update_cols])))
assert not (self.join_args + self.update_args + self.where_args +
self.group_by_args + self.order_by_args + self.having_args)
else:
args = (self.join_args + self.update_args + self.where_args +
self.group_by_args + self.having_args + self.order_by_args)
assert not (self.insert_args + self.duplicate_update_cols)
args = _BoolsToInts(args)
stmt_str = '\n'.join(clause for clause in clauses if clause)
assert _IsValidStatement(stmt_str), stmt_str
return stmt_str, args
def AddUseClause(self, use_clause):
"""Add a USE clause (giving the DB a hint about which indexes to use)."""
assert _IsValidUseClause(use_clause), use_clause
self.use_clauses.append(use_clause)
def AddJoinClauses(self, join_pairs, left=False):
"""Save JOIN clauses based on the given list of join conditions."""
for join, args in join_pairs:
assert _IsValidJoin(join), join
assert join.count('%s') == len(args), join
self.join_clauses.append(
' %sJOIN %s' % (('LEFT ' if left else ''), join))
self.join_args.extend(args)
def AddGroupByTerms(self, group_by_term_list):
"""Save info needed to generate the GROUP BY clause."""
assert all(_IsValidGroupByTerm(term) for term in group_by_term_list)
self.group_by_terms.extend(group_by_term_list)
def AddOrderByTerms(self, order_by_pairs):
"""Save info needed to generate the ORDER BY clause."""
for term, args in order_by_pairs:
assert _IsValidOrderByTerm(term), term
assert term.count('%s') == len(args), term
self.order_by_terms.append(term)
self.order_by_args.extend(args)
def SetLimitAndOffset(self, limit, offset):
"""Save info needed to generate the LIMIT OFFSET clause."""
self.limit = limit
self.offset = offset
def AddWhereTerms(self, where_cond_pairs, **kwargs):
"""Generate a WHERE clause."""
where_cond_pairs = where_cond_pairs or []
for cond, args in where_cond_pairs:
assert _IsValidWhereCond(cond), cond
assert cond.count('%s') == len(args), cond
self.where_conds.append(cond)
self.where_args.extend(args)
for col, val in sorted(kwargs.items()):
assert _IsValidColumnName(col), col
eq = True
if col.endswith('_not'):
col = col[:-4]
eq = False
if isinstance(val, set):
        val = list(val) # MySQL interface cannot handle sets.
if val is None or val == []:
op = 'IS' if eq else 'IS NOT'
self.where_conds.append(col + ' ' + op + ' NULL')
elif isinstance(val, list):
op = 'IN' if eq else 'NOT IN'
# Sadly, MySQLdb cannot escape lists, so we flatten to multiple "%s"s
self.where_conds.append(
col + ' ' + op + ' (' + PlaceHolders(val) + ')')
self.where_args.extend(val)
else:
op = '=' if eq else '!='
self.where_conds.append(col + ' ' + op + ' %s')
self.where_args.append(val)
def AddHavingTerms(self, having_cond_pairs):
"""Generate a HAVING clause."""
for cond, args in having_cond_pairs:
assert _IsValidHavingCond(cond), cond
assert cond.count('%s') == len(args), cond
self.having_conds.append(cond)
self.having_args.extend(args)
def PlaceHolders(sql_args):
"""Return a comma-separated list of %s placeholders for the given args."""
return ','.join('%s' for _ in sql_args)
TABLE_PAT = '[A-Z][_a-zA-Z0-9]+'
COLUMN_PAT = '[a-z][_a-z]+'
COMPARE_OP_PAT = '(<|>|=|!=|>=|<=|LIKE|NOT LIKE)'
SHORTHAND = {
'table': TABLE_PAT,
'column': COLUMN_PAT,
'tab_col': r'(%s\.)?%s' % (TABLE_PAT, COLUMN_PAT),
'placeholder': '%s', # That's a literal %s that gets passed to MySQLdb
'multi_placeholder': '%s(, ?%s)*',
'compare_op': COMPARE_OP_PAT,
'opt_asc_desc': '( ASC| DESC)?',
'opt_alias': '( AS %s)?' % TABLE_PAT,
'email_cond': (r'\(?'
r'('
r'(LOWER\(Spare\d+\.email\) IS NULL OR )?'
r'LOWER\(Spare\d+\.email\) '
r'(%s %%s|IN \(%%s(, ?%%s)*\))'
r'( (AND|OR) )?'
r')+'
r'\)?' % COMPARE_OP_PAT),
'hotlist_cond': (r'\(?'
r'('
r'(LOWER\(Cond\d+\.name\) IS NULL OR )?'
r'LOWER\(Cond\d+\.name\) '
r'(%s %%s|IN \(%%s(, ?%%s)*\))'
r'( (AND|OR) )?'
r')+'
r'\)?' % COMPARE_OP_PAT),
'phase_cond': (r'\(?'
r'('
r'(LOWER\(Phase\d+\.name\) IS NULL OR )?'
r'LOWER\(Phase\d+\.name\) '
r'(%s %%s|IN \(%%s(, ?%%s)*\))?'
r'( (AND|OR) )?'
r')+'
r'\)?' % COMPARE_OP_PAT),
'approval_cond': (r'\(?'
r'('
r'(LOWER\(Cond\d+\.status\) IS NULL OR )?'
r'LOWER\(Cond\d+\.status\) '
r'(%s %%s|IN \(%%s(, ?%%s)*\))'
r'( (AND|OR) )?'
r')+'
r'\)?' % COMPARE_OP_PAT),
}
def _MakeRE(regex_str):
"""Return a regular expression object, expanding our shorthand as needed."""
return re.compile(regex_str.format(**SHORTHAND))
TABLE_RE = _MakeRE('^{table}$')
TAB_COL_RE = _MakeRE('^{tab_col}$')
USE_CLAUSE_RE = _MakeRE(
r'^USE INDEX \({column}\) USE INDEX FOR ORDER BY \({column}\)$')
HAVING_RE_LIST = [
_MakeRE(r'^COUNT\(\*\) {compare_op} {placeholder}$')]
COLUMN_RE_LIST = [
TAB_COL_RE,
_MakeRE(r'\*'),
_MakeRE(r'COUNT\(\*\)'),
_MakeRE(r'COUNT\({tab_col}\)'),
_MakeRE(r'COUNT\(DISTINCT\({tab_col}\)\)'),
_MakeRE(r'MAX\({tab_col}\)'),
_MakeRE(r'MIN\({tab_col}\)'),
_MakeRE(r'GROUP_CONCAT\((DISTINCT )?{tab_col}( ORDER BY {tab_col})?' \
r'( SEPARATOR \'.*\')?\)'),
]
JOIN_RE_LIST = [
TABLE_RE,
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} IN \({multi_placeholder}\))?$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r'( AND {tab_col} IN \({multi_placeholder}\))?'
r'( AND {tab_col} = {tab_col})?$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r'( AND {tab_col} IN \({multi_placeholder}\))?'
r'( AND {tab_col} IS NULL)?'
r'( AND \({tab_col} IS NULL'
r' OR {tab_col} NOT IN \({multi_placeholder}\)\))?$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r' AND \(?{tab_col} {compare_op} {placeholder}\)?'
r'( AND {tab_col} = {tab_col})?$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r' AND {tab_col} = {tab_col}$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r'( AND {tab_col} = {tab_col})?'
r'( AND {tab_col} = {placeholder})?'
r' AND \({tab_col} IS NULL OR'
r' {tab_col} != {placeholder}\)$'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col}'
r' AND LOWER\({tab_col}\) = LOWER\({placeholder}\)'),
_MakeRE(
r'^{table}{opt_alias} ON {tab_col} = {tab_col} AND {email_cond}$'),
_MakeRE(
r'^{table}{opt_alias} ON {email_cond}$'),
_MakeRE(
r'^{table}{opt_alias} ON '
r'\({tab_col} = {tab_col} OR {tab_col} = {tab_col}\)$'),
_MakeRE(
r'^\({table} AS {table} JOIN User AS {table} '
r'ON {tab_col} = {tab_col} AND {email_cond}\) '
r'ON Issue(Snapshot)?.id = {tab_col}'
r'( AND {tab_col} IS NULL)?'),
_MakeRE(
r'^\({table} JOIN Hotlist AS {table} '
r'ON {tab_col} = {tab_col} AND {hotlist_cond}\) '
r'ON Issue.id = {tab_col}?'),
_MakeRE(
r'^\({table} AS {table} JOIN IssuePhaseDef AS {table} '
r'ON {tab_col} = {tab_col} AND {phase_cond}\) '
r'ON Issue.id = {tab_col}?'),
_MakeRE(
r'^IssuePhaseDef AS {table} ON {phase_cond}'),
_MakeRE(
r'^Issue2ApprovalValue AS {table} ON {tab_col} = {tab_col} '
r'AND {tab_col} = {placeholder} AND {approval_cond}'),
_MakeRE(
r'^{table} AS {table} ON {tab_col} = {tab_col} '
r'LEFT JOIN {table} AS {table} ON {tab_col} = {tab_col}'),
]
ORDER_BY_RE_LIST = [
_MakeRE(r'^{tab_col}{opt_asc_desc}$'),
_MakeRE(r'^LOWER\({tab_col}\){opt_asc_desc}$'),
_MakeRE(r'^ISNULL\({tab_col}\){opt_asc_desc}$'),
_MakeRE(r'^\(ISNULL\({tab_col}\) AND ISNULL\({tab_col}\)\){opt_asc_desc}$'),
_MakeRE(r'^FIELD\({tab_col}, {multi_placeholder}\){opt_asc_desc}$'),
_MakeRE(r'^FIELD\(IF\(ISNULL\({tab_col}\), {tab_col}, {tab_col}\), '
r'{multi_placeholder}\){opt_asc_desc}$'),
_MakeRE(r'^CONCAT\({tab_col}, {tab_col}\){opt_asc_desc}$'),
]
GROUP_BY_RE_LIST = [
TAB_COL_RE,
]
WHERE_COND_RE_LIST = [
_MakeRE(r'^TRUE$'),
_MakeRE(r'^FALSE$'),
_MakeRE(r'^{tab_col} IS NULL$'),
_MakeRE(r'^{tab_col} IS NOT NULL$'),
_MakeRE(r'^{tab_col} {compare_op} {tab_col}$'),
_MakeRE(r'^{tab_col} {compare_op} {placeholder}$'),
_MakeRE(r'^{tab_col} %% {placeholder} = {placeholder}$'),
_MakeRE(r'^{tab_col} IN \({multi_placeholder}\)$'),
_MakeRE(r'^{tab_col} NOT IN \({multi_placeholder}\)$'),
_MakeRE(r'^LOWER\({tab_col}\) IS NULL$'),
_MakeRE(r'^LOWER\({tab_col}\) IS NOT NULL$'),
_MakeRE(r'^LOWER\({tab_col}\) {compare_op} {placeholder}$'),
_MakeRE(r'^LOWER\({tab_col}\) IN \({multi_placeholder}\)$'),
_MakeRE(r'^LOWER\({tab_col}\) NOT IN \({multi_placeholder}\)$'),
_MakeRE(r'^LOWER\({tab_col}\) LIKE {placeholder}$'),
_MakeRE(r'^LOWER\({tab_col}\) NOT LIKE {placeholder}$'),
_MakeRE(r'^timestep < \(SELECT MAX\(j.timestep\) FROM Invalidate AS j '
r'WHERE j.kind = %s '
r'AND j.cache_key = Invalidate.cache_key\)$'),
_MakeRE(r'^\({tab_col} IS NULL OR {tab_col} {compare_op} {placeholder}\) '
          r'AND \({tab_col} IS NULL OR {tab_col} {compare_op} {placeholder}'
          r'\)$'),
_MakeRE(r'^\({tab_col} IS NOT NULL AND {tab_col} {compare_op} '
          r'{placeholder}\) OR \({tab_col} IS NOT NULL AND {tab_col} '
          r'{compare_op} {placeholder}\)$'),
]
# Note: We never use ';' for multiple statements, '@' for SQL variables, or
# any quoted strings in stmt_str (quotes are added by MySQLdb for args).
STMT_STR_RE = re.compile(
r'\A(SELECT|UPDATE|DELETE|INSERT|REPLACE) [\'-+=!<>%*.,()\w\s]+\Z',
re.MULTILINE)
def _IsValidDBValue(val):
if isinstance(val, basestring):
return '\x00' not in val
return True
def _IsValidTableName(table_name):
return TABLE_RE.match(table_name)
def _IsValidColumnName(column_expr):
return any(regex.match(column_expr) for regex in COLUMN_RE_LIST)
def _IsValidUseClause(use_clause):
return USE_CLAUSE_RE.match(use_clause)
def _IsValidHavingCond(cond):
if cond.startswith('(') and cond.endswith(')'):
cond = cond[1:-1]
if ' OR ' in cond:
return all(_IsValidHavingCond(c) for c in cond.split(' OR '))
if ' AND ' in cond:
return all(_IsValidHavingCond(c) for c in cond.split(' AND '))
return any(regex.match(cond) for regex in HAVING_RE_LIST)
def _IsValidJoin(join):
return any(regex.match(join) for regex in JOIN_RE_LIST)
def _IsValidOrderByTerm(term):
return any(regex.match(term) for regex in ORDER_BY_RE_LIST)
def _IsValidGroupByTerm(term):
return any(regex.match(term) for regex in GROUP_BY_RE_LIST)
def _IsValidWhereCond(cond):
if cond.startswith('NOT '):
cond = cond[4:]
if cond.startswith('(') and cond.endswith(')'):
cond = cond[1:-1]
if any(regex.match(cond) for regex in WHERE_COND_RE_LIST):
return True
if ' OR ' in cond:
return all(_IsValidWhereCond(c) for c in cond.split(' OR '))
if ' AND ' in cond:
return all(_IsValidWhereCond(c) for c in cond.split(' AND '))
return False
def _IsValidStatement(stmt_str):
"""Final check to make sure there is no funny junk sneaking in somehow."""
return (STMT_STR_RE.match(stmt_str) and
'--' not in stmt_str)
def _BoolsToInts(arg_list):
"""Convert any True values to 1s and Falses to 0s.
Google's copy of MySQLdb has bool-to-int conversion disabled,
and yet it seems to be needed otherwise they are converted
to strings and always interpreted as 0 (which is FALSE).
Args:
arg_list: (nested) list of SQL statment argument values, which may
include some boolean values.
Returns:
The same list, but with True replaced by 1 and False replaced by 0.
"""
result = []
for arg in arg_list:
if isinstance(arg, (list, tuple)):
result.append(_BoolsToInts(arg))
elif arg is True:
result.append(1)
elif arg is False:
result.append(0)
else:
result.append(arg)
return result
| [
"[email protected]"
] | |
59919a9d9900991467fcaabb4cc8e2acaff0e9e0 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/aphotomanager/testcase/firstcases/testcase5_028.py | 6856a16cc6fb6a518aa1c467766e72d1e3596a1c | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,391 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'de.k3b.android.androFotoFinder',
'appActivity' : 'de.k3b.android.androFotoFinder.FotoGalleryActivity',
'resetKeyboard' : True,
'androidCoverage' : 'de.k3b.android.androFotoFinder/de.k3b.android.androFotoFinder.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase028
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Show in new gallery\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"android:id/home\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"/storage/sdcard/Pictures/Wikipedia/Michael Mosman District Judge.jpg\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"/storage/sdcard/pic4.jpg\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/action_edit\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"de.k3b.android.androFotoFinder:id/menu_item_share\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"5_028\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'de.k3b.android.androFotoFinder'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"[email protected]"
] | |
8ea3b5fe878c8992db43d66125d44a9c5a56b3e4 | c4755fc30069506249cea50240a42f9765c9a87d | /13_dqn_keras_type_c/05_0_Keras_type_c_frozen_lake_dueling_GREEN.py | b32bc6c23f081ba316f5a235a07cd0aa0f85d058 | [] | no_license | RichardMinsooGo-RL-Single-agent/2_frozen_lake | 32e5ea206b2d7d20bfc226748f1e30fe54def378 | 680810a9ff162cdce646d08bb016cc0661db397f | refs/heads/master | 2022-12-14T17:32:50.642594 | 2020-09-07T05:16:30 | 2020-09-07T05:16:30 | 277,676,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,601 | py | import random
import numpy as np
import time, datetime
from collections import deque
import pylab
import sys
import pickle
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.layers import *
from keras.models import Sequential,Model
import keras
from keras import backend as K_back
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import Adam
state_size = 64
action_size = 5
n_rows = 9
n_cols = 9
model_path = "save_model/"
graph_path = "save_graph/"
# Make folder for save data
if not os.path.exists(model_path):
os.makedirs(model_path)
if not os.path.exists(graph_path):
os.makedirs(graph_path)
load_model = True
class Frozen_Lake:
def __init__(self):
        # starting agent position and a random offset used to place the holes
self.agent_row = 0
self.agent_col = 0
self.rand_init = np.random.randint(low=0, high=3)
def reset_env(self):
self.agent_row = 0
self.agent_col = 0
self.rand_init = np.random.randint(low=0, high=3)
state = np.zeros((n_rows,n_cols))
# rand_init = 0
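        # Cell encoding used throughout this class: 0 = ice, 2 = hole,
        # 4 = goal, 5 = agent; overlapping values add up, so agent-on-hole
        # shows as 7 and agent-on-goal shows as 9 (checked in frame_step below).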
state[2][(self.rand_init+1)%9] = 2
state[4][(self.rand_init+4)%9] = 2
state[6][(self.rand_init+7)%9] = 2
state[8][8] = 4
state[0][0] = 5
return state
def frame_step(self, action, ep_step):
if action == 0:
if (self.agent_row + 1) < 9:
self.agent_row += 1
if action == 1:
if self.agent_row > 0:
self.agent_row -= 1
if action == 2:
if self.agent_col > 0:
self.agent_col -= 1
if action == 3:
if (self.agent_col+1) < 9:
self.agent_col += 1
agent_pos = np.zeros((9,9))
agent_pos[self.agent_row][self.agent_col] = 5
ice_lake = np.zeros((9,9))
hole_1_col = int((self.rand_init+ep_step+1)%9)
hole_2_col = int((self.rand_init+ep_step+4)%9)
hole_3_col = int((self.rand_init+ep_step+7)%9)
ice_lake[2][hole_1_col] = 2
ice_lake[4][hole_2_col] = 2
ice_lake[6][hole_3_col] = 2
ice_lake[8][8] = 4
next_state = agent_pos + ice_lake
# print(next_state)
# reward = agent_row - 8 + agent_col - 8
reward = -1
done = False
if np.count_nonzero(next_state == 7) > 0:
if ep_step < 15:
reward = reward - 200
else:
reward = reward - 100
# done = True
if np.count_nonzero(next_state == 9) > 0:
done = True
reward = 500
if ep_step == 500:
done = True
return next_state, reward, done
# it uses Neural Network to approximate q function
# and replay memory & target q network
class DQN_agent:
def __init__(self):
# get size of state and action
self.progress = " "
self.state_size = state_size
self.action_size = action_size
# train time define
self.training_time = 20*60
# These are hyper parameters for the DQN
self.learning_rate = 0.001
self.discount_factor = 0.99
self.epsilon_max = 0.2
# final value of epsilon
self.epsilon_min = 0.0001
self.epsilon_decay = 0.0005
self.epsilon = self.epsilon_max
self.step = 0
self.score = 0
self.episode = 0
self.hidden1, self.hidden2 = 251, 251
self.ep_trial_step = 500
# Parameter for Experience Replay
self.size_replay_memory = 10000
self.batch_size = 64
self.input_shape = (n_rows,n_cols,1)
# Experience Replay
self.memory = deque(maxlen=self.size_replay_memory)
# Parameter for Target Network
self.target_update_cycle = 100
# create main model and target model
self.model = self.build_model()
self.target_model = self.build_model()
# approximate Q function using Neural Network
# state is input and Q Value of each action is output of network
def build_model(self):
state = Input(shape=self.input_shape)
net1 = Convolution2D(32, kernel_size=(3, 3),activation='relu', \
padding = 'valid', input_shape=self.input_shape)(state)
net2 = Convolution2D(64, kernel_size=(3, 3), activation='relu', padding = 'valid')(net1)
net3 = MaxPooling2D(pool_size=(2, 2))(net2)
net4 = Flatten()(net3)
lay_2 = Dense(units=self.hidden2,activation='relu',kernel_initializer='he_uniform',\
name='hidden_layer_1')(net4)
value_= Dense(units=1,activation='linear',kernel_initializer='he_uniform',\
name='Value_func')(lay_2)
ac_activation = Dense(units=self.action_size,activation='linear',\
kernel_initializer='he_uniform',name='action')(lay_2)
#Compute average of advantage function
avg_ac_activation = Lambda(lambda x: K_back.mean(x,axis=1,keepdims=True))(ac_activation)
#Concatenate value function to add it to the advantage function
concat_value = Concatenate(axis=-1,name='concat_0')([value_,value_])
concat_avg_ac = Concatenate(axis=-1,name='concat_ac_{}'.format(0))([avg_ac_activation,avg_ac_activation])
for i in range(1,self.action_size-1):
concat_value = Concatenate(axis=-1,name='concat_{}'.format(i))([concat_value,value_])
concat_avg_ac = Concatenate(axis=-1,name='concat_ac_{}'.format(i))([concat_avg_ac,avg_ac_activation])
#Subtract concatenated average advantage tensor with original advantage function
ac_activation = Subtract()([ac_activation,concat_avg_ac])
#Add the two (Value Function and modified advantage function)
merged_layers = Add(name='final_layer')([concat_value,ac_activation])
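        # Dueling head: implements Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
        # by subtracting the per-state mean advantage before adding the value stream.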
model = Model(inputs = state,outputs=merged_layers)
model.summary()
model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
return model
# pick samples randomly from replay memory (with batch_size)
def train_model(self):
minibatch = random.sample(self.memory, self.batch_size)
states = np.zeros((self.batch_size, n_rows, n_cols, 1))
next_states = np.zeros((self.batch_size, n_rows, n_cols, 1))
actions, rewards, dones = [], [], []
for i in range(self.batch_size):
states[i] = minibatch[i][0]
actions.append( minibatch[i][1])
rewards.append( minibatch[i][2])
next_states[i] = minibatch[i][3]
dones.append( minibatch[i][4])
q_value = self.model.predict(states)
tgt_q_value_next = self.target_model.predict(next_states)
for i in range(self.batch_size):
# Q Learning: get maximum Q value at s' from target model
if dones[i]:
q_value[i][actions[i]] = rewards[i]
else:
q_value[i][actions[i]] = rewards[i] + self.discount_factor * (np.amax(tgt_q_value_next[i]))
# Decrease epsilon while training
if self.epsilon > self.epsilon_min:
self.epsilon -= self.epsilon_decay
else :
self.epsilon = self.epsilon_min
# make minibatch which includes target q value and predicted q value
# and do the model fit!
self.model.fit(states, q_value, batch_size=self.batch_size, epochs=1, verbose=0)
# get action from model using epsilon-greedy policy
def get_action(self, state):
# choose an action_arr epsilon greedily
action_arr = np.zeros(self.action_size)
action = 0
if random.random() < self.epsilon:
# print("----------Random action_arr----------")
action = random.randrange(self.action_size)
action_arr[action] = 1
else:
# Predict the reward value based on the given state
Q_value = self.model.predict(state)
action = np.argmax(Q_value[0])
action_arr[action] = 1
return action_arr, action
# save sample <s,a,r,s'> to the replay memory
def append_sample(self, state, action, reward, next_state, done):
#in every action put in the memory
self.memory.append((state, action, reward, next_state, done))
# after some time interval update the target model to be same with model
def Copy_Weights(self):
self.target_model.set_weights(self.model.get_weights())
def save_model(self):
# Save the variables to disk.
self.model.save_weights(model_path+"/model.h5")
save_object = (self.epsilon, self.episode, self.step)
with open(model_path + '/epsilon_episode.pickle', 'wb') as ggg:
pickle.dump(save_object, ggg)
print("\n Model saved in file: %s" % model_path)
def main():
agent = DQN_agent()
game = Frozen_Lake()
# Initialize variables
# Load the file if the saved file exists
if os.path.isfile(model_path+"/Model_dueling_0.h5"):
agent.model.load_weights(model_path+"/Model_dueling_0.h5")
if os.path.isfile(model_path + '/epsilon_episode.pickle'):
with open(model_path + '/epsilon_episode.pickle', 'rb') as ggg:
agent.epsilon, agent.episode, agent.step = pickle.load(ggg)
print('\n\n Variables are restored!')
else:
print('\n\n Variables are initialized!')
agent.epsilon = agent.epsilon_max
avg_score = 0
episodes, scores = [], []
# start training
# Step 3.2: run the game
display_time = datetime.datetime.now()
# print("\n\n",game_name, "-game start at :",display_time,"\n")
start_time = time.time()
# initialize target model
agent.Copy_Weights()
while time.time() - start_time < agent.training_time and avg_score < 470:
# while agent.episode < 1:
state = game.reset_env()
done = False
agent.score = 0
ep_step = 0
rewards = 0
# if agent.progress == "Training":
# print(state)
state = state.reshape(1,n_rows,n_cols,1)
while not done and ep_step < agent.ep_trial_step:
if len(agent.memory) < agent.size_replay_memory:
agent.progress = "Exploration"
else:
agent.progress = "Training"
ep_step += 1
agent.step += 1
action_arr, action = agent.get_action(state)
next_state, reward, done = game.frame_step(action, ep_step)
rewards += reward
next_state = next_state.reshape(1,n_rows,n_cols,1)
agent.append_sample(state, action, reward, next_state, done)
# print("next_state_shape :\n", next_state.shape)
state = next_state
# sys.exit()
if agent.progress == "Training":
agent.train_model()
if done or ep_step % agent.target_update_cycle == 0:
# return# copy q_net --> target_net
agent.Copy_Weights()
agent.score = rewards
if done:
if agent.progress == "Training":
agent.episode += 1
scores.append(agent.score)
episodes.append(agent.episode)
avg_score = np.mean(scores[-min(30, len(scores)):])
print('episode :{:>6,d}'.format(agent.episode),'/ ep step :{:>5,d}'.format(ep_step), \
'/ rewards :{:>4.1f}'.format(rewards),'/ status :', agent.progress, \
'/ epsilon :{:>1.4f}'.format(agent.epsilon),'/ last 30 avg :{:> 4.1f}'.format(avg_score) )
break
# Save model
agent.save_model()
pylab.plot(episodes, scores, 'b')
pylab.savefig("./save_graph/cartpole_duelingdqn.png")
e = int(time.time() - start_time)
print(' Elasped time :{:02d}:{:02d}:{:02d}'.format(e // 3600, (e % 3600 // 60), e % 60))
sys.exit()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
63d97a4042ea1c94875bb42957b33061db5ac700 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnreggio.py | 32b325c4e4eef33665e12e96b01b39fc616f374c | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 216 | py | ii = [('ClarGE2.py', 1), ('RoscTTI3.py', 1), ('RoscTTI2.py', 2), ('MedwTAI.py', 1), ('HogaGMM.py', 3), ('MartHRW.py', 1), ('WestJIT.py', 1), ('RoscTTI.py', 1), ('BrewDTO.py', 2), ('ClarGE3.py', 2), ('RogeSIP.py', 1)] | [
"[email protected]"
] | |
ad2f6648b4306ac0436f87746bef147dfb923663 | 6c616cc95ad9c5989d237b75bb190d336f943a79 | /cryptography.py | 84d3cc1145e82b12c1d8383c5ae96eb926bd738c | [] | no_license | Hoshizx/CryptographyPython1 | 7ec5400d49e188dec1056a4ba59add53779927a1 | 3eb8ce31a82e2d1752f6c437b2470ba613c96fa9 | refs/heads/main | 2023-01-31T01:49:20.384767 | 2020-12-12T03:27:03 | 2020-12-12T03:27:03 | 320,738,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
while True:
msg = input("メッセージ: ")
result_cryptography = " "
i = len(msg) - 1
while i>=0:
result_cryptography = result_cryptography + msg[i]
i = i-1
print("コード結果: "+ result_cryptography)
| [
"[email protected]"
] | |
4a2b53bd3b55146afd68cccc08de400d3c6b2a95 | d957aac7c4c73f0547c322bf7eae98b8ca57cf0e | /BaekJoon/BaekJoon_1152.py | ad635b521490363e520377ed8d2d60c68d928ba3 | [] | no_license | iamzero-j/PythonAlgorithm | babe7499cf5b8b80ce74b0b11075739a4d5ae00b | 3591d0645768c6af5ace3af36f71167b0053c713 | refs/heads/master | 2023-03-02T23:04:25.627784 | 2021-02-16T06:47:41 | 2021-02-16T06:47:41 | 276,972,465 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | # 출처 : 백준 -> 문자열 : 1152번 https://www.acmicpc.net/problem/1152
#count the number of words
#consecutive spaces will not appear in the input
#note: ins=input().split(" ") returns 1 instead of 0 for an empty input, so use split() with no argument
ins=input().split()
print(len(ins))
| [
"[email protected]"
] | |
fe616439df2cf983c744ea323919525c2e94cbb2 | 814fd0bea5bc063a4e34ebdd0a5597c9ff67532b | /chrome/common/extensions/docs/server2/refresh_tracker_test.py | f1f596f1afefe93317d8fa365571a158aa4abe97 | [
"BSD-3-Clause"
] | permissive | rzr/chromium-crosswalk | 1b22208ff556d69c009ad292bc17dca3fe15c493 | d391344809adf7b4f39764ac0e15c378169b805f | refs/heads/master | 2021-01-21T09:11:07.316526 | 2015-02-16T11:52:21 | 2015-02-16T11:52:21 | 38,887,985 | 0 | 0 | NOASSERTION | 2019-08-07T21:59:20 | 2015-07-10T15:35:50 | C++ | UTF-8 | Python | false | false | 1,941 | py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from object_store_creator import ObjectStoreCreator
from refresh_tracker import RefreshTracker
class RefreshTrackerTest(unittest.TestCase):
def setUp(self):
self._refresh_tracker = RefreshTracker(ObjectStoreCreator.ForTest())
def testNonExistentRefreshIsIncomplete(self):
self.assertFalse(self._refresh_tracker.GetRefreshComplete('unicorns').Get())
def testEmptyRefreshIsComplete(self):
refresh_id = 'abcdefghijklmnopqrstuvwxyz'
self._refresh_tracker.StartRefresh(refresh_id, []).Get()
self.assertTrue(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
def testRefreshCompletion(self):
refresh_id = 'this is fun'
self._refresh_tracker.StartRefresh(refresh_id, ['/do/foo', '/do/bar']).Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, '/do/foo').Get()
self.assertFalse(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
self._refresh_tracker.MarkTaskComplete(refresh_id, '/do/bar').Get()
self.assertTrue(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
def testUnknownTasksAreIrrelevant(self):
refresh_id = 'i am a banana'
self._refresh_tracker.StartRefresh(refresh_id, ['a', 'b', 'c', 'd']).Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'a').Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'b').Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'c').Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'q').Get()
self.assertFalse(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
self._refresh_tracker.MarkTaskComplete(refresh_id, 'd').Get()
self.assertTrue(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
71746fea62c81bca85cdda17b939d1bf146287de | a0501ee2c6ea376beb4e1e5e9e656f7bebb7d8c5 | /problema16.py | db816e9cdfb1024fae2653f366033ad8a7dcbafe | [] | no_license | jose-brenis-lanegra/T09_Brenis.Niquen | 0e988c1ae8c89fe7f9cf92010297193e376fc233 | 21f292120244b33496d71dcefeb40c6c8a5b4490 | refs/heads/master | 2020-11-25T15:51:01.886824 | 2019-12-18T03:37:34 | 2019-12-18T03:37:34 | 228,745,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | import libreria
import os
#compute the volume of a tetrahedron
a=int(os.sys.argv[1])
volumen=libreria.volumen_teraedro(a)
print("el volumen del tetraedro es:", volumen)
| [
"[email protected]"
] | |
ed317eaf3d927e878cd3e726402cd5e2619d3ece | 654926e03dd260c45e0d59e408283a4305a1bf0e | /Train.py | 7f81c7a908b56dc25fa613c279ab6acd506b55a6 | [] | no_license | vedika19/Recommendation-System | e90bae3c6f0ddb1b6c871035c36e4e875d66a6cf | ddf00fa5bc1104ccb800840cb607248131256c79 | refs/heads/master | 2020-12-24T12:59:26.567930 | 2016-09-21T13:48:37 | 2016-09-21T13:48:37 | 68,821,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,397 | py | import psycopg2
import sys
import pprint
import math
'''RecommendationSystem(uid,mid,rating)
m_id=str(mid)
query = ("SELECT movie1,movie2,similarity FROM u1similarity WHERE movie1= %s OR movie2= %s ;")
data = (m_id,m_id)
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn2 = psycopg2.connect(conn_string)
cursor2= conn2.cursor()
cursor2.execute(query,data)
records2=cursor2.fetchall()
print records2'''
'''
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn = psycopg2.connect(conn_string)
cursor= conn.cursor()
query = "SELECT user_id,movie_id,rating FROM u1test where p_rating is NULL;"
cursor.execute(query)
records = cursor.fetchall()
for i in records:
print i
common=[]
uid=str(i[0])
print uid
print i
common={}
query = ("SELECT user_id,movie_id,rating FROM u1base WHERE user_id = %s ;")
data = [uid]
conn1 = psycopg2.connect(conn_string)
cursor1= conn1.cursor()
cursor1.execute(query,data)
records1=cursor1.fetchall()
#print records1
if len(records1)<4:
print 'Cold Start'
#Cold Start()
else :
print 'Recommendation System'
mid=str(i[1])
query = ("SELECT movie1, movie2, similarity FROM u1similarity where (movie1=%s OR movie2=%s) ORDER BY similarity desc LIMIT 500 ;")
data = (mid,mid)
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn2 = psycopg2.connect(conn_string)
cursor2= conn2.cursor()
cursor2.execute(query,data)
records2=cursor2.fetchall()
print records2
for re in records2:
#print re[1],i[1]
if re[0]==i[1]:
for rec1 in records1:
#print rec1[1],re[1],rec1[1]==re[1]
if rec1[1]==re[1]:
common[re[1]]=rec1[2],re[2]
else:
for rec1 in records1:
if re[0] ==rec1[1]:
common[re[0]]=rec1[2],re[2]
for k,v in common.iteritems():
print k,v
cursor1.close()
cursor2.close()
predicted=0
num=0
den=0
similarity_p=0
for k,v in common.iteritems():
num=num+v[0]*v[1]
den=den+v[1]
if den == 0:
similarity_p=0
else:
similarity_p=num/den
print similarity_p
sp=str(similarity_p)
i0=str(i[0])
i1=str(i[1])
print sp,i0,i1
query = ("UPDATE u1test SET (p_rating) = (%s) where (user_id) = (%s) AND (movie_id)= (%s) ;")
data = (sp,i0,i1)
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn = psycopg2.connect(conn_string)
cursor1= conn.cursor()
cursor1.execute(query,data)
conn.commit()
# Calculating RMSE
rmse=0
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn = psycopg2.connect(conn_string)
cursor= conn.cursor()
query = "SELECT rmse FROM u1test "
cursor.execute(query)
records = cursor.fetchall()
for i in records:
rmse=rmse+i[0]
rmse=rmse/len(records)
rmse=math.sqrt(rmse)
print rmse'''
print"THE TOP 50 RECOMMENDED MOVIES"
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn = psycopg2.connect(conn_string)
cursor= conn.cursor()
query = "SELECT * FROM recommendation order by p_rating desc LIMIT 50"
cursor.execute(query)
records = cursor.fetchall()
for i in records:
cursor2= conn.cursor()
md=str(i[1])
query2 = "SELECT movie_title FROM movie where movie_id = %s ;"
data2=[md]
cursor2.execute(query2,data2)
records1 = cursor2.fetchall()
for j in records1:
print md ,j[0]
| [
"[email protected]"
] | |
b563672c1f0906584832778d726b6ba3cac18c7f | 060ce17de7b5cdbd5f7064d1fceb4ded17a23649 | /fn_microsoft_defender/fn_microsoft_defender/util/customize.py | bb2e546adca2b9b9f81794d806d0518c8a1f2dd2 | [
"MIT"
] | permissive | ibmresilient/resilient-community-apps | 74bbd770062a22801cef585d4415c29cbb4d34e2 | 6878c78b94eeca407998a41ce8db2cc00f2b6758 | refs/heads/main | 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 | MIT | 2023-03-29T20:40:31 | 2017-08-25T14:07:33 | Python | UTF-8 | Python | false | false | 6,691 | py | # -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_microsoft_defender"""
import base64
import os
import io
try:
from resilient import ImportDefinition
except ImportError:
# Support Apps running on resilient-circuits < v35.0.195
from resilient_circuits.util import ImportDefinition
RES_FILE = "data/export.res"
def codegen_reload_data():
"""
    Parameters required to reload codegen for the fn_microsoft_defender package
"""
return {
"package": u"fn_microsoft_defender",
"message_destinations": [u"fn_microsoft_defender"],
"functions": [u"defender_alert_search", u"defender_app_execution", u"defender_collect_machine_investigation_package", u"defender_delete_indicator", u"defender_find_machines", u"defender_find_machines_by_file", u"defender_find_machines_by_filter", u"defender_get_file_information", u"defender_get_incident", u"defender_get_related_alert_information", u"defender_list_indicators", u"defender_machine_isolation", u"defender_machine_scan", u"defender_machine_vulnerabilities", u"defender_quarantine_file", u"defender_set_indicator", u"defender_update_alert", u"defender_update_incident"],
"workflows": [u"defender_atp_app_execution", u"defender_atp_collect_machine_investigation_package", u"defender_atp_delete_indicator", u"defender_atp_find_machines", u"defender_atp_find_machines_by_file_hash", u"defender_atp_get_file_information", u"defender_atp_machine_isolation", u"defender_atp_machine_scan", u"defender_atp_machine_vulnerabilities", u"defender_atp_set_indicator", u"defender_atp_update_alert", u"defender_atp_update_indicator", u"defender_close_incident", u"defender_find_machines_by_filter", u"defender_get_incident", u"defender_get_updated_machine_information", u"defender_list_indicators", u"defender_quarantine_file", u"defender_refresh_incident", u"defender_sync_comment", u"defender_sync_incident"],
"actions": [u"Create Artifact from Indicator", u"Defender Close Incident", u"Defender Find Machine by DNS name", u"Defender Find Machines by File Hash", u"Defender Find Machines by Internal IP Address", u"Defender Get File Information", u"Defender Get Incident", u"Defender List Indicators", u"Defender Machine App Execution Restriction", u"Defender Machine Collect Investigation Package", u"Defender Machine Isolate Action", u"Defender Machine Quarantine File", u"Defender Machine Refresh Information", u"Defender Machine Scan", u"Defender Machine Update Information", u"Defender Machine Vulnerabilities", u"Defender Refresh Incident", u"Defender Set Indicator", u"Defender Sync Comment", u"Defender Sync Incident", u"Defender Update Alert", u"Delete Indicator", u"Update Indicator"],
"incident_fields": [u"defender_classification", u"defender_determination", u"defender_incident_createtime", u"defender_incident_id", u"defender_incident_lastupdatetime", u"defender_incident_url", u"defender_tags"],
"incident_artifact_types": [],
"incident_types": [],
"datatables": [u"defender_alerts", u"defender_indicators", u"defender_machines"],
"automatic_tasks": [],
"scripts": [u"Create Artifact from Indicator"],
}
def customization_data(client=None):
"""
Returns a Generator of ImportDefinitions (Customizations).
Install them using `resilient-circuits customize`
IBM Resilient Platform Version: 39.0.6328
Contents:
- Message Destinations:
- fn_microsoft_defender
- Functions:
- defender_alert_search
- defender_app_execution
- defender_collect_machine_investigation_package
- defender_delete_indicator
- defender_find_machines
- defender_find_machines_by_file
- defender_find_machines_by_filter
- defender_get_file_information
- defender_get_incident
- defender_get_related_alert_information
- defender_list_indicators
- defender_machine_isolation
- defender_machine_scan
- defender_machine_vulnerabilities
- defender_quarantine_file
- defender_set_indicator
- defender_update_alert
- defender_update_incident
- Workflows:
- defender_atp_app_execution
- defender_atp_collect_machine_investigation_package
- defender_atp_delete_indicator
- defender_atp_find_machines
- defender_atp_find_machines_by_file_hash
- defender_atp_get_file_information
- defender_atp_machine_isolation
- defender_atp_machine_scan
- defender_atp_machine_vulnerabilities
- defender_atp_set_indicator
- defender_atp_update_alert
- defender_atp_update_indicator
- defender_close_incident
- defender_find_machines_by_filter
- defender_get_incident
- defender_get_updated_machine_information
- defender_list_indicators
- defender_quarantine_file
- defender_refresh_incident
- defender_sync_comment
- defender_sync_incident
- Rules:
- Create Artifact from Indicator
- Defender Close Incident
- Defender Find Machine by DNS name
- Defender Find Machines by File Hash
- Defender Find Machines by Internal IP Address
- Defender Get File Information
- Defender Get Incident
- Defender List Indicators
- Defender Machine App Execution Restriction
- Defender Machine Collect Investigation Package
- Defender Machine Isolate Action
- Defender Machine Quarantine File
- Defender Machine Refresh Information
- Defender Machine Scan
- Defender Machine Update Information
- Defender Machine Vulnerabilities
- Defender Refresh Incident
- Defender Set Indicator
- Defender Sync Comment
- Defender Sync Incident
- Defender Update Alert
- Delete Indicator
- Update Indicator
- Incident Fields:
- defender_classification
- defender_determination
- defender_incident_createtime
- defender_incident_id
- defender_incident_lastupdatetime
- defender_incident_url
- defender_tags
- Data Tables:
- defender_alerts
- defender_indicators
- defender_machines
- Scripts:
- Create Artifact from Indicator
"""
res_file = os.path.join(os.path.dirname(__file__), RES_FILE)
if not os.path.isfile(res_file):
raise FileNotFoundError("{} not found".format(RES_FILE))
with io.open(res_file, mode='rt') as f:
b64_data = base64.b64encode(f.read().encode('utf-8'))
yield ImportDefinition(b64_data) | [
"[email protected]"
] | |
e96fb62e4c0431bce5afe872d8630f24a0f4bb80 | ac9ed2852d6e8217229cbeda0a6dd5f98953415a | /CoursPytorch/CIFAR10/models/resnet/resnet_model.py | 1351716654670393a01e0a7bba684b9d20d0b8d6 | [] | no_license | penda-diagne/projetMaster | 4afaf042aa140875c0f42a7d8cb4a27b2a3e23f1 | 44a74c53c7ae15ab556d46620be2cee8ea5e6cbc | refs/heads/master | 2021-08-19T05:22:06.364165 | 2020-12-13T18:13:40 | 2020-12-13T18:13:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | import torch
import torch.nn as nn
def conv3_3(in_channels, out_channels, stride=1):
    # Assumed helper: a 3x3 convolution is used below but not defined or imported in this file.
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
class ResNet(nn.Module):
def __init__(self, block, layers):
super(ResNet, self).__init__()
self.in_channels = 16
self.conv = conv3_3(3, 16)
self.bn = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self.make_layer(block, 16, layers[0])
self.layer2 = self.make_layer(block, 32, layers[1], 2)
self.layer3 = self.make_layer(block, 64, layers[2], 2)
self.avg_pool = nn.AvgPool2d(8)
self.fc = nn.Linear(64, 10)
def make_layer(self, block, out_channels, blocks, stride=1):
downsample = None
if (stride != 1) or (self.in_channels != out_channels):
downsample = nn.Sequential(
conv3_3(self.in_channels, out_channels, stride=stride),
nn.BatchNorm2d(out_channels))
layers = []
layers.append(block(self.in_channels, out_channels, stride, downsample))
self.in_channels = out_channels
for i in range(1, blocks):
layers.append(block(out_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv(x.float())
out = self.bn(out)
out = self.relu(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
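# Usage sketch (hypothetical -- the residual block class itself is not defined in this file):
#   model = ResNet(ResidualBlock, [3, 3, 3])      # ResNet-20-style net for 32x32 CIFAR-10 images
#   logits = model(torch.randn(8, 3, 32, 32))     # -> tensor of shape (8, 10)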
| [
"[email protected]"
] | |
a36bee353387612fcbb653c63cb1367cb93a70da | ba5dbb751a44974f1ec1914b580fc79734cd3204 | /prog_fadd.py | 96202c1804756e39ececd8f4686d1e9305271fe3 | [] | no_license | HARINI14/HARINI.R | 6be01708e2b09e3f5f47fe0c87a6c487063c03f0 | 1c5d9f89181211605859360c2bb3d505aee06990 | refs/heads/master | 2021-05-11T12:21:49.788490 | 2018-03-13T09:52:34 | 2018-03-13T09:52:34 | 117,656,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | def add (x,y):
return x+y
def sub (x,y):
return x-y
print (add(2,7))
print(sub(7,2))
print (add(100,200))
| [
"[email protected]"
] | |
c0705ef0d9e607dd455bfaf0f2917fd5a8ebcf1d | 88c39688db3835ed45c8efe43d4de2b8ade66b62 | /models/utils.py | c48b64d32d355502cd546c35e8fb19784b4948fd | [] | no_license | huyen-spec/NEGCUT | f87a77e65b24fe80a1d66e54f6561df528ff6785 | b6f3c65e9d4be747567c9baba415cadc320071a2 | refs/heads/main | 2023-09-03T19:57:47.056295 | 2021-10-21T13:42:22 | 2021-10-21T13:42:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,245 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
elif(filt_size == 7):
a = np.array([1., 6., 15., 20., 15., 6., 1.])
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
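# e.g. get_filter(3) is the normalized outer product of [1., 2., 1.] with itself:
# a 3x3 binomial blur kernel summing to 1, used below for anti-aliased down/upsampling.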
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
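# Usage sketch: both modules blur with the filter above and change only the spatial size, e.g.
#   down = Downsample(channels=64)                 # blur + stride-2 subsampling
#   up = Upsample(channels=64)                     # blur + stride-2 transposed-conv upsampling
#   y = down(torch.randn(1, 64, 32, 32))           # -> shape (1, 64, 16, 16)
#   z = up(y)                                      # -> shape (1, 64, 32, 32)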
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
else:
print('Pad type [%s] not recognized' % pad_type)
return PadLayer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
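# Usage sketch (assumes an `opt` options object like the one described in the docstring above):
#   scheduler = get_scheduler(optimizer, opt)      # e.g. opt.lr_policy == 'linear'
#   for epoch in range(opt.n_epochs + opt.n_epochs_decay):
#       ...                                        # train one epoch
#       scheduler.step()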
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
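# Usage sketch: `SomeGenerator` below is a placeholder for any nn.Module defined elsewhere, e.g.
#   netG = init_net(SomeGenerator(), init_type='xavier', init_gain=0.02, gpu_ids=[0])
# which registers the module on the first GPU (if any) and applies init_weights() to it.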
##################################################################################
# Normalization layers
##################################################################################
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
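# e.g. get_norm_layer('instance')(64) builds an InstanceNorm2d(64) with affine=False,
# while get_norm_layer('none')(64) returns the no-op Identity module defined below.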
class Identity(nn.Module):
def forward(self, x):
return x
##################################################################################
# Basic Blocks
##################################################################################
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out | [
"[email protected]"
] | |
a4204b8de8fa12aaab8d15d25093be83ff68d98f | 05010198ebb8b61fe7a96e5e074125d65850e527 | /geometricka-kalkulacka.py | 5ca05cef99a9e3aa5eae3adc6035439ee1d4b801 | [] | no_license | Jakub-program/python-beginningforme | 9be83729e6f9d0e7a760f8802866d9c0aa365d8c | 69bfb4dd40bc2213f74eebe497dce8ede1002f3c | refs/heads/main | 2023-01-28T20:58:40.139546 | 2020-12-13T18:13:40 | 2020-12-13T18:13:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | strana = float(input('Zadej stranu čtverce v centimetrech: '))
cislo_je_spravne = strana > 0  # "the number is valid": the side length must be positive
if cislo_je_spravne:
    print('Obvod čtverce se stranou', strana, 'je', 4 * strana, 'cm')  # perimeter of the square with this side
    print('Obsah čtverce se stranou', strana, 'je', strana * strana, 'cm2')  # area of the square with this side
else:
    print('Strana musí být kladná, jinak z toho nebude čtverec!')  # the side must be positive, otherwise it is not a square
print('Děkujeme za použití geometrické kalkulačky.')  # thank you for using the geometry calculator
| [
"[email protected]"
] | |
001515a2694bbc65e444cc746ce8266e4cb6b53a | 5850ae6560f23640645f23c5276b037daf45aa64 | /generate_reports.py | 96437e8cf4943f58d777c836bd7683a016b3b992 | [] | no_license | hayashikan/irs-program | a38237f513941da1f58ac5954c57425c47f2a94f | 8c9f3f8417f774e7601475ce75e7ecdb9d6763d6 | refs/heads/master | 2021-01-11T12:18:34.706313 | 2017-03-09T14:48:44 | 2017-03-09T14:48:44 | 76,469,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | # -*- coding: utf-8 -*-
"""
Project: MAM Integrated Reporting System
Author: LIN, Han (Jo)
"""
# import modules
import os
import sys
import inspect
# import modules in subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(
os.path.split(inspect.getfile(inspect.currentframe()))[0], "resources")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
# import IRS in subfolder "resources"
from IRS import Integrated_Reporting_System
# DO NOT MODIFY CODE BEFORE HERE -----------------------------------------
# run report by following code -------------------------------------------
# 'default.mamspec' file is in the same folder as this program
# you can change the file name as the .mamspec file in this folder
IRS = Integrated_Reporting_System('default.mamspec')
IRS.generate_report() # this command is to generate report
| [
"[email protected]"
] | |
dd0eb441e105f56c21813d7d9263c17466d46938 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/217/usersdata/274/113684/submittedfiles/av2_p3_m2.py | 56a351331cc54ba12f7e3c1497129b302fa40d64 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | # -*- coding: utf-8 -*
n=int(input("Dimensão do Quadrado: "))
while not n>=3:  # keep asking until the square dimension is at least 3
    n=int(input("Dimensão do Quadrado: "))
# read the n x n matrix, one element at a time
M=[]
for i in range(0,n,1):
    L=[]
    for j in range(0,n,1):
        L.append(int(input("Elemento da Linha: ")))
    M.append(L)
# row sums
somaL=[]
for i in range(0,n,1):
    somaL.append(sum(M[i]))
# column sums
somaC=[]
for j in range(0,n,1):
    C=0
    for i in range(0,n,1):
        C=C+M[i][j]
    somaC.append(C)
# find the row whose sum differs from the others (cont = how many rows differ from row 0)
b=[somaL[0]]
cont=0
k=0
VE=0
VC=0
for i in range(0,n,1):
    if somaL[i] in b:
        continue
    else:
        cont=cont+1
        k=i
if cont==1:  # exactly one row differs from row 0: that row holds the wrong element
    VE=somaL[k]
    VC=somaL[0]
else:        # otherwise row 0 itself is the odd one out
    VE=somaL[0]
    VC=somaL[1]
    k=0
# find the column whose sum differs from the others
b2=[somaC[0]]
cont2=0
k2=0
VE2=0
for i in range(0,n,1):
    if somaC[i] in b2:
        continue
    else:
        cont2=cont2+1
        k2=i
if cont2==1:
    VE2=somaC[k2]
else:
    VE2=somaC[0]
    k2=0
# the wrong element is at (k, k2); restoring the row sum gives its correct value
O=VC-(VE-M[k][k2])
P=M[k][k2]
print(O)
print(P)
| [
"[email protected]"
] | |
f44f6d9972814a4e7a1f84001a60cf2ac08ac418 | 5c26eafece0ee85a7ed4b6a34ee52753d7c86e49 | /polyaxon/estimators/hooks/step_hooks.py | 0e177575b29f1a02195d3439137b45db2c0d2a1a | [
"MIT"
] | permissive | StetHD/polyaxon | 345257076d484b2267ba20d9d346f1367cdd92d3 | dabddb9b6ea922a0549e3c6fd7711231f7462fa3 | refs/heads/master | 2021-03-19T06:45:51.806485 | 2017-09-26T14:31:26 | 2017-09-26T14:36:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,184 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from tensorflow.python.training import basic_session_run_hooks
from polyaxon.estimators.hooks.utils import can_run_hook
class StepLoggingTensorHook(basic_session_run_hooks.LoggingTensorHook):
"""Prints the given tensors once every N local steps or once every N seconds.
A modified version of tensorflow.python.training.basic_session_run_hooks LoggingTensorHook.
    Checks the context for `no_run_hooks_op` before calling the hook.
The tensors will be printed to the log, with `INFO` severity.
Args:
tensors: `dict` that maps string-valued tags to tensors/tensor names,
or `iterable` of tensors/tensor names.
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
every_n_secs: `int` or `float`, print the values of `tensors` once every N
seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
provided.
formatter: function, takes dict of `tag`->`Tensor` and returns a string.
If `None` uses default printing all tensors.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
def __init__(self, tensors, every_n_iter=None, every_n_secs=None, formatter=None):
super(StepLoggingTensorHook, self).__init__(tensors, every_n_iter, every_n_secs, formatter)
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = can_run_hook(run_context)
if self._should_trigger:
return super(StepLoggingTensorHook, self).before_run(run_context)
else:
return None
class StopAtStepHook(basic_session_run_hooks.StopAtStepHook):
"""Monitor to request stop at a specified step.
(A mirror to tensorflow.python.training.basic_session_run_hooks StopAtStepHook.)
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
def __init__(self, num_steps=None, last_step=None):
super(StopAtStepHook, self).__init__(num_steps, last_step)
class StepCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds.
(A mirror to tensorflow.python.training.basic_session_run_hooks CheckpointSaverHook.)
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances.
Used for callbacks that run immediately after the corresponding
CheckpointSaverHook callbacks, only in steps where the
CheckpointSaverHook was triggered.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: Exactly one of saver or scaffold should be set.
"""
def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None,
checkpoint_basename="model.ckpt", scaffold=None, listeners=None):
super(StepCheckpointSaverHook, self).__init__(checkpoint_dir, save_secs, save_steps, saver,
checkpoint_basename, scaffold, listeners)
class StepCounterHook(basic_session_run_hooks.StepCounterHook):
"""Steps per second monitor.
    (A mirror to tensorflow.python.training.basic_session_run_hooks StepCounterHook.)
"""
def __init__(self, every_n_steps=100, every_n_secs=None, output_dir=None, summary_writer=None):
super(StepCounterHook, self).__init__(
every_n_steps, every_n_secs, output_dir, summary_writer)
class StepSummarySaverHook(basic_session_run_hooks.SummarySaverHook):
"""Saves summaries every N steps.
    (A mirror to tensorflow.python.training.basic_session_run_hooks SummarySaverHook.)
Args:
save_steps: `int`, save summaries every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int`, save summaries every N seconds.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string` containing the serialized `Summary`
protocol buffer or a list of `Tensor`. They are most likely an output
by TF summary methods like `tf.summary.scalar` or
`tf.summary.merge_all`. It can be passed in as one tensor; if more
than one, they must be passed in as a list.
Raises:
ValueError: Exactly one of scaffold or summary_op should be set.
"""
def __init__(self, save_steps=None, save_secs=None, output_dir=None, summary_writer=None,
scaffold=None, summary_op=None):
super(StepSummarySaverHook, self).__init__(
save_steps, save_secs, output_dir, summary_writer, scaffold, summary_op)
STEP_HOOKS = OrderedDict([
('StepLoggingTensorHook', StepLoggingTensorHook),
('StopAtStepHook', StopAtStepHook),
('StepCheckpointSaverHook', StepCheckpointSaverHook),
('StepCounterHook', StepCounterHook),
('StepSummarySaverHook', StepSummarySaverHook),
])
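# Example lookup (sketch): STEP_HOOKS['StopAtStepHook'](last_step=10000) builds a hook that
# requests training to stop once that global step is reached.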
| [
"[email protected]"
] | |
b284c2b20a27edfd46ff6f14ba59bcd5aff733d3 | be026334d457b1f78050f8262cd693922c6c8579 | /onnxruntime/python/tools/transformers/fusion_gpt_attention_megatron.py | 5418ccf513c770d3ec626ac6520e367c249eaa37 | [
"MIT"
] | permissive | ConnectionMaster/onnxruntime | 953c34c6599c9426043a8e5cd2dba05424084e3b | bac9c0eb50ed5f0361f00707dd6434061ef6fcfe | refs/heads/master | 2023-04-05T00:01:50.750871 | 2022-03-16T15:49:42 | 2022-03-16T15:49:42 | 183,019,796 | 1 | 0 | MIT | 2023-04-04T02:03:14 | 2019-04-23T13:21:11 | C++ | UTF-8 | Python | false | false | 10,803 | py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
import numpy as np
from logging import getLogger
from onnx import helper, numpy_helper, TensorProto
from onnx_model import OnnxModel
from fusion_base import Fusion
from fusion_utils import FusionUtils
from fusion_gpt_attention import FusionGptAttentionPastBase
logger = getLogger(__name__)
def is_close(value, expected_value):
return abs(value - expected_value) <= 1e-6
class FusionGptAttentionMegatron(FusionGptAttentionPastBase):
"""
Fuse GPT-2 Attention with past state subgraph from Megatron into one Attention node.
"""
def __init__(self, model: OnnxModel, num_heads: int):
super().__init__(model, num_heads)
def fuse_attention_node(self, matmul_before_split, add_before_split, past, present, input, reshape_qkv, mask):
attention_node_name = self.model.create_node_name('GptAttention')
int32_mask = self.cast_attention_mask(mask)
output = reshape_qkv.output[0]
i = 1 if (add_before_split.input[0] == matmul_before_split.output[0]) else 0
attention_node = helper.make_node(
'Attention',
inputs=[input, matmul_before_split.input[1], add_before_split.input[i], int32_mask, past],
outputs=[output, present],
name=attention_node_name)
attention_node.domain = "com.microsoft"
attention_node.attribute.extend([
helper.make_attribute("num_heads", self.num_heads),
helper.make_attribute("unidirectional", 0) # unidirectional shall not be ON for 4D attention mask
])
nodes_to_add = [attention_node]
self.nodes_to_add.extend(nodes_to_add)
for node in nodes_to_add:
self.node_name_to_graph_name[node.name] = self.this_graph_name
self.nodes_to_remove.append(reshape_qkv)
# we rely on prune_graph() to clean old subgraph nodes
self.prune_graph = True
def match_mask(self, sub_qk, mul_qk, matmul_qk, layernorm_before_attention):
mask_nodes = self.model.match_parent_path(
sub_qk,
['Mul', 'Sub', 'Slice', 'Slice'],
[1, 0, 1, 0]) # yapf: disable
if mask_nodes is None:
logger.debug("fuse_attention: failed to match unidirectional mask path")
return None
(mul_mask, sub_mask, last_slice_mask, slice_mask) = mask_nodes
if mul_qk.input[1] != last_slice_mask.output[0]:
logger.debug("fuse_attention failed: mul_qk.input[1] != last_slice_mask.output[0]")
return None
if not self.utils.check_node_input_value(mul_mask, 1, 10000.0):
logger.debug("fuse_attention failed: mul_mask input 1 is not constant 10000.0")
return None
if not self.utils.check_node_input_value(sub_mask, 0, 1.0):
logger.debug("fuse_attention failed: sub_mask input 0 is not constant 1.0")
return None
if not self.model.find_graph_input(slice_mask.input[0]):
logger.info("expect slick_mask input 0 to be graph input")
return None
if not self.utils.check_node_input_value(last_slice_mask, 1, [0]):
logger.debug("fuse_attention failed: last_slice_mask input 1 (starts) is not constant [0]")
return None
if not self.utils.check_node_input_value(last_slice_mask, 3, [3]):
logger.debug("fuse_attention failed: last_slice_mask input 3 (axes) is not constant [3]")
return False
if not self.utils.check_node_input_value(last_slice_mask, 4, [1]):
logger.debug("fuse_attention failed: last_slice_mask input 4 (steps) is not constant [1]")
return False
if not self.utils.check_node_input_value(slice_mask, 3, [2]):
logger.debug("fuse_attention failed: slice_mask input 3 (axes) is not constant [2]")
return None
if not self.utils.check_node_input_value(slice_mask, 4, [1]):
logger.debug("fuse_attention failed: slice_mask input 4 (steps) is not constant [1]")
return None
last_slice_path = self.model.match_parent_path(last_slice_mask, ['Unsqueeze', 'Gather', 'Shape', 'MatMul'],
[2, 0, 0, 0])
if last_slice_path is None or last_slice_path[-1] != matmul_qk:
logger.debug("fuse_attention: failed to match last slice path")
return None
first_slice_path = self.model.match_parent_path(slice_mask, ['Unsqueeze', 'Gather', 'Shape', 'MatMul'],
[2, 0, 0, 0])
if first_slice_path is None or first_slice_path[-1] != matmul_qk:
logger.debug("fuse_attention: failed to match first slice path")
return None
first_slice_sub = self.model.match_parent_path(slice_mask, ['Unsqueeze', 'Sub', 'Gather', 'Shape', 'MatMul'],
[1, 0, 0, 0, 0])
if first_slice_sub is None or first_slice_sub[-1] != matmul_qk:
logger.debug("fuse_attention: failed to match last slice sub path")
return None
first_slice_sub_1 = self.model.match_parent_path(slice_mask,
['Unsqueeze', 'Sub', 'Gather', 'Shape', 'LayerNormalization'],
[1, 0, 1, 0, 0])
if first_slice_sub_1 is None or first_slice_sub_1[-1] != layernorm_before_attention:
logger.debug("fuse_attention: failed to match last slice sub path 1")
return None
return slice_mask.input[0]
def fuse(self, normalize_node, input_name_to_nodes, output_name_to_node):
past = None
present = None
qkv_nodes = self.model.match_parent_path(
normalize_node,
['Add', 'Add', 'MatMul', 'Reshape', 'Transpose', 'MatMul'],
[ 0, 1, None, 0, 0, 0],
output_name_to_node=output_name_to_node,
) # yapf: disable
if qkv_nodes is None:
return
(add_skip, add_after_attention, matmul_after_attention, reshape_qkv, transpose_qkv, matmul_qkv) = qkv_nodes
skip_input = add_skip.input[0]
v_nodes = self.model.match_parent_path(
matmul_qkv,
['Concat', 'Transpose', 'Reshape', 'Split', 'Add', 'MatMul', 'LayerNormalization'],
[1, 1, 0, 0, 0, None, 0]) # yapf: disable
if v_nodes is None:
logger.debug("fuse_attention: failed to match v path")
return
(concat_v, transpose_v, reshape_v, split_v, add_before_split, matmul_before_split,
layernorm_before_attention) = v_nodes
if skip_input != layernorm_before_attention.input[0]:
logger.debug("fuse_attention: skip_input != layernorm_before_attention.input[0]")
return
qk_nodes = self.model.match_parent_path(matmul_qkv, ['Softmax', 'Sub', 'Mul', 'MatMul'], [0, 0, 0, 0])
if qk_nodes is None:
logger.debug("fuse_attention: failed to match qk path")
return None
(softmax_qk, sub_qk, mul_qk, matmul_qk) = qk_nodes
if self.model.get_node_attribute(softmax_qk, "axis") != 3:
logger.debug("fuse_attention failed: softmax_qk axis != 3")
return None
attention_mask = self.match_mask(sub_qk, mul_qk, matmul_qk, layernorm_before_attention)
q_nodes = self.model.match_parent_path(matmul_qk, ['Div', 'Transpose', 'Reshape', 'Split'], [0, 0, 0, 0])
if q_nodes is None:
logger.debug("fuse_attention: failed to match q path")
return
(div_q, transpose_q, reshape_q, split_q) = q_nodes
if split_v != split_q:
logger.debug("fuse_attention: skip since split_v != split_q")
return
k_nodes = self.model.match_parent_path(matmul_qk,
['Div', 'Transpose', 'Concat', 'Transpose', 'Reshape', 'Split'],
[1, 0, 0, 1, 0, 0])
if k_nodes is None:
logger.debug("fuse_attention: failed to match k path")
return
(div_k, _, concat_k, transpose_k, reshape_k, split_k) = k_nodes
if split_v != split_k:
logger.debug("fuse_attention: skip since split_v != split_k")
return
i, value = self.model.get_constant_input(reshape_k)
if not (isinstance(value, np.ndarray) and list(value.shape) == [4] and value[0] == 0 and value[1] == 0
and value[2] > 0 and value[3] > 0):
logger.debug("fuse_attention: reshape constant input is not [0, 0, N, H]")
return
num_heads = value[2]
if num_heads != self.num_heads:
logger.info(f"Detected num_heads={num_heads}. Ignore user specified value {self.num_heads}")
self.num_heads = num_heads
hidden_size_per_head = value[3]
i, value = self.model.get_constant_input(div_k)
expected_value = float(np.sqrt(np.sqrt(hidden_size_per_head)))
if not is_close(value, expected_value):
logger.debug(f"fuse_attention: div_k value={value} expected={expected_value}")
return
i, value = self.model.get_constant_input(div_q)
if not is_close(value, expected_value):
logger.debug(f"fuse_attention: div_q value={value} expected={expected_value}")
return
# Match past and present paths
past = self.match_past_pattern_2(concat_k, concat_v, output_name_to_node)
if past is None:
logger.debug("fuse_attention: match past failed")
return
if not self.model.find_graph_input(past):
logger.debug("fuse_attention: past is not graph input.")
# For GPT2LMHeadModel_BeamSearchStep, there is an extra Gather node to select beam index so it is not graph input.
present = self.match_present(concat_v, input_name_to_nodes)
if present is None:
logger.debug("fuse_attention: match present failed")
return
if not self.model.find_graph_output(present):
logger.info("fuse_attention: expect present to be graph output")
return
self.fuse_attention_node(matmul_before_split, add_before_split, past, present,
layernorm_before_attention.output[0], reshape_qkv, attention_mask)
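# Usage sketch (assumes the surrounding onnxruntime transformers tooling, i.e. OnnxModel and the
# Fusion base class's apply() driver that calls fuse() on candidate nodes):
#   fusion = FusionGptAttentionMegatron(OnnxModel(model_proto), num_heads=16)
#   fusion.apply()   # rewrites matched Megatron GPT-2 attention subgraphs into Attention nodes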
| [
"[email protected]"
] | |
6a4c16868431e1e23eb5da001f0272c6e45ae97e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /7ECZC8CBEhy5QkvN3_15.py | b7cee2eac0f62400c8ad19d3b56c9c8b2daff2e8 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py |
def how_many_walls(n, w, h):
sum_ = 0
count = 0
wallSquare = w * h
while sum_ <= n:
sum_ += wallSquare
count += 1
return count - 1
| [
"[email protected]"
] | |
05289f7ef1d3bf5a16d16440b251f0bf7002e2b3 | ff1477deb2b0bf0580ea512c1843a4085e639932 | /main.py | 521a0f3e15a8d864423d29388bc782737b4fb0e9 | [
"MIT"
] | permissive | SkylerHoward/O | f7ff9955499483f4368e01cd5c2991970b160d29 | 989246a5cdc297ab9f76cb6b26daebd799a03741 | refs/heads/master | 2021-07-08T19:57:12.042530 | 2017-10-07T13:33:44 | 2017-10-07T13:33:44 | 106,098,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | import weather, morning, time, sleep, events
from datetime import datetime
from speak import *
speak = speech()
def main():
while True:
command = input('> ')
if command == 'sleep':
speak.speak('Good night.')
for line in sleep.main():
speak.speak(line)
if command == 'quit':
quit()
if command == 'events':
te = events.today()
speak.speak('You have {} events today'.format(len(te)))
for line in te:
speak.speak(line)
main() | [
"[email protected]"
] | |
3f2801ee0162f33263eb9044744a84e3a7a154e9 | 587973fdf376f448b90f44f713742ea02062666b | /config.py | 418ebf67d4cb96815ebbce4d111bc70640c10183 | [] | no_license | whistlepark/UCSD-ECE148-WI20-TEAM6 | 79383160cd4c8cc458903bf45b5dd2ca4dbed1bd | 9db60ed81146959c295963c47b780fcf3d20bc9f | refs/heads/master | 2021-01-15T03:27:32.500974 | 2020-03-08T23:58:24 | 2020-03-08T23:58:24 | 242,863,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,067 | py | """
CAR CONFIG
This file is read by your car application's manage.py script to change the car
performance.
EXAMPLE
-----------
import dk
cfg = dk.load_config(config_path='~/mycar/config.py')
print(cfg.CAMERA_RESOLUTION)
"""
import os
#PATHS
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(CAR_PATH, 'data')
MODELS_PATH = os.path.join(CAR_PATH, 'models')
#VEHICLE
DRIVE_LOOP_HZ = 20 # the vehicle loop will pause if faster than this speed.
MAX_LOOPS = None # the vehicle loop can abort after this many iterations, when given a positive integer.
#CAMERA
CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|MOCK)
IMAGE_W = 160
IMAGE_H = 120
IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
CAMERA_FRAMERATE = DRIVE_LOOP_HZ
# For CSIC camera - If the camera is mounted in a rotated position, changing the below parameter will correct the output frame orientation
CSIC_CAM_GSTREAMER_FLIP_PARM = 0 # (0 => none , 4 => Flip horizontally, 6 => Flip vertically)
#9865, overrides only if needed, i.e. TX2..
PCA9685_I2C_ADDR = 0x40 #I2C address, use i2cdetect to validate this number
PCA9685_I2C_BUSNUM = None #None will auto detect, which is fine on the pi. But other platforms should specify the bus num.
#DRIVETRAIN
#These options specify which chassis and motor setup you are using. Most are using SERVO_ESC.
#DC_STEER_THROTTLE uses HBridge pwm to control one steering dc motor, and one drive wheel motor
#DC_TWO_WHEEL uses HBridge pwm to control two drive motors, one on the left, and one on the right.
#SERVO_HBRIDGE_PWM use ServoBlaster to output pwm control from the PiZero directly to control steering, and HBridge for a drive motor.
DRIVE_TRAIN_TYPE = "SERVO_ESC" # SERVO_ESC|DC_STEER_THROTTLE|DC_TWO_WHEEL|SERVO_HBRIDGE_PWM
#STEERING
STEERING_CHANNEL = 1 #channel on the 9685 pwm board 0-15
STEERING_LEFT_PWM = 460 #pwm value for full left steering
STEERING_RIGHT_PWM = 290 #pwm value for full right steering
#THROTTLE
THROTTLE_CHANNEL = 0 #channel on the 9685 pwm board 0-15
THROTTLE_FORWARD_PWM = 500 #pwm value for max forward throttle
THROTTLE_STOPPED_PWM = 370 #pwm value for no movement
THROTTLE_REVERSE_PWM = 220 #pwm value for max reverse throttle
#DC_STEER_THROTTLE with one motor as steering, one as drive
#these GPIO pinouts are only used for the DRIVE_TRAIN_TYPE=DC_STEER_THROTTLE
HBRIDGE_PIN_LEFT = 18
HBRIDGE_PIN_RIGHT = 16
HBRIDGE_PIN_FWD = 15
HBRIDGE_PIN_BWD = 13
#DC_TWO_WHEEL - with two wheels as drive, left and right.
#these GPIO pinouts are only used for the DRIVE_TRAIN_TYPE=DC_TWO_WHEEL
HBRIDGE_PIN_LEFT_FWD = 18
HBRIDGE_PIN_LEFT_BWD = 16
HBRIDGE_PIN_RIGHT_FWD = 15
HBRIDGE_PIN_RIGHT_BWD = 13
#TRAINING
#The DEFAULT_MODEL_TYPE will choose which model will be created at training time. This chooses
#between different neural network designs. You can override this setting by passing the command
#line parameter --type to the python manage.py train and drive commands.
DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|rnn|imu|behavior|3d|localizer|latent)
BATCH_SIZE = 128                #how many records to use when doing one pass of gradient descent. Use a smaller number if your GPU is running out of memory.
TRAIN_TEST_SPLIT = 0.8 #what percent of records to use for training. the remaining used for validation.
MAX_EPOCHS = 100 #how many times to visit all records of your data
SHOW_PLOT = True #would you like to see a pop up display of final loss?
VEBOSE_TRAIN = True #would you like to see a progress bar with text during training?
USE_EARLY_STOP = True #would you like to stop the training if we see it's not improving fit?
EARLY_STOP_PATIENCE = 5 #how many epochs to wait before no improvement
MIN_DELTA = .0005 #early stop will want this much loss change before calling it improved.
PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
SEND_BEST_MODEL_TO_PI = False #change to true to automatically send best model during training
CACHE_IMAGES = True             #keep images in memory. will speed up successive epochs, but crater if not enough mem.
PRUNE_CNN = False #This will remove weights from your model. The primary goal is to increase performance.
PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning.
PRUNE_PERCENT_PER_ITERATION = 20 # Percentage of pruning that is performed per iteration.
PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amount of validation loss that is permitted during pruning.
PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model.
#Pi login information
#When using the continuous train option, these credentials will
#be used to copy the final model to your vehicle. If not using this option, no need to set these.
PI_USERNAME = "pi" # username on pi
PI_PASSWD = "raspberry" # password is optional. Only used from Windows machine. Ubuntu and mac users should copy their public keys to the pi. `ssh-copy-id username@hostname`
PI_HOSTNAME = "raspberrypi.local" # the network hostname or ip address
PI_DONKEY_ROOT = "/home/pi/mycar" # the location of the mycar dir on the pi. this will be used to help locate the final model destination.
# Region of interest cropping
# only supported in Categorical and Linear models.
# If these crop values are too large, they will cause the stride values to become negative and the model will not be valid.
ROI_CROP_TOP = 0 #the number of rows of pixels to ignore on the top of the image
ROI_CROP_BOTTOM = 0 #the number of rows of pixels to ignore on the bottom of the image
#Model transfer options
#When copying weights during a model transfer operation, should we freeze a certain number of layers
#to the incoming weights and not allow them to change during training?
FREEZE_LAYERS = False #default False will allow all layers to be modified by training
NUM_LAST_LAYERS_TO_TRAIN = 7 #when freezing layers, how many layers from the last should be allowed to train?
#JOYSTICK
USE_JOYSTICK_AS_DEFAULT = False #when starting the manage.py, when True, will not require a --js option to use the joystick
JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available.
JOYSTICK_STEERING_SCALE = 1.0       #some people want a steering that is less sensitive. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir.
AUTO_RECORD_ON_THROTTLE = False #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick.
CONTROLLER_TYPE='F710' #(ps3|ps4|xbox|nimbus|wiiu|F710|rc3)
USE_NETWORKED_JS = False #should we listen for remote joystick control over the network?
NETWORK_JS_SERVER_IP = "192.168.0.1"#when listening for network joystick control, which ip is serving this information
JOYSTICK_DEADZONE = 0.0 # when non zero, this is the smallest throttle before recording triggered.
JOYSTICK_THROTTLE_DIR = 1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward
#For the categorical model, this limits the upper bound of the learned throttle
#it's very IMPORTANT that this value is matched from the training PC config.py and the robot.py
#and ideally wouldn't change once set.
MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.5
#RNN or 3D
SEQUENCE_LENGTH = 3 #some models use a number of images over time. This controls how many.
#IMU
HAVE_IMU = False #when true, this add a Mpu6050 part and records the data. Can be used with a
#SOMBRERO
HAVE_SOMBRERO = False #set to true when using the sombrero hat from the Donkeycar store. This will enable pwm on the hat.
#RECORD OPTIONS
RECORD_DURING_AI = False #normally we do not record during ai mode. Set this to true to get image and steering records for your Ai. Be careful not to use them to train.
#LED
HAVE_RGB_LED = False #do you have an RGB LED like https://www.amazon.com/dp/B07BNRZWNF
LED_INVERT = False #COMMON ANODE? Some RGB LED use common anode. like https://www.amazon.com/Xia-Fly-Tri-Color-Emitting-Diffused/dp/B07MYJQP8B
#LED board pin number for pwm outputs
#These are physical pinouts. See: https://www.raspberrypi-spy.co.uk/2012/06/simple-guide-to-the-rpi-gpio-header-and-pins/
LED_PIN_R = 12
LED_PIN_G = 10
LED_PIN_B = 16
#LED status color, 0-100
LED_R = 0
LED_G = 0
LED_B = 1
#LED Color for record count indicator
REC_COUNT_ALERT = 1000 #how many records before blinking alert
REC_COUNT_ALERT_CYC = 15 #how many cycles of 1/20 of a second to blink per REC_COUNT_ALERT records
REC_COUNT_ALERT_BLINK_RATE = 0.4 #how fast to blink the led in seconds on/off
#first number is record count, second tuple is color ( r, g, b) (0-100)
#when record count exceeds that number, the color will be used
RECORD_ALERT_COLOR_ARR = [ (0, (1, 1, 1)),
(3000, (5, 5, 5)),
(5000, (5, 2, 0)),
(10000, (0, 5, 0)),
(15000, (0, 5, 5)),
(20000, (0, 0, 5)), ]
#LED status color, 0-100, for model reloaded alert
MODEL_RELOADED_LED_R = 100
MODEL_RELOADED_LED_G = 0
MODEL_RELOADED_LED_B = 0
#BEHAVIORS
#When training the Behavioral Neural Network model, make a list of the behaviors,
#Set the TRAIN_BEHAVIORS = True, and use the BEHAVIOR_LED_COLORS to give each behavior a color
TRAIN_BEHAVIORS = False
BEHAVIOR_LIST = ['Left_Lane', "Right_Lane"]
BEHAVIOR_LED_COLORS =[ (0, 10, 0), (10, 0, 0) ] #RGB tuples 0-100 per channel
#Localizer
#The localizer is a neural network that can learn to predict its location on the track.
#This is an experimental feature that needs more development. But it can currently be used
#to predict the segment of the course, where the course is divided into NUM_LOCATIONS segments.
TRAIN_LOCALIZER = False
NUM_LOCATIONS = 10
BUTTON_PRESS_NEW_TUB = False #when enabled, makes it easier to divide our data into one tub per track length if we make a new tub on each X button press.
#DonkeyGym
#Only on Ubuntu linux, you can use the simulator as a virtual donkey and
#issue the same python manage.py drive command as usual, but have them control a virtual car.
#This enables that, and sets the path to the simulator and the environment.
#You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zip
#then extract that and modify DONKEY_SIM_PATH.
DONKEY_GYM = False
DONKEY_SIM_PATH = "path to sim" #"/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64"
DONKEY_GYM_ENV_NAME = "donkey-generated-track-v0" # ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0")
#publish camera over network
#This is used to create a TCP service to publish the camera feed
PUB_CAMERA_IMAGES = False
#When racing, to give the ai a boost, configure these values.
AI_LAUNCH_DURATION = 0.0 # the ai will output throttle for this many seconds
AI_LAUNCH_THROTTLE = 0.0 # the ai will output this throttle value
AI_LAUNCH_ENABLE_BUTTON = 'R2' # this keypress will enable this boost. It must be enabled before each use to prevent accidental trigger.
AI_LAUNCH_KEEP_ENABLED = False      # when False (default) you will need to hit the AI_LAUNCH_ENABLE_BUTTON for each use. This is safest. When this is True, the boost is active on each trip into "local" ai mode.
#Scale the output of the throttle of the ai pilot for all model types.
AI_THROTTLE_MULT = 1.0 # this multiplier will scale every throttle value for all output from NN models
#Path following
PATH_FILENAME = "donkey_path.pkl" #the path will be saved to this filename
PATH_SCALE = 5.0 # the path display will be scaled by this factor in the web page
PATH_OFFSET = (0, 0) # 255, 255 is the center of the map. This offset controls where the origin is displayed.
PATH_MIN_DIST = 0.3 # after travelling this distance (m), save a path point
PID_P = -10.0 # proportional mult for PID path follower
PID_I = 0.000 # integral mult for PID path follower
PID_D = -0.2 # differential mult for PID path follower
PID_THROTTLE = 0.2 # constant throttle value during path following
SAVE_PATH_BTN = "cross" # joystick button to save path
RESET_ORIGIN_BTN = "triangle" # joystick button to press to move car back to origin
| [
"[email protected]"
] | |
e4ae96c0131406c2419a148c0186b3269acfa42f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03964/s755365360.py | 9f2a66cabd6d3f24f2aafce6d59b731dbfbc227f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | import bisect
import collections
import copy
import functools
import heapq
import math
import sys
from collections import deque
from collections import defaultdict
input = sys.stdin.readline
MOD = 10**9+7
N = int(input())
T = [0]*N
A = [0]*N
for i in range(N):
T[i],A[i] = map(int,(input().split()))
t,a = T[0],A[0]
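# For each later ratio (T[i], A[i]), binary-search the smallest multiplier `now` such that
# T[i]*now >= t and A[i]*now >= a: the new totals must keep the exact ratio T[i]:A[i]
# and may never drop below the previous totals (t, a).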
for i in range(1,N):
s = T[i] + A[i]
now = 1
l = 1
r = 10**18//s + 1
mae = -1
while now != mae:
mae = now
if T[i]*now < t or A[i]*now < a:
l = now
else:
r = now
now = (l+r+1)//2
t,a = T[i]*now,A[i]*now
print(t+a) | [
"[email protected]"
] | |
526512060ec60f64cab763dcdc20a58c882fa21b | e3040a2e23a856e319e02037dc6baf3882c796b9 | /samples/openapi3/client/petstore/python/petstore_api/paths/pet_find_by_status/get.py | bca423ad68f208522270ab2159908c0f06ae7b00 | [
"Apache-2.0"
] | permissive | mishin/openapi-generator | 2ed2e0739c0cc2a627c25191d5898071d9294036 | 3ed650307513d552404f3d76487f3b4844acae41 | refs/heads/master | 2023-06-10T03:01:09.612130 | 2022-10-14T08:29:15 | 2022-10-14T08:29:15 | 271,080,285 | 0 | 0 | Apache-2.0 | 2023-05-30T02:01:25 | 2020-06-09T18:29:41 | Java | UTF-8 | Python | false | false | 12,472 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from petstore_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from petstore_api import schemas # noqa: F401
from petstore_api.model.pet import Pet
from . import path
# Query params
class StatusSchema(
schemas.ListSchema
):
class MetaOapg:
class items(
schemas.EnumBase,
schemas.StrSchema
):
class MetaOapg:
enum_value_to_name = {
"available": "AVAILABLE",
"pending": "PENDING",
"sold": "SOLD",
}
@schemas.classproperty
def AVAILABLE(cls):
return cls("available")
@schemas.classproperty
def PENDING(cls):
return cls("pending")
@schemas.classproperty
def SOLD(cls):
return cls("sold")
def __new__(
cls,
arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'StatusSchema':
return super().__new__(
cls,
arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> MetaOapg.items:
return super().__getitem__(i)
RequestRequiredQueryParams = typing_extensions.TypedDict(
'RequestRequiredQueryParams',
{
'status': typing.Union[StatusSchema, list, tuple, ],
}
)
RequestOptionalQueryParams = typing_extensions.TypedDict(
'RequestOptionalQueryParams',
{
},
total=False
)
class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
pass
request_query_status = api_client.QueryParameter(
name="status",
style=api_client.ParameterStyle.FORM,
schema=StatusSchema,
required=True,
)
_auth = [
'http_signature_test',
'petstore_auth',
]
class SchemaFor200ResponseBodyApplicationXml(
schemas.ListSchema
):
class MetaOapg:
@staticmethod
def items() -> typing.Type['Pet']:
return Pet
def __new__(
cls,
arg: typing.Union[typing.Tuple['Pet'], typing.List['Pet']],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'SchemaFor200ResponseBodyApplicationXml':
return super().__new__(
cls,
arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> 'Pet':
return super().__getitem__(i)
class SchemaFor200ResponseBodyApplicationJson(
schemas.ListSchema
):
class MetaOapg:
@staticmethod
def items() -> typing.Type['Pet']:
return Pet
def __new__(
cls,
arg: typing.Union[typing.Tuple['Pet'], typing.List['Pet']],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'SchemaFor200ResponseBodyApplicationJson':
return super().__new__(
cls,
arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> 'Pet':
return super().__getitem__(i)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationXml,
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/xml': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationXml),
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: schemas.Unset = schemas.unset
headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
)
_status_code_to_response = {
'200': _response_for_200,
'400': _response_for_400,
}
_all_accept_content_types = (
'application/xml',
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _find_pets_by_status_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _find_pets_by_status_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _find_pets_by_status_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _find_pets_by_status_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Finds Pets by status
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params)
used_path = path.value
prefix_separator_iterator = None
for parameter in (
request_query_status,
):
parameter_data = query_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
if prefix_separator_iterator is None:
prefix_separator_iterator = parameter.get_prefix_separator_iterator()
serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
for serialized_value in serialized_data.values():
used_path += serialized_value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class FindPetsByStatus(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def find_pets_by_status(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def find_pets_by_status(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def find_pets_by_status(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def find_pets_by_status(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._find_pets_by_status_oapg(
query_params=query_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._find_pets_by_status_oapg(
query_params=query_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
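# Hedged usage sketch (assumptions: this module was produced by an OpenAPI
# generator for the petstore spec, the surrounding package exposes
# `api_client.ApiClient` plus a configured `Configuration`, and 'available' is a
# valid status value; adjust names to the actual generated package):
#   client = api_client.ApiClient(configuration)
#   api = FindPetsByStatus(client)
#   response = api.find_pets_by_status(query_params={'status': ['available']})
#   pets = response.body  # deserialized schema instances for a 200 response
#   raw = api.find_pets_by_status(query_params={'status': ['available']},
#                                 skip_deserialization=True).response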
| [
"[email protected]"
] | |
06ce341e0e7626e2104a0667155275b069268653 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Kivy/pycon2013/html5slides/scripts/md/render.py | b5ef0975e20eb201985c57c5b48cd150050171da | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:6e3940fcf589334234bc7943dfc2c0d8e860fc139a432eae485128714022232c
size 1807
| [
"[email protected]"
] | |
e5b0887d810d27576528bafda388fdfd915d3c4f | c6320d68968de93ce9d686f5a59bb34909d089bb | /03_Polynomial_Regression/polynomial_regression_rad.py | fafb65739a4f26fa1c7981097fe77412704b96b8 | [] | no_license | rbartosinski/MachineLearningRes | 0835e6b9f94c309bf2ce8ff7ceb73912a7eeea63 | 5a1af15e77d589149aa1cb22cb96f56956fd9a0f | refs/heads/master | 2020-04-07T00:58:03.692579 | 2019-01-11T13:49:12 | 2019-01-11T13:49:12 | 157,925,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 15:04:28 2018
@author: radek
"""
# load the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# load the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# fit Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
# fit Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
lin_reg2 = LinearRegression()
lin_reg2.fit(X_poly, y)
# visualize the Linear Regression results
plt.scatter(X, y, color='red')
plt.plot(X, lin_reg.predict(X), color='blue')
plt.title('Position level vs. Salary (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# visualize the Polynomial Regression results
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, lin_reg2.predict(poly_reg.fit_transform(X_grid)), color='blue')
plt.title('Position level vs. Salary (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# prediction with Linear Regression
lin_reg.predict([[6.5]])
# prediction with Polynomial Regression
lin_reg2.predict(poly_reg.fit_transform([[6.5]])) | [
"[email protected]"
] | |
84ef03241f898679f28eceb6fc11716406bc5283 | dc85229e6da452a54577cef2740de9413f3e4528 | /ArmstrongNumbers.py | a1bd165f3ec84125cad4064709e22ed5a15d6f2c | [] | no_license | mutlukilic/Python | 1ea2549027dc5e120934c06fca8f98e3e1865148 | 180407f14fd480cc70f0758b7b6f1554aa2630f9 | refs/heads/master | 2021-01-13T04:17:20.684858 | 2017-02-16T11:48:24 | 2017-02-16T11:48:24 | 77,472,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | basamak_sayisi = input("Basamak sayisini giriniz : ")
basamak_sayisi = int(basamak_sayisi)  # input() returns a string on Python 3
for i in range(10**(basamak_sayisi-1), 10**basamak_sayisi):
armstrong = 0
for j in str(i):
armstrong += int(j)**basamak_sayisi
if(armstrong == i):
print(i)
| [
"[email protected]"
] | |
717a32ee923895084358b984f07330c001396344 | 1aa015c1d08a4cba09ce749cfe325e996039459c | /Pygame Tutorial/TuterForBook04.py | ec3cacb39832a8e21245de3de37c82d8be4e9dde | [] | no_license | Jerrykim91/Pygame_Study | 98e9494e661d42229b7e0118095bf9d8636e266e | 6b8fe0ee239d2f90447a5b09bb742323677dfec5 | refs/heads/master | 2022-03-10T18:19:42.281581 | 2019-11-15T19:34:22 | 2019-11-15T19:34:22 | 216,942,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,588 | py | # 게임으로 배우는 파이썬 교재를 기반으로 실습
""" draw_ lines0.py"""
# import
import pygame
import sys
import random
from pygame.locals import QUIT
# initialize pygame
pygame.init()
# variables
SCREEN_W, SCREEN_H = 400, 300
# x, y = [0, 0]
# color definitions
# each channel is 0-255, in (R, G, B) order
RED = 255, 0, 0          # red: R 255, G 0, B 0
GREEN = 0, 255, 0        # green: R 0, G 255, B 0
BLUE = 0, 0, 255         # blue: R 0, G 0, B 255
PURPLE = 127, 0, 127     # purple: R 127, G 0, B 127
BLACK = 0, 0, 0          # black: R 0, G 0, B 0
GRAY = 127, 127, 127     # gray: R 127, G 127, B 127
WHITE = 255, 255, 255    # white: R 255, G 255, B 255
# window setup
SCREEN = pygame.display.set_mode((SCREEN_W, SCREEN_H))
FPSCLOCK = pygame.time.Clock()  # required to keep CPU usage under control
# window title
pygame.display.set_caption("Random line drawing")
# main function
def main():
""" main routine """
run = True
while run:
EVENTS = pygame.event.get()
for event in EVENTS:
if event.type == QUIT:
pygame.quit()
sys.exit()
        SCREEN.fill((255, 255, 255))  # fill the screen with white
pointlist = []
for _ in range(10):
xpos = random.randint(0, 400)
ypos = random.randint(0, 300)
pointlist.append((xpos, ypos))
pygame.draw.lines(SCREEN, BLACK, True, pointlist, 5)
pygame.display.update()
FPSCLOCK.tick(3)
if __name__=='__main__':
main()
| [
"[email protected]"
] | |
1d0479b10748363c8598f680dd8ac691974f0c9e | 11060ca244940baef96a51d794d73aab44fc31c6 | /src/brainstorming/tornado/modbus/pymodbus/__init__.py | 0bb3d9b53e2360b44fb5246e72a6c065e1fdb427 | [] | no_license | D3f0/txscada | eb54072b7311068a181c05a03076a0b835bb0fe1 | f8e1fd067a1d001006163e8c3316029f37af139c | refs/heads/master | 2020-12-24T06:27:17.042056 | 2016-07-27T17:17:56 | 2016-07-27T17:17:56 | 3,565,335 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | """
Pymodbus: Modbus Protocol Implementation
-----------------------------------------
This package can supply modbus clients and servers:
client:
- Can perform single get/set on discretes and registers
- Can perform multiple get/set on discretes and registers
- Working on diagnostic/file/pipe/setting/info requests
- Can fully scrape a host to be cloned
server:
- Can function as a fully implemented TCP modbus server
- Working on creating server control context
- Working on serial communication
- Working on functioning as an RTU/ASCII
- Can mimic a server based on the supplied input data
TwistedModbus is built on top of the Pymodbus developed from code by:
Copyright (c) 2001-2005 S.W.A.C. GmbH, Germany.
Copyright (c) 2001-2005 S.W.A.C. Bohemia s.r.o., Czech Republic.
Hynek Petrak <[email protected]>
Released under the GPLv2
"""
from pymodbus.version import _version
__version__ = _version.short().split('+')[0]
#---------------------------------------------------------------------------#
# Block unhandled logging
#---------------------------------------------------------------------------#
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
h = NullHandler()
logging.getLogger("pymodbus").addHandler(h)
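# Application-side note (sketch, standard-library logging only): the NullHandler
# above merely keeps pymodbus records from being reported when the application
# has not configured logging.  To see the library's log output, configure
# logging as usual, e.g.:
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#   logging.getLogger("pymodbus").setLevel(logging.DEBUG)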
| [
"devnull@localhost"
] | devnull@localhost |
620cd2171907d5b1e5e1ef3c3d3379f91f34ad8e | 54564fcd4205990d5610f39dcb0e3a778037591d | /data_plotting.py | f34746099759f8f86a409e9809ede5cf771f9af8 | [] | no_license | amishasacheti/Sign-Language-Recognition-using-SVM | bb50592016f6be37c8422a156679aa3c9c8a2663 | 42967d80338f2dcb80c659a1f2d858866c08bc06 | refs/heads/master | 2022-11-16T12:56:24.808663 | 2020-07-16T18:31:51 | 2020-07-16T18:31:51 | 270,957,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,676 | py | #display dataset in 3D
#STEP 1
import pandas as pd
import numpy as np
import math
from mpl_toolkits import mplot3d
# %matplotlib inline  # IPython magic; only valid in a Jupyter/IPython session
import matplotlib.pyplot as plt
data= np.loadtxt('D:\\final sem project\\dataset of coordinates\\1-hand_gestures\\afternoon_apurve_7.txt')
count=len(data)
bone_list = [[1, 2],[2,3],[2,4],[3,5],[4,6],[5,7],[6,8],[7,9],[8,10],[2,11],[11,12],[12,13],[12,14],[13,15],[14,16],[15,17],[17,19],[16,18],[18,20]]
lis=[[2,3],[3,5],[2,5],[2,4],[4,6],[2,6]]
lis = np.array(lis) - 1
lis1=[[3,5],[5,7],[3,7],[4,6],[6,8],[4,8]]
lis1 = np.array(lis1) - 1
lis2=[[9,5],[5,7],[9,7],[10,6],[6,8],[10,8]]
lis2 = np.array(lis2) - 1
print(count)
bone_list = np.array(bone_list) - 1
def findangle(x1,x2,x3,y1,y2,y3,z1,z2,z3):
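    # Descriptive note (added): builds the vectors from point 3 to points 1 and 2,
    # takes their cross product as the normal of the plane they span, measures the
    # angle between that normal and the global z-axis k = (0, 0, 1) via the dot
    # product, and returns the supplement of that angle (180 degrees minus it).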
cl=np.array([x1-x3,y1-y3,z1-z3])
cr=np.array([x2-x3,y2-y3,z2-z3])
modcl=np.linalg.norm(cl)
modcr=np.linalg.norm(cr)
m=modcl*modcr
l=np.cross(cl,cr)
n=l/m
k=np.array([0,0,1])
d=np.dot(n,k)
modn=np.linalg.norm(n)
modk=np.linalg.norm(k)
a=d/(modn*modk)
theta=math.acos(a) #angle in radian
theta1=(math.pi)-theta
angle=(theta1*180)/math.pi #angle in degree
#print(angle)
#print(theta1*180/math.pi)
return angle
for i in range(count):
#fig, ax = plt.subplots(1, figsize=(3, 8))
fig = plt.figure()
#ax = plt.axes(projection='3d')
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.set_title('Skeleton')
#plt.title('Skeleton')
#plt.xlim(-0.8,0.4)
#plt.ylim(-1.5,1.5)
x=data[i][0::3]
y=data[i][1::3]
z=data[i][2::3]
#ax.scatter(x, y,s=40)
ax.scatter3D(x, z,y,s=40)
ax.set_xlim(-0.2,0.5)
ax.set_ylim(-0.8,1.5)
ax.set_zlim(0,2)
for bone in bone_list:
ax.plot([x[bone[0]], x[bone[1]]],[z[bone[0]], z[bone[1]]], [y[bone[0]], y[bone[1]]],'r')
#ax.plot([x[bone[0]], x[bone[1]]], [y[bone[0]], y[bone[1]]],[z[bone[0]], z[bone[1]]], 'r')
#ax.plot([z[bone[0]], z[bone[1]]], [x[bone[0]], x[bone[1]]], [y[bone[0]], y[bone[1]]],'r')
#display dataset in 2D
for i in range(count):
fig, ax = plt.subplots(1, figsize=(8, 8))
#fig = plt.figure()
#ax = plt.axes(projection='3d')
#ax = fig.add_subplot(1, 2, 2, projection='3d')
#ax.set_title('Skeleton')
plt.title('Skeleton')
plt.xlim(-0.8,0.4)
plt.ylim(-1.5,1.5)
x=data[i][0::3]
y=data[i][1::3]
z=data[i][2::3]
ax.scatter(x, y,s=40)
#ax.scatter3D(x, y, z,s=40)
#ax.set_xlim(-0.8,-0.1)
#ax.set_ylim(-0.5,1.5)
#ax.set_zlim(-3,4)
for bone in bone_list:
ax.plot([x[bone[0]], x[bone[1]]], [y[bone[0]], y[bone[1]]], 'r')
| [
"[email protected]"
] | |
ac8fcee7be310f87e1cf6a7479d7dec05c585cc6 | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/dash_bootstrap_components/_components/CardText.py | c0146873ce75910bc6733eabc85670d925f82320 | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 3,332 | py | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class CardText(Component):
"""A CardText component.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children of this component
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- className (string; optional): Often used with CSS to style elements with common properties.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- tag (string; optional): HTML tag to use for the card text, default: p
- color (string; optional): Text color, options: primary, secondary, success, warning, danger, info,
muted, light, dark, body, white, black-50, white-50."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, tag=Component.UNDEFINED, color=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'style', 'className', 'key', 'tag', 'color']
self._type = 'CardText'
self._namespace = 'dash_bootstrap_components/_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'style', 'className', 'key', 'tag', 'color']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(CardText, self).__init__(children=children, **args)
def __repr__(self):
if(any(getattr(self, c, None) is not None
for c in self._prop_names
if c is not self._prop_names[0])
or any(getattr(self, c, None) is not None
for c in self.__dict__.keys()
if any(c.startswith(wc_attr)
for wc_attr in self._valid_wildcard_attributes))):
props_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self._prop_names
if getattr(self, c, None) is not None])
wilds_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self.__dict__.keys()
if any([c.startswith(wc_attr)
for wc_attr in
self._valid_wildcard_attributes])])
return ('CardText(' + props_string +
(', ' + wilds_string if wilds_string != '' else '') + ')')
else:
return (
'CardText(' +
repr(getattr(self, self._prop_names[0], None)) + ')')
| [
"[email protected]"
] | |
9b73114f7ea4cb451dfbd939500b3c97b30e2d8a | 673440c09033912157d1c3767d5308f95755e76a | /ManachersAlgo.py | 34e2ae34f01f3af98fb2e6b72aa5e397af5e4c02 | [] | no_license | jagadeshwarrao/programming | 414193b1c538e37684378233d0532bd786d63b32 | 1b343251a8ad6a81e307d31b2025b11e0b28a707 | refs/heads/master | 2023-02-02T19:26:21.187561 | 2020-12-21T18:21:00 | 2020-12-21T18:21:00 | 274,644,612 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py |
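# Manacher's algorithm (O(n)): L[i] holds the palindrome expansion length at
# position i of a conceptual transformed string of length 2*N+1, in which
# boundary positions are interleaved with the characters of `text`; C and R
# track the centre and right edge of the right-most palindrome found so far,
# so mirrored values can be reused instead of re-expanding from scratch.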
def findLongestPalindromicString(text):
N = len(text)
if N == 0:
return
N = 2*N+1
L = [0] * N
L[0] = 0
L[1] = 1
C = 1
R = 2
i = 0
iMirror = 0
maxLPSLength = 0
maxLPSCenterPosition = 0
start = -1
end = -1
diff = -1
for i in xrange(2,N):
iMirror = 2*C-i
L[i] = 0
diff = R - i
if diff > 0:
L[i] = min(L[iMirror], diff)
try:
while ((i+L[i]) < N and (i-L[i]) > 0) and \
(((i+L[i]+1) % 2 == 0) or \
(text[(i+L[i]+1)/2] == text[(i-L[i]-1)/2])):
L[i]+=1
except Exception as e:
pass
if L[i] > maxLPSLength:
maxLPSLength = L[i]
maxLPSCenterPosition = i
if i + L[i] > R:
C = i
R = i + L[i]
start = (maxLPSCenterPosition - maxLPSLength) / 2
end = start + maxLPSLength - 1
print "LPS of string is " + text + " : ",
print text[start:end+1],
print "\n",
text1 = "babcbabcbaccba"
findLongestPalindromicString(text1)
text2 = "abaaba"
findLongestPalindromicString(text2)
text3 = "abababa"
findLongestPalindromicString(text3)
text4 = "abcbabcbabcba"
findLongestPalindromicString(text4)
text5 = "forgeeksskeegfor"
findLongestPalindromicString(text5)
text6 = "caba"
findLongestPalindromicString(text6)
text7 = "abacdfgdcaba"
findLongestPalindromicString(text7)
text8 = "abacdfgdcabba"
findLongestPalindromicString(text8)
text9 = "abacdedcaba"
findLongestPalindromicString(text9)
| [
"[email protected]"
] | |
b3cf8a2a7df390f336d567d193e3c7b558bce83b | 3e41ec4d8c82d0f8333eada932988d580d0b8e15 | /NQueens/nqueens.py | b8939c0e6f6c27dbf7862e794708c1977fc694d8 | [] | no_license | Rakavee/Algorithms | 3a68eb380b6b46f93677779c0b054e8e1c2a6dbc | 98432dbb97829658997f693d08cb8a1938317a7e | refs/heads/master | 2020-04-29T08:55:57.530330 | 2019-03-20T20:59:36 | 2019-03-20T20:59:36 | 175,895,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,970 | py |
# coding: utf-8
# Source: https://www.sanfoundry.com/python-program-solve-n-queen-problem-without-recursion/
# Source does not use linked lists.
# # Linked List implementation of Non-Recursive N-Queens Problem
# In[1]:
class Node:
def __init__(self, right=None, left=None, up=None, down=None, data=None):
self.right = right
self.left = left
self.up = up
self.down = down
self.data = data
# In[2]:
class nQueensBoard:
def __init__(self, size):
self.size = size
self.queen_columns = [] #queen_columns[r]=c where r,c denotes the position of the queens on the board.
self.nodes=[ [0]*self.size for _ in range(self.size) ]
for i in range(self.size):
for j in range(self.size):
self.nodes[i][j] = Node(data=0)
if i > 0:
self.nodes[i][j].up = self.nodes[i-1][j]
self.nodes[i-1][j].down = self.nodes[i][j]
if j > 0:
self.nodes[i][j].left = self.nodes[i][j-1]
self.nodes[i][j-1].right = self.nodes[i][j]
def isSafe(self, column):
row = len(self.queen_columns)
# check column
for qc in self.queen_columns:
if column == qc:
return False
# check diagonal
for qr, qc in enumerate(self.queen_columns):
if qc - qr == column - row:
return False
# check other diagonal
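        # (equivalent to qc + qr == column + row, i.e. the candidate square
        #  shares an anti-diagonal with an already placed queen)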
for qr, qc in enumerate(self.queen_columns):
if ((self.size - qc) - qr
== (self.size - column) - row):
return False
return True
def print_board(self):
current_node = self.nodes[0][0]
next_row = current_node.down
while(current_node):
print("| ",current_node.data, end = " ")
if(current_node.right != None):
current_node = current_node.right
elif(current_node.down != None):
current_node = next_row
next_row = current_node.down
print(' |\n')
else:
print(' |')
break
print("................................................................")
def fill_board(self):
for row in range(self.size):
for column in range(self.size):
if column == self.queen_columns[row]:
self.nodes[row][column].data = "Q"
#print('|Q', end=' ')
else:
self.nodes[row][column].data = "-"
#print('|-', end=' ')
#print("|")
#print("................................................................")
self.print_board()
def populate(self, size):
number_of_solutions = 0
row = 0
column = 0
# iterate over rows of board
while True:
while column < size:
if self.isSafe(column):
self.queen_columns.append(column)
row += 1
column = 0
break
else:
column += 1
# if could not find column to place in or if board is full
if (column == size or row == size):
if row == size:
self.fill_board()
print()
number_of_solutions += 1
self.queen_columns.pop()
row -= 1
#backtrack
try:
prev_column = self.queen_columns.pop()
except IndexError:
break
row -= 1
column = 1 + prev_column
print('Number of solutions:', number_of_solutions)
# In[3]:
n = int(input('Enter n: '))
board1 = nQueensBoard(size = n)
board1.populate(n)
| [
"[email protected]"
] | |
0fbc3c0d1ea493c7b8c03b62c9104b1f4803931c | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/ef3.py | 7ea48f160f18a4c7e1914e1c32d84b3d8df9be75 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'eF3':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
a40784738ed092668081456e1b724bb29a5780e8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2790/60589/243105.py | 230152d093784ddcfff077a0a0b37bbdb892f405 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | nm=input().split(' ')
n=int(nm[0])
m=int(nm[1])
a=list(map(int,input().split(' ')))
b=list(map(int,input().split(' ')))
ans=[]
a.sort()
for e in b:
has=False
for i in range(n):
if a[i]>e:
has=True
ans.append(i)
break
if not has:
ans.append(n)
ans=list(map(str,ans))
print(' '.join(ans)) | [
"[email protected]"
] | |
243ff6fb2682b150f69e6ebee075bb6e21f72971 | dcf887939f3d4f53ddf73e05e21114aa07b86a18 | /app/views.py | e2fa29ff792f1f4dbbac522b3d2a7176738d8bb8 | [] | no_license | Dyepitech/Epytodo | 771905e8eb1fadb3bbe9c65da8148eeed1568a9e | d91cb0788d1404f1731618c43d3c23f9b284c29b | refs/heads/master | 2022-11-18T02:55:50.471008 | 2020-07-13T13:01:05 | 2020-07-13T13:01:05 | 270,681,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | from app import app
from flask import render_template, session
from flask import jsonify
from flask import Flask, request
import pymysql as sql
from app import controller
from app import models
@app.route('/', methods=['GET'])
def route_index():
return models.main_page()
@app.route('/register', methods=['POST', 'GET'])
def route_register():
if request.method == "POST":
try:
data = controller.user_sql(json_data=request.json, form_data=request.form)
return models.create_user(data)
except:
return jsonify(error="internal error")
else:
return render_template("register.html")
@app.route('/login', methods=['POST', 'GET'])
def route_login():
if request.method == "POST":
try:
data = controller.user_sql(json_data=request.json, form_data=request.form)
return models.check_user(data)
except:
return (jsonify(error="internal error"), models.main_page())
else:
return render_template("login.html")
@app.route('/task', methods=['GET'])
def route_user_task():
try:
return models.display_all_task()
except:
return jsonify(error="internal error")
@app.route('/delete/task/<id_task>', methods=['POST'])
def route_user_task_del(id_task):
try:
return models.task_delete(id_task=id_task)
except:
return jsonify(error="internal error")
@app.route('/todo', methods=['POST', 'GET'])
def route_addtask():
if request.method == "POST":
try:
data = controller.task_sql(json_data=request.json, form_data=request.form)
return models.create_task(data=data)
except Exception as e:
print(e)
return jsonify(error="internal error")
else:
return render_template("todo.html")
| [
"[email protected]"
] | |
9962922584c412b05fbb00dc271d5fd91f46fe79 | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/.install/.backup/lib/third_party/ruamel/yaml/resolver.py | 84227072e066b8f2528baaf4a25c43995cd4061a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 14,599 | py | # coding: utf-8
from __future__ import absolute_import
import re
try:
from .error import * # NOQA
from .nodes import * # NOQA
from .compat import string_types
except (ImportError, ValueError): # for Jython
from ruamel.yaml.error import * # NOQA
from ruamel.yaml.nodes import * # NOQA
from ruamel.yaml.compat import string_types
__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver']
_DEFAULT_VERSION = (1, 2)
class ResolverError(YAMLError):
pass
class BaseResolver(object):
DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {}
yaml_path_resolvers = {}
def __init__(self):
self._loader_version = None
self.resolver_exact_paths = []
self.resolver_prefix_paths = []
@classmethod
def add_implicit_resolver(cls, tag, regexp, first):
if 'yaml_implicit_resolvers' not in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append(
(tag, regexp))
@classmethod
def add_path_resolver(cls, tag, path, kind=None):
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
# tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
# a mapping value that corresponds to a scalar key which content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
if 'yaml_path_resolvers' not in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, string_types) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (string_types, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path,
kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind,
current_node, current_index):
node_check, index_check = path[depth-1]
if isinstance(node_check, string_types):
if current_node.tag != node_check:
return
elif node_check is not None:
if not isinstance(current_node, node_check):
return
if index_check is True and current_index is not None:
return
if (index_check is False or index_check is None) \
and current_index is None:
return
if isinstance(index_check, string_types):
if not (isinstance(current_index, ScalarNode) and
index_check == current_index.value):
return
elif isinstance(index_check, int) and not isinstance(index_check,
bool):
if index_check != current_index:
return
return True
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == u'':
resolvers = self.yaml_implicit_resolvers.get(u'', [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
@property
def processing_version(self):
return None
class Resolver(BaseResolver):
pass
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:bool',
re.compile(u'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:int',
re.compile(u'''^(?:[-+]?0b[0-1_]+
|[-+]?0o?[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
list(u'-+0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:merge',
re.compile(u'^(?:<<)$'),
[u'<'])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:null',
re.compile(u'''^(?: ~
|null|Null|NULL
| )$''', re.X),
[u'~', u'n', u'N', u''])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:timestamp',
re.compile(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \\t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
(?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list(u'0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:value',
re.compile(u'^(?:=)$'),
[u'='])
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:yaml',
re.compile(u'^(?:!|&|\\*)$'),
list(u'!&*'))
# resolvers consist of
# - a list of applicable version
# - a tag
# - a regexp
# - a list of first characters to match
implicit_resolvers = [
([(1, 2)],
u'tag:yaml.org,2002:bool',
re.compile(u'''^(?:true|True|TRUE|false|False|FALSE)$''', re.X),
list(u'tTfF')),
([(1, 1)],
u'tag:yaml.org,2002:bool',
re.compile(u'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO')),
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.')),
([(1, 2)],
u'tag:yaml.org,2002:int',
re.compile(u'''^(?:[-+]?0b[0-1_]+
|[-+]?0o?[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+)$''', re.X),
list(u'-+0123456789')),
([(1, 1)],
u'tag:yaml.org,2002:int',
re.compile(u'''^(?:[-+]?0b[0-1_]+
|[-+]?0o?[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
list(u'-+0123456789')),
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:merge',
re.compile(u'^(?:<<)$'),
[u'<']),
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:null',
re.compile(u'''^(?: ~
|null|Null|NULL
| )$''', re.X),
[u'~', u'n', u'N', u'']),
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:timestamp',
re.compile(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \\t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
(?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list(u'0123456789')),
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:value',
re.compile(u'^(?:=)$'),
[u'=']),
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
([(1, 2), (1, 1)],
u'tag:yaml.org,2002:yaml',
re.compile(u'^(?:!|&|\\*)$'),
list(u'!&*')),
]
class VersionedResolver(BaseResolver):
"""
contrary to the "normal" resolver, the smart resolver delays loading
the pattern matching rules. That way it can decide to load 1.1 rules
or the (default) 1.2 that no longer support octal without 0o, sexagesimals
and Yes/No/On/Off booleans.
"""
def __init__(self, version=None):
BaseResolver.__init__(self)
self._loader_version = self.get_loader_version(version)
self._version_implicit_resolver = {}
def add_version_implicit_resolver(self, version, tag, regexp, first):
if first is None:
first = [None]
impl_resolver = self._version_implicit_resolver.setdefault(version, {})
for ch in first:
impl_resolver.setdefault(ch, []).append((tag, regexp))
def get_loader_version(self, version):
if version is None or isinstance(version, tuple):
return version
if isinstance(version, list):
return tuple(version)
# assume string
return tuple(map(int, version.split(u'.')))
@property
def resolver(self):
"""
select the resolver based on the version we are parsing
"""
version = self.processing_version
if version not in self._version_implicit_resolver:
for x in implicit_resolvers:
if version in x[0]:
self.add_version_implicit_resolver(version, x[1], x[2], x[3])
return self._version_implicit_resolver[version]
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == u'':
resolvers = self.resolver.get(u'', [])
else:
resolvers = self.resolver.get(value[0], [])
resolvers += self.resolver.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
@property
def processing_version(self):
try:
version = self.yaml_version
except AttributeError:
# dumping
version = self.use_version
if version is None:
version = self._loader_version
if version is None:
version = _DEFAULT_VERSION
return version
| [
"[email protected]"
] |